diff --git a/cmake/BuildLibEV.cmake b/cmake/BuildLibEV.cmake
index 3adb93ba5159933026ba0bfc2c26e97d8b4e7d68..c9e3a90bdce956dabcd346e6e233f4b70e2d4d34 100644
--- a/cmake/BuildLibEV.cmake
+++ b/cmake/BuildLibEV.cmake
@@ -29,7 +29,7 @@ macro(libev_build)
         #
         # Enable Linux-specific event notification API (man inotify)
         set(ev_compile_flags "${ev_compile_flags} -DEV_USE_INOTIFY")
-    elseif (TARGET_OS_FREEBSD)
+    elseif (TARGET_OS_FREEBSD OR TARGET_OS_DARWIN)
         #
         # On FreeBSD build libev loop on top of
         set(ev_compile_flags "${ev_compile_flags} -DEV_USE_KQUEUE")
diff --git a/cmake/luajit.cmake b/cmake/luajit.cmake
index ff971986e0ec8263eede1d23da7ed312f49627e8..d6f8a508df5cc90f2c688b5854cfa8d44fe05ed3 100644
--- a/cmake/luajit.cmake
+++ b/cmake/luajit.cmake
@@ -158,7 +158,7 @@ macro(luajit_build)
         set (luajit_xcflags ${luajit_xcflags}
             -DLUA_USE_APICHECK -DLUA_USE_ASSERT)
     else ()
-        set (luajit_cсopt -O2)
+        set (luajit_ccopt -O2)
         set (luajit_ccdbebug "")
     endif()
     # Pass sysroot settings on OSX
diff --git a/doc/sphinx/book/box/box_space.rst b/doc/sphinx/book/box/box_space.rst
index 23c1ac28329fb7edcfd42b860ca5c1317234c44c..e64343628a84977c25bc4cfb28537dd6d9c420c4 100644
--- a/doc/sphinx/book/box/box_space.rst
+++ b/doc/sphinx/book/box/box_space.rst
@@ -339,10 +339,10 @@ A list of all ``box.space`` functions follows, then comes a list of all
         |
         | :codenormal:`-- In the following update ...`
         | :codenormal:`--    The idea is to delete field[2], then subtract 3 from field[3], but ...`
-        | :codenormal:`--    after the delete, there is a renumbering -- so field[3] becomes field[2]`
+        | :codenormal:`--    after the delete, there is a renumbering, so field[3] becomes field[2]`
         | :codenormal:`--    before we subtract 3 from it, and that's why the seventh argument is 2 not 3.`
         | :codenormal:`--    Therefore, after the following update, field[1] = 999, field[2] = 0.`
-        | :codenormal:`box.space.tester:update({999}, {{'-- ', 2, 1}, {'-', 2, 3}})`
+        | :codenormal:`box.space.tester:update({999}, {{'#', 2, 1}, {'-', 2, 3}})`
         |
         | :codenormal:`-- In the following update ...`
         | :codenormal:`--    We're making a long string so that splice will work in the next example.`
diff --git a/doc/sphinx/book/i1.png b/doc/sphinx/book/i1.png
new file mode 100644
index 0000000000000000000000000000000000000000..ddc32183f1954b73257d4089c4f9aeb708062dc9
Binary files /dev/null and b/doc/sphinx/book/i1.png differ
diff --git a/doc/sphinx/book/i10.png b/doc/sphinx/book/i10.png
new file mode 100644
index 0000000000000000000000000000000000000000..3289017e9abcfe4e4db8fc065103df0b941ba2e6
Binary files /dev/null and b/doc/sphinx/book/i10.png differ
diff --git a/doc/sphinx/book/i12.png b/doc/sphinx/book/i12.png
new file mode 100644
index 0000000000000000000000000000000000000000..b2c8cdd9d6c0d590b303d55e94d84d43a7ced795
Binary files /dev/null and b/doc/sphinx/book/i12.png differ
diff --git a/doc/sphinx/book/i13.png b/doc/sphinx/book/i13.png
new file mode 100644
index 0000000000000000000000000000000000000000..fe2d4745f96f8d179ba86469834e3a6cdac3c655
Binary files /dev/null and b/doc/sphinx/book/i13.png differ
diff --git a/doc/sphinx/book/i14.png b/doc/sphinx/book/i14.png
new file mode 100644
index 0000000000000000000000000000000000000000..a6c36a6896b251a6b20acb948ca9d6abdb1d9baa
Binary files /dev/null and b/doc/sphinx/book/i14.png differ
diff --git a/doc/sphinx/book/i2.png b/doc/sphinx/book/i2.png
new file mode 100644
index 0000000000000000000000000000000000000000..534eed72f9057079d3e2ee655eba44bd0764b2e7
Binary files /dev/null and b/doc/sphinx/book/i2.png differ
diff --git a/doc/sphinx/book/i3.png b/doc/sphinx/book/i3.png
new file mode 100644
index 0000000000000000000000000000000000000000..317b42bc1ca22834fb07eb80a1fbd649c4d03392
Binary files /dev/null and b/doc/sphinx/book/i3.png differ
diff --git a/doc/sphinx/book/i4.png b/doc/sphinx/book/i4.png
new file mode 100644
index 0000000000000000000000000000000000000000..5ca9c5f5f06bee4e100d6dbf57a14f8b7031ef19
Binary files /dev/null and b/doc/sphinx/book/i4.png differ
diff --git a/doc/sphinx/book/i5.png b/doc/sphinx/book/i5.png
new file mode 100644
index 0000000000000000000000000000000000000000..352fd153d98aded378823b9a326c23bc286a1f9d
Binary files /dev/null and b/doc/sphinx/book/i5.png differ
diff --git a/doc/sphinx/book/i6.png b/doc/sphinx/book/i6.png
new file mode 100644
index 0000000000000000000000000000000000000000..851c07580d4ed0cefdf63821c649c6b1f2b5272b
Binary files /dev/null and b/doc/sphinx/book/i6.png differ
diff --git a/doc/sphinx/book/i7.png b/doc/sphinx/book/i7.png
new file mode 100644
index 0000000000000000000000000000000000000000..6a1770690a584527d4d9e3ce0dc194c56264fa72
Binary files /dev/null and b/doc/sphinx/book/i7.png differ
diff --git a/doc/sphinx/book/i8.png b/doc/sphinx/book/i8.png
new file mode 100644
index 0000000000000000000000000000000000000000..b54100940ae38d139d9b1b34ec45fd52a583ad7f
Binary files /dev/null and b/doc/sphinx/book/i8.png differ
diff --git a/doc/sphinx/book/i9.png b/doc/sphinx/book/i9.png
new file mode 100644
index 0000000000000000000000000000000000000000..e817ffc8b6aca6909f041fe39d35207553828d8c
Binary files /dev/null and b/doc/sphinx/book/i9.png differ
diff --git a/doc/sphinx/book/index.rst b/doc/sphinx/book/index.rst
index 4966cbdf13fb9b7799671a7e0f1b62131d7670a3..ba083f5fecddec037d62c368f927b131edbfe8c3 100644
--- a/doc/sphinx/book/index.rst
+++ b/doc/sphinx/book/index.rst
@@ -18,3 +18,4 @@
     app_b_proctitle
     app_c_lua_tutorial
     app_d_plugins
+    sophia
diff --git a/doc/sphinx/book/sophia.rst b/doc/sphinx/book/sophia.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f3d5357c90f4ace42c9e184b9ec0cf71c1776405
--- /dev/null
+++ b/doc/sphinx/book/sophia.rst
@@ -0,0 +1,277 @@
+.. _sophia:
+
+-------------------------------------------------------------------------------
+                        Appendix E. sophia
+-------------------------------------------------------------------------------
+
+Sophia's features are:
+
+* Full ACID compliance
+* Multi-Version Concurrency Control (MVCC)
+* Pure Append-Only
+* Multi-threaded (Client access and Engine scalability)
+* Multi-databases support (Single environment and WAL)
+* Multi-Statement and Single-Statement Transactions (Snapshot Isolation (SI), multi-databases)
+* Asynchronous or synchronous transaction execution (Callback triggered versus blocking)
+* Separate storage formats: key-value (Default), or document (Keys are part of value)
+* Update without read
+* Consistent Cursors
+* Prefix search
+* Point-in-Time Snapshots
+* Versional database creation and asynchronous shutdown/drop
+* Asynchronous Online/Hot Backup
+* Compression (Per region, both lz4 and zstd are supported)
+* Metadata Compression (By default)
+* Key Compression (Compress key duplicates, including suffixes)
+* Easy to use (Minimalist API)
+* Easy to integrate (Native support of using as storage engine)
+* Easy to write bindings (Very FFI-friendly, API designed to be stable in future)
+* Easy to build in (Amalgamated, compiles into two C files)
+* Event loop friendly
+* Zero-Configuration (Tuned by default)
+* Implemented as a small library *written in C* with zero dependencies
+* BSD Licensed
+
+It is appropriate for databases that cannot fit in memory, where access via secondary keys is not required.
+
+In sophia terminology: |br|
+There is one Environment. |br|
+An Environment has N Databases -- a sophia database is like a Tarantool `space`. |br|
+A Database has N Nodes. |br|
+A Node has one Node File. |br|
+A Node File has N Branches. |br|
+A Branch has N Regions -- a sophia Region is like a B-tree `page`. |br|
+A Region has keys and values -- a sophia key-value is like a Tarantool `tuple`.
+
+A key and its associated value are together, so when one accesses a key one gets the whole tuple.
+In other words, in sophia the data is stored in the index.
+There are up to two in-memory copies of an index, as well as the copy in the Node File.
+
+For operations that insert or update tuples -- called Set operations in sophia --
+sophia makes changes to in-memory copies of the index, and writes
+to Tarantool's Write-ahead Log. A scheduler assigns tasks to multiple
+background threads for transferring index data from memory to disk,
+and for reorganizing Branches. To support transactions, Set operations
+can be delayed until an explicit commit. If multiple users access the
+same tuples simultaneously, the concurrency control method is `MVCC`_
+and the isolation level is `Snapshot`_.
+
+.. _MVCC: https://en.wikipedia.org/wiki/Multiversion_concurrency_control
+.. _Snapshot: https://en.wikipedia.org/wiki/Snapshot_isolation
+
+
+Formally, in terms of disk accesses, sophia has the following algorithmic complexity: |br|
+For **Set** the worst case is O(1) append-only key writes to the Write-Ahead Log  + in-memory node index searches + in-memory index inserts |br|
+For **Delete** the worst case is O(1) key writes and in-memory index inserts (the same as **Set**) |br|
+For **Get** the worst case is `amortized`_ O(max\_branch\_count\_per\_node) random Region reads from a single node file, which itself does in-memory index search + in-memory Region search |br|
+For **Range** queries, the worst case of full Database scan is amortized O(total\_Region\_count) + in-memory key-index searches for each Node
+
+.. _amortized: https://en.wikipedia.org/wiki/Amortized_analysis
+
+In the rest of this section, to illustrate internals, we will discuss this example: |br|
+1. filling an empty database with one million tuples (we'll call them "keys" to emphasize the indexed nature), |br|
+2. reading all stored tuples in the original order.
+
+INSERTING THE FIRST 200,000 KEYS: |br|
+During the first 200,000 Set operations, inserted keys first go to the
+in-memory index. To maintain persistence, information about each Set
+operation is written to Tarantool's Write-ahead Log.
+
+.. image:: i1.png
+    :align: center
+    :alt: i1.png
+
+At this point we have keys in an in-memory index
+and records in the Write-ahead Log.
+
+INSERTING THE NEXT 300,000 KEYS: |br|
+As the in-memory index becomes too large for available memory,
+the index must be copied from memory to disk. The on-disk copy of
+the in-memory index is called a Branch.
+To save the Branch, a new file is created,
+the Node File. We will call it **db file** for this example.
+
+The scheduler wakes a worker thread in the background,
+a Branch Creation Thread. The thread creates a second
+in-memory index. If there are Set operations taking place
+while the thread is working, their contention effect will
+be small because they will operate on the second in-memory
+index.
+
+.. image:: i2.png
+    :align: center
+    :alt: i2.png
+
+
+When the Branch Creation Thread finishes the task, the
+first in-memory index is freed.
+
+.. image:: i3.png
+    :align: center
+    :alt: i3.png
+
+INSERTING THE NEXT 200,000 KEYS: |br|
+Several times, the in-memory index becomes too large
+and a Branch Creation Thread transfers the keys to a Branch.
+The Branches have been appended to the end of db file.
+The number of created Branches becomes large.
+
+
+.. image:: i4.png
+    :align: center
+    :alt: i4.png
+
+There is a user-settable maximum number of Branches
+per Node. When the number of Branches reaches this maximum,
+the sophia scheduler wakes a **Compaction Thread** 
+for the db file. The Compaction Thread merges the keys
+in all the Branches, and creates one or more new db files.
+
+.. image:: i5.png
+    :align: center
+    :alt: i5.png
+
+
+Now there are multiple pairs of in-memory indexes, and each
+pair has an associated db file. The combination of the
+in-memory indexes and the db file is called a **Node**,
+and the db file is called a **Node File**.
+
+.. image:: i6.png
+    :align: center
+    :alt: i6.png
+
+
+Thus the contents of a Node are: a range of sorted key values,
+stored in Branches of a Node File and (when necessary) in memory.
+Since the ranges do not overlap, each Node can be handled independently.
+Therefore, while one of the background threads is working on
+Node 1, another background thread can be working on Node 2,
+without contention. That means that all the background operations
+-- Branch Creation, Compaction, Garbage Collection, and Backup --
+can take place in parallel on multiple threads.
+
+The foregoing explanation will now be repeated with different wording.
+
+Before the Compaction there was one Node, which was created automatically
+when the Database was initialized. The Node had: |br|
+(a) an in-memory index with some keys in it, |br|
+(b) a Node File with several Branches, |br|
+(c) a Write-Ahead Log file recording the Set operations, in the order they happened.
+
+The number of Branches becomes too big, so the sophia scheduler
+starts the Compaction Thread and creates two new Nodes.
+
+.. image:: i7.png
+    :align: center
+    :alt: i7.png
+
+So, each of the two new Node Files contains half of the keys that were in
+the original Node. The Node's in-memory indexes are split in the same way.
+
+After the splitting, sophia must take into
+account that: while the Compaction was going on in the background,
+there might have been more Set operations taking place in parallel. These Set
+operations would have changed one of the in-memory indexes,
+and these changes too will be merged.
+
+When the Compaction Thread finishes, the original Node is deleted, and
+information about the new Nodes is inserted into an in-memory **Node Index**.
+
+.. image:: i8.png
+    :align: center
+    :alt: i8.png
+
+
+This Node Index is used for all Set operations and all searches.
+Since the Node Index has the minimum and maximum key values that
+are in each Node, it is straightforward to scan it to find what
+Node would contain a particular key value.
+
+.. image:: i9.png
+    :align: center
+    :alt: i9.png
+
+INSERTING THE LAST 300,000 KEYS: |br|
+The final 300,000 Set operations take place; the background
+threads continue to create new Branches and do more Compactions.
+After the millionth insertion, the Database has four Nodes.
+
+.. image:: i10.png
+    :align: center
+    :alt: i10.png
+
+The inserting is done. Now, because the words "memory" and "disk" have appeared
+in this explanation several times, here are a few words about how
+sophia is designed to use these resources most efficiently. |br|
+(1) If there is more memory available, then Branch Creation
+and Compaction will be less frequent, and there will be
+fewer disk accesses. |br|
+(2) The best sophia performance will occur if
+there is no setting of a memory limit, but this must be
+balanced against other considerations, such as requirements
+for the memtx storage engine. If there is a setting of a
+memory limit, the sophia scheduler will give priority to
+the Nodes that have the largest in-memory indexes, so that
+the largest memory blocks are freed first. |br|
+(3) To make the most of hard drives and Flash, sophia will delay
+operations that require disk access (except the writing
+of the Write-ahead Log which is specially tunable), so
+that the accesses are done in large sequential blocks. |br|
+(4) Overwriting does not occur; sophia is an "append-only" engine.
+
+READING THE MILLION KEYS: |br|
+We will now start to read the million keys
+in the order that they were inserted, which was random.
+
+.. image:: i12.png
+    :align: center
+    :alt: i12.png
+
+
+During the Get (search), sophia first finds the correct
+Node by looking in the Node Index.
+Then it searches the Node's first in-memory index,
+and/or the Node's second in-memory index, and/or
+each Branch of the Node, starting from the end
+of the Node File.
+
+Remember that a Branch is divided into Regions,
+which are like what would be called "pages" or
+"blocks" in a B-tree. For each Branch, there
+is a list of the Regions and their minimum/maximum
+key values -- the Region Index -- as well as some metadata.
+
+
+.. image:: i13.png
+    :align: center
+    :alt: i13.png
+
+
+Region Indexes are loaded into memory when the Database is opened.
+Since the Database's Node Index and the Region Indexes are normally
+in-memory, searching and retrieving a tuple might require only zero
+or one disk accesses. However, when memory is limited and there are
+many Branches, search time may rise.
+For each additional Branch there is a possible additional disk
+access during a search. Also, it is impossible to maintain memory
+limits without doing a Branch Creation process,
+because new Set operations might occur more quickly
+than the Compaction process can run.
+
+
+.. image:: i14.png
+    :align: center
+    :alt: i14.png
+
+
+Sophia is read optimized. It is very likely that the
+most recently created Branches (hot data) will be in the file system cache.
+The scheduler will give priority to the Nodes which have the
+largest in-memory indexes and the most Branches.
+
+The scheduler may also try to arrange that a Node will have
+only one Branch, which will ensure the average number of disk
+seeks for each search is O(1).
+
+
diff --git a/doc/sphinx/dev_guide/box-protocol.rst b/doc/sphinx/dev_guide/box-protocol.rst
index 121ee6c354a267ff5fb16bb31db4acb39b717ed4..f3a4bff721162da7923c62e05a932c552207c6ba 100644
--- a/doc/sphinx/dev_guide/box-protocol.rst
+++ b/doc/sphinx/dev_guide/box-protocol.rst
@@ -109,6 +109,7 @@ Let's list them here too:
     <function_name> ::= 0x22
     <username>      ::= 0x23
     <expression>    ::= 0x27
+    <def_tuple>     ::= 0x28
     <data>          ::= 0x30
     <error>         ::= 0x31
 
@@ -124,6 +125,7 @@ Let's list them here too:
     <call>    ::= 0x06
     <auth>    ::= 0x07
     <eval>    ::= 0x08
+    <upsert>  ::= 0x09
     -- Admin command codes
     <ping>    ::= 0x40
 
@@ -378,6 +380,48 @@ It's an error to specify an argument of a type that differs from expected type.
                         MP_MAP
 
 
+* UPSERT: CODE - 0x09
+  Update the tuple if it is found, otherwise try to insert the tuple. The primary index is always used for the key.
+
+  
+  
+.. code-block:: bash
+
+    UPSERT BODY:
+    
+    +==================+==========================+
+    |                  |                          |
+    |   0x10: SPACE_ID |   0x20: KEY              |
+    | MP_INT: MP_INT   | MP_INT: MP_ARRAY         |
+    |                  |                          |
+    +==================+==========================+
+    |                  |             +~~~~~~~~~~+ |
+    |                  |             |          | |
+    |                  | (DEF_TUPLE) |    OP    | |
+    |   0x21: TUPLE    |       0x28: |          | |
+    | MP_INT: MP_ARRAY |     MP_INT: +~~~~~~~~~~+ |
+    |                  |               MP_ARRAY   |
+    +==================+==========================+
+                    MP_MAP
+  
+    The operations structure is the same as for the UPDATE operation.
+       0           2
+    +-----------+==========+==========+
+    |           |          |          |
+    |    OP     | FIELD_NO | ARGUMENT |
+    | MP_FIXSTR |  MP_INT  |  MP_INT  |
+    |           |          |          |
+    +-----------+==========+==========+
+                  MP_ARRAY
+    
+    Supported operations:
+    '+' - add a value to a numeric field. If the field is not numeric, it's changed to 0 first. If the field does not exist, the operation is skipped. There is no error in case of overflow either, the value simply wraps around in C style. The range of the integer is the MsgPack range: from -2^63 to 2^64-1
+    '-' - same as the previous, but subtract a value
+    '=' - assign a field to a value. The field must exist; if it does not exist, the operation is skipped.
+    '!' - insert a field. It's only possible to insert a field if this creates no nil "gaps" between fields. E.g. it's possible to add a field between existing fields or as the last field of the tuple.
+    '#' - delete a field. If the field does not exist, the operation is skipped. It's not possible to change a part of the primary key with update operations (this is validated before performing the upsert).
+
+
 ================================================================================
                          Response packet structure
 ================================================================================
diff --git a/doc/sphinx/reference/socket.rst b/doc/sphinx/reference/socket.rst
index 9ad7f28849959233df2b3b048b6e982dd3b4d47f..6d7ce57a5747f9e36e90123c0690ab2f37284065 100644
--- a/doc/sphinx/reference/socket.rst
+++ b/doc/sphinx/reference/socket.rst
@@ -336,7 +336,6 @@ the function invocations will look like ``sock:function_name(...)``.
             * SO_ACCEPTCONN
             * SO_BINDTODEVICE
             * SO_BROADCAST
-            * SO_BSDCOMPAT
             * SO_DEBUG
             * SO_DOMAIN
             * SO_ERROR
diff --git a/doc/www/content/newsite/download.yml.in b/doc/www/content/newsite/download.yml.in
index d9bdd48ed82543fd47823a51550138ad4490d207..897604a3b8009d6afeeedf1af6b164eb47b29f0a 100644
--- a/doc/www/content/newsite/download.yml.in
+++ b/doc/www/content/newsite/download.yml.in
@@ -136,7 +136,7 @@ blocks :
       - format : rst
         content: |
           - PHP PECL driver,   `<https://github.com/tarantool/tarantool-php>`_
-          - PHP PEAR driver,   `<https://github.com/tarantool-php/client>`_
+          - Pure PHP driver,   `<https://github.com/tarantool-php/client>`_
           - Java driver,       `Maven repository`_
           - Python driver,     `<http://pypi.python.org/pypi/tarantool>`_
           - Ruby driver,       `<https://github.com/funny-falcon/tarantool16-ruby>`_
diff --git a/src/lua/bsdsocket.cc b/src/lua/bsdsocket.cc
index 75c9fa4b7b56af7050263dd95040e2595c62bae3..db5a6866e390ab1a1838952e57d05ad352b02010 100644
--- a/src/lua/bsdsocket.cc
+++ b/src/lua/bsdsocket.cc
@@ -186,9 +186,6 @@ static const struct { char name[32]; int value, type, rw; } so_opts[] = {
 #ifdef SO_BROADCAST
 	{"SO_BROADCAST",	SO_BROADCAST,		1,	1, },
 #endif
-#ifdef SO_BSDCOMPAT
-	{"SO_BSDCOMPAT",	SO_BSDCOMPAT,		1,	1, },
-#endif
 #ifdef SO_DEBUG
 	{"SO_DEBUG",		SO_DEBUG,		1,	1, },
 #endif
@@ -254,15 +251,9 @@ static const struct { char name[32]; int value, type, rw; } so_opts[] = {
 #endif
 #ifdef SO_PROTOCOL
 	{"SO_PROTOCOL",		SO_PROTOCOL,		1,	0, },
-#else
-#define SO_PROTOCOL	38
 #endif
 
-#ifdef SO_TYPE
 	{"SO_TYPE",		SO_TYPE,		1,	0, },
-#else
-#define SO_TYPE		3
-#endif
 
 	{"",			0,			0,	0, }
 };
@@ -721,68 +712,66 @@ lbox_bsdsocket_getaddrinfo(struct lua_State *L)
 	return 1;
 }
 
-static void
-lbox_bsdsocket_update_proto_type(struct lua_State *L, int fh)
-{
-	if (lua_isnil(L, -1))
-		return;
-
-	int save_errno = errno;
-
-	int value;
-	socklen_t len = sizeof(value);
-
-	if (getsockopt(fh, SOL_SOCKET, SO_PROTOCOL, &value, &len) == 0) {
-		lua_pushliteral(L, "protocol");
-		lbox_bsdsocket_push_protocol(L, value);
-		lua_rawset(L, -3);
-	}
-
-	len = sizeof(value);
-	if (getsockopt(fh, SOL_SOCKET, SO_TYPE, &value, &len) == 0) {
-		lua_pushliteral(L, "type");
-		lbox_bsdsocket_push_sotype(L, value);
-		lua_rawset(L, -3);
-	}
-	errno = save_errno;
-
-}
-
 static int
-lbox_bsdsocket_soname(struct lua_State *L)
+lbox_bsdsocket_name(struct lua_State *L,
+                    int (*getname_func) (int, struct sockaddr *, socklen_t *))
 {
 	lua_pushvalue(L, 1);
 	int fh = lua_tointeger(L, -1);
 	lua_pop(L, 1);
 
-
 	struct sockaddr_storage addr;
 	socklen_t len = sizeof(addr);
-	if (getsockname(fh, (struct sockaddr *)&addr, &len) != 0) {
+	if (getname_func(fh, (struct sockaddr *)&addr, &len) != 0) {
 		lua_pushnil(L);
 		return 1;
 	}
 	lbox_bsdsocket_push_addr(L, (const struct sockaddr *)&addr, len);
-	lbox_bsdsocket_update_proto_type(L, fh);
+	if (lua_isnil(L, -1))
+		return 1;
+
+	int type;
+	len = sizeof(type);
+	if (getsockopt(fh, SOL_SOCKET, SO_TYPE, &type, &len) == 0) {
+		lua_pushliteral(L, "type");
+		lbox_bsdsocket_push_sotype(L, type);
+		lua_rawset(L, -3);
+	} else {
+		type = -1;
+	}
+
+	int protocol = 0;
+#ifdef SO_PROTOCOL
+	len = sizeof(protocol);
+	if (getsockopt(fh, SOL_SOCKET, SO_PROTOCOL, &protocol, &len) == 0) {
+		lua_pushliteral(L, "protocol");
+		lbox_bsdsocket_push_protocol(L, protocol);
+		lua_rawset(L, -3);
+	}
+#else
+	if (addr.ss_family == AF_INET || addr.ss_family == AF_INET6) {
+		if (type == SOCK_STREAM)
+			protocol = IPPROTO_TCP;
+		if (type == SOCK_DGRAM)
+			protocol = IPPROTO_UDP;
+	}
+	lua_pushliteral(L, "protocol");
+	lbox_bsdsocket_push_protocol(L, protocol);
+	lua_rawset(L, -3);
+#endif
 	return 1;
 }
 
 static int
-lbox_bsdsocket_peername(struct lua_State *L)
+lbox_bsdsocket_soname(struct lua_State *L)
 {
-	lua_pushvalue(L, 1);
-	int fh = lua_tointeger(L, -1);
-	lua_pop(L, 1);
+	return lbox_bsdsocket_name(L, getsockname);
+}
 
-	struct sockaddr_storage addr;
-	socklen_t len = sizeof(addr);
-	if (getpeername(fh, (struct sockaddr *)&addr, &len) != 0) {
-		lua_pushnil(L);
-		return 1;
-	}
-	lbox_bsdsocket_push_addr(L, (const struct sockaddr *)&addr, len);
-	lbox_bsdsocket_update_proto_type(L, fh);
-	return 1;
+static int
+lbox_bsdsocket_peername(struct lua_State *L)
+{
+	return lbox_bsdsocket_name(L, getpeername);
 }
 
 static int
diff --git a/test-run b/test-run
index 6b4dcd67126709a92e722a62c8a87a6ec90ceb0f..393cdcb4d73728beb831ee6c1e44064250b25b39 160000
--- a/test-run
+++ b/test-run
@@ -1 +1 @@
-Subproject commit 6b4dcd67126709a92e722a62c8a87a6ec90ceb0f
+Subproject commit 393cdcb4d73728beb831ee6c1e44064250b25b39
diff --git a/test/box/bsdsocket.result b/test/box/bsdsocket.result
index 9491d7b2a8d9acdce4ff3ebffd57b640ff51ee60..1e2f905354f43363164f67f8715e1deb50034bbc 100644
--- a/test/box/bsdsocket.result
+++ b/test/box/bsdsocket.result
@@ -39,22 +39,10 @@ socket('PF_INET', 'SOCK_STREAM', 'tcp121222');
 s = socket('PF_INET', 'SOCK_STREAM', 'tcp')
 ---
 ...
-s:wait(.01)
----
-- RW
-...
 type(s)
 ---
 - table
 ...
-s:errno()
----
-- 0
-...
-type(s:error())
----
-- nil
-...
 -- Invalid arguments
 --# setopt delimiter ';'
 for k in pairs(getmetatable(s).__index) do
@@ -270,10 +258,6 @@ s:error()
 ---
 - null
 ...
-s:setsockopt('SOL_SOCKET', 'SO_BSDCOMPAT', false)
----
-- true
-...
 s:setsockopt('SOL_SOCKET', 'SO_DEBUG', false)
 ---
 - true
@@ -319,10 +303,6 @@ s:linger()
 - false
 - 1
 ...
-s:shutdown('R')
----
-- true
-...
 s:close()
 ---
 - true
@@ -345,14 +325,6 @@ s:listen(128)
 sc = socket('PF_INET', 'SOCK_STREAM', 'tcp')
 ---
 ...
-sc:writable()
----
-- true
-...
-sc:readable()
----
-- true
-...
 sc:sysconnect('127.0.0.1', 3457) or errno() == errno.EINPROGRESS
 ---
 - true
@@ -631,10 +603,6 @@ sc:nonblock(true)
 ---
 - true
 ...
-sc:readable()
----
-- true
-...
 sc:sysconnect('127.0.0.1', 3458) or errno() == errno.EINPROGRESS
 ---
 - true
@@ -859,11 +827,6 @@ s:bind('127.0.0.1', port)
 ---
 - true
 ...
-socket.tcp_connect('127.0.0.1', port), errno() == errno.ECONNREFUSED
----
-- null
-- true
-...
 s:listen()
 ---
 - true
@@ -1484,14 +1447,6 @@ os.remove(path)
 s = socket('AF_UNIX', 'SOCK_STREAM', 0)
 ---
 ...
-x = s:wait()
----
-...
--- waiters is map
-s.waiters
----
-- {}
-...
 -- check __serialize hook
 json.decode(json.encode(s)).fd == s:fd()
 ---
diff --git a/test/box/bsdsocket.test.lua b/test/box/bsdsocket.test.lua
index 6fd56f1e99b7979434ab3f1f077eae93a51ac7e9..1a431295f0d67dc9bbcfaa495acf2de5f892e867 100644
--- a/test/box/bsdsocket.test.lua
+++ b/test/box/bsdsocket.test.lua
@@ -13,10 +13,7 @@ type(socket)
 socket('PF_INET', 'SOCK_STREAM', 'tcp121222');
 
 s = socket('PF_INET', 'SOCK_STREAM', 'tcp')
-s:wait(.01)
 type(s)
-s:errno()
-type(s:error())
 -- Invalid arguments
 --# setopt delimiter ';'
 for k in pairs(getmetatable(s).__index) do
@@ -89,7 +86,6 @@ sevres[1].host
 s:setsockopt('SOL_SOCKET', 'SO_BROADCAST', false)
 s:getsockopt('SOL_SOCKET', 'SO_TYPE')
 s:error()
-s:setsockopt('SOL_SOCKET', 'SO_BSDCOMPAT', false)
 s:setsockopt('SOL_SOCKET', 'SO_DEBUG', false)
 s:getsockopt('SOL_SOCKET', 'SO_DEBUG')
 s:setsockopt('SOL_SOCKET', 'SO_ACCEPTCONN', 1)
@@ -101,7 +97,6 @@ s:linger(true, 1)
 s:linger()
 s:linger(false, 1)
 s:linger()
-s:shutdown('R')
 s:close()
 
 s = socket('PF_INET', 'SOCK_STREAM', 'tcp')
@@ -111,8 +106,6 @@ s:listen(128)
 
 sc = socket('PF_INET', 'SOCK_STREAM', 'tcp')
 
-sc:writable()
-sc:readable()
 sc:sysconnect('127.0.0.1', 3457) or errno() == errno.EINPROGRESS
 sc:writable(10)
 sc:write('Hello, world')
@@ -208,7 +201,6 @@ sc = socket('PF_INET', 'SOCK_STREAM', 'tcp')
 sc ~= nil
 sc:getsockopt('SOL_SOCKET', 'SO_ERROR')
 sc:nonblock(true)
-sc:readable()
 sc:sysconnect('127.0.0.1', 3458) or errno() == errno.EINPROGRESS
 string.match(tostring(sc), ', peer') == nil
 sc:writable()
@@ -279,7 +271,6 @@ socket.tcp_connect('127.0.0.1', 80, 0.00000000001)
 port = 35490
 s = socket('AF_INET', 'SOCK_STREAM', 'tcp')
 s:bind('127.0.0.1', port)
-socket.tcp_connect('127.0.0.1', port), errno() == errno.ECONNREFUSED
 s:listen()
 sc, e = socket.tcp_connect('127.0.0.1', port), errno()
 sc ~= nil
@@ -503,9 +494,6 @@ os.remove(path)
 
 -- Test serializers with sockets
 s = socket('AF_UNIX', 'SOCK_STREAM', 0)
-x = s:wait()
--- waiters is map
-s.waiters
 -- check __serialize hook
 json.decode(json.encode(s)).fd == s:fd()
 yaml.decode(yaml.encode(s)).fd == s:fd()