diff --git a/README.md b/README.md
index 6fff09fb017f203294c4e8d52a7eaf9285b9ab5d..821df0a376957811e09a9c10494f08ab5608fb0d 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ The build depends on the following external libraries:
 Please follow these steps to compile Tarantool:
 
     # If compiling from git
-    tarantool $ git submodule update --init
+    tarantool $ git submodule update --init --recursive
 
     tarantool $ cmake .
     tarantool $ make
diff --git a/doc/www/content/newsite/benchmark.yml b/doc/www/content/newsite/benchmark.yml
index ecc7e23f3068ad6935158be7be36561d5f14e4d6..8a5cbd35c1018329a4ea02f89393477a61b3a366 100644
--- a/doc/www/content/newsite/benchmark.yml
+++ b/doc/www/content/newsite/benchmark.yml
@@ -35,10 +35,8 @@ blocks  :
 
       However, to compare with the rest of the world,
       a standardized benchmarking kit is more appropriate.
-      This is why Yahoo! Cloud Serving Benchmark |copy| was
-      used to produce the charts below. A fork of YCSB with
-      Tarantool support is available
-      `here <https://github.com/bigbes92/YCSB>`_. Since YCSB
+      This is why `Yahoo! Cloud Serving Benchmark |copy| <http://github.com/brianfrankcooper/YCSB>`_
+      was used to produce the charts below. Since YCSB
       was developed to compare cloud key/value servers, it
       provides a very narrow view at performance of a tested
       server. For example, performance of secondary keys or
@@ -80,5 +78,5 @@ blocks  :
       averaged over 8 consecutive test runs.
 
       Configuration files for the tested systems can be found
-      `on it's Github page <https://github.com/bigbes92/ycsb-expand-db/tree/master/confs>`_.
+      `in the ycsb-expand-db repository <https://github.com/bigbes/ycsb-expand-db/tree/master/confs>`_ on GitHub.
 ...
diff --git a/doc/www/content/newsite/download.yml.in b/doc/www/content/newsite/download.yml.in
index ae3f6dd771b02b1f93281493d44d38e9a16308ca..d0019a454549b3baf6f2bb845b08dd6fd205e306 100644
--- a/doc/www/content/newsite/download.yml.in
+++ b/doc/www/content/newsite/download.yml.in
@@ -135,7 +135,8 @@ blocks :
     - - "Connectors"
       - format : rst
         content: |
-          - PHP driver,        `<https://github.com/tarantool/tarantool-php>`_
+          - PHP PECL driver,   `<https://github.com/tarantool/tarantool-php>`_
+          - PHP PEAR driver,   `<https://github.com/tarantool-php/client>`_
           - Java driver,       `Maven repository`_
           - Python driver,     `<http://pypi.python.org/pypi/tarantool>`_
           - Ruby driver,       `<https://github.com/funny-falcon/tarantool16-ruby>`_
diff --git a/src/assoc.h b/src/assoc.h
index 5ca118631c565610274510d2e25d13d4534f7fdf..e42bcd65368e9905f0e69904b0e362f98f865ac8 100644
--- a/src/assoc.h
+++ b/src/assoc.h
@@ -35,6 +35,8 @@ extern "C" {
 #define MH_UNDEF
 #endif
 
+#include "third_party/PMurHash.h"
+
 /*
  * Map: (i32) => (void *)
  */
@@ -53,6 +55,55 @@ struct mh_i32ptr_node_t {
 #define mh_cmp_key(a, b, arg) ((a) != (b->key))
 #include "salad/mhash.h"
 
+/*
+ * Map: (char * with length) => (void *)
+ */
+enum {
+	MH_STRN_HASH_SEED = 13U
+};
+
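+/**
+ * Hash a string of the given length with PMurHash (an incremental
+ * MurmurHash3 implementation), seeded with MH_STRN_HASH_SEED.
+ */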
+static inline uint32_t
+mh_strn_hash(const char *str, size_t len)
+{
+	uint32_t h = MH_STRN_HASH_SEED;
+	uint32_t carry = 0;
+	PMurHash32_Process(&h, &carry, str, len);
+	return PMurHash32_Result(h, carry, len);
+}
+
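+/*
+ * Both the key and the node carry a precomputed hash, so the table
+ * never has to rehash the string data itself.
+ */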
+#define mh_name _strnptr
+struct mh_strnptr_key_t {
+	const char *str;
+	size_t len;
+	uint32_t hash;
+};
+#define mh_key_t struct mh_strnptr_key_t *
+
+struct mh_strnptr_node_t {
+	const char *str;
+	size_t len;
+	uint32_t hash;
+	void *val;
+};
+#define mh_node_t struct mh_strnptr_node_t
+
+#define mh_arg_t void *
+#define mh_hash(a, arg) ((a)->hash)
+#define mh_hash_key(a, arg) ((a)->hash)
+#define mh_cmp(a, b, arg) ((a)->len != (b)->len || \
+			    strncmp((a)->str, (b)->str, (a)->len))
+#define mh_cmp_key(a, b, arg) mh_cmp(a, b, arg)
+#include "salad/mhash.h"
+
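+/**
+ * Look up a (str, len) pair: compute its hash, wrap it into a
+ * temporary key and search the map. Returns mh_end(h) on a miss.
+ */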
+static inline mh_int_t
+mh_strnptr_find_inp(struct mh_strnptr_t *h, const char *str, size_t len)
+{
+	uint32_t hash = mh_strn_hash(str, len);
+	struct mh_strnptr_key_t key = {str, len, hash};
+	return mh_strnptr_find(h, &key, NULL);
+}
+
+
 #if defined(__cplusplus)
 } /* extern "C" */
 #endif /* defined(__cplusplus) */
diff --git a/src/box/schema.cc b/src/box/schema.cc
index 0c8992b2808b1a2da7ccf29c8d5da1c9833fa6cc..5ba0659781703515b04c048e09e26d9d7b3b4a37 100644
--- a/src/box/schema.cc
+++ b/src/box/schema.cc
@@ -57,6 +57,7 @@
 /** All existing spaces. */
 static struct mh_i32ptr_t *spaces;
 static struct mh_i32ptr_t *funcs;
+static struct mh_strnptr_t *funcs_by_name;
 int sc_version;
 
 bool
@@ -233,6 +234,7 @@ schema_init()
 	/* Initialize the space cache. */
 	spaces = mh_i32ptr_new();
 	funcs = mh_i32ptr_new();
+	funcs_by_name = mh_strnptr_new();
 	/*
 	 * Create surrogate space objects for the mandatory system
 	 * spaces (the primal eggs from which we get all the
@@ -346,8 +348,19 @@ func_cache_replace(struct func_def *def)
 			       "dictionary cache (stored function).");
 	}
 	const struct mh_i32ptr_node_t node = { def->fid, func };
-	mh_int_t k = mh_i32ptr_put(funcs, &node, NULL, NULL);
-	if (k == mh_end(funcs)) {
+	mh_int_t k1 = mh_i32ptr_put(funcs, &node, NULL, NULL);
+	if (k1 == mh_end(funcs)) {
+		func_delete(func);
+		goto error;
+	}
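+	/*
+	 * Mirror the entry in the name cache so that func_by_name()
+	 * can be served directly from it. If the second insertion
+	 * fails, undo the first one to keep both caches consistent.
+	 */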
+	size_t def_name_len = strlen(func->def.name);
+	uint32_t name_hash = mh_strn_hash(func->def.name, def_name_len);
+	const struct mh_strnptr_node_t strnode = {
+		func->def.name, def_name_len, name_hash, func };
+
+	mh_int_t k2 = mh_strnptr_put(funcs_by_name, &strnode, NULL, NULL);
+	if (k2 == mh_end(funcs_by_name)) {
+		mh_i32ptr_del(funcs, k1, NULL);
 		func_delete(func);
 		goto error;
 	}
@@ -362,6 +375,10 @@ func_cache_delete(uint32_t fid)
 	struct func *func = (struct func *)
 		mh_i32ptr_node(funcs, k)->val;
 	mh_i32ptr_del(funcs, k, NULL);
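+	/* Drop the function from the name cache as well, if present. */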
+	k = mh_strnptr_find_inp(funcs_by_name, func->def.name,
+				strlen(func->def.name));
+	if (k != mh_end(funcs_by_name))
+		mh_strnptr_del(funcs_by_name, k, NULL);
 	func_delete(func);
 }
 
@@ -374,6 +391,15 @@ func_by_id(uint32_t fid)
 	return (struct func *) mh_i32ptr_node(funcs, func)->val;
 }
 
+struct func *
+func_by_name(const char *name, uint32_t name_len)
+{
+	mh_int_t func = mh_strnptr_find_inp(funcs_by_name, name, name_len);
+	if (func == mh_end(funcs_by_name))
+		return NULL;
+	return (struct func *) mh_strnptr_node(funcs_by_name, func)->val;
+}
+
 bool
 schema_find_grants(const char *type, uint32_t id)
 {
diff --git a/src/box/schema.h b/src/box/schema.h
index 93d6076d036c2a4269e287eeefebe6d8ca5b22fb..170ad569c19c926e323eaa32495ed22ef939c5fa 100644
--- a/src/box/schema.h
+++ b/src/box/schema.h
@@ -139,12 +139,8 @@ func_cache_find(uint32_t fid)
 	return func;
 }
 
-static inline struct func *
-func_by_name(const char *name, uint32_t name_len)
-{
-	uint32_t fid = schema_find_id(SC_FUNC_ID, 2, name, name_len);
-	return func_by_id(fid);
-}
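+/** Find a function in the function cache by name, or return NULL. */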
+struct func *
+func_by_name(const char *name, uint32_t name_len);
 
 
 /**
diff --git a/test/replication/conflict.result b/test/replication/conflict.result
new file mode 100644
index 0000000000000000000000000000000000000000..7bc5dceb093750395e51f200ec3cf2a43e8546ca
--- /dev/null
+++ b/test/replication/conflict.result
@@ -0,0 +1,63 @@
+box.schema.user.grant('guest', 'replication')
+---
+...
+reset master-master replication
+parallel send: box.space.test:update(1, {{'#', 2, 1}})
+parallel send: box.space.test:update(1, {{'#', 2, 1}})
+replication state is correct
+box.space.test:select{1}
+---
+- - [1]
+...
+box.space.test:select{1}
+---
+- - [1]
+...
+reset master-master replication
+parallel send: box.space.test:insert{20, 1}
+parallel send: box.space.test:insert{20, 2}
+replication state is correct
+reset master-master replication
+parallel send: box.space.test:update(2, {{'=', 2, 1}})
+parallel send: box.space.test:update(2, {{'=', 2, 2}})
+replication state is correct
+reset master-master replication
+parallel send: box.space.test:update(1, {{'+', 2, 1}})
+parallel send: box.space.test:update(1, {{'+', 2, 2}})
+replication state is correct
+box.space.test:select{1}
+---
+- - [1, 4]
+...
+box.space.test:select{1}
+---
+- - [1, 4]
+...
+reset master-master replication
+parallel send: box.space.test:delete(999)
+parallel send: box.space.test:delete(999)
+replication state is correct
+box.space.test:select{}
+---
+- - [1, 1]
+  - [2, 4]
+  - [3, 9]
+  - [4, 16]
+  - [5, 25]
+  - [6, 36]
+  - [7, 49]
+  - [8, 64]
+  - [9, 81]
+...
+box.space.test:select{}
+---
+- - [1, 1]
+  - [2, 4]
+  - [3, 9]
+  - [4, 16]
+  - [5, 25]
+  - [6, 36]
+  - [7, 49]
+  - [8, 64]
+  - [9, 81]
+...
diff --git a/test/replication/conflict.test.py b/test/replication/conflict.test.py
new file mode 100644
index 0000000000000000000000000000000000000000..f482fa6e7d2e401d116eb84dd1d6ec8e3bddab5c
--- /dev/null
+++ b/test/replication/conflict.test.py
@@ -0,0 +1,120 @@
+from lib.tarantool_server import TarantoolServer
+from time import sleep
+import yaml
+
+def check_replication(nodes, select_args=''):
+    for node in nodes:
+        node.admin('box.space.test:select{%s}' % select_args)
+
+master = server
+master.admin("box.schema.user.grant('guest', 'replication')")
+
+replica = TarantoolServer(server.ini)
+replica.script = 'replication/replica.lua'
+replica.vardir = server.vardir
+replica.rpl_master = master
+replica.deploy()
+
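+# Send cmd1 to the master and cmd2 to the replica at (almost) the same
+# time, then poll box.info().replication.status on both nodes until the
+# pair of statuses satisfies one of the compare predicates.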
+def parallel_run(cmd1, cmd2, compare):
+    print 'parallel send: %s' % cmd1
+    print 'parallel send: %s' % cmd2
+    master.admin.socket.sendall('%s\n' % cmd1)
+    replica.admin.socket.sendall('%s\n' % cmd2)
+
+    master.admin.socket.recv(2048)
+    replica.admin.socket.recv(2048)
+
+    # Poll both servers until replication reaches one of the states
+    # accepted by the compare predicates; re-read the status on each
+    # iteration so the loop can terminate.
+    while True:
+        master_status = yaml.load(master.admin(
+            'box.info().replication.status', silent=True
+        ))[0]
+        replica_status = yaml.load(replica.admin(
+            'box.info().replication.status', silent=True
+        ))[0]
+        results = [f(master_status, replica_status) for f in compare]
+        if any(results):
+            print 'replication state is correct'
+            break
+        sleep(0.01)
+
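+# Rebuild a clean master-master pair: restart both nodes, recreate the
+# 'test' space with tuples {k, k*k} for k = 1..9 on the master, and
+# wait until both nodes have caught up with each other.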
+def prepare_cluster():
+    print 'reset master-master replication'
+    master.stop()
+    master.cleanup(True)
+    master.start()
+    master.admin("box.schema.user.grant('guest', 'replication')", silent=True)
+
+    replica.stop()
+    replica.cleanup(True)
+    replica.start()
+
+    master.admin("box.cfg{replication_source='%s'}" % replica.iproto.uri, silent=True)
+    r1_id = replica.get_param('server')['id']
+    r2_id = master.get_param('server')['id']
+
+    master.admin("space = box.schema.space.create('test')", silent=True)
+    master.admin("index = space:create_index('primary', { type = 'tree'})", silent=True)
+    master.admin('for k = 1, 9 do space:insert{k, k*k} end', silent=True)
+
+    # wait lsn
+    replica.wait_lsn(r2_id, master.get_lsn(r2_id))
+    master.wait_lsn(r1_id, replica.get_lsn(r1_id))
+
+# test1: double update in master and replica
+prepare_cluster()
+parallel_run(
+    "box.space.test:update(1, {{'#', 2, 1}})",
+    "box.space.test:update(1, {{'#', 2, 1}})",
+    [
+        lambda x,y: x == 'stopped' or y == 'stopped',
+        lambda x,y: x == 'connected' and y == 'connected',
+    ]
+)
+check_replication([master, replica], '1')
+
+# test2: insert different values with single id
+prepare_cluster()
+parallel_run(
+    'box.space.test:insert{20, 1}',
+    'box.space.test:insert{20, 2}',
+    [
+        lambda x,y: x == 'stopped' or y == 'stopped',
+        lambda x,y: x == 'connected' and y == 'connected',
+    ]
+)
+
+# test3: update different values
+prepare_cluster()
+parallel_run(
+    "box.space.test:update(2, {{'=', 2, 1}})",
+    "box.space.test:update(2, {{'=', 2, 2}})",
+    [lambda x,y: x == 'connected' and y == 'connected',]
+)
+
+# test4: CRDT increment with update
+prepare_cluster()
+parallel_run(
+    "box.space.test:update(1, {{'+', 2, 1}})",
+    "box.space.test:update(1, {{'+', 2, 2}})",
+    [lambda x,y: x == 'connected' and y == 'connected',]
+)
+check_replication([master, replica], '1')
+
+# test5: delete not existing key
+prepare_cluster()
+parallel_run(
+    "box.space.test:delete(999)",
+    "box.space.test:delete(999)",
+    [lambda x,y: x == 'connected' and y == 'connected',]
+)
+check_replication([master, replica])
+
+# cleanup
+replica.stop()
+replica.cleanup(True)
+server.stop()
+server.cleanup(True)
+server.deploy()