diff --git a/.gitmodules b/.gitmodules
index 4a28f773cdc312b55ced964810777dfa728d593b..229b20d7f902547ce58b53e8c2b35aa4192c82dd 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -8,9 +8,9 @@
 	path = third_party/luafun
 	url = https://github.com/rtsisyk/luafun.git
 [submodule "sophia"]
-       path = third_party/sophia
-       url = https://github.com/tarantool/sophia.git
-       branch = current
+	path = third_party/sophia
+	url = https://github.com/tarantool/sophia.git
+	branch = tarantool-master
 [submodule "test-run"]
 	path = test-run
 	url = https://github.com/tarantool/test-run.git
diff --git a/extra/dist/tarantoolctl b/extra/dist/tarantoolctl
index 2e5b5180d7eddf8f363bd1500c3f508c89ced276..85d7e41360aaf7601f8d26263adcdbcc808d6956 100755
--- a/extra/dist/tarantoolctl
+++ b/extra/dist/tarantoolctl
@@ -404,7 +404,11 @@ local function wrapper_cfg(cfg)
     -- force these startup options
     --
     cfg.pid_file = default_cfg.pid_file
-    cfg.username = default_cfg.username
+    if os.getenv('USER') ~= default_cfg.username then
+        cfg.username = default_cfg.username
+    else
+        cfg.username = nil
+    end
     if cfg.background == nil then
         cfg.background = true
     end
diff --git a/src/box/box.cc b/src/box/box.cc
index c43982f5efdf30e49bd5f9d2d23c9896dd0a77a1..2b8a2b62c56808392e350df6a6f559d98c069709 100644
--- a/src/box/box.cc
+++ b/src/box/box.cc
@@ -649,6 +649,9 @@ box_upsert(uint32_t space_id, uint32_t index_id, const char *tuple,
 static void
 box_on_cluster_join(const tt_uuid *server_uuid)
 {
+	if (is_ro)
+		tnt_raise(LoggedError, ER_READONLY);
+
 	/** Find the largest existing server id. */
 	struct space *space = space_cache_find(BOX_CLUSTER_ID);
 	class MemtxIndex *index = index_find_system(space, 0);
@@ -669,11 +672,15 @@ box_on_cluster_join(const tt_uuid *server_uuid)
 void
 box_process_join(int fd, struct xrow_header *header)
 {
+	assert(header->type == IPROTO_JOIN);
+
 	/* Check permissions */
 	access_check_universe(PRIV_R);
 	access_check_space(space_cache_find(BOX_CLUSTER_ID), PRIV_W);
 
-	assert(header->type == IPROTO_JOIN);
+	/* Check that we actually can register a new replica */
+	if (is_ro)
+		tnt_raise(LoggedError, ER_READONLY);
 
 	/* Process JOIN request via a replication relay */
 	relay_join(fd, header, recovery->server_id,
@@ -711,19 +718,18 @@ box_set_server_uuid()
 	assert(r->server_id == 0);
 
 	/* Unregister local server if it was registered by bootstrap.bin */
-	if (vclock_has(&r->vclock, 1))
-		boxk(IPROTO_DELETE, BOX_CLUSTER_ID, "%u", 1);
-	assert(!vclock_has(&r->vclock, 1));
+	boxk(IPROTO_DELETE, BOX_CLUSTER_ID, "%u", 1);
 
 	/* Register local server */
 	tt_uuid_create(&r->server_uuid);
 	boxk(IPROTO_INSERT, BOX_CLUSTER_ID, "%u%s",
 	     1, tt_uuid_str(&r->server_uuid));
-	assert(vclock_has(&r->vclock, 1));
-
-	/* Remove surrogate server */
-	vclock_del_server(&r->vclock, 0);
 	assert(r->server_id == 1);
+
+	/* Ugly hack: bootstrap always starts from scratch */
+	vclock_create(&r->vclock);
+	vclock_add_server(&r->vclock, 1);
+	assert(vclock_sum(&r->vclock) == 0);
 }
 
 /** Insert a new cluster into _schema */
diff --git a/src/box/cluster.cc b/src/box/cluster.cc
index 2118f4c502a6ec20c710480232c4f74b055c0ec7..502a18ab050b5339d81e1bb8b6099b31ce38cafa 100644
--- a/src/box/cluster.cc
+++ b/src/box/cluster.cc
@@ -78,10 +78,16 @@ cluster_add_server(uint32_t server_id, const struct tt_uuid *server_uuid)
 	/** Checked in the before-commit trigger */
 	assert(!tt_uuid_is_nil(server_uuid));
 	assert(!cserver_id_is_reserved(server_id) && server_id < VCLOCK_MAX);
-	assert(!vclock_has(&r->vclock, server_id));
 
-	/* Add server */
-	vclock_add_server_nothrow(&r->vclock, server_id);
+	/*
+	 * Add a new server into vclock if the specified server_id has never
+	 * been registered in the cluster. A fresh server starts from
+	 * LSN = 0 (i.e. the first request gets LSN = 1). LSN starts from the
+	 * last known value in case server was registered and then
+	 * unregistered somewhere in the past, in case the server was.
+	 */
+	if (!vclock_has(&r->vclock, server_id))
+		vclock_add_server_nothrow(&r->vclock, server_id);
 	if (tt_uuid_is_equal(&r->server_uuid, server_uuid)) {
 		/* Assign local server id */
 		assert(r->server_id == 0);
@@ -120,7 +126,15 @@ cluster_del_server(uint32_t server_id)
 	struct recovery *r = ::recovery;
 	assert(!cserver_id_is_reserved(server_id) && server_id < VCLOCK_MAX);
 
-	vclock_del_server(&r->vclock, server_id);
+	/*
+	 * Don't remove servers from vclock here.
+	 * The vclock_sum() must always grow, it is a core invariant of
+	 * the recovery subsystem. Further attempts to register a server
+	 * with the removed server_id will re-use LSN from the last value.
+	 * Even servers with LSN == 0 can't be safely removed.
+	 * Some records may arrive later on due to the asynchronous nature of
+	 * replication.
+	 */
 	if (r->server_id == server_id) {
 		r->server_id = 0;
 		box_set_ro(true);
diff --git a/src/box/vclock.h b/src/box/vclock.h
index 769201aa17d2f4820c3724ee51272a179f2e6c7b..7614b104c262ee3d89299e105480fead731e2f13 100644
--- a/src/box/vclock.h
+++ b/src/box/vclock.h
@@ -149,6 +149,13 @@ vclock_sum(const struct vclock *vclock)
 	return vclock->signature;
 }
 
+/**
+ * Add a new server to vclock.
+ *
+ * Please never, ever, ever remove servers with LSN > 0 from vclock!
+ * The vclock_sum() must always grow, it is a core invariant of the recovery
+ * subsystem!
+ */
 static inline void
 vclock_add_server_nothrow(struct vclock *vclock, uint32_t server_id)
 {
@@ -289,15 +296,6 @@ vclock_add_server(struct vclock *vclock, uint32_t server_id)
 	vclock_add_server_nothrow(vclock, server_id);
 }
 
-static inline void
-vclock_del_server(struct vclock *vclock, uint32_t server_id)
-{
-	assert(vclock_has(vclock, server_id));
-	vclock->lsn[server_id] = 0;
-	vclock->map &= ~(1 << server_id);
-	vclock->signature = vclock_calc_sum(vclock);
-}
-
 #endif /* defined(__cplusplus) */
 
 #endif /* INCLUDES_TARANTOOL_VCLOCK_H */
diff --git a/src/box/xrow.h b/src/box/xrow.h
index b7cceafeebfc2c93d996cd3fa6cc77b9675b8b96..7c62714394fe20d188a31c6992916f6b0eecde52 100644
--- a/src/box/xrow.h
+++ b/src/box/xrow.h
@@ -52,7 +52,7 @@ struct xrow_header {
 	uint32_t type;
 	uint32_t server_id;
 	uint64_t sync;
-	uint64_t lsn;
+	int64_t lsn; /* LSN must be signed for correct comparison */
 	double tm;
 
 	int bodycnt;
diff --git a/test/replication-py/cluster.result b/test/replication-py/cluster.result
index acb0026bc1d19263f1df7c8849955e2e70dd04be..8079211fecb66291c68c486aaae776330c931e8e 100644
--- a/test/replication-py/cluster.result
+++ b/test/replication-py/cluster.result
@@ -89,113 +89,238 @@ box.space._cluster:delete(5)
 ---
 - [5, 'a48a19a3-26c0-4f8c-a5b5-77377bab389b']
 ...
-box.info.vclock[5] == nil
+box.info.vclock[5] == 0
 ---
 - true
 ...
 -------------------------------------------------------------
-gh-527: update vclock on delete from box.space._cluster
+Start a new replica and check box.info on the start
 -------------------------------------------------------------
 box.schema.user.grant('guest', 'replication')
 ---
 ...
-box.space._schema:insert{"test", 48}
+box.info.server.id == 2
 ---
-- ['test', 48]
+- true
 ...
-box.info.server.id
+not box.info.server.ro
 ---
-- 2
+- true
 ...
-box.info.server.ro
+box.info.server.lsn == 0
+---
+- true
+...
+box.info.vclock[2] == 0
+---
+- true
+...
+-------------------------------------------------------------
+Modify data to change LSN and check box.info
+-------------------------------------------------------------
+box.space._schema:insert{"test", 48}
 ---
-- false
+- ['test', 48]
 ...
-box.info.server.lsn
+box.info.server.lsn == 1
 ---
-- 1
+- true
 ...
-box.info.vclock[2]
+box.info.vclock[2] == 1
 ---
-- 1
+- true
 ...
-box.space._cluster:delete{2}
+-------------------------------------------------------------
+Unregister replica and check box.info
+-------------------------------------------------------------
+box.space._cluster:delete{2} ~= nil
 ---
-- [2, '<replica uuid>']
+- true
 ...
-box.info.server.id
+box.info.server.id ~= 2
 ---
-- 0
+- true
 ...
 box.info.server.ro
 ---
 - true
 ...
-box.info.server.lsn
+box.info.server.lsn == -1
 ---
-- -1
+- true
 ...
-box.info.vclock[2]
+box.info.vclock[2] == 1
 ---
-- null
+- true
 ...
 box.space._schema:replace{"test", 48}
 ---
 - error: Can't modify data because this server is in read-only mode.
 ...
-box.space._cluster:insert{10, "<replica uuid>"}
+-------------------------------------------------------------
+Re-register replica with the same server_id
+-------------------------------------------------------------
+box.space._cluster:insert{2, "<replica uuid>"} ~= nil
 ---
-- [10, '<replica uuid>']
+- true
 ...
-box.info.server.id
+box.info.server.id == 2
 ---
-- 10
+- true
 ...
-box.info.server.ro
+not box.info.server.ro
 ---
-- false
+- true
 ...
-box.info.server.lsn
+box.info.server.lsn == 1
 ---
-- 0
+- true
+...
+box.info.vclock[2] == 1
+---
+- true
 ...
-box.info.vclock[2]
+-------------------------------------------------------------
+Re-register replica with a new server_id
+-------------------------------------------------------------
+box.space._cluster:delete{2} ~= nil
 ---
-- null
+- true
 ...
-box.info.vclock[10]
+box.space._cluster:insert{10, "<replica uuid>"} ~= nil
 ---
-- 0
+- true
 ...
+box.info.server.id == 10
+---
+- true
+...
+not box.info.server.ro
+---
+- true
+...
+box.info.server.lsn == 0
+---
+- true
+...
+box.info.vclock[2] == 1
+---
+- true
+...
+box.info.vclock[10] == 0
+---
+- true
+...
+-------------------------------------------------------------
+Check that server_id can't be changed by UPDATE
+-------------------------------------------------------------
 box.space._cluster:update(10, {{'=', 1, 11}})
 ---
 - error: Attempt to modify a tuple field which is part of index 'primary' in space
     '_cluster'
 ...
-box.info.server.id
+box.info.server.id == 10
+---
+- true
+...
+not box.info.server.ro
+---
+- true
+...
+box.info.server.lsn == 0
+---
+- true
+...
+box.info.vclock[2] == 1
+---
+- true
+...
+box.info.vclock[10] == 0
+---
+- true
+...
+box.info.vclock[11] == nil
+---
+- true
+...
+-------------------------------------------------------------
+Unregister replica and check box.info (second attempt)
+-------------------------------------------------------------
+box.space._cluster:delete{10} ~= nil
+---
+- true
+...
+box.info.server.id ~= 2
 ---
-- 10
+- true
 ...
 box.info.server.ro
 ---
-- false
+- true
 ...
-box.info.server.lsn
+box.info.server.lsn == -1
 ---
-- 0
+- true
 ...
-box.info.vclock[2]
+box.info.vclock[10] == 0
 ---
-- null
+- true
 ...
-box.info.vclock[10]
+-------------------------------------------------------------
+JOIN replica to read-only master
+-------------------------------------------------------------
+'ER_READONLY' exists in server log
+-------------------------------------------------------------
+Sync master with replica
+-------------------------------------------------------------
+box.cfg{ replication_source = '<replication_source>' }
 ---
-- 0
 ...
-box.info.vclock[11]
+box.info.vclock[2] == 1
 ---
-- null
+- true
 ...
+box.info.vclock[10] == 0
+---
+- true
+...
+box.info.vclock[11] == nil
+---
+- true
+...
+box.cfg{ replication_source = '' }
+---
+...
+-------------------------------------------------------------
+Start a new replica and check that server_id, LSN is re-used
+-------------------------------------------------------------
+box.snapshot()
+---
+- ok
+...
+box.info.vclock[2] == 1
+---
+- true
+...
+box.info.server.id == 2
+---
+- true
+...
+not box.info.server.ro
+---
+- true
+...
+box.info.vclock[2] == 1
+---
+- true
+...
+box.info.vclock[10] == 0
+---
+- true
+...
+-------------------------------------------------------------
+Cleanup
+-------------------------------------------------------------
 box.schema.user.revoke('guest', 'replication')
 ---
 ...
diff --git a/test/replication-py/cluster.test.py b/test/replication-py/cluster.test.py
index 4d3ac78ec5826ca01e430791484bfda3b6c4d152..7d5e7130929d780e2ce15d22ebdaf1f06dbba05b 100644
--- a/test/replication-py/cluster.test.py
+++ b/test/replication-py/cluster.test.py
@@ -140,7 +140,8 @@ new_uuid = 'a48a19a3-26c0-4f8c-a5b5-77377bab389b'
 server.admin("box.space._cluster:replace{{5, '{0}'}}".format(new_uuid))
 # Delete is OK
 server.admin("box.space._cluster:delete(5)")
-server.admin("box.info.vclock[5] == nil")
+# gh-1219: LSN must not be removed from vclock on unregister
+server.admin("box.info.vclock[5] == 0")
 
 # Cleanup
 server.stop()
@@ -148,9 +149,8 @@ server.script = script
 server.deploy()
 
 print '-------------------------------------------------------------'
-print 'gh-527: update vclock on delete from box.space._cluster'
+print 'Start a new replica and check box.info on the start'
 print '-------------------------------------------------------------'
-
 # master server
 master = server
 master_id = master.get_param('server')['id']
@@ -166,43 +166,150 @@ replica.wait_lsn(master_id, master.get_lsn(master_id))
 replica_id = replica.get_param('server')['id']
 replica_uuid = replica.get_param('server')['uuid']
 sys.stdout.push_filter(replica_uuid, '<replica uuid>')
-replica.admin('box.space._schema:insert{"test", 48}')
 
-replica.admin('box.info.server.id')
-replica.admin('box.info.server.ro')
-replica.admin('box.info.server.lsn') # 1
-replica.admin('box.info.vclock[%d]' % replica_id)
+replica.admin('box.info.server.id == %d' % replica_id)
+replica.admin('not box.info.server.ro')
+replica.admin('box.info.server.lsn == 0')
+replica.admin('box.info.vclock[%d] == 0' % replica_id)
+
+print '-------------------------------------------------------------'
+print 'Modify data to change LSN and check box.info'
+print '-------------------------------------------------------------'
+replica.admin('box.space._schema:insert{"test", 48}')
+replica.admin('box.info.server.lsn == 1')
+replica.admin('box.info.vclock[%d] == 1' % replica_id)
 
-master.admin('box.space._cluster:delete{%d}' % replica_id)
+print '-------------------------------------------------------------'
+print 'Unregister replica and check box.info'
+print '-------------------------------------------------------------'
+# gh-527: update vclock on delete from box.space._cluster
+master.admin('box.space._cluster:delete{%d} ~= nil' % replica_id)
 replica.wait_lsn(master_id, master.get_lsn(master_id))
-replica.admin('box.info.server.id')
+replica.admin('box.info.server.id ~= %d' % replica_id)
 replica.admin('box.info.server.ro')
-replica.admin('box.info.server.lsn') # -1
-replica.admin('box.info.vclock[%d]' % replica_id)
-# replica is read-only
+# Backward-compatibility: box.info.server.lsn is -1 instead of nil
+replica.admin('box.info.server.lsn == -1')
+# gh-1219: LSN must not be removed from vclock on unregister
+replica.admin('box.info.vclock[%d] == 1' % replica_id)
 replica.admin('box.space._schema:replace{"test", 48}')
 
+print '-------------------------------------------------------------'
+print 'Re-register replica with the same server_id'
+print '-------------------------------------------------------------'
+master.admin('box.space._cluster:insert{%d, "%s"} ~= nil' %
+    (replica_id, replica_uuid))
+replica.wait_lsn(master_id, master.get_lsn(master_id))
+replica.admin('box.info.server.id == %d' % replica_id)
+replica.admin('not box.info.server.ro')
+# gh-1219: LSN must not be removed from vclock on unregister
+replica.admin('box.info.server.lsn == 1')
+replica.admin('box.info.vclock[%d] == 1' % replica_id)
+
+print '-------------------------------------------------------------'
+print 'Re-register replica with a new server_id'
+print '-------------------------------------------------------------'
+master.admin('box.space._cluster:delete{%d} ~= nil' % replica_id)
+replica.wait_lsn(master_id, master.get_lsn(master_id))
 replica_id2 = 10
-master.admin('box.space._cluster:insert{%d, "%s"}' %
+master.admin('box.space._cluster:insert{%d, "%s"} ~= nil' %
     (replica_id2, replica_uuid))
 replica.wait_lsn(master_id, master.get_lsn(master_id))
-replica.admin('box.info.server.id')
-replica.admin('box.info.server.ro')
-replica.admin('box.info.server.lsn') # 0
-replica.admin('box.info.vclock[%d]' % replica_id)
-replica.admin('box.info.vclock[%d]' % replica_id2)
+replica.admin('box.info.server.id == %d' % replica_id2)
+replica.admin('not box.info.server.ro')
+replica.admin('box.info.server.lsn == 0')
+replica.admin('box.info.vclock[%d] == 1' % replica_id)
+replica.admin('box.info.vclock[%d] == 0' % replica_id2)
 
+print '-------------------------------------------------------------'
+print 'Check that server_id can\'t be changed by UPDATE'
+print '-------------------------------------------------------------'
 replica_id3 = 11
-# Tuple is read-only
 server.admin("box.space._cluster:update(%d, {{'=', 1, %d}})" %
     (replica_id2, replica_id3))
 replica.wait_lsn(master_id, master.get_lsn(master_id))
-replica.admin('box.info.server.id')
+replica.admin('box.info.server.id == %d' % replica_id2)
+replica.admin('not box.info.server.ro')
+replica.admin('box.info.server.lsn == 0')
+replica.admin('box.info.vclock[%d] == 1' % replica_id)
+replica.admin('box.info.vclock[%d] == 0' % replica_id2)
+replica.admin('box.info.vclock[%d] == nil' % replica_id3)
+
+print '-------------------------------------------------------------'
+print 'Unregister replica and check box.info (second attempt)'
+print '-------------------------------------------------------------'
+# gh-527: update vclock on delete from box.space._cluster
+master.admin('box.space._cluster:delete{%d} ~= nil' % replica_id2)
+replica.wait_lsn(master_id, master.get_lsn(master_id))
+replica.admin('box.info.server.id ~= %d' % replica_id)
 replica.admin('box.info.server.ro')
-replica.admin('box.info.server.lsn') # 0
-replica.admin('box.info.vclock[%d]' % replica_id)
-replica.admin('box.info.vclock[%d]' % replica_id2)
-replica.admin('box.info.vclock[%d]' % replica_id3)
+# Backward-compatibility: box.info.server.lsn is -1 instead of nil
+replica.admin('box.info.server.lsn == -1')
+replica.admin('box.info.vclock[%d] == 0' % replica_id2)
+
+print '-------------------------------------------------------------'
+print 'JOIN replica to read-only master'
+print '-------------------------------------------------------------'
+
+#gh-1230 Assertion vclock_has on attempt to JOIN read-only master
+failed = TarantoolServer(server.ini)
+failed.script = 'replication-py/failed.lua'
+failed.vardir = server.vardir
+failed.rpl_master = replica
+failed.name = "failed"
+try:
+    failed.deploy()
+except Exception as e:
+    line = "ER_READONLY"
+    if failed.logfile_pos.seek_once(line) >= 0:
+        print "'%s' exists in server log" % line
+
+print '-------------------------------------------------------------'
+print 'Sync master with replica'
+print '-------------------------------------------------------------'
+
+# Sync master with replica
+replication_source = yaml.load(replica.admin('box.cfg.listen', silent = True))[0]
+sys.stdout.push_filter(replication_source, '<replication_source>')
+master.admin("box.cfg{ replication_source = '%s' }" % replication_source)
+
+master.wait_lsn(replica_id, replica.get_lsn(replica_id))
+master.admin('box.info.vclock[%d] == 1' % replica_id)
+master.admin('box.info.vclock[%d] == 0' % replica_id2)
+master.admin('box.info.vclock[%d] == nil' % replica_id3)
+
+master.admin("box.cfg{ replication_source = '' }")
+replica.stop()
+replica.cleanup(True)
+
+print '-------------------------------------------------------------'
+print 'Start a new replica and check that server_id, LSN is re-used'
+print '-------------------------------------------------------------'
+
+#
+# gh-1219: Proper removal of servers with non-zero LSN from _cluster
+#
+# Snapshot is required. Otherwise a relay will skip records made by previous
+# replica with the re-used id.
+master.admin("box.snapshot()")
+master.admin('box.info.vclock[%d] == 1' % replica_id)
+
+replica = TarantoolServer(server.ini)
+replica.script = 'replication/replica.lua'
+replica.vardir = server.vardir
+replica.rpl_master = master
+replica.deploy()
+replica.wait_lsn(master_id, master.get_lsn(master_id))
+# Check that replica_id was re-used
+replica.admin('box.info.server.id == %d' % replica_id)
+replica.admin('not box.info.server.ro')
+# All records were successfully recovered.
+replica.admin('box.info.vclock[%d] == 1' % replica_id)
+replica.admin('box.info.vclock[%d] == 0' % replica_id2)
+
+print '-------------------------------------------------------------'
+print 'Cleanup'
+print '-------------------------------------------------------------'
+
 replica.stop()
 replica.cleanup(True)
 
diff --git a/test/replication-py/failed.lua b/test/replication-py/failed.lua
new file mode 100644
index 0000000000000000000000000000000000000000..3a08208e065f353d7999aebea94592b85e5133c5
--- /dev/null
+++ b/test/replication-py/failed.lua
@@ -0,0 +1,9 @@
+#!/usr/bin/env tarantool
+
+box.cfg({
+    listen              = os.getenv("LISTEN"),
+    replication_source  = os.getenv("MASTER"),
+    slab_alloc_arena    = 0.1,
+})
+
+require('console').listen(os.getenv('ADMIN'))
diff --git a/third_party/sophia b/third_party/sophia
index b7ef75675d8289c93065423731708ae5687de0a4..bd2740915d7f0f9752724cb7d221e4ef24e5d87b 160000
--- a/third_party/sophia
+++ b/third_party/sophia
@@ -1 +1 @@
-Subproject commit b7ef75675d8289c93065423731708ae5687de0a4
+Subproject commit bd2740915d7f0f9752724cb7d221e4ef24e5d87b