From 9ad1bd15f425f0663a4e26f4d81fa62d681e24cd Mon Sep 17 00:00:00 2001
From: Serge Petrenko <sergepetrenko@tarantool.org>
Date: Sat, 27 Mar 2021 21:15:17 +0300
Subject: [PATCH] applier: fix not releasing the latch on apply_synchro_row()
 failure

Once apply_synchro_row() failed, applier_apply_tx() would simply raise
an error without unlocking the replica latch. This led to all the other
appliers hanging indefinitely while trying to lock the latch for this
replica.
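
The gist of the fix, as a standalone sketch (a pthread mutex and stub
functions stand in for Tarantool's per-replica order latch and the real
apply routines; all names below are illustrative, not applier.cc code):
every exit path now reaches the common `finish' label, which releases
the latch, instead of raising an error while the latch is still held.

    /* Minimal sketch, not Tarantool code: stubs show why every error
     * path must reach `finish' so the latch taken at the top of apply
     * is released rather than left locked forever. */
    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t order_latch = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-ins for apply_synchro_row() / apply_plain_tx(). */
    static int apply_synchro_row_stub(int row) { return row < 0 ? -1 : 0; }
    static int apply_plain_tx_stub(int row) { (void)row; return 0; }

    static int
    apply_tx_stub(int row, int is_synchro)
    {
        int rc = 0;
        pthread_mutex_lock(&order_latch);
        if (is_synchro) {
            /* Before the fix an error here was raised right away,
             * skipping the unlock below. */
            if ((rc = apply_synchro_row_stub(row)) != 0)
                goto finish;
        } else if ((rc = apply_plain_tx_stub(row)) != 0) {
            goto finish;
        }
    finish:
        pthread_mutex_unlock(&order_latch);
        return rc;
    }

    int
    main(void)
    {
        /* The second call fails, but the latch is still released. */
        printf("ok=%d fail=%d\n", apply_tx_stub(1, 1),
               apply_tx_stub(-1, 1));
        return 0;
    }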

In scope of #5566
---
 changelogs/unreleased/applier-hang-synchro.md |   4 +
 src/box/applier.cc                            |   4 +-
 test/replication/hang_on_synchro_fail.result  | 130 ++++++++++++++++++
 .../replication/hang_on_synchro_fail.test.lua |  57 ++++++++
 test/replication/suite.cfg                    |   1 +
 test/replication/suite.ini                    |   2 +-
 6 files changed, 195 insertions(+), 3 deletions(-)
 create mode 100644 changelogs/unreleased/applier-hang-synchro.md
 create mode 100644 test/replication/hang_on_synchro_fail.result
 create mode 100644 test/replication/hang_on_synchro_fail.test.lua

diff --git a/changelogs/unreleased/applier-hang-synchro.md b/changelogs/unreleased/applier-hang-synchro.md
new file mode 100644
index 0000000000..66adbd1f94
--- /dev/null
+++ b/changelogs/unreleased/applier-hang-synchro.md
@@ -0,0 +1,4 @@
+## bugfix/replication
+
+* Fix an applier hang on a replica after it fails to process a CONFIRM or
+  ROLLBACK message coming from a master.
diff --git a/src/box/applier.cc b/src/box/applier.cc
index e6d9673dd0..41abe64f98 100644
--- a/src/box/applier.cc
+++ b/src/box/applier.cc
@@ -1055,8 +1055,8 @@ applier_apply_tx(struct applier *applier, struct stailq *rows)
 		 * each other.
 		 */
 		assert(first_row == last_row);
-		if (apply_synchro_row(first_row) != 0)
-			diag_raise();
+		if ((rc = apply_synchro_row(first_row)) != 0)
+			goto finish;
 	} else if ((rc = apply_plain_tx(rows, replication_skip_conflict,
 					true)) != 0) {
 		goto finish;
diff --git a/test/replication/hang_on_synchro_fail.result b/test/replication/hang_on_synchro_fail.result
new file mode 100644
index 0000000000..9f6fac00bb
--- /dev/null
+++ b/test/replication/hang_on_synchro_fail.result
@@ -0,0 +1,130 @@
+-- test-run result file version 2
+test_run = require('test_run').new()
+ | ---
+ | ...
+fiber = require('fiber')
+ | ---
+ | ...
+--
+-- All appliers could hang after failing to apply a synchronous message: either
+-- CONFIRM or ROLLBACK.
+--
+box.schema.user.grant('guest', 'replication')
+ | ---
+ | ...
+
+_ = box.schema.space.create('sync', {is_sync=true})
+ | ---
+ | ...
+_ = box.space.sync:create_index('pk')
+ | ---
+ | ...
+
+old_synchro_quorum = box.cfg.replication_synchro_quorum
+ | ---
+ | ...
+box.cfg{replication_synchro_quorum=3}
+ | ---
+ | ...
+-- A huge timeout so that we can perform some actions on a replica before
+-- writing ROLLBACK.
+old_synchro_timeout = box.cfg.replication_synchro_timeout
+ | ---
+ | ...
+box.cfg{replication_synchro_timeout=1000}
+ | ---
+ | ...
+
+test_run:cmd('create server replica with rpl_master=default,\
+              script="replication/replica.lua"')
+ | ---
+ | - true
+ | ...
+test_run:cmd('start server replica')
+ | ---
+ | - true
+ | ...
+
+_ = fiber.new(box.space.sync.insert, box.space.sync, {1})
+ | ---
+ | ...
+test_run:wait_lsn('replica', 'default')
+ | ---
+ | ...
+
+test_run:switch('replica')
+ | ---
+ | - true
+ | ...
+
+box.error.injection.set('ERRINJ_WAL_IO', true)
+ | ---
+ | - ok
+ | ...
+
+test_run:switch('default')
+ | ---
+ | - true
+ | ...
+
+box.cfg{replication_synchro_timeout=0.01}
+ | ---
+ | ...
+
+test_run:switch('replica')
+ | ---
+ | - true
+ | ...
+
+test_run:wait_upstream(1, {status='stopped',\
+                           message_re='Failed to write to disk'})
+ | ---
+ | - true
+ | ...
+box.error.injection.set('ERRINJ_WAL_IO', false)
+ | ---
+ | - ok
+ | ...
+
+-- The applier is killed due to a failed WAL write, so restart replication to
+-- check whether it hangs or not. Actually, this single applier would fail an
+-- assertion rather than hang, but all the other appliers, if any, would hang.
+old_repl = box.cfg.replication
+ | ---
+ | ...
+box.cfg{replication=""}
+ | ---
+ | ...
+box.cfg{replication=old_repl}
+ | ---
+ | ...
+
+test_run:wait_upstream(1, {status='follow'})
+ | ---
+ | - true
+ | ...
+
+-- Cleanup.
+test_run:switch('default')
+ | ---
+ | - true
+ | ...
+test_run:cmd('stop server replica')
+ | ---
+ | - true
+ | ...
+test_run:cmd('delete server replica')
+ | ---
+ | - true
+ | ...
+box.cfg{replication_synchro_quorum=old_synchro_quorum,\
+        replication_synchro_timeout=old_synchro_timeout}
+ | ---
+ | ...
+box.space.sync:drop()
+ | ---
+ | ...
+box.schema.user.revoke('guest', 'replication')
+ | ---
+ | ...
+
diff --git a/test/replication/hang_on_synchro_fail.test.lua b/test/replication/hang_on_synchro_fail.test.lua
new file mode 100644
index 0000000000..6c3b09fab8
--- /dev/null
+++ b/test/replication/hang_on_synchro_fail.test.lua
@@ -0,0 +1,57 @@
+test_run = require('test_run').new()
+fiber = require('fiber')
+--
+-- All appliers could hang after failing to apply a synchronous message: either
+-- CONFIRM or ROLLBACK.
+--
+box.schema.user.grant('guest', 'replication')
+
+_ = box.schema.space.create('sync', {is_sync=true})
+_ = box.space.sync:create_index('pk')
+
+old_synchro_quorum = box.cfg.replication_synchro_quorum
+box.cfg{replication_synchro_quorum=3}
+-- A huge timeout so that we can perform some actions on a replica before
+-- writing ROLLBACK.
+old_synchro_timeout = box.cfg.replication_synchro_timeout
+box.cfg{replication_synchro_timeout=1000}
+
+test_run:cmd('create server replica with rpl_master=default,\
+              script="replication/replica.lua"')
+test_run:cmd('start server replica')
+
+_ = fiber.new(box.space.sync.insert, box.space.sync, {1})
+test_run:wait_lsn('replica', 'default')
+
+test_run:switch('replica')
+
+box.error.injection.set('ERRINJ_WAL_IO', true)
+
+test_run:switch('default')
+
+box.cfg{replication_synchro_timeout=0.01}
+
+test_run:switch('replica')
+
+test_run:wait_upstream(1, {status='stopped',\
+                           message_re='Failed to write to disk'})
+box.error.injection.set('ERRINJ_WAL_IO', false)
+
+-- The applier is killed due to a failed WAL write, so restart replication to
+-- check whether it hangs or not. Actually, this single applier would fail an
+-- assertion rather than hang, but all the other appliers, if any, would hang.
+old_repl = box.cfg.replication
+box.cfg{replication=""}
+box.cfg{replication=old_repl}
+
+test_run:wait_upstream(1, {status='follow'})
+
+-- Cleanup.
+test_run:switch('default')
+test_run:cmd('stop server replica')
+test_run:cmd('delete server replica')
+box.cfg{replication_synchro_quorum=old_synchro_quorum,\
+        replication_synchro_timeout=old_synchro_timeout}
+box.space.sync:drop()
+box.schema.user.revoke('guest', 'replication')
+
diff --git a/test/replication/suite.cfg b/test/replication/suite.cfg
index 7e70045924..c1c3294388 100644
--- a/test/replication/suite.cfg
+++ b/test/replication/suite.cfg
@@ -22,6 +22,7 @@
     "status.test.lua": {},
     "wal_off.test.lua": {},
     "hot_standby.test.lua": {},
+    "hang_on_synchro_fail.test.lua": {},
     "rebootstrap.test.lua": {},
     "wal_rw_stress.test.lua": {},
     "force_recovery.test.lua": {},
diff --git a/test/replication/suite.ini b/test/replication/suite.ini
index dcd711a2a9..fc161700a0 100644
--- a/test/replication/suite.ini
+++ b/test/replication/suite.ini
@@ -3,7 +3,7 @@ core = tarantool
 script =  master.lua
 description = tarantool/box, replication
 disabled = consistent.test.lua
-release_disabled = catch.test.lua errinj.test.lua gc.test.lua gc_no_space.test.lua before_replace.test.lua qsync_advanced.test.lua qsync_errinj.test.lua quorum.test.lua recover_missing_xlog.test.lua sync.test.lua long_row_timeout.test.lua gh-4739-vclock-assert.test.lua gh-4730-applier-rollback.test.lua gh-5140-qsync-casc-rollback.test.lua gh-5144-qsync-dup-confirm.test.lua gh-5167-qsync-rollback-snap.test.lua gh-5506-election-on-off.test.lua gh-5536-wal-limit.test.lua
+release_disabled = catch.test.lua errinj.test.lua gc.test.lua gc_no_space.test.lua before_replace.test.lua qsync_advanced.test.lua qsync_errinj.test.lua quorum.test.lua recover_missing_xlog.test.lua sync.test.lua long_row_timeout.test.lua gh-4739-vclock-assert.test.lua gh-4730-applier-rollback.test.lua gh-5140-qsync-casc-rollback.test.lua gh-5144-qsync-dup-confirm.test.lua gh-5167-qsync-rollback-snap.test.lua gh-5506-election-on-off.test.lua gh-5536-wal-limit.test.lua hang_on_synchro_fail.test.lua
 config = suite.cfg
 lua_libs = lua/fast_replica.lua lua/rlimit.lua
 use_unix_sockets = True
-- 
GitLab