From b67a8100dcbc1c89bfad10c36b4455fc4ab20d80 Mon Sep 17 00:00:00 2001
From: "ms.evilhat" <ms.evilhat@gmail.com>
Date: Wed, 15 Mar 2023 16:34:26 +0300
Subject: [PATCH] fix(test): fix flaky tests on a cluster with replication

Tarantool uses asynchronous replication by default, so a test may select
from a replica that has not yet received the space or its data, which
makes the tests flaky.

Now we configure the storage replica set with Raft-based elections and
enable synchronous replication for the spaces the tests work with.
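
For context: once a space is synchronous, a transaction commits on the
master only after the replica acknowledges it, so data read from the
replica right after a commit is expected to be there. A minimal sketch
of the idea (illustrative only; the space name is a placeholder, the
actual helper code is in the diff below):

    -- on the instance that should become the master
    box.cfg{ election_mode = 'candidate' }
    -- on the replica
    box.cfg{ election_mode = 'voter' }
    -- on the master, once it is writable: require acks from both instances
    box.cfg{ replication_synchro_quorum = 2 }
    -- mark the space synchronous so commits wait for the replica
    box.space.some_space:alter({ is_sync = true })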
Closes #369
---
 ...ation.lua => cluster_sync_replication.lua} | 34 +++++++++++++++++++
 .../test/integration/sql_cache_test.lua       |  7 ++--
 2 files changed, 38 insertions(+), 3 deletions(-)
 rename sbroad-cartridge/test_app/test/helper/{cluster_async_replication.lua => cluster_sync_replication.lua} (61%)

diff --git a/sbroad-cartridge/test_app/test/helper/cluster_async_replication.lua b/sbroad-cartridge/test_app/test/helper/cluster_sync_replication.lua
similarity index 61%
rename from sbroad-cartridge/test_app/test/helper/cluster_async_replication.lua
rename to sbroad-cartridge/test_app/test/helper/cluster_sync_replication.lua
index 35f1eaafee..dad63e8cf7 100644
--- a/sbroad-cartridge/test_app/test/helper/cluster_async_replication.lua
+++ b/sbroad-cartridge/test_app/test/helper/cluster_sync_replication.lua
@@ -2,6 +2,7 @@
 -- Add common configuration here.
 
 local fio = require('fio')
+local fiber = require('fiber')
 local t = require('luatest')
 local cartridge_helpers = require('cartridge.test-helpers')
 local config_handler = require('test.helper.config_handler')
@@ -56,6 +57,39 @@ helper.start_test_cluster = function (cfg)
     })
 
     helper.cluster:start()
+
+    local storage11 = helper.cluster:server("storage-1-1").net_box
+    local storage12 = helper.cluster:server("storage-1-2").net_box
+
+    -- make storage-1-1 the replica set master (Raft leader)
+    storage11:eval("box.cfg{election_mode='candidate'}")
+    -- keep storage-1-2 a read-only replica (Raft voter)
+    storage12:eval("box.cfg{election_mode='voter'}")
+
+    local WAITING_TIMEOUT = 20
+    local fiber_sleep = 0.01
+    local wait_start = fiber.clock()
+
+    local s11_ro = storage11:eval("return box.info.ro")
+    local s12_ro = storage12:eval("return box.info.ro")
+
+    -- wait until the new replica set configuration takes effect:
+    -- the master becomes writable, while the voter replica stays read-only
+    while s11_ro do
+        fiber.sleep(fiber_sleep)
+        s11_ro = storage11:eval("return box.info.ro")
+        s12_ro = storage12:eval("return box.info.ro")
+
+        local current_time = fiber.clock()
+        if current_time > wait_start + WAITING_TIMEOUT then
+            t.fail("timeout exceeded while waiting for replication setup")
+        end
+    end
+    t.assert_equals({s11_ro, s12_ro}, {false, true})
+
+    -- with one master and one replica, quorum 2 makes every sync write wait for the replica
+    storage11:eval("box.cfg{replication_synchro_quorum=2}")
+
     helper.cluster:upload_config(cfg)
 end
 
diff --git a/sbroad-cartridge/test_app/test/integration/sql_cache_test.lua b/sbroad-cartridge/test_app/test/integration/sql_cache_test.lua
index 954977eb49..6dbe0f4ebf 100644
--- a/sbroad-cartridge/test_app/test/integration/sql_cache_test.lua
+++ b/sbroad-cartridge/test_app/test/integration/sql_cache_test.lua
@@ -1,7 +1,7 @@
 local t = require('luatest')
 local g = t.group('configuration.sql_cache')
 
-local helper = require('test.helper.cluster_async_replication')
+local helper = require('test.helper.cluster_sync_replication')
 local cluster = nil
 
 g.before_all(
@@ -10,9 +10,10 @@ g.before_all(
         cluster = helper.cluster
 
         local storage1 = cluster:server("storage-1-1").net_box
-        storage1:call("box.execute", { [[truncate table "space_for_breake_cache"]] })
-
         local storage2 = cluster:server("storage-2-1").net_box
+
+        storage1:call("box.space.space_for_breake_cache:alter", { { is_sync = true } })
+        storage1:call("box.execute", { [[truncate table "space_for_breake_cache"]] })
         storage2:call("box.execute", { [[truncate table "space_for_breake_cache"]] })
     end
 )
-- 
GitLab