From 24084239fdba1dc9c3774f81b8d5877ed81810fb Mon Sep 17 00:00:00 2001
From: Mergen Imeev <imeevma@tarantool.org>
Date: Thu, 17 Aug 2023 17:58:32 +0300
Subject: [PATCH] config: introduce initial support for vshard

This patch introduces initial support for the vshard configuration.
A lot of work remains in both vshard and the config module before
vshard can be run naturally. The key restrictions introduced by this
patch are listed below; an illustrative config sketch follows the list.
1) Only two roles are supported for now: storage and router.
2) The entire config is treated as the configuration of a single
sharded system.
3) The rebalancer is currently disabled.
4) The router can automatically find all masters, but once all masters
are found, any further changes of masters are ignored until
vshard.router.cfg() is called manually.
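
For illustration, here is a minimal config sketch that enables both
roles (the names and values are hypothetical; a real setup also needs
credentials and iproto settings, as in the test added by this patch):

    sharding:
      bucket_count: 3000
    groups:
      group-001:
        replicasets:
          replicaset-001:
            instances:
              instance-001:
                database:
                  mode: rw
                sharding:
                  roles: [storage]
          replicaset-002:
            instances:
              instance-002:
                sharding:
                  roles: [router]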

Closes #9007

NO_DOC=Will be described when full support for vshard is introduced.
---
 .../unreleased/gh-8861-vshard-options.md      |   1 +
 src/box/CMakeLists.txt                        |   1 +
 src/box/lua/config/applier/box_cfg.lua        |   2 +
 src/box/lua/config/applier/sharding.lua       |  84 ++++
 src/box/lua/config/configdata.lua             | 154 +++++++-
 src/box/lua/config/init.lua                   |   1 +
 src/box/lua/init.c                            |   5 +
 test/config-luatest/config_test.lua           |   5 +-
 test/config-luatest/vshard_test.lua           | 362 ++++++++++++++++++
 9 files changed, 597 insertions(+), 18 deletions(-)
 create mode 100644 src/box/lua/config/applier/sharding.lua
 create mode 100644 test/config-luatest/vshard_test.lua

diff --git a/changelogs/unreleased/gh-8861-vshard-options.md b/changelogs/unreleased/gh-8861-vshard-options.md
index 4ebda7bd90..ed0c72191f 100644
--- a/changelogs/unreleased/gh-8861-vshard-options.md
+++ b/changelogs/unreleased/gh-8861-vshard-options.md
@@ -1,3 +1,4 @@
 ## feature/config
 
 * Most of vshard options are now added in the config (gh-9007).
+* Added initial support for vshard (gh-9007).
diff --git a/src/box/CMakeLists.txt b/src/box/CMakeLists.txt
index 6e814460e5..65c29ac0b0 100644
--- a/src/box/CMakeLists.txt
+++ b/src/box/CMakeLists.txt
@@ -44,6 +44,7 @@ lua_source(lua_sources lua/config/applier/console.lua     config_applier_console
 lua_source(lua_sources lua/config/applier/credentials.lua config_applier_credentials_lua)
 lua_source(lua_sources lua/config/applier/fiber.lua       config_applier_fiber_lua)
 lua_source(lua_sources lua/config/applier/mkdir.lua       config_applier_mkdir_lua)
+lua_source(lua_sources lua/config/applier/sharding.lua    config_applier_sharding_lua)
 lua_source(lua_sources lua/config/cluster_config.lua      config_cluster_config_lua)
 lua_source(lua_sources lua/config/configdata.lua          config_configdata_lua)
 lua_source(lua_sources lua/config/init.lua                config_init_lua)
diff --git a/src/box/lua/config/applier/box_cfg.lua b/src/box/lua/config/applier/box_cfg.lua
index 619cf7ed48..54c91ee56d 100644
--- a/src/box/lua/config/applier/box_cfg.lua
+++ b/src/box/lua/config/applier/box_cfg.lua
@@ -245,6 +245,8 @@ local function apply(config)
     local names = configdata:names()
     box_cfg.instance_name = names.instance_name
     box_cfg.replicaset_name = names.replicaset_name
+    box_cfg.instance_uuid = names.instance_uuid
+    box_cfg.replicaset_uuid = names.replicaset_uuid
 
     -- Set bootstrap_leader option.
     box_cfg.bootstrap_leader = configdata:bootstrap_leader()
diff --git a/src/box/lua/config/applier/sharding.lua b/src/box/lua/config/applier/sharding.lua
new file mode 100644
index 0000000000..195adaea2e
--- /dev/null
+++ b/src/box/lua/config/applier/sharding.lua
@@ -0,0 +1,84 @@
+local log = require('internal.config.utils.log')
+local fiber = require('fiber')
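+-- vshard is loaded lazily, on the first configuration apply, and is
+-- stored in a global (see vshard_cfg() below).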
+_G.vshard = nil
+
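+-- Fiber that reapplies the vshard configuration when the instance
+-- switches between RO and RW (see wait_ro_rw() below).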
+local fiber_wait_ro_rw
+
+local function vshard_cfg(config)
+    local configdata = config._configdata
+    local roles = configdata:get('sharding.roles')
+    if roles == nil then
+        return
+    end
+    if _G.vshard == nil then
+        _G.vshard = require('vshard')
+    end
+    for _, role in pairs(roles) do
+        local cfg = configdata:sharding()
+        --
+        -- Make vshard repeat the current box.cfg options (see
+        -- vshard/issues/428).
+        -- TODO: delete when box.cfg{} is no longer called in vshard.
+        --
+        cfg.listen = box.cfg.listen
+        cfg.read_only = box.cfg.read_only
+        cfg.replication = box.cfg.replication
+
+        if role == 'storage' then
+            local names = configdata:names()
+            local replicaset_uuid = names.replicaset_uuid
+            assert(replicaset_uuid == box.cfg.replicaset_uuid)
+            local instance_uuid = names.instance_uuid
+            assert(instance_uuid == box.cfg.instance_uuid)
+            local this_replicaset_cfg = cfg.sharding[replicaset_uuid]
+            --
+            -- Currently, the replicaset master must set itself as the master in
+            -- its own configuration.
+            -- TODO: remove when vshard introduces auto-discovery of masters.
+            --
+            if not box.info.ro then
+                this_replicaset_cfg.master = nil
+                this_replicaset_cfg.replicas[instance_uuid].master = true
+            end
+            log.info('sharding: apply sharding config')
+            _G.vshard.storage.cfg(cfg, instance_uuid)
+            --
+            -- Currently, replicaset masters may not be aware of all other
+            -- masters, so the rebalancer is disabled.
+            -- TODO: remove when vshard introduces auto-discovery of masters.
+            --
+            if _G.vshard.storage.internal.is_rebalancer_active then
+                log.info('sharding: disable rebalancer')
+                _G.vshard.storage.rebalancer_disable()
+            end
+        end
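+        --
+        -- The router finds the masters itself ('master = auto'), but
+        -- master changes after the initial discovery are ignored until
+        -- vshard.router.cfg() is called manually.
+        --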
+        if role == 'router' then
+            _G.vshard.router.cfg(cfg)
+        end
+    end
+end
+
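+--
+-- Wait until the instance switches between RO and RW, then reapply the
+-- vshard configuration; repeat forever. Errors are logged rather than
+-- raised, so the watcher fiber survives a failed reconfiguration.
+--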
+local function wait_ro_rw(config)
+    while true do
+        if box.info.ro then
+            box.ctl.wait_rw()
+        else
+            box.ctl.wait_ro()
+        end
+        local ok, err = pcall(vshard_cfg, config)
+        if not ok then
+            log.error(err)
+        end
+    end
+end
+
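+-- Applier entry point: configure vshard and start the RO/RW watcher
+-- fiber on the first apply.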
+local function apply(config)
+    vshard_cfg(config)
+    if fiber_wait_ro_rw == nil then
+        fiber_wait_ro_rw = fiber.create(wait_ro_rw, config)
+    end
+end
+
+return {
+    name = 'sharding',
+    apply = apply,
+}
diff --git a/src/box/lua/config/configdata.lua b/src/box/lua/config/configdata.lua
index db7f7657b5..f0065b9f1b 100644
--- a/src/box/lua/config/configdata.lua
+++ b/src/box/lua/config/configdata.lua
@@ -4,6 +4,8 @@
 -- Intended to be used as an immutable object.
 
 local fun = require('fun')
+local urilib = require('uri')
+local digest = require('digest')
 local instance_config = require('internal.config.instance_config')
 local cluster_config = require('internal.config.cluster_config')
 
@@ -62,9 +64,131 @@ function methods.names(self)
         group_name = self._group_name,
         replicaset_name = self._replicaset_name,
         instance_name = self._instance_name,
+        replicaset_uuid = self._replicaset_uuid,
+        instance_uuid = self._instance_uuid,
     }
 end
 
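+--
+-- Derive a deterministic UUID-formatted string from a name: the SHA-1
+-- hex digest of the name is laid out in the 8-4-4-4-12 UUID pattern,
+-- with the fourth group prefixed by '00'.
+--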
+local function uuid_from_name(str)
+    local sha = digest.sha1_hex(str)
+    return sha:sub(1,8)..'-'..sha:sub(9,12)..'-'..sha:sub(13,16)..'-'..
+           '00'..sha:sub(17,18)..'-'..sha:sub(19,30)
+end
+
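+--
+-- Build the per-replica vshard entry for one instance or return nil if
+-- the instance does not have the 'storage' role.
+--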
+local function instance_sharding(iconfig, instance_name)
+    local roles = instance_config:get(iconfig, 'sharding.roles')
+    if roles == nil or #roles == 0 then
+        return nil
+    end
+    assert(type(roles) == 'table')
+    local is_storage = false
+    for _, role in pairs(roles) do
+        is_storage = is_storage or role == 'storage'
+    end
+    if not is_storage then
+        return nil
+    end
+    local zone = instance_config:get(iconfig, 'sharding.zone')
+    local uri = instance_config:instance_uri(iconfig, 'sharding')
+    --
+    -- Currently, vshard does not accept a URI without a username, so if the
+    -- URI has no username, use "guest" as the username (with no password).
+    --
+    local u, err = urilib.parse(uri)
+    -- NB: The URI is validated, so the parsing can't fail.
+    assert(u ~= nil, err)
+    if u.login == nil then
+        u.login = 'guest'
+        uri = urilib.format(u, true)
+    end
+    return {
+        uri = uri,
+        zone = zone,
+        name = instance_name,
+    }
+end
+
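+-- Substitute {{ variable }} placeholders (e.g. {{ instance_name }}) in
+-- string values of an instance config.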
+local function apply_vars_f(data, w, vars)
+    if w.schema.type == 'string' and data ~= nil then
+        assert(type(data) == 'string')
+        return (data:gsub('{{ *(.-) *}}', function(var_name)
+            if vars[var_name] ~= nil then
+                return vars[var_name]
+            end
+            w.error(('Unknown variable %q'):format(var_name))
+        end))
+    end
+    return data
+end
+
+local function iconfig_apply_vars(iconfig, vars)
+    return instance_config:map(iconfig, apply_vars_f, vars)
+end
+
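+--
+-- Build the table that is passed to vshard.storage.cfg() and
+-- vshard.router.cfg(): per-replicaset sections keyed by replicaset
+-- UUID plus the global vshard options.
+--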
+function methods.sharding(self)
+    local sharding = {}
+    for _, group in pairs(self._cconfig.groups) do
+        for replicaset_name, value in pairs(group.replicasets) do
+            local lock
+            local replicaset_uuid
+            local replicaset_cfg = {}
+            for instance_name, _ in pairs(value.instances) do
+                local vars = {instance_name = instance_name}
+                local iconfig = cluster_config:instantiate(self._cconfig,
+                                                           instance_name)
+                iconfig = instance_config:apply_default(iconfig)
+                iconfig = iconfig_apply_vars(iconfig, vars)
+                if lock == nil then
+                    lock = instance_config:get(iconfig, 'sharding.lock')
+                end
+                local isharding = instance_sharding(iconfig, instance_name)
+                if isharding ~= nil then
+                    if replicaset_uuid == nil then
+                        replicaset_uuid = instance_config:get(iconfig,
+                            'database.replicaset_uuid')
+                        if replicaset_uuid == nil then
+                            replicaset_uuid = uuid_from_name(replicaset_name)
+                        end
+                    end
+                    local instance_uuid = instance_config:get(iconfig,
+                        'database.instance_uuid')
+                    if instance_uuid == nil then
+                        instance_uuid = uuid_from_name(instance_name)
+                    end
+                    replicaset_cfg[instance_uuid] = isharding
+                end
+            end
+            if next(replicaset_cfg) ~= nil then
+                assert(replicaset_uuid ~= nil)
+                sharding[replicaset_uuid] = {
+                    replicas = replicaset_cfg,
+                    master = 'auto',
+                    lock = lock,
+                }
+            end
+        end
+    end
+    local cfg = {sharding = sharding}
+
+    local vshard_global_options = {
+        'shard_index',
+        'bucket_count',
+        'rebalancer_disbalance_threshold',
+        'rebalancer_max_receiving',
+        'rebalancer_max_sending',
+        'sync_timeout',
+        'connection_outdate_delay',
+        'failover_ping_timeout',
+        'discovery_mode',
+        'sched_ref_quota',
+        'sched_move_quota',
+    }
+    for _, v in pairs(vshard_global_options) do
+        cfg[v] = instance_config:get(self._iconfig_def, 'sharding.'..v)
+    end
+    return cfg
+end
+
 -- Should be called only if the 'manual' failover method is
 -- configured.
 function methods.leader(self)
@@ -87,23 +211,6 @@ local mt = {
     __index = methods,
 }
 
-local function apply_vars_f(data, w, vars)
-    if w.schema.type == 'string' and data ~= nil then
-        assert(type(data) == 'string')
-        return (data:gsub('{{ *(.-) *}}', function(var_name)
-            if vars[var_name] ~= nil then
-                return vars[var_name]
-            end
-            w.error(('Unknown variable %q'):format(var_name))
-        end))
-    end
-    return data
-end
-
-local function iconfig_apply_vars(iconfig, vars)
-    return instance_config:map(iconfig, apply_vars_f, vars)
-end
-
 local function new(iconfig, cconfig, instance_name)
     -- Precalculate configuration with applied defaults.
     local iconfig_def = instance_config:apply_default(iconfig)
@@ -119,6 +226,17 @@ local function new(iconfig, cconfig, instance_name)
     local found = cluster_config:find_instance(cconfig, instance_name)
     assert(found ~= nil)
 
+    local replicaset_uuid = instance_config:get(iconfig_def,
+        'database.replicaset_uuid')
+    local instance_uuid = instance_config:get(iconfig_def,
+        'database.instance_uuid')
+    if replicaset_uuid == nil then
+        replicaset_uuid = uuid_from_name(found.replicaset_name)
+    end
+    if instance_uuid == nil then
+        instance_uuid = uuid_from_name(instance_name)
+    end
+
     -- Save instance configs of the peers from the same replicaset.
     local peers = {}
     for peer_name, _ in pairs(found.replicaset.instances) do
@@ -222,6 +340,8 @@ local function new(iconfig, cconfig, instance_name)
         _iconfig_def = iconfig_def,
         _cconfig = cconfig,
         _peer_names = peer_names,
+        _replicaset_uuid = replicaset_uuid,
+        _instance_uuid = instance_uuid,
         _peers = peers,
         _group_name = found.group_name,
         _replicaset_name = found.replicaset_name,
diff --git a/src/box/lua/config/init.lua b/src/box/lua/config/init.lua
index 16f1c28f83..3f45bf11a1 100644
--- a/src/box/lua/config/init.lua
+++ b/src/box/lua/config/init.lua
@@ -149,6 +149,7 @@ function methods._initialize(self)
     self:_register_applier(require('internal.config.applier.credentials'))
     self:_register_applier(require('internal.config.applier.console'))
     self:_register_applier(require('internal.config.applier.fiber'))
+    self:_register_applier(require('internal.config.applier.sharding'))
     self:_register_applier(require('internal.config.applier.app'))
 
     if extras ~= nil then
diff --git a/src/box/lua/init.c b/src/box/lua/init.c
index 9be8ae05e9..6f58efc475 100644
--- a/src/box/lua/init.c
+++ b/src/box/lua/init.c
@@ -143,6 +143,7 @@ extern char session_lua[],
 	config_applier_credentials_lua[],
 	config_applier_fiber_lua[],
 	config_applier_mkdir_lua[],
+	config_applier_sharding_lua[],
 	config_cluster_config_lua[],
 	config_configdata_lua[],
 	config_init_lua[],
@@ -381,6 +382,10 @@ static const char *lua_sources[] = {
 	"internal.config.applier.mkdir",
 	config_applier_mkdir_lua,
 
+	"config/applier/sharding",
+	"internal.config.applier.sharding",
+	config_applier_sharding_lua,
+
 	"config/init",
 	"config",
 	config_init_lua,
diff --git a/test/config-luatest/config_test.lua b/test/config-luatest/config_test.lua
index 785a478d5e..a81ee9756c 100644
--- a/test/config-luatest/config_test.lua
+++ b/test/config-luatest/config_test.lua
@@ -138,7 +138,10 @@ g.test_configdata = function()
         instance_name = "instance-001",
         replicaset_name = "replicaset-001",
     }
-    t.assert_equals(data:names(), expected_names)
+    local res_names = data:names()
+    res_names.instance_uuid = nil
+    res_names.replicaset_uuid = nil
+    t.assert_equals(res_names, expected_names)
 
     t.assert_equals(data:peers(), {'instance-001', 'instance-002'})
 end
diff --git a/test/config-luatest/vshard_test.lua b/test/config-luatest/vshard_test.lua
new file mode 100644
index 0000000000..d48fb8014b
--- /dev/null
+++ b/test/config-luatest/vshard_test.lua
@@ -0,0 +1,362 @@
+local fun = require('fun')
+local t = require('luatest')
+local treegen = require('test.treegen')
+local server = require('test.luatest_helpers.server')
+local helpers = require('test.config-luatest.helpers')
+
+local g = helpers.group()
+
+local has_vshard = pcall(require, 'vshard')
+
+g.test_fixed_masters = function(g)
+    t.skip_if(not has_vshard, 'Module "vshard" is not available')
+    local dir = treegen.prepare_directory(g, {}, {})
+    local config = [[
+    credentials:
+      users:
+        guest:
+          roles: [super]
+
+    iproto:
+      listen: 'unix/:./{{ instance_name }}.iproto'
+      advertise:
+        sharding: 'storage:storage@'
+
+    sharding:
+      bucket_count: 1234
+      sched_ref_quota: 258
+
+    groups:
+      group-001:
+        replicasets:
+          replicaset-001:
+            database:
+              replicaset_uuid: '11111111-1111-1111-0011-111111111111'
+            instances:
+              instance-001:
+                database:
+                  mode: rw
+                sharding:
+                  roles: [storage]
+              instance-002:
+                sharding:
+                  roles: [storage]
+          replicaset-002:
+            instances:
+              instance-003:
+                database:
+                  instance_uuid: '22222222-2222-2222-0022-222222222222'
+                  mode: rw
+                sharding:
+                  roles: [storage]
+              instance-004:
+                sharding:
+                  roles: [storage]
+          replicaset-003:
+            instances:
+              instance-005:
+                database:
+                  mode: rw
+                sharding:
+                  roles: [router]
+    ]]
+    local config_file = treegen.write_script(dir, 'config.yaml', config)
+    local opts = {
+        env = {LUA_PATH = os.environ()['LUA_PATH']},
+        config_file = config_file,
+        chdir = dir,
+    }
+    g.server_1 = server:new(fun.chain(opts, {alias = 'instance-001'}):tomap())
+    g.server_2 = server:new(fun.chain(opts, {alias = 'instance-002'}):tomap())
+    g.server_3 = server:new(fun.chain(opts, {alias = 'instance-003'}):tomap())
+    g.server_4 = server:new(fun.chain(opts, {alias = 'instance-004'}):tomap())
+    g.server_5 = server:new(fun.chain(opts, {alias = 'instance-005'}):tomap())
+
+    g.server_1:start({wait_until_ready = false})
+    g.server_2:start({wait_until_ready = false})
+    g.server_3:start({wait_until_ready = false})
+    g.server_4:start({wait_until_ready = false})
+    g.server_5:start({wait_until_ready = false})
+
+    g.server_1:wait_until_ready()
+    g.server_2:wait_until_ready()
+    g.server_3:wait_until_ready()
+    g.server_4:wait_until_ready()
+    g.server_5:wait_until_ready()
+
+    -- Check that the cluster was created.
+    local info = g.server_1:eval('return box.info')
+    t.assert_equals(info.name, 'instance-001')
+    t.assert_equals(info.replicaset.name, 'replicaset-001')
+
+    info = g.server_2:eval('return box.info')
+    t.assert_equals(info.name, 'instance-002')
+    t.assert_equals(info.replicaset.name, 'replicaset-001')
+
+    info = g.server_3:eval('return box.info')
+    t.assert_equals(info.name, 'instance-003')
+    t.assert_equals(info.replicaset.name, 'replicaset-002')
+
+    info = g.server_4:eval('return box.info')
+    t.assert_equals(info.name, 'instance-004')
+    t.assert_equals(info.replicaset.name, 'replicaset-002')
+
+    info = g.server_5:eval('return box.info')
+    t.assert_equals(info.name, 'instance-005')
+    t.assert_equals(info.replicaset.name, 'replicaset-003')
+
+    t.assert_equals(g.server_1:eval('return box.info.ro'), false)
+    t.assert_equals(g.server_2:eval('return box.info.ro'), true)
+    t.assert_equals(g.server_3:eval('return box.info.ro'), false)
+    t.assert_equals(g.server_4:eval('return box.info.ro'), true)
+    t.assert_equals(g.server_5:eval('return box.info.ro'), false)
+
+    -- Check vshard config on each instance.
+    local exp = {
+        bucket_count = 1234,
+        discovery_mode = "on",
+        failover_ping_timeout = 5,
+        listen = "unix/:./instance-002.iproto",
+        read_only = true,
+        rebalancer_disbalance_threshold = 1,
+        rebalancer_max_receiving = 100,
+        rebalancer_max_sending = 1,
+        replication = {
+            "unix/:./instance-001.iproto",
+            "unix/:./instance-002.iproto"
+        },
+        sched_move_quota = 1,
+        sched_ref_quota = 258,
+        shard_index = "bucket_id",
+        sync_timeout = 1,
+        sharding = {
+            ["11111111-1111-1111-0011-111111111111"] = {
+                master = "auto",
+                replicas = {
+                    ["ef10b92d-9ae9-e7bb-004c-89d8fb468341"] = {
+                        name = "instance-002",
+                        uri = "storage:storage@unix/:./instance-002.iproto",
+                    },
+                    ["ffe08155-a26d-bd7c-0024-00ee6815a41c"] = {
+                        name = "instance-001",
+                        uri = "storage:storage@unix/:./instance-001.iproto",
+                    },
+                },
+                weight = 1,
+            },
+            ["d1f75e70-6883-d7fe-0087-e582c9c67543"] = {
+                master = "auto",
+                replicas = {
+                    ["22222222-2222-2222-0022-222222222222"] = {
+                        name = "instance-003",
+                        uri = "storage:storage@unix/:./instance-003.iproto",
+                    },
+                    ["50367d8e-488b-309b-001a-138a0c516772"] = {
+                        name = "instance-004",
+                        uri = "storage:storage@unix/:./instance-004.iproto"
+                    },
+                },
+                weight = 1,
+            },
+        },
+    }
+
+    -- Non-master storages.
+    local exec = 'return vshard.storage.internal.current_cfg'
+    local res = g.server_2:eval(exec)
+    t.assert_equals(res, exp)
+    res = g.server_4:eval(exec)
+    t.assert_equals(res.sharding, exp.sharding)
+
+    -- Router.
+    exec = 'return vshard.router.internal.static_router.current_cfg'
+    res = g.server_5:eval(exec)
+    t.assert_equals(res.sharding, exp.sharding)
+
+    exp = {
+        ["11111111-1111-1111-0011-111111111111"] = {
+            replicas = {
+                ["ef10b92d-9ae9-e7bb-004c-89d8fb468341"] = {
+                    name = "instance-002",
+                    uri = "storage:storage@unix/:./instance-002.iproto",
+                },
+                ["ffe08155-a26d-bd7c-0024-00ee6815a41c"] = {
+                    name = "instance-001",
+                    uri = "storage:storage@unix/:./instance-001.iproto",
+                    master = true,
+                },
+            },
+            weight = 1,
+        },
+        ["d1f75e70-6883-d7fe-0087-e582c9c67543"] = {
+            master = "auto",
+            replicas = {
+                ["22222222-2222-2222-0022-222222222222"] = {
+                    name = "instance-003",
+                    uri = "storage:storage@unix/:./instance-003.iproto",
+                },
+                ["50367d8e-488b-309b-001a-138a0c516772"] = {
+                    name = "instance-004",
+                    uri = "storage:storage@unix/:./instance-004.iproto"
+                },
+            },
+            weight = 1,
+        },
+    }
+
+    -- Master storages.
+    exec = 'return vshard.storage.internal.current_cfg'
+    res = g.server_1:eval(exec)
+    t.assert_equals(res.sharding, exp)
+
+    exp = {
+        ["11111111-1111-1111-0011-111111111111"] = {
+            master = "auto",
+            replicas = {
+                ["ef10b92d-9ae9-e7bb-004c-89d8fb468341"] = {
+                    name = "instance-002",
+                    uri = "storage:storage@unix/:./instance-002.iproto",
+                },
+                ["ffe08155-a26d-bd7c-0024-00ee6815a41c"] = {
+                    name = "instance-001",
+                    uri = "storage:storage@unix/:./instance-001.iproto",
+                },
+            },
+            weight = 1,
+        },
+        ["d1f75e70-6883-d7fe-0087-e582c9c67543"] = {
+            replicas = {
+                ["22222222-2222-2222-0022-222222222222"] = {
+                    name = "instance-003",
+                    uri = "storage:storage@unix/:./instance-003.iproto",
+                    master = true,
+                },
+                ["50367d8e-488b-309b-001a-138a0c516772"] = {
+                    name = "instance-004",
+                    uri = "storage:storage@unix/:./instance-004.iproto"
+                },
+            },
+            weight = 1,
+        },
+    }
+    res = g.server_3:eval(exec)
+    t.assert_equals(res.sharding, exp)
+
+    -- Check that basic sharding works.
+    exec = [[
+        function put(v)
+            box.space.a:insert({v.id, v.bucket_id})
+            return true
+        end
+
+        function get(id)
+            return box.space.a:get(id)
+        end
+
+        box.schema.func.create('put')
+        box.schema.role.grant('public', 'execute', 'function', 'put')
+        box.schema.func.create('get')
+        box.schema.role.grant('public', 'execute', 'function', 'get')
+        local format = {{'id', 'unsigned'}, {'bucket_id', 'unsigned'}}
+        a = box.schema.space.create('a', {format = format})
+        a:create_index('id', {parts = {'id'}})
+        a:create_index('bucket_id', {parts = {'bucket_id'}, unique = false})
+    ]]
+    g.server_1:eval(exec)
+    g.server_3:eval(exec)
+    t.helpers.retrying({timeout = 60}, function()
+        t.assert_equals(g.server_2:eval([[return box.space.a:select()]]), {})
+        t.assert_equals(g.server_4:eval([[return box.space.a:select()]]), {})
+    end)
+
+    exec = [[
+        vshard.router.bootstrap()
+        vshard.router.call(1, 'write', 'put', {{id = 1, bucket_id = 1}})
+        vshard.router.call(800, 'write', 'put', {{id = 800, bucket_id = 800}})
+    ]]
+    g.server_5:eval(exec)
+    t.helpers.retrying({timeout = 60}, function()
+        local res = g.server_2:eval([[return box.space.a:select()]])
+        t.assert_equals(res, {{800, 800}})
+        res = g.server_4:eval([[return box.space.a:select()]])
+        t.assert_equals(res, {{1, 1}})
+    end)
+
+    -- Check that the vshard cfg is rebuilt when the master changes.
+    g.server_1:eval([[box.cfg{read_only = true}]])
+    exec = 'return vshard.storage.internal.current_cfg'
+    exp = {
+        ["11111111-1111-1111-0011-111111111111"] = {
+            master = "auto",
+            replicas = {
+                ["ef10b92d-9ae9-e7bb-004c-89d8fb468341"] = {
+                    name = "instance-002",
+                    uri = "storage:storage@unix/:./instance-002.iproto",
+                },
+                ["ffe08155-a26d-bd7c-0024-00ee6815a41c"] = {
+                    name = "instance-001",
+                    uri = "storage:storage@unix/:./instance-001.iproto",
+                },
+            },
+            weight = 1,
+        },
+        ["d1f75e70-6883-d7fe-0087-e582c9c67543"] = {
+            master = "auto",
+            replicas = {
+                ["22222222-2222-2222-0022-222222222222"] = {
+                    name = "instance-003",
+                    uri = "storage:storage@unix/:./instance-003.iproto",
+                },
+                ["50367d8e-488b-309b-001a-138a0c516772"] = {
+                    name = "instance-004",
+                    uri = "storage:storage@unix/:./instance-004.iproto"
+                },
+            },
+            weight = 1,
+        },
+    }
+    t.helpers.retrying({timeout = 60}, function()
+        local res = g.server_1:eval(exec)
+        t.assert_equals(res.sharding, exp)
+        res = g.server_2:eval(exec)
+        t.assert_equals(res.sharding, exp)
+    end)
+
+    g.server_2:eval([[box.cfg{read_only = false}]])
+    res = g.server_1:eval(exec)
+    t.assert_equals(res.sharding, exp)
+    exp = {
+        ["11111111-1111-1111-0011-111111111111"] = {
+            replicas = {
+                ["ef10b92d-9ae9-e7bb-004c-89d8fb468341"] = {
+                    name = "instance-002",
+                    uri = "storage:storage@unix/:./instance-002.iproto",
+                    master = true,
+                },
+                ["ffe08155-a26d-bd7c-0024-00ee6815a41c"] = {
+                    name = "instance-001",
+                    uri = "storage:storage@unix/:./instance-001.iproto",
+                },
+            },
+            weight = 1,
+        },
+        ["d1f75e70-6883-d7fe-0087-e582c9c67543"] = {
+            master = "auto",
+            replicas = {
+                ["22222222-2222-2222-0022-222222222222"] = {
+                    name = "instance-003",
+                    uri = "storage:storage@unix/:./instance-003.iproto",
+                },
+                ["50367d8e-488b-309b-001a-138a0c516772"] = {
+                    name = "instance-004",
+                    uri = "storage:storage@unix/:./instance-004.iproto"
+                },
+            },
+            weight = 1,
+        },
+    }
+    t.helpers.retrying({timeout = 60}, function()
+        local res = g.server_2:eval(exec)
+        t.assert_equals(res.sharding, exp)
+    end)
+end
-- 
GitLab