diff --git a/src/traft/rpc/sharding.rs b/src/traft/rpc/sharding.rs
index 293e8468119c9fcc2a358c13f7eca27802a29017..bec5accf69861a6c6ccb72e8ada3364eed16f9d4 100644
--- a/src/traft/rpc/sharding.rs
+++ b/src/traft/rpc/sharding.rs
@@ -146,7 +146,7 @@ pub mod cfg {
         }
         Ok(Self {
             sharding,
-            discovery_mode: DiscoveryMode::Off,
+            discovery_mode: DiscoveryMode::On,
         })
     }
 }
diff --git a/test/int/test_replication.py b/test/int/test_replication.py
index 950cc8b4264cd8317b236f429b01cea94dbd1b06..38cecd0b362098a5f48e13157c896174acce29f1 100644
--- a/test/int/test_replication.py
+++ b/test/int/test_replication.py
@@ -1,5 +1,6 @@
 import funcy  # type: ignore
 import pytest
+import time
 
 from conftest import (
     Cluster,
@@ -126,3 +127,41 @@ def test_replication_works(cluster1: Cluster):
 
     wait_replicas_joined(i2, 2)
     wait_replicas_joined(i3, 2)
+
+
+def test_bucket_discovery_single(instance: Instance):
+    @funcy.retry(tries=30, timeout=0.2)
+    def wait_buckets_available(i: Instance, expected: int):
+        assert expected == i.call("vshard.router.info")["bucket"]["available_rw"]
+
+    wait_buckets_available(instance, 3000)
+
+
+@pytest.mark.xfail(
+    run=True,
+    reason=(
+        "currently we bootstrap vshard even before the first replicaset is filled, "
+        "but we shouldn't"
+    ),
+)
+def test_bucket_discovery_respects_replication_factor(cluster: Cluster):
+    i1, *_ = cluster.deploy(instance_count=1, init_replication_factor=2)
+    time.sleep(1)
+    assert 0 == i1.call("vshard.router.info")["bucket"]["available_rw"]
+
+
+@funcy.retry(tries=30, timeout=0.2)
+def wait_has_buckets(i: Instance, expected_active: int):
+    i.call("vshard.storage.rebalancer_wakeup")
+    storage_info = i.call("vshard.storage.info")
+    assert expected_active == storage_info["bucket"]["active"]
+
+
+def test_bucket_rebalancing(cluster: Cluster):
+    i1, *_ = cluster.deploy(instance_count=1, init_replication_factor=1)
+
+    i2 = cluster.add_instance()
+    wait_has_buckets(i2, 1500)
+
+    i3 = cluster.add_instance()
+    wait_has_buckets(i3, 1000)
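
A note on the one-line Rust change above: vshard routers accept a
`discovery_mode` option ("on", "off", or "once") that controls the background
fiber which scans the storages and fills the router's bucket routing table.
With discovery off, the router only learns bucket locations lazily per
request, so `vshard.router.info().bucket.available_rw` may stay at 0; with it
on, the count converges to the full bucket total, which is exactly what
`test_bucket_discovery_single` asserts (3000 buckets on a single instance).
Likewise, `test_bucket_rebalancing` relies on the rebalancer spreading those
3000 buckets evenly: 1500 each across two instances, 1000 each across three.
A minimal sketch of how such a flag can be modeled on the Rust side (the enum
body and the `as_str` helper below are illustrative assumptions, not the
actual picodata definitions):

    /// Mirrors the `discovery_mode` option of `vshard.router.cfg`.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum DiscoveryMode {
        /// No discovery fiber; bucket locations are only learned lazily.
        Off,
        /// A background fiber keeps polling storages until the routing
        /// table is complete.
        On,
        /// Discovery performs a single full scan and then stops.
        Once,
    }

    impl DiscoveryMode {
        /// The string form expected by the Lua-side `vshard.router.cfg`.
        pub fn as_str(&self) -> &'static str {
            match self {
                Self::Off => "off",
                Self::On => "on",
                Self::Once => "once",
            }
        }
    }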