From c9c9168f29b883fbd0f2dfba9cbbd35cbca8e60c Mon Sep 17 00:00:00 2001
From: Georgy Moshkin <gmoshkin@picodata.io>
Date: Fri, 2 Dec 2022 15:39:09 +0300
Subject: [PATCH] refactor: rename box.space._picodata_cluster_state ->
 box.space._picodata_property

---
 src/main.rs                      | 18 +++++++++---------
 src/storage.rs                   | 23 ++++++++++++-----------
 src/traft/mod.rs                 |  2 +-
 src/traft/node.rs                |  8 ++++----
 test/int/test_basics.py          |  6 +++---
 test/int/test_joining.py         |  5 ++---
 test/int/test_migration.py       |  2 +-
 test/int/test_network_effects.py |  2 +-
 8 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/src/main.rs b/src/main.rs
index c0d687efea..311fd6b764 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -12,7 +12,7 @@ use ::tarantool::transaction::start_transaction;
 use std::convert::TryFrom;
 use std::time::{Duration, Instant};
 use storage::Clusterwide;
-use storage::{ClusterwideSpace, StateKey};
+use storage::{ClusterwideSpace, PropertyName};
 use traft::rpc;
 use traft::RaftSpaceAccess;
 
@@ -385,8 +385,8 @@ fn picolib_setup(args: &args::Run) {
         "push_schema_version",
         tlua::function1(|id: u64| -> traft::Result<()> {
             let op = OpDML::replace(
-                ClusterwideSpace::State,
-                &(StateKey::DesiredSchemaVersion, id),
+                ClusterwideSpace::Property,
+                &(PropertyName::DesiredSchemaVersion, id),
             )?;
             node::global()?.propose_and_wait(op, Duration::MAX)??;
             Ok(())
@@ -402,8 +402,8 @@ fn picolib_setup(args: &args::Run) {
                 None => return Ok(()),
             };
             let op = OpDML::replace(
-                ClusterwideSpace::State,
-                &(StateKey::DesiredSchemaVersion, id),
+                ClusterwideSpace::Property,
+                &(PropertyName::DesiredSchemaVersion, id),
             )?;
             node.propose_and_wait(op, Duration::MAX)??;
             event::wait(Event::MigrateDone)
@@ -811,16 +811,16 @@ fn start_boot(args: &args::Run) {
         init_entries_push_op(traft::Op::persist_peer(peer));
         init_entries_push_op(
             OpDML::insert(
-                ClusterwideSpace::State,
-                &(StateKey::ReplicationFactor, args.init_replication_factor),
+                ClusterwideSpace::Property,
+                &(PropertyName::ReplicationFactor, args.init_replication_factor),
             )
             .expect("cannot fail")
             .into(),
         );
         init_entries_push_op(
             OpDML::insert(
-                ClusterwideSpace::State,
-                &(StateKey::DesiredSchemaVersion, 0),
+                ClusterwideSpace::Property,
+                &(PropertyName::DesiredSchemaVersion, 0),
             )
             .expect("cannot fail")
             .into(),
diff --git a/src/storage.rs b/src/storage.rs
index 5ed7018314..da863b68ad 100644
--- a/src/storage.rs
+++ b/src/storage.rs
@@ -21,7 +21,7 @@ use std::marker::PhantomData;
     pub enum ClusterwideSpace {
         Group = "_picodata_raft_group",
         Address = "_picodata_peer_address",
-        State = "_picodata_cluster_state",
+        Property = "_picodata_property",
         Replicaset = "_picodata_replicaset",
         Migration = "_picodata_migration",
     }
@@ -65,12 +65,12 @@ impl ClusterwideSpace {
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-// StateKey
+// PropertyName
 ////////////////////////////////////////////////////////////////////////////////
 
 ::tarantool::define_str_enum! {
-    /// An enumeration of builtin raft spaces
-    pub enum StateKey {
+    /// An enumeration of [`ClusterwideSpace::Property`] key names.
+    pub enum PropertyName {
         ReplicationFactor = "replication_factor",
         VshardBootstrapped = "vshard_bootstrapped",
         DesiredSchemaVersion = "desired_schema_version",
@@ -107,14 +107,13 @@ impl Clusterwide {
 ////////////////////////////////////////////////////////////////////////////////
 
 /// A struct for accessing storage of the cluster-wide key-value state
-/// (currently cluster_state).
 #[derive(Clone, Debug)]
 pub struct State {
     space: Space,
 }
 
 impl State {
-    const SPACE_NAME: &'static str = ClusterwideSpace::State.as_str();
+    const SPACE_NAME: &'static str = ClusterwideSpace::Property.as_str();
     const INDEX_PRIMARY: &'static str = "pk";
 
     pub fn new() -> tarantool::Result<Self> {
@@ -137,7 +136,7 @@ impl State {
     }
 
     #[inline]
-    pub fn get<T>(&self, key: StateKey) -> tarantool::Result<Option<T>>
+    pub fn get<T>(&self, key: PropertyName) -> tarantool::Result<Option<T>>
     where
         T: DecodeOwned,
     {
@@ -149,20 +148,22 @@ impl State {
 
     #[allow(dead_code)]
     #[inline]
-    pub fn put(&self, key: StateKey, value: &impl serde::Serialize) -> tarantool::Result<()> {
+    pub fn put(&self, key: PropertyName, value: &impl serde::Serialize) -> tarantool::Result<()> {
         self.space.put(&(key, value))?;
         Ok(())
     }
 
     #[inline]
     pub fn vshard_bootstrapped(&self) -> tarantool::Result<bool> {
-        Ok(self.get(StateKey::VshardBootstrapped)?.unwrap_or_default())
+        Ok(self
+            .get(PropertyName::VshardBootstrapped)?
+            .unwrap_or_default())
     }
 
     #[inline]
     pub fn replication_factor(&self) -> tarantool::Result<usize> {
         let res = self
-            .get(StateKey::ReplicationFactor)?
+            .get(PropertyName::ReplicationFactor)?
             .expect("replication_factor must be set at boot");
         Ok(res)
     }
@@ -170,7 +171,7 @@ impl State {
     #[inline]
     pub fn desired_schema_version(&self) -> tarantool::Result<u64> {
         let res = self
-            .get(StateKey::DesiredSchemaVersion)?
+            .get(PropertyName::DesiredSchemaVersion)?
             .unwrap_or_default();
         Ok(res)
     }
diff --git a/src/traft/mod.rs b/src/traft/mod.rs
index ccbf6b579e..968a848c09 100644
--- a/src/traft/mod.rs
+++ b/src/traft/mod.rs
@@ -189,7 +189,7 @@ impl Op {
             }
             Self::Dml(op) => {
                 let res = Box::new(op.result());
-                if op.space() == &ClusterwideSpace::State {
+                if op.space() == &ClusterwideSpace::Property {
                     event::broadcast(Event::ClusterStateChanged);
                 }
                 res
diff --git a/src/traft/node.rs b/src/traft/node.rs
index 062d4396b8..ae84a8084f 100644
--- a/src/traft/node.rs
+++ b/src/traft/node.rs
@@ -31,7 +31,7 @@ use crate::governor::raft_conf_change;
 use crate::governor::waiting_migrations;
 use crate::kvcell::KVCell;
 use crate::r#loop::{FlowControl, Loop};
-use crate::storage::{Clusterwide, ClusterwideSpace, StateKey};
+use crate::storage::{Clusterwide, ClusterwideSpace, PropertyName};
 use crate::stringify_cfunc;
 use crate::traft::rpc;
 use crate::traft::ContextCoercion as _;
@@ -391,7 +391,7 @@ impl NodeImpl {
                 let replication_factor = self
                     .storage
                     .state
-                    .get(StateKey::ReplicationFactor)?
+                    .get(PropertyName::ReplicationFactor)?
                     .ok_or_else(|| Error::other("missing replication_factor value in storage"))?;
                 Topology::from_peers(peers).with_replication_factor(replication_factor)
             }
@@ -1366,8 +1366,8 @@ fn raft_conf_change_loop(
                     // gets reconfigured
                     node.propose_and_wait(
                         traft::OpDML::replace(
-                            ClusterwideSpace::State,
-                            &(StateKey::VshardBootstrapped, true),
+                            ClusterwideSpace::Property,
+                            &(PropertyName::VshardBootstrapped, true),
                         )?,
                         // TODO: don't hard code the timeout
                         Duration::from_secs(3),
diff --git a/test/int/test_basics.py b/test/int/test_basics.py
index cb781627b4..90aa90daf5 100644
--- a/test/int/test_basics.py
+++ b/test/int/test_basics.py
@@ -219,8 +219,8 @@ def test_raft_log(instance: Instance):
 +-----+----+-----+--------+
 |  1  | 1  |1.0.1|Insert(_picodata_peer_address, [1,"127.0.0.1:{p}"])|
 |  2  | 1  |1.0.2|PersistPeer(i1, 1, r1, Offline(0), {b})|
-|  3  | 1  |1.0.3|Insert(_picodata_cluster_state, ["replication_factor",1])|
-|  4  | 1  |1.0.4|Insert(_picodata_cluster_state, ["desired_schema_version",0])|
+|  3  | 1  |1.0.3|Insert(_picodata_property, ["replication_factor",1])|
+|  4  | 1  |1.0.4|Insert(_picodata_property, ["desired_schema_version",0])|
 |  5  | 1  |     |AddNode(1)|
 |  6  | 2  |     |-|
 |  7  | 2  |1.1.1|PersistPeer(i1, 1, r1, Offline(0) -> Online(1), {b})|
@@ -228,7 +228,7 @@ def test_raft_log(instance: Instance):
 |  9  | 2  |1.1.3|PersistPeer(i1, 1, r1, Replicated(1) -> Online(1), {b})|
 | 10  | 2  |1.1.4|Insert(_picodata_replicaset, ["r1","e0df68c5-e7f9-395f-86b3-30ad9e1b7b07","i1",1.0,0])|
 | 11  | 2  |1.1.5|PersistPeer(i1, 1, r1, ShardingInitialized(1) -> Online(1), {b})|
-| 12  | 2  |1.1.6|Replace(_picodata_cluster_state, ["vshard_bootstrapped",true])|
+| 12  | 2  |1.1.6|Replace(_picodata_property, ["vshard_bootstrapped",true])|
 | 13  | 2  |1.1.7|PersistPeer(i1, 1, r1, Online(1), {b})|
 +-----+----+-----+--------+
 """.format(  # noqa: E501
diff --git a/test/int/test_joining.py b/test/int/test_joining.py
index 0f79f5d21a..bd1540541b 100644
--- a/test/int/test_joining.py
+++ b/test/int/test_joining.py
@@ -172,8 +172,7 @@ def test_init_replication_factor(cluster: Cluster):
     # Scenario: first instance shares --init-replication-factor to the whole cluster
     #   Given an Leader instance with --init_replication_factor=2
     #   When a new instances with different --init-replication-factor joins to the cluster
-    #   Then all of them have cluster_state[replication_factor] equals to the Leader
-    #   And there are two replicasets in the cluster
+    #   Then there are two replicasets in the cluster
 
     i1 = cluster.add_instance(init_replication_factor=2)
     i2 = cluster.add_instance(init_replication_factor=3)
@@ -181,7 +180,7 @@ def test_init_replication_factor(cluster: Cluster):
 
     def read_replication_factor(instance):
         return instance.eval(
-            'return pico.space.cluster_state:get("replication_factor").value'
+            'return pico.space.property:get("replication_factor").value'
         )
 
     assert read_replication_factor(i1) == 2
diff --git a/test/int/test_migration.py b/test/int/test_migration.py
index e13e1dcbcc..699092e65c 100644
--- a/test/int/test_migration.py
+++ b/test/int/test_migration.py
@@ -16,7 +16,7 @@ def test_push_schema_version(cluster: Cluster):
     i1.promote_or_fail()
     i1.eval("pico.push_schema_version(3)")
     key = "desired_schema_version"
-    assert [[key, 3]] == i2.call("pico.space.cluster_state:select", [key])
+    assert [[key, 3]] == i2.call("pico.space.property:select", [key])
 
 
 def test_apply_migrations(cluster: Cluster):
diff --git a/test/int/test_network_effects.py b/test/int/test_network_effects.py
index 62adcf366f..f057b188cc 100644
--- a/test/int/test_network_effects.py
+++ b/test/int/test_network_effects.py
@@ -68,7 +68,7 @@ def test_log_rollback(cluster3: Cluster):
     i3.assert_raft_status("Follower")
 
     def propose_state_change(srv: Instance, value):
-        code = 'pico.space.cluster_state:put({"test-timeline", "%s"})' % value
+        code = 'pico.space.property:put({"test-timeline", "%s"})' % value
         return srv.raft_propose_eval(code, 0.1)
 
     propose_state_change(i1, "i1 is a leader")
-- 
GitLab