Commit 7337f105 authored by Georgy Moshkin

rename: more consistent naming in a couple of places

parent 9d91b461
1 merge request: !573 Implement basic ACL functions
@@ -573,7 +573,7 @@ impl Clusterwide {
         // If we're not the replication master, the rest of the data will come
         // via tarantool replication.
         if is_master {
-            self.apply_ddl_changes_on_replicaset_master(&old_space_versions)?;
+            self.apply_ddl_changes_on_master(&old_space_versions)?;
             set_local_schema_version(data.schema_version)?;
         }

@@ -596,7 +596,7 @@ impl Clusterwide {
         Ok(())
     }

-    pub fn apply_ddl_changes_on_replicaset_master(
+    pub fn apply_ddl_changes_on_master(
         &self,
         old_space_versions: &HashMap<SpaceId, u64>,
     ) -> traft::Result<()> {
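For context: the renamed method is the master-only half of snapshot application, since replicas receive the rest of the data through tarantool replication. Below is a minimal self-contained sketch of that pattern; the Node type, apply_snapshot, and the method bodies are simplified stand-ins for illustration, not picodata's actual definitions.

use std::collections::HashMap;

type SpaceId = u32;

struct Node {
    is_master: bool,
}

impl Node {
    // Only the master applies DDL changes directly; read-only replicas
    // receive the resulting schema via replication instead.
    fn apply_snapshot(&self, old_space_versions: &HashMap<SpaceId, u64>) -> Result<(), String> {
        if self.is_master {
            self.apply_ddl_changes_on_master(old_space_versions)?;
        }
        Ok(())
    }

    // Stand-in for the renamed method: the real one reconciles local
    // spaces against the clusterwide schema.
    fn apply_ddl_changes_on_master(&self, _old: &HashMap<SpaceId, u64>) -> Result<(), String> {
        Ok(())
    }
}

fn main() {
    let master = Node { is_master: true };
    master.apply_snapshot(&HashMap::new()).unwrap();
}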
@@ -863,7 +863,7 @@ impl NodeImpl {
             }
             Op::DdlCommit => {
                 let v_local = local_schema_version().expect("storage error");
-                let pending_version = storage_properties
+                let v_pending = storage_properties
                     .pending_schema_version()
                     .expect("storage error")
                     .expect("granted we don't mess up log compaction, this should not be None");

@@ -873,7 +873,7 @@ impl NodeImpl {
                     .expect("granted we don't mess up log compaction, this should not be None");
                 // This instance is catching up to the cluster.
-                if v_local < pending_version {
+                if v_local < v_pending {
                     if self.is_readonly() {
                         return SleepAndRetry;
                     } else {

@@ -881,7 +881,7 @@ impl NodeImpl {
                         let resp = rpc::ddl_apply::apply_schema_change(
                             &self.storage,
                             &ddl,
-                            pending_version,
+                            v_pending,
                             true,
                         )
                         .expect("storage error");
@@ -920,12 +920,12 @@ impl NodeImpl {
                     .delete(PropertyName::PendingSchemaVersion)
                     .expect("storage error");
                 storage_properties
-                    .put(PropertyName::GlobalSchemaVersion, &pending_version)
+                    .put(PropertyName::GlobalSchemaVersion, &v_pending)
                     .expect("storage error");
             }
             Op::DdlAbort => {
                 let v_local = local_schema_version().expect("storage error");
-                let pending_version: u64 = storage_properties
+                let v_pending: u64 = storage_properties
                     .pending_schema_version()
                     .expect("storage error")
                     .expect("granted we don't mess up log compaction, this should not be None");

@@ -935,7 +935,7 @@ impl NodeImpl {
                     .expect("granted we don't mess up log compaction, this should not be None");
                 // This condition means, schema versions must always increase
                 // even after an DdlAbort
-                if v_local == pending_version {
+                if v_local == v_pending {
                     if self.is_readonly() {
                         return SleepAndRetry;
                     } else {
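The DdlAbort arm leans on the invariant stated in the comment above: schema versions only ever increase, even when a DDL operation is aborted. So an instance whose local version equals the pending one has already applied the now-aborted change and must actively undo it, which a read-only replica cannot do on its own. A minimal sketch under that assumption (names hypothetical):

enum AbortNext {
    SleepAndRetry,   // read-only: cannot roll back locally, retry later
    RollBackLocally, // writable: undo the change; the version still increases
    NothingToDo,     // the pending change was never applied here
}

fn on_ddl_abort(v_local: u64, v_pending: u64, is_readonly: bool) -> AbortNext {
    if v_local == v_pending {
        if is_readonly {
            AbortNext::SleepAndRetry
        } else {
            AbortNext::RollBackLocally
        }
    } else {
        AbortNext::NothingToDo
    }
}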