From 5a4101855289e2ca06de0a1918635ba6fae5253a Mon Sep 17 00:00:00 2001
From: Kurdakov Alexander <kusancho12@gmail.com>
Date: Wed, 15 Jan 2025 00:55:09 +0300
Subject: [PATCH 1/5] feat: rename parameters from _pico_db_config according to
 ADR

---
 src/access_control.rs                  | 19 +++---
 src/config.rs                          | 74 ++++++++++-----------
 src/lib.rs                             |  4 +-
 src/pgproto/backend/storage.rs         |  8 +--
 src/reachability.rs                    | 22 ++----
 src/storage.rs                         | 51 ++++++--------
 src/storage/snapshot.rs                |  4 +-
 src/traft/node.rs                      |  4 +-
 test/int/test_basics.py                | 27 ++++----
 test/int/test_compaction.py            | 10 +--
 test/int/test_ddl.py                   |  2 +-
 test/int/test_network_effects.py       |  2 +-
 test/int/test_plugin.py                |  6 +-
 test/int/test_runtime_configuration.py |  2 +-
 test/int/test_sql.py                   | 92 +++++++++++++-------------
 test/pgproto/storage_limits_test.py    |  6 +-
 16 files changed, 153 insertions(+), 180 deletions(-)

diff --git a/src/access_control.rs b/src/access_control.rs
index 66accf545d..f8025ad27a 100644
--- a/src/access_control.rs
+++ b/src/access_control.rs
@@ -126,7 +126,7 @@ pub fn validate_password(
     // This check is called from user facing API.
     // A user is not expected to have access to _pico_property
     let password_min_length =
-        session::with_su(ADMIN_ID, || storage.db_config.password_min_length())??;
+        session::with_su(ADMIN_ID, || storage.db_config.auth_password_length_min())??;
     if password.len() < password_min_length {
         return Err(Error::Other(
             format!(
@@ -138,24 +138,27 @@ pub fn validate_password(
         ));
     }
 
-    let password_enforce_uppercase =
-        session::with_su(ADMIN_ID, || storage.db_config.password_enforce_uppercase())??;
+    let password_enforce_uppercase = session::with_su(ADMIN_ID, || {
+        storage.db_config.auth_password_enforce_uppercase()
+    })??;
     if password_enforce_uppercase && !password.chars().any(|ch| ch.is_uppercase()) {
         return Err(Error::Other(
             "invalid password: password should contains at least one uppercase letter".into(),
         ));
     }
 
-    let password_enforce_lowercase =
-        session::with_su(ADMIN_ID, || storage.db_config.password_enforce_lowercase())??;
+    let password_enforce_lowercase = session::with_su(ADMIN_ID, || {
+        storage.db_config.auth_password_enforce_lowercase()
+    })??;
     if password_enforce_lowercase && !password.chars().any(|ch| ch.is_lowercase()) {
         return Err(Error::Other(
             "invalid password: password should contains at least one lowercase letter".into(),
         ));
     }
 
-    let password_enforce_digits =
-        session::with_su(ADMIN_ID, || storage.db_config.password_enforce_digits())??;
+    let password_enforce_digits = session::with_su(ADMIN_ID, || {
+        storage.db_config.auth_password_enforce_digits()
+    })??;
     if password_enforce_digits && !password.chars().any(|ch| ch.is_ascii_digit()) {
         return Err(Error::Other(
             "invalid password: password should contains at least one digit".into(),
@@ -163,7 +166,7 @@ pub fn validate_password(
     }
 
     let password_enforce_specialchars = session::with_su(ADMIN_ID, || {
-        storage.db_config.password_enforce_specialchars()
+        storage.db_config.auth_password_enforce_specialchars()
     })??;
     if password_enforce_specialchars && !password.chars().any(|ch| SPECIAL_CHARACTERS.contains(&ch))
     {
diff --git a/src/config.rs b/src/config.rs
index 0920085ab6..12ce2d2c1f 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1495,28 +1495,28 @@ pub struct AlterSystemParameters {
     /// Password should contain at least this many characters
     #[introspection(sbroad_type = SbroadType::Unsigned)]
     #[introspection(config_default = 8)]
-    pub password_min_length: u64,
+    pub auth_password_length_min: u64,
 
     /// Password should contain at least one uppercase letter
     #[introspection(sbroad_type = SbroadType::Boolean)]
     #[introspection(config_default = true)]
-    pub password_enforce_uppercase: bool,
+    pub auth_password_enforce_uppercase: bool,
 
     /// Password should contain at least one lowercase letter
     #[introspection(sbroad_type = SbroadType::Boolean)]
     #[introspection(config_default = true)]
-    pub password_enforce_lowercase: bool,
+    pub auth_password_enforce_lowercase: bool,
 
     /// Password should contain at least one digit
     #[introspection(sbroad_type = SbroadType::Boolean)]
     #[introspection(config_default = true)]
-    pub password_enforce_digits: bool,
+    pub auth_password_enforce_digits: bool,
 
     /// Password should contain at least one special symbol.
     /// Special symbols - &, |, ?, !, $, @
     #[introspection(sbroad_type = SbroadType::Boolean)]
     #[introspection(config_default = false)]
-    pub password_enforce_specialchars: bool,
+    pub auth_password_enforce_specialchars: bool,
 
     /// Maximum number of login attempts through `picodata connect`.
     /// Each failed login attempt increases a local per user counter of failed attempts.
@@ -1525,45 +1525,17 @@ pub struct AlterSystemParameters {
     /// Local counter for a user is reset on successful login.
     #[introspection(sbroad_type = SbroadType::Unsigned)]
     #[introspection(config_default = 4)]
-    pub max_login_attempts: u64,
-
-    /// Number of seconds to wait before automatically changing an
-    /// unresponsive instance's state to Offline.
-    #[introspection(sbroad_type = SbroadType::Double)]
-    #[introspection(config_default = 30.0)]
-    pub auto_offline_timeout: f64,
-
-    /// Maximum number of seconds to wait before sending another heartbeat
-    /// to an unresponsive instance.
-    #[introspection(sbroad_type = SbroadType::Double)]
-    #[introspection(config_default = 5.0)]
-    pub max_heartbeat_period: f64,
+    pub auth_login_attempt_max: u64,
 
     /// PG statement storage size.
     #[introspection(sbroad_type = SbroadType::Unsigned)]
     #[introspection(config_default = 1024)]
-    pub max_pg_statements: u64,
+    pub pg_statement_max: u64,
 
     /// PG portal storage size.
     #[introspection(sbroad_type = SbroadType::Unsigned)]
     #[introspection(config_default = 1024)]
-    pub max_pg_portals: u64,
-
-    /// Maximum size in bytes `_raft_log` system space is allowed to grow to
-    /// before it gets automatically compacted.
-    ///
-    /// NOTE: The size is computed from the tuple storage only. Some memory will
-    /// also be allocated for the index, and there's no way to control this.
-    /// The option only controls the memory allocated for tuples.
-    #[introspection(sbroad_type = SbroadType::Unsigned)]
-    #[introspection(config_default = 64 * 1024 * 1024)]
-    pub cluster_wal_max_size: u64,
-
-    /// Maximum number of tuples `_raft_log` system space is allowed to grow to
-    /// before it gets automatically compacted.
-    #[introspection(sbroad_type = SbroadType::Unsigned)]
-    #[introspection(config_default = 64)]
-    pub cluster_wal_max_count: u64,
+    pub pg_portal_max: u64,
 
     /// Raft snapshot will be sent out in chunks not bigger than this threshold.
     /// Note: actual snapshot size may exceed this threshold. In most cases
@@ -1573,7 +1545,7 @@ pub struct AlterSystemParameters {
     /// greater than this.
     #[introspection(sbroad_type = SbroadType::Unsigned)]
     #[introspection(config_default = 16 * 1024 * 1024)]
-    pub snapshot_chunk_max_size: u64,
+    pub raft_snapshot_chunk_size_max: u64,
 
     /// Snapshot read views with live reference counts will be forcefully
     /// closed after this number of seconds. This is necessary if followers
@@ -1584,7 +1556,29 @@ pub struct AlterSystemParameters {
     // need them anymore. Or maybe timeouts is the better way..
     #[introspection(sbroad_type = SbroadType::Double)]
     #[introspection(config_default = (24 * 3600))]
-    pub snapshot_read_view_close_timeout: f64,
+    pub raft_snapshot_read_view_close_timeout: f64,
+
+    /// Maximum size in bytes `_raft_log` system space is allowed to grow to
+    /// before it gets automatically compacted.
+    ///
+    /// NOTE: The size is computed from the tuple storage only. Some memory will
+    /// also be allocated for the index, and there's no way to control this.
+    /// The option only controls the memory allocated for tuples.
+    #[introspection(sbroad_type = SbroadType::Unsigned)]
+    #[introspection(config_default = 64 * 1024 * 1024)]
+    pub raft_wal_size_max: u64,
+
+    /// Maximum number of tuples `_raft_log` system space is allowed to grow to
+    /// before it gets automatically compacted.
+    #[introspection(sbroad_type = SbroadType::Unsigned)]
+    #[introspection(config_default = 64)]
+    pub raft_wal_count_max: u64,
+
+    /// Number of seconds to wait before automatically changing an
+    /// unresponsive instance's state to Offline.
+    #[introspection(sbroad_type = SbroadType::Double)]
+    #[introspection(config_default = 30.0)]
+    pub governor_auto_offline_timeout: f64,
 
     /// Governor proposes operations to raft log and waits for them to be
     /// applied to the local raft machine.
@@ -1820,14 +1814,14 @@ pub fn apply_parameter(tuple: Tuple) {
             .expect("type already checked");
 
         set_cfg_field("net_msg_max", value).expect("changing net_msg_max shouldn't fail");
-    } else if name == system_parameter_name!(max_pg_portals) {
+    } else if name == system_parameter_name!(pg_portal_max) {
         let value = tuple
             .field::<usize>(1)
             .expect("there is always 2 fields in _pico_db_config tuple")
             .expect("type already checked");
         // Cache the value.
         MAX_PG_PORTALS.store(value, Ordering::Relaxed);
-    } else if name == system_parameter_name!(max_pg_statements) {
+    } else if name == system_parameter_name!(pg_statement_max) {
         let value = tuple
             .field::<usize>(1)
             .expect("there is always 2 fields in _pico_db_config tuple")
diff --git a/src/lib.rs b/src/lib.rs
index de688e5692..86d7c02211 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -358,7 +358,7 @@ fn redirect_interactive_sql() {
 /// Sets a check that will be performed when a user is logging in
 /// Checks for user exceeding maximum number of login attempts and if user was blocked.
 ///
-/// Also see [`config::AlterSystemParameters::max_login_attempts`].
+/// Also see [`config::AlterSystemParameters::auth_login_attempt_max`].
 fn set_login_check(storage: Clusterwide) {
     const MAX_ATTEMPTS_EXCEEDED: &str = "Maximum number of login attempts exceeded";
     const NO_LOGIN_PRIVILEGE: &str = "User does not have login privilege";
@@ -404,7 +404,7 @@ fn set_login_check(storage: Clusterwide) {
         };
         let max_login_attempts = storage
             .db_config
-            .max_login_attempts()
+            .auth_login_attempt_max()
             .expect("accessing storage should not fail");
         if storage
             .privileges
diff --git a/src/pgproto/backend/storage.rs b/src/pgproto/backend/storage.rs
index b3fcfd4880..0ab7470027 100644
--- a/src/pgproto/backend/storage.rs
+++ b/src/pgproto/backend/storage.rs
@@ -46,12 +46,12 @@ struct StorageContext {
 impl StorageContext {
     fn portals() -> StorageContext {
         fn get_capacity() -> PgResult<usize> {
-            Ok(node::global()?.storage.db_config.max_pg_portals()?)
+            Ok(node::global()?.storage.db_config.pg_portal_max()?)
         }
 
         Self {
             value_kind: "Portal",
-            capacity_parameter: crate::system_parameter_name!(max_pg_portals),
+            capacity_parameter: crate::system_parameter_name!(pg_portal_max),
             get_capacity,
             dublicate_key_error_code: PgErrorCode::DuplicateCursor,
         }
@@ -59,12 +59,12 @@ impl StorageContext {
 
     fn statements() -> StorageContext {
         fn get_capacity() -> PgResult<usize> {
-            Ok(node::global()?.storage.db_config.max_pg_statements()?)
+            Ok(node::global()?.storage.db_config.pg_statement_max()?)
         }
 
         Self {
             value_kind: "Statement",
-            capacity_parameter: crate::system_parameter_name!(max_pg_statements),
+            capacity_parameter: crate::system_parameter_name!(pg_statement_max),
             get_capacity,
             dublicate_key_error_code: PgErrorCode::DuplicatePreparedStatement,
         }
diff --git a/src/reachability.rs b/src/reachability.rs
index 7b48bca983..c8dd0a8ac0 100644
--- a/src/reachability.rs
+++ b/src/reachability.rs
@@ -30,6 +30,8 @@ pub fn instance_reachability_manager(storage: Clusterwide) -> InstanceReachabili
 }
 
 impl InstanceReachabilityManager {
+    const MAX_HEARTBEAT_PERIOD: Duration = Duration::from_secs(5);
+
     pub fn new(storage: Clusterwide) -> Self {
         Self {
             storage: Some(storage),
@@ -155,7 +157,7 @@ impl InstanceReachabilityManager {
         //
         let now = fiber::clock();
         let since_last_success = last_attempt.duration_since(last_success);
-        let delay = since_last_success.min(self.max_heartbeat_period());
+        let delay = since_last_success.min(Self::MAX_HEARTBEAT_PERIOD);
         if now > last_attempt + delay {
             return true;
         }
@@ -175,23 +177,7 @@ impl InstanceReachabilityManager {
             .unwrap_or_else(|| Clusterwide::try_get(false).expect("should be initialized by now"));
         storage
             .db_config
-            .auto_offline_timeout()
-            .expect("storage aint gonna fail")
-    }
-
-    fn max_heartbeat_period(&self) -> Duration {
-        // TODO: it would be better for cache locality and overall performance
-        // if we don't look this value up in the storage every time. Instead we
-        // could store it in a field of this struct and only update it's value
-        // once per raft loop iteration by calling a method update_configuration
-        // or something like that.
-        let storage = self
-            .storage
-            .as_ref()
-            .unwrap_or_else(|| Clusterwide::try_get(false).expect("should be initialized by now"));
-        storage
-            .db_config
-            .max_heartbeat_period()
+            .governor_auto_offline_timeout()
             .expect("storage aint gonna fail")
     }
 }
diff --git a/src/storage.rs b/src/storage.rs
index 745bf70803..5ba987e040 100644
--- a/src/storage.rs
+++ b/src/storage.rs
@@ -3130,43 +3130,43 @@ impl DbConfig {
     }
 
     #[inline]
-    pub fn password_min_length(&self) -> tarantool::Result<usize> {
-        self.get_or_default(system_parameter_name!(password_min_length))
+    pub fn auth_password_length_min(&self) -> tarantool::Result<usize> {
+        self.get_or_default(system_parameter_name!(auth_password_length_min))
     }
 
     #[inline]
-    pub fn password_enforce_uppercase(&self) -> tarantool::Result<bool> {
-        self.get_or_default(system_parameter_name!(password_enforce_uppercase))
+    pub fn auth_password_enforce_uppercase(&self) -> tarantool::Result<bool> {
+        self.get_or_default(system_parameter_name!(auth_password_enforce_uppercase))
     }
 
     #[inline]
-    pub fn password_enforce_lowercase(&self) -> tarantool::Result<bool> {
-        self.get_or_default(system_parameter_name!(password_enforce_lowercase))
+    pub fn auth_password_enforce_lowercase(&self) -> tarantool::Result<bool> {
+        self.get_or_default(system_parameter_name!(auth_password_enforce_lowercase))
     }
 
     #[inline]
-    pub fn password_enforce_digits(&self) -> tarantool::Result<bool> {
-        self.get_or_default(system_parameter_name!(password_enforce_digits))
+    pub fn auth_password_enforce_digits(&self) -> tarantool::Result<bool> {
+        self.get_or_default(system_parameter_name!(auth_password_enforce_digits))
     }
 
     #[inline]
-    pub fn password_enforce_specialchars(&self) -> tarantool::Result<bool> {
-        self.get_or_default(system_parameter_name!(password_enforce_specialchars))
+    pub fn auth_password_enforce_specialchars(&self) -> tarantool::Result<bool> {
+        self.get_or_default(system_parameter_name!(auth_password_enforce_specialchars))
     }
 
     #[inline]
-    pub fn max_login_attempts(&self) -> tarantool::Result<usize> {
-        self.get_or_default(system_parameter_name!(max_login_attempts))
+    pub fn auth_login_attempt_max(&self) -> tarantool::Result<usize> {
+        self.get_or_default(system_parameter_name!(auth_login_attempt_max))
     }
 
     #[inline]
-    pub fn max_pg_statements(&self) -> tarantool::Result<usize> {
+    pub fn pg_statement_max(&self) -> tarantool::Result<usize> {
         let cached = config::MAX_PG_STATEMENTS.load(Ordering::Relaxed);
         if cached != 0 {
             return Ok(cached);
         }
 
-        let res = self.get_or_default(system_parameter_name!(max_pg_statements))?;
+        let res = self.get_or_default(system_parameter_name!(pg_statement_max))?;
 
         // Cache the value.
         config::MAX_PG_STATEMENTS.store(res, Ordering::Relaxed);
@@ -3174,13 +3174,13 @@ impl DbConfig {
     }
 
     #[inline]
-    pub fn max_pg_portals(&self) -> tarantool::Result<usize> {
+    pub fn pg_portal_max(&self) -> tarantool::Result<usize> {
         let cached = config::MAX_PG_PORTALS.load(Ordering::Relaxed);
         if cached != 0 {
             return Ok(cached);
         }
 
-        let res = self.get_or_default(system_parameter_name!(max_pg_portals))?;
+        let res = self.get_or_default(system_parameter_name!(pg_portal_max))?;
 
         // Cache the value.
         config::MAX_PG_PORTALS.store(res, Ordering::Relaxed);
@@ -3188,28 +3188,21 @@ impl DbConfig {
     }
 
     #[inline]
-    pub fn snapshot_chunk_max_size(&self) -> tarantool::Result<usize> {
-        self.get_or_default(system_parameter_name!(snapshot_chunk_max_size))
+    pub fn raft_snapshot_chunk_size_max(&self) -> tarantool::Result<usize> {
+        self.get_or_default(system_parameter_name!(raft_snapshot_chunk_size_max))
     }
 
     #[inline]
-    pub fn snapshot_read_view_close_timeout(&self) -> tarantool::Result<Duration> {
+    pub fn raft_snapshot_read_view_close_timeout(&self) -> tarantool::Result<Duration> {
         #[rustfmt::skip]
-        let res: f64 = self.get_or_default(system_parameter_name!(snapshot_read_view_close_timeout))?;
+        let res: f64 = self.get_or_default(system_parameter_name!(raft_snapshot_read_view_close_timeout))?;
         Ok(Duration::from_secs_f64(res))
     }
 
     #[inline]
-    pub fn auto_offline_timeout(&self) -> tarantool::Result<Duration> {
+    pub fn governor_auto_offline_timeout(&self) -> tarantool::Result<Duration> {
         #[rustfmt::skip]
-        let res: f64 = self.get_or_default(system_parameter_name!(auto_offline_timeout))?;
-        Ok(Duration::from_secs_f64(res))
-    }
-
-    #[inline]
-    pub fn max_heartbeat_period(&self) -> tarantool::Result<Duration> {
-        #[rustfmt::skip]
-        let res: f64 = self.get_or_default(system_parameter_name!(max_heartbeat_period))?;
+        let res: f64 = self.get_or_default(system_parameter_name!(governor_auto_offline_timeout))?;
         Ok(Duration::from_secs_f64(res))
     }
 
diff --git a/src/storage/snapshot.rs b/src/storage/snapshot.rs
index b2f40c6c84..438f17a991 100644
--- a/src/storage/snapshot.rs
+++ b/src/storage/snapshot.rs
@@ -94,7 +94,7 @@ impl Clusterwide {
         let mut space_dumps = Vec::with_capacity(global_spaces.len());
         let mut total_size = 0;
         let mut hit_threashold = false;
-        let snapshot_chunk_max_size = self.db_config.snapshot_chunk_max_size()?;
+        let snapshot_chunk_max_size = self.db_config.raft_snapshot_chunk_size_max()?;
         let t0 = std::time::Instant::now();
         for &(space_id, index_id) in global_spaces {
             debug_assert_eq!(index_id, 0, "only primary keys should be added");
@@ -260,7 +260,7 @@ impl Clusterwide {
             rv.ref_count = 1;
         }
 
-        let timeout = self.db_config.snapshot_read_view_close_timeout()?;
+        let timeout = self.db_config.raft_snapshot_read_view_close_timeout()?;
         let now = fiber::clock();
         // Clear old snapshots.
         // This is where we clear any stale snapshots with no references
diff --git a/src/traft/node.rs b/src/traft/node.rs
index 0deac60ef6..6e2b732bcd 100644
--- a/src/traft/node.rs
+++ b/src/traft/node.rs
@@ -2375,7 +2375,7 @@ impl NodeImpl {
         let max_size: u64 = self
             .storage
             .db_config
-            .get_or_default(system_parameter_name!(cluster_wal_max_size))?;
+            .get_or_default(system_parameter_name!(raft_wal_size_max))?;
         let current_size = self.raft_storage.raft_log_bsize()?;
         if current_size > max_size {
             #[rustfmt::skip]
@@ -2386,7 +2386,7 @@ impl NodeImpl {
         let max_count: u64 = self
             .storage
             .db_config
-            .get_or_default(system_parameter_name!(cluster_wal_max_count))?;
+            .get_or_default(system_parameter_name!(raft_wal_count_max))?;
         let current_count = self.raft_storage.raft_log_count()?;
         if current_count > max_count {
             #[rustfmt::skip]
diff --git a/test/int/test_basics.py b/test/int/test_basics.py
index d49618f24f..68279c1bab 100644
--- a/test/int/test_basics.py
+++ b/test/int/test_basics.py
@@ -346,20 +346,19 @@ Insert(_pico_replicaset, ["default_1","{r1_uuid}","default_1_1","default_1_1","d
 Insert(_pico_property, ["global_schema_version",0]),
 Insert(_pico_property, ["next_schema_version",1]),
 Insert(_pico_property, ["system_catalog_version",1]),
-Insert(_pico_db_config, ["password_min_length",8]),
-Insert(_pico_db_config, ["password_enforce_uppercase",true]),
-Insert(_pico_db_config, ["password_enforce_lowercase",true]),
-Insert(_pico_db_config, ["password_enforce_digits",true]),
-Insert(_pico_db_config, ["password_enforce_specialchars",false]),
-Insert(_pico_db_config, ["max_login_attempts",4]),
-Insert(_pico_db_config, ["auto_offline_timeout",30.0]),
-Insert(_pico_db_config, ["max_heartbeat_period",5.0]),
-Insert(_pico_db_config, ["max_pg_statements",1024]),
-Insert(_pico_db_config, ["max_pg_portals",1024]),
-Insert(_pico_db_config, ["cluster_wal_max_size",67108864]),
-Insert(_pico_db_config, ["cluster_wal_max_count",64]),
-Insert(_pico_db_config, ["snapshot_chunk_max_size",16777216]),
-Insert(_pico_db_config, ["snapshot_read_view_close_timeout",86400]),
+Insert(_pico_db_config, ["auth_password_length_min",8]),
+Insert(_pico_db_config, ["auth_password_enforce_uppercase",true]),
+Insert(_pico_db_config, ["auth_password_enforce_lowercase",true]),
+Insert(_pico_db_config, ["auth_password_enforce_digits",true]),
+Insert(_pico_db_config, ["auth_password_enforce_specialchars",false]),
+Insert(_pico_db_config, ["auth_login_attempt_max",4]),
+Insert(_pico_db_config, ["pg_statement_max",1024]),
+Insert(_pico_db_config, ["pg_portal_max",1024]),
+Insert(_pico_db_config, ["raft_snapshot_chunk_size_max",16777216]),
+Insert(_pico_db_config, ["raft_snapshot_read_view_close_timeout",86400]),
+Insert(_pico_db_config, ["raft_wal_size_max",67108864]),
+Insert(_pico_db_config, ["raft_wal_count_max",64]),
+Insert(_pico_db_config, ["governor_auto_offline_timeout",30.0]),
 Insert(_pico_db_config, ["governor_raft_op_timeout",3.0]),
 Insert(_pico_db_config, ["governor_common_rpc_timeout",3.0]),
 Insert(_pico_db_config, ["governor_plugin_rpc_timeout",10.0]),
diff --git a/test/int/test_compaction.py b/test/int/test_compaction.py
index 6171b197b8..17aae1f6ef 100644
--- a/test/int/test_compaction.py
+++ b/test/int/test_compaction.py
@@ -80,7 +80,7 @@ def test_raft_log_auto_compaction_basics(cluster: Cluster):
     # compaction because a new entry will be added to the log which will
     # increase it's size past the newly introduced limit.
     max_size = size
-    i1.sql(f"ALTER SYSTEM SET cluster_wal_max_size = {max_size}")
+    i1.sql(f"ALTER SYSTEM SET raft_wal_size_max = {max_size}")
 
     count, size = get_raft_log_count_and_size(i1)
     assert count == 0
@@ -136,7 +136,7 @@ def test_raft_log_auto_compaction_basics(cluster: Cluster):
 
     # Set the maximum raft log entry count.
     max_count = 2
-    i1.sql(f"ALTER SYSTEM SET cluster_wal_max_count = {max_count}")
+    i1.sql(f"ALTER SYSTEM SET raft_wal_count_max = {max_count}")
 
     # Alter system statement results in one raft log entry
     count, size = get_raft_log_count_and_size(i1)
@@ -159,7 +159,7 @@ def test_raft_log_auto_compaction_basics(cluster: Cluster):
 
     # Increase the maximum raft log entry count.
     max_count = 5
-    i1.sql(f"ALTER SYSTEM SET cluster_wal_max_count = {max_count}")
+    i1.sql(f"ALTER SYSTEM SET raft_wal_count_max = {max_count}")
 
     count, size = get_raft_log_count_and_size(i1)
     assert count == 1
@@ -188,7 +188,7 @@ def test_raft_log_auto_compaction_basics(cluster: Cluster):
 
     # Disable limit on number of tuples all together
     old_max_count = max_count
-    i1.sql("ALTER SYSTEM SET cluster_wal_max_count = 9999")
+    i1.sql("ALTER SYSTEM SET raft_wal_count_max = 9999")
 
     # Insert more stuff until log compacts based on number of bytes
     prev_count = bulk_insert_until_compaction()
@@ -200,7 +200,7 @@ def test_raft_log_auto_compaction_preserves_finalizers(cluster: Cluster):
 
     # Compact the log and make it so compaction is triggerred the moment a
     # DdlCommit entry is added to the log
-    i1.sql("ALTER SYSTEM SET cluster_wal_max_count = 1")
+    i1.sql("ALTER SYSTEM SET raft_wal_count_max = 1")
 
     assert i1.call("box.space._raft_log:len") == 0
 
diff --git a/test/int/test_ddl.py b/test/int/test_ddl.py
index adba9a6d9d..adb2ab2d3f 100644
--- a/test/int/test_ddl.py
+++ b/test/int/test_ddl.py
@@ -1675,7 +1675,7 @@ cluster:
     )
 
     # Trigger log compaction
-    leader.sql("ALTER SYSTEM SET cluster_wal_max_count TO 1")
+    leader.sql("ALTER SYSTEM SET raft_wal_count_max TO 1")
 
     # Add a new replicaset, which catches up by raft snapshot
     storage_3_1 = cluster.add_instance(tier="storage", wait_online=True)
diff --git a/test/int/test_network_effects.py b/test/int/test_network_effects.py
index 70ee089320..ec4efb1fda 100644
--- a/test/int/test_network_effects.py
+++ b/test/int/test_network_effects.py
@@ -182,7 +182,7 @@ def get_instance_states(peer: Instance, instance_name) -> tuple[str, str]:
 
 def test_instance_automatic_offline_detection(cluster: Cluster):
     i1, i2, i3 = cluster.deploy(instance_count=3)
-    dml = i1.sql("ALTER SYSTEM SET auto_offline_timeout=0.5")
+    dml = i1.sql("ALTER SYSTEM SET governor_auto_offline_timeout=0.5")
     assert dml["row_count"] == 1
 
     assert get_instance_states(i1, i3.name) == ("Online", "Online")
diff --git a/test/int/test_plugin.py b/test/int/test_plugin.py
index a63e7f7f2d..22bb58085c 100644
--- a/test/int/test_plugin.py
+++ b/test/int/test_plugin.py
@@ -938,7 +938,7 @@ def test_plugin_not_enable_if_error_on_start(cluster: Cluster):
     plugin_ref.inject_error("testservice_1", "on_start", True, i1)
 
     # This is needed to check that raft log compaction respects plugin op finalizers
-    i1.sql(""" ALTER SYSTEM SET cluster_wal_max_count = 1 """)
+    i1.sql(""" ALTER SYSTEM SET raft_wal_count_max = 1 """)
     assert i1.call("box.space._raft_log:len") == 0
     index_before = i1.raft_get_index()
 
@@ -1266,9 +1266,9 @@ def test_migration_lock(cluster: Cluster):
     i3 = cluster.add_instance(wait_online=False, replicaset_name="storage")
     cluster.wait_online()
 
-    # Decrease auto_offline_timeout so that sentinel notices that the instance
+    # Decrease governor_auto_offline_timeout so that sentinel notices that the instance
     # disappeared quicker
-    i1.sql(""" ALTER SYSTEM SET auto_offline_timeout = 1 """)
+    i1.sql(""" ALTER SYSTEM SET governor_auto_offline_timeout = 1 """)
 
     # successfully install v0.1.0
     i2.call(
diff --git a/test/int/test_runtime_configuration.py b/test/int/test_runtime_configuration.py
index 529723abfc..3d46ed8e97 100644
--- a/test/int/test_runtime_configuration.py
+++ b/test/int/test_runtime_configuration.py
@@ -61,7 +61,7 @@ def test_snapshot_and_dynamic_parameters(cluster: Cluster):
     i1.sql("ALTER SYSTEM SET iproto_net_msg_max = 100")
 
     # Trigger raft log compaction
-    i1.sql("ALTER SYSTEM SET cluster_wal_max_count TO 1")
+    i1.sql("ALTER SYSTEM SET raft_wal_count_max TO 1")
 
     # Add a new instance and restart `i2`, which catches up by raft snapshot
     i2.start_and_wait()
diff --git a/test/int/test_sql.py b/test/int/test_sql.py
index 34603613d5..3578600704 100644
--- a/test/int/test_sql.py
+++ b/test/int/test_sql.py
@@ -274,26 +274,25 @@ def test_read_from_system_tables(cluster: Cluster):
     # Ignore values for the sake of stability
     keys = [row[0] for row in data["rows"]]
     assert keys == [
-        "auto_offline_timeout",
-        "cluster_wal_max_count",
-        "cluster_wal_max_size",
+        "auth_login_attempt_max",
+        "auth_password_enforce_digits",
+        "auth_password_enforce_lowercase",
+        "auth_password_enforce_specialchars",
+        "auth_password_enforce_uppercase",
+        "auth_password_length_min",
+        "governor_auto_offline_timeout",
         "governor_common_rpc_timeout",
         "governor_plugin_rpc_timeout",
         "governor_raft_op_timeout",
         "iproto_net_msg_max",
-        "max_heartbeat_period",
-        "max_login_attempts",
-        "max_pg_portals",
-        "max_pg_statements",
         "memtx_checkpoint_count",
         "memtx_checkpoint_interval",
-        "password_enforce_digits",
-        "password_enforce_lowercase",
-        "password_enforce_specialchars",
-        "password_enforce_uppercase",
-        "password_min_length",
-        "snapshot_chunk_max_size",
-        "snapshot_read_view_close_timeout",
+        "pg_portal_max",
+        "pg_statement_max",
+        "raft_snapshot_chunk_size_max",
+        "raft_snapshot_read_view_close_timeout",
+        "raft_wal_count_max",
+        "raft_wal_size_max",
         "sql_motion_row_max",
         "sql_vdbe_opcode_max",
     ]
@@ -3252,14 +3251,14 @@ def test_sql_user_password_checks(cluster: Cluster):
     # let's turn off uppercase check and turn on special characters check
     dml = i1.sql(
         """
-        ALTER SYSTEM SET password_enforce_uppercase=false
+        ALTER SYSTEM SET auth_password_enforce_uppercase=false
         """
     )
     assert dml["row_count"] == 1
 
     dml = i1.sql(
         """
-        ALTER SYSTEM SET password_enforce_specialchars=true
+        ALTER SYSTEM SET auth_password_enforce_specialchars=true
         """
     )
     assert dml["row_count"] == 1
@@ -5042,24 +5041,23 @@ def test_alter_system_property(cluster: Cluster):
     i1 = cluster.instances[0]
 
     non_default_prop = [
-        ("password_min_length", 10),
-        ("password_enforce_digits", True),
-        ("password_enforce_uppercase", True),
-        ("password_enforce_lowercase", True),
-        ("password_enforce_specialchars", False),
-        ("auto_offline_timeout", 12),
-        ("max_heartbeat_period", 6.6),
-        ("max_login_attempts", 4),
-        ("max_pg_statements", 1024),
-        ("max_pg_portals", 1024),
-        ("snapshot_chunk_max_size", 1500),
-        ("snapshot_read_view_close_timeout", 12312.4),
-        ("password_enforce_uppercase", False),
-        ("password_enforce_lowercase", False),
-        ("password_enforce_specialchars", True),
-        ("max_login_attempts", 8),
-        ("max_pg_statements", 4096),
-        ("max_pg_portals", 2048),
+        ("auth_password_length_min", 10),
+        ("auth_password_enforce_digits", True),
+        ("auth_password_enforce_uppercase", True),
+        ("auth_password_enforce_lowercase", True),
+        ("auth_password_enforce_specialchars", False),
+        ("governor_auto_offline_timeout", 12),
+        ("auth_login_attempt_max", 4),
+        ("pg_statement_max", 1024),
+        ("pg_portal_max", 1024),
+        ("raft_snapshot_chunk_size_max", 1500),
+        ("raft_snapshot_read_view_close_timeout", 12312.4),
+        ("auth_password_enforce_uppercase", False),
+        ("auth_password_enforce_lowercase", False),
+        ("auth_password_enforce_specialchars", True),
+        ("auth_login_attempt_max", 8),
+        ("pg_statement_max", 4096),
+        ("pg_portal_max", 2048),
         ("governor_raft_op_timeout", 10),
         ("governor_common_rpc_timeout", 10),
         ("governor_plugin_rpc_timeout", 20),
@@ -5114,35 +5112,35 @@ def test_alter_system_property_errors(cluster: Cluster):
 
     # check valid insertion (int)
     data = i1.sql(
-        """ select * from "_pico_db_config" where "key" = 'auto_offline_timeout' """
+        """ select * from "_pico_db_config" where "key" = 'governor_auto_offline_timeout' """
     )
-    assert data == [["auto_offline_timeout", 30]]
+    assert data == [["governor_auto_offline_timeout", 30]]
     dml = i1.sql(
         """
-        alter system set "auto_offline_timeout" to 3
+        alter system set "governor_auto_offline_timeout" to 3
         """
     )
     assert dml["row_count"] == 1
     data = i1.sql(
-        """ select * from "_pico_db_config" where "key" = 'auto_offline_timeout' """
+        """ select * from "_pico_db_config" where "key" = 'governor_auto_offline_timeout' """
     )
-    assert data == [["auto_offline_timeout", 3]]
+    assert data == [["governor_auto_offline_timeout", 3]]
 
     # check valid insertion (bool)
     data = i1.sql(
-        """ select * from "_pico_db_config" where "key" = 'password_enforce_digits' """
+        """ select * from "_pico_db_config" where "key" = 'auth_password_enforce_digits' """
     )
-    assert data == [["password_enforce_digits", True]]
+    assert data == [["auth_password_enforce_digits", True]]
     dml = i1.sql(
         """
-        alter system set "password_enforce_digits" to false
+        alter system set "auth_password_enforce_digits" to false
         """
     )
     assert dml["row_count"] == 1
     data = i1.sql(
-        """ select * from "_pico_db_config" where "key" = 'password_enforce_digits' """
+        """ select * from "_pico_db_config" where "key" = 'auth_password_enforce_digits' """
     )
-    assert data == [["password_enforce_digits", False]]
+    assert data == [["auth_password_enforce_digits", False]]
 
     # such property does not exist
     with pytest.raises(
@@ -5157,11 +5155,11 @@ def test_alter_system_property_errors(cluster: Cluster):
     # property expects different value type
     with pytest.raises(
         TarantoolError,
-        match="invalid value for 'password_enforce_digits' expected boolean, got unsigned.",
+        match="invalid value for 'auth_password_enforce_digits' expected boolean, got unsigned.",
     ):
         dml = i1.sql(
             """
-            alter system set "password_enforce_digits" to 3
+            alter system set "auth_password_enforce_digits" to 3
             """
         )
 
@@ -5188,7 +5186,7 @@ def test_alter_system_property_errors(cluster: Cluster):
         )
 
     # timeout values cannot be negative
-    for param in ["auto_offline_timeout", "governor_raft_op_timeout"]:
+    for param in ["governor_auto_offline_timeout", "governor_raft_op_timeout"]:
         with pytest.raises(TarantoolError) as e:
             dml = i1.sql(
                 f"""
diff --git a/test/pgproto/storage_limits_test.py b/test/pgproto/storage_limits_test.py
index e0d23c042a..9531d72545 100644
--- a/test/pgproto/storage_limits_test.py
+++ b/test/pgproto/storage_limits_test.py
@@ -43,7 +43,7 @@ def test_statement_storage_config_limit(postgres: Postgres):
 
     max_pg_statements = 32
     postgres.instance.sql(
-        f"ALTER SYSTEM SET max_pg_statements = {max_pg_statements}",
+        f"ALTER SYSTEM SET pg_statement_max = {max_pg_statements}",
         sudo=True,
     )
 
@@ -59,7 +59,7 @@ def test_statement_storage_config_limit(postgres: Postgres):
         pg8000.Error,
         match=f'Statement storage is full. Current size limit: {max_pg_statements}. \
 Please, increase storage limit using: \
-ALTER SYSTEM SET "max_pg_statements" TO <new-limit>',
+ALTER SYSTEM SET "pg_statement_max" TO <new-limit>',
     ):
         statements.append(
             conn.prepare(""" insert into "t" values (cast (:p as int)) """)
@@ -67,7 +67,7 @@ ALTER SYSTEM SET "max_pg_statements" TO <new-limit>',
 
     # increase storage capacity
     postgres.instance.sql(
-        f"ALTER SYSTEM SET max_pg_statements={max_pg_statements + 1}",
+        f"ALTER SYSTEM SET pg_statement_max={max_pg_statements + 1}",
         sudo=True,
     )
 
-- 
GitLab


From c2d3e2d80c344e3b8bf1ab9a24f31b0950ebb47d Mon Sep 17 00:00:00 2001
From: Kurdakov Alexander <kusancho12@gmail.com>
Date: Thu, 16 Jan 2025 19:42:18 +0300
Subject: [PATCH 2/5] feat: rename `listen` to `iproto_listen` preserving
 backward compatibility

---
 src/cli/args.rs              | 23 +++++++++--
 src/config.rs                | 80 ++++++++++++++++++++++++++++--------
 src/lib.rs                   |  4 +-
 src/tarantool.rs             |  2 +-
 test/conftest.py             | 12 +++---
 test/int/test_basics.py      |  2 +-
 test/int/test_cli_ux.py      |  4 +-
 test/int/test_config_file.py | 16 +++++---
 test/int/test_http_server.py |  8 ++--
 test/int/test_joining.py     | 19 +++++----
 10 files changed, 120 insertions(+), 50 deletions(-)

diff --git a/src/cli/args.rs b/src/cli/args.rs
index 10bc4c0dcc..fe8be27455 100644
--- a/src/cli/args.rs
+++ b/src/cli/args.rs
@@ -88,24 +88,39 @@ pub struct Run {
     /// cluster during the instance start. Later it's used by other
     /// instances for connecting to this one.
     ///
-    /// Defaults to `--listen` value which is enough in most cases. But,
-    /// for example, in case of `--listen 0.0.0.0` it should be
+    /// Defaults to `--iproto-listen` value which is enough in most cases. But,
+    /// for example, in case of `--iproto-listen 0.0.0.0` it should be
     /// specified explicitly:
     ///
-    /// picodata run --listen 0.0.0.0:3301 --advertise 192.168.0.1:3301
+    /// picodata run --iproto-listen 0.0.0.0:3301 --advertise 192.168.0.1:3301
     pub advertise_address: Option<IprotoAddress>,
 
     #[clap(
         short = 'l',
         long = "listen",
         value_name = "[HOST][:PORT]",
-        env = "PICODATA_LISTEN"
+        env = "PICODATA_LISTEN",
+        hide = true,
+        group = "listen_arguments"
     )]
+    /// DEPRECATED option
+    ///
     /// Instance network address.
     ///
     /// By default "127.0.0.1:3301" is used.
     pub listen: Option<IprotoAddress>,
 
+    #[clap(
+        long = "iproto-listen",
+        value_name = "[HOST][:PORT]",
+        env = "PICODATA_IPROTO_LISTEN",
+        group = "listen_arguments"
+    )]
+    /// Instance network address.
+    ///
+    /// By default "127.0.0.1:3301" is used.
+    pub iproto_listen: Option<IprotoAddress>,
+
     /// Pgproto server address.
     #[clap(long, value_name = "[HOST][:PORT]", env = "PICODATA_PG_LISTEN")]
     pub pg_listen: Option<PgprotoAddress>,
diff --git a/src/config.rs b/src/config.rs
index 12ce2d2c1f..922ec0d6c4 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -289,6 +289,11 @@ Using configuration file '{args_path}'.");
             config_from_args.instance.advertise_address = Some(address);
         }
 
+        if let Some(iproto_listen) = args.iproto_listen {
+            config_from_args.instance.iproto_listen = Some(iproto_listen);
+        }
+
+        #[allow(deprecated)]
         if let Some(listen) = args.listen {
             config_from_args.instance.listen = Some(listen);
         }
@@ -518,6 +523,10 @@ Using configuration file '{args_path}'.");
                 config_parameter_path!(instance.plugin_dir),
                 config_parameter_path!(instance.share_dir),
             ),
+            (
+                config_parameter_path!(instance.listen),
+                config_parameter_path!(instance.iproto_listen),
+            ),
         ];
 
         // Handle renamed parameters
@@ -1102,12 +1111,14 @@ pub struct InstanceConfig {
     #[introspection(config_default = FailureDomain::default())]
     pub failure_domain: Option<FailureDomain>,
 
-    #[introspection(
-        config_default = IprotoAddress::default()
-    )]
+    #[deprecated = "use iproto_listen instead"]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub listen: Option<IprotoAddress>,
 
-    #[introspection(config_default = self.listen())]
+    #[introspection(config_default = IprotoAddress::default())]
+    pub iproto_listen: Option<IprotoAddress>,
+
+    #[introspection(config_default = self.iproto_listen())]
     pub advertise_address: Option<IprotoAddress>,
 
     #[introspection(config_default = vec![self.advertise_address()])]
@@ -1211,8 +1222,8 @@ impl InstanceConfig {
     }
 
     #[inline]
-    pub fn listen(&self) -> IprotoAddress {
-        self.listen
+    pub fn iproto_listen(&self) -> IprotoAddress {
+        self.iproto_listen
             .clone()
             .expect("is set in PicodataConfig::set_defaults_explicitly")
     }
@@ -2079,27 +2090,27 @@ cluster:
     fn spaces_in_addresses() {
         let yaml = r###"
 instance:
-    listen:  kevin:  <- spacey
+    iproto_listen:  kevin:  <- spacey
 "###;
         let err = PicodataConfig::read_yaml_contents(&yaml.trim_start()).unwrap_err();
         #[rustfmt::skip]
-        assert_eq!(err.to_string(), "invalid configuration: mapping values are not allowed in this context at line 2 column 19");
+        assert_eq!(err.to_string(), "invalid configuration: mapping values are not allowed in this context at line 2 column 26");
 
         let yaml = r###"
 instance:
-    listen:  kevin->  :spacey   # <- some more trailing space
+    iproto_listen:  kevin->  :spacey   # <- some more trailing space
 "###;
         let config = PicodataConfig::read_yaml_contents(&yaml.trim_start()).unwrap();
-        let listen = config.instance.listen.unwrap();
+        let listen = config.instance.iproto_listen.unwrap();
         assert_eq!(listen.host, "kevin->  ");
         assert_eq!(listen.port, "spacey");
 
         let yaml = r###"
 instance:
-    listen:  kevin->  <-spacey
+    iproto_listen:  kevin->  <-spacey
 "###;
         let config = PicodataConfig::read_yaml_contents(&yaml.trim_start()).unwrap();
-        let listen = config.instance.listen.unwrap();
+        let listen = config.instance.iproto_listen.unwrap();
         assert_eq!(listen.host, "kevin->  <-spacey");
         assert_eq!(listen.port, "3301");
     }
@@ -2127,6 +2138,7 @@ instance:
         let mut parameter_sources = Default::default();
         mark_non_none_field_sources(&mut parameter_sources, &config, ParameterSource::ConfigFile);
         config.set_from_args_and_env(args, &mut parameter_sources)?;
+        config.handle_deprecated_parameters(&mut parameter_sources)?;
         config.set_defaults_explicitly(&parameter_sources);
         config.parameter_sources = parameter_sources;
         Ok(config)
@@ -2176,7 +2188,7 @@ instance:
                 vec![IprotoAddress::default()]
             );
             assert_eq!(config.instance.name(), None);
-            assert_eq!(config.instance.listen().to_host_port(), IprotoAddress::default_host_port());
+            assert_eq!(config.instance.iproto_listen().to_host_port(), IprotoAddress::default_host_port());
             assert_eq!(config.instance.advertise_address().to_host_port(), IprotoAddress::default_host_port());
             assert_eq!(config.instance.log_level(), SayLevel::Info);
             assert!(config.instance.failure_domain().data.is_empty());
@@ -2347,11 +2359,11 @@ instance:
         //
         // Advertise = listen unless specified explicitly
         //
-        {
+        {
             std::env::set_var("PICODATA_LISTEN", "L-ENVIRON");
             let config = setup_for_tests(Some(""), &["run"]).unwrap();
 
-            assert_eq!(config.instance.listen().to_host_port(), "L-ENVIRON:3301");
+            assert_eq!(config.instance.iproto_listen().to_host_port(), "L-ENVIRON:3301");
             assert_eq!(config.instance.advertise_address().to_host_port(), "L-ENVIRON:3301");
 
             let yaml = r###"
@@ -2360,12 +2372,12 @@ instance:
 "###;
             let config = setup_for_tests(Some(yaml), &["run"]).unwrap();
 
-            assert_eq!(config.instance.listen().to_host_port(), "L-ENVIRON:3301");
+            assert_eq!(config.instance.iproto_listen().to_host_port(), "L-ENVIRON:3301");
             assert_eq!(config.instance.advertise_address().to_host_port(), "A-CONFIG:3301");
 
             let config = setup_for_tests(Some(yaml), &["run", "-l", "L-COMMANDLINE"]).unwrap();
 
-            assert_eq!(config.instance.listen().to_host_port(), "L-COMMANDLINE:3301");
+            assert_eq!(config.instance.iproto_listen().to_host_port(), "L-COMMANDLINE:3301");
             assert_eq!(config.instance.advertise_address().to_host_port(), "A-CONFIG:3301");
         }
 
@@ -2585,4 +2597,38 @@ instance:
         assert_eq!(e("1 000 X"), "invalid digit found in string");
         assert_eq!(e("17000000T"), "Value is too large");
     }
+
+    #[test]
+    fn test_iproto_listen_listen_interaction() {
+        let _guard = protect_env();
+
+        // iproto_listen should be equal to listen
+        let yaml = r###"
+instance:
+        listen: localhost:3301
+"###;
+        let config = setup_for_tests(Some(yaml), &["run"]).unwrap();
+        assert_eq!(
+            config.instance.iproto_listen().to_host_port(),
+            "localhost:3301"
+        );
+
+        // can't specify both
+        let yaml = r###"
+instance:
+        listen: localhost:3302
+        iproto_listen: localhost:3301
+"###;
+        let config = setup_for_tests(Some(yaml), &["run"]);
+
+        assert_eq!(config.unwrap_err().to_string(), "invalid configuration: instance.listen is deprecated, use instance.iproto_listen instead (cannot use both at the same time)");
+
+        let yaml = r###"
+instance:
+        iproto_listen: localhost:3302
+"###;
+        let config = setup_for_tests(Some(yaml), &["run", "--listen", "localhost:3303"]);
+
+        assert_eq!(config.unwrap_err().to_string(), "invalid configuration: instance.listen is deprecated, use instance.iproto_listen instead (cannot use both at the same time)");
+    }
 }
diff --git a/src/lib.rs b/src/lib.rs
index 86d7c02211..07fd346504 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -735,7 +735,7 @@ fn start_discover(config: &PicodataConfig) -> Result<Option<Entrypoint>, Error>
 
     // Start listening only after we've checked if this is a restart.
     // Postjoin phase has its own idea of when to start listening.
-    tarantool::set_cfg_field("listen", config.instance.listen().to_host_port())?;
+    tarantool::set_cfg_field("listen", config.instance.iproto_listen().to_host_port())?;
 
     let role = discovery::wait_global();
     let next_entrypoint = match role {
@@ -1003,7 +1003,7 @@ fn postjoin(
         assert!(node.status().raft_state.is_leader());
     }
 
-    tarantool::set_cfg_field("listen", config.instance.listen().to_host_port())
+    tarantool::set_cfg_field("listen", config.instance.iproto_listen().to_host_port())
         .expect("changing listen port shouldn't fail");
 
     // Start admin console
diff --git a/src/tarantool.rs b/src/tarantool.rs
index 77e7e29375..db42dae36a 100644
--- a/src/tarantool.rs
+++ b/src/tarantool.rs
@@ -286,7 +286,7 @@ impl Cfg {
 
             // Needs to be set, because an applier will attempt to connect to
             // self and will block box.cfg() call until it succeeds.
-            listen: Some(config.instance.listen().to_host_port()),
+            listen: Some(config.instance.iproto_listen().to_host_port()),
 
             // If we're joining to an existing replicaset,
             // then we're the follower.
diff --git a/test/conftest.py b/test/conftest.py
index 5dc7522384..5036a33532 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -595,7 +595,7 @@ class Instance:
         return self._instance_dir
 
     @property
-    def listen(self):
+    def iproto_listen(self):
         if self.host is None or self.port is None:
             return None
         return f"{self.host}:{self.port}"
@@ -647,7 +647,7 @@ class Instance:
             *([f"--replicaset-name={self.replicaset_name}"] if self.replicaset_name else []),
             *([f"--instance-dir={self._instance_dir}"] if self._instance_dir else []),
             *([f"--share-dir={self.share_dir}"] if self.share_dir else []),
-            *([f"--listen={self.listen}"] if self.listen else []),
+            *([f"--iproto-listen={self.iproto_listen}"] if self.iproto_listen else []),
             *([f"--pg-listen={self.pg_listen}"] if self.pg_listen else []),
             *([f"-c instance.pg.ssl={self.pg_ssl}"] if self.pg_ssl else []),
             *([f"--peer={str.join(',', self.peers)}"] if self.peers else []),
@@ -663,9 +663,9 @@ class Instance:
 
     def __repr__(self):
         if self.process:
-            return f"Instance({self.name}, listen={self.listen} cluster={self.cluster_name}, process.pid={self.process.pid})"  # noqa: E501
+            return f"Instance({self.name}, iproto_listen={self.iproto_listen} cluster={self.cluster_name}, process.pid={self.process.pid})"  # noqa: E501
         else:
-            return f"Instance({self.name}, listen={self.listen} cluster={self.cluster_name})"  # noqa: E501
+            return f"Instance({self.name}, iproto_listen={self.iproto_listen} cluster={self.cluster_name})"  # noqa: E501
 
     def __hash__(self):
         return hash((self.cluster_name, self.name))
@@ -938,7 +938,7 @@ class Instance:
         eprint(f"{self} starting...")
 
         if peers is not None:
-            self.peers = list(map(lambda i: i.listen, peers))
+            self.peers = list(map(lambda i: i.iproto_listen, peers))
 
         env = {**self.env}
 
@@ -1885,7 +1885,7 @@ class Cluster:
         # fmt: off
         command: list[str] = [
             self.binary_path, "expel",
-            "--peer", f"pico_service@{peer.listen}",
+            "--peer", f"pico_service@{peer.iproto_listen}",
             "--cluster-name", target.cluster_name or "",
             "--password-file", self.service_password_file,
             "--auth-type", "chap-sha1",
diff --git a/test/int/test_basics.py b/test/int/test_basics.py
index 68279c1bab..a50d7ed205 100644
--- a/test/int/test_basics.py
+++ b/test/int/test_basics.py
@@ -185,7 +185,7 @@ invalid configuration: instance restarted with a different `replicaset_name`, wh
     #
     # Change advertise address
     #
-    was = instance.listen  # type: ignore
+    was = instance.iproto_listen  # type: ignore
     instance.env["PICODATA_ADVERTISE"] = "example.com:1234"
     assert instance.env["PICODATA_ADVERTISE"] != was
     err = f"""\
diff --git a/test/int/test_cli_ux.py b/test/int/test_cli_ux.py
index 0780c7b752..5c263fec3d 100644
--- a/test/int/test_cli_ux.py
+++ b/test/int/test_cli_ux.py
@@ -198,7 +198,7 @@ i_dont_care_about_this_service:
             i1.binary_path,
             "plugin",
             "configure",
-            i1.listen,
+            i1.iproto_listen,
             _PLUGIN,
             _PLUGIN_VERSION_1,
             new_config,
@@ -240,7 +240,7 @@ testservice_1:
             i1.binary_path,
             "plugin",
             "configure",
-            i1.listen,
+            i1.iproto_listen,
             _PLUGIN,
             _PLUGIN_VERSION_1,
             new_config,
diff --git a/test/int/test_config_file.py b/test/int/test_config_file.py
index 269be81493..6d9a3dc36e 100644
--- a/test/int/test_config_file.py
+++ b/test/int/test_config_file.py
@@ -2,6 +2,7 @@ from conftest import Cluster, Instance, PortDistributor, log_crawler, ColorCode
 import os
 import subprocess
 import json
+import yaml
 
 
 def test_config_works(cluster: Cluster):
@@ -42,7 +43,7 @@ cluster:
         deluxe:
 instance:
     instance_dir: {instance_dir}
-    listen: {listen}
+    iproto_listen: {listen}
     peer:
         - {listen}
     cluster_name: my-cluster
@@ -98,7 +99,7 @@ instance:
                 value=f"{instance.config_path}", source="commandline_or_environment"
             ),
             instance_dir=dict(value=instance_dir, source="config_file"),
-            listen=dict(value=f"{host}:{port}", source="config_file"),
+            iproto_listen=dict(value=f"{host}:{port}", source="config_file"),
             log=dict(
                 level=dict(value="verbose", source="commandline_or_environment"),
                 format=dict(value="plain", source="default"),
@@ -396,6 +397,9 @@ def test_picodata_default_config(cluster: Cluster):
     default_config = data.decode()
     assert len(default_config) != 0
 
+    default_config_dict = yaml.safe_load(default_config)
+    assert "listen" not in default_config_dict["instance"]
+
     # Explicit filename
     subprocess.call(
         [cluster.binary_path, "config", "default", "-o", "filename.yaml"],
@@ -416,11 +420,11 @@ def test_picodata_default_config(cluster: Cluster):
     cluster.set_config_file(yaml=default_config)
     i = cluster.add_instance(wait_online=False)
 
-    # Default config contains default values for `listen`, `advertise` & `peers`,
-    # but our testing harness overrides the `listen` & `peers` values so that
+    # Default config contains default values for `iproto_listen`, `advertise` & `peers`,
+    # but our testing harness overrides the `iproto_listen` & `peers` values so that
     # running tests in parallel doesn't result in binding to conflicting ports.
     # For this reason we must also specify `advertise` explictily.
-    i.env["PICODATA_ADVERTISE"] = i.listen  # type: ignore
+    i.env["PICODATA_ADVERTISE"] = i.iproto_listen  # type: ignore
 
     i.start()
     i.wait_online()
@@ -473,7 +477,7 @@ def test_output_config_parameters(cluster: Cluster):
         'instance.tier': "default"
         'instance.failure_domain': {}
         'instance.peer':
-        'instance.listen':
+        'instance.iproto_listen':
         'instance.advertise_address':
         'instance.admin_socket':
         'instance.share_dir':
diff --git a/test/int/test_http_server.py b/test/int/test_http_server.py
index 6a729b61ef..c2c560e908 100644
--- a/test/int/test_http_server.py
+++ b/test/int/test_http_server.py
@@ -52,7 +52,7 @@ def test_webui_basic(instance: Instance):
                                 "name": "default_1_1",
                                 "version": instance_version,
                                 "httpAddress": http_listen,
-                                "binaryAddress": instance.listen,
+                                "binaryAddress": instance.iproto_listen,
                             }
                         ],
                         "instanceCount": 1,
@@ -152,19 +152,19 @@ def test_webui_with_plugin(cluster: Cluster):
     instance_1 = {
         **instance_template,
         "name": "red_1_1",
-        "binaryAddress": i1.listen,
+        "binaryAddress": i1.iproto_listen,
         "httpAddress": http_listen,
     }
     instance_2 = {
         **instance_template,
         "name": "blue_1_1",
-        "binaryAddress": i2.listen,
+        "binaryAddress": i2.iproto_listen,
         "httpAddress": "",
     }
     instance_3 = {
         **instance_template,
         "name": "green_1_1",
-        "binaryAddress": i3.listen,
+        "binaryAddress": i3.iproto_listen,
         "httpAddress": "",
     }
 
diff --git a/test/int/test_joining.py b/test/int/test_joining.py
index 04e00a79d3..374dc33697 100644
--- a/test/int/test_joining.py
+++ b/test/int/test_joining.py
@@ -82,24 +82,24 @@ def test_discovery(cluster3: Cluster):
 
     def req_discover(instance: Instance):
         request = dict(tmp_id="unused", peers=["test:3301"])
-        request_to = instance.listen
+        request_to = instance.iproto_listen
         return instance.call(".proc_discover", request, request_to)
 
     # Run discovery against `--instance i1`.
     # It used to be a bootstrap leader, but now it's just a follower.
-    assert req_discover(i1) == {"Done": {"NonLeader": {"leader": i2.listen}}}
+    assert req_discover(i1) == {"Done": {"NonLeader": {"leader": i2.iproto_listen}}}
 
     # add instance
-    i4 = cluster3.add_instance(peers=[i1.listen])
+    i4 = cluster3.add_instance(peers=[i1.iproto_listen])
     i4.assert_raft_status("Follower", leader_id=i2.raft_id)
 
     # Run discovery against `--instance i3`.
     # It has performed a rebootstrap after discovery,
     # and now has the discovery module uninitialized.
-    assert req_discover(i3) == {"Done": {"NonLeader": {"leader": i2.listen}}}
+    assert req_discover(i3) == {"Done": {"NonLeader": {"leader": i2.iproto_listen}}}
 
     # add instance
-    i5 = cluster3.add_instance(peers=[i3.listen])
+    i5 = cluster3.add_instance(peers=[i3.iproto_listen])
     i5.assert_raft_status("Follower", leader_id=i2.raft_id)
 
 
@@ -119,7 +119,9 @@ def test_parallel(cluster3: Cluster):
     i3.assert_raft_status("Follower", leader_id=i2.raft_id)
 
     # Add instance with the first instance being i1
-    i4 = cluster3.add_instance(peers=[i1.listen, i2.listen, i3.listen])
+    i4 = cluster3.add_instance(
+        peers=[i1.iproto_listen, i2.iproto_listen, i3.iproto_listen]
+    )
     i4.assert_raft_status("Follower", leader_id=i2.raft_id)
 
 
@@ -134,7 +136,10 @@ def test_replication(cluster: Cluster):
     def check_replicated(instance):
         box_replication = instance.eval("return box.cfg.replication")
         assert set(box_replication) == set(
-            (f"pico_service:secret@{addr}" for addr in [i1.listen, i2.listen])
+            (
+                f"pico_service:secret@{addr}"
+                for addr in [i1.iproto_listen, i2.iproto_listen]
+            )
         ), instance
 
     for instance in cluster.instances:
-- 
GitLab


From e50e0abcad5c1c516dda4a9e5e65465313896ea5 Mon Sep 17 00:00:00 2001
From: Kurdakov Alexander <kusancho12@gmail.com>
Date: Tue, 21 Jan 2025 03:21:43 +0300
Subject: [PATCH 3/5] feat: rename `advertise` to `iproto_advertise` preserving
 backward compatibility

---
 src/bootstrap_entries.rs     |  2 +-
 src/cli/args.rs              | 25 +++++++++++--
 src/config.rs                | 68 ++++++++++++++++++++++++++++++------
 src/info.rs                  |  4 +--
 src/lib.rs                   |  2 +-
 src/luamod.rs                |  2 +-
 src/plugin/ffi.rs            |  2 +-
 test/int/test_basics.py      | 12 +++----
 test/int/test_config_file.py |  7 ++--
 9 files changed, 96 insertions(+), 28 deletions(-)

diff --git a/src/bootstrap_entries.rs b/src/bootstrap_entries.rs
index a8eb109b02..89de269773 100644
--- a/src/bootstrap_entries.rs
+++ b/src/bootstrap_entries.rs
@@ -38,7 +38,7 @@ pub(super) fn prepare(
             ClusterwideTable::Address,
             &traft::PeerAddress {
                 raft_id: instance.raft_id,
-                address: config.instance.advertise_address().to_host_port(),
+                address: config.instance.iproto_advertise().to_host_port(),
             },
             ADMIN_ID,
         )
diff --git a/src/cli/args.rs b/src/cli/args.rs
index fe8be27455..e97da9b32c 100644
--- a/src/cli/args.rs
+++ b/src/cli/args.rs
@@ -82,8 +82,12 @@ pub struct Run {
     #[clap(
         long = "advertise",
         value_name = "[HOST][:PORT]",
-        env = "PICODATA_ADVERTISE"
+        env = "PICODATA_ADVERTISE",
+        hide = true,
+        group = "advertise_arguments"
     )]
+    /// DEPRECATED option
+    ///
     /// Public network address of the instance. It is announced to the
     /// cluster during the instance start. Later it's used by other
     /// instances for connecting to this one.
@@ -92,9 +96,26 @@ pub struct Run {
     /// for example, in case of `--iproto-listen 0.0.0.0` it should be
     /// specified explicitly:
     ///
-    /// picodata run --iproto-listen 0.0.0.0:3301 --advertise 192.168.0.1:3301
+    /// picodata run --iproto-listen 0.0.0.0:3301 --iproto-advertise 192.168.0.1:3301
     pub advertise_address: Option<IprotoAddress>,
 
+    #[clap(
+        long = "iproto-advertise",
+        value_name = "[HOST][:PORT]",
+        env = "PICODATA_IPROTO_ADVERTISE",
+        group = "advertise_arguments"
+    )]
+    /// Public network address of the instance. It is announced to the
+    /// cluster during the instance start. Later it's used by other
+    /// instances for connecting to this one.
+    ///
+    /// Defaults to `--iproto-listen` value which is enough in most cases. But,
+    /// for example, in case of `--iproto-listen 0.0.0.0` it should be
+    /// specified explicitly:
+    ///
+    /// picodata run --iproto-listen 0.0.0.0:3301 --iproto-advertise 192.168.0.1:3301
+    pub iproto_advertise: Option<IprotoAddress>,
+
     #[clap(
         short = 'l',
         long = "listen",
diff --git a/src/config.rs b/src/config.rs
index 922ec0d6c4..7d72df4634 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -285,6 +285,11 @@ Using configuration file '{args_path}'.");
             config_from_args.instance.failure_domain = Some(FailureDomain::from(map));
         }
 
+        if let Some(address) = args.iproto_advertise {
+            config_from_args.instance.iproto_advertise = Some(address);
+        }
+
+        #[allow(deprecated)]
         if let Some(address) = args.advertise_address {
             config_from_args.instance.advertise_address = Some(address);
         }
@@ -527,6 +532,10 @@ Using configuration file '{args_path}'.");
                 config_parameter_path!(instance.listen),
                 config_parameter_path!(instance.iproto_listen),
             ),
+            (
+                config_parameter_path!(instance.advertise_address),
+                config_parameter_path!(instance.iproto_advertise),
+            ),
         ];
 
         // Handle renamed parameters
@@ -675,13 +684,13 @@ Using configuration file '{args_path}'.");
         if let Some(raft_id) = raft_storage.raft_id()? {
             match (
                 storage.peer_addresses.get(raft_id)?,
-                &self.instance.advertise_address,
+                &self.instance.iproto_advertise,
             ) {
                 (Some(from_storage), Some(from_config))
                     if from_storage != from_config.to_host_port() =>
                 {
                     return Err(Error::InvalidConfiguration(format!(
-                        "instance restarted with a different `advertise_address`, which is not allowed, was: '{from_storage}' became: '{from_config}'"
+                        "instance restarted with a different `iproto_advertise`, which is not allowed, was: '{from_storage}' became: '{from_config}'"
                     )));
                 }
                 _ => {}
@@ -1118,10 +1127,14 @@ pub struct InstanceConfig {
     #[introspection(config_default = IprotoAddress::default())]
     pub iproto_listen: Option<IprotoAddress>,
 
-    #[introspection(config_default = self.iproto_listen())]
+    #[deprecated = "use iproto_advertise instead"]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub advertise_address: Option<IprotoAddress>,
 
-    #[introspection(config_default = vec![self.advertise_address()])]
+    #[introspection(config_default = self.iproto_listen())]
+    pub iproto_advertise: Option<IprotoAddress>,
+
+    #[introspection(config_default = vec![self.iproto_advertise()])]
     pub peer: Option<Vec<IprotoAddress>>,
 
     pub http_listen: Option<HttpAddress>,
@@ -1215,8 +1228,8 @@ impl InstanceConfig {
     }
 
     #[inline]
-    pub fn advertise_address(&self) -> IprotoAddress {
-        self.advertise_address
+    pub fn iproto_advertise(&self) -> IprotoAddress {
+        self.iproto_advertise
             .clone()
             .expect("is set in PicodataConfig::set_defaults_explicitly")
     }
@@ -2189,7 +2202,7 @@ instance:
             );
             assert_eq!(config.instance.name(), None);
             assert_eq!(config.instance.iproto_listen().to_host_port(), IprotoAddress::default_host_port());
-            assert_eq!(config.instance.advertise_address().to_host_port(), IprotoAddress::default_host_port());
+            assert_eq!(config.instance.iproto_advertise().to_host_port(), IprotoAddress::default_host_port());
             assert_eq!(config.instance.log_level(), SayLevel::Info);
             assert!(config.instance.failure_domain().data.is_empty());
         }
@@ -2364,21 +2377,21 @@ instance:
             let config = setup_for_tests(Some(""), &["run"]).unwrap();
 
             assert_eq!(config.instance.iproto_listen().to_host_port(), "L-ENVIRON:3301");
-            assert_eq!(config.instance.advertise_address().to_host_port(), "L-ENVIRON:3301");
+            assert_eq!(config.instance.iproto_advertise().to_host_port(), "L-ENVIRON:3301");
 
             let yaml = r###"
 instance:
-    advertise_address: A-CONFIG
+    iproto_advertise: A-CONFIG
 "###;
             let config = setup_for_tests(Some(yaml), &["run"]).unwrap();
 
             assert_eq!(config.instance.iproto_listen().to_host_port(), "L-ENVIRON:3301");
-            assert_eq!(config.instance.advertise_address().to_host_port(), "A-CONFIG:3301");
+            assert_eq!(config.instance.iproto_advertise().to_host_port(), "A-CONFIG:3301");
 
             let config = setup_for_tests(Some(yaml), &["run", "-l", "L-COMMANDLINE"]).unwrap();
 
             assert_eq!(config.instance.iproto_listen().to_host_port(), "L-COMMANDLINE:3301");
-            assert_eq!(config.instance.advertise_address().to_host_port(), "A-CONFIG:3301");
+            assert_eq!(config.instance.iproto_advertise().to_host_port(), "A-CONFIG:3301");
         }
 
         //
@@ -2631,4 +2644,37 @@ instance:
 
         assert_eq!(config.unwrap_err().to_string(), "invalid configuration: instance.listen is deprecated, use instance.iproto_listen instead (cannot use both at the same time)");
     }
+
+    #[test]
+    fn test_iproto_advertise_advertise_interaction() {
+        let _guard = protect_env();
+
+        // the deprecated advertise_address option should still populate iproto_advertise
+        let yaml = r###"
+instance:
+        advertise_address: localhost:3301
+"###;
+        let config = setup_for_tests(Some(yaml), &["run"]).unwrap();
+        assert_eq!(
+            config.instance.iproto_advertise().to_host_port(),
+            "localhost:3301"
+        );
+
+        // can't use both options
+        let yaml = r###"
+instance:
+        advertise_address: localhost:3302
+        iproto_advertise: localhost:3301
+"###;
+        let config = setup_for_tests(Some(yaml), &["run"]);
+        assert_eq!(config.unwrap_err().to_string(), "invalid configuration: instance.advertise_address is deprecated, use instance.iproto_advertise instead (cannot use both at the same time)");
+
+        // can't use both options
+        let yaml = r###"
+instance:
+        iproto_advertise: localhost:3302
+"###;
+        let config = setup_for_tests(Some(yaml), &["run", "--advertise", "localhost:3303"]);
+        assert_eq!(config.unwrap_err().to_string(), "invalid configuration: instance.advertise_address is deprecated, use instance.iproto_advertise instead (cannot use both at the same time)");
+    }
 }
diff --git a/src/info.rs b/src/info.rs
index 7d4a0bdc32..ae2ec9a6a5 100644
--- a/src/info.rs
+++ b/src/info.rs
@@ -94,7 +94,7 @@ pub fn proc_version_info() -> VersionInfo<'static> {
 #[derive(Clone, Debug, ::serde::Serialize, ::serde::Deserialize)]
 pub struct InstanceInfo {
     pub raft_id: RaftId,
-    pub advertise_address: String,
+    pub iproto_advertise: String,
     pub name: InstanceName,
     pub uuid: String,
     pub replicaset_name: ReplicasetName,
@@ -132,7 +132,7 @@ impl InstanceInfo {
 
         Ok(InstanceInfo {
             raft_id: instance.raft_id,
-            advertise_address: peer_address,
+            iproto_advertise: peer_address,
             name: instance.name,
             uuid: instance.uuid,
             replicaset_name: instance.replicaset_name,
diff --git a/src/lib.rs b/src/lib.rs
index 07fd346504..f2f5582a74 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -854,7 +854,7 @@ fn start_join(config: &PicodataConfig, instance_address: String) -> Result<(), E
         cluster_name: config.cluster_name().into(),
         instance_name: config.instance.name().map(From::from),
         replicaset_name: config.instance.replicaset_name().map(From::from),
-        advertise_address: config.instance.advertise_address().to_host_port(),
+        advertise_address: config.instance.iproto_advertise().to_host_port(),
         failure_domain: config.instance.failure_domain(),
         tier: config.instance.tier().into(),
         picodata_version: version,
diff --git a/src/luamod.rs b/src/luamod.rs
index 72664c3fbb..eb71359e8c 100644
--- a/src/luamod.rs
+++ b/src/luamod.rs
@@ -265,7 +265,7 @@ pub(crate) fn setup() {
 
             Ok(tlua::AsTable((
                 ("raft_id", info.raft_id),
-                ("advertise_address", info.advertise_address),
+                ("iproto_advertise", info.iproto_advertise),
                 ("name", info.name.0),
                 ("uuid", info.uuid),
                 ("replicaset_name", info.replicaset_name),
diff --git a/src/plugin/ffi.rs b/src/plugin/ffi.rs
index 96a3f7e6f2..75ca13f892 100644
--- a/src/plugin/ffi.rs
+++ b/src/plugin/ffi.rs
@@ -57,7 +57,7 @@ extern "C" fn pico_ffi_instance_info() -> RResult<types::InstanceInfo, ()> {
     };
     ROk(types::InstanceInfo::new(
         info.raft_id,
-        info.advertise_address,
+        info.iproto_advertise,
         info.name.0,
         info.uuid,
         info.replicaset_name.0,
diff --git a/test/int/test_basics.py b/test/int/test_basics.py
index a50d7ed205..b522dd05a7 100644
--- a/test/int/test_basics.py
+++ b/test/int/test_basics.py
@@ -186,15 +186,15 @@ invalid configuration: instance restarted with a different `replicaset_name`, wh
     # Change advertise address
     #
     was = instance.iproto_listen  # type: ignore
-    instance.env["PICODATA_ADVERTISE"] = "example.com:1234"
-    assert instance.env["PICODATA_ADVERTISE"] != was
+    instance.env["PICODATA_IPROTO_ADVERTISE"] = "example.com:1234"
+    assert instance.env["PICODATA_IPROTO_ADVERTISE"] != was
     err = f"""\
-invalid configuration: instance restarted with a different `advertise_address`, which is not allowed, was: '{was}' became: 'example.com:1234'
+invalid configuration: instance restarted with a different `iproto_advertise`, which is not allowed, was: '{was}' became: 'example.com:1234'
 """  # noqa: E501
     crawler = log_crawler(instance, err)
     instance.fail_to_start()
     assert crawler.matched
-    del instance.env["PICODATA_ADVERTISE"]
+    del instance.env["PICODATA_IPROTO_ADVERTISE"]
 
 
 def test_whoami(instance: Instance):
@@ -539,8 +539,8 @@ cluster:
     i1_info = i1.call(".proc_instance_info")
     assert i1_info == dict(
         raft_id=1,
-        advertise_address=f"{i1.host}:{i1.port}",
         name="storage_1_1",
+        iproto_advertise=f"{i1.host}:{i1.port}",
         uuid=i1.uuid(),
         replicaset_name="storage_1",
         replicaset_uuid=i1.replicaset_uuid(),
@@ -553,8 +553,8 @@ cluster:
     i2_info = i2.call(".proc_instance_info")
     assert i2_info == dict(
         raft_id=2,
-        advertise_address=f"{i2.host}:{i2.port}",
         name="router_1_1",
+        iproto_advertise=f"{i2.host}:{i2.port}",
         uuid=i2.uuid(),
         replicaset_name="router_1",
         replicaset_uuid=i2.replicaset_uuid(),
diff --git a/test/int/test_config_file.py b/test/int/test_config_file.py
index 6d9a3dc36e..04fff49557 100644
--- a/test/int/test_config_file.py
+++ b/test/int/test_config_file.py
@@ -84,7 +84,7 @@ instance:
         ),
         instance=dict(
             admin_socket=dict(value=f"{instance_dir}/admin.sock", source="default"),
-            advertise_address=dict(value=f"{host}:{port}", source="default"),
+            iproto_advertise=dict(value=f"{host}:{port}", source="default"),
             failure_domain=dict(value=dict(), source="default"),
             shredding=dict(value=False, source="default"),
             cluster_name=dict(value="my-cluster", source="config_file"),
@@ -399,6 +399,7 @@ def test_picodata_default_config(cluster: Cluster):
 
     default_config_dict = yaml.safe_load(default_config)
     assert "listen" not in default_config_dict["instance"]
+    assert "advertise" not in default_config_dict["instance"]
 
     # Explicit filename
     subprocess.call(
@@ -424,7 +425,7 @@ def test_picodata_default_config(cluster: Cluster):
     # but our testing harness overrides the `iproto_listen` & `peers` values so that
     # running tests in parallel doesn't result in binding to conflicting ports.
     # For this reason we must also specify `advertise` explictily.
-    i.env["PICODATA_ADVERTISE"] = i.iproto_listen  # type: ignore
+    i.env["PICODATA_IPROTO_ADVERTISE"] = i.iproto_listen  # type: ignore
 
     i.start()
     i.wait_online()
@@ -478,7 +479,7 @@ def test_output_config_parameters(cluster: Cluster):
         'instance.failure_domain': {}
         'instance.peer':
         'instance.iproto_listen':
-        'instance.advertise_address':
+        'instance.iproto_advertise':
         'instance.admin_socket':
         'instance.share_dir':
         'instance.audit':
-- 
GitLab


From 3941e0628f5d6747c9e55e699f52b750fc98041e Mon Sep 17 00:00:00 2001
From: Kurdakov Alexander <kusancho12@gmail.com>
Date: Thu, 30 Jan 2025 16:55:28 +0300
Subject: [PATCH 4/5] changelog: update naming changes

---
 CHANGELOG.md | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c8e6f82f0c..a086537f3b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,12 +28,29 @@ with the `YY.MINOR.MICRO` scheme.
   instance.share_dir)
 
 - The following parameters has been moved from configuration file to `_pico_db_config` system table:
-
   - checkpoint_interval
   - checkpoint_count
   - max_concurrent_messages
 
-- `max_concurrent_messages` parameter is renamed to `net_msg_max`
+- `max_heartbeat_period` has been removed from `_pico_db_config`
+
+- Major renaming of parameters from `_pico_db_config`:
+  - `max_concurrent_messages` renamed to `iproto_net_msg_max`
+  - `password_min_length` renamed to `auth_password_length_min`
+  - `password_enforce_uppercase` renamed to `auth_password_enforce_uppercase`
+  - `password_enforce_lowercase` renamed to `auth_password_enforce_lowercase`
+  - `password_enforce_digits` renamed to `auth_password_enforce_digits`
+  - `password_enforce_specialchars` renamed to `auth_password_enforce_specialchars`
+  - `max_login_attempts` renamed to `auth_login_attempt_max`
+  - `auto_offline_timeout` renamed to `governor_auto_offline_timeout`
+  - `max_pg_statements` renamed to `pg_statement_max`
+  - `max_pg_portals` renamed to `pg_portal_max`
+  - `snapshot_chunk_max_size` renamed to `raft_snapshot_chunk_size_max`
+  - `snapshot_read_view_close_timeout` renamed to `raft_snapshot_read_view_close_timeout`
+  - `cluster_wal_max_size` renamed to `raft_wal_size_max`
+  - `cluster_wal_max_count` renamed to `raft_wal_count_max`
+
+- `listen`, `advertise` parameters are renamed to `iproto_listen`, `iproto_advertise`
 
 ### CLI
 
-- 
GitLab


From 27f3c263a06dd43ca164860f646192e353f3c26b Mon Sep 17 00:00:00 2001
From: Kurdakov Alexander <kusancho12@gmail.com>
Date: Mon, 3 Feb 2025 18:11:59 +0300
Subject: [PATCH 5/5] fix: remove plugin_dir from output of `picodata config
 default`

Since it's a deprecated option, it should not appear anywhere
except by explicit user intention.
---
 src/config.rs                | 1 +
 test/int/test_config_file.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/src/config.rs b/src/config.rs
index 7d72df4634..4d634b84ed 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1143,6 +1143,7 @@ pub struct InstanceConfig {
     pub admin_socket: Option<PathBuf>,
 
     #[deprecated = "use share_dir instead"]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub plugin_dir: Option<PathBuf>,
 
     #[introspection(config_default = "/usr/share/picodata/")]
diff --git a/test/int/test_config_file.py b/test/int/test_config_file.py
index 04fff49557..980413f250 100644
--- a/test/int/test_config_file.py
+++ b/test/int/test_config_file.py
@@ -400,6 +400,7 @@ def test_picodata_default_config(cluster: Cluster):
     default_config_dict = yaml.safe_load(default_config)
     assert "listen" not in default_config_dict["instance"]
     assert "advertise" not in default_config_dict["instance"]
+    assert "plugin_dir" not in default_config_dict["instance"]
 
     # Explicit filename
     subprocess.call(
-- 
GitLab