From 180df2c9bc8b954bfce2fc535959b879309a2681 Mon Sep 17 00:00:00 2001 From: Georgy Moshkin <gmoshkin@picodata.io> Date: Wed, 28 Feb 2024 21:43:40 +0300 Subject: [PATCH] fix: move tiers configuration to config.yaml --- src/bootstrap_entries.rs | 5 +- src/cli/init_cfg.rs | 75 ------------------ src/cli/mod.rs | 1 - src/config.rs | 149 ++++++++++++++++++++++++++++++++--- src/lib.rs | 34 ++------ src/replicaset.rs | 1 + src/rpc/join.rs | 1 + src/tier.rs | 35 ++++++-- test/conftest.py | 29 ++++--- test/int/test_basics.py | 37 +++++---- test/int/test_config_file.py | 98 ++++++++++++----------- 11 files changed, 269 insertions(+), 196 deletions(-) delete mode 100644 src/cli/init_cfg.rs diff --git a/src/bootstrap_entries.rs b/src/bootstrap_entries.rs index 15d6f17c9e..3e6f6c068b 100644 --- a/src/bootstrap_entries.rs +++ b/src/bootstrap_entries.rs @@ -12,11 +12,12 @@ use crate::storage::PropertyName; use crate::tier::Tier; use crate::traft; use crate::traft::op; +use std::collections::HashMap; pub(super) fn prepare( config: &PicodataConfig, instance: &Instance, - tiers: &[Tier], + tiers: &HashMap<String, Tier>, ) -> Vec<raft::Entry> { let mut init_entries = Vec::new(); @@ -55,7 +56,7 @@ pub(super) fn prepare( // // Populate "_pico_tier" with initial tiers // - for tier in tiers { + for tier in tiers.values() { init_entries_push_op(op::Dml::insert(ClusterwideTable::Tier, &tier, ADMIN_ID)); } diff --git a/src/cli/init_cfg.rs b/src/cli/init_cfg.rs deleted file mode 100644 index 4587b301a4..0000000000 --- a/src/cli/init_cfg.rs +++ /dev/null @@ -1,75 +0,0 @@ -use crate::tier::Tier; - -#[derive(Debug, serde::Deserialize, PartialEq, Clone)] -pub struct InitCfg { - #[serde(deserialize_with = "map_to_vec_of_tiers")] - pub tier: Vec<Tier>, -} - -impl Default for InitCfg { - fn default() -> Self { - InitCfg { - tier: vec![Tier::default()], - } - } -} - -impl InitCfg { - pub fn try_from_yaml_file(path: &str) -> Result<InitCfg, String> { - let content = std::fs::read_to_string(path) - .map_err(|e| format!("can't read from {path}, error: {e}"))?; - - let cfg: InitCfg = serde_yaml::from_str(&content) - .map_err(|e| format!("error while parsing {path}, reason: {e}"))?; - - if cfg.tier.is_empty() { - return Err(format!("empty `tier` field in init-cfg by path: {path}")); - } - - Ok(cfg) - } -} - -pub fn map_to_vec_of_tiers<'de, D>(des: D) -> Result<Vec<Tier>, D::Error> -where - D: serde::Deserializer<'de>, -{ - #[derive(serde::Deserialize)] - struct TierConf { - replication_factor: u8, - } - - struct Vis; - - impl<'de> serde::de::Visitor<'de> for Vis { - type Value = Vec<Tier>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("a map of tier name to tier's config") - } - - fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> - where - A: serde::de::MapAccess<'de>, - { - let mut names = std::collections::HashSet::new(); - let mut tiers = Vec::new(); - while let Some((name, tier_conf)) = map.next_entry::<String, TierConf>()? 
{ - if !names.insert(name.clone()) { - return Err(serde::de::Error::custom(format!( - "duplicated tier name `{name}` found" - ))); - } - - tiers.push(Tier { - name, - replication_factor: tier_conf.replication_factor, - }); - } - - Ok(tiers) - } - } - - des.deserialize_map(Vis) -} diff --git a/src/cli/mod.rs b/src/cli/mod.rs index 0ee7727275..aae2765e5c 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -3,7 +3,6 @@ pub mod args; pub mod connect; pub mod console; pub mod expel; -pub mod init_cfg; pub mod run; pub mod tarantool; pub mod test; diff --git a/src/config.rs b/src/config.rs index 409b7a5a6d..6746379dc5 100644 --- a/src/config.rs +++ b/src/config.rs @@ -4,6 +4,9 @@ use crate::failure_domain::FailureDomain; use crate::instance::InstanceId; use crate::replicaset::ReplicasetId; use crate::storage; +use crate::tier::Tier; +use crate::tier::TierConfig; +use crate::tier::DEFAULT_TIER; use crate::tlog; use crate::traft::error::Error; use crate::traft::RaftSpaceAccess; @@ -51,12 +54,28 @@ pub struct PicodataConfig { // pub unknown_sections: HashMap<String, serde_json::Value>, } +fn validate_args(args: &args::Run) -> Result<(), Error> { + if args.init_cfg.is_some() { + return Err(Error::other( + "error: option `--init-cfg` is removed, use `--config` instead", + )); + } + + if args.init_replication_factor.is_some() && args.config.is_some() { + return Err(Error::other("error: option `--init-replication-factor` cannot be used with `--config` simultaneously")); + } + + Ok(()) +} + impl PicodataConfig { // TODO: // fn default() -> Self // which returns an instance of config with all the default parameters. // Also add a command to generate a default config from command line. pub fn init(args: args::Run) -> Result<Self, Error> { + validate_args(&args)?; + let cwd = std::env::current_dir(); let cwd = cwd.as_deref().unwrap_or_else(|_| Path::new(".")).display(); let default_path = format!("{cwd}/{DEFAULT_CONFIG_FILE_NAME}"); @@ -84,6 +103,9 @@ Using configuration file '{args_path}'."); (None, false) => {} } + if let Some(config) = &config { + config.validate_from_file()?; + } let mut config = config.unwrap_or_default(); config.set_from_args(args); @@ -136,11 +158,6 @@ Using configuration file '{args_path}'."); self.instance.tier = Some(tier); } - // TODO: remove this - if let Some(init_cfg) = args.init_cfg { - self.cluster.init_cfg = Some(init_cfg); - } - if let Some(init_replication_factor) = args.init_replication_factor { self.cluster.default_replication_factor = Some(init_replication_factor); } @@ -197,6 +214,21 @@ Using configuration file '{args_path}'."); // TODO: the rest } + /// Does checks which are applicable to configuration loaded from a file. + fn validate_from_file(&self) -> Result<(), Error> { + // XXX: This is kind of a hack. If a config file is provided we require + // it to define the list of initial tiers. However if config file wasn't + // specified, there's no way to define tiers, so we just ignore that + // case and create a dummy "default" tier. + if self.cluster.tiers.is_empty() { + return Err(Error::invalid_configuration( + "empty `cluster.tiers` section which is required to define the initial tiers", + )); + } + + Ok(()) + } + /// Does basic config validation. This function checks constraints /// applicable for all configuration updates. 
/// @@ -214,6 +246,14 @@ Using configuration file '{args_path}'."); } } + for (name, info) in &self.cluster.tiers { + if let Some(explicit_name) = &info.name { + return Err(Error::InvalidConfiguration(format!( + "tier '{name}' has an explicit name field '{explicit_name}', which is not allowed. Tier name is always derived from the outer dictionary's key" + ))); + } + } + Ok(()) } @@ -329,8 +369,8 @@ Using configuration file '{args_path}'."); pub struct ClusterConfig { pub cluster_id: Option<String>, - // TODO: remove this - pub init_cfg: Option<String>, + #[serde(deserialize_with = "deserialize_map_forbid_duplicate_keys")] + pub tiers: HashMap<String, TierConfig>, /// Replication factor which is used for tiers which didn't specify one /// explicitly. For default value see [`Self::default_replication_factor()`]. @@ -338,6 +378,33 @@ pub struct ClusterConfig { } impl ClusterConfig { + pub fn tiers(&self) -> HashMap<String, Tier> { + if self.tiers.is_empty() { + return HashMap::from([( + DEFAULT_TIER.to_string(), + Tier { + name: DEFAULT_TIER.into(), + replication_factor: self.default_replication_factor(), + }, + )]); + } + + let mut tier_defs = HashMap::with_capacity(self.tiers.len()); + for (name, info) in &self.tiers { + let replication_factor = info + .replication_factor + .unwrap_or_else(|| self.default_replication_factor()); + + let tier_def = Tier { + name: name.into(), + replication_factor, + // TODO: support other fields + }; + tier_defs.insert(name.into(), tier_def); + } + tier_defs + } + #[inline] pub fn default_replication_factor(&self) -> u8 { self.default_replication_factor.unwrap_or(1) @@ -703,12 +770,43 @@ mod tests { cluster: cluster_id: foobar + tiers: + voter: + can_vote: true + + storage: + replication_factor: 3 + can_vote: false + + search-index: + replication_factor: 2 + can_vote: false + instance: instance_id: voter1 "###; - let config = PicodataConfig::read_yaml_contents(&yaml).unwrap(); - dbg!(config); + _ = PicodataConfig::read_yaml_contents(&yaml).unwrap(); + } + + #[test] + fn duplicate_tiers_is_error() { + let yaml = r###" +cluster: + tiers: + voter: + can_vote: true + + voter: + can_vote: false + +"###; + + let err = PicodataConfig::read_yaml_contents(&yaml.trim_start()).unwrap_err(); + assert_eq!( + err.to_string(), + "invalid configuration: cluster.tiers: duplicate key `voter` found at line 3 column 9" + ); } #[test] @@ -716,6 +814,8 @@ instance: let yaml = r###" cluster: cluster_id: foo + tiers: + default: instance: cluster_id: bar "###; @@ -724,6 +824,37 @@ instance: assert_eq!(err.to_string(), "invalid configuration: `cluster.cluster_id` (foo) conflicts with `instance.cluster_id` (bar)") } + #[test] + fn missing_tiers_is_error() { + let yaml = r###" +cluster: +"###; + let err = PicodataConfig::read_yaml_contents(&yaml.trim()).unwrap_err(); + assert_eq!( + err.to_string(), + "invalid configuration: cluster: missing field `tiers` at line 1 column 9" + ); + + let yaml = r###" +cluster: + tiers: +"###; + let config = PicodataConfig::read_yaml_contents(&yaml.trim()).unwrap(); + let err = config.validate_from_file().unwrap_err(); + assert_eq!( + err.to_string(), + "invalid configuration: empty `cluster.tiers` section which is required to define the initial tiers" + ); + + let yaml = r###" +cluster: + tiers: + default: +"###; + let config = PicodataConfig::read_yaml_contents(&yaml.trim()).unwrap(); + config.validate_from_file().unwrap(); + } + #[test] fn spaces_in_addresses() { let yaml = r###" diff --git a/src/lib.rs b/src/lib.rs index 2ca5e1c224..87ed5614f2 100644 
--- a/src/lib.rs +++ b/src/lib.rs @@ -22,14 +22,12 @@ use traft::RaftSpaceAccess; use crate::access_control::user_by_id; use crate::address::Address; -use crate::cli::init_cfg::InitCfg; use crate::instance::Grade; use crate::instance::GradeVariant::*; use crate::instance::Instance; use crate::plugin::*; use crate::schema::ADMIN_ID; use crate::schema::PICO_SERVICE_USER_NAME; -use crate::tier::{Tier, DEFAULT_TIER}; use crate::traft::error::Error; use crate::traft::op; use crate::util::effective_user_id; @@ -642,31 +640,11 @@ fn start_discover( fn start_boot(config: &PicodataConfig) -> Result<(), Error> { tlog!(Info, "entering cluster bootstrap phase"); - let init_cfg = match &config.cluster.init_cfg { - Some(path) => InitCfg::try_from_yaml_file(path).map_err(Error::other)?, - None => { - tlog!(Info, "init-cfg wasn't set"); - tlog!( - Info, - "filling init-cfg with default tier `{}` using replication-factor={}", - DEFAULT_TIER, - config.cluster.default_replication_factor() - ); - - let tier = vec![Tier::with_replication_factor( - config.cluster.default_replication_factor(), - )]; - InitCfg { tier } - } - }; - - let tiers = init_cfg.tier; - + let tiers = config.cluster.tiers(); let my_tier_name = config.instance.tier(); - let Some(current_instance_tier) = tiers.iter().find(|tier| tier.name == my_tier_name).cloned() - else { + if !tiers.contains_key(my_tier_name) { return Err(Error::other(format!( - "tier '{my_tier_name}' for current instance is not found in init-cfg", + "invalid configuration: current instance is assigned tier '{my_tier_name}' which is not defined in the configuration file", ))); }; @@ -677,7 +655,7 @@ fn start_boot(config: &PicodataConfig) -> Result<(), Error> { Grade::new(Offline, 0), Grade::new(Offline, 0), config.instance.failure_domain(), - ¤t_instance_tier.name, + my_tier_name, ); let raft_id = instance.raft_id; let instance_id = instance.instance_id.clone(); @@ -717,9 +695,7 @@ fn start_boot(config: &PicodataConfig) -> Result<(), Error> { transaction(|| -> Result<(), TntError> { raft_storage.persist_raft_id(raft_id).unwrap(); raft_storage.persist_instance_id(&instance_id).unwrap(); - raft_storage - .persist_tier(¤t_instance_tier.name) - .unwrap(); + raft_storage.persist_tier(my_tier_name).unwrap(); raft_storage .persist_cluster_id(config.cluster_id()) .unwrap(); diff --git a/src/replicaset.rs b/src/replicaset.rs index a4cc8470e3..9a1f4fe425 100644 --- a/src/replicaset.rs +++ b/src/replicaset.rs @@ -2,6 +2,7 @@ use super::instance::InstanceId; use ::tarantool::tlua; use ::tarantool::tuple::Encode; +// TODO: this redundant boilerplate needs to be removed crate::define_string_newtype! { /// Unique id of a replicaset. /// diff --git a/src/rpc/join.rs b/src/rpc/join.rs index ae974c7737..1bc5fb7878 100644 --- a/src/rpc/join.rs +++ b/src/rpc/join.rs @@ -215,6 +215,7 @@ pub fn build_instance( Ok(instance) } +// TODO: choose instance id based on tier name instead /// Choose [`InstanceId`] based on `raft_id`. fn choose_instance_id(raft_id: RaftId, storage: &Clusterwide) -> InstanceId { let mut suffix: Option<u64> = None; diff --git a/src/tier.rs b/src/tier.rs index 8a03493b70..356536c1ef 100644 --- a/src/tier.rs +++ b/src/tier.rs @@ -6,6 +6,8 @@ pub const DEFAULT_TIER: &str = "default"; #[derive(Debug, serde::Deserialize, serde::Serialize, PartialEq, tlua::Push, Clone)] //////////////////////////////////////////////////////////////////////////////// /// Serializable struct representing a tier. +/// +/// Can be used to store tier definition in the _pico_tier global table. 
pub struct Tier { pub name: String, pub replication_factor: u8, @@ -22,13 +24,6 @@ impl Tier { Field::from(("replication_factor", FieldType::Unsigned)), ] } - - pub fn with_replication_factor(replication_factor: u8) -> Self { - Tier { - name: DEFAULT_TIER.into(), - replication_factor, - } - } } impl Default for Tier { @@ -40,6 +35,32 @@ impl Default for Tier { } } +/// Tier definition struct which can be deserialized from the config file. +#[derive( + PartialEq, + Default, + Debug, + Clone, + serde::Deserialize, + serde::Serialize, + tlua::Push, + tlua::PushInto, +)] +#[serde(deny_unknown_fields)] +pub struct TierConfig { + pub name: Option<String>, + pub replication_factor: Option<u8>, + + /// TODO: This is not yet implemented, currently all tiers can vote + #[serde(default = "default_can_vote")] + pub can_vote: bool, +} + +#[inline(always)] +fn default_can_vote() -> bool { + true +} + #[cfg(test)] mod tests { use super::*; diff --git a/test/conftest.py b/test/conftest.py index 19631d8b3d..be5a14c101 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -8,7 +8,7 @@ import time import threading from types import SimpleNamespace -import yaml # type: ignore +import yaml as yaml_lib # type: ignore import pytest import signal import subprocess @@ -468,7 +468,7 @@ class Instance: audit: str | bool = True tier: str | None = None init_replication_factor: int | None = None - init_cfg_path: str | None = None + config_path: str | None = None instance_id: str | None = None replicaset_id: str | None = None failure_domain: dict[str, str] = field(default_factory=dict) @@ -524,8 +524,7 @@ class Instance: *(f"--failure-domain={k}={v}" for k, v in self.failure_domain.items()), *(["--init-replication-factor", f"{self.init_replication_factor}"] if self.init_replication_factor is not None else []), - *(["--init-cfg", self.init_cfg_path] - if self.init_cfg_path is not None else []), + *(["--config", self.config_path] if self.config_path is not None else []), *(["--tier", self.tier] if self.tier is not None else []), *(["--audit", audit] if audit else []), *(["--service-password-file", service_password] if service_password else []), @@ -1211,7 +1210,7 @@ class Cluster: base_port: int max_port: int instances: list[Instance] = field(default_factory=list) - cfg_path: str | None = None + config_path: str | None = None def __repr__(self): return f'Cluster("{self.base_host}:{self.base_port}", n={len(self.instances)})' @@ -1244,12 +1243,18 @@ class Cluster: eprint(f" {self} deployed ".center(80, "=")) return self.instances - def set_init_cfg(self, cfg: dict): - assert self.cfg_path is None - self.cfg_path = self.data_dir + "/tier.yaml" - with open(self.cfg_path, "w") as yaml_file: - dump = yaml.dump(cfg, default_flow_style=False) - yaml_file.write(dump) + def set_config_file(self, config: dict | None = None, yaml: str | None = None): + assert config or yaml + assert self.config_path is None + + self.config_path = self.data_dir + "/config.yaml" + + if config: + yaml = yaml_lib.dump(config, default_flow_style=False) + + assert yaml + with open(self.config_path, "w") as yaml_file: + yaml_file.write(yaml) def add_instance( self, @@ -1308,7 +1313,7 @@ class Cluster: failure_domain=failure_domain, init_replication_factor=init_replication_factor, tier=tier, - init_cfg_path=self.cfg_path, + config_path=self.config_path, audit=True, ) diff --git a/test/int/test_basics.py b/test/int/test_basics.py index 38cde6353a..46e90a603a 100644 --- a/test/int/test_basics.py +++ b/test/int/test_basics.py @@ -252,14 +252,17 @@ def 
test_whoami(instance: Instance): def test_whoami_in_different_tiers(cluster: Cluster): - cfg = { - "tier": { - "storage": {"replication_factor": 1}, - "router": {"replication_factor": 2}, - } - } - - cluster.set_init_cfg(cfg) + cluster.set_config_file( + yaml=""" +cluster: + cluster_id: test + tiers: + storage: + replication_factor: 2 + router: + replication_factor: 1 +""" + ) i1 = cluster.add_instance(tier="storage") i2 = cluster.add_instance(tier="router") @@ -454,13 +457,17 @@ def test_proc_version_info(instance: Instance): def test_proc_instance_info(cluster: Cluster): - cfg = { - "tier": { - "storage": {"replication_factor": 1}, - "router": {"replication_factor": 2}, - } - } - cluster.set_init_cfg(cfg) + cluster.set_config_file( + yaml=""" +cluster: + cluster_id: test + tiers: + storage: + replication_factor: 2 + router: + replication_factor: 1 +""" + ) i1 = cluster.add_instance(tier="storage") i2 = cluster.add_instance(tier="router") diff --git a/test/int/test_config_file.py b/test/int/test_config_file.py index 55a133de5f..28471bbd16 100644 --- a/test/int/test_config_file.py +++ b/test/int/test_config_file.py @@ -3,18 +3,19 @@ import os def test_config_works(cluster: Cluster): - instance = cluster.add_instance(instance_id=False, wait_online=False) - config_path = cluster.data_dir + "/config.yaml" - with open(config_path, "w") as f: - f.write( - """ + cluster.set_config_file( + yaml=""" +cluster: + tiers: + default: instance: - instance-id: from-config - replicaset-id: with-love - memtx-memory: 42069 - """ - ) - instance.env["PICODATA_CONFIG_FILE"] = config_path + cluster_id: test + instance_id: from-config + replicaset_id: with-love + memtx_memory: 42069 +""" + ) + instance = cluster.add_instance(instance_id=False, wait_online=False) instance.start() instance.wait_online() @@ -34,8 +35,12 @@ def test_default_path_to_config_file(cluster: Cluster): with open(work_dir + "/config.yaml", "w") as f: f.write( """ +cluster: + cluster_id: test + tiers: + default: instance: - memtx-memory: 0xdeadbeef + memtx_memory: 0xdeadbeef """ ) instance.start(cwd=work_dir) @@ -49,8 +54,12 @@ instance: with open(config_path, "w") as f: f.write( """ +cluster: + cluster_id: test + tiers: + default: instance: - memtx-memory: 0xcafebabe + memtx_memory: 0xcafebabe """ ) instance.env["PICODATA_CONFIG_FILE"] = config_path @@ -69,35 +78,23 @@ Using configuration file '{config_path}'. 
instance.terminate() -def test_run_init_cfg_enoent(cluster: Cluster): +def test_init_cfg_is_removed(cluster: Cluster): i1 = cluster.add_instance(wait_online=False) - i1.env.update({"PICODATA_INIT_CFG": "./unexisting_dir/trash.yaml"}) + + i1.env["PICODATA_INIT_CFG"] = "any-path" err = """\ -can't read from ./unexisting_dir/trash.yaml, error: No such file or directory (os error 2)\ +error: option `--init-cfg` is removed, use `--config` instead """ crawler = log_crawler(i1, err) - i1.fail_to_start() - assert crawler.matched -def test_run_init_cfg_with_duplicated_tier_names(cluster: Cluster): - cluster.cfg_path = cluster.data_dir + "/init_cfg.yaml" - cfg_content = """\ -tier: - storage: - replication_factor: 1 - storage: - replication_factor: 1 -""" - with open(cluster.cfg_path, "w") as yaml_file: - yaml_file.write(cfg_content) - +def test_config_file_enoent(cluster: Cluster): i1 = cluster.add_instance(wait_online=False) - err = f"""\ -error while parsing {cluster.cfg_path}, \ -reason: tier: duplicated tier name `storage` found at line 2\ + i1.env.update({"PICODATA_CONFIG_FILE": "./unexisting_dir/trash.yaml"}) + err = """\ +can't read from './unexisting_dir/trash.yaml': No such file or directory (os error 2) """ crawler = log_crawler(i1, err) @@ -106,12 +103,18 @@ reason: tier: duplicated tier name `storage` found at line 2\ assert crawler.matched -def test_run_init_cfg_with_empty_tiers(cluster: Cluster): - cfg: dict = {"tier": {}} - cluster.set_init_cfg(cfg) +def test_config_file_with_empty_tiers(cluster: Cluster): + cluster.set_config_file( + yaml=""" +cluster: + tiers: +""" + ) i1 = cluster.add_instance(wait_online=False) - err = f"empty `tier` field in init-cfg by path: {cluster.cfg_path}" - crawler = log_crawler(i1, err) + err = """\ +invalid configuration: empty `cluster.tiers` section which is required to define the initial tiers\ +""" # noqa: E501 + crawler = log_crawler(i1, err.strip()) i1.fail_to_start() @@ -121,7 +124,9 @@ def test_run_init_cfg_with_empty_tiers(cluster: Cluster): def test_run_with_tier_which_is_not_in_tier_list(cluster: Cluster): i1 = cluster.add_instance(wait_online=False) i1.tier = "unexistent_tier" - err = "tier 'unexistent_tier' for current instance is not found in init-cfg" + err = """\ +current instance is assigned tier 'unexistent_tier' which is not defined in the configuration file\ +""" crawler = log_crawler(i1, err) i1.fail_to_start() @@ -129,11 +134,13 @@ def test_run_with_tier_which_is_not_in_tier_list(cluster: Cluster): assert crawler.matched -def test_run_init_cfg_with_garbage(cluster: Cluster): +def test_config_file_with_garbage(cluster: Cluster): cfg = {"trash": [], "garbage": "tier"} - cluster.set_init_cfg(cfg) + cluster.set_config_file(cfg) i1 = cluster.add_instance(wait_online=False) - err = "reason: missing field `tier`" + err = """\ +invalid configuration: unknown field `garbage`, expected `cluster` or `instance`\ +""" crawler = log_crawler(i1, err) i1.fail_to_start() @@ -141,14 +148,13 @@ def test_run_init_cfg_with_garbage(cluster: Cluster): assert crawler.matched -def test_run_init_cfg_with_init_replication_factor(cluster: Cluster): +def test_config_file_with_init_replication_factor(cluster: Cluster): cfg = {"no matter": "init-cfg doesn't allow to use with `init-replication-factor"} - cluster.set_init_cfg(cfg) + cluster.set_config_file(cfg) i1 = cluster.add_instance(wait_online=False) i1.init_replication_factor = 1 err = """\ -error: The argument '--init-replication-factor <INIT_REPLICATION_FACTOR>' \ -cannot be used with '--init-cfg <PATH>'\ 
+error: option `--init-replication-factor` cannot be used with `--config` simultaneously\ """ crawler = log_crawler(i1, err) -- GitLab
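Note (not part of the patch): with this change the initial tiers are declared in the `cluster.tiers` section of config.yaml rather than in the old `--init-cfg` file. The sketch below is an illustrative config assembled from the tests in this patch; the cluster id, tier names, and replication factors are placeholders, and `can_vote` is accepted but, per the TODO in src/tier.rs, not yet enforced.

cluster:
  cluster_id: my-cluster        # placeholder name
  tiers:
    storage:                    # tier name comes from the map key; an explicit `name` field is rejected
      replication_factor: 3
    router:
      replication_factor: 2
      can_vote: false           # parsed, but voting restrictions are not implemented yet
instance:
  tier: storage                 # must match one of the keys under cluster.tiers

Tiers with no `replication_factor` fall back to `cluster.default_replication_factor`, and `--init-replication-factor` can no longer be combined with `--config`.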