diff --git a/docs/clustering.md b/docs/clustering.md index 5c2943b740a1a93be966f421dda5d6c8e2eb23ba..bfdf382669d92f1b49e880c0afe5cb5f7440199a 100644 --- a/docs/clustering.md +++ b/docs/clustering.md @@ -74,7 +74,7 @@ picodata run --instance-id iN --listen iN --peer i1 Ð—Ð½Ð°Ñ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ Ñ‡Ð°ÑÑ‚ÑŒ вÑей логики по управлению топологией находитÑÑ Ð² хранимой процедуре `raft_join`. Ðргументом Ð´Ð»Ñ Ð½ÐµÐµ ÑвлÑетÑÑ ÑÐ»ÐµÐ´ÑƒÑŽÑ‰Ð°Ñ Ñтруктура: ```rust -struct JoinRequest { +struct join::Request { cluster_id: String, instance_id: Option<String>, replicaset_id: Option<String>, @@ -86,7 +86,7 @@ struct JoinRequest { Ответом Ñлужит Ñтруктура: ```rust -struct JoinResponse { +struct join::Response { /// Добавленный пир (чтобы знать вÑе айдишники) peer: Peer, /// Воутеры (чтобы добавлÑемый инÑÑ‚Ð°Ð½Ñ Ð¼Ð¾Ð³ наладить контакт) @@ -119,7 +119,7 @@ struct Peer { Цель такого запроÑа ÑводитÑÑ Ðº добавление нового инÑтанÑа в Raft-группу. Ð”Ð»Ñ Ñтого алгоритма Ñправедливы Ñледующие тезиÑÑ‹: -- `JoinRequest` отправлÑет вÑегда неинициализированный инÑтанÑ. +- `join::Request` отправлÑет вÑегда неинициализированный инÑтанÑ. - Ð’ завиÑимоÑти от того, ÑодержитÑÑ Ð»Ð¸ в запроÑе `instance_id`, проводитÑÑ Ð°Ð½Ð°Ð»Ð¸Ð· его корректноÑти (уникальноÑти). - Ð’ процеÑÑе обработки запроÑа в Raft-журнал добавлÑетÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ `op::PersistPeer { peer }`, ÐºÐ¾Ñ‚Ð¾Ñ€Ð°Ñ Ð¿Ð¾Ð¼Ð¸Ð¼Ð¾ вÑевозможных айдишников Ñодержит Ð¿Ð¾Ð»Ñ `current_grade: Offline`, `target_grade: Offline`, играющие важную роль в обеÑпечении надежноÑти клаÑтера (подробнее о них в разделе [topology governor](#Topology-governor)). - Ð’ ответ выдаётÑÑ Ð²Ñегда новый `raft_id`, никому другому ранее не принадлежавший. 
diff --git a/src/main.rs b/src/main.rs index 52cb8b411963d9757fe89c306918b5eaba91a94a..d16bcd54c5fcfbfd1c7fa200fb47d3481042f249 100644 --- a/src/main.rs +++ b/src/main.rs @@ -14,6 +14,7 @@ use std::time::{Duration, Instant}; use storage::Clusterwide; use storage::{ClusterwideSpace, ProperyName}; use traft::rpc; +use traft::rpc::join; use traft::RaftSpaceAccess; use clap::StructOpt as _; @@ -869,7 +870,7 @@ fn start_boot(args: &args::Run) { fn start_join(args: &args::Run, leader_address: String) { tlog!(Info, ">>>>> start_join({leader_address})"); - let req = traft::JoinRequest { + let req = join::Request { cluster_id: args.cluster_id.clone(), instance_id: args.instance_id.clone(), replicaset_id: args.replicaset_id.clone(), @@ -892,10 +893,10 @@ fn start_join(args: &args::Run, leader_address: String) { // TODO: exponential decay let timeout = Duration::from_secs(1); match rpc::net_box_call(&leader_address, &req, Duration::MAX) { - Ok(traft::JoinResponse::Ok(resp)) => { + Ok(join::Response::Ok(resp)) => { break resp; } - Ok(traft::JoinResponse::ErrNotALeader(maybe_new_leader)) => { + Ok(join::Response::ErrNotALeader(maybe_new_leader)) => { tlog!(Warning, "join request failed: not a leader, retry..."); if let Some(new_leader) = maybe_new_leader { leader_address = new_leader.address; diff --git a/src/traft/mod.rs b/src/traft/mod.rs index da9cd8f5654128722963a55264f9645b275ed62b..f65c772a389d27ff51f3195d2e9d78bacab56794 100644 --- a/src/traft/mod.rs +++ b/src/traft/mod.rs @@ -29,8 +29,7 @@ use protobuf::Message as _; pub use network::ConnectionPool; pub use raft_storage::RaftSpaceAccess; -pub use rpc::join::Request as JoinRequest; -pub use rpc::join::Response as JoinResponse; +pub use rpc::join; pub use rpc::update_instance::Request as UpdateInstanceRequest; pub use rpc::update_instance::Response as UpdateInstanceResponse; pub use topology::Topology; @@ -821,12 +820,12 @@ pub trait ContextCoercion: Serialize + DeserializeOwned { /// Request to change cluster topology. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub enum TopologyRequest { - Join(JoinRequest), + Join(join::Request), UpdateInstance(UpdateInstanceRequest), } -impl From<JoinRequest> for TopologyRequest { - fn from(j: JoinRequest) -> Self { +impl From<join::Request> for TopologyRequest { + fn from(j: join::Request) -> Self { Self::Join(j) } } diff --git a/src/traft/node.rs b/src/traft/node.rs index 6dbfbab56bbe009f602da142b24f1ba9773cfe86..dfdb056ed99f64e12dd0141febd571c769460921 100644 --- a/src/traft/node.rs +++ b/src/traft/node.rs @@ -52,14 +52,14 @@ use crate::traft::event; use crate::traft::event::Event; use crate::traft::notify::Notify; use crate::traft::rpc::sharding::cfg::ReplicasetWeights; -use crate::traft::rpc::{replication, sharding, sync}; +use crate::traft::rpc::{join, replication, sharding, sync}; use crate::traft::ConnectionPool; use crate::traft::LogicalClock; use crate::traft::Op; use crate::traft::RaftSpaceAccess; use crate::traft::Topology; use crate::traft::TopologyRequest; -use crate::traft::{JoinRequest, UpdateInstanceRequest}; +use crate::traft::UpdateInstanceRequest; use super::OpResult; use super::{CurrentGrade, CurrentGradeVariant, TargetGradeVariant}; @@ -469,7 +469,7 @@ impl NodeImpl { // FIXME: remove this once we introduce some 'async' stuff let notify_for_address; let instance = match req { - TopologyRequest::Join(JoinRequest { + TopologyRequest::Join(join::Request { instance_id, replicaset_id, advertise_address, diff --git a/src/traft/topology.rs b/src/traft/topology.rs index b16e441f56b09150f96bc8b26591199b87e10e62..85e90ceef337c33cbe17db07b4e8d6b7ec9ee4e0 100644 --- a/src/traft/topology.rs +++ b/src/traft/topology.rs @@ -408,7 +408,7 @@ mod tests { ]) .with_replication_factor(2); - // JoinRequest with a given instance_id online. + // join::Request with a given instance_id online. // - It must be an impostor, return an error. 
// - Even if it's a fair rebootstrap, it will be marked as // unreachable soon (when we implement failover) an the error @@ -420,7 +420,7 @@ "i1 is already joined", ); - // JoinRequest with a given instance_id offline (or unreachable). + // join::Request with a given instance_id offline (or unreachable). // - Presumably it's a rebootstrap. // 1. Perform auto-expel, unless it threatens data safety (TODO). // 2. Assign new raft_id. @@ -440,7 +440,7 @@ // TODO // - // JoinRequest with a given instance_id bootstrtapping. + // join::Request with a given instance_id bootstrapping. // - Presumably it's a retry after tarantool bootstrap failure. // 1. Perform auto-expel (it's always ok until bootstrap // finishes).