//! This module encapsulates most of the application-specific logic.
//!
//! It's responsible for
//! - handling proposals,
//! - handling configuration changes,
//! - processing raft `Ready` - persisting entries, communicating with other raft nodes.

use crate::governor;
use crate::has_grades;
use crate::instance::Instance;
use crate::kvcell::KVCell;
use crate::loop_start;
use crate::r#loop::FlowControl;
use crate::reachability::InstanceReachabilityManager;
use crate::rpc;
use crate::schema::{Distribution, IndexDef, TableDef};
use crate::sentinel;
use crate::storage::ddl_meta_drop_space;
use crate::storage::SnapshotData;
use crate::storage::{ddl_abort_on_master, ddl_meta_space_update_operable};
use crate::storage::{local_schema_version, set_local_schema_version};
use crate::storage::{Clusterwide, ClusterwideTable, PropertyName};
use crate::stringify_cfunc;
use crate::tlog;
use crate::traft;
use crate::traft::error::Error;
use crate::traft::network::ConnectionPool;
use crate::traft::network::WorkerOptions;
use crate::traft::notify::{notification, Notifier, Notify};
use crate::traft::op::{Acl, Ddl, Dml, Op, OpResult};
use crate::traft::LogicalClock;
use crate::traft::RaftEntryId;
use crate::traft::RaftId;
use crate::traft::RaftIndex;
use crate::traft::RaftSpaceAccess;
use crate::traft::RaftTerm;
use crate::unwrap_ok_or;
use crate::unwrap_some_or;
use crate::util::AnyWithTypeName;
use crate::warn_or_panic;
use ::raft::prelude as raft;
use ::raft::Error as RaftError;
use ::raft::StateRole as RaftStateRole;
use ::raft::StorageError;
use ::raft::INVALID_ID;
use ::tarantool::fiber;
use ::tarantool::fiber::mutex::MutexGuard;
use ::tarantool::fiber::r#async::timeout::Error as TimeoutError;
use ::tarantool::fiber::r#async::timeout::IntoTimeout as _;
use ::tarantool::fiber::r#async::{oneshot, watch};
use ::tarantool::fiber::Mutex;
use ::tarantool::index::FieldType as IFT;
use ::tarantool::index::Part;
use ::tarantool::space::FieldType as SFT;
use ::tarantool::time::Instant;
use ::tarantool::transaction::transaction;
use ::tarantool::tuple::Decode;
use std::cell::RefCell;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::rc::Rc;
use std::time::Duration;
use ApplyEntryResult::*;
type RawNode = raft::RawNode<RaftSpaceAccess>;
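
// The `ApplyEntryResult` definition itself was elided from this listing;
// below is a sketch consistent with the `use ApplyEntryResult::*` import
// above and the matches further down (the exact doc comments are assumptions).
#[derive(Debug, PartialEq, Eq)]
enum ApplyEntryResult {
    /// Applying the entry failed and should be retried later.
    SleepAndRetry,
    /// The entry was applied successfully; proceed to the next one.
    EntryApplied,
}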
::tarantool::define_str_enum! {
pub enum RaftState {
Follower = "Follower",
Candidate = "Candidate",
Leader = "Leader",
PreCandidate = "PreCandidate",
}
}
impl RaftState {
pub fn is_leader(&self) -> bool {
matches!(self, Self::Leader)
}
}
impl From<RaftStateRole> for RaftState {
fn from(role: RaftStateRole) -> Self {
match role {
RaftStateRole::Follower => Self::Follower,
RaftStateRole::Candidate => Self::Candidate,
RaftStateRole::Leader => Self::Leader,
RaftStateRole::PreCandidate => Self::PreCandidate,
}
}
}
#[derive(Copy, Clone, Debug, tlua::Push, tlua::PushInto)]
pub struct Status {
/// `raft_id` of the current instance
pub id: RaftId,
/// `raft_id` of the leader instance
pub leader_id: Option<RaftId>,
/// Current term number
pub term: RaftTerm,
/// Current raft state
pub raft_state: RaftState,
/// Current state of the main loop.
///
/// Set this before yielding from [`NodeImpl::advance`].
pub main_loop_status: &'static str,
}

impl Status {
pub fn check_term(&self, requested_term: RaftTerm) -> traft::Result<()> {
if requested_term != self.term {
return Err(Error::TermMismatch {
requested: requested_term,
current: self.term,
});
}
Ok(())
}
}
/// The heart of `traft` module - the Node.
pub struct Node {
/// RaftId of the Node.
//
// It appears twice in the Node: here and in `status.id`.
// This is a conscious decision.
// `self.raft_id()` is used in Rust API, and
// `self.status()` is mostly useful in Lua API.

pub(crate) raft_id: RaftId,
node_impl: Rc<Mutex<NodeImpl>>,
pub(crate) storage: Clusterwide,
pub(crate) raft_storage: RaftSpaceAccess,
pub(crate) main_loop: MainLoop,
pub(crate) governor_loop: governor::Loop,
pub(crate) sentinel_loop: sentinel::Loop,
status: watch::Receiver<Status>,
applied: watch::Receiver<RaftIndex>,
/// Should be locked during join and update-instance requests
/// to avoid costly cas conflicts during concurrent requests.
pub instances_update: Mutex<()>,
}
impl std::fmt::Debug for Node {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Node")
.field("raft_id", &self.raft_id)
.finish_non_exhaustive()
}
}

static mut RAFT_NODE: Option<Box<Node>> = None;
impl Node {
/// Initialize the global raft node singleton. Returns a reference to it.
///
/// Returns an error in case of storage failure.
/// **This function yields**
pub fn init(
storage: Clusterwide,
raft_storage: RaftSpaceAccess,
) -> Result<&'static Self, Error> {
if unsafe { RAFT_NODE.is_some() } {
return Err(Error::other("raft node is already initialized"));
}

let opts = WorkerOptions {
raft_msg_handler: stringify_cfunc!(proc_raft_interact),
call_timeout: MainLoop::TICK.saturating_mul(4),
..Default::default()
};
let mut pool = ConnectionPool::new(storage.clone(), opts);
let instance_reachability = Rc::new(RefCell::new(InstanceReachabilityManager::new(
storage.clone(),
)));
pool.instance_reachability = instance_reachability.clone();
let pool = Rc::new(pool);

let node_impl = NodeImpl::new(pool.clone(), storage.clone(), raft_storage.clone())?;
let raft_id = node_impl.raft_id();
let status = node_impl.status.subscribe();
let applied = node_impl.applied.subscribe();
let node_impl = Rc::new(Mutex::new(node_impl));
// Raft main loop accesses the global node reference,
// so it must be initialized before the main loop starts.
let guard = crate::util::NoYieldsGuard::new();
let node = Node {
raft_id,
main_loop: MainLoop::start(node_impl.clone()),
governor_loop: governor::Loop::start(
pool.clone(),
status.clone(),
storage.clone(),
raft_storage.clone(),
),
sentinel_loop: sentinel::Loop::start(
pool,
status.clone(),
storage.clone(),
raft_storage.clone(),
instance_reachability,
),
node_impl,
status,
applied,
instances_update: Mutex::new(()),
storage,
raft_storage,
};

unsafe { RAFT_NODE = Some(Box::new(node)) };
let node = global().expect("just initialized it");
drop(guard);
node.tick_and_yield(0);
Ok(node)
}

#[inline(always)]
pub fn raft_id(&self) -> RaftId {
self.raft_id
}

#[inline(always)]
pub fn status(&self) -> Status {
self.status.get()
}

#[inline(always)]
pub(crate) fn node_impl(&self) -> MutexGuard<NodeImpl> {
self.node_impl.lock()
}
/// Wait for the status to be changed.
/// **This function yields**
#[inline(always)]
pub fn wait_status(&self) {
fiber::block_on(self.status.clone().changed()).unwrap();
}
/// Returns current applied [`RaftIndex`].
#[inline(always)]
pub fn get_index(&self) -> RaftIndex {
self.applied.get()
}
/// Performs the quorum read operation.
///
/// It works the following way:
///
/// 1. The instance forwards a request (`MsgReadIndex`) to a raft
/// leader. In case there's no leader at the moment, the function
/// returns `Err(ProposalDropped)`.
/// 2. Raft leader tracks its `commit_index` and broadcasts a
/// heartbeat to followers to make certain that it's still a
/// leader.
/// 3. As soon as the heartbeat is acknowledged by the quorum, the
/// function returns that index.
/// 4. The instance awaits when the index is applied. If timeout
/// expires beforehand, the function returns `Err(Timeout)`.
///
/// Returns current applied [`RaftIndex`].
///
/// **This function yields**
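///
/// # Example (sketch)
///
/// A minimal, hypothetical usage from a fiber on a started instance,
/// assuming the node singleton is already initialized:
///
/// ```ignore
/// // Read barrier: once this returns, all entries committed before
/// // the call are applied locally.
/// let node = traft::node::global()?;
/// let applied = node.read_index(Duration::from_secs(3))?;
/// ```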
pub fn read_index(&self, timeout: Duration) -> traft::Result<RaftIndex> {
let deadline = fiber::clock().saturating_add(timeout);
let notify = self.raw_operation(|node_impl| node_impl.read_index_async())?;
let index: RaftIndex = fiber::block_on(notify.recv_timeout(timeout))?;
self.wait_index(index, deadline.duration_since(fiber::clock()))
}
/// Waits for [`RaftIndex`] to be applied to the storage locally.
///
/// Returns current applied [`RaftIndex`]. It can be equal to or
/// greater than the target one. If timeout expires beforehand, the
/// function returns `Err(Timeout)`.
///
/// **This function yields**
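///
/// # Example (sketch)
///
/// A hypothetical follow-up to a proposal that returned the entry's
/// raft `index`:
///
/// ```ignore
/// // Block until this instance has applied at least `index`.
/// let current = node.wait_index(index, Duration::from_secs(3))?;
/// assert!(current >= index);
/// ```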
#[inline]
pub fn wait_index(&self, target: RaftIndex, timeout: Duration) -> traft::Result<RaftIndex> {
tlog!(Debug, "waiting for applied index {target}");
let mut applied = self.applied.clone();
let deadline = fiber::clock().saturating_add(timeout);
fiber::block_on(async {
loop {
let current = self.get_index();
if current >= target {
tlog!(
Debug,
"done waiting for applied index {target}, current: {current}"
);
return Ok(current);
}
let timeout = deadline.duration_since(fiber::clock());
let res = applied.changed().timeout(timeout).await;
if let Err(TimeoutError::Expired) = res {
tlog!(
Debug,
"failed waiting for applied index {target}: timeout, current: {current}"
);
return Err(Error::Timeout);
}
}
})
}
/// Propose an operation and wait for its result.
/// **This function yields**
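///
/// # Example (sketch)
///
/// A hypothetical proposal of a no-op entry with a 3 second timeout:
///
/// ```ignore
/// node.propose_and_wait(Op::Nop, Duration::from_secs(3))?;
/// ```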
pub fn propose_and_wait<T: OpResult + Into<Op>>(
&self,
op: T,
timeout: Duration,
) -> traft::Result<T::Result> {
let notify = self.raw_operation(|node_impl| node_impl.propose_async(op))?;
fiber::block_on(notify.recv_timeout::<T::Result>(timeout))
}
/// Become a candidate and wait for a main loop round so that there's a
/// chance we become the leader.
/// **This function yields**
pub fn campaign_and_yield(&self) -> traft::Result<()> {
self.raw_operation(|node_impl| node_impl.campaign())?;
// Even though we don't expect a response, we still should let the
// main_loop do an iteration. Without rescheduling, the Ready state
// wouldn't be processed, the Status wouldn't be updated, and some
// assertions may fail (e.g. in `postjoin()` in `main.rs`).
fiber::reschedule();
Ok(())
}
/// **This function yields**
pub fn step_and_yield(&self, msg: raft::Message) {
self.raw_operation(|node_impl| node_impl.step(msg))
.map_err(|e| tlog!(Error, "{e}"))
.ok();
// even though we don't expect a response, we still should let the
// main_loop do an iteration
fiber::reschedule();
}
/// **This function yields**
pub fn tick_and_yield(&self, n_times: u32) {
self.raw_operation(|node_impl| node_impl.tick(n_times));
// even though we don't expect a response, we still should let the
// main_loop do an iteration
fiber::reschedule();
}
/// **This function yields**
pub fn timeout_now(&self) {
let raft_id = self.raft_id();
self.step_and_yield(raft::Message {
to: raft_id,
from: raft_id,
msg_type: raft::MessageType::MsgTimeoutNow,
..Default::default()
})
}
/// Only the conf_change_loop on a leader is eligible to call this function.
///
/// **This function yields**
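///
/// # Example (sketch)
///
/// A hypothetical promotion of a raft learner, proposed from governor
/// code on the leader (`term` read from the current status beforehand):
///
/// ```ignore
/// let mut change = raft::ConfChangeSingle::default();
/// change.set_change_type(raft::ConfChangeType::AddNode);
/// change.node_id = raft_id;
/// let mut cc = raft::ConfChangeV2::default();
/// cc.set_changes(vec![change].into());
/// node.propose_conf_change_and_wait(term, cc)?;
/// ```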
pub(crate) fn propose_conf_change_and_wait(
&self,
term: RaftTerm,
conf_change: raft::ConfChangeV2,
) -> traft::Result<()> {
let notify =
self.raw_operation(|node_impl| node_impl.propose_conf_change_async(term, conf_change))?;
fiber::block_on(notify).unwrap()?;
Ok(())
}
/// Attempt to transfer leadership to a given node and yield.
///
/// **This function yields**
pub fn transfer_leadership_and_yield(&self, new_leader_id: RaftId) {
self.raw_operation(|node_impl| node_impl.raw_node.transfer_leader(new_leader_id));
fiber::reschedule();
}
/// This function **may yield** if `self.node_impl` mutex is acquired.
#[inline]
fn raw_operation<R>(&self, f: impl FnOnce(&mut NodeImpl) -> R) -> R {
let mut node_impl = self.node_impl.lock();
let res = f(&mut node_impl);
drop(node_impl);
res
}
#[inline]
pub fn all_traft_entries(&self) -> ::tarantool::Result<Vec<traft::Entry>> {
self.raft_storage.all_traft_entries()
}
}

pub(crate) struct NodeImpl {
pub raw_node: RawNode,
pub notifications: HashMap<LogicalClock, Notifier>,
joint_state_latch: KVCell<RaftIndex, oneshot::Sender<Result<(), RaftError>>>,
storage: Clusterwide,
raft_storage: RaftSpaceAccess,
pool: Rc<ConnectionPool>,
lc: LogicalClock,
status: watch::Sender<Status>,
applied: watch::Sender<RaftIndex>,
instance_reachability: Rc<RefCell<InstanceReachabilityManager>>,
}

impl NodeImpl {
fn new(
pool: Rc<ConnectionPool>,
storage: Clusterwide,
raft_storage: RaftSpaceAccess,
) -> Result<Self, RaftError> {
let box_err = |e| StorageError::Other(Box::new(e));
let raft_id: RaftId = raft_storage
.raft_id()
.map_err(box_err)?
.expect("raft_id should be set by the time the node is being initialized");
let applied: RaftIndex = raft_storage.applied().map_err(box_err)?;
let lc = {
let gen = raft_storage.gen().unwrap() + 1;
raft_storage.persist_gen(gen).unwrap();
LogicalClock::new(raft_id, gen)
};
let cfg = raft::Config {
id: raft_id,
applied,
pre_vote: true,
..Default::default()
};
let raw_node = RawNode::new(&cfg, raft_storage.clone(), tlog::root())?;
let (status, _) = watch::channel(Status {
id: raft_id,
leader_id: None,
term: traft::INIT_RAFT_TERM,
raft_state: RaftState::Follower,
main_loop_status: "idle",
});
let (applied, _) = watch::channel(applied);
Ok(Self {
raw_node,
notifications: Default::default(),
joint_state_latch: KVCell::new(),
storage,
raft_storage,
instance_reachability: pool.instance_reachability.clone(),
pool,
lc,
status,
applied,
})
}
fn raft_id(&self) -> RaftId {
self.raw_node.raft.id
}
pub fn read_index_async(&mut self) -> Result<Notify, RaftError> {
// In some states `raft-rs` ignores the ReadIndex request.
// Check it beforehand so we don't have to wait for the timeout.
//
// See for details:
// - <https://github.com/tikv/raft-rs/blob/v0.6.0/src/raft.rs#L2058>
// - <https://github.com/tikv/raft-rs/blob/v0.6.0/src/raft.rs#L2323>
let leader_doesnt_exist = self.raw_node.raft.leader_id == INVALID_ID;
let term_just_started = self.raw_node.raft.state == RaftStateRole::Leader
&& !self.raw_node.raft.commit_to_current_term();
if leader_doesnt_exist || term_just_started {
return Err(RaftError::ProposalDropped);
}
let (lc, notify) = self.schedule_notification();
// read_index puts this context into an Entry,
// so we have to compose a full EntryContext,
// even though a single LogicalClock would be enough.
let ctx = traft::EntryContextNormal::new(lc, Op::Nop);
self.raw_node.read_index(ctx.to_bytes());
Ok(notify)
}
/// **Doesn't yield**
#[inline]
pub fn propose_async<T>(&mut self, op: T) -> Result<Notify, RaftError>
where
T: Into<Op>,
{
let (lc, notify) = self.schedule_notification();
let ctx = traft::EntryContextNormal::new(lc, op.into());
self.raw_node.propose(ctx.to_bytes(), vec![])?;
Ok(notify)
}
/// Proposes a raft entry to be appended to the log and returns raft index
/// at which it is expected to be committed unless it gets rejected.
///
/// **Doesn't yield**
pub fn propose(&mut self, op: Op) -> Result<RaftIndex, RaftError> {
self.lc.inc();
let ctx = traft::EntryContextNormal::new(self.lc, op);
self.raw_node.propose(ctx.to_bytes(), vec![])?;
let index = self.raw_node.raft.raft_log.last_index();
Ok(index)
}
pub fn campaign(&mut self) -> Result<(), RaftError> {
self.raw_node.campaign()
}
pub fn step(&mut self, msg: raft::Message) -> Result<(), RaftError> {
if msg.to != self.raft_id() {
return Ok(());
}
// TODO check it's not a MsgPropose with op::Dml for updating _pico_instance.
// TODO check it's not a MsgPropose with ConfChange.
self.raw_node.step(msg)
}
pub fn tick(&mut self, n_times: u32) {
for _ in 0..n_times {
self.raw_node.tick();
}
}
fn propose_conf_change_async(
&mut self,
term: RaftTerm,
conf_change: raft::ConfChangeV2,
) -> Result<oneshot::Receiver<Result<(), RaftError>>, RaftError> {
// In some states proposing a ConfChange is impossible.
// Check if there's a reason to reject it.
// Checking leadership is only needed for the
// correct latch management. It doesn't affect
// raft correctness. Checking that the instance
// is a leader makes sure the proposed `ConfChange`
// is appended to the raft log immediately
// instead of being sent over the network as a
// `MsgPropose`.
if self.raw_node.raft.state != RaftStateRole::Leader {
return Err(RaftError::ConfChangeError("not a leader".into()));
}
if term != self.raw_node.raft.term {
return Err(RaftError::ConfChangeError("raft term mismatch".into()));
}
// Without this check the node would silently ignore the conf change.
// See https://github.com/tikv/raft-rs/blob/v0.6.0/src/raft.rs#L2014-L2026
if self.raw_node.raft.has_pending_conf() {
return Err(RaftError::ConfChangeError(
"already has pending confchange".into(),
));
}
let prev_index = self.raw_node.raft.raft_log.last_index();
self.raw_node.propose_conf_change(vec![], conf_change)?;
// Ensure the ConfChange was actually appended to the log.
// Otherwise it's a problem: the current instance isn't actually a
// leader (which should be impossible in theory, but we can't rule
// it out in practice) and the message was sent out to the raft
// network. This may lead to an inconsistency.
let last_index = self.raw_node.raft.raft_log.last_index();
assert_eq!(last_index, prev_index + 1);
if !self.joint_state_latch.is_empty() {
warn_or_panic!("joint state latch is locked");
}
let (tx, rx) = oneshot::channel();
self.joint_state_latch.insert(last_index, tx);
Ok(rx)
}
fn handle_committed_entries(
&mut self,
entries: &[raft::Entry],
expelled: &mut bool,
) -> traft::Result<()> {
let mut entries = entries.iter().peekable();
while let Some(&entry) = entries.peek() {
let entry = match traft::Entry::try_from(entry) {
Ok(v) => v,
Err(e) => {
tlog!(Error, "abnormal entry: {e}"; "entry" => ?entry);
// Skip the abnormal entry, otherwise we'd loop on it forever.
let _ = entries.next();
continue;
}
};
let mut apply_entry_result = EntryApplied;
let mut new_applied = None;
self.main_loop_status("handling committed entries");
let entry_index = entry.index;
match entry.entry_type {
raft::EntryType::EntryNormal => {
apply_entry_result = self.handle_committed_normal_entry(entry, expelled);
if apply_entry_result != EntryApplied {
return Ok(());
}
}
raft::EntryType::EntryConfChange | raft::EntryType::EntryConfChangeV2 => {
self.handle_committed_conf_change(entry)
}
}

let res = self.raft_storage.persist_applied(entry_index);
if let Err(e) = res {
tlog!(
Error,
"error persisting applied index: {e}";
"index" => entry_index
);
}
new_applied = Some(entry_index);
Ok(())
})?;
if let Some(new_applied) = new_applied {
self.applied
.send(new_applied)
.expect("applied shouldn't ever be borrowed across yields");
}
match apply_entry_result {
SleepAndRetry => {
self.main_loop_status("blocked by raft entry");
let timeout = MainLoop::TICK * 4;
fiber::sleep(timeout);
continue;
}
EntryApplied => {
// Actually advance the iterator.
let _ = entries.next();
}
}
}

Ok(())
}
fn wake_governor_if_needed(&self, op: &Op) {
let wake_governor = match &op {
Op::Dml(op) => {
matches!(
op.space().try_into(),
Ok(ClusterwideTable::Property
| ClusterwideTable::Replicaset
| ClusterwideTable::Instance)
)
}
Op::DdlPrepare { .. } => true,
_ => false,
};
// NOTE: this may be premature, because the dml may fail to apply and/or
// the transaction may be rolled back, but we ignore this for the sake
// of simplicity, as nothing bad can happen if governor makes another
// idle iteration.
if wake_governor {
let res = global()
.expect("node must be initialized by this point")
.governor_loop
.wakeup();
if let Err(e) = res {
tlog!(Warning, "failed waking up governor: {e}");
}
}
}
/// Is called during a transaction
fn handle_committed_normal_entry(
&mut self,
entry: traft::Entry,
expelled: &mut bool,
) -> ApplyEntryResult {
assert_eq!(entry.entry_type, raft::EntryType::EntryNormal);
let lc = entry.lc();
let index = entry.index;
let op = entry.into_op().unwrap_or(Op::Nop);
tlog!(Debug, "applying entry: {op}"; "index" => index);
self.wake_governor_if_needed(&op);
let storage_properties = &self.storage.properties;
// apply the operation
let mut result = Box::new(()) as Box<dyn AnyWithTypeName>;
match op {
Op::Nop => {}
Op::Dml(op) => {
let res = self.storage.do_dml(&op);
match &res {
Err(e) => {
tlog!(Error, "clusterwide dml failed: {e}");
}
Ok(Some(tuple))
if op.space() == ClusterwideTable::Instance.id()
// TODO: do we need to log something into the audit when deleting instances?
&& !matches!(op, Dml::Delete { .. }) =>
{
// FIXME: we do this prematurely, because the
// transaction may still be rolled back for some reason.
let new: Instance = tuple
.decode()
.expect("tuple already passed format verification");
if has_grades!(new, Expelled -> *) && new.raft_id == self.raft_id() {
// cannot exit during a transaction
*expelled = true;
}
// Check if we're handling a "new node joined" event:
// * Either there's no tuple for this node in the storage;
// * Or its raft id has changed, meaning it's no longer the same node.
let prev = self.storage.instances.get(&new.instance_id).ok();
if prev.as_ref().map(|x| x.raft_id) != Some(new.raft_id) {
let instance_id = &new.instance_id;
message: "a new instance `{instance_id}` joined the cluster",
title: "join_instance",
severity: Low,
instance_id: %instance_id,
raft_id: %new.raft_id,
);
}
if prev.as_ref().map(|x| x.current_grade) != Some(new.current_grade) {
let instance_id = &new.instance_id;
let grade = &new.current_grade;
crate::audit!(
message: "current grade of instance `{instance_id}` changed to {grade}",
title: "change_current_grade",
severity: Medium,
instance_id: %instance_id,
new_grade: %grade,
);
}
if prev.as_ref().map(|x| x.target_grade) != Some(new.target_grade) {
let instance_id = &new.instance_id;
let grade = &new.target_grade;
crate::audit!(
message: "target grade of instance `{instance_id}` changed to {grade}",
title: "change_target_grade",
severity: Low,
instance_id: %instance_id,
raft_id: %new.raft_id,
);
}
}
_ => {}
}
result = Box::new(res) as _;
}
Op::DdlPrepare {
ddl,
schema_version,
} => {
self.apply_op_ddl_prepare(ddl, schema_version)
.expect("storage should not fail");
}
Op::DdlCommit => {
let v_local = local_schema_version().expect("storage should not fail");
let v_pending = storage_properties
.pending_schema_version()
.expect("granted we don't mess up log compaction, this should not be None");
let ddl = storage_properties
.pending_schema_change()
.expect("granted we don't mess up log compaction, this should not be None");
// This instance is catching up to the cluster.
if v_local < v_pending {
// Master applies schema change at this point.
let res = rpc::ddl_apply::apply_schema_change(
&self.storage,
&ddl,
);
match res {
Err(rpc::ddl_apply::Error::Other(err)) => {
panic!("storage should not fail, but failed with: {err}")
}
Err(rpc::ddl_apply::Error::Aborted(reason)) => {
tlog!(Warning, "failed applying committed ddl operation: {reason}";
"ddl" => ?ddl,
);
return SleepAndRetry;
}
Ok(()) => {}
}
}

match ddl {
Ddl::CreateTable { id, name, .. } => {
ddl_meta_space_update_operable(&self.storage, id, true)
.expect("storage shouldn't fail");
message: "created table `{name}`",
title: "create_table",
severity: Medium,
Ddl::DropTable { id } => {
let space_raw = self.storage.tables.get(id);
let space = space_raw.ok().flatten().expect("failed to get space");
ddl_meta_drop_space(&self.storage, id).expect("storage shouldn't fail");
let name = &space.name;
crate::audit!(
message: "dropped table `{name}`",
title: "drop_table",
severity: Medium,
);
}
_ => {
todo!()
}
}
storage_properties
.delete(PropertyName::PendingSchemaChange)
.expect("storage should not fail");
storage_properties
.delete(PropertyName::PendingSchemaVersion)
.expect("storage should not fail");
storage_properties
.put(PropertyName::GlobalSchemaVersion, &v_pending)
.expect("storage should not fail");
}
Op::DdlAbort => {
let v_local = local_schema_version().expect("storage should not fail");
let v_pending: u64 = storage_properties
.pending_schema_version()
.expect("granted we don't mess up log compaction, this should not be None");
let ddl = storage_properties
.pending_schema_change()
.expect("granted we don't mess up log compaction, this should not be None");
// This condition means that schema versions must always increase,
// even after a DdlAbort.
if v_local == v_pending {
if self.is_readonly() {
// Wait for tarantool replication with master to progress.
return SleepAndRetry;
} else {
let v_global = storage_properties
.global_schema_version()
.expect("storage should not fail");
ddl_abort_on_master(&ddl, v_global).expect("storage should not fail");
Ddl::CreateTable { id, .. } => {
ddl_meta_drop_space(&self.storage, id).expect("storage shouldn't fail");
Ddl::DropTable { id } => {
ddl_meta_space_update_operable(&self.storage, id, true)
.expect("storage shouldn't fail");
_ => {
todo!()
}
}
storage_properties
.delete(PropertyName::PendingSchemaChange)
.expect("storage should not fail");
storage_properties
.delete(PropertyName::PendingSchemaVersion)
.expect("storage should not fail");
let v_local = local_schema_version().expect("storage shoudl not fail");
let v_pending = acl.schema_version();
if v_local < v_pending {
if self.is_readonly() {
// Wait for tarantool replication with master to progress.
return SleepAndRetry;
} else {
match &acl {
Acl::CreateUser { user_def } => {
acl::on_master_create_user(user_def)
.expect("creating user shouldn't fail");
}
Acl::ChangeAuth { user_id, auth, .. } => {
acl::on_master_change_user_auth(*user_id, auth)
.expect("changing user auth shouldn't fail");
}
Acl::DropUser { user_id, .. } => {
acl::on_master_drop_user(*user_id)
.expect("dropping user shouldn't fail");
}
Acl::CreateRole { role_def } => {
acl::on_master_create_role(role_def)
.expect("creating role shouldn't fail");
}
Acl::DropRole { role_id, .. } => {
acl::on_master_drop_role(*role_id)
.expect("droping role shouldn't fail");
}
Acl::GrantPrivilege { priv_def } => {
acl::on_master_grant_privilege(priv_def)
.expect("granting a privilege shouldn't fail");
}
Acl::RevokePrivilege { priv_def } => {
acl::on_master_revoke_privilege(priv_def)
.expect("revoking a privilege shouldn't fail");
}
}
set_local_schema_version(v_pending).expect("storage should not fail");
}
}
match &acl {
Acl::CreateUser { user_def } => {
acl::global_create_user(&self.storage, user_def)
.expect("persisting a user definition shouldn't fail");
}
Acl::ChangeAuth { user_id, auth, .. } => {
acl::global_change_user_auth(&self.storage, *user_id, auth)
.expect("changing user definition shouldn't fail");
}
Acl::DropUser { user_id, .. } => {
acl::global_drop_user(&self.storage, *user_id)
.expect("droping a user definition shouldn't fail");
}
Acl::CreateRole { role_def } => {
acl::global_create_role(&self.storage, role_def)
.expect("persisting a role definition shouldn't fail");
}
Acl::DropRole { role_id, .. } => {
acl::global_drop_role(&self.storage, *role_id)
.expect("droping a role definition shouldn't fail");
}
Acl::GrantPrivilege { priv_def } => {
acl::global_grant_privilege(&self.storage, priv_def)
.expect("persiting a privilege definition shouldn't fail");
}
Acl::RevokePrivilege { priv_def } => {
acl::global_revoke_privilege(&self.storage, priv_def)
.expect("removing a privilege definition shouldn't fail");