From 4dde8dd81c59777db02830d5b0bc13a3106c4b57 Mon Sep 17 00:00:00 2001
From: Dmitry Rodionov <d.rodionov@picodata.io>
Date: Tue, 12 Dec 2023 17:39:15 +0300
Subject: [PATCH] chore: deduplicate constants for admin user id

Before this patch we had ADMIN_ID in schema.rs and ADMIN_USER_ID in
lib.rs. This patch removes ADMIN_USER_ID in favor of ADMIN_ID.
---
 src/access_control.rs      | 22 ++++++++++------------
 src/lib.rs                 |  5 -----
 src/luamod.rs              |  6 +++---
 src/rpc/join.rs            | 10 +++++-----
 src/rpc/update_instance.rs |  6 +++---
 src/schema.rs              |  4 +++-
 src/sql.rs                 | 12 ++++++------
 7 files changed, 30 insertions(+), 35 deletions(-)

diff --git a/src/access_control.rs b/src/access_control.rs
index 24be5a8cf8..be5bb5da68 100644
--- a/src/access_control.rs
+++ b/src/access_control.rs
@@ -42,10 +42,9 @@ use tarantool::{
 };
 
 use crate::{
-    schema::{PrivilegeDef, PrivilegeType, SchemaObjectType as PicoSchemaObjectType},
+    schema::{PrivilegeDef, PrivilegeType, SchemaObjectType as PicoSchemaObjectType, ADMIN_ID},
     storage::{space_by_id, Clusterwide, ToEntryIter},
     traft::op::{self, Op},
-    ADMIN_USER_ID,
 };
 
 tarantool::define_str_enum! {
@@ -290,7 +289,7 @@ fn access_check_grant_revoke(
     // In vanilla tarantool each object type has counterpart with entity suffix
     // i e BOX_SC_ENTITY_SPACE. Since these variants are not stored we do
     // not materialize them as valid SchemaObjectType variants and streamline this check.
-    if grantor_id != ADMIN_USER_ID {
+    if grantor_id != ADMIN_ID {
         return Err(make_access_denied(
             access_name,
             priv_def.object_type(),
@@ -306,7 +305,7 @@ fn access_check_grant_revoke(
     match priv_def.object_type() {
         PicoSchemaObjectType::Universe => {
             // Only admin can grant on universe
-            if grantor_id != ADMIN_USER_ID {
+            if grantor_id != ADMIN_ID {
                 return Err(make_access_denied(
                     access_name,
                     PicoSchemaObjectType::Universe,
@@ -322,7 +321,7 @@ fn access_check_grant_revoke(
             assert_eq!(object_id, meta.id, "user metadata id mismatch");
 
             // Only owner or admin can grant on space
-            if meta.user_id != grantor_id && grantor_id != ADMIN_USER_ID {
+            if meta.user_id != grantor_id && grantor_id != ADMIN_ID {
                 return Err(make_access_denied(
                     access_name,
                     PicoSchemaObjectType::Table,
@@ -349,7 +348,7 @@ fn access_check_grant_revoke(
             // Everyone can grant 'PUBLIC' role.
             // Note that having a role means having execute privilege on it.
             if granted_role.owner_id != grantor_id
-                && grantor_id != ADMIN_USER_ID
+                && grantor_id != ADMIN_ID
                 && !(granted_role.name == "public"
                     && priv_def.privilege() == PrivilegeType::Execute)
             {
@@ -379,7 +378,7 @@ fn access_check_grant_revoke(
             }
 
             // Only owner or admin can grant on user
-            if target_sys_user.owner_id != grantor_id && grantor_id != ADMIN_USER_ID {
+            if target_sys_user.owner_id != grantor_id && grantor_id != ADMIN_ID {
                 return Err(make_access_denied(
                     access_name,
                     PicoSchemaObjectType::User,
@@ -499,7 +498,7 @@ pub(super) fn access_check_op(
         Op::Dml(dml) => access_check_dml(dml, as_user),
         Op::DdlPrepare { ddl, .. } => access_check_ddl(ddl, as_user),
         Op::DdlCommit | Op::DdlAbort => {
-            if as_user != ADMIN_USER_ID {
+            if as_user != ADMIN_ID {
                 let sys_user = user_by_id(as_user)?;
                 return Err(make_access_denied(
                     "ddl",
@@ -531,7 +530,6 @@ mod tests {
             Clusterwide,
         },
         traft::op::{Acl, Ddl, Dml, Op},
-        ADMIN_USER_ID,
     };
     use tarantool::{
         auth::{AuthData, AuthDef, AuthMethod},
@@ -552,9 +550,9 @@ mod tests {
 
     #[tarantool::test]
     fn decode_user_metadata() {
-        let sys_user = user_by_id(ADMIN_USER_ID).unwrap();
-        assert_eq!(sys_user.id, ADMIN_USER_ID);
-        assert_eq!(sys_user.owner_id, ADMIN_USER_ID);
+        let sys_user = user_by_id(ADMIN_ID).unwrap();
+        assert_eq!(sys_user.id, ADMIN_ID);
+        assert_eq!(sys_user.owner_id, ADMIN_ID);
         assert_eq!(&sys_user.name, "admin");
         assert_eq!(sys_user.ty, UserMetadataKind::User);
         assert_eq!(sys_user.auth, HashMap::new());
diff --git a/src/lib.rs b/src/lib.rs
index 36ed191ff6..4d532ae77e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -7,7 +7,6 @@ use ::raft::prelude as raft;
 use ::tarantool::error::Error as TntError;
 use ::tarantool::fiber;
 use ::tarantool::fiber::r#async::timeout::{self, IntoTimeout};
-use ::tarantool::session::UserId;
 use ::tarantool::time::Instant;
 use ::tarantool::tlua;
 use ::tarantool::transaction::transaction;
@@ -62,10 +61,6 @@ pub mod traft;
 pub mod util;
 pub mod vshard;
 
-// This is the user id used when we need to elevate privileges
-// because current user doesnt have access to system spaces
-pub(crate) const ADMIN_USER_ID: UserId = 1;
-
 macro_rules! lua_preload {
     ($lua:ident, $module:literal, $path_prefix:literal, $path:literal) => {
         $lua.exec_with(
diff --git a/src/luamod.rs b/src/luamod.rs
index 9b19ef8c7f..1e798d3236 100644
--- a/src/luamod.rs
+++ b/src/luamod.rs
@@ -5,14 +5,14 @@ use std::time::Duration;
 
 use crate::cas::{self, compare_and_swap};
 use crate::instance::InstanceId;
-use crate::schema::{self, CreateTableParams};
+use crate::schema::{self, CreateTableParams, ADMIN_ID};
 use crate::traft::error::Error;
 use crate::traft::op::{self, Op};
 use crate::traft::{self, node, RaftIndex, RaftTerm};
 use crate::util::str_eq;
 use crate::util::INFINITY;
 use crate::util::{duration_from_secs_f64_clamped, effective_user_id};
-use crate::{args, rpc, sync, tlog, ADMIN_USER_ID};
+use crate::{args, rpc, sync, tlog};
 use ::tarantool::fiber;
 use ::tarantool::session;
 use ::tarantool::tlua;
@@ -1152,7 +1152,7 @@ pub(crate) fn setup(args: &args::Run) {
             -> traft::Result<RaftIndex> {
                 // su is needed here because for cas execution we need to consult with system spaces like `_raft_state`
                 // and the user executing cas request may not (even shouldnt) have access to these spaces
-                let su = session::su(ADMIN_USER_ID)?;
+                let su = session::su(ADMIN_ID)?;
 
                 let op = op::Dml::from_lua_args(op, su.original_user_id)
                     .map_err(traft::error::Error::other)?;
diff --git a/src/rpc/join.rs b/src/rpc/join.rs
index 05d4925c91..ae974c7737 100644
--- a/src/rpc/join.rs
+++ b/src/rpc/join.rs
@@ -8,12 +8,12 @@ use crate::instance::Grade;
 use crate::instance::GradeVariant::*;
 use crate::instance::{Instance, InstanceId};
 use crate::replicaset::ReplicasetId;
+use crate::schema::ADMIN_ID;
 use crate::storage::ClusterwideTable;
 use crate::storage::{Clusterwide, ToEntryIter as _};
 use crate::traft::op::{Dml, Op};
 use crate::traft::{self, RaftId};
 use crate::traft::{error::Error, node, Address, PeerAddress, Result};
-use crate::ADMIN_USER_ID;
 
 use ::tarantool::fiber;
 
@@ -92,9 +92,9 @@ pub fn handle_join_request_and_wait(req: Request, timeout: Duration) -> Result<R
         raft_id: instance.raft_id,
         address: req.advertise_address.clone(),
     };
-    let op_addr = Dml::replace(ClusterwideTable::Address, &peer_address, ADMIN_USER_ID)
+    let op_addr = Dml::replace(ClusterwideTable::Address, &peer_address, ADMIN_ID)
         .expect("encoding should not fail");
-    let op_instance = Dml::replace(ClusterwideTable::Instance, &instance, ADMIN_USER_ID)
+    let op_instance = Dml::replace(ClusterwideTable::Instance, &instance, ADMIN_ID)
         .expect("encoding should not fail");
     let ranges = vec![
         cas::Range::new(ClusterwideTable::Instance),
@@ -132,7 +132,7 @@ pub fn handle_join_request_and_wait(req: Request, timeout: Duration) -> Result<R
             term: raft_storage.term()?,
             ranges: ranges.clone(),
         },
-        ADMIN_USER_ID,
+        ADMIN_ID,
         deadline.duration_since(fiber::clock()),
     ));
     handle_result!(cas::compare_and_swap(
@@ -142,7 +142,7 @@ pub fn handle_join_request_and_wait(req: Request, timeout: Duration) -> Result<R
             term: raft_storage.term()?,
             ranges,
         },
-        ADMIN_USER_ID,
+        ADMIN_ID,
         deadline.duration_since(fiber::clock()),
     ));
     node.main_loop.wakeup();
diff --git a/src/rpc/update_instance.rs b/src/rpc/update_instance.rs
index d38566ace3..bb1b21578e 100644
--- a/src/rpc/update_instance.rs
+++ b/src/rpc/update_instance.rs
@@ -6,11 +6,11 @@ use crate::instance::Grade;
 use crate::instance::GradeVariant;
 use crate::instance::GradeVariant::*;
 use crate::instance::{Instance, InstanceId};
+use crate::schema::ADMIN_ID;
 use crate::storage::{Clusterwide, ClusterwideTable};
 use crate::traft::op::{Dml, Op};
 use crate::traft::Result;
 use crate::traft::{error::Error, node};
-use crate::ADMIN_USER_ID;
 
 use ::tarantool::fiber;
 
@@ -125,7 +125,7 @@ pub fn handle_update_instance_request_and_wait(req: Request, timeout: Duration)
         return Ok(());
     }
 
-    let dml = Dml::replace(ClusterwideTable::Instance, &new_instance, ADMIN_USER_ID)
+    let dml = Dml::replace(ClusterwideTable::Instance, &new_instance, ADMIN_ID)
         .expect("encoding should not fail");
 
     let ranges = vec![
@@ -140,7 +140,7 @@ pub fn handle_update_instance_request_and_wait(req: Request, timeout: Duration)
             term: raft_storage.term()?,
             ranges,
         },
-        ADMIN_USER_ID,
+        ADMIN_ID,
         deadline.duration_since(fiber::clock()),
     );
     match res {
diff --git a/src/schema.rs b/src/schema.rs
index 7988c8aff5..fab758bc14 100644
--- a/src/schema.rs
+++ b/src/schema.rs
@@ -335,6 +335,8 @@ impl RoleDef {
 
 // default users
 pub const GUEST_ID: UserId = 0;
+// Note: Admin user id is used when we need to elevate privileges
+// because current user doesnt have access to system spaces
 pub const ADMIN_ID: UserId = 1;
 
 // default roles
@@ -1308,7 +1310,7 @@ mod tests {
 #[cfg(test)]
 mod test {
     use super::*;
-    use tarantool::tuple::{Decode, ToTupleBuffer};
+    use tarantool::tuple::ToTupleBuffer;
 
     #[test]
     #[rustfmt::skip]
diff --git a/src/sql.rs b/src/sql.rs
index ec059342b3..d7203e56f7 100644
--- a/src/sql.rs
+++ b/src/sql.rs
@@ -2,7 +2,7 @@
 use crate::schema::{
     wait_for_ddl_commit, CreateTableParams, DistributionParam, Field, PrivilegeDef, PrivilegeType,
-    RoleDef, SchemaObjectType, ShardingFn, UserDef,
+    RoleDef, SchemaObjectType, ShardingFn, UserDef, ADMIN_ID,
 };
 use crate::sql::pgproto::{
     with_portals, BoxedPortal, Describe, Descriptor, UserDescriptors, PG_PORTALS,
@@ -15,7 +15,7 @@ use crate::traft::node::Node as TraftNode;
 use crate::traft::op::{Acl as OpAcl, Ddl as OpDdl, Op};
 use crate::traft::{self, node};
 use crate::util::{duration_from_secs_f64_clamped, effective_user_id};
-use crate::{cas, unwrap_ok_or, ADMIN_USER_ID};
+use crate::{cas, unwrap_ok_or};
 use sbroad::backend::sql::ir::{EncodedPatternWithParams, PatternWithParams};
 use sbroad::debug;
@@ -89,7 +89,7 @@ fn check_table_privileges(plan: &IrPlan) -> traft::Result<()> {
 
     // Switch to admin to get space ids. At the moment we don't use space cache in tarantool
     // module and can't get space metadata without _space table read permissions.
-    with_su(ADMIN_USER_ID, || -> traft::Result<()> {
+    with_su(ADMIN_ID, || -> traft::Result<()> {
         for (_, node_id) in nodes {
             let rel_node = plan.get_relation_node(node_id).map_err(Error::from)?;
             let (relation, privileges) = match rel_node {
@@ -189,7 +189,7 @@ pub fn dispatch_query(encoded_params: EncodedPatternWithParams) -> traft::Result
         &params.pattern,
         || {
            let runtime = RouterRuntime::new().map_err(Error::from)?;
-            let query = with_su(ADMIN_USER_ID, || -> traft::Result<Query<RouterRuntime>> {
+            let query = with_su(ADMIN_ID, || -> traft::Result<Query<RouterRuntime>> {
                 Query::new(&runtime, &params.pattern, params.params).map_err(Error::from)
             })??;
             dispatch(query)
@@ -339,7 +339,7 @@ pub fn proc_pg_parse(query: String, traceable: bool) -> traft::Result<Descriptor
     }
     let ast = <RouterRuntime as Router>::ParseTree::new(&query).map_err(Error::from)?;
     let metadata = &*runtime.metadata().map_err(Error::from)?;
-    let plan = with_su(ADMIN_USER_ID, || -> traft::Result<IrPlan> {
+    let plan = with_su(ADMIN_ID, || -> traft::Result<IrPlan> {
         let mut plan = ast.resolve_metadata(metadata).map_err(Error::from)?;
         if runtime.provides_versions() {
             let mut table_version_map =
@@ -704,7 +704,7 @@ fn reenterable_schema_change_request(
         }
     };
 
-    let _su = session::su(ADMIN_USER_ID).expect("cant fail because session is available");
+    let _su = session::su(ADMIN_ID).expect("cant fail because session is available");
 
     'retry: loop {
         if Instant::now() > deadline {
-- 
GitLab
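
Note on the pattern being consolidated: every call site in the hunks above now takes the admin user id from crate::schema::ADMIN_ID and elevates privileges either through the session::su(ADMIN_ID) guard or the with_su(ADMIN_ID, || ...) closure. A minimal sketch of that pattern, assuming the crate layout shown in the diff, might look like the snippet below; the with_admin helper is hypothetical and exists only to illustrate how the guard restores the original user when dropped.

    use tarantool::session;

    use crate::schema::ADMIN_ID;
    use crate::traft;

    /// Hypothetical helper: run `f` with admin rights, then drop back to the
    /// calling user when the guard returned by `session::su` goes out of scope.
    fn with_admin<T>(f: impl FnOnce() -> traft::Result<T>) -> traft::Result<T> {
        // Elevate to the admin user (id 1, defined once in schema.rs).
        let _su = session::su(ADMIN_ID)?;
        // Any system-space access inside `f` now runs as admin.
        f()
    }

Whether such a wrapper is worth introducing is a separate question; the point of the patch is only that both existing styles now reference a single constant defined in schema.rs.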