diff --git a/src/config.rs b/src/config.rs
index c9bc8e200ba63a9206103674bc4315d72ba7ddff..4d1544d18e30a5adf6570873aa14725397d13398 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1,7 +1,6 @@
 use crate::address::{HttpAddress, IprotoAddress};
 use crate::cli::args;
 use crate::cli::args::CONFIG_PARAMETERS_ENV;
-use crate::config_parameter_path;
 use crate::failure_domain::FailureDomain;
 use crate::instance::InstanceName;
 use crate::introspection::leaf_field_paths;
@@ -21,6 +20,7 @@ use crate::traft::error::Error;
 use crate::traft::RaftSpaceAccess;
 use crate::util::edit_distance;
 use crate::util::file_exists;
+use crate::{config_parameter_path, sql};
 use sbroad::ir::relation::DerivedType;
 use sbroad::ir::value::{EncodedValue, Value};
 use serde_yaml::Value as YamlValue;
@@ -197,6 +197,7 @@ Using configuration file '{args_path}'.");
             TierConfig {
                 replication_factor: Some(1),
                 can_vote: true,
+                bucket_count: Some(sql::DEFAULT_BUCKET_COUNT),
                 ..Default::default()
             },
         )]));
@@ -793,14 +794,6 @@ Using configuration file '{args_path}'.");
         1048576
     }
 
-    #[inline]
-    pub fn total_bucket_count() -> u64 {
-        // This is value is not configurable at the moment, but this may change
-        // in the future. At that point this function will probably also want to
-        // accept a `&self` parameter, but for now it's not necessary.
-        3000
-    }
-
     pub fn log_config_params(&self) {
         for path in &leaf_field_paths::<PicodataConfig>() {
             let value = self
@@ -1051,6 +1044,11 @@ pub struct ClusterConfig {
     #[introspection(config_default = 1)]
     pub default_replication_factor: Option<u8>,
 
+    /// Bucket count used for tiers that don't specify one explicitly.
+    /// For the default value see [`Self::default_bucket_count()`].
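+    ///
+    /// Example cluster config snippet (a sketch, mirroring the tests below):
+    /// ```yaml
+    /// cluster:
+    ///     default_bucket_count: 5000
+    /// ```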
+    #[introspection(config_default = sql::DEFAULT_BUCKET_COUNT)]
+    pub default_bucket_count: Option<u64>,
+
     #[serde(flatten)]
     #[introspection(ignore)]
     pub unknown_parameters: HashMap<String, YamlValue>,
@@ -1065,6 +1063,7 @@ impl ClusterConfig {
                 Tier {
                     name: DEFAULT_TIER.into(),
                     replication_factor: self.default_replication_factor(),
+                    bucket_count: self.default_bucket_count(),
                     can_vote: true,
                     ..Default::default()
                 },
@@ -1077,10 +1076,15 @@ impl ClusterConfig {
                 .replication_factor
                 .unwrap_or_else(|| self.default_replication_factor());
 
+            let bucket_count = info
+                .bucket_count
+                .unwrap_or_else(|| self.default_bucket_count());
+
             let tier_def = Tier {
                 name: name.into(),
                 replication_factor,
                 can_vote: info.can_vote,
+                bucket_count,
                 ..Default::default()
             };
             tier_defs.insert(name.into(), tier_def);
@@ -1093,6 +1097,12 @@ impl ClusterConfig {
         self.default_replication_factor
             .expect("is set in PicodataConfig::set_defaults_explicitly")
     }
+
+    #[inline]
+    pub fn default_bucket_count(&self) -> u64 {
+        self.default_bucket_count
+            .expect("is set in PicodataConfig::set_defaults_explicitly")
+    }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -2008,6 +2018,9 @@ cluster:
             replication_factor: 2
             can_vote: false
 
+        radix:
+            bucket_count: 16384
+
 instance:
     instance_name: voter1
 
@@ -2097,16 +2110,20 @@ cluster:
         let yaml = r###"
 cluster:
     default_replication_factor: 3
+    default_bucket_count: 5000
     name: test
 "###;
         let config = PicodataConfig::read_yaml_contents(&yaml.trim()).unwrap();
         config.validate_from_file().expect("");
 
         let tiers = config.cluster.tiers();
+        assert_eq!(tiers.len(), 1);
+
         let default_tier = tiers
             .get("default")
             .expect("default replication factor should applied to default tier configuration");
         assert_eq!(default_tier.replication_factor, 3);
+        assert_eq!(default_tier.bucket_count, 5000);
     }
 
     #[test]
@@ -2150,6 +2167,44 @@ instance:
         assert_eq!(listen.port, "8080");
     }
 
+    #[test]
+    fn default_bucket_count_replication_factor() {
+        let yaml = r###"
+cluster:
+    tier:
+        non-default:
+            replication_factor: 2
+            bucket_count: 1000
+        default:
+        default-rf:
+            bucket_count: 10000
+        default-bc:
+            replication_factor: 4
+    default_replication_factor: 3
+    default_bucket_count: 5000
+    name: test
+"###;
+        let config = PicodataConfig::read_yaml_contents(&yaml.trim()).unwrap();
+        config.validate_from_file().expect("");
+
+        let tiers = config.cluster.tiers();
+        assert_eq!(tiers.len(), 4);
+
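+        // Tiers keep explicitly configured values; otherwise the cluster-wide
+        // `default_replication_factor` / `default_bucket_count` apply.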
+        let assert_tier = |name: &str, rf: u8, bc: u64| {
+            let tier: &Tier = tiers
+                .get(name)
+                .expect("default replication factor should applied to default tier configuration");
+
+            assert_eq!(tier.replication_factor, rf);
+            assert_eq!(tier.bucket_count, bc);
+        };
+
+        assert_tier("default", 3, 5000);
+        assert_tier("non-default", 2, 1000);
+        assert_tier("default-rf", 3, 10000);
+        assert_tier("default-bc", 4, 5000);
+    }
+
     #[track_caller]
     fn setup_for_tests(yaml: Option<&str>, args: &[&str]) -> Result<Box<PicodataConfig>, Error> {
         let mut config = if let Some(yaml) = yaml {
diff --git a/src/http_server.rs b/src/http_server.rs
index 179f825923738f9d7adf7912fb7d3ce500902ad5..4409416a87bf5a8a8a70f97a915f227ffdb82bbb 100644
--- a/src/http_server.rs
+++ b/src/http_server.rs
@@ -114,6 +114,7 @@ pub(crate) struct TierInfo {
     replicasets: Vec<ReplicasetInfo>,
     replicaset_count: usize,
     rf: u8,
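+    // number of vshard buckets configured for the tier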
+    bucket_count: u64,
     instance_count: usize,
     #[serde(rename = "can_vote")] // for compatibility with lua version
     can_vote: bool,
@@ -200,7 +201,7 @@ async fn get_instances_data(
         // we have to add timeout directly to future due
         // to the bug in connection pool that does not consider
         // timeout when establishing TCP connection
-        // See https://git.picodata.io/picodata/picodata/picodata/-/issues/943 
+        // See https://git.picodata.io/picodata/picodata/picodata/-/issues/943
         ).timeout(DEFAULT_TIMEOUT);
         fs.push({
             async move {
@@ -382,6 +383,7 @@ pub(crate) fn http_api_tiers() -> Result<Vec<TierInfo>> {
                     replicasets: Vec::new(),
                     replicaset_count: 0,
                     rf: item.replication_factor,
+                    bucket_count: item.bucket_count,
                     instance_count: 0,
                     can_vote: true,
                     name: item.name.clone(),
diff --git a/src/info.rs b/src/info.rs
index 1740bc5b0a0c2f9d492c02f70a477faa769cf893..7fd1e304221ffc85326127f002566659a8827f75 100644
--- a/src/info.rs
+++ b/src/info.rs
@@ -347,19 +347,17 @@ pub fn proc_get_config() -> Result<rmpv::Value, Error> {
 pub fn proc_get_vshard_config(tier_name: Option<String>) -> Result<RawByteBuf, Error> {
     let node = node::global()?;
     let tier_name = if let Some(tier_name) = tier_name {
-        let tier = node.storage.tiers.by_name(&tier_name)?;
-        if tier.is_none() {
-            return Err(Error::NoSuchTier(tier_name));
-        };
-
         tier_name
     } else {
         node.raft_storage
             .tier()?
             .expect("tier for instance should exists")
     };
+    let Some(tier) = node.storage.tiers.by_name(&tier_name)? else {
+        return Err(Error::NoSuchTier(tier_name));
+    };
 
-    let config = VshardConfig::from_storage(&node.storage, &tier_name)?;
+    let config = VshardConfig::from_storage(&node.storage, &tier.name, tier.bucket_count)?;
     let data = rmp_serde::to_vec_named(&config).map_err(Error::other)?;
     Ok(RawByteBuf::from(data))
 }
diff --git a/src/luamod.rs b/src/luamod.rs
index 1b412d285cccb9555238ae660d32a155b3bcae6a..af7e63920f78e951b0ea0bbc2fdd5c1ed5b3ec3b 100644
--- a/src/luamod.rs
+++ b/src/luamod.rs
@@ -13,6 +13,7 @@ use crate::schema::{self, ADMIN_ID};
 use crate::sync;
 #[allow(unused_imports)]
 use crate::tlog;
+use crate::traft::error::Error;
 use crate::traft::op::{self, Op};
 use crate::traft::{self, node, RaftIndex, RaftTerm};
 use crate::util::duration_from_secs_f64_clamped;
@@ -158,7 +159,15 @@ pub(crate) fn setup() {
                 .raft_storage
                 .tier()?
                 .expect("tier for instance should exists");
-            let config = crate::vshard::VshardConfig::from_storage(&node.storage, &tier)?;
+
+            let Some(tier) = node.storage.tiers.by_name(&tier)? else {
+                return Err(Error::NoSuchTier(tier));
+            };
+            let config = crate::vshard::VshardConfig::from_storage(
+                &node.storage,
+                &tier.name,
+                tier.bucket_count,
+            )?;
             Ok(config)
         }),
     );
diff --git a/src/plugin/rpc/client.rs b/src/plugin/rpc/client.rs
index 9843db1facbd411f3d71b3d9bfd5169ed54f1213..9aba046240a801fc9190ee298af149068c49f548 100644
--- a/src/plugin/rpc/client.rs
+++ b/src/plugin/rpc/client.rs
@@ -242,13 +242,13 @@ fn resolve_rpc_target(
     let mut by_bucket_id = None;
     let mut tier_and_replicaset_uuid = None;
     let replicaset_uuid_owned;
-    if let Some((tier, bucket_id)) = tier_and_bucket_id {
+    if let Some((tier_name, bucket_id)) = tier_and_bucket_id {
         // This call must be done here, because this function may yield
         // but later we take a volatile reference to TopologyCache which can't be held across yields
-        replicaset_uuid_owned = vshard::get_replicaset_uuid_by_bucket_id(tier, bucket_id)?;
+        replicaset_uuid_owned = vshard::get_replicaset_uuid_by_bucket_id(tier_name, bucket_id)?;
         let uuid = &*replicaset_uuid_owned;
-        by_bucket_id = Some((tier, bucket_id, uuid));
-        tier_and_replicaset_uuid = Some((tier, uuid));
+        by_bucket_id = Some((tier_name, bucket_id, uuid));
+        tier_and_replicaset_uuid = Some((tier_name, uuid));
     }
 
     let topology_ref = topology.get();
diff --git a/src/rpc/sharding.rs b/src/rpc/sharding.rs
index 54f08799e51a4e8e065de88beeecf3c1f036c3f8..0529546b7ff800a27e3cd71cfc5e4779dc86e6d2 100644
--- a/src/rpc/sharding.rs
+++ b/src/rpc/sharding.rs
@@ -66,7 +66,7 @@ crate::define_rpc_request! {
                 continue;
             }
 
-            let mut config = VshardConfig::from_storage(storage, &tier.name)?;
+            let mut config = VshardConfig::from_storage(storage, &tier.name, tier.bucket_count)?;
 
             config.listen = Some(lua.eval("return box.info.listen")?);
             config.set_password_in_uris();
diff --git a/src/sql/router.rs b/src/sql/router.rs
index aeec9819c1f3d5952f30ab159692917412d32680..3cf14d425dc7dfbcc77714c49a7709ae202da04f 100644
--- a/src/sql/router.rs
+++ b/src/sql/router.rs
@@ -29,8 +29,6 @@ use std::any::Any;
 use std::collections::HashMap;
 use std::rc::Rc;
 
-use crate::sql::DEFAULT_BUCKET_COUNT;
-
 use crate::schema::{Distribution, ShardingFn, ADMIN_ID};
 use crate::storage::{self, Clusterwide};
 
@@ -51,7 +49,7 @@ thread_local! {
         Mutex::new(PicoRouterCache::new(DEFAULT_CAPACITY).unwrap()));
 }
 
-fn get_tier_info(tier_name: &SmolStr) -> Result<Tier, SbroadError> {
+pub fn get_tier_info(tier_name: &str) -> Result<Tier, SbroadError> {
     let node = node::global().map_err(|e| {
         SbroadError::FailedTo(Action::Get, None, format_smolstr!("raft node: {}", e))
     })?;
@@ -59,7 +57,7 @@ fn get_tier_info(tier_name: &SmolStr) -> Result<Tier, SbroadError> {
     let tier = with_su(ADMIN_ID, || {
         node.storage
             .tiers
-            .by_name(tier_name.as_str())
+            .by_name(tier_name)
             .map_err(|e| {
                 SbroadError::FailedTo(
                     Action::Get,
@@ -69,12 +67,12 @@ fn get_tier_info(tier_name: &SmolStr) -> Result<Tier, SbroadError> {
             })?
             .ok_or(SbroadError::NotFound(
                 Entity::Metadata,
-                format_smolstr!("tier with tier_name `{tier_name}` not found"),
+                format_smolstr!("tier with name `{tier_name}` not found"),
             ))
     })??;
 
     Ok(Tier {
-        bucket_count: DEFAULT_BUCKET_COUNT,
+        bucket_count: tier.bucket_count,
         name: tier.name,
     })
 }
diff --git a/src/tier.rs b/src/tier.rs
index 62ba96a246068b0c40f0da82ce5e6d3ef7f788b9..e0b1a94482a53287047176629a5067e0075bb1fa 100644
--- a/src/tier.rs
+++ b/src/tier.rs
@@ -4,6 +4,7 @@ use tarantool::{space::UpdateOps, tuple::Encode};
 use crate::{
     column_name,
     schema::ADMIN_ID,
+    sql,
     storage::{TClusterwideTable, Tiers},
     traft::{error::Error, op::Dml},
 };
@@ -22,6 +23,7 @@ pub struct Tier {
     pub current_vshard_config_version: u64,
     pub target_vshard_config_version: u64,
     pub vshard_bootstrapped: bool,
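+    /// Total number of vshard buckets in this tier.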
+    pub bucket_count: u64,
 }
 
 impl Encode for Tier {}
@@ -38,6 +40,7 @@ impl Tier {
             Field::from(("current_vshard_config_version", FieldType::Unsigned)),
             Field::from(("target_vshard_config_version", FieldType::Unsigned)),
             Field::from(("vshard_bootstrapped", FieldType::Boolean)),
+            Field::from(("bucket_count", FieldType::Unsigned)),
         ]
     }
 
@@ -65,6 +68,7 @@ impl Default for Tier {
             name: DEFAULT_TIER.into(),
             replication_factor: 1,
             can_vote: true,
+            bucket_count: sql::DEFAULT_BUCKET_COUNT,
             current_vshard_config_version: 0,
             target_vshard_config_version: 0,
             vshard_bootstrapped: false,
@@ -91,6 +95,9 @@ pub struct TierConfig {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub replication_factor: Option<u8>,
 
+    /// Bucket count for this tier. If omitted, the cluster-wide
+    /// `default_bucket_count` is used.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bucket_count: Option<u64>,
+
     #[serde(default = "default_can_vote")]
     pub can_vote: bool,
 }
diff --git a/src/vshard.rs b/src/vshard.rs
index f049a699f12ee0bc1f7e2d0a8d715a49cbe7bc1f..c18195274c6bb14196e989ed70c66fcc955e5fae 100644
--- a/src/vshard.rs
+++ b/src/vshard.rs
@@ -1,4 +1,3 @@
-use crate::config::PicodataConfig;
 use crate::instance::Instance;
 use crate::instance::InstanceName;
 use crate::pico_service::pico_service_password;
@@ -6,6 +5,7 @@ use crate::replicaset::Replicaset;
 use crate::replicaset::ReplicasetName;
 use crate::replicaset::Weight;
 use crate::schema::PICO_SERVICE_USER_NAME;
+use crate::sql::router;
 use crate::storage::Clusterwide;
 use crate::storage::ToEntryIter as _;
 use crate::storage::TABLE_ID_BUCKET;
@@ -13,6 +13,7 @@ use crate::traft::error::Error;
 use crate::traft::node;
 use crate::traft::RaftId;
 use crate::traft::Result;
+use sbroad::executor::engine::Vshard;
 use std::collections::HashMap;
 use tarantool::space::SpaceId;
 use tarantool::tlua;
@@ -48,7 +49,9 @@ pub fn get_replicaset_priority_list(
 ///
 /// This function **may yield** if vshard needs to update it's bucket mapping.
 pub fn get_replicaset_uuid_by_bucket_id(tier: &str, bucket_id: u64) -> Result<String, Error> {
-    let max_bucket_id = PicodataConfig::total_bucket_count();
+    let info = router::get_tier_info(tier)?;
+
+    let max_bucket_id = info.bucket_count();
     if bucket_id < 1 || bucket_id > max_bucket_id {
         #[rustfmt::skip]
         return Err(Error::other(format!("invalid bucket id: must be within 1..{max_bucket_id}, got {bucket_id}")));
@@ -127,7 +130,11 @@ tarantool::define_str_enum! {
 }
 
 impl VshardConfig {
-    pub fn from_storage(storage: &Clusterwide, tier_name: &str) -> Result<Self, Error> {
+    pub fn from_storage(
+        storage: &Clusterwide,
+        tier_name: &str,
+        bucket_count: u64,
+    ) -> Result<Self, Error> {
         let instances = storage.instances.all_instances()?;
         let peer_addresses: HashMap<_, _> = storage
             .peer_addresses
@@ -137,7 +144,13 @@ impl VshardConfig {
         let replicasets: Vec<_> = storage.replicasets.iter()?.collect();
         let replicasets: HashMap<_, _> = replicasets.iter().map(|rs| (&rs.name, rs)).collect();
 
-        let result = Self::new(&instances, &peer_addresses, &replicasets, tier_name);
+        let result = Self::new(
+            &instances,
+            &peer_addresses,
+            &replicasets,
+            tier_name,
+            bucket_count,
+        );
         Ok(result)
     }
 
@@ -146,6 +159,7 @@ impl VshardConfig {
         peer_addresses: &HashMap<RaftId, String>,
         replicasets: &HashMap<&ReplicasetName, &Replicaset>,
         tier_name: &str,
+        bucket_count: u64,
     ) -> Self {
         let mut sharding: HashMap<String, ReplicasetSpec> = HashMap::new();
         for peer in instances {
@@ -187,7 +201,7 @@ impl VshardConfig {
             sharding,
             discovery_mode: DiscoveryMode::On,
             space_bucket_id: TABLE_ID_BUCKET,
-            bucket_count: PicodataConfig::total_bucket_count(),
+            bucket_count,
         }
     }
 
diff --git a/test/int/test_basics.py b/test/int/test_basics.py
index 6e5e081013c269d4d89860f48c52646cdf0b9bae..f6605b1038e57a90ba3f8142e6a3852d021e5767 100644
--- a/test/int/test_basics.py
+++ b/test/int/test_basics.py
@@ -338,7 +338,7 @@ def test_raft_log(instance: Instance):
 Replace(_pico_peer_address, [1,"127.0.0.1:{p}"]),
 Insert(_pico_instance, ["default_1_1","{i1_uuid}",1,"default_1","{r1_uuid}",["Offline",0],["Offline",0],{b},"default","{picodata_version}"]),
 Insert(_pico_replicaset, ["default_1","{r1_uuid}","default_1_1","default_1_1","default",0.0,"auto","not-ready",0,0,{{}}]))|
-|  0  | 1  |BatchDml(Insert(_pico_tier, ["default",1,true,0,0,false]))|
+|  0  | 1  |BatchDml(Insert(_pico_tier, ["default",1,true,0,0,false,3000]))|
 |  0  | 1  |BatchDml(
 Insert(_pico_property, ["global_schema_version",0]),
 Insert(_pico_property, ["next_schema_version",1]),
@@ -417,7 +417,7 @@ Insert(_pico_index, [{_pico_user},2,"_pico_user_owner_id","tree",[{{"unique":fal
 Insert(_pico_table, [{_pico_privilege},"_pico_privilege",{{"Global":null}},[{{"field_type":"unsigned","is_nullable":false,"name":"grantor_id"}},{{"field_type":"unsigned","is_nullable":false,"name":"grantee_id"}},{{"field_type":"string","is_nullable":false,"name":"privilege"}},{{"field_type":"string","is_nullable":false,"name":"object_type"}},{{"field_type":"integer","is_nullable":false,"name":"object_id"}},{{"field_type":"unsigned","is_nullable":false,"name":"schema_version"}}],0,true,"memtx",1,""]),
 Insert(_pico_index, [{_pico_privilege},0,"_pico_privilege_primary","tree",[{{"unique":true}}],[["grantee_id","unsigned",null,false,null],["object_type","string",null,false,null],["object_id","integer",null,false,null],["privilege","string",null,false,null]],true,0]),
 Insert(_pico_index, [{_pico_privilege},1,"_pico_privilege_object","tree",[{{"unique":false}}],[["object_type","string",null,false,null],["object_id","integer",null,false,null]],true,0]),
-Insert(_pico_table, [{_pico_tier},"_pico_tier",{{"Global":null}},[{{"field_type":"string","is_nullable":false,"name":"name"}},{{"field_type":"unsigned","is_nullable":false,"name":"replication_factor"}},{{"field_type":"boolean","is_nullable":false,"name":"can_vote"}},{{"field_type":"unsigned","is_nullable":false,"name":"current_vshard_config_version"}},{{"field_type":"unsigned","is_nullable":false,"name":"target_vshard_config_version"}},{{"field_type":"boolean","is_nullable":false,"name":"vshard_bootstrapped"}}],0,true,"memtx",1,""]),
+Insert(_pico_table, [{_pico_tier},"_pico_tier",{{"Global":null}},[{{"field_type":"string","is_nullable":false,"name":"name"}},{{"field_type":"unsigned","is_nullable":false,"name":"replication_factor"}},{{"field_type":"boolean","is_nullable":false,"name":"can_vote"}},{{"field_type":"unsigned","is_nullable":false,"name":"current_vshard_config_version"}},{{"field_type":"unsigned","is_nullable":false,"name":"target_vshard_config_version"}},{{"field_type":"boolean","is_nullable":false,"name":"vshard_bootstrapped"}},{{"field_type":"unsigned","is_nullable":false,"name":"bucket_count"}}],0,true,"memtx",1,""]),
 Insert(_pico_index, [{_pico_tier},0,"_pico_tier_name","tree",[{{"unique":true}}],[["name","string",null,false,null]],true,0]),
 Insert(_pico_table, [{_pico_routine},"_pico_routine",{{"Global":null}},[{{"field_type":"unsigned","is_nullable":false,"name":"id"}},{{"field_type":"string","is_nullable":false,"name":"name"}},{{"field_type":"string","is_nullable":false,"name":"kind"}},{{"field_type":"array","is_nullable":false,"name":"params"}},{{"field_type":"array","is_nullable":false,"name":"returns"}},{{"field_type":"string","is_nullable":false,"name":"language"}},{{"field_type":"string","is_nullable":false,"name":"body"}},{{"field_type":"string","is_nullable":false,"name":"security"}},{{"field_type":"boolean","is_nullable":false,"name":"operable"}},{{"field_type":"unsigned","is_nullable":false,"name":"schema_version"}},{{"field_type":"unsigned","is_nullable":false,"name":"owner"}}],0,true,"memtx",1,""]),
 Insert(_pico_index, [{_pico_routine},0,"_pico_routine_id","tree",[{{"unique":true}}],[["id","unsigned",null,false,null]],true,0]),
diff --git a/test/int/test_config_file.py b/test/int/test_config_file.py
index fef22385686a2093a3f198f9d169f85937734ad1..1b138f8147a41765e4b7c4a729f2fff519e43865 100644
--- a/test/int/test_config_file.py
+++ b/test/int/test_config_file.py
@@ -82,6 +82,7 @@ instance:
                 value=dict(deluxe=dict(can_vote=True)),
                 source="config_file",
             ),
+            default_bucket_count=dict(value=3000, source="default"),
             default_replication_factor=dict(value=1, source="default"),
         ),
         instance=dict(
diff --git a/test/int/test_http_server.py b/test/int/test_http_server.py
index c2c560e90825c1af2ad8392cab814fc0278e573c..a3da0f6a172bffdd1e3feda1fc1961c150485839 100644
--- a/test/int/test_http_server.py
+++ b/test/int/test_http_server.py
@@ -67,6 +67,7 @@ def test_webui_basic(instance: Instance):
                 ],
                 "replicasetCount": 1,
                 "rf": 1,
+                "bucketCount": 3000,
                 "instanceCount": 1,
                 "can_vote": True,
                 "name": "default",
@@ -202,6 +203,7 @@ def test_webui_with_plugin(cluster: Cluster):
     tier_template = {
         "replicasetCount": 1,
         "rf": 1,
+        "bucketCount": 3000,
         "instanceCount": 1,
         "can_vote": True,
     }
diff --git a/test/int/test_plugin.py b/test/int/test_plugin.py
index e1a62e4556a6ade6e93b4950d23caeef4cb841fc..493ae2e10cea9cbccc741943d8e6c7c525f1e1e4 100644
--- a/test/int/test_plugin.py
+++ b/test/int/test_plugin.py
@@ -2513,7 +2513,7 @@ cluster:
     # Check requesting RPC to unknown tier
     with pytest.raises(
         TarantoolError,
-        match='tier with name "undefined" not found',
+        match="tier with name `undefined` not found",
     ):
         context = make_context()
         input = dict(
diff --git a/test/int/test_sharding.py b/test/int/test_sharding.py
index fe35c6e7c07c51f0b5ef9b5fa3b92b4c28be4fbf..ebd82982f284ab9c5c5a9f759aad8c669102e69b 100644
--- a/test/int/test_sharding.py
+++ b/test/int/test_sharding.py
@@ -388,3 +388,44 @@ def test_expel_blocked_by_bucket_rebalancing(cluster: Cluster):
     ]
     cluster.wait_until_instance_has_this_many_active_buckets(i1, 1500)
     cluster.wait_until_instance_has_this_many_active_buckets(i2, 1500)
+
+
+def assert_tier_bucket_count(cluster: Cluster, name: str, bucket_count: int, *instances: Instance):
+    assert len(instances) > 0
+
+    i1 = instances[0]
+    # the tier's `bucket_count` should be recorded in the `_pico_tier` system table
+    rows = i1.sql(""" SELECT name, bucket_count FROM _pico_tier WHERE name = ?""", name)
+    assert rows == [
+        [name, bucket_count],
+    ]
+
+    # buckets should be spread evenly across the tier's instances
+    for x in instances:
+        cluster.wait_until_instance_has_this_many_active_buckets(x, bucket_count // len(instances))
+
+
+def test_bucket_count_custom_and_default(cluster: Cluster):
+    cluster.set_service_password("secret")
+    cluster.set_config_file(
+        yaml="""
+cluster:
+    name: test
+    default_bucket_count: 6000
+    tier:
+        radix:
+            replication_factor: 1
+            bucket_count: 16384
+        storage:
+            replication_factor: 1
+"""
+    )
+    i1 = cluster.add_instance(tier="storage", wait_online=False)
+    i2 = cluster.add_instance(tier="storage", wait_online=False)
+    i3 = cluster.add_instance(tier="storage", wait_online=False)
+    i4 = cluster.add_instance(tier="radix", wait_online=False)
+    i5 = cluster.add_instance(tier="radix", wait_online=False)
+    cluster.wait_online()
+
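+    # "storage" falls back to the cluster-wide default (6000), "radix" uses its explicit value (16384)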
+    assert_tier_bucket_count(cluster, "storage", 6000, i1, i2, i3)
+    assert_tier_bucket_count(cluster, "radix", 16384, i4, i5)
diff --git a/webui/src/modules/nodes/nodesPage/NodesContent/ReplicasetCard/ReplicasetCard.tsx b/webui/src/modules/nodes/nodesPage/NodesContent/ReplicasetCard/ReplicasetCard.tsx
index 41c36447d38b1826f1f0c11cd44257f1d34f05fa..172daf2a6b0c2119fe558ff41b0bee8b6d0ac2a6 100644
--- a/webui/src/modules/nodes/nodesPage/NodesContent/ReplicasetCard/ReplicasetCard.tsx
+++ b/webui/src/modules/nodes/nodesPage/NodesContent/ReplicasetCard/ReplicasetCard.tsx
@@ -19,7 +19,7 @@ export type TReplicaset = {
   instanceCount: number;
   instances: InstanceType[];
   version: string;
-  state: "Online" | "Offline";
+  state: "Online" | "Offline" | "Expelled";
   capacityUsage: number;
   memory: {
     usable: number;
diff --git a/webui/src/modules/nodes/nodesPage/NodesContent/TierCard/TierCard.module.scss b/webui/src/modules/nodes/nodesPage/NodesContent/TierCard/TierCard.module.scss
index 58671e3017502b22615495cf633be6fa0761f10c..885f9d1185af831044841507733086ea8ce4c747 100644
--- a/webui/src/modules/nodes/nodesPage/NodesContent/TierCard/TierCard.module.scss
+++ b/webui/src/modules/nodes/nodesPage/NodesContent/TierCard/TierCard.module.scss
@@ -66,6 +66,10 @@ $chevron-column-flex: 0.2;
   flex: 1 166px;
 }
 
+.bucketCountColumn {
+  flex: 1 166px;
+}
+
 .canVoterColumn {
   flex: 1 77px;
 }
diff --git a/webui/src/modules/nodes/nodesPage/NodesContent/TierCard/TierCard.tsx b/webui/src/modules/nodes/nodesPage/NodesContent/TierCard/TierCard.tsx
index de481320560421a39bd3c4ec0afae6acaed6f87a..851965e1ab61e97999ea754df594242054bb9206 100644
--- a/webui/src/modules/nodes/nodesPage/NodesContent/TierCard/TierCard.tsx
+++ b/webui/src/modules/nodes/nodesPage/NodesContent/TierCard/TierCard.tsx
@@ -90,6 +90,12 @@ export const TierCard: FC<TierCardProps> = React.memo(({ tier }) => {
           <div className={styles.label}>{tierTranslations.rf.label}</div>
           <div className={styles.infoValue}>{tier.rf}</div>
         </div>
+        <div className={cn(styles.infoColumn, styles.bucketCountColumn)}>
+          <div className={styles.label}>
+            {tierTranslations.bucket_count.label}
+          </div>
+          <div className={styles.infoValue}>{tier.bucketCount}</div>
+        </div>
         <div className={cn(styles.infoColumn, styles.canVoterColumn)}>
           <div className={styles.label}>{tierTranslations.canVote.label}</div>
           <div className={styles.infoValue}>
diff --git a/webui/src/shared/components/NetworkState/NetworkState.tsx b/webui/src/shared/components/NetworkState/NetworkState.tsx
index d684cca7198a21d82c16bd48c26da11e72a294a1..0d36a6d36323a5531eeb1f2dcc4b592c63cf3d1b 100644
--- a/webui/src/shared/components/NetworkState/NetworkState.tsx
+++ b/webui/src/shared/components/NetworkState/NetworkState.tsx
@@ -4,7 +4,7 @@ import { useTranslation } from "shared/intl";
 import { TextInFrame } from "shared/ui/typography/TextInFrame/TextInFrame";
 
 type NetworkStateProps = {
-  state: "Online" | "Offline";
+  state: "Online" | "Offline" | "Expelled";
 };
 
 export const NetworkState: React.FC<NetworkStateProps> = (props) => {
diff --git a/webui/src/shared/entity/instance/common/types.ts b/webui/src/shared/entity/instance/common/types.ts
index 02e281faa7b52d1599c1502c50c8a6c31a70050f..0c3309bfb449b9d0b324f6baa91257a4ce8b5608 100644
--- a/webui/src/shared/entity/instance/common/types.ts
+++ b/webui/src/shared/entity/instance/common/types.ts
@@ -2,8 +2,8 @@ import { Override } from "../../../utils/tsUtils";
 
 export type ServerInstanceType = {
   name: string;
-  targetState: "Online" | "Offline";
-  currentState: "Online" | "Offline";
+  targetState: "Online" | "Offline" | "Expelled";
+  currentState: "Online" | "Offline" | "Expelled";
   failureDomain: Record<string, string>;
   version: string;
   isLeader: boolean;
diff --git a/webui/src/shared/entity/replicaset/common/types.ts b/webui/src/shared/entity/replicaset/common/types.ts
index 19497ba1cc782576486dcec1fc1bfac1cd651498..1d5b06391e664520b5aefc751f32a6e7d8b16606 100644
--- a/webui/src/shared/entity/replicaset/common/types.ts
+++ b/webui/src/shared/entity/replicaset/common/types.ts
@@ -6,7 +6,7 @@ export type ServerReplicasetType = {
   instanceCount: number;
   instances: ServerInstanceType[];
   version: string;
-  state: "Online" | "Offline" | "Replicated" | "Expelled";
+  state: "Online" | "Offline" | "Expelled";
   capacityUsage: number;
   memory: {
     usable: number;
diff --git a/webui/src/shared/entity/tier/common/types.ts b/webui/src/shared/entity/tier/common/types.ts
index 3d9a157d7e9d2e07ff3ed63aaf76fe1c1f0172b9..fdbd3bb62de0f4f330d58d3c2bffae73a2d463e5 100644
--- a/webui/src/shared/entity/tier/common/types.ts
+++ b/webui/src/shared/entity/tier/common/types.ts
@@ -10,6 +10,7 @@ export type ServerTierType = {
   replicasetCount: number;
   instanceCount: number;
   rf: number;
+  bucketCount: number;
   can_vote: boolean;
   replicasets: ServerReplicasetType[];
 };
diff --git a/webui/src/shared/entity/tier/list/mock.ts b/webui/src/shared/entity/tier/list/mock.ts
index b223cc70ac85786bf2be652cf02c74ce51081f46..d839accbc8faead83ec578fc29151f889bff2da1 100644
--- a/webui/src/shared/entity/tier/list/mock.ts
+++ b/webui/src/shared/entity/tier/list/mock.ts
@@ -81,6 +81,7 @@ export const mock: ServerTiersListType = [
     ],
     replicasetCount: 2,
     rf: 2,
+    bucketCount: 2000,
     instanceCount: 4,
     can_vote: true,
     name: "red",
@@ -141,6 +142,7 @@ export const mock: ServerTiersListType = [
     ],
     replicasetCount: 2,
     rf: 1,
+    bucketCount: 3000,
     instanceCount: 2,
     can_vote: true,
     name: "blue",
diff --git a/webui/src/shared/intl/translations/en/pages/instances.ts b/webui/src/shared/intl/translations/en/pages/instances.ts
index e5e95b39c928031ca8ac665ad6a576d9a8a87a39..0e5d9395d6899c752d4b0f21b071327ca1bf9f70 100644
--- a/webui/src/shared/intl/translations/en/pages/instances.ts
+++ b/webui/src/shared/intl/translations/en/pages/instances.ts
@@ -72,6 +72,9 @@ export const instances = {
       rf: {
         label: "RF",
       },
+      bucket_count: {
+        label: "Buckets",
+      },
       canVote: {
         label: "Can vote",
       },
diff --git a/webui/src/shared/intl/translations/ru/pages/instances.ts b/webui/src/shared/intl/translations/ru/pages/instances.ts
index 29ad44606dc0bae8609ec0ae0267f9d569849972..973fc82f31165fd1cddbd7689468fd6dfa9d1af2 100644
--- a/webui/src/shared/intl/translations/ru/pages/instances.ts
+++ b/webui/src/shared/intl/translations/ru/pages/instances.ts
@@ -74,6 +74,9 @@ export const instances: TPages["instances"] = {
       rf: {
         label: "Фактор репликации",
       },
+      bucket_count: {
+        label: "Бакеты",
+      },
       canVote: {
         label: "Голосует?",
       },