diff --git a/src/cbus.rs b/src/cbus.rs
index 4876f80d7ab75b7ff09f71fc939a9565f7a7774e..0cb7c66f38ddeb15506ce2456faece90a313211a 100644
--- a/src/cbus.rs
+++ b/src/cbus.rs
@@ -22,4 +22,3 @@ pub fn init_cbus_endpoint() {
         .start_non_joinable()
         .expect("starting a fiber shouldn't fail");
 }
-
diff --git a/src/plugin/manager.rs b/src/plugin/manager.rs
index 24fd00a14d9a9f146404cf6600c176421bb520dc..4e8565b4dbd0787b80c5ca39692ed866a7516311 100644
--- a/src/plugin/manager.rs
+++ b/src/plugin/manager.rs
@@ -604,7 +604,7 @@ impl PluginManager {
         let cfg = node
             .storage
             .plugin_config
-            .get_by_entity(plugin_ident, &service_defs[0].name)?;
+            .get_by_entity_as_mp(plugin_ident, &service_defs[0].name)?;
         let cfg_raw = rmp_serde::encode::to_vec_named(&cfg).expect("out of memory");
         context_set_service_info(&mut ctx, &new_service);
 
@@ -692,7 +692,7 @@ impl PluginManager {
             let cfg = node
                 .storage
                 .plugin_config
-                .get_by_entity(plugin_ident, &def.name)?;
+                .get_by_entity_as_mp(plugin_ident, &def.name)?;
             let cfg_raw = rmp_serde::encode::to_vec_named(&cfg).expect("out of memory");
 
             #[rustfmt::skip]
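
For context on the rename above: a minimal sketch (not from this repo) of why `rmp_serde::encode::to_vec_named` pairs with the map-shaped value returned by `get_by_entity_as_mp` — named encoding emits string keys that a service can look up by name. `ExampleConfig` is hypothetical, and the round-trip assumes `rmpv` is built with its serde feature.

```rust
use serde::Serialize;

#[derive(Serialize)]
struct ExampleConfig {
    // Hypothetical field, for illustration only.
    stringy_string: String,
}

fn main() {
    let cfg = ExampleConfig { stringy_string: "nondefault".into() };
    // `to_vec_named` encodes `{"stringy_string": "nondefault"}` as a
    // MessagePack map; plain `to_vec` would emit the positional array
    // `["nondefault"]`, losing the key names.
    let bytes = rmp_serde::encode::to_vec_named(&cfg).expect("encoding shouldn't fail");
    let value: rmpv::Value = rmp_serde::from_slice(&bytes).expect("valid msgpack");
    assert!(matches!(value, rmpv::Value::Map(_)));
}
```
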
diff --git a/src/plugin/migration.rs b/src/plugin/migration.rs
index 75a969d069b23a2b4717183296cb4a3d18ead5bb..4a718db8f02ef5492a568b992512d8587c75019b 100644
--- a/src/plugin/migration.rs
+++ b/src/plugin/migration.rs
@@ -4,12 +4,14 @@ use crate::plugin::PreconditionCheckResult;
 use crate::plugin::{lock, reenterable_plugin_cas_request};
 use crate::plugin::{PluginIdentifier, PLUGIN_DIR};
 use crate::schema::ADMIN_ID;
-use crate::storage::ClusterwideTable;
+use crate::storage::{Clusterwide, ClusterwideTable};
 use crate::traft::node;
 use crate::traft::op::{Dml, Op};
 use crate::util::Lexer;
 use crate::util::QuoteEscapingStyle;
 use crate::{sql, tlog, traft};
+use std::borrow::Cow;
+use std::collections::HashMap;
 use std::fs::File;
 use std::io::{ErrorKind, Read};
 use std::time::Duration;
@@ -47,6 +49,9 @@ pub enum Error {
 
     #[error("inconsistent with previous version migration list, reason: {0}")]
     InconsistentMigrationList(String),
+
+    #[error("placeholder: {0}")]
+    Placeholder(#[from] PlaceholderSubstitutionError),
 }
 
 const MAX_COMMAND_LENGTH_TO_SHOW: usize = 256;
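
A small sketch of the propagation this `#[from]` variant enables, assuming the `substitute_config_placeholders` helper added later in this diff; the `?` operator converts the inner error into `Error::Placeholder` automatically.

```rust
// Hypothetical caller; `substitute_config_placeholders` is defined below in
// this diff and returns Result<_, PlaceholderSubstitutionError>.
fn substitute_line(line: &str) -> Result<String, Error> {
    let substitutions = std::collections::HashMap::new();
    // `?` goes through `From<PlaceholderSubstitutionError> for Error`,
    // which `#[from]` derives for the `Placeholder` variant.
    let substituted = substitute_config_placeholders(line, 1, &substitutions)?;
    Ok(substituted.into_owned())
}
```
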
@@ -97,8 +102,8 @@ pub enum BlockingError {
     Bug,
 }
 
-/// Function executes provided closure on a separate thread without blocking current fiber.
-/// In case provided closure panics the panic is forwarded to caller.
+/// Executes the provided closure on a separate thread,
+/// blocking the current fiber until the result is ready.
 pub fn blocking<F, R>(f: F) -> Result<R, BlockingError>
 where
     F: FnOnce() -> R + Send + 'static,
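
A hypothetical usage sketch based only on the signature above: the closure runs on a separate thread while the calling fiber yields until the result is ready, so other fibers keep making progress.

```rust
fn count_lines_off_fiber(path: std::path::PathBuf) -> Result<usize, BlockingError> {
    // `move` is needed because the closure must be `Send + 'static`:
    // it outlives the current stack frame while running on another thread.
    blocking(move || {
        let source = std::fs::read_to_string(&path).unwrap_or_default();
        source.lines().count()
    })
}
```
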
@@ -228,21 +233,28 @@ impl MigrationInfo {
 /// the current fiber until the result is ready.
 fn read_migration_queries_from_file_async(
     mut migration: MigrationInfo,
+    plugin_ident: &PluginIdentifier,
+    storage: &Clusterwide,
 ) -> Result<MigrationInfo, Error> {
     tlog!(Info, "parsing migrations file '{}'", migration.shortname());
     let t0 = Instant::now_accurate();
 
-    let mut migration = blocking(|| {
+    let substitutions = storage
+        .plugin_config
+        .get_by_entity(plugin_ident, CONTEXT_ENTITY)
+        .map_err(PlaceholderSubstitutionError::Tarantool)?;
+
+    let migration = blocking(move || {
         let fullpath = &migration.full_filepath;
         tlog!(Debug, "parsing a migrations file '{fullpath}'");
-        let res = read_migration_queries_from_file(&mut migration);
+        let res = read_migration_queries_from_file(&mut migration, &substitutions);
         if let Err(e) = &res {
             let fullpath = &migration.full_filepath;
             tlog!(Debug, "failed parsing migrations file '{fullpath}': {e}");
         }
 
-        migration
-    })?;
+        res.map(|_| migration)
+    })??;
 
     let elapsed = t0.elapsed();
     tlog!(
@@ -256,15 +268,22 @@ fn read_migration_queries_from_file_async(
 
 /// Reads and parses the migrations file described by `migration`.
 #[inline]
-fn read_migration_queries_from_file(migration: &mut MigrationInfo) -> Result<(), Error> {
+fn read_migration_queries_from_file(
+    migration: &mut MigrationInfo,
+    substitutions: &HashMap<String, rmpv::Value>,
+) -> Result<(), Error> {
     let fullpath = &migration.full_filepath;
     let source = std::fs::read_to_string(fullpath).map_err(|e| Error::File(fullpath.into(), e))?;
-    parse_migration_queries(&source, migration)
+    parse_migration_queries(&source, migration, substitutions)
 }
 
 /// Parses the migration queries from `source`, updating `migration` with the
 /// "UP" and "DOWN" queries from the file.
-fn parse_migration_queries(source: &str, migration: &mut MigrationInfo) -> Result<(), Error> {
+fn parse_migration_queries(
+    source: &str,
+    migration: &mut MigrationInfo,
+    substitutions: &HashMap<String, rmpv::Value>,
+) -> Result<(), Error> {
     let mut up_lines = vec![];
     let mut down_lines = vec![];
 
@@ -331,8 +350,12 @@ fn parse_migration_queries(source: &str, migration: &mut MigrationInfo) -> Resul
                     "{filename}: no pico.UP annotation found at start of file"
                 )));
             }
-            State::ParsingUp => up_lines.push(line),
-            State::ParsingDown => down_lines.push(line),
+            State::ParsingUp => {
+                up_lines.push(substitute_config_placeholders(line, lineno, substitutions)?);
+            }
+            State::ParsingDown => {
+                down_lines.push(substitute_config_placeholders(line, lineno, substitutions)?)
+            }
         }
     }
 
@@ -348,12 +371,12 @@ fn parse_migration_queries(source: &str, migration: &mut MigrationInfo) -> Resul
     Ok(())
 }
 
-fn split_sql_queries(lines: &[&str]) -> Vec<String> {
+fn split_sql_queries(lines: &[Cow<'_, str>]) -> Vec<String> {
     let mut queries = Vec::new();
 
     let mut current_query_start = 0;
     let mut current_query_length = 0;
-    for (line, i) in lines.iter().copied().zip(0..) {
+    for (i, line) in lines.iter().enumerate() {
         // `+ 1` for an extra '\n'
         current_query_length += line.len() + 1;
 
@@ -389,6 +412,90 @@ fn split_sql_queries(lines: &[&str]) -> Vec<String> {
     queries
 }
 
+#[derive(Debug, thiserror::Error)]
+pub enum PlaceholderSubstitutionError {
+    #[error("no key named {key} found in migration context at line {line_no}")]
+    NotFound { key: String, line_no: usize },
+
+    #[error(
+        "only strings are supported as placeholder targets, {key} is not a string but {value} at line {line_no}"
+    )]
+    BadType {
+        key: String,
+        value: rmpv::Value,
+        line_no: usize,
+    },
+
+    #[error(transparent)]
+    Tarantool(#[from] tarantool::error::Error),
+}
+
+pub const CONTEXT_ENTITY: &str = "migration_context";
+
+const MARKER: &str = "@_plugin_config.";
+
+/// Finds all placeholders in the provided query string and replaces each with
+/// the corresponding value from the map; returns the query with all substitutions applied.
+fn substitute_config_placeholders<'a>(
+    query_string: &'a str,
+    line_no: usize,
+    substitutions: &HashMap<String, rmpv::Value>,
+) -> Result<Cow<'a, str>, PlaceholderSubstitutionError> {
+    use PlaceholderSubstitutionError::*;
+
+    let mut query_string = Cow::from(query_string);
+
+    // Note that it is wrong to search for all placeholders first and then replace
+    // them, because each replacement may shift the positions of the remaining ones.
+    while let Some(placeholder) = find_placeholder(query_string.as_ref()) {
+        let Some(value) = substitutions.get(&placeholder.variable) else {
+            return Err(NotFound {
+                key: placeholder.variable,
+                line_no,
+            });
+        };
+
+        let Some(value) = value.as_str() else {
+            return Err(BadType {
+                key: placeholder.variable,
+                value: value.clone(),
+                line_no,
+            });
+        };
+
+        query_string
+            .to_mut()
+            .replace_range(placeholder.start..placeholder.end, value);
+    }
+
+    Ok(query_string)
+}
+
+#[derive(Debug, PartialEq, Eq)]
+struct Placeholder {
+    variable: String,
+    start: usize,
+    end: usize,
+}
+
+fn find_placeholder(query_string: &str) -> Option<Placeholder> {
+    let start = query_string.find(MARKER)?;
+
+    // Either find the next symbol that is not part of the name,
+    // or assume the placeholder lasts until the end of the string.
+    let end = query_string[start + MARKER.len()..]
+        .find(|c: char| !(c.is_ascii_alphanumeric() || c == '_'))
+        .unwrap_or(query_string.len() - start - MARKER.len());
+
+    let variable = &query_string[start + MARKER.len()..start + MARKER.len() + end];
+
+    Some(Placeholder {
+        variable: variable.to_owned(),
+        start,
+        end: start + MARKER.len() + end,
+    })
+}
+
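
A sketch of the `Cow` contract of the two helpers above (the substitution values here are made up): a line without placeholders comes back borrowed, i.e. with no allocation, while a substituted line becomes owned.

```rust
fn cow_contract() -> Result<(), PlaceholderSubstitutionError> {
    use std::borrow::Cow;
    use std::collections::HashMap;

    let mut substitutions = HashMap::new();
    substitutions.insert("tier".to_string(), rmpv::Value::from("nondefault"));

    // No marker in the line: the input is returned as-is, still borrowed.
    let untouched = substitute_config_placeholders("SELECT 1", 1, &substitutions)?;
    assert!(matches!(untouched, Cow::Borrowed(_)));

    // "@_plugin_config." is 16 bytes, so `find_placeholder` reports
    // start = 8 and end = 28 here, and the whole span is replaced.
    let replaced =
        substitute_config_placeholders("IN TIER @_plugin_config.tier", 2, &substitutions)?;
    assert_eq!(replaced, "IN TIER nondefault");
    assert!(matches!(replaced, Cow::Owned(_)));
    Ok(())
}
```
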
 /// Apply sql from migration file onto cluster.
 trait SqlApplier {
     fn apply(&self, sql: &str, deadline: Option<Instant>) -> traft::Result<()>;
@@ -531,8 +638,9 @@ pub fn apply_up_migrations(
         #[rustfmt::skip]
         tlog!(Info, "applying `UP` migrations, progress: {num}/{migrations_count}");
 
-        let migration = read_migration_queries_from_file_async(migration)
-            .inspect(|_| handle_err(&seen_queries))?;
+        let migration =
+            read_migration_queries_from_file_async(migration, plugin_ident, &node.storage)
+                .inspect_err(|_| handle_err(&seen_queries))?;
         seen_queries.push(migration);
         let migration = seen_queries.last().expect("just inserted");
 
@@ -594,29 +702,27 @@ pub fn apply_up_migrations(
 /// * `plugin_identity`: plugin for which migrations belong to
 /// * `migrations`: list of migration file names
 pub fn apply_down_migrations(
-    plugin_identity: &PluginIdentifier,
+    plugin_ident: &PluginIdentifier,
     migrations: &[String],
     deadline: Instant,
+    storage: &Clusterwide,
 ) {
     let iter = migrations.iter().rev().zip(0..);
     for (filename, num) in iter {
         #[rustfmt::skip]
         tlog!(Info, "applying `DOWN` migrations, progress: {num}/{}", migrations.len());
 
-        let migration = MigrationInfo::new_unparsed(plugin_identity, filename.clone());
-        match read_migration_queries_from_file_async(migration) {
-            Ok(migration) => {
-                down_single_file_with_commit(
-                    &plugin_identity.name,
-                    &migration,
-                    &SBroadApplier,
-                    deadline,
-                );
-            }
-            Err(e) => {
-                tlog!(Error, "Rollback DOWN migration error: {e}");
-            }
-        }
+        let migration = MigrationInfo::new_unparsed(plugin_ident, filename.clone());
+        let migration =
+            match read_migration_queries_from_file_async(migration, plugin_ident, storage) {
+                Ok(migration) => migration,
+                Err(e) => {
+                    tlog!(Error, "Rollback DOWN migration error: {e}");
+                    continue;
+                }
+            };
+
+        down_single_file_with_commit(&plugin_ident.name, &migration, &SBroadApplier, deadline);
     }
     #[rustfmt::skip]
     tlog!(Info, "applying `DOWN` migrations, progress: {0}/{0}", migrations.len());
@@ -626,6 +732,7 @@ pub fn apply_down_migrations(
 mod tests {
     use super::*;
     use pretty_assertions::assert_eq;
+    use rmpv::Utf8String;
     use std::cell::RefCell;
 
     #[track_caller]
@@ -635,7 +742,7 @@ mod tests {
             filename_from_manifest: "test.db".into(),
             ..Default::default()
         };
-        parse_migration_queries(sql, &mut migration)?;
+        parse_migration_queries(sql, &mut migration, &HashMap::new())?;
         Ok(migration)
     }
 
@@ -859,6 +966,114 @@ sql_command_3;
             &["sql_command_1;", "sql_command_3;"],
         );
     }
+
+    #[test]
+    fn test_find_placeholder() {
+        let query_string = "SELECT @_plugin_config.kek FROM bubba".to_owned();
+        let placeholder = find_placeholder(&query_string).expect("placeholder must be found");
+        assert_eq!(
+            placeholder,
+            Placeholder {
+                variable: "kek".to_owned(),
+                start: 7,
+                end: 26,
+            }
+        );
+
+        // check a name containing underscores and digits
+        let query_string = "SELECT @_plugin_config.kek_91 FROM bubba".to_owned();
+        let placeholder = find_placeholder(&query_string).expect("placeholder must be found");
+
+        assert_eq!(
+            placeholder,
+            Placeholder {
+                variable: "kek_91".to_owned(),
+                start: 7,
+                end: 29,
+            }
+        );
+
+        let query_string = "SELECT @_plugin_config.kek.with.dot FROM bubba".to_owned();
+        let placeholder_with_service =
+            find_placeholder(&query_string).expect("placeholder must be found");
+        assert_eq!(
+            placeholder_with_service,
+            Placeholder {
+                variable: "kek".to_owned(),
+                start: 7,
+                end: 26,
+            }
+        );
+
+        // check case at the end of the query
+        let query_string = "foo bar @_plugin_config.kek".to_owned();
+        let placeholder = find_placeholder(&query_string).expect("placeholder must be found");
+
+        assert_eq!(
+            placeholder,
+            Placeholder {
+                variable: "kek".to_owned(),
+                start: 8,
+                end: 27,
+            }
+        );
+
+        // at the end, with a trailing underscore
+        let query_string = "foo bar @_plugin_config.kek_".to_owned();
+        let placeholder = find_placeholder(&query_string).expect("placeholder must be found");
+
+        assert_eq!(
+            placeholder,
+            Placeholder {
+                variable: "kek_".to_owned(),
+                start: 8,
+                end: 28,
+            }
+        );
+
+        let query_string = "SELECT 1";
+        assert!(find_placeholder(&query_string).is_none());
+    }
+
+    fn migration_context() -> HashMap<String, rmpv::Value> {
+        let mut substitutions = HashMap::new();
+        substitutions.insert(
+            String::from("var_a"),
+            rmpv::Value::String(Utf8String::from("value_123")),
+        );
+
+        substitutions.insert(
+            String::from("var_b_longer_name"),
+            rmpv::Value::String(Utf8String::from("value_123_also_long")),
+        );
+
+        substitutions
+    }
+
+    #[test]
+    fn test_substitute_config_placeholder_one() {
+        let mut query_string = "SELECT @_plugin_config.var_a".to_owned();
+
+        let substitutions = migration_context();
+
+        assert_eq!(
+            substitute_config_placeholders(&mut query_string, 1, &substitutions).unwrap(),
+            "SELECT value_123"
+        )
+    }
+
+    #[test]
+    fn test_substitute_config_placeholders_many() {
+        let query_string =
+            "SELECT @_plugin_config.var_a @@_plugin_config.var_b_longer_name".to_owned();
+
+        let substitutions = migration_context();
+
+        assert_eq!(
+            substitute_config_placeholders(&mut query_string, 1, &substitutions).unwrap(),
+            "SELECT value_123 @value_123_also_long"
+        )
+    }
 }
 
 mod tests_internal {
diff --git a/src/plugin/mod.rs b/src/plugin/mod.rs
index 176adf77a58575a487117d08a60f5588c8b146b1..3688b7594c78b85848a3a296afee0adad8e66a6e 100644
--- a/src/plugin/mod.rs
+++ b/src/plugin/mod.rs
@@ -772,7 +772,7 @@ pub fn migration_down(ident: PluginIdentifier, timeout: Duration) -> traft::Resu
     }
 
     lock::try_acquire(deadline)?;
-    migration::apply_down_migrations(&ident, &migration_list, deadline);
+    migration::apply_down_migrations(&ident, &migration_list, deadline, &node.storage);
     lock::release(deadline)?;
 
     Ok(())
@@ -1059,7 +1059,7 @@ pub fn remove_plugin(
             return Err(Error::other("attempt to remove plugin with applied `UP` migrations"));
         }
         lock::try_acquire(deadline)?;
-        migration::apply_down_migrations(ident, &migration_list, deadline);
+        migration::apply_down_migrations(ident, &migration_list, deadline, &node.storage);
         lock::release(deadline)?;
     } else if /* migration_list.is_empty() && */ drop_data {
         tlog!(Info, "`DOWN` migrations are up to date");
@@ -1202,7 +1202,17 @@ pub fn change_config_atom(
                 })
                 .collect();
 
-            let current_cfg = node.storage.plugin_config.get_by_entity(ident, service)?;
+            // When the migration context is changed we do not perform validation,
+            // since we don't have anything to validate against.
+            if service == &migration::CONTEXT_ENTITY {
+                service_config_part.push((service.to_string(), kv));
+                continue;
+            }
+
+            let current_cfg = node
+                .storage
+                .plugin_config
+                .get_by_entity_as_mp(ident, service)?;
             let mut current_cfg = match current_cfg {
                 Value::Nil => return Err(PluginError::UpdateEmptyConfig.into()),
                 Value::Map(cfg) => cfg,
diff --git a/src/storage.rs b/src/storage.rs
index 389a59edd5b20879dc8ec7210bf93bf63919accf..281385e53dac71f5ae070085d2574cb97725dad7 100644
--- a/src/storage.rs
+++ b/src/storage.rs
@@ -3898,9 +3898,40 @@ impl PluginConfig {
         Ok(())
     }
 
+    pub fn get(
+        &self,
+        ident: &PluginIdentifier,
+        entity: &str,
+        key: &str,
+    ) -> tarantool::Result<Option<PluginConfigRecord>> {
+        self.space
+            .get(&(&ident.name, &ident.version, entity, key))?
+            .map(|t| t.decode())
+            .transpose()
+    }
+
+    pub fn get_by_entity(
+        &self,
+        ident: &PluginIdentifier,
+        entity: &str,
+    ) -> tarantool::Result<HashMap<String, rmpv::Value>> {
+        let cfg_records = self
+            .space
+            .select(IteratorType::Eq, &(&ident.name, &ident.version, entity))?;
+
+        let mut result = HashMap::new();
+        for tuple in cfg_records {
+            let cfg_record: PluginConfigRecord = tuple.decode()?;
+            result.insert(cfg_record.key, cfg_record.value);
+        }
+
+        Ok(result)
+    }
+
     /// Return configuration for service or extension.
+    /// The result is represented as a MessagePack Map; if there is nothing, MessagePack Nil is used.
     #[inline]
-    pub fn get_by_entity(
+    pub fn get_by_entity_as_mp(
         &self,
         ident: &PluginIdentifier,
         entity: &str,
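
A hedged call-site sketch of the difference between the two accessors after the rename: `get_by_entity` yields one typed entry per stored row, while `get_by_entity_as_mp` (assuming, per its doc comment, that it returns `tarantool::Result<rmpv::Value>`) packs everything into a single Map, or Nil when nothing is stored. `storage` and `ident` are assumed to be in scope.

```rust
use std::collections::HashMap;

fn inspect_config(
    storage: &Clusterwide,
    ident: &PluginIdentifier,
) -> tarantool::Result<()> {
    // Typed view: one entry per row of the plugin config space.
    let rows: HashMap<String, rmpv::Value> =
        storage.plugin_config.get_by_entity(ident, "migration_context")?;
    for (key, value) in &rows {
        println!("{key} = {value}");
    }

    // MessagePack view: ready to feed into `rmp_serde::encode::to_vec_named`.
    match storage.plugin_config.get_by_entity_as_mp(ident, "migration_context")? {
        rmpv::Value::Nil => println!("no config stored"),
        rmpv::Value::Map(pairs) => println!("{} entries", pairs.len()),
        other => println!("unexpected shape: {other}"),
    }
    Ok(())
}
```
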
diff --git a/src/traft/node.rs b/src/traft/node.rs
index c6e969df9a411d03da4eab550a0689d54b607200..f7d373e6c6028db7bb48301c03a3009607087baa 100644
--- a/src/traft/node.rs
+++ b/src/traft/node.rs
@@ -12,6 +12,7 @@ use crate::has_states;
 use crate::instance::Instance;
 use crate::kvcell::KVCell;
 use crate::loop_start;
+use crate::plugin::migration;
 use crate::proc_name;
 use crate::reachability::instance_reachability_manager;
 use crate::reachability::InstanceReachabilityManagerRef;
@@ -1249,7 +1250,7 @@ impl NodeImpl {
                     let old_config = self
                         .storage
                         .plugin_config
-                        .get_by_entity(&ident, &service_name)
+                        .get_by_entity_as_mp(&ident, &service_name)
                         .expect("storage should not fail");
 
                     self.storage
@@ -1396,6 +1397,15 @@ impl NodeImpl {
 
             Op::Plugin(PluginRaftOp::PluginConfigPartialUpdate { ident, updates }) => {
                 for (service, config_part) in updates {
+                    if service == migration::CONTEXT_ENTITY {
+                        self.storage
+                            .plugin_config
+                            .replace_many(&ident, &service, config_part)
+                            .expect("storage should not fail");
+
+                        continue;
+                    }
+
                     let maybe_service = self
                         .storage
                         .services
@@ -1406,7 +1416,7 @@ impl NodeImpl {
                         let old_cfg = self
                             .storage
                             .plugin_config
-                            .get_by_entity(&ident, &svc.name)
+                            .get_by_entity_as_mp(&ident, &svc.name)
                             .expect("storage should not fail");
                         self.storage
                             .plugin_config
@@ -1415,7 +1425,7 @@ impl NodeImpl {
                         let new_cfg = self
                             .storage
                             .plugin_config
-                            .get_by_entity(&ident, &svc.name)
+                            .get_by_entity_as_mp(&ident, &svc.name)
                             .expect("storage should not fail");
 
                         let new_raw_cfg =
diff --git a/test/conftest.py b/test/conftest.py
index 18f238a6a4219cb05e1afa0c11e08b02ae2a4479..5b0f318648721256737f09ccc2f06ab7f77d5137 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -2031,6 +2031,12 @@ def build_profile() -> str:
     return os.environ.get("BUILD_PROFILE", "dev")
 
 
+def get_test_dir():
+    test_dir = Path(__file__).parent
+    assert test_dir.name == "test"
+    return test_dir
+
+
 @pytest.fixture(scope="session")
 def binary_path(cargo_build: None) -> str:
     """Path to the picodata binary, e.g. "./target/debug/picodata"."""
@@ -2048,10 +2054,6 @@ def binary_path(cargo_build: None) -> str:
 
     binary_path = os.path.realpath(os.path.join(target, f"{profile}/picodata"))
 
-    # Copy the test plugin library into the appropriate location
-    test_dir = os.path.dirname(__file__)
-    assert test_dir.endswith("test")
-
     ext = None
     match sys.platform:
         case "linux":
@@ -2059,6 +2061,8 @@ def binary_path(cargo_build: None) -> str:
         case "darwin":
             ext = "dylib"
 
+    test_dir = get_test_dir()
+    # Copy the test plugin library into the appropriate location
     source = f"{os.path.dirname(binary_path)}/libtestplug.{ext}"
     destinations = [
         f"{test_dir}/testplug/testplug/0.1.0/libtestplug.{ext}",
@@ -2072,6 +2076,7 @@ def binary_path(cargo_build: None) -> str:
         f"{test_dir}/testplug/testplug_w_migration_2/0.1.0/libtestplug.{ext}",
         f"{test_dir}/testplug/testplug_w_migration/0.2.0/libtestplug.{ext}",
         f"{test_dir}/testplug/testplug_w_migration/0.2.0_broken/libtestplug.{ext}",
+        f"{test_dir}/testplug/testplug_w_migration_in_tier/0.1.0/libtestplug.{ext}",
         f"{test_dir}/testplug/testplug_sdk/0.1.0/libtestplug.{ext}",
     ]
     for destination in destinations:
diff --git a/test/int/test_plugin.py b/test/int/test_plugin.py
index 78119e4980ef24476aa1b0b27813de365d4b918e..458ffa77bccb84492ba1cb571347bb8f540e17e3 100644
--- a/test/int/test_plugin.py
+++ b/test/int/test_plugin.py
@@ -7,6 +7,8 @@ import msgpack  # type: ignore
 import os
 import hashlib
 from pathlib import Path
+
+import yaml
 from conftest import (
     Cluster,
     ErrorCode,
@@ -14,11 +16,12 @@ from conftest import (
     Retriable,
     Instance,
     TarantoolError,
+    get_test_dir,
     log_crawler,
     assert_starts_with,
 )
 from decimal import Decimal
-import requests  # type: ignore
+import requests
 import signal
 
 _3_SEC = 3
@@ -2858,3 +2861,98 @@ def test_set_string_values_in_config(cluster: Cluster):
         set_service_3_test_type("\"['1', '2']\"")
     with pytest.raises(TarantoolError, match="rule parsing error"):
         set_service_3_test_type("['1', '2']")
+
+
+MANIFEST_WITH_MIGRATION = {
+    "description": "plugin for test purposes",
+    "name": "testplug_w_migration_in_tier",
+    "version": "0.1.0",
+    "services": [],
+    "migration": ["migration.sql"],
+}
+
+
+MIGRATION_OK_IN_TIER = """
+-- pico.UP
+
+CREATE TABLE author (id INTEGER NOT NULL, name TEXT NOT NULL, PRIMARY KEY (id))
+USING memtx
+DISTRIBUTED BY ("id") IN TIER @_plugin_config.stringy_string;
+
+-- pico.DOWN
+DROP TABLE author;
+"""
+
+MIGRATION_REF_MISSING_VAR = """
+-- pico.UP
+
+CREATE TABLE author (id INTEGER NOT NULL, name TEXT NOT NULL, PRIMARY KEY (id))
+USING memtx
+DISTRIBUTED BY ("id") IN TIER @_plugin_config.bubba;
+
+-- pico.DOWN
+DROP TABLE author;
+"""
+
+
+def dump_manifest_and_migration(migration: str, to: Path):
+    manifest_path = to / "manifest.yaml"
+    migration_path = to / "migration.sql"
+
+    migration_path.write_text(migration)
+    manifest_path.write_text(yaml.safe_dump(MANIFEST_WITH_MIGRATION))
+
+
+def test_plugin_migration_placeholder_substitution(cluster: Cluster):
+    test_dir = get_test_dir()
+    cluster.set_config_file(
+        yaml="""
+        cluster:
+            name: test
+            tier:
+                default:
+                nondefault:
+    """
+    )
+
+    i1 = cluster.add_instance(tier="nondefault")
+
+    plugin = "testplug_w_migration_in_tier"
+
+    plug_path = test_dir / "testplug" / plugin / "0.1.0"
+    dump_manifest_and_migration(MIGRATION_OK_IN_TIER, plug_path)
+
+    # happy path, valid migration, everything is ok
+    i1.sql(f'CREATE PLUGIN "{plugin}" 0.1.0')
+
+    i1.sql(
+        f"""
+        ALTER PLUGIN "{plugin}" 0.1.0 SET
+            migration_context.stringy_string = \'"nondefault"\'
+        """
+    )
+
+    i1.sql(f'ALTER PLUGIN "{plugin}" MIGRATE TO 0.1.0')
+
+    assert i1.sql("SELECT * FROM author") == []
+    distribution = i1.sql("SELECT distribution FROM _pico_table WHERE name = 'author'")[
+        0
+    ][0]
+    tier = distribution["ShardedImplicitly"][2]
+    assert tier == "nondefault"
+
+    i1.sql(f'DROP PLUGIN "{plugin}" 0.1.0 WITH DATA')
+
+    with pytest.raises(TarantoolError, match='table with name "author" not found'):
+        assert i1.sql("SELECT * FROM author") == []
+
+    # reference missing variable
+    dump_manifest_and_migration(MIGRATION_REF_MISSING_VAR, plug_path)
+    i1.sql(f'CREATE PLUGIN "{plugin}" 0.1.0')
+
+    with pytest.raises(
+        TarantoolError, match="no key named bubba found in migration context at line 6"
+    ):
+        i1.sql(f'ALTER PLUGIN "{plugin}" MIGRATE TO 0.1.0')
+
+    i1.sql(f'DROP PLUGIN "{plugin}" 0.1.0 WITH DATA')
diff --git a/test/testplug/src/lib.rs b/test/testplug/src/lib.rs
index 46e5741f3c10b12fd6a2e7d75b9de135e56d1c6a..6f5908e4a326a4c546ae399089dd9084c20c8bff 100644
--- a/test/testplug/src/lib.rs
+++ b/test/testplug/src/lib.rs
@@ -640,6 +640,17 @@ impl Service for ServiceWithRpcTests {
     }
 }
 
+#[derive(Debug, Serialize, Deserialize)]
+struct StringyConfig {
+    stringy_string: String,
+}
+
+struct ServiceWithStringConfigValue;
+
+impl Service for ServiceWithStringConfigValue {
+    type Config = StringyConfig;
+}
+
 // Ensures that macros usage at least compiles.
 #[tarantool::proc]
 fn example_stored_proc() {}
@@ -669,4 +680,11 @@ pub fn service_registrar(reg: &mut ServiceRegistry) {
 
     // 0.2.0 broken version cause inconsistent migration
     reg.add("testservice_2", "0.2.0_broken", Service2::new);
+
+    reg.add("testservice_w_string_conf", "0.1.0", || {
+        ServiceWithStringConfigValue
+    });
+    reg.add("testservice_w_string_conf2", "0.1.0", || {
+        ServiceWithStringConfigValue
+    });
 }
diff --git a/test/testplug/testplug_w_migration_in_tier/0.1.0/.gitignore b/test/testplug/testplug_w_migration_in_tier/0.1.0/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..3ff75d953c6aece85ba7a6b5d911ab4194fdff5e
--- /dev/null
+++ b/test/testplug/testplug_w_migration_in_tier/0.1.0/.gitignore
@@ -0,0 +1,2 @@
+manifest.yaml
+*.sql