diff --git a/CHANGELOG.md b/CHANGELOG.md
index de562be3246f2af9ea2a94a937bd9ce6a9725032..9ab7beded5dc27d96fbd96c046886a9728a385e1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,10 @@ with the `YY.MINOR.MICRO` scheme.
 - New parameters for Vinyl configuration: `bloom_fpr`, `max_tuple_size`, `page_size`, `range_size`,
   `run_count_per_size`, `run_size_ratio`, `read_threads`, `write_threads` and `timeout`.
 
+- `plugin_dir` parameter is renamed to `share_dir` (--plugin-dir -> --share-dir,
+  PICODATA_PLUGIN_DIR -> PICODATA_SHARE_DIR, config: instance.plugin_dir ->
+  instance.share_dir)
+
 ### CLI
 
 - `picodata expel` takes instance uuid instead of instance name.
diff --git a/src/cli/args.rs b/src/cli/args.rs
index 1e4cdce24bc364478a464a369150502d81382f52..10bc4c0dcc83f926c0aa13af26dbd09241c11ad1 100644
--- a/src/cli/args.rs
+++ b/src/cli/args.rs
@@ -189,8 +189,12 @@ pub struct Run {
     /// By default the "admin.sock" in the instance directory is used.
     pub admin_sock: Option<PathBuf>,
 
-    #[clap(long, value_name = "PATH", env = "PICODATA_PLUGIN_DIR")]
-    /// Path to directory with plugin files
+    #[clap(long, value_name = "PATH", env = "PICODATA_SHARE_DIR")]
+    /// Path to directory with plugin installations.
+    pub share_dir: Option<PathBuf>,
+
+    #[clap(long, value_name = "PATH", env = "PICODATA_PLUGIN_DIR", hide = true)]
+    /// Deprecated. Use --share-dir instead.
     pub plugin_dir: Option<PathBuf>,
 
     #[clap(long = "tier", value_name = "TIER", env = "PICODATA_INSTANCE_TIER")]
diff --git a/src/config.rs b/src/config.rs
index eb966f41dac5bc1d3f143e99344c7cdb49231321..4067caa2be8de4eefcdba59966d27f2d69c8e839 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -80,10 +80,6 @@ fn validate_args(args: &args::Run) -> Result<(), Error> {
 static mut GLOBAL_CONFIG: Option<Box<PicodataConfig>> = None;
 
 impl PicodataConfig {
-    // TODO:
-    // fn default() -> Self
-    // which returns an instance of config with all the default parameters.
-    // Also add a command to generate a default config from command line.
     pub fn init(args: args::Run) -> Result<&'static Self, Error> {
         validate_args(&args)?;
 
@@ -149,6 +145,8 @@ Using configuration file '{args_path}'.");
 
         config.set_from_args_and_env(args, &mut parameter_sources)?;
 
+        config.handle_deprecated_parameters(&mut parameter_sources)?;
+
         config.set_defaults_explicitly(&parameter_sources);
 
         config.validate_common()?;
@@ -317,7 +315,12 @@ Using configuration file '{args_path}'.");
             config_from_args.instance.audit = Some(audit_destination);
         }
 
-        config_from_args.instance.plugin_dir = args.plugin_dir;
+        config_from_args.instance.share_dir = args.share_dir;
+
+        #[allow(deprecated)]
+        {
+            config_from_args.instance.plugin_dir = args.plugin_dir;
+        }
 
         if let Some(admin_socket) = args.admin_sock {
             config_from_args.instance.admin_socket = Some(admin_socket);
@@ -363,7 +366,7 @@ Using configuration file '{args_path}'.");
         Ok(())
     }
 
-    fn set_defaults_explicitly(&mut self, parameter_sources: &HashMap<String, ParameterSource>) {
+    fn set_defaults_explicitly(&mut self, parameter_sources: &ParameterSourcesMap) {
         for path in &leaf_field_paths::<Self>() {
             if !matches!(
                 parameter_sources.get(path),
@@ -488,6 +491,88 @@ Using configuration file '{args_path}'.");
         Ok(())
     }
 
+    /// This function handles the usage of deprecated parameters:
+    ///
+    /// - Logs a warning message if deprecated parameter is used instead of replacement
+    ///
+    /// - Returns error in case deprecated parameter and its replacement are
+    ///   specified at the same time
+    ///
+    /// - Moves the value of the deprecated parameter to its replacement field
+    ///
+    /// Only stuff related to deprecated parameters goes here.
+    ///
+    /// Must be called before [`Self::set_defaults_explicitly`]!
+    #[allow(deprecated)]
+    fn handle_deprecated_parameters(
+        &mut self,
+        parameter_sources: &mut ParameterSourcesMap,
+    ) -> Result<(), Error> {
+        // In the future when renaming configuration file parameters just
+        // add a pair of names into this array.
+        let renamed_parameters = &[
+            // (deprecated, use_instead)
+            (
+                config_parameter_path!(instance.plugin_dir),
+                config_parameter_path!(instance.share_dir),
+            ),
+        ];
+
+        // Handle renamed parameters
+        for &(deprecated, use_instead) in renamed_parameters {
+            let value = self
+                .get_field_as_rmpv(deprecated)
+                .expect("this should be tested thoroughly");
+
+            if !value_is_specified(deprecated, &value) {
+                continue;
+            }
+
+            let deprecated_source = parameter_sources.get(deprecated);
+            let deprecated_source =
+                *deprecated_source.expect("the parameter was specified, so source must be set");
+
+            let conflicting_value = self
+                .get_field_as_rmpv(use_instead)
+                .expect("this should be tested thoroughly");
+
+            if value_is_specified(use_instead, &conflicting_value) {
+                let conflicting_source = parameter_sources.get(use_instead);
+                let conflicting_source = *conflicting_source
+                    .expect("the parameter was specified, so source must be set");
+                #[rustfmt::skip]
+                tlog!(Info, "{deprecated} is set to {value} via {deprecated_source}");
+                #[rustfmt::skip]
+                tlog!(Info, "{use_instead} is set to {conflicting_value} via {conflicting_source}");
+
+                #[rustfmt::skip]
+                return Err(Error::invalid_configuration(format!("{deprecated} is deprecated, use {use_instead} instead (cannot use both at the same time)")));
+            }
+
+            #[rustfmt::skip]
+            tlog!(Warning, "{deprecated} is deprecated, use {use_instead} instead");
+
+            // Copy the value from the deprecated parameter to the one that should be used instead
+            self.set_field_from_rmpv(use_instead, &value)
+                .expect("these fields have the same type");
+
+            // Copy the parameter source info as well, so that other parts of the system work correctly.
+            // For example set_defaults_explicitly relies on this.
+            parameter_sources.insert(use_instead.into(), deprecated_source);
+        }
+
+        // TODO: we will likely have more complicated cases of deprecation, not
+        // just renamings of parameters. For example some parameter may be
+        // deprecated because the feature is replaced with a different feature,
+        // for example we don't want users to specify the instance_name directly
+        // or something like this. In this case we would just need to log a
+        // warning if the parameter is specified. Or even more complex cases can
+        // happen like one parameter is replaced with 2 or even more. These
+        // kinds of cases are not covered by the above code unfortunately.
+
+        Ok(())
+    }
+
     /// Does basic config validation. This function checks constraints
     /// applicable for all configuration updates.
     ///
@@ -1021,10 +1106,12 @@ pub struct InstanceConfig {
     #[introspection(config_default = self.instance_dir.as_ref().map(|dir| dir.join("admin.sock")))]
     pub admin_socket: Option<PathBuf>,
 
-    // TODO:
-    // - sepparate config file for common parameters
+    #[deprecated = "use share_dir instead"]
     pub plugin_dir: Option<PathBuf>,
 
+    #[introspection(config_default = "/usr/share/picodata/")]
+    pub share_dir: Option<PathBuf>,
+
     // Skip serializing, so that default config doesn't contain this option,
     // because it's deprecated.
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1142,6 +1229,13 @@ impl InstanceConfig {
         self.shredding
             .expect("is set in PicodataConfig::set_defaults_explicitly")
     }
+
+    #[inline]
+    pub fn share_dir(&self) -> &Path {
+        self.share_dir
+            .as_deref()
+            .expect("is set in PicodataConfig::set_defaults_explicitly")
+    }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/src/lib.rs b/src/lib.rs
index 953f71a8e276a8bda37797c54bb8ebd3d88bd08b..2c72a5b28bd30e02d05b7a899a0b30a6793e07a2 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -12,7 +12,6 @@
 #![allow(clippy::unused_io_amount)]
 #![allow(clippy::bool_assert_comparison)]
 use serde::{Deserialize, Serialize};
-use std::path::Path;
 
 use ::raft::prelude as raft;
 use ::tarantool::error::Error as TntError;
@@ -949,10 +948,6 @@ fn postjoin(
         audit::init(config, raft_id, gen);
     }
 
-    if let Some(ref plugin_dir) = config.instance.plugin_dir {
-        plugin::set_plugin_dir(Path::new(plugin_dir));
-    }
-
     if let Some(addr) = &config.instance.http_listen {
         start_http_server(addr);
         if cfg!(feature = "webui") {
diff --git a/src/plugin/manager.rs b/src/plugin/manager.rs
index bdc068b24c68a09f2689ffbee4ac5427ea970cf9..daf4d39928801667e39d055293b81cad8669ccdf 100644
--- a/src/plugin/manager.rs
+++ b/src/plugin/manager.rs
@@ -1,3 +1,4 @@
+use crate::config::PicodataConfig;
 use crate::info::InstanceInfo;
 use crate::info::PICODATA_VERSION;
 use crate::plugin::rpc;
@@ -5,7 +6,7 @@ use crate::plugin::LibraryWrapper;
 use crate::plugin::PluginError::{PluginNotFound, ServiceCollision};
 use crate::plugin::{
     remove_routes, replace_routes, topology, Manifest, PluginAsyncEvent, PluginCallbackError,
-    PluginError, PluginEvent, PluginIdentifier, Result, Service, PLUGIN_DIR,
+    PluginError, PluginEvent, PluginIdentifier, Result, Service,
 };
 use crate::schema::{PluginDef, ServiceDef, ServiceRouteItem, ServiceRouteKey};
 use crate::storage::Clusterwide;
@@ -113,8 +114,8 @@ impl PluginManager {
     const AVAILABLE_EXT: &'static [&'static str] = &["so", "dylib"];
 
     fn load_plugin_dir(&self, ident: &PluginIdentifier) -> Result<ReadDir> {
-        let plugin_dir = PLUGIN_DIR.with(|dir| dir.lock().clone());
-        let plugin_dir = plugin_dir.join(&ident.name);
+        let share_dir = PicodataConfig::get().instance.share_dir();
+        let plugin_dir = share_dir.join(&ident.name);
         let plugin_dir = plugin_dir.join(&ident.version);
         Ok(fs::read_dir(plugin_dir)?)
     }
diff --git a/src/plugin/migration.rs b/src/plugin/migration.rs
index b719b0e49533e9cbcfdcbf6e28b18d5e96a74990..4c5f5482e6c12c43ecfafef82cc6d5a62ed97f46 100644
--- a/src/plugin/migration.rs
+++ b/src/plugin/migration.rs
@@ -1,8 +1,9 @@
 use crate::cas;
 use crate::cbus::ENDPOINT_NAME;
+use crate::config::PicodataConfig;
+use crate::plugin::PluginIdentifier;
 use crate::plugin::PreconditionCheckResult;
 use crate::plugin::{lock, reenterable_plugin_cas_request};
-use crate::plugin::{PluginIdentifier, PLUGIN_DIR};
 use crate::schema::ADMIN_ID;
 use crate::storage::{Clusterwide, ClusterwideTable};
 use crate::traft::node;
@@ -201,8 +202,8 @@ pub struct MigrationInfo {
 impl MigrationInfo {
     /// Initializes the struct, doesn't read the file yet.
     pub fn new_unparsed(plugin_ident: &PluginIdentifier, filename: String) -> Self {
-        let plugin_dir = PLUGIN_DIR
-            .with(|dir| dir.lock().clone())
+        let share_dir = PicodataConfig::get().instance.share_dir();
+        let plugin_dir = share_dir
             .join(&plugin_ident.name)
             .join(&plugin_ident.version);
         let fullpath = plugin_dir.join(&filename);
diff --git a/src/plugin/mod.rs b/src/plugin/mod.rs
index fdfed062df8155e821ce3ed2a92a9beb311fe9ce..6fdfd8aaf7a533586ad6bc97ebfd4dfc20def3e7 100644
--- a/src/plugin/mod.rs
+++ b/src/plugin/mod.rs
@@ -6,7 +6,6 @@ pub mod migration;
 pub mod rpc;
 pub mod topology;
 
-use once_cell::unsync;
 use picodata_plugin::error_code::ErrorCode;
 use picodata_plugin::plugin::interface::ServiceId;
 use picodata_plugin::plugin::interface::{ServiceBox, ValidatorBox};
@@ -17,7 +16,6 @@ use std::collections::HashMap;
 use std::fmt::{Display, Formatter};
 use std::fs::File;
 use std::io;
-use std::path::{Path, PathBuf};
 use std::rc::Rc;
 use std::time::Duration;
 use tarantool::error::{BoxError, IntoBoxError};
@@ -25,6 +23,7 @@ use tarantool::fiber;
 use tarantool::time::Instant;
 
 use crate::cas::Range;
+use crate::config::PicodataConfig;
 use crate::info::InstanceInfo;
 use crate::info::PICODATA_VERSION;
 use crate::plugin::lock::PicoPropertyLock;
@@ -41,20 +40,6 @@ use crate::traft::{node, RaftIndex};
 use crate::util::effective_user_id;
 use crate::{cas, tlog, traft};
 
-const DEFAULT_PLUGIN_DIR: &'static str = "/usr/share/picodata/";
-
-thread_local! {
-    // TODO this will be removed and replaced with the config (in future)
-    static PLUGIN_DIR: unsync::Lazy<fiber::Mutex<PathBuf>> =
-        unsync::Lazy::new(|| fiber::Mutex::new(PathBuf::from(DEFAULT_PLUGIN_DIR)));
-}
-
-/// Set the new plugin directory.
-/// Search of manifest and shared objects will take place in this directory.
-pub fn set_plugin_dir(path: &Path) {
-    PLUGIN_DIR.with(|dir| *dir.lock() = path.to_path_buf());
-}
-
 #[derive(thiserror::Error, Debug)]
 pub enum PluginError {
     #[error("Plugin `{0}` already exists")]
@@ -270,9 +255,10 @@ impl Manifest {
         let plugin_name = ident.name.as_str();
         let version = ident.version.as_str();
 
-        // TODO move this into config (in future)
-        let plugin_dir = PLUGIN_DIR.with(|dir| dir.lock().clone());
-        let manifest_path = plugin_dir.join(format!("{plugin_name}/{version}/manifest.yaml"));
+        let share_dir = PicodataConfig::get().instance.share_dir();
+        let plugin_dir = share_dir.join(plugin_name);
+        let plugin_dir = plugin_dir.join(version);
+        let manifest_path = plugin_dir.join("manifest.yaml");
         // TODO non-blocking needed?
         let file = File::open(&manifest_path).map_err(|e| {
             PluginError::ManifestNotFound(manifest_path.to_string_lossy().to_string(), e)
diff --git a/test/conftest.py b/test/conftest.py
index 985d599690bc05c573d45d81792f5b6c059f870d..9ec0d781717530c361f7804bfa841d1d6acab61f 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -563,7 +563,7 @@ class Instance:
     cwd: str
     color_code: str
 
-    plugin_dir: str | None = None
+    share_dir: str | None = None
     cluster_name: str | None = None
     _instance_dir: str | None = None
     peers: list[str] = field(default_factory=list)
@@ -644,7 +644,7 @@ class Instance:
             *([f"--instance-name={self.name}"] if self.name else []),
             *([f"--replicaset-name={self.replicaset_name}"] if self.replicaset_name else []),
             *([f"--instance-dir={self._instance_dir}"] if self._instance_dir else []),
-            *([f"--plugin-dir={self.plugin_dir}"] if self.plugin_dir else []),
+            *([f"--share-dir={self.share_dir}"] if self.share_dir else []),
             *([f"--listen={self.listen}"] if self.listen else []),
             *([f"--pg-listen={self.pg_listen}"] if self.pg_listen else []),
             *([f"-c instance.pg.ssl={self.pg_ssl}"] if self.pg_ssl else []),
@@ -1686,7 +1686,7 @@ class Cluster:
     instances: list[Instance] = field(default_factory=list)
     config_path: str | None = None
     service_password_file: str | None = None
-    plugin_dir: str | None = None
+    share_dir: str | None = None
 
     def __repr__(self):
         return f'Cluster("{self.base_host}", n={len(self.instances)})'
@@ -1805,7 +1805,7 @@ class Cluster:
             name=name,
             replicaset_name=replicaset_name,
             _instance_dir=f"{self.instance_dir}/i{i}",
-            plugin_dir=self.plugin_dir,
+            share_dir=self.share_dir,
             host=self.base_host,
             port=port,
             pg_host=self.base_host,
@@ -2307,12 +2307,12 @@ def binary_path() -> str:
 
 
 def copy_plugin_library(
-    binary_path: Path | str, plugin_dir: Path | str, file_name: str = "libtestplug"
+    binary_path: Path | str, share_dir: Path | str, file_name: str = "libtestplug"
 ):
     ext = dynamic_library_extension()
     lib_name = f"{file_name}.{ext}"
     source = Path(binary_path).parent / lib_name
-    destination = Path(plugin_dir) / lib_name
+    destination = Path(share_dir) / lib_name
     if os.path.exists(destination) and filecmp.cmp(source, destination):
         return
     eprint(f"Copying '{source}' to '{destination}'")
@@ -2378,12 +2378,12 @@ def cluster(
     """Return a `Cluster` object capable of deploying test clusters."""
     # FIXME: instead of os.getcwd() construct a path relative to os.path.realpath(__file__)
     # see how it's done in def binary_path()
-    plugin_dir = os.getcwd() + "/test/testplug"
+    share_dir = os.getcwd() + "/test/testplug"
     cluster = Cluster(
         binary_path=binary_path_fixt,
         id=next(cluster_names),
         instance_dir=tmpdir,
-        plugin_dir=plugin_dir,
+        share_dir=share_dir,
         base_host=BASE_HOST,
         port_distributor=port_distributor,
     )
diff --git a/test/int/test_config_file.py b/test/int/test_config_file.py
index 87037a98e486da211b75187804e004d5baa65641..2bf8c02231d7670d58a2dceee4464b0962a28203 100644
--- a/test/int/test_config_file.py
+++ b/test/int/test_config_file.py
@@ -90,6 +90,7 @@ instance:
             name=dict(value="my-instance", source="config_file"),
             replicaset_name=dict(value="my-replicaset", source="config_file"),
             tier=dict(value="deluxe", source="config_file"),
+            share_dir=dict(value="/usr/share/picodata/", source="default"),
             audit=dict(
                 value=f"{instance_dir}/audit.log", source="commandline_or_environment"
             ),
@@ -423,7 +424,9 @@ def test_picodata_default_config(cluster: Cluster):
     default_config_3 = data.decode()
     assert default_config.strip() == default_config_3.strip()
 
+    #
     # Check that running with the generated config file works
+    #
     cluster.set_config_file(yaml=default_config)
     i = cluster.add_instance(wait_online=False)
 
@@ -487,7 +490,7 @@ def test_output_config_parameters(cluster: Cluster):
         'instance.listen':
         'instance.advertise_address':
         'instance.admin_socket':
-        'instance.plugin_dir':
+        'instance.share_dir':
         'instance.audit':
         'instance.shredding': false
         'instance.log.level': "verbose"
diff --git a/test/int/test_plugin.py b/test/int/test_plugin.py
index 52349b29a9227633c43c864d58b042b9a9754e72..f6e4107b9b924899c20f22398de7542bd33fd3fb 100644
--- a/test/int/test_plugin.py
+++ b/test/int/test_plugin.py
@@ -400,8 +400,8 @@ def init_dummy_plugin(
     library_name: str = "libtestplug",
 ) -> Path:
     """Does the following:
-    - Setup --plugin-dir option for the cluster
-    - Create plugin directory <cluster-plugin-dir>/<plugin>/<version>
+    - Setup --share-dir option for the cluster
+    - Create plugin directory <share-dir>/<plugin>/<version>
     - Create manifest.yaml in that directory with provided info
     - Copy the <library_name>.so into that directory
 
@@ -410,9 +410,9 @@ def init_dummy_plugin(
     Does *NOT* create migration files, you have to do it yourself.
 
     """
-    cluster.plugin_dir = cluster.instance_dir
+    cluster.share_dir = cluster.instance_dir
     # Initialize the plugin directory
-    plugin_dir = Path(cluster.plugin_dir) / plugin / version
+    plugin_dir = Path(cluster.share_dir) / plugin / version
     os.makedirs(plugin_dir)
 
     # Copy plugin library
@@ -438,7 +438,7 @@ def init_dummy_plugin(
 
 def test_invalid_manifest_plugin(cluster: Cluster):
     # This must be set before any instances start up
-    cluster.plugin_dir = cluster.instance_dir
+    cluster.share_dir = cluster.instance_dir
 
     i1, i2 = cluster.deploy(instance_count=2)
 
@@ -1088,8 +1088,8 @@ def test_migration_separate_command(cluster: Cluster):
 
     # check migration file checksums are calculated correctly
     rows = i1.sql(""" SELECT "migration_file", "hash" FROM "_pico_plugin_migration" """)
-    assert i1.plugin_dir
-    plugin_dir = os.path.join(i1.plugin_dir, _PLUGIN_WITH_MIGRATION, "0.1.0")
+    assert i1.share_dir
+    plugin_dir = os.path.join(i1.share_dir, _PLUGIN_WITH_MIGRATION, "0.1.0")
     for filename, checksum in rows:
         fullpath = os.path.join(plugin_dir, filename)
         with open(fullpath, "rb") as f: