diff --git a/CHANGELOG.md b/CHANGELOG.md
index 88b0ccb7e5883a1232d8b8758f171f91b2836a29..c8a8502a9116c749b54b3e6dff11c9426021eb01 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,15 @@ with the `YY.0M.MICRO` scheme.
 - Allow specifying `picodata connect [user@][host][:port]` format. It
   overrides the `--user` option.
 
+- _Clusterwide SQL_ now uses Tarantool's `key_def` module to determine
+  tuple buckets. Note that executing SQL queries on spaces that were
+  sharded with a different hash function returns incorrect results. For
+  an example, refer to `pico.help('create_space')`.
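+
+  A minimal sketch of the matching bucket calculation (it mirrors the
+  example in `pico.help('create_space')` and assumes an initialized
+  `vshard` router):
+
+  ```lua
+  -- Hash the sharding key with key_def, the same way Clusterwide SQL
+  -- does, and map the hash onto the configured number of buckets.
+  local key = require('key_def').new({{fieldno = 1, type = 'string'}})
+  local tuple = box.tuple.new({'dragon'})
+  local bucket_id = key:hash(tuple) % vshard.router.bucket_count()
+  ```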
+
+- _Clusterwide SQL_ is now documented in the Lua API. Refer to
+  `pico.help('sql')` for more information.
+
 ### Lua API:
 
 - Update `pico.LUA_API_VERSION`: `1.0.0` -> `1.2.0`
diff --git a/src/luamod.rs b/src/luamod.rs
index ff4d9e514b0e268953aaa1e6a66d6d8078c33132..d060fe576b82fcfa408b7767cff8549c3b8a8b82 100644
--- a/src/luamod.rs
+++ b/src/luamod.rs
@@ -388,6 +388,74 @@ pub(crate) fn setup(args: &args::Run) {
         ),
     );
 
+    // sql
+    ///////////////////////////////////////////////////////////////////////////
+    luamod_set_help_only(
+        &l,
+        "sql",
+        indoc! {r#"
+        pico.sql(query[, params])
+        =========================
+
+        Executes a cluster-wide SQL query.
+
+        1. The query is parsed and validated to build a distributed
+           query plan on the current instance (router).
+        2. The query plan is dispatched to the target instances (storages)
+           slice-by-slice in a bottom-up manner. All intermediate results
+           are stored in the router's memory.
+
+        Params:
+
+            1. query (string)
+            2. params (table), optional
+
+        Returns:
+
+            (table DqlResult) if query retrieves data
+            or
+            (table DmlResult) if query modifies data
+            or
+            (nil, string) in case of an error
+
+        table DqlResult:
+
+            - metadata (table),
+                `{{name = string, type = string}, ...}`, an array of column
+                definitions of the table.
+            - rows (table),
+                `{row, ...}`, the rows returned by the query.
+
+        table DmlResult:
+
+            - row_count (number), the number of rows modified, inserted
+                or deleted by the query.
+
+        Example:
+
+            picodata> -- Insert a row into the 'wonderland' table using parameters
+            picodata> -- as an optional argument.
+            picodata> pico.sql(
+                [[insert into "wonderland" ("property", "value") values (?, ?)]],
+                {"dragon", 13}
+            )
+            ---
+            - row_count: 1
+            ...
+
+            picodata> -- Select a row from the 'wonderland' table.
+            picodata> pico.sql(
+                [[select * from "wonderland" where "property" = 'dragon']]
+            )
+            ---
+            - metadata:
+                - {'name': 'property', 'type': 'string'}
+                - {'name': 'value', 'type': 'integer'}
+              rows:
+                - ['dragon', 13]
+            ...
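+
+            picodata> -- An invalid query returns (nil, string). The
+            picodata> -- error text below is illustrative and may differ.
+            picodata> pico.sql([[select * from "no_such_space"]])
+            ---
+            - null
+            - 'sbroad: space "no_such_space" not found'
+            ...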
+        "#},
+    );
+
     // vclock
     ///////////////////////////////////////////////////////////////////////////
     luamod_set_help_only(
@@ -1107,7 +1175,7 @@ pub(crate) fn setup(args: &args::Run) {
                 name = 'wonderland',
                 format = {
                     {name = 'property', type = 'string', is_nullable = false},
-                    {name = 'value', type = 'any', is_nullable = true}
+                    {name = 'value', type = 'integer', is_nullable = true}
                 },
                 primary_key = {'property'},
                 distribution = 'sharded',
@@ -1115,8 +1183,12 @@ pub(crate) fn setup(args: &args::Run) {
                 timeout = 3,
             })
 
+            -- Calculate an SQL-compatible hash for the bucket id.
+            local key = require('key_def').new({{fieldno = 1, type = 'string'}})
+            local tuple = box.tuple.new({'unicorns'})
+            local bucket_id = key:hash(tuple) % vshard.router.bucket_count()
+
             -- Sharded spaces are updated via vshard api, see [1]
-            local bucket_id = vshard.router.bucket_id_mpcrc32('unicorns')
             vshard.router.callrw(bucket_id, 'box.space.wonderland:insert', {{'unicorns', 12}})
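+            -- Since the bucket id matches the hash used by Clusterwide
+            -- SQL, the inserted row is also visible to SQL queries, e.g.
+            -- pico.sql([[select * from "wonderland"]])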
 
         See also:
diff --git a/src/sql.rs b/src/sql.rs
index d06036a1e398c086bb8ff5ed8935b374580c6fb1..318a927dd889f2a57c942849e0ac8e7f42d97032 100644
--- a/src/sql.rs
+++ b/src/sql.rs
@@ -1,9 +1,11 @@
+//! Cluster-wide SQL query execution.
+
 use crate::traft;
 use ::tarantool::proc;
 use ::tarantool::tuple::{RawByteBuf, RawBytes};
 use sbroad::backend::sql::ir::{EncodedPatternWithParams, PatternWithParams};
 use sbroad_picodata::api::{dispatch_sql, execute_sql};
 
+/// Dispatches a cluster-wide SQL query from the router to the storages.
 #[proc(packed_args)]
 pub fn dispatch_query(encoded_params: EncodedPatternWithParams) -> traft::Result<RawByteBuf> {
     let params = PatternWithParams::from(encoded_params);
@@ -11,6 +13,7 @@ pub fn dispatch_query(encoded_params: EncodedPatternWithParams) -> traft::Result
     Ok(RawByteBuf::from(bytes))
 }
 
+/// Executes a query sub-plan on the local instance (storage).
 #[proc(packed_args)]
 pub fn execute(raw: &RawBytes) -> traft::Result<RawByteBuf> {
     let bytes = execute_sql(raw)?;