Commit 70fe8e63 authored by Denis Smirnov, committed by Yaroslav Dynnikov

doc: update SQL documentation and changelog

parent 4afcd628
1 merge request: !600 "Update cluster-wide SQL"
@@ -15,6 +15,15 @@ with the `YY.0M.MICRO` scheme.
- Allow specifying `picodata connect [user@][host][:port]` format. It
  overrides the `--user` option.
- _Clusterwide SQL_ now uses an internal module called `key_def` to
  determine tuple buckets. Previously, if a space was sharded with a
  different hash function, SQL queries on that space returned incorrect
  results (see the sketch after this list). For more examples, refer to
  `pico.help('create_space')`.
- _Clusterwide SQL_ now features Lua documentation. Refer to
`pico.help('sql')` for more information.
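
A minimal sketch of the bucket calculation this refers to, assuming a
running instance with an initialized vshard router (all the calls used here
also appear in the examples further down in this commit):

    -- Bucket id as clusterwide SQL now derives it: a key_def hash of
    -- the sharding key, taken modulo the total bucket count.
    local key = require('key_def').new({{fieldno = 1, type = 'string'}})
    local tuple = box.tuple.new({'dragon'})
    local sql_bucket = key:hash(tuple) % vshard.router.bucket_count()
    -- The mpcrc32-based id may differ for the same key; that mismatch
    -- is what used to produce inaccurate SQL results.
    local old_bucket = vshard.router.bucket_id_mpcrc32('dragon')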
### Lua API:
- Update `pico.LUA_API_VERSION`: `1.0.0` -> `1.2.0`
...
@@ -388,6 +388,74 @@ pub(crate) fn setup(args: &args::Run) {
        ),
    );
    // sql
    ///////////////////////////////////////////////////////////////////////////
    luamod_set_help_only(
        &l,
        "sql",
        indoc! {r#"
        pico.sql(query[, params])
        =========================

        Executes a cluster-wide SQL query.

        1. The query is parsed and validated to build a distributed
           query plan on the current instance (router).
        2. The query plan is dispatched to the target instances (storages)
           slice-by-slice in a bottom-up manner. All intermediate results
           are stored in the router's memory.

        Params:

            1. query (string)
            2. params (table), optional

        Returns:

            (table DqlResult) if the query retrieves data
            or
            (table DmlResult) if the query modifies data
            or
            (nil, string) in case of an error

        table DqlResult:

            - metadata (table),
              `{{name = string, type = string}, ...}`, an array of column
              definitions of the table.
            - rows (table),
              `{row, ...}`, essentially the result of the query.

        table DmlResult:

            - row_count (number), the number of rows modified, inserted or
              deleted by the query.

        Example:

            picodata> -- Insert a row into the 'wonderland' table using parameters
            picodata> -- as an optional argument.
            picodata> pico.sql(
                [[insert into "wonderland" ("property", "value") values (?, ?)]],
                {"dragon", 13}
            )
            ---
            - row_count: 1
            ...

            picodata> -- Select a row from the 'wonderland' table.
            picodata> pico.sql(
                [[select * from "wonderland" where "property" = 'dragon']]
            )
            ---
            - metadata:
              - {'name': 'property', 'type': 'string'}
              - {'name': 'value', 'type': 'integer'}
              rows:
              - ['dragon', 13]
            ...
        "#},
    );
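
    For instance, the (nil, string) error convention described in the help
    text above can be consumed like this (a minimal console sketch; the
    "wonderland" table from the example is assumed to exist):

        -- pico.sql returns either a result table or nil plus an error string.
        local res, err = pico.sql(
            [[select "value" from "wonderland" where "property" = ?]],
            {'dragon'}
        )
        if err ~= nil then
            error(err)
        end
        -- On success, res.metadata describes the columns and res.rows
        -- holds the selected tuples, e.g. {{13}}.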
    // vclock
    ///////////////////////////////////////////////////////////////////////////
    luamod_set_help_only(
@@ -1107,7 +1175,7 @@ pub(crate) fn setup(args: &args::Run) {
    name = 'wonderland',
    format = {
        {name = 'property', type = 'string', is_nullable = false},
        {name = 'value', type = 'integer', is_nullable = true}
    },
    primary_key = {'property'},
    distribution = 'sharded',
@@ -1115,8 +1183,12 @@ pub(crate) fn setup(args: &args::Run) {
    timeout = 3,
})
-- Calculate an SQL-compatible hash for the bucket id.
local key = require('key_def').new({{fieldno = 1, type = 'string'}})
local tuple = box.tuple.new({'unicorns'})
local bucket_id = key:hash(tuple) % vshard.router.bucket_count()

-- Sharded spaces are updated via vshard api, see [1]
vshard.router.callrw(bucket_id, 'box.space.wonderland:insert', {{'unicorns', 12}})
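
Because this bucket id matches the one clusterwide SQL computes via
key_def, the row inserted through the vshard api is also visible to SQL;
a minimal round-trip check (assuming the insert above succeeded):

    local res = pico.sql(
        [[select * from "wonderland" where "property" = 'unicorns']]
    )
    -- res.rows is expected to contain {'unicorns', 12}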
See also:
...
//! Cluster-wide SQL query execution.
use crate::traft;
use ::tarantool::proc;
use ::tarantool::tuple::{RawByteBuf, RawBytes};
use sbroad::backend::sql::ir::{EncodedPatternWithParams, PatternWithParams};
use sbroad_picodata::api::{dispatch_sql, execute_sql};
/// Dispatches a query to the cluster.
#[proc(packed_args)]
pub fn dispatch_query(encoded_params: EncodedPatternWithParams) -> traft::Result<RawByteBuf> {
    let params = PatternWithParams::from(encoded_params);
@@ -11,6 +13,7 @@ pub fn dispatch_query(encoded_params: EncodedPatternWithParams) -> traft::Result
    Ok(RawByteBuf::from(bytes))
}
/// Executes a query sub-plan on the local node.
#[proc(packed_args)]
pub fn execute(raw: &RawBytes) -> traft::Result<RawByteBuf> {
    let bytes = execute_sql(raw)?;
...