Skip to content
GitLab
Explore
Sign in
Register
Primary navigation
Search or go to…
Project
P
picodata
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Container Registry
Model registry
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
core
picodata
Commits
70fe8e63
Commit
70fe8e63
authored
1 year ago
by
Denis Smirnov
Committed by
Yaroslav Dynnikov
1 year ago
Browse files
Options
Downloads
Patches
Plain Diff
doc: update SQL documentation and changelog
parent
4afcd628
No related branches found
Branches containing commit
No related tags found
Tags containing commit
1 merge request
!600
Update cluster-wide SQL
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
CHANGELOG.md
+9
-0
9 additions, 0 deletions
CHANGELOG.md
src/luamod.rs
+74
-2
74 additions, 2 deletions
src/luamod.rs
src/sql.rs
+3
-0
3 additions, 0 deletions
src/sql.rs
with
86 additions
and
2 deletions
CHANGELOG.md
+
9
−
0
View file @
70fe8e63
...
@@ -15,6 +15,15 @@ with the `YY.0M.MICRO` scheme.
...
@@ -15,6 +15,15 @@ with the `YY.0M.MICRO` scheme.
-
Allow specifying
`picodata connect [user@][host][:port]`
format. It
-
Allow specifying
`picodata connect [user@][host][:port]`
format. It
overrides the
`--user`
option.
overrides the
`--user`
option.
-
_Clusterwide SQL_ now uses an internal module called
`key_def`
to
determine tuple buckets. In case the spaces were sharded using a
different hash function, executing SQL queries on these spaces would
return inaccurate outcomes. For more examples, refer to
`pico.help('create_space')`
.
-
_Clusterwide SQL_ now features Lua documentation. Refer to
`pico.help('sql')`
for more information.
### Lua API:
### Lua API:
-
Update
`pico.LUA_API_VERSION`
:
`1.0.0`
->
`1.2.0`
-
Update
`pico.LUA_API_VERSION`
:
`1.0.0`
->
`1.2.0`
...
...
This diff is collapsed.
Click to expand it.
src/luamod.rs
+
74
−
2
View file @
70fe8e63
...
@@ -388,6 +388,74 @@ pub(crate) fn setup(args: &args::Run) {
...
@@ -388,6 +388,74 @@ pub(crate) fn setup(args: &args::Run) {
),
),
);
);
// sql
///////////////////////////////////////////////////////////////////////////
luamod_set_help_only
(
&
l
,
"sql"
,
indoc!
{
r#"
pico.sql(query[, params])
=========================
Executes a cluster-wide SQL query.
1. The query is parsed and validated to build a distributed
query plan on the current instance (router).
2. The query plan is dispatched to the target instances (storages)
slice-by-slice in a bottom-up manner. All intermediate results
are stored in the router's memory.
Params:
1. query (string)
2. params (table), optional
Returns:
(table DqlResult) if query retrieves data
or
(table DmlResult) if query modifies data
or
(nil, string) in case of an error
table DqlResult:
- metadata (table),
`{{name = string, type = string}, ...}`, an array of column
definitions of the table.
- rows (table),
`{row, ...}`, essentially the result of the query.
table DmlResult:
- row_count (number), the number of rows modified, inserted or
deleted by the query.
Example:
picodata> -- Insert a row into the 'wonderland' table using parameters
picodata> -- as an optional argument.
picodata> pico.sql(
[[insert into "wonderland" ("property", "value") values (?, ?)]],
{"dragon", 13}
)
---
- row_count: 1
...
picodata> -- Select a row from the 'wonderland' table.
picodata> pico.sql(
[[select * from "wonderland" where "property" = 'dragon']]
)
---
- metadata:
- {'name': 'property', 'type': 'string'}
- {'name': 'value', 'type': 'integer'}
rows:
- ['dragon', 13]
"#
},
);
// vclock
// vclock
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
luamod_set_help_only
(
luamod_set_help_only
(
...
@@ -1107,7 +1175,7 @@ pub(crate) fn setup(args: &args::Run) {
...
@@ -1107,7 +1175,7 @@ pub(crate) fn setup(args: &args::Run) {
name = 'wonderland',
name = 'wonderland',
format = {
format = {
{name = 'property', type = 'string', is_nullable = false},
{name = 'property', type = 'string', is_nullable = false},
{name = 'value', type = '
any
', is_nullable = true}
{name = 'value', type = '
integer
', is_nullable = true}
},
},
primary_key = {'property'},
primary_key = {'property'},
distribution = 'sharded',
distribution = 'sharded',
...
@@ -1115,8 +1183,12 @@ pub(crate) fn setup(args: &args::Run) {
...
@@ -1115,8 +1183,12 @@ pub(crate) fn setup(args: &args::Run) {
timeout = 3,
timeout = 3,
})
})
-- Calculate an SQL-compatible hash for the bucket id.
local key = require('key_def').new({{fieldno = 1, type = 'string'}})
local tuple = box.tuple.new({'unicorns'})
local bucket_id = key:hash(tuple) % vshard.router.bucket_count()
-- Sharded spaces are updated via vshard api, see [1]
-- Sharded spaces are updated via vshard api, see [1]
local bucket_id = vshard.router.bucket_id_mpcrc32('unicorns')
vshard.router.callrw(bucket_id, 'box.space.wonderland:insert', {{'unicorns', 12}})
vshard.router.callrw(bucket_id, 'box.space.wonderland:insert', {{'unicorns', 12}})
See also:
See also:
...
...
This diff is collapsed.
Click to expand it.
src/sql.rs
+
3
−
0
View file @
70fe8e63
//! Cluster-wide SQL query execution.
use
crate
::
traft
;
use
crate
::
traft
;
use
::
tarantool
::
proc
;
use
::
tarantool
::
proc
;
use
::
tarantool
::
tuple
::{
RawByteBuf
,
RawBytes
};
use
::
tarantool
::
tuple
::{
RawByteBuf
,
RawBytes
};
use
sbroad
::
backend
::
sql
::
ir
::{
EncodedPatternWithParams
,
PatternWithParams
};
use
sbroad
::
backend
::
sql
::
ir
::{
EncodedPatternWithParams
,
PatternWithParams
};
use
sbroad_picodata
::
api
::{
dispatch_sql
,
execute_sql
};
use
sbroad_picodata
::
api
::{
dispatch_sql
,
execute_sql
};
/// Dispatches a query to the cluster.
#[proc(packed_args)]
#[proc(packed_args)]
pub
fn
dispatch_query
(
encoded_params
:
EncodedPatternWithParams
)
->
traft
::
Result
<
RawByteBuf
>
{
pub
fn
dispatch_query
(
encoded_params
:
EncodedPatternWithParams
)
->
traft
::
Result
<
RawByteBuf
>
{
let
params
=
PatternWithParams
::
from
(
encoded_params
);
let
params
=
PatternWithParams
::
from
(
encoded_params
);
...
@@ -11,6 +13,7 @@ pub fn dispatch_query(encoded_params: EncodedPatternWithParams) -> traft::Result
...
@@ -11,6 +13,7 @@ pub fn dispatch_query(encoded_params: EncodedPatternWithParams) -> traft::Result
Ok
(
RawByteBuf
::
from
(
bytes
))
Ok
(
RawByteBuf
::
from
(
bytes
))
}
}
/// Executes a query sub-plan on the local node.
#[proc(packed_args)]
#[proc(packed_args)]
pub
fn
execute
(
raw
:
&
RawBytes
)
->
traft
::
Result
<
RawByteBuf
>
{
pub
fn
execute
(
raw
:
&
RawBytes
)
->
traft
::
Result
<
RawByteBuf
>
{
let
bytes
=
execute_sql
(
raw
)
?
;
let
bytes
=
execute_sql
(
raw
)
?
;
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment