From 8a75383f63f109bde54a9fdf6c13841da14ad907 Mon Sep 17 00:00:00 2001 From: Denis Smirnov <sd@picodata.io> Date: Mon, 15 Aug 2022 12:11:05 +0700 Subject: [PATCH] fix: name normalization The name normalization in Tarantool can be a little bit confusing because it has two different pipelines: for SQL and for YAML configuration. 1. SQL allows us to use table and column names with a rule: the name in the double quotes is processed "as is" while a name without double quotes is transformed to the upper case. Here are the examples: select * from t .. -> "T" select * from T .. -> "T" select * from "T" .. -> "T" select * from "t" .. -> "t" So, the only way to get in Tarantool a name with lowercase characters is to wrap it with double quotes. 2. YAML configuration allows us to create distributed tables with the following naming convention: the name is always processed "as is". For example: t -> "t" T -> "T" --- benches/engine.rs | 6 +- src/executor/engine.rs | 32 ++++----- .../engine/cartridge/backend/sql/ir/tests.rs | 6 +- src/executor/engine/cartridge/config.rs | 38 ++++++++-- src/executor/engine/cartridge/config/tests.rs | 4 +- src/executor/engine/cartridge/router.rs | 5 +- src/executor/engine/mock.rs | 11 +-- src/executor/tests.rs | 26 +++---- src/frontend/sql.rs | 69 +++++++++++-------- src/frontend/sql/ir.rs | 7 -- src/frontend/sql/ir/tests.rs | 10 +-- src/ir/operator.rs | 5 +- test_app/test/integration/api_test.lua | 48 +++++++++++++ 13 files changed, 176 insertions(+), 91 deletions(-) diff --git a/benches/engine.rs b/benches/engine.rs index e7449391c3..1b16ed0965 100644 --- a/benches/engine.rs +++ b/benches/engine.rs @@ -8,8 +8,8 @@ use sbroad::errors::QueryPlannerError; use sbroad::executor::bucket::Buckets; use sbroad::executor::engine::cartridge::hash::bucket_id_by_tuple; use sbroad::executor::engine::{ - sharding_keys_from_map, sharding_keys_from_tuple, Configuration, Coordinator, - CoordinatorMetadata, + normalize_name_from_sql, sharding_keys_from_map, 
sharding_keys_from_tuple, Configuration, + Coordinator, CoordinatorMetadata, }; use sbroad::executor::ir::ExecutionPlan; use sbroad::executor::lru::{Cache, LRUCache, DEFAULT_CAPACITY}; @@ -37,7 +37,7 @@ impl CoordinatorMetadata for RouterConfigurationMock { /// # Errors /// - Failed to get table by name from the metadata. fn get_table_segment(&self, table_name: &str) -> Result<Table, QueryPlannerError> { - let name = Self::to_name(table_name); + let name = normalize_name_from_sql(table_name); match self.tables.get(&name) { Some(v) => Ok(v.clone()), None => Err(QueryPlannerError::CustomError(format!( diff --git a/src/executor/engine.rs b/src/executor/engine.rs index 02298a4655..eb2f5e4a55 100644 --- a/src/executor/engine.rs +++ b/src/executor/engine.rs @@ -33,11 +33,6 @@ pub trait CoordinatorMetadata { fn get_sharding_column(&self) -> &str; - #[must_use] - fn to_name(s: &str) -> String { - to_name(s) - } - /// Provides vector of the sharding key column names or an error /// /// # Errors @@ -156,9 +151,10 @@ pub fn sharding_keys_from_tuple<'rec>( space: &str, tuple: &'rec [Value], ) -> Result<Vec<&'rec Value>, QueryPlannerError> { - let sharding_positions = conf.get_sharding_positions_by_space(space)?; + let quoted_space = normalize_name_from_schema(space); + let sharding_positions = conf.get_sharding_positions_by_space(&quoted_space)?; let mut sharding_tuple = Vec::with_capacity(sharding_positions.len()); - let table_col_amount = conf.get_fields_amount_by_space(space)?; + let table_col_amount = conf.get_fields_amount_by_space(&quoted_space)?; if table_col_amount == tuple.len() { // The tuple contains a "bucket_id" column. for position in &sharding_positions { @@ -173,7 +169,7 @@ pub fn sharding_keys_from_tuple<'rec>( Ok(sharding_tuple) } else if table_col_amount == tuple.len() + 1 { // The tuple doesn't contain the "bucket_id" column. 
- let table = conf.get_table_segment(space)?; + let table = conf.get_table_segment(&quoted_space)?; let bucket_position = table.get_bucket_id_position()?; // If the "bucket_id" splits the sharding key, we need to shift the sharding @@ -221,10 +217,11 @@ pub fn sharding_keys_from_map<'rec, S: ::std::hash::BuildHasher>( space: &str, map: &'rec HashMap<String, Value, S>, ) -> Result<Vec<&'rec Value>, QueryPlannerError> { - let sharding_key = conf.get_sharding_key_by_space(space)?; + let quoted_space = normalize_name_from_schema(space); + let sharding_key = conf.get_sharding_key_by_space(&quoted_space)?; let quoted_map = map .iter() - .map(|(k, _)| (to_name(k), k.as_str())) + .map(|(k, _)| (normalize_name_from_schema(k), k.as_str())) .collect::<HashMap<String, &str>>(); let mut tuple = Vec::with_capacity(sharding_key.len()); for quoted_column in &sharding_key { @@ -246,14 +243,17 @@ pub fn sharding_keys_from_map<'rec, S: ::std::hash::BuildHasher>( Ok(tuple) } -fn to_name(s: &str) -> String { +#[must_use] +pub fn normalize_name_from_schema(s: &str) -> String { + format!("\"{}\"", s) +} + +#[must_use] +pub fn normalize_name_from_sql(s: &str) -> String { if let (Some('"'), Some('"')) = (s.chars().next(), s.chars().last()) { - s.to_string() - } else if s.to_uppercase() == s { - s.to_lowercase() - } else { - format!("\"{}\"", s) + return s.to_string(); } + format!("\"{}\"", s.to_uppercase()) } #[cfg(test)] diff --git a/src/executor/engine/cartridge/backend/sql/ir/tests.rs b/src/executor/engine/cartridge/backend/sql/ir/tests.rs index 3467d9cdcf..3ce07caad1 100644 --- a/src/executor/engine/cartridge/backend/sql/ir/tests.rs +++ b/src/executor/engine/cartridge/backend/sql/ir/tests.rs @@ -131,10 +131,10 @@ fn from_sub_query() { PatternWithParams::new( format!( "{} {} {} {}", - r#"SELECT t1."product_code" FROM"#, + r#"SELECT "T1"."product_code" FROM"#, r#"(SELECT "hash_testing"."product_code" FROM "hash_testing""#, - r#"WHERE ("hash_testing"."identification_number") = (?)) as t1"#, -
r#"WHERE (t1."product_code") = (?)"# + r#"WHERE ("hash_testing"."identification_number") = (?)) as "T1""#, + r#"WHERE ("T1"."product_code") = (?)"# ), vec![Value::from(1_u64), Value::from("a")], ), diff --git a/src/executor/engine/cartridge/config.rs b/src/executor/engine/cartridge/config.rs index 660487e196..3a5df8ad32 100644 --- a/src/executor/engine/cartridge/config.rs +++ b/src/executor/engine/cartridge/config.rs @@ -6,6 +6,7 @@ use std::collections::HashMap; use yaml_rust::{Yaml, YamlLoader}; use crate::errors::QueryPlannerError; +use crate::executor::engine::{normalize_name_from_schema, normalize_name_from_sql}; use crate::executor::lru::DEFAULT_CAPACITY; use crate::executor::CoordinatorMetadata; use crate::ir::relation::{Column, ColumnRole, Table, Type}; @@ -72,6 +73,7 @@ impl RouterConfiguration { /// /// # Errors /// Returns `QueryPlannerError` when schema contains errors. + #[allow(clippy::too_many_lines)] fn init_table_segments(&mut self, schema: &Yaml) -> Result<(), QueryPlannerError> { self.tables.clear(); let spaces = match schema["spaces"].as_hash() { @@ -93,7 +95,20 @@ impl RouterConfiguration { Some(t) => Type::new(t)?, None => return Err(QueryPlannerError::TypeNotImplemented), }; - let qualified_name = Self::to_name(name); + let qualified_name = normalize_name_from_schema(name); + #[cfg(not(feature = "mock"))] + { + say( + SayLevel::Debug, + file!(), + line!().try_into().unwrap_or(0), + Option::from("configuration parsing"), + &format!( + "Column's original name: {}, qualified name {}", + name, qualified_name + ), + ); + } let role = if self.get_sharding_column().eq(&qualified_name) { ColumnRole::Sharding } else { @@ -142,7 +157,7 @@ impl RouterConfiguration { continue; } }; - result.push(Self::to_name(key)); + result.push(normalize_name_from_schema(key)); } result } @@ -161,7 +176,20 @@ impl RouterConfiguration { } }; - let table_name: String = RouterConfiguration::to_name(current_space_name); + let table_name: String = 
normalize_name_from_schema(current_space_name); + #[cfg(not(feature = "mock"))] + { + say( + SayLevel::Debug, + file!(), + line!().try_into().unwrap_or(0), + Option::from("configuration parsing"), + &format!( + "Table's original name: {}, qualified name {}", + current_space_name, table_name + ), + ); + } let keys_str = keys.iter().map(String::as_str).collect::<Vec<&str>>(); let t = Table::new_seg(&table_name, fields, keys_str.as_slice())?; self.tables.insert(table_name, t); @@ -200,12 +228,12 @@ impl CoordinatorMetadata for RouterConfiguration { /// Returns `QueryPlannerError` when table was not found. #[allow(dead_code)] fn get_table_segment(&self, table_name: &str) -> Result<Table, QueryPlannerError> { - let name = Self::to_name(table_name); + let name = normalize_name_from_sql(table_name); match self.tables.get(&name) { Some(v) => Ok(v.clone()), None => Err(QueryPlannerError::CustomError(format!( "Space {} not found", - table_name + name ))), } } diff --git a/src/executor/engine/cartridge/config/tests.rs b/src/executor/engine/cartridge/config/tests.rs index a1b667253a..c6571fab20 100644 --- a/src/executor/engine/cartridge/config/tests.rs +++ b/src/executor/engine/cartridge/config/tests.rs @@ -87,7 +87,7 @@ fn test_yaml_schema_parser() { s.load_schema(test_schema).unwrap(); let expected_keys = vec!["\"identification_number\"", "\"product_code\""]; - let actual_keys = s.get_sharding_key_by_space("hash_testing").unwrap(); + let actual_keys = s.get_sharding_key_by_space("\"hash_testing\"").unwrap(); assert_eq!(actual_keys, expected_keys) } @@ -152,7 +152,7 @@ fn test_getting_table_segment() { assert_eq!( s.get_table_segment("invalid_table").unwrap_err(), - QueryPlannerError::CustomError("Space invalid_table not found".into()) + QueryPlannerError::CustomError(r#"Space "INVALID_TABLE" not found"#.into()) ); assert_eq!(s.get_table_segment("\"hash_testing\"").unwrap(), expected) } diff --git a/src/executor/engine/cartridge/router.rs 
b/src/executor/engine/cartridge/router.rs index 41a765da18..5cd8ebff9e 100644 --- a/src/executor/engine/cartridge/router.rs +++ b/src/executor/engine/cartridge/router.rs @@ -17,7 +17,8 @@ use crate::executor::engine::cartridge::backend::sql::ir::PatternWithParams; use crate::executor::engine::cartridge::config::RouterConfiguration; use crate::executor::engine::cartridge::hash::bucket_id_by_tuple; use crate::executor::engine::{ - sharding_keys_from_map, sharding_keys_from_tuple, Configuration, Coordinator, + normalize_name_from_schema, sharding_keys_from_map, sharding_keys_from_tuple, Configuration, + Coordinator, }; use crate::executor::ir::ExecutionPlan; use crate::executor::lru::{LRUCache, DEFAULT_CAPACITY}; @@ -130,7 +131,7 @@ impl Configuration for RouterRuntime { let mut metadata = RouterConfiguration::new(); metadata.set_waiting_timeout(timeout); metadata.set_cache_capacity(router_capacity); - metadata.set_sharding_column(RouterConfiguration::to_name(column.as_str())); + metadata.set_sharding_column(normalize_name_from_schema(column.as_str())); // We should always load the schema **after** setting the sharding column. 
metadata.load_schema(&schema)?; diff --git a/src/executor/engine/mock.rs b/src/executor/engine/mock.rs index 8d373cf991..acd19b0ddd 100644 --- a/src/executor/engine/mock.rs +++ b/src/executor/engine/mock.rs @@ -6,7 +6,8 @@ use crate::collection; use crate::errors::QueryPlannerError; use crate::executor::bucket::Buckets; use crate::executor::engine::{ - sharding_keys_from_map, sharding_keys_from_tuple, Configuration, Coordinator, + normalize_name_from_sql, sharding_keys_from_map, sharding_keys_from_tuple, Configuration, + Coordinator, }; use crate::executor::ir::ExecutionPlan; use crate::executor::lru::{LRUCache, DEFAULT_CAPACITY}; @@ -31,7 +32,7 @@ pub struct RouterConfigurationMock { impl CoordinatorMetadata for RouterConfigurationMock { fn get_table_segment(&self, table_name: &str) -> Result<Table, QueryPlannerError> { - let name = Self::to_name(table_name); + let name = normalize_name_from_sql(table_name); match self.tables.get(&name) { Some(v) => Ok(v.clone()), None => Err(QueryPlannerError::CustomError(format!( @@ -50,7 +51,7 @@ impl CoordinatorMetadata for RouterConfigurationMock { } fn get_sharding_key_by_space(&self, space: &str) -> Result<Vec<String>, QueryPlannerError> { - let table = self.get_table_segment(&Self::to_name(space))?; + let table = self.get_table_segment(space)?; table.get_sharding_column_names() } @@ -58,12 +59,12 @@ impl CoordinatorMetadata for RouterConfigurationMock { &self, space: &str, ) -> Result<Vec<usize>, QueryPlannerError> { - let table = self.get_table_segment(&Self::to_name(space))?; + let table = self.get_table_segment(space)?; Ok(table.get_sharding_positions().to_vec()) } fn get_fields_amount_by_space(&self, space: &str) -> Result<usize, QueryPlannerError> { - let table = self.get_table_segment(&Self::to_name(space))?; + let table = self.get_table_segment(space)?; Ok(table.columns.len()) } } diff --git a/src/executor/tests.rs b/src/executor/tests.rs index 755ce3a5fd..7f248494fb 100644 --- a/src/executor/tests.rs +++ 
b/src/executor/tests.rs @@ -532,7 +532,7 @@ fn join_linker4_test() { }); virtual_t2.add_tuple(vec![Value::from(1_u64)]); virtual_t2.add_tuple(vec![Value::from(2_u64)]); - virtual_t2.set_alias("t2").unwrap(); + virtual_t2.set_alias("\"T2\"").unwrap(); if let MotionPolicy::Segment(key) = get_motion_policy(query.exec_plan.get_ir_plan(), motion_t2_id) { @@ -584,13 +584,13 @@ fn join_linker4_test() { Value::String(String::from(PatternWithParams::new( format!( "{} {} {} {} {} {} {}", - r#"SELECT t1."id" FROM (SELECT"#, - r#"t1."id", t1."sysFrom", t1."FIRST_NAME", t1."sys_op""#, - r#"FROM "test_space" as t1) as t1"#, + r#"SELECT "T1"."id" FROM (SELECT"#, + r#""T1"."id", "T1"."sysFrom", "T1"."FIRST_NAME", "T1"."sys_op""#, + r#"FROM "test_space" as "T1") as "T1""#, r#"INNER JOIN"#, - r#"(SELECT COLUMN_1 as "r_id" FROM (VALUES (?))) as t2"#, - r#"ON (t1."id") = (t2."r_id")"#, - r#"and (t1."FIRST_NAME") = (SELECT COLUMN_3 as "fn" FROM (VALUES (?),(?)))"#, + r#"(SELECT COLUMN_1 as "r_id" FROM (VALUES (?))) as "T2""#, + r#"ON ("T1"."id") = ("T2"."r_id")"#, + r#"and ("T1"."FIRST_NAME") = (SELECT COLUMN_3 as "fn" FROM (VALUES (?),(?)))"#, ), vec![Value::from(2_u64), Value::from(2_u64), Value::from(3_u64)], ))), @@ -600,13 +600,13 @@ fn join_linker4_test() { Value::String(String::from(PatternWithParams::new( format!( "{} {} {} {} {} {} {}", - r#"SELECT t1."id" FROM (SELECT"#, - r#"t1."id", t1."sysFrom", t1."FIRST_NAME", t1."sys_op""#, - r#"FROM "test_space" as t1) as t1"#, + r#"SELECT "T1"."id" FROM (SELECT"#, + r#""T1"."id", "T1"."sysFrom", "T1"."FIRST_NAME", "T1"."sys_op""#, + r#"FROM "test_space" as "T1") as "T1""#, r#"INNER JOIN"#, - r#"(SELECT COLUMN_1 as "r_id" FROM (VALUES (?))) as t2"#, - r#"ON (t1."id") = (t2."r_id")"#, - r#"and (t1."FIRST_NAME") = (SELECT COLUMN_3 as "fn" FROM (VALUES (?),(?)))"#, + r#"(SELECT COLUMN_1 as "r_id" FROM (VALUES (?))) as "T2""#, + r#"ON ("T1"."id") = ("T2"."r_id")"#, + r#"and ("T1"."FIRST_NAME") = (SELECT COLUMN_3 as "fn" FROM (VALUES 
(?),(?)))"#, ), vec![Value::from(1_u64), Value::from(2_u64), Value::from(3_u64)], ))), diff --git a/src/frontend/sql.rs b/src/frontend/sql.rs index 2d31b68c01..fb973e8881 100644 --- a/src/frontend/sql.rs +++ b/src/frontend/sql.rs @@ -8,11 +8,11 @@ use std::collections::{HashMap, HashSet}; use traversal::DftPost; use crate::errors::QueryPlannerError; -use crate::executor::engine::CoordinatorMetadata; +use crate::executor::engine::{normalize_name_from_sql, CoordinatorMetadata}; use crate::frontend::sql::ast::{ AbstractSyntaxTree, ParseNode, ParseNodes, ParseTree, Rule, StackParseNode, Type, }; -use crate::frontend::sql::ir::{to_name, Translation}; +use crate::frontend::sql::ir::Translation; use crate::frontend::Ast; use crate::ir::expression::Expression; use crate::ir::operator::{Bool, Relational, Unary}; @@ -118,12 +118,14 @@ impl Ast for AbstractSyntaxTree { })?; let plan_child_id = map.get(*ast_child_id)?; map.add(*id, plan_child_id); - if let Some(ast_scan_name_id) = node.children.get(1) { - let ast_scan_name = self.nodes.get_node(*ast_scan_name_id)?; - if let Type::ScanName = ast_scan_name.rule { + if let Some(ast_scan_id) = node.children.get(1) { + let ast_scan = self.nodes.get_node(*ast_scan_id)?; + if let Type::ScanName = ast_scan.rule { + let ast_scan_name = + ast_scan.value.as_deref().map(normalize_name_from_sql); // Update scan name in the plan. 
let scan = plan.get_mut_relation_node(plan_child_id)?; - scan.set_scan_name(ast_scan_name.value.as_ref().map(|s| to_name(s)))?; + scan.set_scan_name(ast_scan_name)?; } else { return Err(QueryPlannerError::CustomError( "Expected scan name AST node.".into(), @@ -136,7 +138,7 @@ impl Ast for AbstractSyntaxTree { let table = node_val.as_str(); let t = metadata.get_table_segment(table)?; plan.add_rel(t); - let scan_id = plan.add_scan(table, None)?; + let scan_id = plan.add_scan(&normalize_name_from_sql(table), None)?; map.add(*id, scan_id); } else { return Err(QueryPlannerError::CustomError( @@ -161,7 +163,7 @@ impl Ast for AbstractSyntaxTree { ast_alias.rule ))); } - ast_alias.value.as_deref().map(to_name) + ast_alias.value.as_deref().map(normalize_name_from_sql) } else { None }; @@ -179,7 +181,8 @@ impl Ast for AbstractSyntaxTree { let get_column_name = |ast_id: usize| -> Result<String, QueryPlannerError> { let ast_col_name = self.nodes.get_node(ast_id)?; if let Type::ColumnName = ast_col_name.rule { - let name: Option<String> = ast_col_name.value.as_deref().map(to_name); + let name: Option<String> = + ast_col_name.value.as_deref().map(normalize_name_from_sql); Ok(name.ok_or_else(|| { QueryPlannerError::CustomError("Empty AST column name".into()) })?) @@ -212,20 +215,20 @@ impl Ast for AbstractSyntaxTree { if let (Some(plan_left_id), Some(plan_right_id)) = (plan_rel_list.get(0), plan_rel_list.get(1)) { - if let (Some(ast_scan_name_id), Some(ast_col_name_id)) = + if let (Some(ast_scan_id), Some(ast_col_name_id)) = (node.children.get(0), node.children.get(1)) { - let ast_scan_name = self.nodes.get_node(*ast_scan_name_id)?; - if let Type::ScanName = ast_scan_name.rule { + let ast_scan = self.nodes.get_node(*ast_scan_id)?; + if let Type::ScanName = ast_scan.rule { // Get the column name and its positions in the output tuples. 
let col_name = get_column_name(*ast_col_name_id)?; let left_name = get_scan_name(&col_name, *plan_left_id)?; let right_name = get_scan_name(&col_name, *plan_right_id)?; // Check that the AST scan name matches to the children scan names in the plan join node. - let scan_name: Option<String> = - ast_scan_name.value.as_deref().map(to_name); + let ast_scan_name: Option<String> = + ast_scan.value.as_deref().map(normalize_name_from_sql); // Determine the referred side of the join (left or right). - if left_name == scan_name { + if left_name == ast_scan_name { let left_col_map = plan .get_relation_node(*plan_left_id)? .output_alias_position_map(&plan.nodes)?; @@ -243,7 +246,7 @@ impl Ast for AbstractSyntaxTree { col_name, left_name ))); } - } else if right_name == scan_name { + } else if right_name == ast_scan_name { let right_col_map = plan .get_relation_node(*plan_right_id)? .output_alias_position_map(&plan.nodes)?; @@ -315,21 +318,27 @@ impl Ast for AbstractSyntaxTree { } else if let (Some(plan_rel_id), None) = (plan_rel_list.get(0), plan_rel_list.get(1)) { - let col_name: String = if let ( - Some(ast_scan_name_id), - Some(ast_col_name_id), - ) = (node.children.get(0), node.children.get(1)) + let col_name: String = if let (Some(ast_scan_id), Some(ast_col_id)) = + (node.children.get(0), node.children.get(1)) { // Get column name. - let col_name = get_column_name(*ast_col_name_id)?; + let col_name = get_column_name(*ast_col_id)?; // Check that scan name in the reference matches to the one in scan node. - let ast_scan_name = self.nodes.get_node(*ast_scan_name_id)?; - if let Type::ScanName = ast_scan_name.rule { + let ast_scan = self.nodes.get_node(*ast_scan_id)?; + if let Type::ScanName = ast_scan.rule { + let ast_scan_name = Some(normalize_name_from_sql( + ast_scan.value.as_ref().ok_or_else(|| { + QueryPlannerError::CustomError( + "Expected AST node to have a non-empty scan name." 
+ .into(), + ) + })?, + )); let plan_scan_name = get_scan_name(&col_name, *plan_rel_id)?; - if plan_scan_name != ast_scan_name.value { + if plan_scan_name != ast_scan_name { return Err(QueryPlannerError::CustomError( format!("Scan name for the column {:?} doesn't match: expected {:?}, found {:?}", - get_column_name(*ast_col_name_id), plan_scan_name, ast_scan_name.value + get_column_name(*ast_col_id), plan_scan_name, ast_scan_name ))); } } else { @@ -338,11 +347,11 @@ impl Ast for AbstractSyntaxTree { )); }; col_name - } else if let (Some(ast_col_name_id), None) = + } else if let (Some(ast_col_id), None) = (node.children.get(0), node.children.get(1)) { // Get the column name. - get_column_name(*ast_col_name_id)? + get_column_name(*ast_col_id)? } else { return Err(QueryPlannerError::CustomError( "No child node found in the AST reference.".into(), @@ -423,7 +432,9 @@ impl Ast for AbstractSyntaxTree { .ok_or_else(|| { QueryPlannerError::CustomError("Alias name is not found.".into()) })?; - let plan_alias_id = plan.nodes.add_alias(&to_name(name), plan_ref_id)?; + let plan_alias_id = plan + .nodes + .add_alias(&normalize_name_from_sql(name), plan_ref_id)?; map.add(*id, plan_alias_id); } Type::Column => { @@ -893,5 +904,5 @@ impl Plan { } pub mod ast; -mod ir; +pub mod ir; pub mod tree; diff --git a/src/frontend/sql/ir.rs b/src/frontend/sql/ir.rs index 0de79a86d3..7406e2857f 100644 --- a/src/frontend/sql/ir.rs +++ b/src/frontend/sql/ir.rs @@ -118,13 +118,6 @@ impl Translation { } } -pub(super) fn to_name(s: &str) -> String { - if let (Some('"'), Some('"')) = (s.chars().next(), s.chars().last()) { - return s.to_string(); - } - s.to_lowercase() -} - #[derive(Hash, PartialEq, Debug)] struct SubQuery { relational: usize, diff --git a/src/frontend/sql/ir/tests.rs b/src/frontend/sql/ir/tests.rs index 1cb1932039..307bcc6f0a 100644 --- a/src/frontend/sql/ir/tests.rs +++ b/src/frontend/sql/ir/tests.rs @@ -150,13 +150,13 @@ fn front_sql6() { let expected = PatternWithParams::new( 
format!( "{} {} {} {} {} {} {} {}", - r#"SELECT t."id", "hash_testing"."product_units""#, + r#"SELECT "T"."id", "hash_testing"."product_units""#, r#"FROM (SELECT "hash_testing"."identification_number","#, r#""hash_testing"."product_code","#, r#""hash_testing"."product_units","#, r#""hash_testing"."sys_op" FROM "hash_testing") as "hash_testing""#, - r#"INNER JOIN (SELECT "test_space"."id" FROM "test_space") as t"#, - r#"ON ("hash_testing"."identification_number") = (t."id")"#, + r#"INNER JOIN (SELECT "test_space"."id" FROM "test_space") as "T""#, + r#"ON ("hash_testing"."identification_number") = ("T"."id")"#, r#"WHERE ("hash_testing"."identification_number") = (?) and ("hash_testing"."product_code") = (?)"#, ), vec![Value::from(5_u64), Value::from("123")], @@ -196,8 +196,8 @@ fn front_sql8() { let expected = PatternWithParams::new( format!( "{} {}", - r#"SELECT t."identification_number", t."product_code""#, - r#"FROM "hash_testing" as t WHERE (t."identification_number") = (?)"#, + r#"SELECT "T"."identification_number", "T"."product_code""#, + r#"FROM "hash_testing" as "T" WHERE ("T"."identification_number") = (?)"#, ), vec![Value::from(1_u64)], ); diff --git a/src/ir/operator.rs b/src/ir/operator.rs index fec393e2e6..bc659259a3 100644 --- a/src/ir/operator.rs +++ b/src/ir/operator.rs @@ -606,7 +606,10 @@ impl Plan { return Ok(scan_id); } } - Err(QueryPlannerError::InvalidRelation) + Err(QueryPlannerError::CustomError(format!( + "Failed to find relation {} among the plan relations", + table + ))) } /// Adds inner join node. 
diff --git a/test_app/test/integration/api_test.lua b/test_app/test/integration/api_test.lua index 2ce70b79ec..4278b23d4b 100644 --- a/test_app/test/integration/api_test.lua +++ b/test_app/test/integration/api_test.lua @@ -910,3 +910,51 @@ g.test_between2 = function() }, }) end + +g.test_uppercase1 = function() + local api = cluster:server("api-1").net_box + + local r, err = api:call("sbroad.execute", { [[ + SELECT "id" FROM broken + ]], {} }) + + t.assert_equals(err, nil) + t.assert_equals(r, { + metadata = { + {name = "id", type = "number"}, + }, + rows = {}, + }) +end + +g.test_uppercase2 = function() + local api = cluster:server("api-1").net_box + + local r, err = api:call("sbroad.execute", { [[ + SELECT "id" FROM BROKEN + ]], {} }) + + t.assert_equals(err, nil) + t.assert_equals(r, { + metadata = { + {name = "id", type = "number"}, + }, + rows = {}, + }) +end + +g.test_uppercase3 = function() + local api = cluster:server("api-1").net_box + + local r, err = api:call("sbroad.execute", { [[ + SELECT "id" FROM "BROKEN" + ]], {} }) + + t.assert_equals(err, nil) + t.assert_equals(r, { + metadata = { + {name = "id", type = "number"}, + }, + rows = {}, + }) +end -- GitLab