instance_id | patch | repo | base_commit | hints_text | test_patch | problem_statement | version | environment_setup_commit | FAIL_TO_PASS | PASS_TO_PASS | meta | created_at | license | __index_level_0__
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tobymao__sqlglot-1137
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 8ca1d362..679502b4 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -295,3 +295,12 @@ class Snowflake(Dialect):
kind = f" {kind_value}" if kind_value else ""
this = f" {self.sql(expression, 'this')}"
return f"DESCRIBE{kind}{this}"
+
+ def generatedasidentitycolumnconstraint_sql(
+ self, expression: exp.GeneratedAsIdentityColumnConstraint
+ ) -> str:
+ start = expression.args.get("start")
+ start = f" START {start}" if start else ""
+ increment = expression.args.get("increment")
+ increment = f" INCREMENT {increment}" if increment else ""
+ return f"AUTOINCREMENT{start}{increment}"
diff --git a/sqlglot/dialects/sqlite.py b/sqlglot/dialects/sqlite.py
index 1b394494..a428dd57 100644
--- a/sqlglot/dialects/sqlite.py
+++ b/sqlglot/dialects/sqlite.py
@@ -49,7 +49,6 @@ class SQLite(Dialect):
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
- "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
}
class Parser(parser.Parser):
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 38764fa2..ba09c8b2 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -939,7 +939,7 @@ class EncodeColumnConstraint(ColumnConstraintKind):
class GeneratedAsIdentityColumnConstraint(ColumnConstraintKind):
# this: True -> ALWAYS, this: False -> BY DEFAULT
- arg_types = {"this": True, "start": False, "increment": False}
+ arg_types = {"this": False, "start": False, "increment": False}
class NotNullColumnConstraint(ColumnConstraintKind):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 5b68688b..0d72fe31 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -453,6 +453,9 @@ class Generator:
def generatedasidentitycolumnconstraint_sql(
self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
+ this = ""
+ if expression.this is not None:
+ this = " ALWAYS " if expression.this else " BY DEFAULT "
start = expression.args.get("start")
start = f"START WITH {start}" if start else ""
increment = expression.args.get("increment")
@@ -461,9 +464,7 @@ class Generator:
if start or increment:
sequence_opts = f"{start} {increment}"
sequence_opts = f" ({sequence_opts.strip()})"
- return (
- f"GENERATED {'ALWAYS' if expression.this else 'BY DEFAULT'} AS IDENTITY{sequence_opts}"
- )
+ return f"GENERATED{this}AS IDENTITY{sequence_opts}"
def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL"
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index dfda4eed..579c2ce6 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2749,8 +2749,23 @@ class Parser(metaclass=_Parser):
kind: exp.Expression
- if self._match(TokenType.AUTO_INCREMENT):
- kind = exp.AutoIncrementColumnConstraint()
+ if self._match_set((TokenType.AUTO_INCREMENT, TokenType.IDENTITY)):
+ start = None
+ increment = None
+
+ if self._match(TokenType.L_PAREN, advance=False):
+ args = self._parse_wrapped_csv(self._parse_bitwise)
+ start = seq_get(args, 0)
+ increment = seq_get(args, 1)
+ elif self._match_text_seq("START"):
+ start = self._parse_bitwise()
+ self._match_text_seq("INCREMENT")
+ increment = self._parse_bitwise()
+
+ if start and increment:
+ kind = exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment)
+ else:
+ kind = exp.AutoIncrementColumnConstraint()
elif self._match(TokenType.CHECK):
constraint = self._parse_wrapped(self._parse_conjunction)
kind = self.expression(exp.CheckColumnConstraint, this=constraint)
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index f14cbbe3..42978f61 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -474,6 +474,7 @@ class Tokenizer(metaclass=_Tokenizer):
"ASC": TokenType.ASC,
"AS": TokenType.ALIAS,
"AT TIME ZONE": TokenType.AT_TIME_ZONE,
+ "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
"AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
"BEGIN": TokenType.BEGIN,
"BETWEEN": TokenType.BETWEEN,
|
tobymao/sqlglot
|
327874eb7ff361136a91a26bc337c1d501d17164
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 05e76430..8160d76b 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -7,7 +7,32 @@ class TestSnowflake(Validator):
def test_snowflake(self):
self.validate_identity("SELECT REGEXP_LIKE(a, b, c)")
+ self.validate_identity("CREATE TABLE foo (bar FLOAT AUTOINCREMENT START 0 INCREMENT 1)")
+ self.validate_all(
+ "CREATE OR REPLACE TEMPORARY TABLE x (y NUMBER IDENTITY(0, 1))",
+ write={
+ "snowflake": "CREATE OR REPLACE TEMPORARY TABLE x (y DECIMAL AUTOINCREMENT START 0 INCREMENT 1)",
+ },
+ )
+ self.validate_all(
+ "CREATE TEMPORARY TABLE x (y NUMBER AUTOINCREMENT(0, 1))",
+ write={
+ "snowflake": "CREATE TEMPORARY TABLE x (y DECIMAL AUTOINCREMENT START 0 INCREMENT 1)",
+ },
+ )
+ self.validate_all(
+ "CREATE TABLE x (y NUMBER IDENTITY START 0 INCREMENT 1)",
+ write={
+ "snowflake": "CREATE TABLE x (y DECIMAL AUTOINCREMENT START 0 INCREMENT 1)",
+ },
+ )
+ self.validate_all(
+ "ALTER TABLE foo ADD COLUMN id INT identity(1, 1)",
+ write={
+ "snowflake": "ALTER TABLE foo ADD COLUMN id INT AUTOINCREMENT START 1 INCREMENT 1",
+ },
+ )
self.validate_all(
"SELECT DAYOFWEEK('2016-01-02T23:39:20.123-07:00'::TIMESTAMP)",
write={
|
snowflake: cannot create or alter tables with AUTOINCREMENT or IDENTITY keyword
I can use the "IDENTITY" and "AUTOINCREMENT" keywords in snowflake:
```sql
create or replace table colors as
select name
from (values ('blue'),('red'),('green')) colors (name);
create or replace table identity_column_example like colors;
alter table identity_column_example add column id int identity(1,1);
```
```sql
CREATE OR REPLACE TEMPORARY TABLE "X" ("IDENTITY_COLUMN" NUMBER AUTOINCREMENT(0, 1))
```
In sqlglot 11.0.0 on Python 3.10.4, I get a ParseError:
```python
import sqlglot
sqlglot.transpile('CREATE OR REPLACE TEMPORARY TABLE "X" ("IDENTITY_COLUMN" NUMBER AUTOINCREMENT(0, 1))', read='snowflake', write='snowflake')
sqlglot.transpile('alter table identity_column_example add column id int identity(1,1);')
```
<details>
<summary>stack trace for first error in example</summary>
```python-traceback
ParseError Traceback (most recent call last)
Cell In[4], line 1
----> 1 sqlglot.transpile('CREATE OR REPLACE TEMPORARY TABLE "X" ("IDENTITY_COLUMN" NUMBER AUTOINCREMENT(0, 1))', read='snowflake', write='snowflake')
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/__init__.py:177, in transpile(sql, read, write, identity, error_level, **opts)
158 """
159 Parses the given SQL string in accordance with the source dialect and returns a list of SQL strings transformed
160 to conform to the target dialect. Each string in the returned list represents a single transformed SQL statement.
(...)
172 The list of transpiled SQL statements.
173 """
174 write = write or read if identity else write
175 return [
176 Dialect.get_or_raise(write)().generate(expression, **opts)
--> 177 for expression in parse(sql, read, error_level=error_level)
178 ]
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/__init__.py:65, in parse(sql, read, **opts)
53 """
54 Parses the given SQL string into a collection of syntax trees, one per parsed SQL statement.
55
(...)
62 The resulting syntax tree collection.
63 """
64 dialect = Dialect.get_or_raise(read)()
---> 65 return dialect.parse(sql, **opts)
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/dialects/dialect.py:163, in Dialect.parse(self, sql, **opts)
162 def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
--> 163 return self.parser(**opts).parse(self.tokenizer.tokenize(sql), sql)
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/parser.py:707, in Parser.parse(self, raw_tokens, sql)
693 def parse(
694 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
695 ) -> t.List[t.Optional[exp.Expression]]:
696 """
697 Parses a list of tokens and returns a list of syntax trees, one tree
698 per parsed SQL statement.
(...)
705 The list of syntax trees.
706 """
--> 707 return self._parse(
708 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
709 )
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/parser.py:770, in Parser._parse(self, parse_method, raw_tokens, sql)
767 self._tokens = tokens
768 self._advance()
--> 770 expressions.append(parse_method(self))
772 if self._index < len(self._tokens):
773 self.raise_error("Invalid expression / Unexpected token")
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/parser.py:900, in Parser._parse_statement(self)
897 return None
899 if self._match_set(self.STATEMENT_PARSERS):
--> 900 return self.STATEMENT_PARSERS[self._prev.token_type](self)
902 if self._match_set(Tokenizer.COMMANDS):
903 return self._parse_command()
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/parser.py:431, in Parser.<lambda>(self)
365 COLUMN_OPERATORS = {
366 TokenType.DOT: None,
367 TokenType.DCOLON: lambda self, this, to: self.expression(
(...)
396 ),
397 }
399 EXPRESSION_PARSERS = {
400 exp.Column: lambda self: self._parse_column(),
401 exp.DataType: lambda self: self._parse_types(),
(...)
423 "JOIN_TYPE": lambda self: self._parse_join_side_and_kind(),
424 }
426 STATEMENT_PARSERS = {
427 TokenType.ALTER: lambda self: self._parse_alter(),
428 TokenType.BEGIN: lambda self: self._parse_transaction(),
429 TokenType.CACHE: lambda self: self._parse_cache(),
430 TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(),
--> 431 TokenType.CREATE: lambda self: self._parse_create(),
432 TokenType.DELETE: lambda self: self._parse_delete(),
433 TokenType.DESC: lambda self: self._parse_describe(),
434 TokenType.DESCRIBE: lambda self: self._parse_describe(),
435 TokenType.DROP: lambda self: self._parse_drop(),
436 TokenType.END: lambda self: self._parse_commit_or_rollback(),
437 TokenType.INSERT: lambda self: self._parse_insert(),
438 TokenType.LOAD_DATA: lambda self: self._parse_load_data(),
439 TokenType.MERGE: lambda self: self._parse_merge(),
440 TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(),
441 TokenType.UNCACHE: lambda self: self._parse_uncache(),
442 TokenType.UPDATE: lambda self: self._parse_update(),
443 TokenType.USE: lambda self: self.expression(
444 exp.Use,
445 kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"))
446 and exp.Var(this=self._prev.text),
447 this=self._parse_table(schema=False),
448 ),
449 }
451 UNARY_PARSERS = {
452 TokenType.PLUS: lambda self: self._parse_unary(), # Unary + is handled as a no-op
453 TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
454 TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()),
455 TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()),
456 }
458 PRIMARY_PARSERS = {
459 TokenType.STRING: lambda self, token: self.expression(
460 exp.Literal, this=token.text, is_string=True
(...)
477 TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
478 }
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/parser.py:997, in Parser._parse_create(self)
994 if self._match(TokenType.COMMA): # comma-separated properties before schema definition
995 properties = self._parse_properties(before=True)
--> 997 this = self._parse_schema(this=table_parts)
999 if not properties: # properties after schema definition
1000 properties = self._parse_properties()
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/parser.py:2720, in Parser._parse_schema(self, this)
2714 return this
2716 args = self._parse_csv(
2717 lambda: self._parse_constraint()
2718 or self._parse_column_def(self._parse_field(any_token=True))
2719 )
-> 2720 self._match_r_paren()
2721 return self.expression(exp.Schema, this=this, expressions=args)
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/parser.py:3635, in Parser._match_r_paren(self, expression)
3633 def _match_r_paren(self, expression=None):
3634 if not self._match(TokenType.R_PAREN):
-> 3635 self.raise_error("Expecting )")
3636 if expression and self._prev_comments:
3637 expression.comments = self._prev_comments
File ~/opt/anaconda3/envs/ponder-product-testing/lib/python3.10/site-packages/sqlglot/parser.py:816, in Parser.raise_error(self, message, token)
804 error = ParseError.new(
805 f"{message}. Line {token.line}, Col: {token.col}.\n"
806 f" {start_context}\033[4m{highlight}\033[0m{end_context}",
(...)
812 end_context=end_context,
813 )
815 if self.error_level == ErrorLevel.IMMEDIATE:
--> 816 raise error
818 self.errors.append(error)
ParseError: Expecting ). Line 1, Col: 65.
CREATE OR REPLACE TEMPORARY TABLE "X" ("IDENTITY_COLUMN" NUMBER AUTOINCREMENT(0, 1))
```
</details>
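With the patch above applied, a minimal sketch of the expected behavior; the output strings are taken directly from the accompanying test patch:
```python
import sqlglot

# IDENTITY(start, increment) now parses and is rendered with Snowflake's
# AUTOINCREMENT START ... INCREMENT ... syntax.
print(
    sqlglot.transpile(
        "CREATE OR REPLACE TEMPORARY TABLE x (y NUMBER IDENTITY(0, 1))",
        read="snowflake",
        write="snowflake",
    )[0]
)
# CREATE OR REPLACE TEMPORARY TABLE x (y DECIMAL AUTOINCREMENT START 0 INCREMENT 1)

# The ALTER TABLE form from the report round-trips as well.
print(
    sqlglot.transpile(
        "ALTER TABLE foo ADD COLUMN id INT identity(1, 1)",
        read="snowflake",
        write="snowflake",
    )[0]
)
# ALTER TABLE foo ADD COLUMN id INT AUTOINCREMENT START 1 INCREMENT 1
```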
|
0.0
|
327874eb7ff361136a91a26bc337c1d501d17164
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-09 18:48:28+00:00
|
mit
| 5,935 |
|
tobymao__sqlglot-1423
|
diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
index c9533af7..1bbe6b96 100644
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -152,9 +152,9 @@ class Oracle(Dialect):
def xmltable_sql(self, expression: exp.XMLTable) -> str:
this = self.sql(expression, "this")
- passing = self.expressions(expression, "passing")
+ passing = self.expressions(expression, key="passing")
passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
- columns = self.expressions(expression, "columns")
+ columns = self.expressions(expression, key="columns")
columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
by_ref = (
f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 14b082e9..2071e1e8 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1476,6 +1476,7 @@ class MatchRecognize(Expression):
"after": False,
"pattern": False,
"define": False,
+ "alias": False,
}
@@ -3167,7 +3168,6 @@ class Neg(Unary):
pass
-# Special Functions
class Alias(Expression):
arg_types = {"this": True, "alias": False}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index b2b2182f..3cbbee55 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -703,9 +703,7 @@ class Generator:
nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
if expression.args.get("values") is not None:
delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
- values = (
- f"{delimiters[0]}{self.expressions(expression, 'values')}{delimiters[1]}"
- )
+ values = f"{delimiters[0]}{self.expressions(expression, key='values')}{delimiters[1]}"
else:
nested = f"({interior})"
@@ -721,7 +719,7 @@ class Generator:
this = self.sql(expression, "this")
this = f" FROM {this}" if this else ""
using_sql = (
- f" USING {self.expressions(expression, 'using', sep=', USING ')}"
+ f" USING {self.expressions(expression, key='using', sep=', USING ')}"
if expression.args.get("using")
else ""
)
@@ -1329,16 +1327,20 @@ class Generator:
def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
partition = self.partition_by_sql(expression)
order = self.sql(expression, "order")
- measures = self.sql(expression, "measures")
- measures = self.seg(f"MEASURES {measures}") if measures else ""
+ measures = self.expressions(expression, key="measures")
+ measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else ""
rows = self.sql(expression, "rows")
rows = self.seg(rows) if rows else ""
after = self.sql(expression, "after")
after = self.seg(after) if after else ""
pattern = self.sql(expression, "pattern")
pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
- define = self.sql(expression, "define")
- define = self.seg(f"DEFINE {define}") if define else ""
+ definition_sqls = [
+ f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}"
+ for definition in expression.args.get("define", [])
+ ]
+ definitions = self.expressions(sqls=definition_sqls)
+ define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else ""
body = "".join(
(
partition,
@@ -1350,7 +1352,9 @@ class Generator:
define,
)
)
- return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}"
+ alias = self.sql(expression, "alias")
+ alias = f" {alias}" if alias else ""
+ return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}"
def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
limit = expression.args.get("limit")
@@ -1371,7 +1375,7 @@ class Generator:
self.sql(expression, "group"),
self.sql(expression, "having"),
self.sql(expression, "qualify"),
- self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
+ self.seg("WINDOW ") + self.expressions(expression, key="windows", flat=True)
if expression.args.get("windows")
else "",
self.sql(expression, "distribute"),
@@ -1604,7 +1608,7 @@ class Generator:
def primarykey_sql(self, expression: exp.ForeignKey) -> str:
expressions = self.expressions(expression, flat=True)
- options = self.expressions(expression, "options", flat=True, sep=" ")
+ options = self.expressions(expression, key="options", flat=True, sep=" ")
options = f" {options}" if options else ""
return f"PRIMARY KEY ({expressions}){options}"
@@ -1688,7 +1692,7 @@ class Generator:
this = self.sql(expression, "this")
expressions = self.expressions(expression, flat=True)
expressions = f"({expressions})" if expressions else ""
- options = self.expressions(expression, "options", flat=True, sep=" ")
+ options = self.expressions(expression, key="options", flat=True, sep=" ")
options = f" {options}" if options else ""
return f"REFERENCES {this}{expressions}{options}"
@@ -1714,9 +1718,9 @@ class Generator:
return f"NOT {self.sql(expression, 'this')}"
def alias_sql(self, expression: exp.Alias) -> str:
- to_sql = self.sql(expression, "alias")
- to_sql = f" AS {to_sql}" if to_sql else ""
- return f"{self.sql(expression, 'this')}{to_sql}"
+ alias = self.sql(expression, "alias")
+ alias = f" AS {alias}" if alias else ""
+ return f"{self.sql(expression, 'this')}{alias}"
def aliases_sql(self, expression: exp.Aliases) -> str:
return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
@@ -1825,13 +1829,13 @@ class Generator:
actions = expression.args["actions"]
if isinstance(actions[0], exp.ColumnDef):
- actions = self.expressions(expression, "actions", prefix="ADD COLUMN ")
+ actions = self.expressions(expression, key="actions", prefix="ADD COLUMN ")
elif isinstance(actions[0], exp.Schema):
- actions = self.expressions(expression, "actions", prefix="ADD COLUMNS ")
+ actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ")
elif isinstance(actions[0], exp.Delete):
- actions = self.expressions(expression, "actions", flat=True)
+ actions = self.expressions(expression, key="actions", flat=True)
else:
- actions = self.expressions(expression, "actions")
+ actions = self.expressions(expression, key="actions")
exists = " IF EXISTS" if expression.args.get("exists") else ""
return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}"
@@ -1994,14 +1998,15 @@ class Generator:
def expressions(
self,
- expression: exp.Expression,
+ expression: t.Optional[exp.Expression] = None,
key: t.Optional[str] = None,
+ sqls: t.Optional[t.List[str]] = None,
flat: bool = False,
indent: bool = True,
sep: str = ", ",
prefix: str = "",
) -> str:
- expressions = expression.args.get(key or "expressions")
+ expressions = expression.args.get(key or "expressions") if expression else sqls
if not expressions:
return ""
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 7df407da..08cb3f2d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1912,14 +1912,13 @@ class Parser(metaclass=_Parser):
def _parse_match_recognize(self) -> t.Optional[exp.Expression]:
if not self._match(TokenType.MATCH_RECOGNIZE):
return None
+
self._match_l_paren()
partition = self._parse_partition_by()
order = self._parse_order()
measures = (
- self._parse_alias(self._parse_conjunction())
- if self._match_text_seq("MEASURES")
- else None
+ self._parse_csv(self._parse_expression) if self._match_text_seq("MEASURES") else None
)
if self._match_text_seq("ONE", "ROW", "PER", "MATCH"):
@@ -1973,8 +1972,17 @@ class Parser(metaclass=_Parser):
pattern = None
define = (
- self._parse_alias(self._parse_conjunction()) if self._match_text_seq("DEFINE") else None
+ self._parse_csv(
+ lambda: self.expression(
+ exp.Alias,
+ alias=self._parse_id_var(any_token=True),
+ this=self._match(TokenType.ALIAS) and self._parse_conjunction(),
+ )
+ )
+ if self._match_text_seq("DEFINE")
+ else None
)
+
self._match_r_paren()
return self.expression(
@@ -1986,6 +1994,7 @@ class Parser(metaclass=_Parser):
after=after,
pattern=pattern,
define=define,
+ alias=self._parse_table_alias(),
)
def _parse_lateral(self) -> t.Optional[exp.Expression]:
|
tobymao/sqlglot
|
77b5608d0e7c8c2507b0b3dccd03d75dfd4978c6
|
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 6b4a32b1..92f0e5ce 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -108,3 +108,28 @@ FROM XMLTABLE(
},
pretty=True,
)
+
+ def test_match_recognize(self):
+ self.validate_identity(
+ """SELECT
+ *
+FROM sales_history
+MATCH_RECOGNIZE (
+ PARTITION BY product
+ ORDER BY
+ tstamp
+ MEASURES
+ STRT.tstamp AS start_tstamp,
+ LAST(UP.tstamp) AS peak_tstamp,
+ LAST(DOWN.tstamp) AS end_tstamp,
+ MATCH_NUMBER() AS mno
+ ONE ROW PER MATCH
+ AFTER MATCH SKIP TO LAST DOWN
+ PATTERN (STRT UP+ FLAT* DOWN+)
+ DEFINE
+ UP AS UP.units_sold > PREV(UP.units_sold),
+ FLAT AS FLAT.units_sold = PREV(FLAT.units_sold),
+ DOWN AS DOWN.units_sold < PREV(DOWN.units_sold)
+) MR""",
+ pretty=True,
+ )
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 6a9483b2..c883d13e 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -842,11 +842,13 @@ MATCH_RECOGNIZE (
PARTITION BY a, b
ORDER BY
x DESC
- MEASURES y AS b
+ MEASURES
+ y AS b
{row}
{after}
PATTERN (^ S1 S2*? ( {{- S3 -}} S4 )+ | PERMUTE(S1, S2){{1,2}} $)
- DEFINE x AS y
+ DEFINE
+ x AS y
)""",
pretty=True,
)
|
Bug in the MATCH_RECOGNIZE implementation
I'm using sqlglot to parse some Oracle SQL and noticed that a ParseError is raised with some sample code from the Oracle docs.
source: https://oracle-base.com/articles/12c/pattern-matching-in-oracle-database-12cr1
```
SELECT *
FROM sales_history MATCH_RECOGNIZE (
PARTITION BY product
ORDER BY tstamp
MEASURES STRT.tstamp AS start_tstamp,
LAST(UP.tstamp) AS peak_tstamp,
LAST(DOWN.tstamp) AS end_tstamp,
MATCH_NUMBER() AS mno
ONE ROW PER MATCH
AFTER MATCH SKIP TO LAST DOWN
PATTERN (STRT UP+ FLAT* DOWN+)
DEFINE
UP AS UP.units_sold > PREV(UP.units_sold),
FLAT AS FLAT.units_sold = PREV(FLAT.units_sold),
DOWN AS DOWN.units_sold < PREV(DOWN.units_sold)
) MR
```
on parsing this statement i get
```
Exception has occurred: ParseError
Expecting ). Line 5, Col: 47.
PARTITION BY product
ORDER BY tstamp
MEASURES STRT.tstamp AS start_tstamp,
LAST(UP.tstamp) AS peak_tstamp,
LAST(DOWN.tstamp) AS end_tsta
File "C:\gitRepos\parse\main.py", line 12, in <module>
stmnt = parser.parse(sql=sql)
sqlglot.errors.ParseError: Expecting ). Line 5, Col: 47.
PARTITION BY product
ORDER BY tstamp
MEASURES STRT.tstamp AS start_tstamp,
LAST(UP.tstamp) AS peak_tstamp,
LAST(DOWN.tstamp) AS end_tsta
```
After some digging in the codebase, I found an implementation error in the parsing of both MEASURES and DEFINE: neither takes multiple measures or definitions into account.
The fix for MEASURES seems simple enough:
```
def _parse_match_recognize(self) -> t.Optional[exp.Expression]:
if not self._match(TokenType.MATCH_RECOGNIZE):
return None
self._match_l_paren()
partition = self._parse_partition_by()
order = self._parse_order()
measures = (
#self._parse_alias(self._parse_conjunction())
self._parse_csv(self._parse_expression)
if self._match_text_seq("MEASURES")
else None
)...
```
MEASURES now parses successfully (while assuming UP, DOWN and STRT are tables, which might be a misnomer? Maybe a new symbol token should be introduced?),
but DEFINE now has the same kind of bug:
```
File "C:\gitRepos\parse\main.py", line 12, in <module>
stmnt = parser.parse(sql=sql)
sqlglot.errors.ParseError: Expecting ). Line 13, Col: 20.
MATCH SKIP TO LAST DOWN
PATTERN (STRT UP+ FLAT* DOWN+)
DEFINE
UP AS UP.units_sold > PREV(UP.units_sold),
FLAT AS FLAT.units_sold = PREV(FLAT.units_sold),
```
But for the DEFINE clause, self._parse_expression won't cut it because the symbol (alias) comes before the expression,
so I guess a new function should be defined.
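With the patch applied, a minimal sketch of round-tripping the failing Oracle example; this mirrors the test added in tests/dialects/test_oracle.py:
```python
import sqlglot

sql = """
SELECT *
FROM sales_history MATCH_RECOGNIZE (
  PARTITION BY product
  ORDER BY tstamp
  MEASURES STRT.tstamp AS start_tstamp,
           LAST(UP.tstamp) AS peak_tstamp,
           LAST(DOWN.tstamp) AS end_tstamp,
           MATCH_NUMBER() AS mno
  ONE ROW PER MATCH
  AFTER MATCH SKIP TO LAST DOWN
  PATTERN (STRT UP+ FLAT* DOWN+)
  DEFINE
    UP AS UP.units_sold > PREV(UP.units_sold),
    FLAT AS FLAT.units_sold = PREV(FLAT.units_sold),
    DOWN AS DOWN.units_sold < PREV(DOWN.units_sold)
) MR
"""

# MEASURES and DEFINE are now parsed as comma-separated lists, and the
# trailing table alias (MR) is kept on the MATCH_RECOGNIZE node.
expression = sqlglot.parse_one(sql, read="oracle")
print(expression.sql(dialect="oracle", pretty=True))
```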
|
0.0
|
77b5608d0e7c8c2507b0b3dccd03d75dfd4978c6
|
[
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize"
] |
[
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_oracle",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-14 11:15:52+00:00
|
mit
| 5,936 |
|
tobymao__sqlglot-1468
|
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 81992f28..ae565ee0 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -953,8 +953,10 @@ class Tokenizer(metaclass=_Tokenizer):
comment_end = self._COMMENTS[comment_start] # type: ignore
if comment_end:
- comment_end_size = len(comment_end)
+ # Skip the comment's start delimiter
+ self._advance(comment_start_size)
+ comment_end_size = len(comment_end)
while not self._end and self._chars(comment_end_size) != comment_end:
self._advance()
|
tobymao/sqlglot
|
ed79921b2cdcc53b0ca419d40aec4f90c1796e83
|
diff --git a/tests/test_tokens.py b/tests/test_tokens.py
index 8481f4d1..231f30b0 100644
--- a/tests/test_tokens.py
+++ b/tests/test_tokens.py
@@ -14,6 +14,7 @@ class TestTokens(unittest.TestCase):
("foo", []),
("foo /*comment 1*/ /*comment 2*/", ["comment 1", "comment 2"]),
("foo\n-- comment", [" comment"]),
+ ("1 /*/2 */", ["/2 "]),
]
for sql, comment in sql_comment:
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index b954da21..e6af6cd2 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -87,6 +87,7 @@ class TestTranspile(unittest.TestCase):
self.validate("SELECT 3>=3", "SELECT 3 >= 3")
def test_comments(self):
+ self.validate("SELECT 1 /*/2 */", "SELECT 1 /* /2 */")
self.validate("SELECT */*comment*/", "SELECT * /* comment */")
self.validate(
"SELECT * FROM table /*comment 1*/ /*comment 2*/",
|
Support division in a comment
`select 1 /*/2 */` raises an error, while it is a valid query.
As far as I understand, the issue is that the combination `/*/` is treated as both the start and the end of a comment, while it is not.
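With the tokenizer fix applied, a minimal sketch of the expected behavior; the output is taken from the test patch:
```python
import sqlglot

# The comment's start delimiter is now skipped before scanning for the end
# delimiter, so "/*/" no longer closes the comment it opens.
print(sqlglot.transpile("SELECT 1 /*/2 */")[0])
# SELECT 1 /* /2 */
```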
|
0.0
|
ed79921b2cdcc53b0ca419d40aec4f90c1796e83
|
[
"tests/test_tokens.py::TestTokens::test_comment_attachment",
"tests/test_transpile.py::TestTranspile::test_comments"
] |
[
"tests/test_tokens.py::TestTokens::test_command",
"tests/test_tokens.py::TestTokens::test_jinja",
"tests/test_tokens.py::TestTokens::test_token_line",
"tests/test_transpile.py::TestTranspile::test_alias",
"tests/test_transpile.py::TestTranspile::test_alter",
"tests/test_transpile.py::TestTranspile::test_asc",
"tests/test_transpile.py::TestTranspile::test_error_level",
"tests/test_transpile.py::TestTranspile::test_extract",
"tests/test_transpile.py::TestTranspile::test_identify_lambda",
"tests/test_transpile.py::TestTranspile::test_identity",
"tests/test_transpile.py::TestTranspile::test_if",
"tests/test_transpile.py::TestTranspile::test_ignore_nulls",
"tests/test_transpile.py::TestTranspile::test_index_offset",
"tests/test_transpile.py::TestTranspile::test_leading_comma",
"tests/test_transpile.py::TestTranspile::test_normalize_name",
"tests/test_transpile.py::TestTranspile::test_not_range",
"tests/test_transpile.py::TestTranspile::test_paren",
"tests/test_transpile.py::TestTranspile::test_partial",
"tests/test_transpile.py::TestTranspile::test_pretty",
"tests/test_transpile.py::TestTranspile::test_pretty_line_breaks",
"tests/test_transpile.py::TestTranspile::test_some",
"tests/test_transpile.py::TestTranspile::test_space",
"tests/test_transpile.py::TestTranspile::test_time",
"tests/test_transpile.py::TestTranspile::test_types",
"tests/test_transpile.py::TestTranspile::test_unary",
"tests/test_transpile.py::TestTranspile::test_unsupported_level",
"tests/test_transpile.py::TestTranspile::test_with"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-22 15:12:23+00:00
|
mit
| 5,937 |
|
tobymao__sqlglot-1549
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 3c2182ff..bce956eb 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -168,6 +168,9 @@ class DuckDB(Dialect):
exp.ArraySort: _array_sort_sql,
exp.ArraySum: rename_func("LIST_SUM"),
exp.CommentColumnConstraint: no_comment_column_constraint_sql,
+ exp.CurrentDate: lambda self, e: "CURRENT_DATE",
+ exp.CurrentTime: lambda self, e: "CURRENT_TIME",
+ exp.CurrentTimestamp: lambda self, e: "CURRENT_TIMESTAMP",
exp.DayOfMonth: rename_func("DAYOFMONTH"),
exp.DayOfWeek: rename_func("DAYOFWEEK"),
exp.DayOfYear: rename_func("DAYOFYEAR"),
diff --git a/sqlglot/dialects/spark2.py b/sqlglot/dialects/spark2.py
index 6056940e..1e62417d 100644
--- a/sqlglot/dialects/spark2.py
+++ b/sqlglot/dialects/spark2.py
@@ -42,7 +42,7 @@ def _unix_to_time_sql(self: Hive.Generator, expression: exp.UnixToTime) -> str:
scale = expression.args.get("scale")
timestamp = self.sql(expression, "this")
if scale is None:
- return f"FROM_UNIXTIME({timestamp})"
+ return f"CAST(FROM_UNIXTIME({timestamp}) AS TIMESTAMP)"
if scale == exp.UnixToTime.SECONDS:
return f"TIMESTAMP_SECONDS({timestamp})"
if scale == exp.UnixToTime.MILLIS:
|
tobymao/sqlglot
|
8f11a88ac16638c830e436ff87fd1f6d85a6cc18
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 386b0900..80edcd0a 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -187,7 +187,6 @@ class TestBigQuery(Validator):
"current_datetime",
write={
"bigquery": "CURRENT_DATETIME()",
- "duckdb": "CURRENT_DATETIME()",
"presto": "CURRENT_DATETIME()",
"hive": "CURRENT_DATETIME()",
"spark": "CURRENT_DATETIME()",
@@ -197,7 +196,7 @@ class TestBigQuery(Validator):
"current_time",
write={
"bigquery": "CURRENT_TIME()",
- "duckdb": "CURRENT_TIME()",
+ "duckdb": "CURRENT_TIME",
"presto": "CURRENT_TIME()",
"hive": "CURRENT_TIME()",
"spark": "CURRENT_TIME()",
@@ -207,7 +206,7 @@ class TestBigQuery(Validator):
"current_timestamp",
write={
"bigquery": "CURRENT_TIMESTAMP()",
- "duckdb": "CURRENT_TIMESTAMP()",
+ "duckdb": "CURRENT_TIMESTAMP",
"postgres": "CURRENT_TIMESTAMP",
"presto": "CURRENT_TIMESTAMP",
"hive": "CURRENT_TIMESTAMP()",
@@ -218,7 +217,7 @@ class TestBigQuery(Validator):
"current_timestamp()",
write={
"bigquery": "CURRENT_TIMESTAMP()",
- "duckdb": "CURRENT_TIMESTAMP()",
+ "duckdb": "CURRENT_TIMESTAMP",
"postgres": "CURRENT_TIMESTAMP",
"presto": "CURRENT_TIMESTAMP",
"hive": "CURRENT_TIMESTAMP()",
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 1a4c78bd..c7e6e85c 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -6,6 +6,9 @@ class TestDuckDB(Validator):
dialect = "duckdb"
def test_time(self):
+ self.validate_identity("SELECT CURRENT_DATE")
+ self.validate_identity("SELECT CURRENT_TIMESTAMP")
+
self.validate_all(
"EPOCH(x)",
read={
@@ -24,7 +27,7 @@ class TestDuckDB(Validator):
"bigquery": "UNIX_TO_TIME(x / 1000)",
"duckdb": "TO_TIMESTAMP(x / 1000)",
"presto": "FROM_UNIXTIME(x / 1000)",
- "spark": "FROM_UNIXTIME(x / 1000)",
+ "spark": "CAST(FROM_UNIXTIME(x / 1000) AS TIMESTAMP)",
},
)
self.validate_all(
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index 30804768..c657dfd8 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -133,6 +133,14 @@ class TestPresto(Validator):
self.validate_identity("TRIM(a, b)")
self.validate_identity("VAR_POP(a)")
+ self.validate_all(
+ "SELECT FROM_UNIXTIME(col) FROM tbl",
+ write={
+ "presto": "SELECT FROM_UNIXTIME(col) FROM tbl",
+ "spark": "SELECT CAST(FROM_UNIXTIME(col) AS TIMESTAMP) FROM tbl",
+ "trino": "SELECT FROM_UNIXTIME(col) FROM tbl",
+ },
+ )
self.validate_all(
"DATE_FORMAT(x, '%Y-%m-%d %H:%i:%S')",
write={
@@ -181,7 +189,7 @@ class TestPresto(Validator):
"duckdb": "TO_TIMESTAMP(x)",
"presto": "FROM_UNIXTIME(x)",
"hive": "FROM_UNIXTIME(x)",
- "spark": "FROM_UNIXTIME(x)",
+ "spark": "CAST(FROM_UNIXTIME(x) AS TIMESTAMP)",
},
)
self.validate_all(
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 78f23e49..57ee2354 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -227,7 +227,7 @@ class TestSnowflake(Validator):
write={
"bigquery": "SELECT UNIX_TO_TIME(1659981729)",
"snowflake": "SELECT TO_TIMESTAMP(1659981729)",
- "spark": "SELECT FROM_UNIXTIME(1659981729)",
+ "spark": "SELECT CAST(FROM_UNIXTIME(1659981729) AS TIMESTAMP)",
},
)
self.validate_all(
@@ -243,7 +243,7 @@ class TestSnowflake(Validator):
write={
"bigquery": "SELECT UNIX_TO_TIME('1659981729')",
"snowflake": "SELECT TO_TIMESTAMP('1659981729')",
- "spark": "SELECT FROM_UNIXTIME('1659981729')",
+ "spark": "SELECT CAST(FROM_UNIXTIME('1659981729') AS TIMESTAMP)",
},
)
self.validate_all(
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index 7d1a0910..9753e73d 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -479,7 +479,7 @@ FROM v""",
self.validate("UNIX_TO_STR(123, 'y')", "FROM_UNIXTIME(123, 'y')", write="spark")
self.validate(
"UNIX_TO_TIME(123)",
- "FROM_UNIXTIME(123)",
+ "CAST(FROM_UNIXTIME(123) AS TIMESTAMP)",
write="spark",
)
self.validate(
|
FROM_UNIXTIME has different types in Trino and SparkSQL
```FROM_UNIXTIME(`created`)``` in Trino SQL returns a datetime, but ```FROM_UNIXTIME(`created`)``` in SparkSQL returns a string, which can cause queries to fail.
I would expect:
```select FROM_UNIXTIME(`created`) from a``` # (read='trino')
to convert to
```select CAST(FROM_UNIXTIME('created') as timestamp) from a``` # (write='spark')
https://docs.databricks.com/sql/language-manual/functions/from_unixtime.html
https://spark.apache.org/docs/3.0.0/api/sql/#from_unixtime
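With the patch applied, a minimal sketch of the expected conversion; the output string is taken from the test patch:
```python
import sqlglot

# Spark output now wraps FROM_UNIXTIME in a CAST to TIMESTAMP, matching
# the datetime type that Trino's FROM_UNIXTIME returns.
print(
    sqlglot.transpile(
        "SELECT FROM_UNIXTIME(col) FROM tbl",
        read="trino",
        write="spark",
    )[0]
)
# SELECT CAST(FROM_UNIXTIME(col) AS TIMESTAMP) FROM tbl
```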
|
0.0
|
8f11a88ac16638c830e436ff87fd1f6d85a6cc18
|
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/test_transpile.py::TestTranspile::test_time"
] |
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_explode_to_unnest",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular",
"tests/dialects/test_presto.py::TestPresto::test_json",
"tests/dialects/test_presto.py::TestPresto::test_presto",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_unnest",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/test_transpile.py::TestTranspile::test_alias",
"tests/test_transpile.py::TestTranspile::test_alter",
"tests/test_transpile.py::TestTranspile::test_asc",
"tests/test_transpile.py::TestTranspile::test_comments",
"tests/test_transpile.py::TestTranspile::test_error_level",
"tests/test_transpile.py::TestTranspile::test_extract",
"tests/test_transpile.py::TestTranspile::test_identify_lambda",
"tests/test_transpile.py::TestTranspile::test_identity",
"tests/test_transpile.py::TestTranspile::test_if",
"tests/test_transpile.py::TestTranspile::test_index_offset",
"tests/test_transpile.py::TestTranspile::test_leading_comma",
"tests/test_transpile.py::TestTranspile::test_normalize_name",
"tests/test_transpile.py::TestTranspile::test_not_range",
"tests/test_transpile.py::TestTranspile::test_paren",
"tests/test_transpile.py::TestTranspile::test_partial",
"tests/test_transpile.py::TestTranspile::test_pretty",
"tests/test_transpile.py::TestTranspile::test_pretty_line_breaks",
"tests/test_transpile.py::TestTranspile::test_some",
"tests/test_transpile.py::TestTranspile::test_space",
"tests/test_transpile.py::TestTranspile::test_types",
"tests/test_transpile.py::TestTranspile::test_unary",
"tests/test_transpile.py::TestTranspile::test_unsupported_level",
"tests/test_transpile.py::TestTranspile::test_with"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-04 17:42:32+00:00
|
mit
| 5,938 |
|
tobymao__sqlglot-1662
|
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 2a8c78f1..5949bd8b 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -232,7 +232,7 @@ class Generator:
RESERVED_KEYWORDS: t.Set[str] = set()
WITH_SEPARATED_COMMENTS = (exp.Select, exp.From, exp.Where, exp.With)
- UNWRAPPED_INTERVAL_VALUES = (exp.Literal, exp.Paren, exp.Column)
+ UNWRAPPED_INTERVAL_VALUES = (exp.Column, exp.Literal, exp.Neg, exp.Paren)
SENTINEL_LINE_BREAK = "__SQLGLOT__LB__"
|
tobymao/sqlglot
|
e7f64555469d54b28e7d898c3e2817608387acc7
|
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 932e1ba0..bcfd9845 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -214,6 +214,7 @@ TBLPROPERTIES (
)
def test_spark(self):
+ self.validate_identity("INTERVAL -86 days")
self.validate_identity("SELECT UNIX_TIMESTAMP()")
self.validate_identity("TRIM(' SparkSQL ')")
self.validate_identity("TRIM(BOTH 'SL' FROM 'SSparkSQLS')")
|
SparkSQL negative intervals are being generated incorrectly
```
>>> x = sqlglot.parse_one("INTERVAL -86 days", read="spark")
>>> x.sql(dialect="spark")
'INTERVAL (-86) days'
```
SparkSQL fails on `INTERVAL (-86) days` but will accept `INTERVAL '-86' days` or `INTERVAL -86 days`.
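With `exp.Neg` added to `UNWRAPPED_INTERVAL_VALUES`, a minimal sketch of the now-correct round trip; the identity is asserted by the test patch:
```python
import sqlglot

# Negative interval values are no longer wrapped in parentheses,
# which Spark rejects.
x = sqlglot.parse_one("INTERVAL -86 days", read="spark")
print(x.sql(dialect="spark"))
# INTERVAL -86 days
```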
|
0.0
|
e7f64555469d54b28e7d898c3e2817608387acc7
|
[
"tests/dialects/test_spark.py::TestSpark::test_spark"
] |
[
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/dialects/test_spark.py::TestSpark::test_current_user",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_iif",
"tests/dialects/test_spark.py::TestSpark::test_bool_or"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-19 15:47:42+00:00
|
mit
| 5,939 |
|
tobymao__sqlglot-1663
|
diff --git a/sqlglot/lineage.py b/sqlglot/lineage.py
index ef38934a..f2f076e0 100644
--- a/sqlglot/lineage.py
+++ b/sqlglot/lineage.py
@@ -109,10 +109,7 @@ def lineage(
# a version that has only the column we care about.
# "x", SELECT x, y FROM foo
# => "x", SELECT x FROM foo
- source = optimize(
- scope.expression.select(select, append=False), schema=schema, rules=rules
- )
- select = source.selects[0]
+ source = t.cast(exp.Expression, scope.expression.select(select, append=False))
else:
source = scope.expression
|
tobymao/sqlglot
|
2cefcaaa192a110ac9c613bc1a211700a8a8399b
|
diff --git a/tests/test_lineage.py b/tests/test_lineage.py
index b9289385..f33a2c2d 100644
--- a/tests/test_lineage.py
+++ b/tests/test_lineage.py
@@ -146,17 +146,43 @@ class TestLineage(unittest.TestCase):
self.assertEqual(node.alias, "")
downstream = node.downstream[0]
- self.assertEqual(
- downstream.source.sql(),
- "SELECT t.a AS a FROM (VALUES (1), (2)) AS t(a)",
- )
+ self.assertEqual(downstream.source.sql(), "SELECT t.a AS a FROM (VALUES (1), (2)) AS t(a)")
self.assertEqual(downstream.expression.sql(), "t.a AS a")
self.assertEqual(downstream.alias, "y")
downstream = downstream.downstream[0]
+ self.assertEqual(downstream.source.sql(), "(VALUES (1), (2)) AS t(a)")
+ self.assertEqual(downstream.expression.sql(), "a")
+ self.assertEqual(downstream.alias, "")
+
+ def test_lineage_cte_name_appears_in_schema(self) -> None:
+ schema = {"a": {"b": {"t1": {"c1": "int"}, "t2": {"c2": "int"}}}}
+
+ node = lineage(
+ "c2",
+ "WITH t1 AS (SELECT * FROM a.b.t2), inter AS (SELECT * FROM t1) SELECT * FROM inter",
+ schema=schema,
+ )
+
self.assertEqual(
- downstream.source.sql(),
- "(VALUES (1), (2)) AS t(a)",
+ node.source.sql(),
+ "WITH t1 AS (SELECT t2.c2 AS c2 FROM a.b.t2 AS t2), inter AS (SELECT t1.c2 AS c2 FROM t1) SELECT inter.c2 AS c2 FROM inter",
)
- self.assertEqual(downstream.expression.sql(), "a")
+ self.assertEqual(node.alias, "")
+
+ downstream = node.downstream[0]
+ self.assertEqual(downstream.source.sql(), "SELECT t1.c2 AS c2 FROM t1")
+ self.assertEqual(downstream.expression.sql(), "t1.c2 AS c2")
self.assertEqual(downstream.alias, "")
+
+ downstream = downstream.downstream[0]
+ self.assertEqual(downstream.source.sql(), "SELECT t2.c2 AS c2 FROM a.b.t2 AS t2")
+ self.assertEqual(downstream.expression.sql(), "t2.c2 AS c2")
+ self.assertEqual(downstream.alias, "")
+
+ downstream = downstream.downstream[0]
+ self.assertEqual(downstream.source.sql(), "a.b.t2 AS t2")
+ self.assertEqual(downstream.expression.sql(), "a.b.t2 AS t2")
+ self.assertEqual(downstream.alias, "")
+
+ self.assertEqual(downstream.downstream, [])
|
Issue calculating lineage when a CTE has the same name as a table in the schema
**Fully reproducible code snippet**
The following code
```python
from sqlglot.lineage import lineage
selected_column = "col_a"
sql = """
with
my_cte_name_also_a_table_name as (
select * from raw.schema.my_table
)
, inter as (
select * from my_cte_name_also_a_table_name
)
select * from inter
"""
schema = {"raw": {"schema" : {"my_cte_name_also_a_table_name": {"col_1": "int"}, "my_table": {"col_a": "int"}}}}
schema_without_table = {"my_table": {"col_a": "int"}}
l = lineage(column=selected_column, sql=sql, schema=schema, dialect="snowflake")
```
returns the error
```
Traceback (most recent call last):
File "/xxx/short_issue_lineage.py", line 21, in <module>
l = lineage(column=selected_column, sql=sql, schema=schema, dialect="snowflake")
File "/xxx/lib/python3.9/site-packages/sqlglot/lineage.py", line 148, in lineage
return to_node(column if isinstance(column, str) else column.name, scope)
File "/xxx/lib/python3.9/site-packages/sqlglot/lineage.py", line 136, in to_node
to_node(
File "/xxx/lib/python3.9/site-packages/sqlglot/lineage.py", line 112, in to_node
source = optimize(
File "/xxx/lib/python3.9/site-packages/sqlglot/optimizer/optimizer.py", line 89, in optimize
expression = rule(expression, **rule_kwargs)
File "/xxx/lib/python3.9/site-packages/sqlglot/optimizer/qualify_columns.py", line 49, in qualify_columns
_qualify_columns(scope, resolver)
File "/xxx/lib/python3.9/site-packages/sqlglot/optimizer/qualify_columns.py", line 250, in _qualify_columns
raise OptimizeError(f"Unknown column: {column_name}")
sqlglot.errors.OptimizeError: Unknown column: col_a
```
It looks like there is an issue with the logic being confused between `my_cte_name_also_a_table_name` the CTE (with a column called `col_a`) and `my_cte_name_also_a_table_name` the table from the schema with a column called `col_1`.
With the example above, if I remove the table from the schema and calculate the lineage, the error goes away.
```
l = lineage(column=selected_column, sql=sql, schema=schema_without_table, dialect="snowflake")
```
Interestingly, the code without the intermediate CTE `inter` calculates the lineage correctly when the table with the CTE name is provided in the schema.
```
sql = """
with
my_cte_name_also_a_table_name as (
select * from raw.schema.my_table
)
select * from my_cte_name_also_a_table_name
"""
l = lineage(column=selected_column, sql=sql, schema=schema, dialect="snowflake")
```
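With the patch applied (lineage no longer re-runs the optimizer on the pruned projection, so the CTE keeps shadowing the schema table of the same name), a minimal sketch of walking the lineage chain for the snippet above; it follows only the first downstream branch:
```python
from sqlglot.lineage import lineage

sql = """
with my_cte_name_also_a_table_name as (
    select * from raw.schema.my_table
),
inter as (
    select * from my_cte_name_also_a_table_name
)
select * from inter
"""
schema = {
    "raw": {
        "schema": {
            "my_cte_name_also_a_table_name": {"col_1": "int"},
            "my_table": {"col_a": "int"},
        }
    }
}

node = lineage(column="col_a", sql=sql, schema=schema, dialect="snowflake")
while True:
    # Each node exposes source, expression, alias and downstream.
    print(node.source.sql(dialect="snowflake"))
    if not node.downstream:
        break
    node = node.downstream[0]  # follow the first branch only (a sketch)
```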
|
0.0
|
2cefcaaa192a110ac9c613bc1a211700a8a8399b
|
[
"tests/test_lineage.py::TestLineage::test_lineage_cte_name_appears_in_schema"
] |
[
"tests/test_lineage.py::TestLineage::test_lineage",
"tests/test_lineage.py::TestLineage::test_lineage_external_col",
"tests/test_lineage.py::TestLineage::test_lineage_source_with_cte",
"tests/test_lineage.py::TestLineage::test_lineage_source_with_star",
"tests/test_lineage.py::TestLineage::test_lineage_sql_with_cte",
"tests/test_lineage.py::TestLineage::test_lineage_values"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-05-19 16:27:17+00:00
|
mit
| 5,940 |
|
tobymao__sqlglot-1670
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index f4475e06..77b8b5e0 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -3931,7 +3931,7 @@ class IfNull(Func):
class Initcap(Func):
- pass
+ arg_types = {"this": True, "expression": False}
class JSONKeyValue(Expression):
|
tobymao/sqlglot
|
64fdca44be6ee6ddc41de424387741ac2baf3f8f
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index ed036150..58b2a2da 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -6,6 +6,7 @@ class TestSnowflake(Validator):
dialect = "snowflake"
def test_snowflake(self):
+ self.validate_identity("INITCAP('iqamqinterestedqinqthisqtopic', 'q')")
self.validate_identity("CAST(x AS GEOMETRY)")
self.validate_identity("OBJECT_CONSTRUCT(*)")
self.validate_identity("SELECT TO_DATE('2019-02-28') + INTERVAL '1 day, 1 year'")
|
`initcap` accepts 2 arguments on Snowflake
Hi! SQLGlot currently raises an error when `initcap` is called with a second argument, which is allowed on Snowflake.
**Fully reproducible code snippet**
```
import sqlglot
sqlglot.transpile("select initcap('this is a-test', ' ')", read="snowflake")
```
SQLGlot Error:
```
sqlglot.errors.ParseError: The number of provided arguments (2) is greater than the maximum number of supported arguments (1). Line 1, Col: 38.
select initcap('this is a-test', ' ')
```
**Official Documentation**
https://docs.snowflake.com/en/sql-reference/functions/initcap
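With the widened `arg_types`, a minimal sketch of the now-working call; the two-argument form parses and round-trips unchanged:
```python
import sqlglot

# INITCAP with an explicit delimiter argument is now accepted.
print(
    sqlglot.transpile(
        "SELECT INITCAP('this is a-test', ' ')",
        read="snowflake",
        write="snowflake",
    )[0]
)
# SELECT INITCAP('this is a-test', ' ')
```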
|
0.0
|
64fdca44be6ee6ddc41de424387741ac2baf3f8f
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-22 11:28:06+00:00
|
mit
| 5,941 |
|
tobymao__sqlglot-1675
|
diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py
index 3bed58bb..db0d9d4c 100644
--- a/sqlglot/dialects/teradata.py
+++ b/sqlglot/dialects/teradata.py
@@ -141,7 +141,9 @@ class Teradata(Dialect):
PROPERTIES_LOCATION = {
**generator.Generator.PROPERTIES_LOCATION, # type: ignore
+ exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
exp.PartitionedByProperty: exp.Properties.Location.POST_INDEX,
+ exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
}
TRANSFORMS = {
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 0fff2dca..5f890ac7 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1896,7 +1896,7 @@ class NoPrimaryIndexProperty(Property):
class OnCommitProperty(Property):
- arg_type = {"this": False}
+ arg_type = {"delete": False}
class PartitionedByProperty(Property):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 3b9961bb..bb94a51f 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -76,7 +76,7 @@ class Generator:
exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG",
exp.MaterializedProperty: lambda self, e: "MATERIALIZED",
exp.NoPrimaryIndexProperty: lambda self, e: "NO PRIMARY INDEX",
- exp.OnCommitProperty: lambda self, e: "ON COMMIT PRESERVE ROWS",
+ exp.OnCommitProperty: lambda self, e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS",
exp.ReturnsProperty: lambda self, e: self.naked_property(e),
exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}",
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index b4382539..54c152e9 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1212,28 +1212,21 @@ class Parser(metaclass=_Parser):
expression = self._parse_ddl_select()
if create_token.token_type == TokenType.TABLE:
- # exp.Properties.Location.POST_EXPRESSION
- temp_properties = self._parse_properties()
- if properties and temp_properties:
- properties.expressions.extend(temp_properties.expressions)
- elif temp_properties:
- properties = temp_properties
-
indexes = []
while True:
index = self._parse_create_table_index()
- # exp.Properties.Location.POST_INDEX
- if self._match(TokenType.PARTITION_BY, advance=False):
- temp_properties = self._parse_properties()
- if properties and temp_properties:
- properties.expressions.extend(temp_properties.expressions)
- elif temp_properties:
- properties = temp_properties
+ # exp.Properties.Location.POST_EXPRESSION or exp.Properties.Location.POST_INDEX
+ temp_properties = self._parse_properties()
+ if properties and temp_properties:
+ properties.expressions.extend(temp_properties.expressions)
+ elif temp_properties:
+ properties = temp_properties
if not index:
break
else:
+ self._match(TokenType.COMMA)
indexes.append(index)
elif create_token.token_type == TokenType.VIEW:
if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"):
@@ -1589,8 +1582,9 @@ class Parser(metaclass=_Parser):
return exp.NoPrimaryIndexProperty()
def _parse_oncommit(self) -> exp.Expression:
- self._match_text_seq("COMMIT", "PRESERVE", "ROWS")
- return exp.OnCommitProperty()
+ if self._match_text_seq("COMMIT", "PRESERVE", "ROWS"):
+ return exp.OnCommitProperty()
+ return exp.OnCommitProperty(delete=self._match_text_seq("COMMIT", "DELETE", "ROWS"))
def _parse_distkey(self) -> exp.Expression:
return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var))
|
tobymao/sqlglot
|
1a88b17ce0b9d5c71d48f53eacab63e011dc170b
|
diff --git a/tests/dialects/test_teradata.py b/tests/dialects/test_teradata.py
index dcb513d8..902e360c 100644
--- a/tests/dialects/test_teradata.py
+++ b/tests/dialects/test_teradata.py
@@ -24,6 +24,12 @@ class TestTeradata(Validator):
def test_create(self):
self.validate_identity("CREATE TABLE x (y INT) PRIMARY INDEX (y) PARTITION BY y INDEX (y)")
+ self.validate_identity(
+ "CREATE MULTISET VOLATILE TABLE my_table (id INT) PRIMARY INDEX (id) ON COMMIT PRESERVE ROWS"
+ )
+ self.validate_identity(
+ "CREATE SET VOLATILE TABLE my_table (id INT) PRIMARY INDEX (id) ON COMMIT DELETE ROWS"
+ )
self.validate_identity(
"CREATE TABLE a (b INT) PRIMARY INDEX (y) PARTITION BY RANGE_N(b BETWEEN 'a', 'b' AND 'c' EACH '1')"
)
@@ -34,11 +40,21 @@ class TestTeradata(Validator):
"CREATE TABLE a (b INT) PARTITION BY RANGE_N(b BETWEEN *, 1 AND * EACH b) INDEX (a)"
)
+ self.validate_all(
+ """
+ CREATE SET TABLE test, NO FALLBACK, NO BEFORE JOURNAL, NO AFTER JOURNAL,
+ CHECKSUM = DEFAULT (x INT, y INT, z CHAR(30), a INT, b DATE, e INT)
+ PRIMARY INDEX (a),
+ INDEX(x, y)
+ """,
+ write={
+ "teradata": "CREATE SET TABLE test, NO FALLBACK, NO BEFORE JOURNAL, NO AFTER JOURNAL, CHECKSUM=DEFAULT (x INT, y INT, z CHAR(30), a INT, b DATE, e INT) PRIMARY INDEX (a) INDEX (x, y)",
+ },
+ )
self.validate_all(
"REPLACE VIEW a AS (SELECT b FROM c)",
write={"teradata": "CREATE OR REPLACE VIEW a AS (SELECT b FROM c)"},
)
-
self.validate_all(
"CREATE VOLATILE TABLE a",
write={
|
Problem with Teradata create volatile table statement
There is a problem with parsing the Teradata CREATE VOLATILE TABLE statement.
In Teradata version 16.20 this SQL will work correctly:
create multiset volatile table my_table (
id int
)
primary index (id)
on commit preserve rows ;
This SQL will fail
create multiset volatile table my_table (
id int
)
on commit preserve rows
primary index (id) ;
with the error "Expected something between the ROWS keyword and the PRIMARY keyword".
In sqlglot's Teradata dialect the behavior is reversed: the first (valid) statement fails to parse, while the second (invalid) statement parses correctly.
The error from the first statement is:
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:851, in Parser.parse(self, raw_tokens, sql)
837 def parse(
838 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
839 ) -> t.List[t.Optional[exp.Expression]]:
840 """
841 Parses a list of tokens and returns a list of syntax trees, one tree
842 per parsed SQL statement.
(...)
849 The list of syntax trees.
850 """
--> 851 return self._parse(
852 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
853 )
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:917, in Parser._parse(self, parse_method, raw_tokens, sql)
914 expressions.append(parse_method(self))
916 if self._index < len(self._tokens):
--> 917 self.raise_error("Invalid expression / Unexpected token")
919 self.check_errors()
921 return expressions
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:960, in Parser.raise_error(self, message, token)
948 error = ParseError.new(
949 f"{message}. Line {token.line}, Col: {token.col}.\n"
950 f" {start_context}\033[4m{highlight}\033[0m{end_context}",
(...)
956 end_context=end_context,
957 )
959 if self.error_level == ErrorLevel.IMMEDIATE:
--> 960 raise error
962 self.errors.append(error)
ParseError: Invalid expression / Unexpected token. Line 6, Col: 2.
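A minimal reproduction sketch (hedged; it only uses sqlglot's public `parse_one` API, and the one-column table body is illustrative):
```python
import sqlglot

# Valid Teradata order: PRIMARY INDEX before ON COMMIT.
# This raised the ParseError above before the fix.
sql = (
    "CREATE MULTISET VOLATILE TABLE my_table (id INT) "
    "PRIMARY INDEX (id) ON COMMIT PRESERVE ROWS"
)
print(sqlglot.parse_one(sql, read="teradata"))
```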
|
0.0
|
1a88b17ce0b9d5c71d48f53eacab63e011dc170b
|
[
"tests/dialects/test_teradata.py::TestTeradata::test_create"
] |
[
"tests/dialects/test_teradata.py::TestTeradata::test_abbrev",
"tests/dialects/test_teradata.py::TestTeradata::test_cast",
"tests/dialects/test_teradata.py::TestTeradata::test_datatype",
"tests/dialects/test_teradata.py::TestTeradata::test_insert",
"tests/dialects/test_teradata.py::TestTeradata::test_mod",
"tests/dialects/test_teradata.py::TestTeradata::test_translate",
"tests/dialects/test_teradata.py::TestTeradata::test_update"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-22 21:02:46+00:00
|
mit
| 5,942 |
|
tobymao__sqlglot-1678
|
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 956fafdf..1c5df09b 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -797,7 +797,7 @@ class Tokenizer(metaclass=_Tokenizer):
self._start = 0
self._current = 0
self._line = 1
- self._col = 1
+ self._col = 0
self._comments: t.List[str] = []
self._char = ""
@@ -810,13 +810,12 @@ class Tokenizer(metaclass=_Tokenizer):
self.reset()
self.sql = sql
self.size = len(sql)
+
try:
self._scan()
except Exception as e:
- start = self._current - 50
- end = self._current + 50
- start = start if start > 0 else 0
- end = end if end < self.size else self.size - 1
+ start = max(self._current - 50, 0)
+ end = min(self._current + 50, self.size - 1)
context = self.sql[start:end]
raise ValueError(f"Error tokenizing '{context}'") from e
@@ -841,17 +840,17 @@ class Tokenizer(metaclass=_Tokenizer):
if until and until():
break
- if self.tokens:
+ if self.tokens and self._comments:
self.tokens[-1].comments.extend(self._comments)
def _chars(self, size: int) -> str:
if size == 1:
return self._char
+
start = self._current - 1
end = start + size
- if end <= self.size:
- return self.sql[start:end]
- return ""
+
+ return self.sql[start:end] if end <= self.size else ""
def _advance(self, i: int = 1, alnum: bool = False) -> None:
if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
@@ -866,6 +865,7 @@ class Tokenizer(metaclass=_Tokenizer):
self._peek = "" if self._end else self.sql[self._current]
if alnum and self._char.isalnum():
+ # Here we use local variables instead of attributes for better performance
_col = self._col
_current = self._current
_end = self._end
|
tobymao/sqlglot
|
903dde0edaec7085dffd872731030eb966079474
|
diff --git a/tests/test_parser.py b/tests/test_parser.py
index e811e96a..11df5669 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -22,7 +22,7 @@ class TestParser(unittest.TestCase):
{
"description": "Invalid expression / Unexpected token",
"line": 1,
- "col": 7,
+ "col": 6,
"start_context": "",
"highlight": "SELECT",
"end_context": " 1;",
@@ -30,7 +30,8 @@ class TestParser(unittest.TestCase):
}
]
with self.assertRaises(ParseError) as ctx:
- parse_one("SELECT 1;", "sqlite", [exp.From])
+ parse_one("SELECT 1;", read="sqlite", into=[exp.From])
+
self.assertEqual(str(ctx.exception), expected_message)
self.assertEqual(ctx.exception.errors, expected_errors)
@@ -40,7 +41,7 @@ class TestParser(unittest.TestCase):
{
"description": "Invalid expression / Unexpected token",
"line": 1,
- "col": 7,
+ "col": 6,
"start_context": "",
"highlight": "SELECT",
"end_context": " 1;",
@@ -49,7 +50,7 @@ class TestParser(unittest.TestCase):
{
"description": "Invalid expression / Unexpected token",
"line": 1,
- "col": 7,
+ "col": 6,
"start_context": "",
"highlight": "SELECT",
"end_context": " 1;",
@@ -58,6 +59,7 @@ class TestParser(unittest.TestCase):
]
with self.assertRaises(ParseError) as ctx:
parse_one("SELECT 1;", "sqlite", [exp.From, exp.Join])
+
self.assertEqual(str(ctx.exception), expected_message)
self.assertEqual(ctx.exception.errors, expected_errors)
diff --git a/tests/test_tokens.py b/tests/test_tokens.py
index f70d70e8..c09eab48 100644
--- a/tests/test_tokens.py
+++ b/tests/test_tokens.py
@@ -20,7 +20,7 @@ class TestTokens(unittest.TestCase):
for sql, comment in sql_comment:
self.assertEqual(tokenizer.tokenize(sql)[0].comments, comment)
- def test_token_line(self):
+ def test_token_line_col(self):
tokens = Tokenizer().tokenize(
"""SELECT /*
line break
@@ -30,10 +30,19 @@ line break
x"""
)
+ self.assertEqual(tokens[0].line, 1)
+ self.assertEqual(tokens[0].col, 6)
self.assertEqual(tokens[1].line, 5)
self.assertEqual(tokens[1].col, 3)
- self.assertEqual(tokens[-1].line, 6)
- self.assertEqual(tokens[-1].col, 1)
+ self.assertEqual(tokens[2].line, 5)
+ self.assertEqual(tokens[2].col, 4)
+ self.assertEqual(tokens[3].line, 6)
+ self.assertEqual(tokens[3].col, 1)
+
+ tokens = Tokenizer().tokenize("SELECT .")
+
+ self.assertEqual(tokens[1].line, 1)
+ self.assertEqual(tokens[1].col, 8)
def test_command(self):
tokens = Tokenizer().tokenize("SHOW;")
@@ -51,7 +60,7 @@ x"""
self.assertEqual(tokens[3].token_type, TokenType.SEMICOLON)
def test_error_msg(self):
- with self.assertRaisesRegex(ValueError, "Error tokenizing 'select.*"):
+ with self.assertRaisesRegex(ValueError, "Error tokenizing 'select /'"):
Tokenizer().tokenize("select /*")
def test_jinja(self):
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index 701f8ef4..1085b092 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -555,14 +555,14 @@ FROM v""",
def test_error_level(self, logger):
invalid = "x + 1. ("
expected_messages = [
- "Required keyword: 'expressions' missing for <class 'sqlglot.expressions.Aliases'>. Line 1, Col: 9.\n x + 1. \033[4m(\033[0m",
- "Expecting ). Line 1, Col: 9.\n x + 1. \033[4m(\033[0m",
+ "Required keyword: 'expressions' missing for <class 'sqlglot.expressions.Aliases'>. Line 1, Col: 8.\n x + 1. \033[4m(\033[0m",
+ "Expecting ). Line 1, Col: 8.\n x + 1. \033[4m(\033[0m",
]
expected_errors = [
{
"description": "Required keyword: 'expressions' missing for <class 'sqlglot.expressions.Aliases'>",
"line": 1,
- "col": 9,
+ "col": 8,
"start_context": "x + 1. ",
"highlight": "(",
"end_context": "",
@@ -571,7 +571,7 @@ FROM v""",
{
"description": "Expecting )",
"line": 1,
- "col": 9,
+ "col": 8,
"start_context": "x + 1. ",
"highlight": "(",
"end_context": "",
@@ -585,26 +585,28 @@ FROM v""",
with self.assertRaises(ParseError) as ctx:
transpile(invalid, error_level=ErrorLevel.IMMEDIATE)
+
self.assertEqual(str(ctx.exception), expected_messages[0])
self.assertEqual(ctx.exception.errors[0], expected_errors[0])
with self.assertRaises(ParseError) as ctx:
transpile(invalid, error_level=ErrorLevel.RAISE)
+
self.assertEqual(str(ctx.exception), "\n\n".join(expected_messages))
self.assertEqual(ctx.exception.errors, expected_errors)
more_than_max_errors = "(((("
expected_messages = (
- "Required keyword: 'this' missing for <class 'sqlglot.expressions.Paren'>. Line 1, Col: 5.\n (((\033[4m(\033[0m\n\n"
- "Expecting ). Line 1, Col: 5.\n (((\033[4m(\033[0m\n\n"
- "Expecting ). Line 1, Col: 5.\n (((\033[4m(\033[0m\n\n"
+ "Required keyword: 'this' missing for <class 'sqlglot.expressions.Paren'>. Line 1, Col: 4.\n (((\033[4m(\033[0m\n\n"
+ "Expecting ). Line 1, Col: 4.\n (((\033[4m(\033[0m\n\n"
+ "Expecting ). Line 1, Col: 4.\n (((\033[4m(\033[0m\n\n"
"... and 2 more"
)
expected_errors = [
{
"description": "Required keyword: 'this' missing for <class 'sqlglot.expressions.Paren'>",
"line": 1,
- "col": 5,
+ "col": 4,
"start_context": "(((",
"highlight": "(",
"end_context": "",
@@ -613,7 +615,7 @@ FROM v""",
{
"description": "Expecting )",
"line": 1,
- "col": 5,
+ "col": 4,
"start_context": "(((",
"highlight": "(",
"end_context": "",
@@ -625,6 +627,7 @@ FROM v""",
with self.assertRaises(ParseError) as ctx:
transpile(more_than_max_errors, error_level=ErrorLevel.RAISE)
+
self.assertEqual(str(ctx.exception), expected_messages)
self.assertEqual(ctx.exception.errors, expected_errors)
|
Highlight in error is off by one for BigQuery when identifiers are quoted
When trying to parse a BigQuery statement that has an error, the returned error info's `start_context` and `highlight` are off by one when the query quotes the identifiers.
**Fully reproducible code snippet**
```
import sqlglot
sql = "SELECT * FROM `TableA` UNION `TableB`"
try:
parse = sqlglot.parse_one(sql, read="bigquery")
except Exception as e:
error = e.errors[0]
print(f'With quotes: Start context: {error["start_context"]}; Highlight: {error["highlight"]}')
sql = "SELECT * FROM TableA UNION TableB"
try:
parse = sqlglot.parse_one(sql, read="bigquery")
except Exception as e:
error = e.errors[0]
print(f'Without quotes: Start context: {error["start_context"]}; Highlight: {error["highlight"]}')
```
Expected output:
```
With quotes: Start context: SELECT * FROM TableA UNION ; Highlight: TableB
Without quotes: Start context: SELECT * FROM TableA UNION ; Highlight: TableB
```
Actual output:
```
With quotes: Start context: SELECT * FROM `TableA` UNION `T; Highlight: ableB`
Without quotes: Start context: SELECT * FROM TableA UNION ; Highlight: TableB
```
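For illustration, a small sketch of the new column semantics (an assumption based on the updated tests above: `col` is 1-based and now points at a token's last character):
```python
from sqlglot.tokens import Tokenizer

tokens = Tokenizer().tokenize("SELECT 1")
# "SELECT" spans columns 1-6, so its col is now 6 (it was 7 before the fix)
print(tokens[0].line, tokens[0].col)  # 1 6
```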
|
0.0
|
903dde0edaec7085dffd872731030eb966079474
|
[
"tests/test_parser.py::TestParser::test_parse_into_error",
"tests/test_parser.py::TestParser::test_parse_into_errors",
"tests/test_tokens.py::TestTokens::test_token_line_col",
"tests/test_transpile.py::TestTranspile::test_error_level"
] |
[
"tests/test_parser.py::TestParser::test_column",
"tests/test_parser.py::TestParser::test_command",
"tests/test_parser.py::TestParser::test_comment_error_n",
"tests/test_parser.py::TestParser::test_comment_error_r",
"tests/test_parser.py::TestParser::test_comments",
"tests/test_parser.py::TestParser::test_create_table_error",
"tests/test_parser.py::TestParser::test_expression",
"tests/test_parser.py::TestParser::test_float",
"tests/test_parser.py::TestParser::test_identify",
"tests/test_parser.py::TestParser::test_lambda_struct",
"tests/test_parser.py::TestParser::test_missing_by",
"tests/test_parser.py::TestParser::test_multi",
"tests/test_parser.py::TestParser::test_parameter",
"tests/test_parser.py::TestParser::test_parse_empty",
"tests/test_parser.py::TestParser::test_parse_errors",
"tests/test_parser.py::TestParser::test_parse_into",
"tests/test_parser.py::TestParser::test_pivot_columns",
"tests/test_parser.py::TestParser::test_pretty_config_override",
"tests/test_parser.py::TestParser::test_rename_table",
"tests/test_parser.py::TestParser::test_select",
"tests/test_parser.py::TestParser::test_set_expression",
"tests/test_parser.py::TestParser::test_space",
"tests/test_parser.py::TestParser::test_table",
"tests/test_parser.py::TestParser::test_transactions",
"tests/test_parser.py::TestParser::test_type_literals",
"tests/test_parser.py::TestParser::test_unary_plus",
"tests/test_parser.py::TestParser::test_union_order",
"tests/test_parser.py::TestParser::test_var",
"tests/test_tokens.py::TestTokens::test_command",
"tests/test_tokens.py::TestTokens::test_comment_attachment",
"tests/test_tokens.py::TestTokens::test_error_msg",
"tests/test_tokens.py::TestTokens::test_jinja",
"tests/test_transpile.py::TestTranspile::test_alias",
"tests/test_transpile.py::TestTranspile::test_alter",
"tests/test_transpile.py::TestTranspile::test_asc",
"tests/test_transpile.py::TestTranspile::test_comments",
"tests/test_transpile.py::TestTranspile::test_extract",
"tests/test_transpile.py::TestTranspile::test_identify_lambda",
"tests/test_transpile.py::TestTranspile::test_identity",
"tests/test_transpile.py::TestTranspile::test_if",
"tests/test_transpile.py::TestTranspile::test_index_offset",
"tests/test_transpile.py::TestTranspile::test_leading_comma",
"tests/test_transpile.py::TestTranspile::test_normalize_name",
"tests/test_transpile.py::TestTranspile::test_not_range",
"tests/test_transpile.py::TestTranspile::test_paren",
"tests/test_transpile.py::TestTranspile::test_partial",
"tests/test_transpile.py::TestTranspile::test_pretty",
"tests/test_transpile.py::TestTranspile::test_pretty_line_breaks",
"tests/test_transpile.py::TestTranspile::test_some",
"tests/test_transpile.py::TestTranspile::test_space",
"tests/test_transpile.py::TestTranspile::test_time",
"tests/test_transpile.py::TestTranspile::test_types",
"tests/test_transpile.py::TestTranspile::test_unary",
"tests/test_transpile.py::TestTranspile::test_unsupported_level",
"tests/test_transpile.py::TestTranspile::test_with"
] |
{
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-23 17:04:55+00:00
|
mit
| 5,943 |
|
tobymao__sqlglot-1703
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 7cdf4602..6afc11c0 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -4135,8 +4135,8 @@ class Parser(metaclass=_Parser):
)
def _parse_ddl_select(self) -> t.Optional[exp.Expression]:
- return self._parse_set_operations(
- self._parse_select(nested=True, parse_subquery_alias=False)
+ return self._parse_query_modifiers(
+ self._parse_set_operations(self._parse_select(nested=True, parse_subquery_alias=False))
)
def _parse_transaction(self) -> exp.Expression:
|
tobymao/sqlglot
|
910166c1d1d33e2110c26140e1916745dc2f1212
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 45845732..99d8a3ce 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -31,6 +31,9 @@ class TestBigQuery(Validator):
self.validate_identity(
"SELECT * FROM (SELECT * FROM `t`) AS a UNPIVOT((c) FOR c_name IN (v1, v2))"
)
+ self.validate_identity(
+ "CREATE TABLE IF NOT EXISTS foo AS SELECT * FROM bla EXCEPT DISTINCT (SELECT * FROM bar) LIMIT 0"
+ )
self.validate_all('x <> ""', write={"bigquery": "x <> ''"})
self.validate_all('x <> """"""', write={"bigquery": "x <> ''"})
|
bigquery create table as select with limit
Calling `sqlglot.parse_one(sql, read='bigquery')` fails on this SQL, seemingly because the LIMIT clause gets associated with the CREATE TABLE instead of the SELECT.
```sql
CREATE TABLE IF NOT EXISTS CENSUS.my_table_name AS SELECT `col1`, `col2` FROM CENSUS.my_base_table
EXCEPT DISTINCT (
SELECT `col1`, `col2` FROM CENSUS.my_third_table
)
LIMIT 0;
```
I'm not sure why you'd write this sort of query with the except distinct clause, but Census seems to generate this SQL.
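A hedged reproduction sketch (per the added test, this statement parses after the fix, with the LIMIT bound to the SELECT):
```python
import sqlglot

sql = (
    "CREATE TABLE IF NOT EXISTS foo AS "
    "SELECT * FROM bla EXCEPT DISTINCT (SELECT * FROM bar) LIMIT 0"
)
# Raised ParseError before the fix
print(sqlglot.parse_one(sql, read="bigquery").sql(dialect="bigquery"))
```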
|
0.0
|
910166c1d1d33e2110c26140e1916745dc2f1212
|
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery"
] |
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-05-30 22:12:33+00:00
|
mit
| 5,944 |
|
tobymao__sqlglot-1718
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index a4c4e95d..cdb09c9e 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -3222,6 +3222,18 @@ class DataType(Expression):
DATE = auto()
DATETIME = auto()
DATETIME64 = auto()
+ INT4RANGE = auto()
+ INT4MULTIRANGE = auto()
+ INT8RANGE = auto()
+ INT8MULTIRANGE = auto()
+ NUMRANGE = auto()
+ NUMMULTIRANGE = auto()
+ TSRANGE = auto()
+ TSMULTIRANGE = auto()
+ TSTZRANGE = auto()
+ TSTZMULTIRANGE = auto()
+ DATERANGE = auto()
+ DATEMULTIRANGE = auto()
DECIMAL = auto()
DOUBLE = auto()
FLOAT = auto()
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index e77bb5ab..8e3552f3 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -155,6 +155,18 @@ class Parser(metaclass=_Parser):
TokenType.DATETIME,
TokenType.DATETIME64,
TokenType.DATE,
+ TokenType.INT4RANGE,
+ TokenType.INT4MULTIRANGE,
+ TokenType.INT8RANGE,
+ TokenType.INT8MULTIRANGE,
+ TokenType.NUMRANGE,
+ TokenType.NUMMULTIRANGE,
+ TokenType.TSRANGE,
+ TokenType.TSMULTIRANGE,
+ TokenType.TSTZRANGE,
+ TokenType.TSTZMULTIRANGE,
+ TokenType.DATERANGE,
+ TokenType.DATEMULTIRANGE,
TokenType.DECIMAL,
TokenType.BIGDECIMAL,
TokenType.UUID,
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index ad329d26..b00583f9 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -113,6 +113,18 @@ class TokenType(AutoName):
DATETIME = auto()
DATETIME64 = auto()
DATE = auto()
+ INT4RANGE = auto()
+ INT4MULTIRANGE = auto()
+ INT8RANGE = auto()
+ INT8MULTIRANGE = auto()
+ NUMRANGE = auto()
+ NUMMULTIRANGE = auto()
+ TSRANGE = auto()
+ TSMULTIRANGE = auto()
+ TSTZRANGE = auto()
+ TSTZMULTIRANGE = auto()
+ DATERANGE = auto()
+ DATEMULTIRANGE = auto()
UUID = auto()
GEOGRAPHY = auto()
NULLABLE = auto()
@@ -669,6 +681,18 @@ class Tokenizer(metaclass=_Tokenizer):
"TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
"DATE": TokenType.DATE,
"DATETIME": TokenType.DATETIME,
+ "INT4RANGE": TokenType.INT4RANGE,
+ "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
+ "INT8RANGE": TokenType.INT8RANGE,
+ "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
+ "NUMRANGE": TokenType.NUMRANGE,
+ "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
+ "TSRANGE": TokenType.TSRANGE,
+ "TSMULTIRANGE": TokenType.TSMULTIRANGE,
+ "TSTZRANGE": TokenType.TSTZRANGE,
+ "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
+ "DATERANGE": TokenType.DATERANGE,
+ "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
"UNIQUE": TokenType.UNIQUE,
"STRUCT": TokenType.STRUCT,
"VARIANT": TokenType.VARIANT,
|
tobymao/sqlglot
|
4decebb8c190ad1f1293036c3cddeedbbc7c3c19
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 1f288c62..972a8c85 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -7,6 +7,7 @@ class TestPostgres(Validator):
dialect = "postgres"
def test_ddl(self):
+ self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)")
self.validate_identity("CREATE TABLE test (foo HSTORE)")
self.validate_identity("CREATE TABLE test (foo JSONB)")
self.validate_identity("CREATE TABLE test (foo VARCHAR(64)[])")
@@ -85,6 +86,18 @@ class TestPostgres(Validator):
)
def test_postgres(self):
+ self.validate_identity("CAST(x AS INT4RANGE)")
+ self.validate_identity("CAST(x AS INT4MULTIRANGE)")
+ self.validate_identity("CAST(x AS INT8RANGE)")
+ self.validate_identity("CAST(x AS INT8MULTIRANGE)")
+ self.validate_identity("CAST(x AS NUMRANGE)")
+ self.validate_identity("CAST(x AS NUMMULTIRANGE)")
+ self.validate_identity("CAST(x AS TSRANGE)")
+ self.validate_identity("CAST(x AS TSMULTIRANGE)")
+ self.validate_identity("CAST(x AS TSTZRANGE)")
+ self.validate_identity("CAST(x AS TSTZMULTIRANGE)")
+ self.validate_identity("CAST(x AS DATERANGE)")
+ self.validate_identity("CAST(x AS DATEMULTIRANGE)")
self.validate_identity(
"""LAST_VALUE("col1") OVER (ORDER BY "col2" RANGE BETWEEN INTERVAL '1 day' PRECEDING AND '1 month' FOLLOWING)"""
)
|
Issue parsing `tstzrange` columns on postgres
**Fully reproducible code snippet**
```sql
CREATE TABLE public.y (
x tstzrange NOT NULL
);
```
```python
import sqlglot
from sqlglot.dialects import Postgres
content = sqlglot.parse("sql content here", Postgres)
```
Error:
```
sqlglot.errors.ParseError: Expecting ). Line 2, Col: 15.
CREATE TABLE public.y (
x tstzrange NOT NULL
);
```
**Official Documentation**
https://www.postgresql.org/docs/current/rangetypes.html
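A quick check sketch (the round-trips below follow the added tests):
```python
import sqlglot

for sql in (
    "CREATE TABLE public.y (x TSTZRANGE NOT NULL)",
    "CAST(x AS DATERANGE)",
):
    # Both raised ParseError before the range types were registered
    print(sqlglot.parse_one(sql, read="postgres").sql(dialect="postgres"))
```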
|
0.0
|
4decebb8c190ad1f1293036c3cddeedbbc7c3c19
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-03 23:03:54+00:00
|
mit
| 5,945 |
|
tobymao__sqlglot-1723
|
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index fa370be0..55df83cb 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -33,7 +33,6 @@ class ClickHouse(Dialect):
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
- "ASOF": TokenType.ASOF,
"ATTACH": TokenType.COMMAND,
"DATETIME64": TokenType.DATETIME64,
"FINAL": TokenType.FINAL,
@@ -98,7 +97,6 @@ class ClickHouse(Dialect):
TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {
TokenType.ANY,
- TokenType.ASOF,
TokenType.SEMI,
TokenType.ANTI,
TokenType.SETTINGS,
@@ -183,7 +181,7 @@ class ClickHouse(Dialect):
return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
- def _parse_join_side_and_kind(
+ def _parse_join_parts(
self,
) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
is_global = self._match(TokenType.GLOBAL) and self._prev
@@ -202,7 +200,7 @@ class ClickHouse(Dialect):
join = super()._parse_join(skip_join_token)
if join:
- join.set("global", join.args.pop("natural", None))
+ join.set("global", join.args.pop("method", None))
return join
def _parse_function(
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index cdb09c9e..73dea5c6 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1653,11 +1653,15 @@ class Join(Expression):
"side": False,
"kind": False,
"using": False,
- "natural": False,
+ "method": False,
"global": False,
"hint": False,
}
+ @property
+ def method(self) -> str:
+ return self.text("method").upper()
+
@property
def kind(self) -> str:
return self.text("kind").upper()
@@ -2797,12 +2801,12 @@ class Select(Subqueryable):
Returns:
Select: the modified expression.
"""
- parse_args = {"dialect": dialect, **opts}
+ parse_args: t.Dict[str, t.Any] = {"dialect": dialect, **opts}
try:
- expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args) # type: ignore
+ expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args)
except ParseError:
- expression = maybe_parse(expression, into=(Join, Expression), **parse_args) # type: ignore
+ expression = maybe_parse(expression, into=(Join, Expression), **parse_args)
join = expression if isinstance(expression, Join) else Join(this=expression)
@@ -2810,14 +2814,14 @@ class Select(Subqueryable):
join.this.replace(join.this.subquery())
if join_type:
- natural: t.Optional[Token]
+ method: t.Optional[Token]
side: t.Optional[Token]
kind: t.Optional[Token]
- natural, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args) # type: ignore
+ method, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args) # type: ignore
- if natural:
- join.set("natural", True)
+ if method:
+ join.set("method", method.text)
if side:
join.set("side", side.text)
if kind:
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index f1ec3985..2e9f96ad 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1313,7 +1313,7 @@ class Generator:
op_sql = " ".join(
op
for op in (
- "NATURAL" if expression.args.get("natural") else None,
+ expression.method,
"GLOBAL" if expression.args.get("global") else None,
expression.side,
expression.kind,
diff --git a/sqlglot/optimizer/optimize_joins.py b/sqlglot/optimizer/optimize_joins.py
index 43436cbb..4e0c3a1d 100644
--- a/sqlglot/optimizer/optimize_joins.py
+++ b/sqlglot/optimizer/optimize_joins.py
@@ -1,7 +1,7 @@
from sqlglot import exp
from sqlglot.helper import tsort
-JOIN_ATTRS = ("on", "side", "kind", "using", "natural")
+JOIN_ATTRS = ("on", "side", "kind", "using", "method")
def optimize_joins(expression):
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 8e3552f3..caeea1ae 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -284,6 +284,7 @@ class Parser(metaclass=_Parser):
TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - {
TokenType.APPLY,
+ TokenType.ASOF,
TokenType.FULL,
TokenType.LEFT,
TokenType.LOCK,
@@ -387,6 +388,11 @@ class Parser(metaclass=_Parser):
TokenType.EXCEPT,
}
+ JOIN_METHODS = {
+ TokenType.NATURAL,
+ TokenType.ASOF,
+ }
+
JOIN_SIDES = {
TokenType.LEFT,
TokenType.RIGHT,
@@ -477,7 +483,7 @@ class Parser(metaclass=_Parser):
exp.Where: lambda self: self._parse_where(),
exp.Window: lambda self: self._parse_named_window(),
exp.With: lambda self: self._parse_with(),
- "JOIN_TYPE": lambda self: self._parse_join_side_and_kind(),
+ "JOIN_TYPE": lambda self: self._parse_join_parts(),
}
STATEMENT_PARSERS = {
@@ -2166,11 +2172,11 @@ class Parser(metaclass=_Parser):
return expression
- def _parse_join_side_and_kind(
+ def _parse_join_parts(
self,
) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
return (
- self._match(TokenType.NATURAL) and self._prev,
+ self._match_set(self.JOIN_METHODS) and self._prev,
self._match_set(self.JOIN_SIDES) and self._prev,
self._match_set(self.JOIN_KINDS) and self._prev,
)
@@ -2180,14 +2186,14 @@ class Parser(metaclass=_Parser):
return self.expression(exp.Join, this=self._parse_table())
index = self._index
- natural, side, kind = self._parse_join_side_and_kind()
+ method, side, kind = self._parse_join_parts()
hint = self._prev.text if self._match_texts(self.JOIN_HINTS) else None
join = self._match(TokenType.JOIN)
if not skip_join_token and not join:
self._retreat(index)
kind = None
- natural = None
+ method = None
side = None
outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY, False)
@@ -2199,12 +2205,10 @@ class Parser(metaclass=_Parser):
if outer_apply:
side = Token(TokenType.LEFT, "LEFT")
- kwargs: t.Dict[
- str, t.Optional[exp.Expression] | bool | str | t.List[t.Optional[exp.Expression]]
- ] = {"this": self._parse_table()}
+ kwargs: t.Dict[str, t.Any] = {"this": self._parse_table()}
- if natural:
- kwargs["natural"] = True
+ if method:
+ kwargs["method"] = method.text
if side:
kwargs["side"] = side.text
if kind:
@@ -2217,7 +2221,7 @@ class Parser(metaclass=_Parser):
elif self._match(TokenType.USING):
kwargs["using"] = self._parse_wrapped_id_vars()
- return self.expression(exp.Join, **kwargs) # type: ignore
+ return self.expression(exp.Join, **kwargs)
def _parse_index(
self,
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index b00583f9..d33874b9 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -492,6 +492,7 @@ class Tokenizer(metaclass=_Tokenizer):
"ANY": TokenType.ANY,
"ASC": TokenType.ASC,
"AS": TokenType.ALIAS,
+ "ASOF": TokenType.ASOF,
"AUTOINCREMENT": TokenType.AUTO_INCREMENT,
"AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
"BEGIN": TokenType.BEGIN,
|
tobymao/sqlglot
|
95f7ac7d7f046c123653f157923a284a7db18cb0
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index ce6b122d..ee15d04a 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -132,6 +132,7 @@ class TestDuckDB(Validator):
parse_one("a // b", read="duckdb").assert_is(exp.IntDiv).sql(dialect="duckdb"), "a // b"
)
+ self.validate_identity("SELECT * FROM foo ASOF LEFT JOIN bar ON a = b")
self.validate_identity("PIVOT Cities ON Year USING SUM(Population)")
self.validate_identity("PIVOT Cities ON Year USING FIRST(Population)")
self.validate_identity("PIVOT Cities ON Year USING SUM(Population) GROUP BY Country")
|
Support duckdb's asof join syntax
**Is your feature request related to a problem? Please describe.**
Not related to a problem.
**Describe the solution you'd like**
I'd like this query to be parsable by sqlglot:
```
SELECT s.*, p.unit_price, s.quantity * p.unit_price AS total_cost
FROM sales s ASOF LEFT JOIN prices p
ON s.sale_time >= p.ticker_time
```
Here's the syntax diagram for `FROM`, which includes the allowed asof join syntax:

**Describe alternatives you've considered**
I haven't considered any alternatives other than I guess this feature continuing to not exist.
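A sketch of the requested behavior (mirrors the added DuckDB test; per the patch, ASOF is parsed as a join "method" alongside NATURAL):
```python
import sqlglot

sql = "SELECT * FROM foo ASOF LEFT JOIN bar ON a = b"
# Round-trips unchanged once ASOF joins are supported
print(sqlglot.transpile(sql, read="duckdb")[0])
```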
|
0.0
|
95f7ac7d7f046c123653f157923a284a7db18cb0
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-05 21:08:09+00:00
|
mit
| 5,946 |
|
tobymao__sqlglot-1726
|
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 52a04a44..db223d1f 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -272,6 +272,7 @@ class Presto(Dialect):
exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+ exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
exp.DataType: _datatype_sql,
exp.DateAdd: lambda self, e: self.func(
@@ -319,6 +320,7 @@ class Presto(Dialect):
exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))",
exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
exp.TimeToUnix: rename_func("TO_UNIXTIME"),
+ exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]),
exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
exp.TsOrDsAdd: _ts_or_ds_add_sql,
exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
diff --git a/sqlglot/transforms.py b/sqlglot/transforms.py
index a1ec1bda..ba72616d 100644
--- a/sqlglot/transforms.py
+++ b/sqlglot/transforms.py
@@ -268,6 +268,17 @@ def add_recursive_cte_column_names(expression: exp.Expression) -> exp.Expression
return expression
+def epoch_cast_to_ts(expression: exp.Expression) -> exp.Expression:
+ if (
+ isinstance(expression, (exp.Cast, exp.TryCast))
+ and expression.name.lower() == "epoch"
+ and expression.to.this in exp.DataType.TEMPORAL_TYPES
+ ):
+ expression.this.replace(exp.Literal.string("1970-01-01 00:00:00"))
+
+ return expression
+
+
def preprocess(
transforms: t.List[t.Callable[[exp.Expression], exp.Expression]],
) -> t.Callable[[Generator, exp.Expression], str]:
|
tobymao/sqlglot
|
e4b164ff762fe3491ba86e8a9b5759793acf825a
|
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index 1f5953ca..5b8e0568 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -6,6 +6,10 @@ class TestPresto(Validator):
dialect = "presto"
def test_cast(self):
+ self.validate_all(
+ "SELECT TRY_CAST('1970-01-01 00:00:00' AS TIMESTAMP)",
+ read={"postgres": "SELECT 'epoch'::TIMESTAMP"},
+ )
self.validate_all(
"FROM_BASE64(x)",
read={
|
cannot correctly convert syntax "timestamp 'epoch'" from redshift to presto/trino
```
sql = """
select CAST(
DATE_TRUNC(
'day',
timestamp 'epoch' + creation_time * interval '1 second'
) AS DATE
) AS dt
"""
sqlglot.transpile(sql, read="redshift", write="trino")[0]
```
the output is:
`"SELECT TRY_CAST(DATE_TRUNC('day', CAST('epoch' AS TIMESTAMP) + creation_time * INTERVAL '1' second) AS DATE) AS dt"`
Trino then throws the error: `Value cannot be cast to timestamp: epoch`
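A sketch of the intended translation (expected output taken from the added Presto test):
```python
import sqlglot

# 'epoch' is rewritten to an explicit Unix-epoch timestamp literal
print(sqlglot.transpile("SELECT 'epoch'::TIMESTAMP", read="postgres", write="presto")[0])
# SELECT TRY_CAST('1970-01-01 00:00:00' AS TIMESTAMP)
```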
|
0.0
|
e4b164ff762fe3491ba86e8a9b5759793acf825a
|
[
"tests/dialects/test_presto.py::TestPresto::test_cast"
] |
[
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_explode_to_unnest",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_unnest",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_presto",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_json",
"tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular",
"tests/dialects/test_presto.py::TestPresto::test_match_recognize"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-06 08:18:43+00:00
|
mit
| 5,947 |
|
tobymao__sqlglot-1744
|
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
index fda191bf..afed1d1e 100644
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -3,6 +3,7 @@ from __future__ import annotations
import typing as t
from sqlglot import exp, transforms
+from sqlglot.dialects.dialect import rename_func
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType
@@ -34,6 +35,7 @@ class Redshift(Postgres):
unit=seq_get(args, 0),
),
"NVL": exp.Coalesce.from_arg_list,
+ "STRTOL": exp.FromBase.from_arg_list,
}
CONVERT_TYPE_FIRST = True
@@ -105,6 +107,7 @@ class Redshift(Postgres):
exp.JSONExtractScalar: _json_sql,
exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ exp.FromBase: rename_func("STRTOL"),
}
# Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index b061fd4b..3c6ba2a3 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -4392,6 +4392,10 @@ class NumberToStr(Func):
arg_types = {"this": True, "format": True}
+class FromBase(Func):
+ arg_types = {"this": True, "expression": True}
+
+
class Struct(Func):
arg_types = {"expressions": True}
is_var_len_args = True
|
tobymao/sqlglot
|
6168fbf450d47b06b730680c5f8383bd7460008e
|
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index b3dc912a..db5d72a9 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -10,6 +10,16 @@ class TestRedshift(Validator):
self.validate_identity("foo$")
self.validate_identity("$foo")
+ self.validate_all(
+ "SELECT STRTOL('abc', 16)",
+ read={
+ "trino": "SELECT FROM_BASE('abc', 16)",
+ },
+ write={
+ "redshift": "SELECT STRTOL('abc', 16)",
+ "trino": "SELECT FROM_BASE('abc', 16)",
+ },
+ )
self.validate_all(
"SELECT SNAPSHOT, type",
write={
|
Cannot convert Redshift STRTOL to Trino
```
sql = """SELECT STRTOL('abc', 16)"""
converted_sql = sqlglot.transpile(sql, read="redshift", write="trino")[0]
print(converted_sql)
SELECT STRTOL('abc', 16)
```
Trino error: `Function 'strtol' not registered`
A fix could be to replace STRTOL with FROM_BASE.
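A sketch of the fixed conversion (expected output taken from the added test):
```python
import sqlglot

print(sqlglot.transpile("SELECT STRTOL('abc', 16)", read="redshift", write="trino")[0])
# SELECT FROM_BASE('abc', 16)
```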
|
0.0
|
6168fbf450d47b06b730680c5f8383bd7460008e
|
[
"tests/dialects/test_redshift.py::TestRedshift::test_redshift"
] |
[
"tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding",
"tests/dialects/test_redshift.py::TestRedshift::test_rename_table",
"tests/dialects/test_redshift.py::TestRedshift::test_values",
"tests/dialects/test_redshift.py::TestRedshift::test_create_table_like",
"tests/dialects/test_redshift.py::TestRedshift::test_varchar_max",
"tests/dialects/test_redshift.py::TestRedshift::test_identity"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-08 00:16:37+00:00
|
mit
| 5,948 |
|
tobymao__sqlglot-1746
|
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
index afed1d1e..b0a67749 100644
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -25,13 +25,13 @@ class Redshift(Postgres):
FUNCTIONS = {
**Postgres.Parser.FUNCTIONS,
"DATEADD": lambda args: exp.DateAdd(
- this=seq_get(args, 2),
+ this=exp.TsOrDsToDate(this=seq_get(args, 2)),
expression=seq_get(args, 1),
unit=seq_get(args, 0),
),
"DATEDIFF": lambda args: exp.DateDiff(
- this=seq_get(args, 2),
- expression=seq_get(args, 1),
+ this=exp.TsOrDsToDate(this=seq_get(args, 2)),
+ expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
unit=seq_get(args, 0),
),
"NVL": exp.Coalesce.from_arg_list,
@@ -103,11 +103,12 @@ class Redshift(Postgres):
),
exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+ exp.FromBase: rename_func("STRTOL"),
exp.JSONExtract: _json_sql,
exp.JSONExtractScalar: _json_sql,
exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
- exp.FromBase: rename_func("STRTOL"),
+ exp.TsOrDsToDate: lambda self, e: self.sql(e.this),
}
# Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
|
tobymao/sqlglot
|
e028d984cc5631c66aac5f42c29200410caca47e
|
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index 0504576d..e3d09ef0 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -6,6 +6,14 @@ class TestPresto(Validator):
dialect = "presto"
def test_cast(self):
+ self.validate_all(
+ "SELECT DATE_DIFF('week', CAST(SUBSTR(CAST('2009-01-01' AS VARCHAR), 1, 10) AS DATE), CAST(SUBSTR(CAST('2009-12-31' AS VARCHAR), 1, 10) AS DATE))",
+ read={"redshift": "SELECT DATEDIFF(week, '2009-01-01', '2009-12-31')"},
+ )
+ self.validate_all(
+ "SELECT DATE_ADD('month', 18, CAST(SUBSTR(CAST('2008-02-28' AS VARCHAR), 1, 10) AS DATE))",
+ read={"redshift": "SELECT DATEADD(month, 18, '2008-02-28')"},
+ )
self.validate_all(
"SELECT TRY_CAST('1970-01-01 00:00:00' AS TIMESTAMP)",
read={"postgres": "SELECT 'epoch'::TIMESTAMP"},
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index db5d72a9..f4efe24f 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -180,7 +180,7 @@ class TestRedshift(Validator):
"DATEDIFF('day', a, b)",
write={
"redshift": "DATEDIFF(day, a, b)",
- "presto": "DATE_DIFF('day', a, b)",
+ "presto": "DATE_DIFF('day', CAST(SUBSTR(CAST(a AS VARCHAR), 1, 10) AS DATE), CAST(SUBSTR(CAST(b AS VARCHAR), 1, 10) AS DATE))",
},
)
self.validate_all(
|
Type error when converting datediff from redshift to trino
```
sql = "select datediff(week,'2009-01-01','2009-12-31')"
converted_sql = sqlglot.transpile(sql, read="redshift", write="trino")[0]
print(converted_sql)
SELECT DATE_DIFF('week', '2009-01-01', '2009-12-31')
```
Trino error: `Unexpected parameters (varchar(4), varchar(10), varchar(10)) for function date_diff. Expected: date_diff(varchar(x), date, date), date_diff(varchar(x), timestamp(p), timestamp(p)), date_diff(varchar(x), timestamp(p) with time zone, timestamp(p) with time zone), date_diff(varchar(x), time(p), time(p)), date_diff(varchar(x), time(p) with time zone, time(p) with time zone)`
Changing the SQL to `SELECT DATE_DIFF('week', DATE'2009-01-01', DATE'2009-12-31')` works in Trino
https://trino.io/docs/current/functions/datetime.html
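A sketch of the fixed conversion (expected output taken from the added Presto test; string dates are wrapped so Trino receives DATE operands):
```python
import sqlglot

sql = "select datediff(week,'2009-01-01','2009-12-31')"
print(sqlglot.transpile(sql, read="redshift", write="trino")[0])
# SELECT DATE_DIFF('week', CAST(SUBSTR(CAST('2009-01-01' AS VARCHAR), 1, 10) AS DATE),
#                          CAST(SUBSTR(CAST('2009-12-31' AS VARCHAR), 1, 10) AS DATE))
```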
|
0.0
|
e028d984cc5631c66aac5f42c29200410caca47e
|
[
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_redshift.py::TestRedshift::test_redshift"
] |
[
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular",
"tests/dialects/test_presto.py::TestPresto::test_json",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_presto.py::TestPresto::test_explode_to_unnest",
"tests/dialects/test_presto.py::TestPresto::test_unnest",
"tests/dialects/test_presto.py::TestPresto::test_match_recognize",
"tests/dialects/test_presto.py::TestPresto::test_presto",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_redshift.py::TestRedshift::test_values",
"tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding",
"tests/dialects/test_redshift.py::TestRedshift::test_rename_table",
"tests/dialects/test_redshift.py::TestRedshift::test_create_table_like",
"tests/dialects/test_redshift.py::TestRedshift::test_varchar_max",
"tests/dialects/test_redshift.py::TestRedshift::test_identity"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-08 00:44:24+00:00
|
mit
| 5,949 |
|
tobymao__sqlglot-1765
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 4e851e22..148b6d82 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -269,6 +269,7 @@ class Snowflake(Dialect):
QUOTES = ["'", "$$"]
STRING_ESCAPES = ["\\", "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
+ COMMENTS = ["--", "//", ("/*", "*/")]
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index c89592ad..3e847121 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -392,10 +392,13 @@ class _Tokenizer(type):
klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
- klass._COMMENTS = dict(
- (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
- for comment in klass.COMMENTS
- )
+ klass._COMMENTS = {
+ **dict(
+ (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
+ for comment in klass.COMMENTS
+ ),
+ "{#": "#}", # Ensure Jinja comments are tokenized correctly in all dialects
+ }
klass._KEYWORD_TRIE = new_trie(
key.upper()
@@ -735,7 +738,7 @@ class Tokenizer(metaclass=_Tokenizer):
NUMERIC_LITERALS: t.Dict[str, str] = {}
ENCODE: t.Optional[str] = None
- COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
+ COMMENTS = ["--", ("/*", "*/")]
__slots__ = (
"sql",
|
tobymao/sqlglot
|
0264b4383e2f45a51d7c758758e918c8cf9dd4ed
|
diff --git a/tests/test_tokens.py b/tests/test_tokens.py
index 30af34fe..d5a2b7f5 100644
--- a/tests/test_tokens.py
+++ b/tests/test_tokens.py
@@ -1,5 +1,6 @@
import unittest
+from sqlglot.dialects import BigQuery
from sqlglot.tokens import Tokenizer, TokenType
@@ -68,7 +69,8 @@ x"""
Tokenizer().tokenize("select /*")
def test_jinja(self):
- tokenizer = Tokenizer()
+ # Check that {#, #} are treated as token delimiters, even though BigQuery overrides COMMENTS
+ tokenizer = BigQuery.Tokenizer()
tokens = tokenizer.tokenize(
"""
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index 1085b092..8d762d33 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -280,6 +280,11 @@ FROM v""",
"select * from t where ((condition = 1)/*test*/)",
"SELECT * FROM t WHERE ((condition = 1) /* test */)",
)
+ self.validate(
+ "SELECT 1 // hi this is a comment",
+ "SELECT 1 /* hi this is a comment */",
+ read="snowflake",
+ )
def test_types(self):
self.validate("INT 1", "CAST(1 AS INT)")
|
Comments starting with `//` throws a ParseError in Snowflake ❄️
_Note: this is not urgent, since we can just replace them with `--`; I just thought I'd write an issue either way._
The following query works in Snowflake:
```sql
SELECT 1 // hi this is a comment
```
But throws a ParseError when parsed:
```py
parse_one("SELECT 1 // hi this is a comment", read=dialects.Snowflake)
```
<details><summary>Here is the traceback</summary>
<p>
```py
Traceback (most recent call last):
File "/Users/florian.ernst/Library/Application Support/JetBrains/Toolbox/apps/PyCharm-P/ch-0/231.9011.38/PyCharm.app/Contents/plugins/python/helpers/pydev/pydevconsole.py", line 364, in runcode
coro = func()
File "<input>", line 1, in <module>
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/__init__.py", line 158, in parse_one
result = dialect.parse(sql, **opts)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/dialects/dialect.py", line 217, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 856, in parse
return self._parse(
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 922, in _parse
expressions.append(parse_method(self))
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 1112, in _parse_statement
expression = self._parse_set_operations(expression) if expression else self._parse_select()
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 1877, in _parse_select
expressions = self._parse_csv(self._parse_expression)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 4135, in _parse_csv
parse_result = parse_method()
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 2722, in _parse_expression
return self._parse_alias(self._parse_conjunction())
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 2725, in _parse_conjunction
return self._parse_tokens(self._parse_equality, self.CONJUNCTION)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 4149, in _parse_tokens
this = parse_method()
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 2728, in _parse_equality
return self._parse_tokens(self._parse_comparison, self.EQUALITY)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 4149, in _parse_tokens
this = parse_method()
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 2731, in _parse_comparison
return self._parse_tokens(self._parse_range, self.COMPARISON)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 4149, in _parse_tokens
this = parse_method()
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 2734, in _parse_range
this = self._parse_bitwise()
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 2831, in _parse_bitwise
this = self._parse_term()
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 2852, in _parse_term
return self._parse_tokens(self._parse_factor, self.TERM)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 4149, in _parse_tokens
this = parse_method()
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 2855, in _parse_factor
return self._parse_tokens(self._parse_unary, self.FACTOR)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 4152, in _parse_tokens
this = self.expression(
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 986, in expression
return self.validate_expression(instance)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 1006, in validate_expression
self.raise_error(error_message)
File "/Users/florian.ernst/Alan/alan-jobs/env/lib/python3.10/site-packages/sqlglot/parser.py", line 966, in raise_error
raise error
sqlglot.errors.ParseError: Required keyword: 'expression' missing for <class 'sqlglot.expressions.Div'>. Line 1, Col: 11.
SELECT 1 // hi this is a comment
```
</p>
</details>
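A sketch of the fixed behavior (expected output taken from the added transpile test; `//` is kept as a comment and normalized to a block comment):
```python
import sqlglot

print(sqlglot.transpile("SELECT 1 // hi this is a comment", read="snowflake")[0])
# SELECT 1 /* hi this is a comment */
```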
|
0.0
|
0264b4383e2f45a51d7c758758e918c8cf9dd4ed
|
[
"tests/test_tokens.py::TestTokens::test_jinja",
"tests/test_transpile.py::TestTranspile::test_comments"
] |
[
"tests/test_tokens.py::TestTokens::test_command",
"tests/test_tokens.py::TestTokens::test_comment_attachment",
"tests/test_tokens.py::TestTokens::test_error_msg",
"tests/test_tokens.py::TestTokens::test_token_line_col",
"tests/test_transpile.py::TestTranspile::test_alias",
"tests/test_transpile.py::TestTranspile::test_alter",
"tests/test_transpile.py::TestTranspile::test_asc",
"tests/test_transpile.py::TestTranspile::test_error_level",
"tests/test_transpile.py::TestTranspile::test_extract",
"tests/test_transpile.py::TestTranspile::test_identify_lambda",
"tests/test_transpile.py::TestTranspile::test_identity",
"tests/test_transpile.py::TestTranspile::test_if",
"tests/test_transpile.py::TestTranspile::test_index_offset",
"tests/test_transpile.py::TestTranspile::test_leading_comma",
"tests/test_transpile.py::TestTranspile::test_normalize_name",
"tests/test_transpile.py::TestTranspile::test_not_range",
"tests/test_transpile.py::TestTranspile::test_paren",
"tests/test_transpile.py::TestTranspile::test_partial",
"tests/test_transpile.py::TestTranspile::test_pretty",
"tests/test_transpile.py::TestTranspile::test_pretty_line_breaks",
"tests/test_transpile.py::TestTranspile::test_some",
"tests/test_transpile.py::TestTranspile::test_space",
"tests/test_transpile.py::TestTranspile::test_time",
"tests/test_transpile.py::TestTranspile::test_types",
"tests/test_transpile.py::TestTranspile::test_unary",
"tests/test_transpile.py::TestTranspile::test_unsupported_level",
"tests/test_transpile.py::TestTranspile::test_with"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-13 13:32:32+00:00
|
mit
| 5,950 |
|
tobymao__sqlglot-1782
|
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index f9b1cf33..8c2a4ab8 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -183,6 +183,7 @@ def _to_timestamp(args: t.List) -> exp.Expression:
class Postgres(Dialect):
+ INDEX_OFFSET = 1
NULL_ORDERING = "nulls_are_large"
TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
TIME_MAPPING = {
|
tobymao/sqlglot
|
3b95a465311e969eb8348d57dc9a9acc623a92ab
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 10262471..48a89c3a 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -107,6 +107,16 @@ class TestPostgres(Validator):
},
)
+ def test_array_offset(self):
+ self.validate_all(
+ "SELECT col[1]",
+ write={
+ "hive": "SELECT col[0]",
+ "postgres": "SELECT col[1]",
+ "presto": "SELECT col[1]",
+ },
+ )
+
def test_postgres(self):
self.validate_identity("CAST(x AS INT4RANGE)")
self.validate_identity("CAST(x AS INT4MULTIRANGE)")
|
Postgres array indexes are incorrectly incremented when transpiling to Trino
Both Postgres and Trino are 1-indexed, but SQLGlot adds a 1.
```
>>> sqlglot.__version__
'16.1.3'
>>> sqlglot.transpile("select col[1]", read='postgres', write='trino')[0]
Applying array index offset (1)
'SELECT col[2]'
```
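A sketch of the fixed behavior (expected outputs taken from the added test; with `INDEX_OFFSET = 1` on the Postgres dialect, 1-based indexes are preserved for 1-based Presto/Trino and shifted for 0-based Hive):
```python
import sqlglot

print(sqlglot.transpile("select col[1]", read="postgres", write="presto")[0])  # SELECT col[1]
print(sqlglot.transpile("select col[1]", read="postgres", write="hive")[0])    # SELECT col[0]
```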
|
0.0
|
3b95a465311e969eb8348d57dc9a9acc623a92ab
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-15 11:11:42+00:00
|
mit
| 5,951 |
|
tobymao__sqlglot-1841
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index a902f503..a2dbfd9a 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -267,6 +267,21 @@ class Snowflake(Dialect):
),
}
+ def _parse_id_var(
+ self,
+ any_token: bool = True,
+ tokens: t.Optional[t.Collection[TokenType]] = None,
+ ) -> t.Optional[exp.Expression]:
+ if self._match_text_seq("IDENTIFIER", "("):
+ identifier = (
+ super()._parse_id_var(any_token=any_token, tokens=tokens)
+ or self._parse_string()
+ )
+ self._match_r_paren()
+ return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
+
+ return super()._parse_id_var(any_token=any_token, tokens=tokens)
+
class Tokenizer(tokens.Tokenizer):
QUOTES = ["'", "$$"]
STRING_ESCAPES = ["\\", "'"]
|
tobymao/sqlglot
|
19295cc5d0080883af183771512f8e8a4050eecd
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 0c735925..4d2c392d 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -6,6 +6,8 @@ class TestSnowflake(Validator):
dialect = "snowflake"
def test_snowflake(self):
+ self.validate_identity("WITH x AS (SELECT 1 AS foo) SELECT foo FROM IDENTIFIER('x')")
+ self.validate_identity("WITH x AS (SELECT 1 AS foo) SELECT IDENTIFIER('foo') FROM x")
self.validate_identity("INITCAP('iqamqinterestedqinqthisqtopic', 'q')")
self.validate_identity("CAST(x AS GEOMETRY)")
self.validate_identity("OBJECT_CONSTRUCT(*)")
@@ -585,6 +587,8 @@ class TestSnowflake(Validator):
self.validate_identity("CREATE DATABASE mytestdb_clone CLONE mytestdb")
self.validate_identity("CREATE SCHEMA mytestschema_clone CLONE testschema")
self.validate_identity("CREATE TABLE orders_clone CLONE orders")
+ self.validate_identity("CREATE TABLE IDENTIFIER('foo') (COLUMN1 VARCHAR, COLUMN2 VARCHAR)")
+ self.validate_identity("CREATE TABLE IDENTIFIER($foo) (col1 VARCHAR, col2 VARCHAR)")
self.validate_identity(
"CREATE TABLE orders_clone_restore CLONE orders AT (TIMESTAMP => TO_TIMESTAMP_TZ('04/05/2013 01:02:03', 'mm/dd/yyyy hh24:mi:ss'))"
)
|
Parsing Snowflake Table Name Identifiers Throws Errors
**Issue**
- Dialect: snowflake
When parsing CREATE or DROP TABLE statements that build the table name from a SQL variable via IDENTIFIER(), the parser throws a ParseError, even though this is valid Snowflake code.
```
sqlglot.errors.ParseError: Expecting ). Line 2, Col: 45.
SET TABLE_NAME = 'table';
CREATE TABLE IDENTIFIER($TABLE_NAME) (COLUMN1 VARCHAR, COLUMN2 VARCHAR);
DROP TABLE IDENTIFIER($TABLE_NAME);
```
The parser seems to treat `IDENTIFIER()` itself as the table name and to read the following parenthesized arguments as the column declarations.
**Fully reproducible code snippet**
```
SET TABLE_NAME = 'table';
CREATE TABLE IDENTIFIER($TABLE_NAME) (COLUMN1 VARCHAR, COLUMN2 VARCHAR);
DROP TABLE IDENTIFIER($TABLE_NAME);
```
**Official Documentation**
https://docs.snowflake.com/en/sql-reference/identifier-literal
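A hedged reproduction sketch (per the added tests, this parses after the fix, with IDENTIFIER kept as a function call in table position):
```python
import sqlglot

sql = "CREATE TABLE IDENTIFIER($TABLE_NAME) (COLUMN1 VARCHAR, COLUMN2 VARCHAR)"
# Raised "Expecting )" before the fix
print(sqlglot.parse_one(sql, read="snowflake").sql(dialect="snowflake"))
```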
|
0.0
|
19295cc5d0080883af183771512f8e8a4050eecd
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-27 16:48:13+00:00
|
mit
| 5,952 |
|
tobymao__sqlglot-1863
|
diff --git a/sqlglot/executor/context.py b/sqlglot/executor/context.py
index 630cb657..d7952c15 100644
--- a/sqlglot/executor/context.py
+++ b/sqlglot/executor/context.py
@@ -41,11 +41,13 @@ class Context:
def table(self) -> Table:
if self._table is None:
self._table = list(self.tables.values())[0]
+
for other in self.tables.values():
if self._table.columns != other.columns:
raise Exception(f"Columns are different.")
if len(self._table.rows) != len(other.rows):
raise Exception(f"Rows are different.")
+
return self._table
def add_columns(self, *columns: str) -> None:
diff --git a/sqlglot/planner.py b/sqlglot/planner.py
index f2467021..232c3b94 100644
--- a/sqlglot/planner.py
+++ b/sqlglot/planner.py
@@ -23,9 +23,11 @@ class Plan:
while nodes:
node = nodes.pop()
dag[node] = set()
+
for dep in node.dependencies:
dag[node].add(dep)
nodes.add(dep)
+
self._dag = dag
return self._dag
@@ -128,13 +130,16 @@ class Step:
agg_funcs = tuple(expression.find_all(exp.AggFunc))
if agg_funcs:
aggregations.add(expression)
+
for agg in agg_funcs:
for operand in agg.unnest_operands():
if isinstance(operand, exp.Column):
continue
if operand not in operands:
operands[operand] = next_operand_name()
+
operand.replace(exp.column(operands[operand], quoted=True))
+
return bool(agg_funcs)
for e in expression.expressions:
@@ -178,13 +183,14 @@ class Step:
for k, v in aggregate.group.items():
intermediate[v] = k
if isinstance(v, exp.Column):
- intermediate[v.alias_or_name] = k
+ intermediate[v.name] = k
for projection in projections:
for node, *_ in projection.walk():
name = intermediate.get(node)
if name:
node.replace(exp.column(name, step.name))
+
if aggregate.condition:
for node, *_ in aggregate.condition.walk():
name = intermediate.get(node) or intermediate.get(node.name)
@@ -197,6 +203,15 @@ class Step:
order = expression.args.get("order")
if order:
+ if isinstance(step, Aggregate):
+ for ordered in order.expressions:
+ if ordered.find(exp.AggFunc):
+ operand_name = next_operand_name()
+ extract_agg_operands(exp.alias_(ordered.this, operand_name, quoted=True))
+ ordered.this.replace(exp.column(operand_name, quoted=True))
+
+ step.aggregations = list(aggregations)
+
sort = Sort()
sort.name = step.name
sort.key = order.expressions
|
tobymao/sqlglot
|
38001582953423b6ea34b4694cf5715446e131f2
|
diff --git a/tests/test_executor.py b/tests/test_executor.py
index f257bd14..13376ce5 100644
--- a/tests/test_executor.py
+++ b/tests/test_executor.py
@@ -700,6 +700,11 @@ class TestExecutor(unittest.TestCase):
[(2, 25.0)],
("_col_0", "_col_1"),
),
+ (
+ "SELECT a FROM x GROUP BY a ORDER BY AVG(b)",
+ [(2,), (1,), (3,)],
+ ("a",),
+ ),
):
with self.subTest(sql):
result = execute(sql, tables=tables)
|
Executor cannot find an aggregate column in the ORDER BY clause if that column is not in the SELECT clause.
sqlglot==16.6.0
Related to #1822
```python
('SELECT a, AVG(b) FROM x GROUP BY a ORDER BY AVG(b)', ["a", "_col_1"], [("b", 22.5), ("a", 25), ("c", 28)]), # PASS
('SELECT a FROM x GROUP BY a ORDER BY AVG(b)', ["a"], [("b",), ("a",), ("c",)]), # FAILED
```
If the AVG is not in the SELECT clause, I hit the same issue.
Here is the error log.
```
Traceback (most recent call last):
File "/Users/.../python3.11/site-packages/sqlglot/executor/python.py", line 46, in execute
contexts[node] = self.sort(node, context)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/.../python3.11/site-packages/sqlglot/executor/python.py", line 323, in sort
sort_ctx.sort(self.generate_tuple(step.key))
File "/Users/.../python3.11/site-packages/sqlglot/executor/context.py", line 81, in sort
self.table.rows.sort(key=sort_key)
File "/Users/.../python3.11/site-packages/sqlglot/executor/context.py", line 79, in sort_key
return self.eval_tuple(key)
^^^^^^^^^^^^^^^^^^^^
File "/Users/.../python3.11/site-packages/sqlglot/executor/context.py", line 38, in eval_tuple
return tuple(self.eval(code) for code in codes)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/.../python3.11/site-packages/sqlglot/executor/context.py", line 38, in <genexpr>
return tuple(self.eval(code) for code in codes)
^^^^^^^^^^^^^^^
File "/Users/.../python3.11/site-packages/sqlglot/executor/context.py", line 35, in eval
return eval(code, self.env)
^^^^^^^^^^^^^^^^^^^^
File "ORDERED(AVG(scope["x"]["b"]), False, True)", line 1, in <module>
File "/Users/.../python3.11/site-packages/sqlglot/executor/table.py", line 104, in __getitem__
return self.row[self.columns[column]]
~~~~~~~~~~~~^^^^^^^^
KeyError: 'b'
```
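For reference, a minimal repro sketch using sqlglot's Python executor (the table contents here are hypothetical; any rows with columns `a` and `b` will do):
```python
from sqlglot.executor import execute

# Hypothetical in-memory table data.
tables = {"x": [{"a": 1, "b": 10}, {"a": 1, "b": 40}, {"a": 2, "b": 25}, {"a": 3, "b": 28}]}

# Fails before the fix because AVG(b) is referenced only in ORDER BY.
result = execute("SELECT a FROM x GROUP BY a ORDER BY AVG(b)", tables=tables)
print(result.columns, result.rows)
```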
|
0.0
|
38001582953423b6ea34b4694cf5715446e131f2
|
[
"tests/test_executor.py::TestExecutor::test_group_by"
] |
[
"tests/test_executor.py::TestExecutor::test_aggregate_without_group_by",
"tests/test_executor.py::TestExecutor::test_case_sensitivity",
"tests/test_executor.py::TestExecutor::test_correlated_count",
"tests/test_executor.py::TestExecutor::test_execute_callable",
"tests/test_executor.py::TestExecutor::test_execute_catalog_db_table",
"tests/test_executor.py::TestExecutor::test_execute_subqueries",
"tests/test_executor.py::TestExecutor::test_execute_tables",
"tests/test_executor.py::TestExecutor::test_nested_table_reference",
"tests/test_executor.py::TestExecutor::test_py_dialect",
"tests/test_executor.py::TestExecutor::test_scalar_functions",
"tests/test_executor.py::TestExecutor::test_set_operations",
"tests/test_executor.py::TestExecutor::test_static_queries",
"tests/test_executor.py::TestExecutor::test_table_depth_mismatch",
"tests/test_executor.py::TestExecutor::test_tables"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-30 05:05:59+00:00
|
mit
| 5,953 |
|
tobymao__sqlglot-1873
|
diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index a93c98ee..eac04c81 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -163,6 +163,12 @@ def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
return expression
+def _parse_timestamp(args: t.List) -> exp.StrToTime:
+ this = format_time_lambda(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)])
+ this.set("zone", seq_get(args, 2))
+ return this
+
+
class BigQuery(Dialect):
UNNEST_COLUMN_ONLY = True
@@ -266,9 +272,7 @@ class BigQuery(Dialect):
"PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
[seq_get(args, 1), seq_get(args, 0)]
),
- "PARSE_TIMESTAMP": lambda args: format_time_lambda(exp.StrToTime, "bigquery")(
- [seq_get(args, 1), seq_get(args, 0)]
- ),
+ "PARSE_TIMESTAMP": _parse_timestamp,
"REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
"REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
this=seq_get(args, 0),
@@ -400,7 +404,9 @@ class BigQuery(Dialect):
]
),
exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
- exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
+ exp.StrToTime: lambda self, e: self.func(
+ "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
+ ),
exp.TimeAdd: _date_add_sql("TIME", "ADD"),
exp.TimeSub: _date_add_sql("TIME", "SUB"),
exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
@@ -551,10 +557,15 @@ class BigQuery(Dialect):
}
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
- if not isinstance(expression.parent, exp.Cast):
+ parent = expression.parent
+
+ # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
+ # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
+ if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
return self.func(
"TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
)
+
return super().attimezone_sql(expression)
def trycast_sql(self, expression: exp.TryCast) -> str:
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 5f743eef..07e744df 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -123,14 +123,15 @@ class MySQL(Dialect):
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
"CHARSET": TokenType.CHARACTER_SET,
+ "ENUM": TokenType.ENUM,
"FORCE": TokenType.FORCE,
"IGNORE": TokenType.IGNORE,
"LONGBLOB": TokenType.LONGBLOB,
"LONGTEXT": TokenType.LONGTEXT,
"MEDIUMBLOB": TokenType.MEDIUMBLOB,
"MEDIUMTEXT": TokenType.MEDIUMTEXT,
+ "MEMBER OF": TokenType.MEMBER_OF,
"SEPARATOR": TokenType.SEPARATOR,
- "ENUM": TokenType.ENUM,
"START": TokenType.BEGIN,
"SIGNED": TokenType.BIGINT,
"SIGNED INTEGER": TokenType.BIGINT,
@@ -185,11 +186,24 @@ class MySQL(Dialect):
COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
class Parser(parser.Parser):
- FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA, TokenType.DATABASE}
+ FUNC_TOKENS = {
+ *parser.Parser.FUNC_TOKENS,
+ TokenType.DATABASE,
+ TokenType.SCHEMA,
+ TokenType.VALUES,
+ }
+
TABLE_ALIAS_TOKENS = (
parser.Parser.TABLE_ALIAS_TOKENS - parser.Parser.TABLE_INDEX_HINT_TOKENS
)
+ RANGE_PARSERS = {
+ **parser.Parser.RANGE_PARSERS,
+ TokenType.MEMBER_OF: lambda self, this: self.expression(
+ exp.JSONArrayContains, this=this, expression=self._parse_wrapped(self._parse_string)
+ ),
+ }
+
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
"DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
@@ -207,6 +221,10 @@ class MySQL(Dialect):
this=self._parse_lambda(),
separator=self._match(TokenType.SEPARATOR) and self._parse_field(),
),
+ # https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values
+ "VALUES": lambda self: self.expression(
+ exp.Anonymous, this="VALUES", expressions=[self._parse_id_var()]
+ ),
}
STATEMENT_PARSERS = {
@@ -399,6 +417,7 @@ class MySQL(Dialect):
NULL_ORDERING_SUPPORTED = False
JOIN_HINTS = False
TABLE_HINTS = True
+ DUPLICATE_KEY_UPDATE_WITH_SET = False
TRANSFORMS = {
**generator.Generator.TRANSFORMS,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index de88d72d..2941c9c4 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1525,6 +1525,7 @@ class Insert(Expression):
"partition": False,
"alternative": False,
"where": False,
+ "ignore": False,
}
def with_(
@@ -3623,6 +3624,11 @@ class Is(Binary, Predicate):
pass
+# https://dev.mysql.com/doc/refman/8.0/en/json-search-functions.html#operator_member-of
+class JSONArrayContains(Binary, Predicate):
+ pass
+
+
class Kwarg(Binary):
"""Kwarg in special functions like func(kwarg => y)."""
@@ -4476,7 +4482,7 @@ class StrToDate(Func):
class StrToTime(Func):
- arg_types = {"this": True, "format": True}
+ arg_types = {"this": True, "format": True, "zone": False}
# Spark allows unix_timestamp()
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index d7794f70..b2c10e02 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -143,6 +143,9 @@ class Generator:
# Whether or not comparing against booleans (e.g. x IS TRUE) is supported
IS_BOOL_ALLOWED = True
+ # Whether or not to include the "SET" keyword in the "INSERT ... ON DUPLICATE KEY UPDATE" statement
+ DUPLICATE_KEY_UPDATE_WITH_SET = True
+
# https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax
SELECT_KINDS: t.Tuple[str, ...] = ("STRUCT", "VALUE")
@@ -1105,6 +1108,8 @@ class Generator:
alternative = expression.args.get("alternative")
alternative = f" OR {alternative}" if alternative else ""
+ ignore = " IGNORE" if expression.args.get("ignore") else ""
+
this = f"{this} {self.sql(expression, 'this')}"
exists = " IF EXISTS" if expression.args.get("exists") else ""
@@ -1116,7 +1121,7 @@ class Generator:
expression_sql = f"{self.sep()}{self.sql(expression, 'expression')}"
conflict = self.sql(expression, "conflict")
returning = self.sql(expression, "returning")
- sql = f"INSERT{alternative}{this}{exists}{partition_sql}{where}{expression_sql}{conflict}{returning}"
+ sql = f"INSERT{alternative}{ignore}{this}{exists}{partition_sql}{where}{expression_sql}{conflict}{returning}"
return self.prepend_ctes(expression, sql)
def intersect_sql(self, expression: exp.Intersect) -> str:
@@ -1143,8 +1148,9 @@ class Generator:
do = "" if expression.args.get("duplicate") else " DO "
nothing = "NOTHING" if expression.args.get("nothing") else ""
expressions = self.expressions(expression, flat=True)
+ set_keyword = "SET " if self.DUPLICATE_KEY_UPDATE_WITH_SET else ""
if expressions:
- expressions = f"UPDATE SET {expressions}"
+ expressions = f"UPDATE {set_keyword}{expressions}"
return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}"
def returning_sql(self, expression: exp.Returning) -> str:
@@ -2434,6 +2440,9 @@ class Generator:
return self.func("ANY_VALUE", this)
+ def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str:
+ return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})"
+
def cached_generator(
cache: t.Optional[t.Dict[int, str]] = None
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index b7704de1..700d9aa1 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1679,6 +1679,7 @@ class Parser(metaclass=_Parser):
def _parse_insert(self) -> exp.Insert:
overwrite = self._match(TokenType.OVERWRITE)
+ ignore = self._match(TokenType.IGNORE)
local = self._match_text_seq("LOCAL")
alternative = None
@@ -1709,6 +1710,7 @@ class Parser(metaclass=_Parser):
returning=self._parse_returning(),
overwrite=overwrite,
alternative=alternative,
+ ignore=ignore,
)
def _parse_on_conflict(self) -> t.Optional[exp.OnConflict]:
@@ -1734,7 +1736,8 @@ class Parser(metaclass=_Parser):
nothing = True
else:
self._match(TokenType.UPDATE)
- expressions = self._match(TokenType.SET) and self._parse_csv(self._parse_equality)
+ self._match(TokenType.SET)
+ expressions = self._parse_csv(self._parse_equality)
return self.expression(
exp.OnConflict,
@@ -1917,7 +1920,7 @@ class Parser(metaclass=_Parser):
self.raise_error("Cannot specify both ALL and DISTINCT after SELECT")
limit = self._parse_limit(top=True)
- expressions = self._parse_csv(self._parse_expression)
+ expressions = self._parse_expressions()
this = self.expression(
exp.Select,
@@ -2091,9 +2094,7 @@ class Parser(metaclass=_Parser):
partition = self._parse_partition_by()
order = self._parse_order()
- measures = (
- self._parse_csv(self._parse_expression) if self._match_text_seq("MEASURES") else None
- )
+ measures = self._parse_expressions() if self._match_text_seq("MEASURES") else None
if self._match_text_seq("ONE", "ROW", "PER", "MATCH"):
rows = exp.var("ONE ROW PER MATCH")
@@ -3174,7 +3175,7 @@ class Parser(metaclass=_Parser):
if query:
expressions = [query]
else:
- expressions = self._parse_csv(self._parse_expression)
+ expressions = self._parse_expressions()
this = self._parse_query_modifiers(seq_get(expressions, 0))
@@ -3709,21 +3710,27 @@ class Parser(metaclass=_Parser):
if self._match(TokenType.CHARACTER_SET):
to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())
elif self._match(TokenType.FORMAT):
- fmt = self._parse_at_time_zone(self._parse_string())
+ fmt_string = self._parse_string()
+ fmt = self._parse_at_time_zone(fmt_string)
if to.this in exp.DataType.TEMPORAL_TYPES:
- return self.expression(
+ this = self.expression(
exp.StrToDate if to.this == exp.DataType.Type.DATE else exp.StrToTime,
this=this,
format=exp.Literal.string(
format_time(
- fmt.this if fmt else "",
+ fmt_string.this if fmt_string else "",
self.FORMAT_MAPPING or self.TIME_MAPPING,
self.FORMAT_TRIE or self.TIME_TRIE,
)
),
)
+ if isinstance(fmt, exp.AtTimeZone) and isinstance(this, exp.StrToTime):
+ this.set("zone", fmt.args["zone"])
+
+ return this
+
return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to, format=fmt)
def _parse_concat(self) -> t.Optional[exp.Expression]:
@@ -4226,7 +4233,7 @@ class Parser(metaclass=_Parser):
return None
if self._match(TokenType.L_PAREN, advance=False):
return self._parse_wrapped_csv(self._parse_expression)
- return self._parse_csv(self._parse_expression)
+ return self._parse_expressions()
def _parse_csv(
self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA
@@ -4276,6 +4283,9 @@ class Parser(metaclass=_Parser):
self._match_r_paren()
return parse_result
+ def _parse_expressions(self) -> t.List[t.Optional[exp.Expression]]:
+ return self._parse_csv(self._parse_expression)
+
def _parse_select_or_expression(self, alias: bool = False) -> t.Optional[exp.Expression]:
return self._parse_select() or self._parse_set_operations(
self._parse_expression() if alias else self._parse_conjunction()
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 79f7a659..8657b73b 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -239,6 +239,7 @@ class TokenType(AutoName):
LOCK = auto()
MAP = auto()
MATCH_RECOGNIZE = auto()
+ MEMBER_OF = auto()
MERGE = auto()
MOD = auto()
NATURAL = auto()
|
tobymao/sqlglot
|
59da40e043957b81ad55f4197a33aeab431115a5
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 1b7414bb..1bed6824 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -29,6 +29,7 @@ class TestBigQuery(Validator):
with self.assertRaises(ParseError):
transpile("SELECT * FROM UNNEST(x) AS x(y)", read="bigquery")
+ self.validate_identity("SELECT PARSE_TIMESTAMP('%c', 'Thu Dec 25 07:30:00 2008', 'UTC')")
self.validate_identity("SELECT ANY_VALUE(fruit HAVING MAX sold) FROM fruits")
self.validate_identity("SELECT ANY_VALUE(fruit HAVING MIN sold) FROM fruits")
self.validate_identity("SELECT `project-id`.udfs.func(call.dir)")
@@ -105,6 +106,14 @@ class TestBigQuery(Validator):
self.validate_all("CAST(x AS NVARCHAR)", write={"bigquery": "CAST(x AS STRING)"})
self.validate_all("CAST(x AS TIMESTAMPTZ)", write={"bigquery": "CAST(x AS TIMESTAMP)"})
self.validate_all("CAST(x AS RECORD)", write={"bigquery": "CAST(x AS STRUCT)"})
+ self.validate_all(
+ "SELECT CAST('20201225' AS TIMESTAMP FORMAT 'YYYYMMDD' AT TIME ZONE 'America/New_York')",
+ write={"bigquery": "SELECT PARSE_TIMESTAMP('%Y%m%d', '20201225', 'America/New_York')"},
+ )
+ self.validate_all(
+ "SELECT CAST('20201225' AS TIMESTAMP FORMAT 'YYYYMMDD')",
+ write={"bigquery": "SELECT PARSE_TIMESTAMP('%Y%m%d', '20201225')"},
+ )
self.validate_all(
"SELECT CAST(TIMESTAMP '2008-12-25 00:00:00+00:00' AS STRING FORMAT 'YYYY-MM-DD HH24:MI:SS TZH:TZM') AS date_time_to_string",
write={
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index ca2f9210..6cc50e05 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -10,7 +10,19 @@ class TestMySQL(Validator):
self.validate_identity("UPDATE items SET items.price = 0 WHERE items.id >= 5 LIMIT 10")
self.validate_identity("DELETE FROM t WHERE a <= 10 LIMIT 10")
self.validate_identity(
- "INSERT INTO x VALUES (1, 'a', 2.0) ON DUPLICATE KEY UPDATE SET x.id = 1"
+ "INSERT IGNORE INTO subscribers (email) VALUES ('[email protected]'), ('[email protected]')"
+ )
+ self.validate_identity(
+ "INSERT INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6) ON DUPLICATE KEY UPDATE c = VALUES(a) + VALUES(b)"
+ )
+ self.validate_identity(
+ "INSERT INTO t1 (a, b) SELECT c, d FROM t2 UNION SELECT e, f FROM t3 ON DUPLICATE KEY UPDATE b = b + c"
+ )
+ self.validate_identity(
+ "INSERT INTO t1 (a, b, c) VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = c + 1"
+ )
+ self.validate_identity(
+ "INSERT INTO x VALUES (1, 'a', 2.0) ON DUPLICATE KEY UPDATE x.id = 1"
)
self.validate_all(
@@ -48,6 +60,11 @@ class TestMySQL(Validator):
)
def test_identity(self):
+ self.validate_identity("SELECT @a MEMBER OF(@c), @b MEMBER OF(@c)")
+ self.validate_identity("SELECT JSON_ARRAY(4, 5) MEMBER OF('[[3,4],[4,5]]')")
+ self.validate_identity("SELECT CAST('[4,5]' AS JSON) MEMBER OF('[[3,4],[4,5]]')")
+ self.validate_identity("""SELECT 'ab' MEMBER OF('[23, "abc", 17, "ab", 10]')""")
+ self.validate_identity("""SELECT 17 MEMBER OF('[23, "abc", 17, "ab", 10]')""")
self.validate_identity("CAST(x AS ENUM('a', 'b'))")
self.validate_identity("CAST(x AS SET('a', 'b'))")
self.validate_identity("SELECT CURRENT_TIMESTAMP(6)")
|
BiqQuery TIMESTAMP FORMAT causes parser error
The following BQ statement runs successfully in GCP BigQuery but produces an error when trying to parse it (with `read=bigquery`).
BigQuery statement to test:
```
SELECT CAST(date AS TIMESTAMP FORMAT 'YYYYMMDD' AT TIME ZONE 'America/New York') as date_col
FROM `bigquery-public-data.google_analytics_sample.ga_sessions_*`
```
Error:
`Expecting ). Line 1, Col: 30.`
I'm not sure why sqlglot isn't OK with `FORMAT 'YYYYMMDD' AT TIME ZONE 'America/New York'`.
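For reference, a round-trip sketch of the fixed behavior, mirroring the test patch above (the `FORMAT ... AT TIME ZONE` cast becomes a three-argument `PARSE_TIMESTAMP`):
```python
import sqlglot

sql = "SELECT CAST('20201225' AS TIMESTAMP FORMAT 'YYYYMMDD' AT TIME ZONE 'America/New_York')"
print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])
# SELECT PARSE_TIMESTAMP('%Y%m%d', '20201225', 'America/New_York')
```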
|
0.0
|
59da40e043957b81ad55f4197a33aeab431115a5
|
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_mysql.py::TestMySQL::test_identity"
] |
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_types"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-30 19:59:49+00:00
|
mit
| 5,954 |
|
tobymao__sqlglot-1889
|
diff --git a/sqlglot/planner.py b/sqlglot/planner.py
index 232c3b94..07ee7399 100644
--- a/sqlglot/planner.py
+++ b/sqlglot/planner.py
@@ -142,6 +142,10 @@ class Step:
return bool(agg_funcs)
+ def set_ops_and_aggs(step):
+ step.operands = tuple(alias(operand, alias_) for operand, alias_ in operands.items())
+ step.aggregations = list(aggregations)
+
for e in expression.expressions:
if e.find(exp.AggFunc):
projections.append(exp.column(e.alias_or_name, step.name, quoted=True))
@@ -169,10 +173,7 @@ class Step:
else:
aggregate.condition = having.this
- aggregate.operands = tuple(
- alias(operand, alias_) for operand, alias_ in operands.items()
- )
- aggregate.aggregations = list(aggregations)
+ set_ops_and_aggs(aggregate)
# give aggregates names and replace projections with references to them
aggregate.group = {
@@ -204,13 +205,11 @@ class Step:
if order:
if isinstance(step, Aggregate):
- for ordered in order.expressions:
- if ordered.find(exp.AggFunc):
- operand_name = next_operand_name()
- extract_agg_operands(exp.alias_(ordered.this, operand_name, quoted=True))
- ordered.this.replace(exp.column(operand_name, quoted=True))
+ for i, ordered in enumerate(order.expressions):
+ if extract_agg_operands(exp.alias_(ordered.this, f"_o_{i}", quoted=True)):
+ ordered.this.replace(exp.column(f"_o_{i}", step.name, quoted=True))
- step.aggregations = list(aggregations)
+ set_ops_and_aggs(aggregate)
sort = Sort()
sort.name = step.name
@@ -355,7 +354,10 @@ class Join(Step):
def _to_s(self, indent: str) -> t.List[str]:
lines = []
for name, join in self.joins.items():
- lines.append(f"{indent}{name}: {join['side']}")
+ lines.append(f"{indent}{name}: {join['side'] or 'INNER'}")
+ join_key = ", ".join(str(key) for key in t.cast(list, join.get("join_key") or []))
+ if join_key:
+ lines.append(f"{indent}Key: {join_key}")
if join.get("condition"):
lines.append(f"{indent}On: {join['condition'].sql()}") # type: ignore
return lines
|
tobymao/sqlglot
|
80cd1d0d12560e9e228131b101dd760171fa27f8
|
diff --git a/tests/test_executor.py b/tests/test_executor.py
index 006cf9df..9dacbbf3 100644
--- a/tests/test_executor.py
+++ b/tests/test_executor.py
@@ -708,6 +708,16 @@ class TestExecutor(unittest.TestCase):
[(2,), (1,), (3,)],
("a",),
),
+ (
+ "SELECT a, SUM(b) FROM x GROUP BY a ORDER BY COUNT(*)",
+ [(3, 28), (1, 50), (2, 45)],
+ ("a", "_col_1"),
+ ),
+ (
+ "SELECT a, SUM(b) FROM x GROUP BY a ORDER BY COUNT(*) DESC",
+ [(1, 50), (2, 45), (3, 28)],
+ ("a", "_col_1"),
+ ),
):
with self.subTest(sql):
result = execute(sql, tables=tables)
|
Executor doesn't work with GROUP BY ... ORDER BY COUNT(*)
sqlglot==16.8.1
```python
def test_execute_group_by_order_by_count(self):
    tables = {
        "x": [
            {"t": "a", "a": 6, "y": 2019},
            {"t": "b", "a": 12, "y": 2019},
            {"t": "c", "a": 15, "y": 2019},
            {"t": "a", "a": 10, "y": 2018},
            {"t": "b", "a": 12, "y": 2018},
            {"t": "c", "a": 2, "y": 2018},
            {"t": "a", "a": 9, "y": 2017},
        ],
    }
    schema = {
        "x": {
            "t": "text",
            "a": "number",
            "y": "number",
        },
    }
    gen = SqlGenerator("mysql")
    creates = gen.get_sql_tables_schema(schema)
    for c in creates:
        print(c.sql(pretty=True, dialect="mysql"), ";", sep="")
    inserts = gen.get_sql_tables_content(tables)
    for i in inserts:
        print(i.sql(pretty=True, dialect="mysql"), ";", sep="")
    for sql, cols, rows in [
        ('select t , sum ( a ) from x group by t order by avg(a) asc limit 1;', ["t", "_col_1"], [("a", 25)]),  # PASS
        ('select t , sum ( a ) from x group by t order by count ( * ) asc limit 1;', ["t", "_col_1"], [("b", 24)]),  # FAIL
    ]:
        with self.subTest(sql):
            result = execute(sql, read="mysql", schema=schema, tables=tables)
            self.assertEqual(result.columns, tuple(cols))
            self.assertEqual(result.rows, rows)
```
```
SubTest error: Traceback (most recent call last):
File "/Users/.../python3.11/site-packages/sqlglot/executor/python.py", line 40, in execute
contexts[node] = self.aggregate(node, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/.../python3.11/site-packages/sqlglot/executor/python.py", line 288, in aggregate
add_row()
File "/Users/.../python3.11/site-packages/sqlglot/executor/python.py", line 278, in add_row
table.append(group + context.eval_tuple(aggregations))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/.../python3.11/site-packages/sqlglot/executor/context.py", line 38, in eval_tuple
return tuple(self.eval(code) for code in codes)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/./python3.11/site-packages/sqlglot/executor/context.py", line 38, in <genexpr>
return tuple(self.eval(code) for code in codes)
^^^^^^^^^^^^^^^
File "/Users/./python3.11/site-packages/sqlglot/executor/context.py", line 35, in eval
return eval(code, self.env)
^^^^^^^^^^^^^^^^^^^^
File "COUNT(scope[None]["_a_1"])", line 1, in <module>
KeyError: None
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/.../python3.11/unittest/case.py", line 57, in testPartExecutor
yield
File "/usr/.../python3.11/unittest/case.py", line 538, in subTest
yield
File "/Users/.../src/tr/dbtool/test_executor.py", line 237, in test_execute_group_by_order_by_count
result = execute(sql, read="mysql", schema=schema, tables=tables)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/..../python3.11/site-packages/sqlglot/executor/__init__.py", line 82, in execute
result = PythonExecutor(tables=tables_).execute(plan)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/.../python3.11/site-packages/sqlglot/executor/python.py", line 60, in execute
raise ExecuteError(f"Step '{node.id}' failed: {e}") from e
sqlglot.errors.ExecuteError: Step 'Aggregate: x (4552043856)' failed: None
```
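For reference, a minimal sketch against the fixed executor (the table data here is hypothetical; the query matches the new tests above):
```python
from sqlglot.executor import execute

# Hypothetical rows with columns a and b.
tables = {"x": [{"a": 1, "b": 50}, {"a": 2, "b": 45}, {"a": 3, "b": 28}]}

# ORDER BY COUNT(*) no longer raises even though COUNT(*) is not projected.
result = execute("SELECT a, SUM(b) FROM x GROUP BY a ORDER BY COUNT(*)", tables=tables)
print(result.columns, result.rows)
```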
|
0.0
|
80cd1d0d12560e9e228131b101dd760171fa27f8
|
[
"tests/test_executor.py::TestExecutor::test_group_by"
] |
[
"tests/test_executor.py::TestExecutor::test_table_depth_mismatch",
"tests/test_executor.py::TestExecutor::test_tables",
"tests/test_executor.py::TestExecutor::test_correlated_count",
"tests/test_executor.py::TestExecutor::test_static_queries",
"tests/test_executor.py::TestExecutor::test_execute_callable",
"tests/test_executor.py::TestExecutor::test_aggregate_without_group_by",
"tests/test_executor.py::TestExecutor::test_nested_table_reference",
"tests/test_executor.py::TestExecutor::test_set_operations",
"tests/test_executor.py::TestExecutor::test_execute_subqueries",
"tests/test_executor.py::TestExecutor::test_scalar_functions",
"tests/test_executor.py::TestExecutor::test_execute_tables",
"tests/test_executor.py::TestExecutor::test_py_dialect",
"tests/test_executor.py::TestExecutor::test_execute_catalog_db_table",
"tests/test_executor.py::TestExecutor::test_case_sensitivity"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-04 17:19:15+00:00
|
mit
| 5,955 |
|
tobymao__sqlglot-1924
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 1d57a88b..f5e3c893 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1234,11 +1234,14 @@ class Parser(metaclass=_Parser):
expression = self._parse_ddl_select()
if create_token.token_type == TokenType.TABLE:
+ # exp.Properties.Location.POST_EXPRESSION
+ extend_props(self._parse_properties())
+
indexes = []
while True:
index = self._parse_index()
- # exp.Properties.Location.POST_EXPRESSION and POST_INDEX
+ # exp.Properties.Location.POST_INDEX
extend_props(self._parse_properties())
if not index:
@@ -1385,7 +1388,6 @@ class Parser(metaclass=_Parser):
def _parse_with_property(
self,
) -> t.Optional[exp.Expression] | t.List[t.Optional[exp.Expression]]:
- self._match(TokenType.WITH)
if self._match(TokenType.L_PAREN, advance=False):
return self._parse_wrapped_csv(self._parse_property)
|
tobymao/sqlglot
|
f5184cd2009610ccebe77b14e65cd6ffb1f005ef
|
diff --git a/tests/dialects/test_teradata.py b/tests/dialects/test_teradata.py
index 6906e47d..0df6d0be 100644
--- a/tests/dialects/test_teradata.py
+++ b/tests/dialects/test_teradata.py
@@ -61,6 +61,9 @@ class TestTeradata(Validator):
self.validate_identity(
"CREATE VOLATILE MULTISET TABLE a, NOT LOCAL AFTER JOURNAL, FREESPACE=1 PERCENT, DATABLOCKSIZE=10 BYTES, WITH NO CONCURRENT ISOLATED LOADING FOR ALL (a INT)"
)
+ self.validate_identity(
+ "CREATE VOLATILE SET TABLE example1 AS (SELECT col1, col2, col3 FROM table1) WITH DATA PRIMARY INDEX (col1) ON COMMIT PRESERVE ROWS"
+ )
self.validate_all(
"""
|
CREATE TABLE - problem parsing the Teradata WITH DATA clause
There is a problem parsing a CREATE TABLE statement in Teradata that has a WITH DATA clause.
sqlglot version 17.2.0
Teradata version 16.20
The SQL is:
```
CREATE VOLATILE SET TABLE example1 AS (
SELECT col1,col2, col3
from table1
)
WITH DATA
PRIMARY INDEX (col1)
ON COMMIT PRESERVE ROWS;
```
and the code is:
```
for statement in sqlglot.parse(sql, read="teradata"):
    for table in statement.find_all(exp.Table):
        print(table.name)
```
Commenting out the WITH clause makes it work. The error message is:
---------------------------------------------------------------------------
ParseError Traceback (most recent call last)
Cell In[21], line 1
----> 1 for statement in sqlglot.parse(raw_sql, read="teradata"):
2 for table in statement.find_all(exp.Table):
3 print(table.name)
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\__init__.py:83, in parse(sql, read, **opts)
71 """
72 Parses the given SQL string into a collection of syntax trees, one per parsed SQL statement.
73
(...)
80 The resulting syntax tree collection.
81 """
82 dialect = Dialect.get_or_raise(read)()
---> 83 return dialect.parse(sql, **opts)
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\dialects\dialect.py:278, in Dialect.parse(self, sql, **opts)
277 def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
--> 278 return self.parser(**opts).parse(self.tokenize(sql), sql)
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:877, in Parser.parse(self, raw_tokens, sql)
863 def parse(
864 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
865 ) -> t.List[t.Optional[exp.Expression]]:
866 """
867 Parses a list of tokens and returns a list of syntax trees, one tree
868 per parsed SQL statement.
(...)
875 The list of the produced syntax trees.
876 """
--> 877 return self._parse(
878 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
879 )
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:946, in Parser._parse(self, parse_method, raw_tokens, sql)
943 expressions.append(parse_method(self))
945 if self._index < len(self._tokens):
--> 946 self.raise_error("Invalid expression / Unexpected token")
948 self.check_errors()
950 return expressions
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:987, in Parser.raise_error(self, message, token)
975 error = ParseError.new(
976 f"{message}. Line {token.line}, Col: {token.col}.\n"
977 f" {start_context}\033[4m{highlight}\033[0m{end_context}",
(...)
983 end_context=end_context,
984 )
986 if self.error_level == ErrorLevel.IMMEDIATE:
--> 987 raise error
989 self.errors.append(error)
ParseError: Invalid expression / Unexpected token. Line 7, Col: 7.
CREATE VOLATILE SET TABLE example1 AS (
SELECT col1,col2, col3
from table1
)
WITH DATA
PRIMARY INDEX (col1)
ON COMMIT PRESERVE ROWS;
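For reference, a sketch of the reporter's loop against the fixed parser (assuming a build that includes this patch; the statement matches the new identity test above):
```python
import sqlglot
from sqlglot import exp

sql = (
    "CREATE VOLATILE SET TABLE example1 AS (SELECT col1, col2, col3 FROM table1) "
    "WITH DATA PRIMARY INDEX (col1) ON COMMIT PRESERVE ROWS"
)
# No longer raises; prints the created and source table names.
for statement in sqlglot.parse(sql, read="teradata"):
    for table in statement.find_all(exp.Table):
        print(table.name)
```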
|
0.0
|
f5184cd2009610ccebe77b14e65cd6ffb1f005ef
|
[
"tests/dialects/test_teradata.py::TestTeradata::test_create"
] |
[
"tests/dialects/test_teradata.py::TestTeradata::test_abbrev",
"tests/dialects/test_teradata.py::TestTeradata::test_cast",
"tests/dialects/test_teradata.py::TestTeradata::test_datatype",
"tests/dialects/test_teradata.py::TestTeradata::test_insert",
"tests/dialects/test_teradata.py::TestTeradata::test_mod",
"tests/dialects/test_teradata.py::TestTeradata::test_translate",
"tests/dialects/test_teradata.py::TestTeradata::test_update"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-07-15 17:15:46+00:00
|
mit
| 5,956 |
|
tobymao__sqlglot-1944
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 03edaa3b..219b1aa8 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -211,7 +211,7 @@ class DuckDB(Dialect):
exp.DateFromParts: rename_func("MAKE_DATE"),
exp.DateSub: _date_delta_sql,
exp.DateDiff: lambda self, e: self.func(
- "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this
+ "DATE_DIFF", f"'{e.args.get('unit') or 'day'}'", e.expression, e.this
),
exp.DateStrToDate: datestrtodate_sql,
exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.DATEINT_FORMAT}) AS INT)",
|
tobymao/sqlglot
|
d1454102c321bb69cfdba3dd0f041685aa5fb5b3
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 37a6f490..5284700f 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -56,6 +56,15 @@ class TestDuckDB(Validator):
self.validate_all("0x1010", write={"": "0 AS x1010"})
self.validate_all("x ~ y", write={"duckdb": "REGEXP_MATCHES(x, y)"})
self.validate_all("SELECT * FROM 'x.y'", write={"duckdb": 'SELECT * FROM "x.y"'})
+ self.validate_all(
+ "DATE_DIFF('day', CAST(b AS DATE), CAST(a AS DATE))",
+ read={
+ "duckdb": "DATE_DIFF('day', CAST(b AS DATE), CAST(a AS DATE))",
+ "hive": "DATEDIFF(a, b)",
+ "spark": "DATEDIFF(a, b)",
+ "spark2": "DATEDIFF(a, b)",
+ },
+ )
self.validate_all(
"XOR(a, b)",
read={
|
Spark to DuckDB 'datediff' transpilation error
Possibly related to #1929
When transpiling from Spark to DuckDB, the 'datediff' function transpiles correctly to "date_diff", but the period parameter is returned as "None" instead of "day".
The source SQL, expected output, and actual output were attached as screenshots (images not preserved here); per the description above, the actual output contained DATE_DIFF('None', ...) where DATE_DIFF('day', ...) was expected.
Can you take a look, please?
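For reference, the fixed transpilation, mirroring the test above (the unit now defaults to 'day' instead of leaking a Python None into the SQL):
```python
import sqlglot

print(sqlglot.transpile("SELECT DATEDIFF(a, b)", read="spark", write="duckdb")[0])
# SELECT DATE_DIFF('day', CAST(b AS DATE), CAST(a AS DATE))
```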
|
0.0
|
d1454102c321bb69cfdba3dd0f041685aa5fb5b3
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-07-21 15:33:03+00:00
|
mit
| 5,957 |
|
tobymao__sqlglot-1953
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 5adec776..fcb54d1b 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -4172,7 +4172,7 @@ class Parser(metaclass=_Parser):
self._match_r_paren()
- return self.expression(
+ window = self.expression(
exp.Window,
this=this,
partition_by=partition,
@@ -4183,6 +4183,12 @@ class Parser(metaclass=_Parser):
first=first,
)
+ # This covers Oracle's FIRST/LAST syntax: aggregate KEEP (...) OVER (...)
+ if self._match_set(self.WINDOW_BEFORE_PAREN_TOKENS, advance=False):
+ return self._parse_window(window, alias=alias)
+
+ return window
+
def _parse_window_spec(self) -> t.Dict[str, t.Optional[str | exp.Expression]]:
self._match(TokenType.BETWEEN)
|
tobymao/sqlglot
|
e71bbc40c71982860a1e2ac77cdc521b3d38f965
|
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index f30b38fa..0c3b09f3 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -23,6 +23,11 @@ class TestOracle(Validator):
self.validate_identity(
"SELECT MIN(column_name) KEEP (DENSE_RANK FIRST ORDER BY column_name DESC) FROM table_name"
)
+ self.validate_identity(
+ "SELECT last_name, department_id, salary, MIN(salary) KEEP (DENSE_RANK FIRST ORDER BY commission_pct) "
+ 'OVER (PARTITION BY department_id) AS "Worst", MAX(salary) KEEP (DENSE_RANK LAST ORDER BY commission_pct) '
+ 'OVER (PARTITION BY department_id) AS "Best" FROM employees ORDER BY department_id, salary, last_name'
+ )
self.validate_all(
"NVL(NULL, 1)",
|
Support of first/last clause in oracle dialect
It seems only one window clause (KEEP or OVER) is supported per aggregate, so the following statements parse successfully.
```
SELECT last_name, department_id, salary,
MAX(salary) KEEP (DENSE_RANK FIRST ORDER BY commission_pct)
"Worst"
FROM employees
ORDER BY department_id, salary, last_name;
SELECT last_name, department_id, salary,
MAX(salary)
OVER (PARTITION BY department_id) "Worst"
FROM employees
ORDER BY department_id, salary, last_name
```
but the full FIRST/LAST clause of Oracle fails to parse.
Would it be possible to get support for this clause?
**Fully reproducible code snippet**
```
from sqlglot import parse_one
from sqlglot.dialects.oracle import Oracle
parser = Oracle()
sql = """
SELECT last_name, department_id, salary,
MIN(salary) KEEP (DENSE_RANK FIRST ORDER BY commission_pct)
OVER (PARTITION BY department_id) "Worst",
MAX(salary) KEEP (DENSE_RANK LAST ORDER BY commission_pct)
OVER (PARTITION BY department_id) "Best"
FROM employees
ORDER BY department_id, salary, last_name
"""
ast = parse_one(sql, read=parser)
```
error:
```
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 4, Col: 13.
department_id, salary,
MAX(salary) KEEP (DENSE_RANK FIRST ORDER BY commission_pct)
OVER (PARTITION BY department_id) "Worst",
MAX(salary) KEEP (DENSE_RANK LAST ORDER BY commission_
```
**Official Documentation**
https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/FIRST.html#GUID-85AB9246-0E0A-44A1-A7E6-4E57502E9238
https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/LAST.html#GUID-4E16BC0E-D3B8-4BA4-8F97-3A08891A85CC
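For reference, a sketch of the snippet above against the fixed parser (assuming a build with this patch; passing the dialect name as a string is equivalent to instantiating the Oracle dialect):
```python
from sqlglot import parse_one

sql = """
SELECT last_name, department_id, salary,
MIN(salary) KEEP (DENSE_RANK FIRST ORDER BY commission_pct)
OVER (PARTITION BY department_id) "Worst",
MAX(salary) KEEP (DENSE_RANK LAST ORDER BY commission_pct)
OVER (PARTITION BY department_id) "Best"
FROM employees
ORDER BY department_id, salary, last_name
"""
ast = parse_one(sql, read="oracle")  # no longer raises ParseError
```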
|
0.0
|
e71bbc40c71982860a1e2ac77cdc521b3d38f965
|
[
"tests/dialects/test_oracle.py::TestOracle::test_oracle"
] |
[
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-24 12:36:23+00:00
|
mit
| 5,958 |
|
tobymao__sqlglot-1979
|
diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py
index 4e8ffb4a..2e0e8f9d 100644
--- a/sqlglot/dialects/teradata.py
+++ b/sqlglot/dialects/teradata.py
@@ -33,8 +33,11 @@ class Teradata(Dialect):
**tokens.Tokenizer.KEYWORDS,
"^=": TokenType.NEQ,
"BYTEINT": TokenType.SMALLINT,
+ "COLLECT STATS": TokenType.COMMAND,
+ "COLLECT STATISTICS": TokenType.COMMAND,
"GE": TokenType.GTE,
"GT": TokenType.GT,
+ "HELP": TokenType.COMMAND,
"INS": TokenType.INSERT,
"LE": TokenType.LTE,
"LT": TokenType.LT,
|
tobymao/sqlglot
|
8a44cc22119aa46af20f442645fe496217042722
|
diff --git a/tests/dialects/test_teradata.py b/tests/dialects/test_teradata.py
index 0df6d0be..4d322419 100644
--- a/tests/dialects/test_teradata.py
+++ b/tests/dialects/test_teradata.py
@@ -22,6 +22,13 @@ class TestTeradata(Validator):
},
)
+ def test_statistics(self):
+ self.validate_identity("COLLECT STATISTICS ON tbl INDEX(col)")
+ self.validate_identity("COLLECT STATS ON tbl COLUMNS(col)")
+ self.validate_identity("COLLECT STATS COLUMNS(col) ON tbl")
+ self.validate_identity("HELP STATISTICS personel.employee")
+ self.validate_identity("HELP STATISTICS personnel.employee FROM my_qcd")
+
def test_create(self):
self.validate_identity("CREATE TABLE x (y INT) PRIMARY INDEX (y) PARTITION BY y INDEX (y)")
self.validate_identity("CREATE TABLE x (y INT) PARTITION BY y INDEX (y)")
|
Problem with parsing Teradata COLLECT STATISTICS command
I have found a problem parsing the COLLECT STATISTICS command in Teradata.
All variations of the COLLECT STATISTICS command fail:
```
COLLECT STATISTICS ON TABLE1
INDEX(COL1);
COLLECT STATS
ON TABLE1
COLUMNS(COL1);
COLLECT STATS ON TABLE1
COLUMNS(COL1);
COLLECT STATS
COLUMNS(COL1)
ON TABLE1;
```
sqlglot Version: 17.4.1
Teradata version 17.10
The error message is:
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:946, in Parser._parse(self, parse_method, raw_tokens, sql)
943 expressions.append(parse_method(self))
945 if self._index < len(self._tokens):
--> 946 self.raise_error("Invalid expression / Unexpected token")
948 self.check_errors()
950 return expressions
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:987, in Parser.raise_error(self, message, token)
975 error = ParseError.new(
976 f"{message}. Line {token.line}, Col: {token.col}.\n"
977 f" {start_context}\033[4m{highlight}\033[0m{end_context}",
(...)
983 end_context=end_context,
984 )
986 if self.error_level == ErrorLevel.IMMEDIATE:
--> 987 raise error
989 self.errors.append(error)
or
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:946, in Parser._parse(self, parse_method, raw_tokens, sql)
943 expressions.append(parse_method(self))
945 if self._index < len(self._tokens):
--> 946 self.raise_error("Invalid expression / Unexpected token")
948 self.check_errors()
950 return expressions
File ~\AppData\Roaming\Python\Python311\site-packages\sqlglot\parser.py:987, in Parser.raise_error(self, message, token)
975 error = ParseError.new(
976 f"{message}. Line {token.line}, Col: {token.col}.\n"
977 f" {start_context}\033[4m{highlight}\033[0m{end_context}",
(...)
983 end_context=end_context,
984 )
986 if self.error_level == ErrorLevel.IMMEDIATE:
--> 987 raise error
989 self.errors.append(error)
ParseError: Invalid expression / Unexpected token. Line 2, Col: 16.
COLLECT STATS ON TABLE1
COLUMNS(COL1);
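For reference, the patch tokenizes COLLECT STATS / COLLECT STATISTICS / HELP as opaque commands, so these statements now parse and round-trip; a minimal sketch:
```python
import sqlglot

# Parsed as a command rather than raising a ParseError.
ast = sqlglot.parse_one("COLLECT STATS ON tbl COLUMNS(col)", read="teradata")
print(ast.sql(dialect="teradata"))  # COLLECT STATS ON tbl COLUMNS(col)
```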
|
0.0
|
8a44cc22119aa46af20f442645fe496217042722
|
[
"tests/dialects/test_teradata.py::TestTeradata::test_statistics"
] |
[
"tests/dialects/test_teradata.py::TestTeradata::test_abbrev",
"tests/dialects/test_teradata.py::TestTeradata::test_datatype",
"tests/dialects/test_teradata.py::TestTeradata::test_cast",
"tests/dialects/test_teradata.py::TestTeradata::test_update",
"tests/dialects/test_teradata.py::TestTeradata::test_insert",
"tests/dialects/test_teradata.py::TestTeradata::test_mod",
"tests/dialects/test_teradata.py::TestTeradata::test_translate",
"tests/dialects/test_teradata.py::TestTeradata::test_create"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-07-28 23:05:42+00:00
|
mit
| 5,959 |
|
tobymao__sqlglot-2007
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 4902105d..9733a855 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -297,9 +297,10 @@ class Snowflake(Dialect):
return super()._parse_id_var(any_token=any_token, tokens=tokens)
class Tokenizer(tokens.Tokenizer):
- QUOTES = ["'", "$$"]
+ QUOTES = ["'"]
STRING_ESCAPES = ["\\", "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
+ RAW_STRINGS = ["$$"]
COMMENTS = ["--", "//", ("/*", "*/")]
KEYWORDS = {
|
tobymao/sqlglot
|
c73790d0249cf0f21f855ed4921c9a8727e2d5ff
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 7fd24f24..3053d47f 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -34,12 +34,21 @@ class TestSnowflake(Validator):
self.validate_identity("ALTER TABLE foo UNSET DATA_RETENTION_TIME_IN_DAYS, CHANGE_TRACKING")
self.validate_identity("COMMENT IF EXISTS ON TABLE foo IS 'bar'")
self.validate_identity("SELECT CONVERT_TIMEZONE('UTC', 'America/Los_Angeles', col)")
+ self.validate_identity("REGEXP_REPLACE('target', 'pattern', '\n')")
self.validate_identity(
'COPY INTO NEW_TABLE ("foo", "bar") FROM (SELECT $1, $2, $3, $4 FROM @%old_table)'
)
self.validate_identity(
"SELECT state, city, SUM(retail_price * quantity) AS gross_revenue FROM sales GROUP BY ALL"
)
+ self.validate_identity(
+ r"SELECT RLIKE(a, $$regular expression with \ characters: \d{2}-\d{3}-\d{4}$$, 'i') FROM log_source",
+ r"SELECT REGEXP_LIKE(a, 'regular expression with \\ characters: \\d{2}-\\d{3}-\\d{4}', 'i') FROM log_source",
+ )
+ self.validate_identity(
+ r"SELECT $$a ' \ \t \x21 z $ $$",
+ r"SELECT 'a \' \\ \\t \\x21 z $ '",
+ )
self.validate_all("CAST(x AS BYTEINT)", write={"snowflake": "CAST(x AS INT)"})
self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
@@ -385,13 +394,6 @@ class TestSnowflake(Validator):
"snowflake": "SELECT 'a'",
},
)
- self.validate_all(
- r"SELECT $$a ' \ \t \x21 z $ $$",
- write={
- "snowflake": r"SELECT 'a \' \ \t \x21 z $ '",
- },
- )
- self.validate_identity("REGEXP_REPLACE('target', 'pattern', '\n')")
self.validate_all(
"SELECT RLIKE(a, b)",
write={
|
Incorrect treatment of dollar-quoted strings for regex patterns
In the docs you can see that [dollar-quoted string constants](https://docs.snowflake.com/en/sql-reference/data-types-text#dollar-quoted-string-constants) should work as follows for regex:
```
'regular expression with \\ characters: \\d{2}-\\d{3}-\\d{4}'
```
is equivalent to the following dollar quoted variant:
```
$$regular expression with \ characters: \d{2}-\d{3}-\d{4}$$
```
However when using the dollar quoted variant, sqlglot converts the dollar quoted version incorrectly as follows:
```
from sqlglot import parse_one
print(
    parse_one(
        "SELECT RLIKE(field, $$regular expression with \ characters: \d{2}-\d{3}-\d{4}$$, 'i') from log_source", read="snowflake"
    ).sql(dialect="snowflake", pretty=True)
)
```
Outputs:
```
SELECT
REGEXP_LIKE(field, 'regular expression with \ characters: \d{2}-\d{3}-\d{4}', 'i')
FROM log_source
```
I would expect:
```
SELECT
REGEXP_LIKE(field, 'regular expression with \\ characters: \\d{2}-\\d{3}-\\d{4}', 'i')
FROM log_source
```
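For reference, the fix treats `$$...$$` as a raw string, so backslashes are preserved and re-escaped on output; a sketch mirroring the test above:
```python
import sqlglot

sql = r"SELECT RLIKE(a, $$regular expression with \ characters: \d{2}-\d{3}-\d{4}$$, 'i') FROM log_source"
print(sqlglot.transpile(sql, read="snowflake", write="snowflake")[0])
# SELECT REGEXP_LIKE(a, 'regular expression with \\ characters: \\d{2}-\\d{3}-\\d{4}', 'i') FROM log_source
```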
|
0.0
|
c73790d0249cf0f21f855ed4921c9a8727e2d5ff
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-08 13:47:25+00:00
|
mit
| 5,960 |
|
tobymao__sqlglot-2021
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index c2077512..edd72ea6 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -3309,6 +3309,7 @@ class Pivot(Expression):
"using": False,
"group": False,
"columns": False,
+ "include_nulls": False,
}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 8dba11fc..0ab960ac 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1290,7 +1290,12 @@ class Generator:
unpivot = expression.args.get("unpivot")
direction = "UNPIVOT" if unpivot else "PIVOT"
field = self.sql(expression, "field")
- return f"{direction}({expressions} FOR {field}){alias}"
+ include_nulls = expression.args.get("include_nulls")
+ if include_nulls is not None:
+ nulls = " INCLUDE NULLS " if include_nulls else " EXCLUDE NULLS "
+ else:
+ nulls = ""
+ return f"{direction}{nulls}({expressions} FOR {field}){alias}"
def tuple_sql(self, expression: exp.Tuple) -> str:
return f"({self.expressions(expression, flat=True)})"
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 1f3e240d..18471480 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2623,11 +2623,18 @@ class Parser(metaclass=_Parser):
def _parse_pivot(self) -> t.Optional[exp.Pivot]:
index = self._index
+ include_nulls = None
if self._match(TokenType.PIVOT):
unpivot = False
elif self._match(TokenType.UNPIVOT):
unpivot = True
+
+ # https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-qry-select-unpivot.html#syntax
+ if self._match_text_seq("INCLUDE", "NULLS"):
+ include_nulls = True
+ elif self._match_text_seq("EXCLUDE", "NULLS"):
+ include_nulls = False
else:
return None
@@ -2658,7 +2665,13 @@ class Parser(metaclass=_Parser):
self._match_r_paren()
- pivot = self.expression(exp.Pivot, expressions=expressions, field=field, unpivot=unpivot)
+ pivot = self.expression(
+ exp.Pivot,
+ expressions=expressions,
+ field=field,
+ unpivot=unpivot,
+ include_nulls=include_nulls,
+ )
if not self._match_set((TokenType.PIVOT, TokenType.UNPIVOT), advance=False):
pivot.set("alias", self._parse_table_alias())
|
tobymao/sqlglot
|
d6a5a28c71cac196c027294fabefcd63a09f073e
|
diff --git a/tests/dialects/test_databricks.py b/tests/dialects/test_databricks.py
index 14f7cd04..38a79523 100644
--- a/tests/dialects/test_databricks.py
+++ b/tests/dialects/test_databricks.py
@@ -11,6 +11,12 @@ class TestDatabricks(Validator):
self.validate_identity("CREATE FUNCTION a AS b")
self.validate_identity("SELECT ${x} FROM ${y} WHERE ${z} > 1")
self.validate_identity("CREATE TABLE foo (x DATE GENERATED ALWAYS AS (CAST(y AS DATE)))")
+ self.validate_identity(
+ "SELECT * FROM sales UNPIVOT INCLUDE NULLS (sales FOR quarter IN (q1 AS `Jan-Mar`))"
+ )
+ self.validate_identity(
+ "SELECT * FROM sales UNPIVOT EXCLUDE NULLS (sales FOR quarter IN (q1 AS `Jan-Mar`))"
+ )
self.validate_all(
"CREATE TABLE foo (x INT GENERATED ALWAYS AS (YEAR(y)))",
|
Support for Databricks UNPIVOT Options
The Databricks dialect does not support the INCLUDE NULLS / EXCLUDE NULLS option: https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-qry-select-unpivot.html
Sample SQL:
```sql
select id
, level
, conditions
FROM bla.table_name UNPIVOT INCLUDE NULLS
(conditions FOR level IN (KONTR1
,KONTR2
,KONTR3
,KONTR4))
```
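For reference, a round-trip sketch of the new syntax (mirroring the identity tests above; the nulls option is now captured on the Pivot node):
```python
import sqlglot

sql = "SELECT * FROM sales UNPIVOT INCLUDE NULLS (sales FOR quarter IN (q1 AS `Jan-Mar`))"
print(sqlglot.transpile(sql, read="databricks", write="databricks")[0])  # round-trips unchanged
```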
|
0.0
|
d6a5a28c71cac196c027294fabefcd63a09f073e
|
[
"tests/dialects/test_databricks.py::TestDatabricks::test_databricks"
] |
[
"tests/dialects/test_databricks.py::TestDatabricks::test_json",
"tests/dialects/test_databricks.py::TestDatabricks::test_datediff",
"tests/dialects/test_databricks.py::TestDatabricks::test_add_date",
"tests/dialects/test_databricks.py::TestDatabricks::test_without_as"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-10 12:01:49+00:00
|
mit
| 5,961 |
|
tobymao__sqlglot-2023
|
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index e6b7743f..fb22b254 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -41,7 +41,6 @@ class ClickHouse(Dialect):
"FLOAT32": TokenType.FLOAT,
"FLOAT64": TokenType.DOUBLE,
"GLOBAL": TokenType.GLOBAL,
- "INT128": TokenType.INT128,
"INT16": TokenType.SMALLINT,
"INT256": TokenType.INT256,
"INT32": TokenType.INT,
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 5428e869..5d657678 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -110,14 +110,14 @@ class DuckDB(Dialect):
"//": TokenType.DIV,
"ATTACH": TokenType.COMMAND,
"BINARY": TokenType.VARBINARY,
- "BPCHAR": TokenType.TEXT,
"BITSTRING": TokenType.BIT,
+ "BPCHAR": TokenType.TEXT,
"CHAR": TokenType.TEXT,
"CHARACTER VARYING": TokenType.TEXT,
"EXCLUDE": TokenType.EXCEPT,
+ "HUGEINT": TokenType.INT128,
"INT1": TokenType.TINYINT,
"LOGICAL": TokenType.BOOLEAN,
- "NUMERIC": TokenType.DOUBLE,
"PIVOT_WIDER": TokenType.PIVOT,
"SIGNED": TokenType.INT,
"STRING": TokenType.VARCHAR,
@@ -186,6 +186,22 @@ class DuckDB(Dialect):
TokenType.UTINYINT,
}
+ def _parse_types(
+ self, check_func: bool = False, schema: bool = False
+ ) -> t.Optional[exp.Expression]:
+ this = super()._parse_types(check_func=check_func, schema=schema)
+
+ # DuckDB treats NUMERIC and DECIMAL without precision as DECIMAL(18, 3)
+ # See: https://duckdb.org/docs/sql/data_types/numeric
+ if (
+ isinstance(this, exp.DataType)
+ and this.is_type("numeric", "decimal")
+ and not this.expressions
+ ):
+ return exp.DataType.build("DECIMAL(18, 3)")
+
+ return this
+
def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
if len(aggregations) == 1:
return super()._pivot_column_names(aggregations)
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index d28f95d8..3cb8775a 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -659,6 +659,7 @@ class Tokenizer(metaclass=_Tokenizer):
"TINYINT": TokenType.TINYINT,
"SHORT": TokenType.SMALLINT,
"SMALLINT": TokenType.SMALLINT,
+ "INT128": TokenType.INT128,
"INT2": TokenType.SMALLINT,
"INTEGER": TokenType.INT,
"INT": TokenType.INT,
|
tobymao/sqlglot
|
1c259849a92445410763d85edae59f5b04cfad5e
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 980f68ea..16f1b3fd 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -497,8 +497,12 @@ class TestDuckDB(Validator):
self.validate_identity("CAST(x AS USMALLINT)")
self.validate_identity("CAST(x AS UTINYINT)")
self.validate_identity("CAST(x AS TEXT)")
+ self.validate_identity("CAST(x AS INT128)")
+ self.validate_identity("CAST(x AS DOUBLE)")
+ self.validate_identity("CAST(x AS DECIMAL(15, 4))")
- self.validate_all("CAST(x AS NUMERIC)", write={"duckdb": "CAST(x AS DOUBLE)"})
+ self.validate_all("CAST(x AS NUMERIC(1, 2))", write={"duckdb": "CAST(x AS DECIMAL(1, 2))"})
+ self.validate_all("CAST(x AS HUGEINT)", write={"duckdb": "CAST(x AS INT128)"})
self.validate_all("CAST(x AS CHAR)", write={"duckdb": "CAST(x AS TEXT)"})
self.validate_all("CAST(x AS BPCHAR)", write={"duckdb": "CAST(x AS TEXT)"})
self.validate_all("CAST(x AS STRING)", write={"duckdb": "CAST(x AS TEXT)"})
@@ -513,6 +517,20 @@ class TestDuckDB(Validator):
self.validate_all("CAST(x AS BINARY)", write={"duckdb": "CAST(x AS BLOB)"})
self.validate_all("CAST(x AS VARBINARY)", write={"duckdb": "CAST(x AS BLOB)"})
self.validate_all("CAST(x AS LOGICAL)", write={"duckdb": "CAST(x AS BOOLEAN)"})
+ self.validate_all(
+ "CAST(x AS NUMERIC)",
+ write={
+ "duckdb": "CAST(x AS DECIMAL(18, 3))",
+ "postgres": "CAST(x AS DECIMAL(18, 3))",
+ },
+ )
+ self.validate_all(
+ "CAST(x AS DECIMAL)",
+ write={
+ "duckdb": "CAST(x AS DECIMAL(18, 3))",
+ "postgres": "CAST(x AS DECIMAL(18, 3))",
+ },
+ )
self.validate_all(
"CAST(x AS BIT)",
read={
|
Support additional duckdb data types
I've started work to move our type parser in ibis from parsy to sqlglot for performance reasons: sqlglot is much, much faster.
sqlglot is missing a few types that we previously supported (parsing metadata that comes back from DuckDB's `DESCRIBE` directive), and while these aren't blockers because they are simple to intercept, it would be nice if sqlglot supported some or all of them.
**Is your feature request related to a problem? Please describe.**
Not related to a problem.
Here's the list of types that are currently supported by ibis and potentially returned from duckdb's `DESCRIBE` that aren't supported in sqlglot:
- `INT128`
- `HUGEINT`
- `NUMERIC` with no precision or scale arguments
- `TIMESTAMP_SEC`
- `TIMESTAMP_S`
- `TIMESTAMP_MS`
- `TIMESTAMP_US`
- `TIMESTAMP_NS`
- `TIMESTAMP_TZ`
**Describe the solution you'd like**
- `INT128`, `HUGEINT` -> `DataType.Type.INT128`
- `NUMERIC` -> `DECIMAL(18, 3)` DuckDB's default precision and scale
- `TIMESTAMP_*` -> ? not sure if these have an exact sqlglot equivalent
**Describe alternatives you've considered**
I've implemented the only viable alternative I can think of, which is to handle these strings before passing the text to sqlglot.
**Additional context**
NA
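For illustration, a minimal sketch of the behavior the accompanying patch and tests establish (expected outputs are copied from the test patch above):
```python
import sqlglot

# HUGEINT is treated as an alias for the 128-bit integer type.
print(sqlglot.transpile("CAST(x AS HUGEINT)", read="duckdb", write="duckdb")[0])
# CAST(x AS INT128)

# A bare NUMERIC/DECIMAL defaults to DuckDB's DECIMAL(18, 3).
print(sqlglot.transpile("CAST(x AS NUMERIC)", read="duckdb", write="duckdb")[0])
# CAST(x AS DECIMAL(18, 3))
```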
|
0.0
|
1c259849a92445410763d85edae59f5b04cfad5e
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-10 18:13:12+00:00
|
mit
| 5,962 |
|
tobymao__sqlglot-2040
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 9efb21ed..09633b03 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -3144,29 +3144,9 @@ class Parser(metaclass=_Parser):
maybe_func = True
- if self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
- this = exp.DataType(
- this=exp.DataType.Type.ARRAY,
- expressions=[
- exp.DataType(
- this=exp.DataType.Type[type_token.value],
- expressions=expressions,
- nested=nested,
- )
- ],
- nested=True,
- )
-
- while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
- this = exp.DataType(this=exp.DataType.Type.ARRAY, expressions=[this], nested=True)
-
- return this
-
- if self._match(TokenType.L_BRACKET):
- self._retreat(index)
- return None
-
+ this: t.Optional[exp.Expression] = None
values: t.Optional[t.List[t.Optional[exp.Expression]]] = None
+
if nested and self._match(TokenType.LT):
if is_struct:
expressions = self._parse_csv(self._parse_struct_types)
@@ -3182,14 +3162,13 @@ class Parser(metaclass=_Parser):
values = self._parse_csv(self._parse_conjunction)
self._match_set((TokenType.R_BRACKET, TokenType.R_PAREN))
- value: t.Optional[exp.Expression] = None
if type_token in self.TIMESTAMPS:
if self._match_text_seq("WITH", "TIME", "ZONE"):
maybe_func = False
- value = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions)
+ this = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions)
elif self._match_text_seq("WITH", "LOCAL", "TIME", "ZONE"):
maybe_func = False
- value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions)
+ this = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions)
elif self._match_text_seq("WITHOUT", "TIME", "ZONE"):
maybe_func = False
elif type_token == TokenType.INTERVAL:
@@ -3202,11 +3181,11 @@ class Parser(metaclass=_Parser):
unit = not span and self._parse_var()
if not unit:
- value = self.expression(
+ this = self.expression(
exp.DataType, this=exp.DataType.Type.INTERVAL, expressions=span
)
else:
- value = self.expression(exp.Interval, unit=unit)
+ this = self.expression(exp.Interval, unit=unit)
if maybe_func and check_func:
index2 = self._index
@@ -3218,16 +3197,19 @@ class Parser(metaclass=_Parser):
self._retreat(index2)
- if value:
- return value
+ if not this:
+ this = exp.DataType(
+ this=exp.DataType.Type[type_token.value],
+ expressions=expressions,
+ nested=nested,
+ values=values,
+ prefix=prefix,
+ )
+
+ while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
+ this = exp.DataType(this=exp.DataType.Type.ARRAY, expressions=[this], nested=True)
- return exp.DataType(
- this=exp.DataType.Type[type_token.value],
- expressions=expressions,
- nested=nested,
- values=values,
- prefix=prefix,
- )
+ return this
def _parse_struct_types(self) -> t.Optional[exp.Expression]:
this = self._parse_type() or self._parse_id_var()
|
tobymao/sqlglot
|
36f308e80253d4c6b13232f21a5fd221d0757fb5
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index be34d8c9..a7719a9b 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -9,6 +9,10 @@ class TestPostgres(Validator):
dialect = "postgres"
def test_ddl(self):
+ self.validate_identity(
+ "CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])",
+ "CREATE TABLE test (x TIMESTAMP[][])",
+ )
self.validate_identity("CREATE TABLE test (elems JSONB[])")
self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)")
self.validate_identity("CREATE TABLE test (foo HSTORE)")
|
Support for parsing `timestamp with/without time zone[]` and `time with/without time zone[]` with postgres
**Is your feature request related to a problem? Please describe.**
Not related to a problem.
**Describe the solution you'd like**
I'd like sqlglot to support parsing the `timestamp without time zone[]` and `time without time zone[]` types for the postgres dialect.
**Describe alternatives you've considered**
I haven't considered any alternatives
**Additional context**
Proof that this is a thing :)
```
ibis_testing> create table t (x timestamp without time zone []);
CREATE TABLE
Time: 0.009s
```
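For reference, a minimal sketch of the parsing behavior the patch above enables (the expected output is taken from the test patch; `WITHOUT TIME ZONE` is the default, so it is dropped on output):
```python
import sqlglot

sql = "CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])"
print(sqlglot.parse_one(sql, read="postgres").sql(dialect="postgres"))
# CREATE TABLE test (x TIMESTAMP[][])
```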
|
0.0
|
36f308e80253d4c6b13232f21a5fd221d0757fb5
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-11 16:50:14+00:00
|
mit
| 5,963 |
|
tobymao__sqlglot-2047
|
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index a5089a16..b3b95384 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -79,22 +79,23 @@ def _format_time_lambda(
def _parse_format(args: t.List) -> exp.Expression:
- assert len(args) == 2
+ this = seq_get(args, 0)
+ fmt = seq_get(args, 1)
+ culture = seq_get(args, 2)
- fmt = args[1]
- number_fmt = fmt.name in TRANSPILE_SAFE_NUMBER_FMT or not DATE_FMT_RE.search(fmt.name)
+ number_fmt = fmt and (fmt.name in TRANSPILE_SAFE_NUMBER_FMT or not DATE_FMT_RE.search(fmt.name))
if number_fmt:
- return exp.NumberToStr(this=args[0], format=fmt)
+ return exp.NumberToStr(this=this, format=fmt, culture=culture)
- return exp.TimeToStr(
- this=args[0],
- format=exp.Literal.string(
+ if fmt:
+ fmt = exp.Literal.string(
format_time(fmt.name, TSQL.FORMAT_TIME_MAPPING)
if len(fmt.name) == 1
else format_time(fmt.name, TSQL.TIME_MAPPING)
- ),
- )
+ )
+
+ return exp.TimeToStr(this=this, format=fmt, culture=culture)
def _parse_eomonth(args: t.List) -> exp.Expression:
@@ -147,7 +148,7 @@ def _format_sql(self: generator.Generator, expression: exp.NumberToStr | exp.Tim
)
)
)
- return self.func("FORMAT", expression.this, fmt)
+ return self.func("FORMAT", expression.this, fmt, expression.args.get("culture"))
def _string_agg_sql(self: generator.Generator, expression: exp.GroupConcat) -> str:
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 57b8bfa7..fa37892d 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -4649,7 +4649,7 @@ class StrToUnix(Func):
class NumberToStr(Func):
- arg_types = {"this": True, "format": True}
+ arg_types = {"this": True, "format": True, "culture": False}
class FromBase(Func):
@@ -4686,7 +4686,7 @@ class StddevSamp(AggFunc):
class TimeToStr(Func):
- arg_types = {"this": True, "format": True}
+ arg_types = {"this": True, "format": True, "culture": False}
class TimeToTimeStr(Func):
|
tobymao/sqlglot
|
0746b6f96d9b8fad0d8fbea3e23170e8d56eb3ee
|
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 9decd354..8b919d48 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -971,9 +971,12 @@ WHERE
)
def test_format(self):
+ self.validate_identity("SELECT FORMAT(foo, 'dddd', 'de-CH')")
+ self.validate_identity("SELECT FORMAT(EndOfDayRate, 'N', 'en-us')")
self.validate_identity("SELECT FORMAT('01-01-1991', 'd.mm.yyyy')")
self.validate_identity("SELECT FORMAT(12345, '###.###.###')")
self.validate_identity("SELECT FORMAT(1234567, 'f')")
+
self.validate_all(
"SELECT FORMAT(1000000.01,'###,###.###')",
write={"spark": "SELECT FORMAT_NUMBER(1000000.01, '###,###.###')"},
|
SQLGlot Fails if T-SQL FORMAT used with Culture Parameter
Repro:
```python
import sqlglot
sqlglot.parse_one("SELECT FORMAT(TimeStart, 'dddd', 'de-CH') FROM TableName", read="tsql")
```
Docs: https://learn.microsoft.com/en-us/sql/t-sql/functions/format-transact-sql?view=sql-server-ver16
The problematic line is to be found here: https://github.com/tobymao/sqlglot/blob/0746b6f96d9b8fad0d8fbea3e23170e8d56eb3ee/sqlglot/dialects/tsql.py#L82
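With the patch above, the optional third (culture) argument is accepted and round-trips; a minimal sketch based on the identity test in the test patch:
```python
import sqlglot

sql = "SELECT FORMAT(foo, 'dddd', 'de-CH')"
print(sqlglot.parse_one(sql, read="tsql").sql(dialect="tsql"))
# SELECT FORMAT(foo, 'dddd', 'de-CH')
```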
|
0.0
|
0746b6f96d9b8fad0d8fbea3e23170e8d56eb3ee
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_format"
] |
[
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-14 12:24:22+00:00
|
mit
| 5,964 |
|
tobymao__sqlglot-2049
|
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 1c837610..ac0fc706 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2528,10 +2528,21 @@ class Generator:
return f"WHEN {matched}{source}{condition} THEN {then}"
def merge_sql(self, expression: exp.Merge) -> str:
- this = self.sql(expression, "this")
+ table = expression.this
+ table_alias = ""
+
+ hints = table.args.get("hints")
+ if hints and table.alias and isinstance(hints[0], exp.WithTableHint):
+ # T-SQL syntax is MERGE ... <target_table> [WITH (<merge_hint>)] [[AS] table_alias]
+ table = table.copy()
+ table_alias = f" AS {self.sql(table.args['alias'].pop())}"
+
+ this = self.sql(table)
using = f"USING {self.sql(expression, 'using')}"
on = f"ON {self.sql(expression, 'on')}"
- return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}"
+ expressions = self.expressions(expression, sep=" ")
+
+ return f"MERGE INTO {this}{table_alias} {using} {on} {expressions}"
def tochar_sql(self, expression: exp.ToChar) -> str:
if expression.args.get("format"):
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 10fecc9a..4ab48548 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -4620,6 +4620,9 @@ class Parser(metaclass=_Parser):
self._match(TokenType.INTO)
target = self._parse_table()
+ if target and self._match(TokenType.ALIAS, advance=False):
+ target.set("alias", self._parse_table_alias())
+
self._match(TokenType.USING)
using = self._parse_table()
|
tobymao/sqlglot
|
a35cfe0ad78a287efa324f4a24dabab9559af471
|
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 8b919d48..a3fba58f 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -17,6 +17,11 @@ class TestTSQL(Validator):
"spark": "DROP TABLE IF EXISTS TempTableName",
},
)
+
+ self.validate_identity(
+ "MERGE INTO mytable WITH (HOLDLOCK) AS T USING mytable_merge AS S "
+ "ON (T.user_id = S.user_id) WHEN NOT MATCHED THEN INSERT (c1, c2) VALUES (S.c1, S.c2)"
+ )
self.validate_identity("UPDATE x SET y = 1 OUTPUT x.a, x.b INTO @y FROM y")
self.validate_identity("UPDATE x SET y = 1 OUTPUT x.a, x.b FROM y")
self.validate_identity("INSERT INTO x (y) OUTPUT x.a, x.b INTO l SELECT * FROM z")
|
tsql: table hint "...WITH (HOLDLOCK)..." causing parse error
HOLDLOCK is a table hint and is causing a parse error in the statement below. HOLDLOCK is one of many table hints that apply to many statement types [[DELETE](https://learn.microsoft.com/en-us/sql/t-sql/statements/delete-transact-sql?view=sql-server-ver16) [INSERT](https://learn.microsoft.com/en-us/sql/t-sql/statements/insert-transact-sql?view=sql-server-ver16) [SELECT](https://learn.microsoft.com/en-us/sql/t-sql/queries/select-transact-sql?view=sql-server-ver16) [UPDATE](https://learn.microsoft.com/en-us/sql/t-sql/queries/update-transact-sql?view=sql-server-ver16) [MERGE](https://learn.microsoft.com/en-us/sql/t-sql/statements/merge-transact-sql?view=sql-server-ver16)].
It would be good to parse `WITH ( <table_hint> [ [ , ] ...n ] )`; see [tsql table hints](https://learn.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver16&viewFallbackFrom=azure-sqldw-latest).
```python
sql = """
MERGE mytable WITH (HOLDLOCK) AS T
USING mytable_merge AS S
ON (T.user_id = S.user_id)
WHEN NOT MATCHED BY TARGET
THEN INSERT(c1, c2);
"""
import sqlglot
sqlglot.parse(sql, read="tsql")
```
Produces ParseError `ParseError: Expected table name but got . Line 2, Col: 32`
whereas
```python
sql = """
MERGE mytable AS T
USING mytable_merge AS S
ON (T.user_id = S.user_id)
WHEN NOT MATCHED BY TARGET
THEN INSERT(c1, c2);
"""
import sqlglot
sqlglot.parse(sql, read="tsql")
```
Produces the AST.
**Official Documentation**
The `WITH (HOLDLOCK)` is [mentioned here in the MERGE docs](https://learn.microsoft.com/en-us/sql/t-sql/statements/merge-transact-sql?view=sql-server-ver16#concurrency-considerations-for-merge)
`HOLDLOCK` is a [table hint ](https://learn.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver16)
> HOLDLOCK
Equivalent to SERIALIZABLE. For more information, see SERIALIZABLE later in this article. HOLDLOCK applies only to the table or view for which it is specified and only for the duration of the transaction defined by the statement that it is used in. HOLDLOCK can't be used in a SELECT statement that includes the FOR BROWSE option.
Table Hints:
> WITH ( <table_hint> [ [ , ] ...n ] )
> <table_hint> ::=
{ NOEXPAND [ , INDEX ( <index_value> [ , ...n ] ) | INDEX = ( <index_value> ) ]
| INDEX ( <index_value> [ , ...n ] ) | INDEX = ( <index_value> )
| FORCESEEK [ ( <index_value> ( <index_column_name> [ , ... ] ) ) ]
| FORCESCAN
| HOLDLOCK
| NOLOCK
| NOWAIT
| PAGLOCK
| READCOMMITTED
| READCOMMITTEDLOCK
| READPAST
| READUNCOMMITTED
| REPEATABLEREAD
| ROWLOCK
| SERIALIZABLE
| SNAPSHOT
| SPATIAL_WINDOW_MAX_CELLS = <integer_value>
| TABLOCK
| TABLOCKX
| UPDLOCK
| XLOCK
}
> <table_hint_limited> ::=
{
KEEPIDENTITY
| KEEPDEFAULTS
| HOLDLOCK
| IGNORE_CONSTRAINTS
| IGNORE_TRIGGERS
| NOLOCK
| NOWAIT
| PAGLOCK
| READCOMMITTED
| READCOMMITTEDLOCK
| READPAST
| REPEATABLEREAD
| ROWLOCK
| SERIALIZABLE
| SNAPSHOT
| TABLOCK
| TABLOCKX
| UPDLOCK
| XLOCK
}
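For illustration, a minimal sketch of the fixed behavior (the SQL string mirrors the identity test in the test patch; note the generator keeps T-SQL's `MERGE INTO <table> WITH (<hint>) AS <alias>` ordering):
```python
import sqlglot

sql = (
    "MERGE INTO mytable WITH (HOLDLOCK) AS T USING mytable_merge AS S "
    "ON (T.user_id = S.user_id) WHEN NOT MATCHED THEN INSERT (c1, c2) VALUES (S.c1, S.c2)"
)
# Parses without error and round-trips unchanged.
print(sqlglot.parse_one(sql, read="tsql").sql(dialect="tsql") == sql)  # True
```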
|
0.0
|
a35cfe0ad78a287efa324f4a24dabab9559af471
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_tsql"
] |
[
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_udf",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-14 16:15:16+00:00
|
mit
| 5,965 |
|
tobymao__sqlglot-2054
|
diff --git a/sqlglot/dialects/hive.py b/sqlglot/dialects/hive.py
index b6b7de72..69c92225 100644
--- a/sqlglot/dialects/hive.py
+++ b/sqlglot/dialects/hive.py
@@ -258,6 +258,11 @@ class Hive(Dialect):
),
"SIZE": exp.ArraySize.from_arg_list,
"SPLIT": exp.RegexpSplit.from_arg_list,
+ "STR_TO_MAP": lambda args: exp.StrToMap(
+ this=seq_get(args, 0),
+ pair_delim=seq_get(args, 1) or exp.Literal.string(","),
+ key_value_delim=seq_get(args, 2) or exp.Literal.string(":"),
+ ),
"TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
"TO_JSON": exp.JSONFormat.from_arg_list,
"UNBASE64": exp.FromBase64.from_arg_list,
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index ac8f3a0a..aa84462e 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -222,6 +222,7 @@ class Presto(Dialect):
),
"ROW": exp.Struct.from_arg_list,
"SEQUENCE": exp.GenerateSeries.from_arg_list,
+ "SPLIT_TO_MAP": exp.StrToMap.from_arg_list,
"STRPOS": lambda args: exp.StrPosition(
this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2)
),
@@ -321,6 +322,7 @@ class Presto(Dialect):
exp.SortArray: _no_sort_array,
exp.StrPosition: rename_func("STRPOS"),
exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
+ exp.StrToMap: rename_func("SPLIT_TO_MAP"),
exp.StrToTime: _str_to_time_sql,
exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
exp.Struct: rename_func("ROW"),
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index c231eb3e..b73a5e8a 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -4656,6 +4656,17 @@ class StrToUnix(Func):
arg_types = {"this": False, "format": False}
+# https://prestodb.io/docs/current/functions/string.html
+# https://spark.apache.org/docs/latest/api/sql/index.html#str_to_map
+class StrToMap(Func):
+ arg_types = {
+ "this": True,
+ "pair_delim": False,
+ "key_value_delim": False,
+ "duplicate_resolution_callback": False,
+ }
+
+
class NumberToStr(Func):
arg_types = {"this": True, "format": True, "culture": False}
|
tobymao/sqlglot
|
7a246794e3ca5f182ae4ea98f4e9a500b68cfd35
|
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index 92b10a5b..f116b6b5 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -487,6 +487,9 @@ class TestPresto(Validator):
self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
+ self.validate_identity(
+ "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))"
+ )
self.validate_all(
"SELECT ROW(1, 2)",
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 63487606..a984025c 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -239,7 +239,22 @@ TBLPROPERTIES (
self.validate_identity("TRIM(LEADING 'SL' FROM 'SSparkSQLS')")
self.validate_identity("TRIM(TRAILING 'SL' FROM 'SSparkSQLS')")
self.validate_identity("SPLIT(str, pattern, lim)")
+ self.validate_identity(
+ "SELECT STR_TO_MAP('a:1,b:2,c:3')",
+ "SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
+ )
+ self.validate_all(
+ "SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
+ read={
+ "presto": "SELECT SPLIT_TO_MAP('a:1,b:2,c:3', ',', ':')",
+ "spark": "SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
+ },
+ write={
+ "presto": "SELECT SPLIT_TO_MAP('a:1,b:2,c:3', ',', ':')",
+ "spark": "SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
+ },
+ )
self.validate_all(
"SELECT DATEDIFF(month, CAST('1996-10-30' AS TIMESTAMP), CAST('1997-02-28 10:30:00' AS TIMESTAMP))",
read={
|
Support for Spark's `str_to_map` and Presto's `split_to_map`
**Is your feature request related to a problem? Please describe.**
[Spark](https://spark.apache.org/docs/latest/api/sql/index.html#str_to_map) and [Presto](https://trino.io/docs/current/functions/string.html?highlight=split_to_map#split_to_map) support splitting text into key/value pairs using delimiters. This is useful when parsing text fields.
sqlglot currently parses either one into an anonymous function expression; e.g., `sqlglot.parse_one("select str_to_map(c1, ';', '=')")` gives
```python
(SELECT expressions:
(ANONYMOUS this: str_to_map, expressions:
(COLUMN this:
(IDENTIFIER this: c1, quoted: False)),
(LITERAL this: ;, is_string: True),
(LITERAL this: =, is_string: True)))
```
This prevents us from transpiling `str_to_map` to `split_to_map` (and vice versa).
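With the dedicated StrToMap expression from the patch above, the two functions transpile into each other; a minimal sketch (expected strings taken from the test patch):
```python
import sqlglot

print(sqlglot.transpile(
    "SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')", read="spark", write="presto"
)[0])
# SELECT SPLIT_TO_MAP('a:1,b:2,c:3', ',', ':')

print(sqlglot.transpile(
    "SELECT SPLIT_TO_MAP('a:1,b:2,c:3', ',', ':')", read="presto", write="spark"
)[0])
# SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')
```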
|
0.0
|
7a246794e3ca5f182ae4ea98f4e9a500b68cfd35
|
[
"tests/dialects/test_spark.py::TestSpark::test_spark"
] |
[
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_explode_to_unnest",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular",
"tests/dialects/test_presto.py::TestPresto::test_json",
"tests/dialects/test_presto.py::TestPresto::test_match_recognize",
"tests/dialects/test_presto.py::TestPresto::test_presto",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_presto.py::TestPresto::test_unnest",
"tests/dialects/test_spark.py::TestSpark::test_bool_or",
"tests/dialects/test_spark.py::TestSpark::test_current_user",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_iif",
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/dialects/test_spark.py::TestSpark::test_transform_query"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-14 17:49:43+00:00
|
mit
| 5,966 |
|
tobymao__sqlglot-2063
|
diff --git a/sqlglot/dialects/spark2.py b/sqlglot/dialects/spark2.py
index b8ba78ba..83d1684c 100644
--- a/sqlglot/dialects/spark2.py
+++ b/sqlglot/dialects/spark2.py
@@ -32,9 +32,13 @@ def _create_sql(self: Hive.Generator, e: exp.Create) -> str:
def _map_sql(self: Hive.Generator, expression: exp.Map) -> str:
- keys = self.sql(expression.args["keys"])
- values = self.sql(expression.args["values"])
- return f"MAP_FROM_ARRAYS({keys}, {values})"
+ keys = expression.args.get("keys")
+ values = expression.args.get("values")
+
+ if not keys or not values:
+ return "MAP()"
+
+ return f"MAP_FROM_ARRAYS({self.sql(keys)}, {self.sql(values)})"
def _parse_as_cast(to_type: str) -> t.Callable[[t.List], exp.Expression]:
|
tobymao/sqlglot
|
d92a5b73895ed3d843f44e4b24b68bf283376ee6
|
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index a984025c..1808f531 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -244,6 +244,23 @@ TBLPROPERTIES (
"SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
)
+ self.validate_all(
+ "MAP(1, 2, 3, 4)",
+ write={
+ "spark": "MAP(1, 2, 3, 4)",
+ "trino": "MAP(ARRAY[1, 3], ARRAY[2, 4])",
+ },
+ )
+ self.validate_all(
+ "MAP()",
+ read={
+ "spark": "MAP()",
+ "trino": "MAP()",
+ },
+ write={
+ "trino": "MAP(ARRAY[], ARRAY[])",
+ },
+ )
self.validate_all(
"SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
read={
|
Empty map error
**Fully reproducible code snippet**
```python
from sqlglot import transpile
transpile("SELECT map()", read="trino", write="spark")
```
Raises the exception:
```
KeyError: 'keys'
```
When it should just transpile as:
```python
'SELECT map()'
```
**Official Documentation**
[Trino map](https://trino.io/docs/current/functions/map.html?highlight=map#map)
[SparkSQL map](https://spark.apache.org/docs/latest/api/sql/index.html#map)
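With the guard in the patch above, the empty map no longer raises; a minimal sketch (expected outputs taken from the test patch):
```python
from sqlglot import transpile

print(transpile("SELECT map()", read="trino", write="spark")[0])
# SELECT MAP()

print(transpile("SELECT MAP()", read="spark", write="trino")[0])
# SELECT MAP(ARRAY[], ARRAY[])
```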
|
0.0
|
d92a5b73895ed3d843f44e4b24b68bf283376ee6
|
[
"tests/dialects/test_spark.py::TestSpark::test_spark"
] |
[
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_current_user",
"tests/dialects/test_spark.py::TestSpark::test_transform_query",
"tests/dialects/test_spark.py::TestSpark::test_bool_or",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_iif"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-15 16:31:52+00:00
|
mit
| 5,967 |
|
tobymao__sqlglot-2083
|
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index efe3c6e8..23b63fe4 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -294,8 +294,20 @@ class ClickHouse(Dialect):
STRUCT_DELIMITER = ("(", ")")
NVL2_SUPPORTED = False
+ STRING_TYPE_MAPPING = {
+ exp.DataType.Type.CHAR: "String",
+ exp.DataType.Type.LONGBLOB: "String",
+ exp.DataType.Type.LONGTEXT: "String",
+ exp.DataType.Type.MEDIUMBLOB: "String",
+ exp.DataType.Type.MEDIUMTEXT: "String",
+ exp.DataType.Type.TEXT: "String",
+ exp.DataType.Type.VARBINARY: "String",
+ exp.DataType.Type.VARCHAR: "String",
+ }
+
TYPE_MAPPING = {
**generator.Generator.TYPE_MAPPING,
+ **STRING_TYPE_MAPPING,
exp.DataType.Type.ARRAY: "Array",
exp.DataType.Type.BIGINT: "Int64",
exp.DataType.Type.DATETIME64: "DateTime64",
@@ -365,6 +377,16 @@ class ClickHouse(Dialect):
"NAMED COLLECTION",
}
+ def datatype_sql(self, expression: exp.DataType) -> str:
+ # String is the standard ClickHouse type, every other variant is just an alias.
+ # Additionally, any supplied length parameter will be ignored.
+ #
+ # https://clickhouse.com/docs/en/sql-reference/data-types/string
+ if expression.this in self.STRING_TYPE_MAPPING:
+ return "String"
+
+ return super().datatype_sql(expression)
+
def safeconcat_sql(self, expression: exp.SafeConcat) -> str:
# Clickhouse errors out if we try to cast a NULL value to TEXT
expression = expression.copy()
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index dea7f26a..eb891a16 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -207,6 +207,9 @@ class DuckDB(Dialect):
return this
+ def _parse_struct_types(self) -> t.Optional[exp.Expression]:
+ return self._parse_field_def()
+
def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
if len(aggregations) == 1:
return super()._pivot_column_names(aggregations)
diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
index eb3a1580..c43c020d 100644
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -22,7 +22,7 @@ def _parse_xml_table(self: parser.Parser) -> exp.XMLTable:
by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
if self._match_text_seq("COLUMNS"):
- columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))
+ columns = self._parse_csv(self._parse_field_def)
return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index 1f5c351b..1470455c 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -666,6 +666,16 @@ class TSQL(Dialect):
return sql
+ def create_sql(self, expression: exp.Create) -> str:
+ kind = self.sql(expression, "kind").upper()
+ exists = expression.args.get("exists")
+
+ if exists and kind == "SCHEMA":
+ schema_name = self.sql(expression, "this")
+ return f"IF NOT EXISTS (SELECT * FROM information_schema.schemata WHERE SCHEMA_NAME = {schema_name}) EXEC('CREATE SCHEMA {schema_name}')"
+
+ return super().create_sql(expression)
+
def offset_sql(self, expression: exp.Offset) -> str:
return f"{super().offset_sql(expression)} ROWS"
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 24f12cba..c1d86954 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -3489,14 +3489,14 @@ class Parser(metaclass=_Parser):
if not self._match(TokenType.L_PAREN):
return this
- args = self._parse_csv(
- lambda: self._parse_constraint()
- or self._parse_column_def(self._parse_field(any_token=True))
- )
+ args = self._parse_csv(lambda: self._parse_constraint() or self._parse_field_def())
self._match_r_paren()
return self.expression(exp.Schema, this=this, expressions=args)
+ def _parse_field_def(self) -> t.Optional[exp.Expression]:
+ return self._parse_column_def(self._parse_field(any_token=True))
+
def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
# column defs are not really columns, they're identifiers
if isinstance(this, exp.Column):
@@ -4506,7 +4506,7 @@ class Parser(metaclass=_Parser):
self._match(TokenType.COLUMN)
exists_column = self._parse_exists(not_=True)
- expression = self._parse_column_def(self._parse_field(any_token=True))
+ expression = self._parse_field_def()
if expression:
expression.set("exists", exists_column)
|
tobymao/sqlglot
|
c5dc9acdfeb715de1c219eb228fe2dda1b9af497
|
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 583be3ef..c014aa0e 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -47,8 +47,10 @@ class TestClickhouse(Validator):
self.validate_identity("position(haystack, needle)")
self.validate_identity("position(haystack, needle, position)")
self.validate_identity("CAST(x AS DATETIME)")
+ self.validate_identity("CAST(x AS VARCHAR(255))", "CAST(x AS String)")
+ self.validate_identity("CAST(x AS BLOB)", "CAST(x AS String)")
self.validate_identity(
- 'SELECT CAST(tuple(1 AS "a", 2 AS "b", 3.0 AS "c").2 AS Nullable(TEXT))'
+ 'SELECT CAST(tuple(1 AS "a", 2 AS "b", 3.0 AS "c").2 AS Nullable(String))'
)
self.validate_identity(
"CREATE TABLE test (id UInt8) ENGINE=AggregatingMergeTree() ORDER BY tuple()"
@@ -95,11 +97,11 @@ class TestClickhouse(Validator):
},
)
self.validate_all(
- "CONCAT(CASE WHEN COALESCE(CAST(a AS TEXT), '') IS NULL THEN COALESCE(CAST(a AS TEXT), '') ELSE CAST(COALESCE(CAST(a AS TEXT), '') AS TEXT) END, CASE WHEN COALESCE(CAST(b AS TEXT), '') IS NULL THEN COALESCE(CAST(b AS TEXT), '') ELSE CAST(COALESCE(CAST(b AS TEXT), '') AS TEXT) END)",
+ "CONCAT(CASE WHEN COALESCE(CAST(a AS String), '') IS NULL THEN COALESCE(CAST(a AS String), '') ELSE CAST(COALESCE(CAST(a AS String), '') AS String) END, CASE WHEN COALESCE(CAST(b AS String), '') IS NULL THEN COALESCE(CAST(b AS String), '') ELSE CAST(COALESCE(CAST(b AS String), '') AS String) END)",
read={"postgres": "CONCAT(a, b)"},
)
self.validate_all(
- "CONCAT(CASE WHEN a IS NULL THEN a ELSE CAST(a AS TEXT) END, CASE WHEN b IS NULL THEN b ELSE CAST(b AS TEXT) END)",
+ "CONCAT(CASE WHEN a IS NULL THEN a ELSE CAST(a AS String) END, CASE WHEN b IS NULL THEN b ELSE CAST(b AS String) END)",
read={"mysql": "CONCAT(a, b)"},
)
self.validate_all(
@@ -233,7 +235,7 @@ class TestClickhouse(Validator):
self.validate_all(
"SELECT {abc: UInt32}, {b: String}, {c: DateTime},{d: Map(String, Array(UInt8))}, {e: Tuple(UInt8, String)}",
write={
- "clickhouse": "SELECT {abc: UInt32}, {b: TEXT}, {c: DATETIME}, {d: Map(TEXT, Array(UInt8))}, {e: Tuple(UInt8, String)}",
+ "clickhouse": "SELECT {abc: UInt32}, {b: String}, {c: DATETIME}, {d: Map(String, Array(UInt8))}, {e: Tuple(UInt8, String)}",
"": "SELECT :abc, :b, :c, :d, :e",
},
)
@@ -283,8 +285,8 @@ class TestClickhouse(Validator):
"clickhouse": """CREATE TABLE example1 (
timestamp DATETIME,
x UInt32 TTL now() + INTERVAL '1' MONTH,
- y TEXT TTL timestamp + INTERVAL '1' DAY,
- z TEXT
+ y String TTL timestamp + INTERVAL '1' DAY,
+ z String
)
ENGINE=MergeTree
ORDER BY tuple()""",
@@ -305,7 +307,7 @@ ORDER BY tuple()""",
"clickhouse": """CREATE TABLE test (
id UInt64,
timestamp DateTime64,
- data TEXT,
+ data String,
max_hits UInt64,
sum_hits UInt64
)
@@ -332,8 +334,8 @@ SET
""",
write={
"clickhouse": """CREATE TABLE test (
- id TEXT,
- data TEXT
+ id String,
+ data String
)
ENGINE=AggregatingMergeTree()
ORDER BY tuple()
@@ -416,7 +418,7 @@ WHERE
"clickhouse": """CREATE TABLE table_for_recompression (
d DATETIME,
key UInt64,
- value TEXT
+ value String
)
ENGINE=MergeTree()
ORDER BY tuple()
@@ -512,9 +514,9 @@ RANGE(MIN discount_start_date MAX discount_end_date)""",
""",
write={
"clickhouse": """CREATE DICTIONARY my_ip_trie_dictionary (
- prefix TEXT,
+ prefix String,
asn UInt32,
- cca2 TEXT DEFAULT '??'
+ cca2 String DEFAULT '??'
)
PRIMARY KEY (prefix)
SOURCE(CLICKHOUSE(
@@ -540,7 +542,7 @@ LIFETIME(MIN 0 MAX 3600)""",
write={
"clickhouse": """CREATE DICTIONARY polygons_test_dictionary (
key Array(Array(Array(Tuple(Float64, Float64)))),
- name TEXT
+ name String
)
PRIMARY KEY (key)
SOURCE(CLICKHOUSE(
diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index 100f4a06..5f2afd75 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -78,7 +78,7 @@ class TestDialect(Validator):
"CAST(a AS TEXT)",
write={
"bigquery": "CAST(a AS STRING)",
- "clickhouse": "CAST(a AS TEXT)",
+ "clickhouse": "CAST(a AS String)",
"drill": "CAST(a AS VARCHAR)",
"duckdb": "CAST(a AS TEXT)",
"mysql": "CAST(a AS CHAR)",
@@ -116,7 +116,7 @@ class TestDialect(Validator):
"CAST(a AS VARBINARY(4))",
write={
"bigquery": "CAST(a AS BYTES)",
- "clickhouse": "CAST(a AS VARBINARY(4))",
+ "clickhouse": "CAST(a AS String)",
"duckdb": "CAST(a AS BLOB(4))",
"mysql": "CAST(a AS VARBINARY(4))",
"hive": "CAST(a AS BINARY(4))",
@@ -133,7 +133,7 @@ class TestDialect(Validator):
self.validate_all(
"CAST(MAP('a', '1') AS MAP(TEXT, TEXT))",
write={
- "clickhouse": "CAST(map('a', '1') AS Map(TEXT, TEXT))",
+ "clickhouse": "CAST(map('a', '1') AS Map(String, String))",
},
)
self.validate_all(
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index c800e589..99a331ac 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -508,6 +508,10 @@ class TestDuckDB(Validator):
self.validate_identity("CAST(x AS INT128)")
self.validate_identity("CAST(x AS DOUBLE)")
self.validate_identity("CAST(x AS DECIMAL(15, 4))")
+ self.validate_identity("CAST(x AS STRUCT(number BIGINT))")
+ self.validate_identity(
+ "CAST(ROW(1, ROW(1)) AS STRUCT(number BIGINT, row STRUCT(number BIGINT)))"
+ )
self.validate_all("CAST(x AS NUMERIC(1, 2))", write={"duckdb": "CAST(x AS DECIMAL(1, 2))"})
self.validate_all("CAST(x AS HUGEINT)", write={"duckdb": "CAST(x AS INT128)"})
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 530522da..11740239 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -608,7 +608,7 @@ class TestPostgres(Validator):
"a || b",
write={
"": "a || b",
- "clickhouse": "CONCAT(CAST(a AS TEXT), CAST(b AS TEXT))",
+ "clickhouse": "CONCAT(CAST(a AS String), CAST(b AS String))",
"duckdb": "a || b",
"postgres": "a || b",
"presto": "CONCAT(CAST(a AS VARCHAR), CAST(b AS VARCHAR))",
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index a3fba58f..8d3c7d70 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -410,6 +410,12 @@ class TestTSQL(Validator):
)
def test_ddl(self):
+ self.validate_all(
+ "IF NOT EXISTS (SELECT * FROM information_schema.schemata WHERE SCHEMA_NAME = foo) EXEC('CREATE SCHEMA foo')",
+ read={
+ "": "CREATE SCHEMA IF NOT EXISTS foo",
+ },
+ )
self.validate_all(
"CREATE TABLE #mytemp (a INTEGER, b CHAR(2), c TIME(4), d FLOAT(24))",
write={
|
TSQL Dialect Yields Invalid Syntax when Creating Indexes and Replacing Data
Creating Schemas:
`>>> sqlglot.transpile('CREATE SCHEMA IF NOT EXISTS "sqlmesh_example"', 'mysql', 'tsql')`
`['CREATE SCHEMA IF NOT EXISTS "sqlmesh_example"']`
[Docs](https://learn.microsoft.com/en-us/sql/t-sql/statements/create-schema-transact-sql?view=sql-server-ver16)
Expected result:
`IF NOT EXISTS (SELECT * FROM information_schema.schemata WHERE SCHEMA_NAME = 'sqlmesh_example') EXEC('CREATE SCHEMA "sqlmesh_example"')`
Replacing Data:
`>>> sqlglot.transpile("""INSERT INTO "test_table" ("a", "ds") REPLACE WHERE "ds" BETWEEN '2022-01-01' AND '2022-01-02' SELECT * FROM (SELECT CAST("a" AS INTEGER) AS "a", CAST("ds" AS TEXT) AS "ds" FROM (VALUES (1, '2022-01-01'), (2, '2022-01-02')) AS "test_table"("a", "ds")) AS "_subquery" WHERE "ds" BETWEEN '2022-01-01' AND '2022-01-02'""", 'mysql', 'tsql')`
`['INSERT INTO "test_table" (\'a\', \'ds\') REPLACE WHERE \'ds\' BETWEEN \'2022-01-01\' AND \'2022-01-02\' SELECT * FROM (SELECT CAST(\'a\' AS INTEGER) AS "a", CAST(\'ds\' AS TEXT) AS "ds" FROM (VALUES (1, \'2022-01-01\'), (2, \'2022-01-02\')) AS "test_table"("a", "ds")) AS "_subquery" WHERE \'ds\' BETWEEN \'2022-01-01\' AND \'2022-01-02\'']`
Not sure which SQL Server construct would be the best fit, but here are a few docs:
[Update/Delete/Insert](https://learn.microsoft.com/en-us/sql/odbc/reference/develop-app/update-delete-and-insert-statements?view=sql-server-ver16)
[Merge](https://learn.microsoft.com/en-us/sql/t-sql/statements/merge-transact-sql?view=sql-server-ver16)
[Switch Partition](https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-table-transact-sql?view=sql-server-ver16#c-switch-partitions-between-tables)
Possible expected result:
`DELETE FROM "test_table" WHERE "ds" BETWEEN '2022-01-01' AND '2022-01-02'`
`INSERT INTO "test_table" ("a", "ds") SELECT * FROM (SELECT CAST("a" AS INTEGER) AS "a", CAST("ds" AS TEXT) AS "ds" FROM (VALUES (1, '2022-01-01'), (2, '2022-01-02')) AS "test_table"("a", "ds")) AS "_subquery" WHERE "ds" BETWEEN '2022-01-01' AND '2022-01-02'`
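For the schema-creation half, the patch above emulates IF NOT EXISTS via information_schema plus EXEC; a minimal sketch (expected outputs taken from the test patch):
```python
import sqlglot

print(sqlglot.transpile("CREATE SCHEMA IF NOT EXISTS foo", write="tsql")[0])
# IF NOT EXISTS (SELECT * FROM information_schema.schemata WHERE SCHEMA_NAME = foo) EXEC('CREATE SCHEMA foo')

# The same patch also canonicalizes ClickHouse string types:
print(sqlglot.transpile("CAST(x AS VARCHAR(255))", read="clickhouse", write="clickhouse")[0])
# CAST(x AS String)
```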
|
0.0
|
c5dc9acdfeb715de1c219eb228fe2dda1b9af497
|
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_dialect.py::TestDialect::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl"
] |
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary",
"tests/dialects/test_dialect.py::TestDialect::test_alias",
"tests/dialects/test_dialect.py::TestDialect::test_array",
"tests/dialects/test_dialect.py::TestDialect::test_count_if",
"tests/dialects/test_dialect.py::TestDialect::test_cross_join",
"tests/dialects/test_dialect.py::TestDialect::test_decode",
"tests/dialects/test_dialect.py::TestDialect::test_enum",
"tests/dialects/test_dialect.py::TestDialect::test_get_or_raise",
"tests/dialects/test_dialect.py::TestDialect::test_hash_comments",
"tests/dialects/test_dialect.py::TestDialect::test_if_null",
"tests/dialects/test_dialect.py::TestDialect::test_json",
"tests/dialects/test_dialect.py::TestDialect::test_lateral_subquery",
"tests/dialects/test_dialect.py::TestDialect::test_limit",
"tests/dialects/test_dialect.py::TestDialect::test_logarithm",
"tests/dialects/test_dialect.py::TestDialect::test_merge",
"tests/dialects/test_dialect.py::TestDialect::test_nullsafe_eq",
"tests/dialects/test_dialect.py::TestDialect::test_nullsafe_neq",
"tests/dialects/test_dialect.py::TestDialect::test_nvl2",
"tests/dialects/test_dialect.py::TestDialect::test_operators",
"tests/dialects/test_dialect.py::TestDialect::test_order_by",
"tests/dialects/test_dialect.py::TestDialect::test_set_operators",
"tests/dialects/test_dialect.py::TestDialect::test_substring",
"tests/dialects/test_dialect.py::TestDialect::test_time",
"tests/dialects/test_dialect.py::TestDialect::test_transactions",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-17 12:43:38+00:00
|
mit
| 5,968 |
|
tobymao__sqlglot-2093
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index c1d86954..14c5e964 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -578,7 +578,7 @@ class Parser(metaclass=_Parser):
TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder),
TokenType.PARAMETER: lambda self: self._parse_parameter(),
TokenType.COLON: lambda self: self.expression(exp.Placeholder, this=self._prev.text)
- if self._match_set((TokenType.NUMBER, TokenType.VAR))
+ if self._match(TokenType.NUMBER) or self._match_set(self.ID_VAR_TOKENS)
else None,
}
|
tobymao/sqlglot
|
1da653f64f91556e5a32b5a513f5404886da0c37
|
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 694cd68b..fb8f8fbb 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -6,6 +6,7 @@ class TestOracle(Validator):
dialect = "oracle"
def test_oracle(self):
+ self.validate_identity("SELECT :OBJECT")
self.validate_identity("SELECT * FROM t FOR UPDATE")
self.validate_identity("SELECT * FROM t FOR UPDATE WAIT 5")
self.validate_identity("SELECT * FROM t FOR UPDATE NOWAIT")
|
Unable to parse the oracle query when ":OBJECT" bind is present
Hello,
The parser throws an error when a ":OBJECT" bind is present in the Oracle query.
**Code for your reference -**
```
import sqlglot.expressions as exp
query ="""
SELECT A.COL1, B.COL2 FROM SCHEMA1.TABLE1 A, SCHEMA2.TABLE2 B WHERE A.COL1 = :OBJECT AND B.COL2 = :SOMEVARIABLE
"""
expressions = sqlglot.parse_one(query, read='oracle')
x = list(expressions.find_all(exp.Table))
for i in x:
listTables = str(i).split(' ')[0]
print(listTables)
```
The above code works fine if the bind name is anything other than OBJECT; if ':OBJECT' is present in the query, parsing fails.
Please add support for ':OBJECT' in query parsing.
Unable to parse the oracle query when "#" is present in the column names
Hello,
The parser throws an error when a "#" character is present in a column name.
**Code for your reference -**
```
import sqlglot.expressions as exp
query ="""
SELECT A.COL#, B.COL2 FROM SCHEMA1.TABLE1 A
"""
expressions = sqlglot.parse_one(query, read='oracle')
x = list(expressions.find_all(exp.Table))
for i in x:
listTables = str(i).split(' ')[0]
print(listTables)
```
The above code fails because the parser cannot process '#' and throws an error.
Please add support for '#' in query parsing.
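With the patch above, :OBJECT is matched like any other identifier-ish bind name; a minimal sketch (per the identity test in the test patch):
```python
import sqlglot

print(sqlglot.parse_one("SELECT :OBJECT", read="oracle").sql(dialect="oracle"))
# SELECT :OBJECT
```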
|
0.0
|
1da653f64f91556e5a32b5a513f5404886da0c37
|
[
"tests/dialects/test_oracle.py::TestOracle::test_oracle"
] |
[
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-08-18 10:25:21+00:00
|
mit
| 5,969 |
|
tobymao__sqlglot-2157
|
diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
index 27c1f62b..279ed314 100644
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -133,7 +133,6 @@ class Oracle(Dialect):
),
exp.Group: transforms.preprocess([transforms.unalias_group]),
exp.ILike: no_ilike_sql,
- exp.Coalesce: rename_func("NVL"),
exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 214998c7..f9f8bd4d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -3154,12 +3154,12 @@ class Parser(metaclass=_Parser):
if len(parts) == 2:
if unit:
- # this is not actually a unit, it's something else
+ # This is not actually a unit, it's something else (e.g. a "window side")
unit = None
self._retreat(self._index - 1)
- else:
- this = exp.Literal.string(parts[0])
- unit = self.expression(exp.Var, this=parts[1])
+
+ this = exp.Literal.string(parts[0])
+ unit = self.expression(exp.Var, this=parts[1])
return self.expression(exp.Interval, this=this, unit=unit)
@@ -4151,7 +4151,7 @@ class Parser(metaclass=_Parser):
key = self._parse_column()
self._match_set((TokenType.COLON, TokenType.COMMA))
self._match_text_seq("VALUE")
- value = self._parse_column()
+ value = self._parse_bitwise()
if not key and not value:
return None
|
tobymao/sqlglot
|
585d0bfcbd40125492640480c890af328d8bc51f
|
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 7d9ab900..01a9ca3a 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -6,6 +6,8 @@ class TestOracle(Validator):
dialect = "oracle"
def test_oracle(self):
+ self.validate_identity("SELECT JSON_OBJECT('name': first_name || ' ' || last_name) FROM t")
+ self.validate_identity("COALESCE(c1, c2, c3)")
self.validate_identity("SELECT * FROM TABLE(foo)")
self.validate_identity("SELECT a$x#b")
self.validate_identity("SELECT :OBJECT")
@@ -43,7 +45,7 @@ class TestOracle(Validator):
"NVL(NULL, 1)",
write={
"": "COALESCE(NULL, 1)",
- "oracle": "NVL(NULL, 1)",
+ "oracle": "COALESCE(NULL, 1)",
},
)
self.validate_all(
diff --git a/tests/test_parser.py b/tests/test_parser.py
index a853a757..7135dd89 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -711,3 +711,11 @@ class TestParser(unittest.TestCase):
parse_one("SELECT a, b ?? c ?? 'No Data' FROM z").sql(),
"SELECT a, COALESCE(COALESCE(b, c), 'No Data') FROM z",
)
+
+ def test_parse_intervals(self):
+ ast = parse_one(
+ "SELECT a FROM tbl WHERE a <= DATE '1998-12-01' - INTERVAL '71 days' GROUP BY b"
+ )
+
+ self.assertEqual(ast.find(exp.Interval).this.sql(), "'71'")
+ self.assertEqual(ast.find(exp.Interval).unit.assert_is(exp.Var).sql(), "days")
|
Interval parsing does not meet expectations
When another keyword (e.g. GROUP BY) follows the interval, the interval literal is not normalized into the standard `INTERVAL '71' days` form.
Code example to reproduce:
`expression = sqlglot.parse_one("""select a from tbl where a <= date '1998-12-01' - interval '71 days' group by b;""")`
result: `SELECT a FROM tbl WHERE a <= CAST('1998-12-01' AS DATE) - INTERVAL '71 days' GROUP BY b`
`expression = sqlglot.parse_one("""select a from tbl where a <= date '1998-12-01' - interval '71 days';""")`
result: `SELECT a FROM tbl WHERE a <= CAST('1998-12-01' AS DATE) - INTERVAL '71' days`
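A minimal sketch of the fixed behavior, mirroring the new parser test above:
```python
from sqlglot import exp, parse_one

ast = parse_one(
    "SELECT a FROM tbl WHERE a <= DATE '1998-12-01' - INTERVAL '71 days' GROUP BY b"
)
interval = ast.find(exp.Interval)
print(interval.this.sql())  # '71'
print(interval.unit.sql())  # days
```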
|
0.0
|
585d0bfcbd40125492640480c890af328d8bc51f
|
[
"tests/dialects/test_oracle.py::TestOracle::test_oracle",
"tests/test_parser.py::TestParser::test_parse_intervals"
] |
[
"tests/dialects/test_oracle.py::TestOracle::test_xml_table",
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/test_parser.py::TestParser::test_set_expression",
"tests/test_parser.py::TestParser::test_identify",
"tests/test_parser.py::TestParser::test_comments_insert",
"tests/test_parser.py::TestParser::test_comments_delete",
"tests/test_parser.py::TestParser::test_comments_update",
"tests/test_parser.py::TestParser::test_comments_insert_cte",
"tests/test_parser.py::TestParser::test_comments_select_cte",
"tests/test_parser.py::TestParser::test_pretty_config_override",
"tests/test_parser.py::TestParser::test_unnest_projection",
"tests/test_parser.py::TestParser::test_rename_table",
"tests/test_parser.py::TestParser::test_parse_into_errors",
"tests/test_parser.py::TestParser::test_parse_errors",
"tests/test_parser.py::TestParser::test_comment_error_r",
"tests/test_parser.py::TestParser::test_multi",
"tests/test_parser.py::TestParser::test_parse_floats",
"tests/test_parser.py::TestParser::test_space",
"tests/test_parser.py::TestParser::test_missing_by",
"tests/test_parser.py::TestParser::test_comments_update_cte",
"tests/test_parser.py::TestParser::test_parse_into_error",
"tests/test_parser.py::TestParser::test_parse_nested",
"tests/test_parser.py::TestParser::test_parameter",
"tests/test_parser.py::TestParser::test_parse_empty",
"tests/test_parser.py::TestParser::test_var",
"tests/test_parser.py::TestParser::test_column",
"tests/test_parser.py::TestParser::test_table",
"tests/test_parser.py::TestParser::test_comment_error_n",
"tests/test_parser.py::TestParser::test_lambda_struct",
"tests/test_parser.py::TestParser::test_float",
"tests/test_parser.py::TestParser::test_command",
"tests/test_parser.py::TestParser::test_parse_properties",
"tests/test_parser.py::TestParser::test_create_table_error",
"tests/test_parser.py::TestParser::test_parse_into",
"tests/test_parser.py::TestParser::test_parse_terse_coalesce",
"tests/test_parser.py::TestParser::test_select",
"tests/test_parser.py::TestParser::test_expression",
"tests/test_parser.py::TestParser::test_type_literals",
"tests/test_parser.py::TestParser::test_comments_delete_cte",
"tests/test_parser.py::TestParser::test_transactions",
"tests/test_parser.py::TestParser::test_union_order",
"tests/test_parser.py::TestParser::test_pivot_columns",
"tests/test_parser.py::TestParser::test_unary_plus",
"tests/test_parser.py::TestParser::test_comments_select"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-05 10:19:50+00:00
|
mit
| 5,970 |
|
tobymao__sqlglot-2160
|
diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py
index 2be1a625..163cc137 100644
--- a/sqlglot/dialects/teradata.py
+++ b/sqlglot/dialects/teradata.py
@@ -95,6 +95,9 @@ class Teradata(Dialect):
STATEMENT_PARSERS = {
**parser.Parser.STATEMENT_PARSERS,
+ TokenType.DATABASE: lambda self: self.expression(
+ exp.Use, this=self._parse_table(schema=False)
+ ),
TokenType.REPLACE: lambda self: self._parse_create(),
}
@@ -165,6 +168,7 @@ class Teradata(Dialect):
exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
exp.ToChar: lambda self, e: self.function_fallback_sql(e),
+ exp.Use: lambda self, e: f"DATABASE {self.sql(e, 'this')}",
}
def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 214998c7..f9f8bd4d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -3154,12 +3154,12 @@ class Parser(metaclass=_Parser):
if len(parts) == 2:
if unit:
- # this is not actually a unit, it's something else
+ # This is not actually a unit, it's something else (e.g. a "window side")
unit = None
self._retreat(self._index - 1)
- else:
- this = exp.Literal.string(parts[0])
- unit = self.expression(exp.Var, this=parts[1])
+
+ this = exp.Literal.string(parts[0])
+ unit = self.expression(exp.Var, this=parts[1])
return self.expression(exp.Interval, this=this, unit=unit)
@@ -4151,7 +4151,7 @@ class Parser(metaclass=_Parser):
key = self._parse_column()
self._match_set((TokenType.COLON, TokenType.COMMA))
self._match_text_seq("VALUE")
- value = self._parse_column()
+ value = self._parse_bitwise()
if not key and not value:
return None
|
tobymao/sqlglot
|
f0bddde63d47a620592261e1a1810aabcd8ec800
|
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 27845ed7..01a9ca3a 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -6,6 +6,7 @@ class TestOracle(Validator):
dialect = "oracle"
def test_oracle(self):
+ self.validate_identity("SELECT JSON_OBJECT('name': first_name || ' ' || last_name) FROM t")
self.validate_identity("COALESCE(c1, c2, c3)")
self.validate_identity("SELECT * FROM TABLE(foo)")
self.validate_identity("SELECT a$x#b")
diff --git a/tests/dialects/test_teradata.py b/tests/dialects/test_teradata.py
index 4d322419..32bdc719 100644
--- a/tests/dialects/test_teradata.py
+++ b/tests/dialects/test_teradata.py
@@ -4,6 +4,18 @@ from tests.dialects.test_dialect import Validator
class TestTeradata(Validator):
dialect = "teradata"
+ def test_teradata(self):
+ self.validate_all(
+ "DATABASE tduser",
+ read={
+ "databricks": "USE tduser",
+ },
+ write={
+ "databricks": "USE tduser",
+ "teradata": "DATABASE tduser",
+ },
+ )
+
def test_translate(self):
self.validate_all(
"TRANSLATE(x USING LATIN_TO_UNICODE)",
diff --git a/tests/test_parser.py b/tests/test_parser.py
index a853a757..7135dd89 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -711,3 +711,11 @@ class TestParser(unittest.TestCase):
parse_one("SELECT a, b ?? c ?? 'No Data' FROM z").sql(),
"SELECT a, COALESCE(COALESCE(b, c), 'No Data') FROM z",
)
+
+ def test_parse_intervals(self):
+ ast = parse_one(
+ "SELECT a FROM tbl WHERE a <= DATE '1998-12-01' - INTERVAL '71 days' GROUP BY b"
+ )
+
+ self.assertEqual(ast.find(exp.Interval).this.sql(), "'71'")
+ self.assertEqual(ast.find(exp.Interval).unit.assert_is(exp.Var).sql(), "days")
|
Teradata to Databricks transpilation issue
When working on Teradata to Databricks conversion, I ran into an issue where a command like "DATABASE databasename", which is equivalent to "USE databasename" on the Databricks side, is not getting transpiled correctly. The final output still shows the original command.
**Sample code** below
```
sql="""
DATABASE tduser;
"""
try:
new_query = sqlglot.transpile(sql, read='teradata', write='databricks', pretty=True)[0]
print(new_query)
except sqlglot.errors.ParseError as error:
print(traceback.format_exc())
print(error.errors)
```
**Expected output:**
USE tduser
**Official Documentation**
https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-usedb.html
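A sketch of the expected round-trip once `DATABASE` is mapped to `USE`, mirroring the Teradata tests in this record:
```python
import sqlglot

# Teradata's DATABASE statement transpiles to Databricks USE, and back.
print(sqlglot.transpile("DATABASE tduser", read="teradata", write="databricks")[0])
# USE tduser
print(sqlglot.transpile("USE tduser", read="databricks", write="teradata")[0])
# DATABASE tduser
```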
|
0.0
|
f0bddde63d47a620592261e1a1810aabcd8ec800
|
[
"tests/dialects/test_teradata.py::TestTeradata::test_teradata",
"tests/test_parser.py::TestParser::test_parse_intervals",
"tests/dialects/test_oracle.py::TestOracle::test_oracle"
] |
[
"tests/dialects/test_teradata.py::TestTeradata::test_translate",
"tests/dialects/test_teradata.py::TestTeradata::test_statistics",
"tests/dialects/test_teradata.py::TestTeradata::test_cast",
"tests/dialects/test_teradata.py::TestTeradata::test_create",
"tests/dialects/test_teradata.py::TestTeradata::test_datatype",
"tests/dialects/test_teradata.py::TestTeradata::test_insert",
"tests/dialects/test_teradata.py::TestTeradata::test_abbrev",
"tests/dialects/test_teradata.py::TestTeradata::test_update",
"tests/dialects/test_teradata.py::TestTeradata::test_mod",
"tests/test_parser.py::TestParser::test_comments_delete_cte",
"tests/test_parser.py::TestParser::test_comment_error_n",
"tests/test_parser.py::TestParser::test_parse_errors",
"tests/test_parser.py::TestParser::test_comments_insert_cte",
"tests/test_parser.py::TestParser::test_parse_properties",
"tests/test_parser.py::TestParser::test_multi",
"tests/test_parser.py::TestParser::test_rename_table",
"tests/test_parser.py::TestParser::test_pivot_columns",
"tests/test_parser.py::TestParser::test_lambda_struct",
"tests/test_parser.py::TestParser::test_parse_empty",
"tests/test_parser.py::TestParser::test_parse_nested",
"tests/test_parser.py::TestParser::test_table",
"tests/test_parser.py::TestParser::test_comment_error_r",
"tests/test_parser.py::TestParser::test_comments_select_cte",
"tests/test_parser.py::TestParser::test_create_table_error",
"tests/test_parser.py::TestParser::test_command",
"tests/test_parser.py::TestParser::test_parameter",
"tests/test_parser.py::TestParser::test_parse_into",
"tests/test_parser.py::TestParser::test_var",
"tests/test_parser.py::TestParser::test_transactions",
"tests/test_parser.py::TestParser::test_parse_floats",
"tests/test_parser.py::TestParser::test_comments_select",
"tests/test_parser.py::TestParser::test_parse_into_error",
"tests/test_parser.py::TestParser::test_union_order",
"tests/test_parser.py::TestParser::test_expression",
"tests/test_parser.py::TestParser::test_identify",
"tests/test_parser.py::TestParser::test_type_literals",
"tests/test_parser.py::TestParser::test_float",
"tests/test_parser.py::TestParser::test_comments_insert",
"tests/test_parser.py::TestParser::test_select",
"tests/test_parser.py::TestParser::test_space",
"tests/test_parser.py::TestParser::test_parse_into_errors",
"tests/test_parser.py::TestParser::test_set_expression",
"tests/test_parser.py::TestParser::test_missing_by",
"tests/test_parser.py::TestParser::test_pretty_config_override",
"tests/test_parser.py::TestParser::test_comments_update",
"tests/test_parser.py::TestParser::test_comments_update_cte",
"tests/test_parser.py::TestParser::test_unnest_projection",
"tests/test_parser.py::TestParser::test_unary_plus",
"tests/test_parser.py::TestParser::test_column",
"tests/test_parser.py::TestParser::test_parse_terse_coalesce",
"tests/test_parser.py::TestParser::test_comments_delete",
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-05 12:29:19+00:00
|
mit
| 5,971 |
|
tobymao__sqlglot-2199
|
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 190a3769..46e3f193 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -119,7 +119,7 @@ class MySQL(Dialect):
QUOTES = ["'", '"']
COMMENTS = ["--", "#", ("/*", "*/")]
IDENTIFIERS = ["`"]
- STRING_ESCAPES = ["'", "\\"]
+ STRING_ESCAPES = ["'", '"', "\\"]
BIT_STRINGS = [("b'", "'"), ("B'", "'"), ("0b", "")]
HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", "")]
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 7a2409e6..ce255c34 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -1168,7 +1168,11 @@ class Tokenizer(metaclass=_Tokenizer):
escapes = self._STRING_ESCAPES if escapes is None else escapes
while True:
- if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
+ if (
+ self._char in escapes
+ and (self._peek == delimiter or self._peek in escapes)
+ and (self._char not in self._QUOTES or self._char == self._peek)
+ ):
if self._peek == delimiter:
text += self._peek
else:
|
tobymao/sqlglot
|
e2bb427ee1f20741857e0dfabc13847ee6324c74
|
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index c4cb4b75..a0fc16da 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -243,6 +243,15 @@ class TestMySQL(Validator):
)
def test_escape(self):
+ self.validate_identity("""'"abc"'""")
+ self.validate_identity(
+ r"'\'a'",
+ "'''a'",
+ )
+ self.validate_identity(
+ '''"'abc'"''',
+ "'''abc'''",
+ )
self.validate_all(
r"'a \' b '' '",
write={
|
failed to parse mysql string literals
sqlglot treats both `'` and `\` as string escapes in the MySQL dialect, and it fails to parse SQL such as:
`select "'abc'";`
**Fully reproducible code snippet**
```
import sqlglot
sqlglot.transpile('select "\'abc\'";', read='mysql')
```
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/robert/workspace/sqlglot/.venv/lib/python3.9/site-packages/sqlglot/__init__.py", line 125, in parse_one
result = dialect.parse(sql, **opts)
File "/Users/robert/workspace/sqlglot/.venv/lib/python3.9/site-packages/sqlglot/dialects/dialect.py", line 288, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File "/Users/robert/workspace/sqlglot/.venv/lib/python3.9/site-packages/sqlglot/dialects/dialect.py", line 302, in tokenize
return self.tokenizer.tokenize(sql)
File "/Users/robert/workspace/sqlglot/.venv/lib/python3.9/site-packages/sqlglot/tokens.py", line 827, in tokenize
raise TokenError(f"Error tokenizing '{context}'") from e
sqlglot.errors.TokenError: Error tokenizing 'select "'abc'"'
```
while in mysql:
```
mysql> select "'abc'";
+-------+
| 'abc' |
+-------+
| 'abc' |
+-------+
1 row in set (0.00 sec)
```
**Official Documentation**
According to the official documentation, the doubled quote is dedicated literal syntax rather than a general string escape character:
- A ' inside a string quoted with ' may be written as ''.
- A " inside a string quoted with " may be written as "".
https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
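A sketch of the expected behavior after the tokenizer fix, mirroring the `test_escape` cases in this record:
```python
import sqlglot

# A double-quoted MySQL string containing single quotes now tokenizes and
# normalizes to the single-quoted form with doubled quotes.
print(sqlglot.transpile('select "\'abc\'"', read="mysql")[0])
# SELECT '''abc'''
```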
|
0.0
|
e2bb427ee1f20741857e0dfabc13847ee6324c74
|
[
"tests/dialects/test_mysql.py::TestMySQL::test_escape"
] |
[
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_identity",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_types"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-12 17:51:34+00:00
|
mit
| 5,972 |
|
tobymao__sqlglot-2212
|
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 121698af..025e66fd 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -130,6 +130,7 @@ class MySQL(Dialect):
"ENUM": TokenType.ENUM,
"FORCE": TokenType.FORCE,
"IGNORE": TokenType.IGNORE,
+ "LOCK TABLES": TokenType.COMMAND,
"LONGBLOB": TokenType.LONGBLOB,
"LONGTEXT": TokenType.LONGTEXT,
"MEDIUMBLOB": TokenType.MEDIUMBLOB,
@@ -142,6 +143,7 @@ class MySQL(Dialect):
"START": TokenType.BEGIN,
"SIGNED": TokenType.BIGINT,
"SIGNED INTEGER": TokenType.BIGINT,
+ "UNLOCK TABLES": TokenType.COMMAND,
"UNSIGNED": TokenType.UBIGINT,
"UNSIGNED INTEGER": TokenType.UBIGINT,
"YEAR": TokenType.YEAR,
@@ -373,7 +375,7 @@ class MySQL(Dialect):
self._match_texts({"INDEX", "KEY"})
this = self._parse_id_var(any_token=False)
- type_ = self._match(TokenType.USING) and self._advance_any() and self._prev.text
+ index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text
schema = self._parse_schema()
options = []
@@ -413,7 +415,7 @@ class MySQL(Dialect):
this=this,
schema=schema,
kind=kind,
- type=type_,
+ index_type=index_type,
options=options,
)
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 98afddc8..3b39efb8 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1321,7 +1321,13 @@ class GeneratedAsIdentityColumnConstraint(ColumnConstraintKind):
# https://dev.mysql.com/doc/refman/8.0/en/create-table.html
class IndexColumnConstraint(ColumnConstraintKind):
- arg_types = {"this": False, "schema": True, "kind": False, "type": False, "options": False}
+ arg_types = {
+ "this": False,
+ "schema": True,
+ "kind": False,
+ "index_type": False,
+ "options": False,
+ }
class InlineLengthColumnConstraint(ColumnConstraintKind):
@@ -1354,7 +1360,7 @@ class TitleColumnConstraint(ColumnConstraintKind):
class UniqueColumnConstraint(ColumnConstraintKind):
- arg_types = {"this": False}
+ arg_types = {"this": False, "index_type": False}
class UppercaseColumnConstraint(ColumnConstraintKind):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 399b48bc..d086e8ab 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -705,7 +705,9 @@ class Generator:
def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str:
this = self.sql(expression, "this")
this = f" {this}" if this else ""
- return f"UNIQUE{this}"
+ index_type = expression.args.get("index_type")
+ index_type = f" USING {index_type}" if index_type else ""
+ return f"UNIQUE{this}{index_type}"
def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
return self.sql(expression, "this")
@@ -2740,13 +2742,13 @@ class Generator:
kind = f"{kind} INDEX" if kind else "INDEX"
this = self.sql(expression, "this")
this = f" {this}" if this else ""
- type_ = self.sql(expression, "type")
- type_ = f" USING {type_}" if type_ else ""
+ index_type = self.sql(expression, "index_type")
+ index_type = f" USING {index_type}" if index_type else ""
schema = self.sql(expression, "schema")
schema = f" {schema}" if schema else ""
options = self.expressions(expression, key="options", sep=" ")
options = f" {options}" if options else ""
- return f"{kind}{this}{type_}{schema}{options}"
+ return f"{kind}{this}{index_type}{schema}{options}"
def nvl2_sql(self, expression: exp.Nvl2) -> str:
if self.NVL2_SUPPORTED:
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index a3dcc493..a4b2a015 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -848,6 +848,8 @@ class Parser(metaclass=_Parser):
WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER}
WINDOW_SIDES = {"FOLLOWING", "PRECEDING"}
+ FETCH_TOKENS = ID_VAR_TOKENS - {TokenType.ROW, TokenType.ROWS, TokenType.PERCENT}
+
ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY}
DISTINCT_TOKENS = {TokenType.DISTINCT}
@@ -2984,7 +2986,7 @@ class Parser(metaclass=_Parser):
direction = self._match_set((TokenType.FIRST, TokenType.NEXT))
direction = self._prev.text if direction else "FIRST"
- count = self._parse_number()
+ count = self._parse_field(tokens=self.FETCH_TOKENS)
percent = self._match(TokenType.PERCENT)
self._match_set((TokenType.ROW, TokenType.ROWS))
@@ -3815,7 +3817,9 @@ class Parser(metaclass=_Parser):
def _parse_unique(self) -> exp.UniqueColumnConstraint:
self._match_text_seq("KEY")
return self.expression(
- exp.UniqueColumnConstraint, this=self._parse_schema(self._parse_id_var(any_token=False))
+ exp.UniqueColumnConstraint,
+ this=self._parse_schema(self._parse_id_var(any_token=False)),
+ index_type=self._match(TokenType.USING) and self._advance_any() and self._prev.text,
)
def _parse_key_constraint_options(self) -> t.List[str]:
|
tobymao/sqlglot
|
416b341c45cd0a766a9919cc5a11b5f90dc3b3f3
|
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index 802a073d..9deccbfb 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -19,6 +19,7 @@ class TestMySQL(Validator):
},
)
+ self.validate_identity("CREATE TABLE foo (a BIGINT, UNIQUE (b) USING BTREE)")
self.validate_identity("CREATE TABLE foo (id BIGINT)")
self.validate_identity("CREATE TABLE 00f (1d BIGINT)")
self.validate_identity("UPDATE items SET items.price = 0 WHERE items.id >= 5 LIMIT 10")
@@ -107,9 +108,8 @@ class TestMySQL(Validator):
)
def test_identity(self):
- self.validate_identity(
- "SELECT * FROM x ORDER BY BINARY a", "SELECT * FROM x ORDER BY CAST(a AS BINARY)"
- )
+ self.validate_identity("UNLOCK TABLES")
+ self.validate_identity("LOCK TABLES `app_fields` WRITE")
self.validate_identity("SELECT 1 XOR 0")
self.validate_identity("SELECT 1 && 0", "SELECT 1 AND 0")
self.validate_identity("SELECT /*+ BKA(t1) NO_BKA(t2) */ * FROM t1 INNER JOIN t2")
@@ -133,6 +133,9 @@ class TestMySQL(Validator):
self.validate_identity("CREATE TABLE A LIKE B")
self.validate_identity("SELECT * FROM t1, t2 FOR SHARE OF t1, t2 SKIP LOCKED")
self.validate_identity("SELECT a || b", "SELECT a OR b")
+ self.validate_identity(
+ "SELECT * FROM x ORDER BY BINARY a", "SELECT * FROM x ORDER BY CAST(a AS BINARY)"
+ )
self.validate_identity(
"""SELECT * FROM foo WHERE 3 MEMBER OF(JSON_EXTRACT(info, '$.value'))"""
)
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 2dfd1796..7ebe0173 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -50,6 +50,10 @@ class TestOracle(Validator):
"SELECT UNIQUE col1, col2 FROM table",
"SELECT DISTINCT col1, col2 FROM table",
)
+ self.validate_identity(
+ "SELECT * FROM T ORDER BY I OFFSET nvl(:variable1, 10) ROWS FETCH NEXT nvl(:variable2, 10) ROWS ONLY",
+ "SELECT * FROM T ORDER BY I OFFSET COALESCE(:variable1, 10) ROWS FETCH NEXT COALESCE(:variable2, 10) ROWS ONLY",
+ )
self.validate_all(
"NVL(NULL, 1)",
|
Support LOCK TABLES MySQL statement
A MySQL schema can contain LOCK TABLES and UNLOCK TABLES statements.
At the moment it seems the library does not parse them at all and raises an error.
**Fully reproducible code snippet**
```python
sql = "LOCK TABLES `app_fields` WRITE;"
parsed_sql_exprs = sqlglot.parse_one(sql, read="mysql")
```
**Official Documentation**
https://dev.mysql.com/doc/refman/8.0/en/lock-tables.html
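A sketch of the post-fix behavior, mirroring the identity tests in this record; both statements parse as commands instead of raising:
```python
import sqlglot

# LOCK TABLES / UNLOCK TABLES are tokenized as commands, so these no longer fail.
sqlglot.parse_one("LOCK TABLES `app_fields` WRITE", read="mysql")
sqlglot.parse_one("UNLOCK TABLES", read="mysql")
```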
|
0.0
|
416b341c45cd0a766a9919cc5a11b5f90dc3b3f3
|
[
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_mysql.py::TestMySQL::test_identity",
"tests/dialects/test_oracle.py::TestOracle::test_oracle"
] |
[
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_types",
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_json_table",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-13 15:08:33+00:00
|
mit
| 5,973 |
|
tobymao__sqlglot-2232
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 5aa946e7..16a1c23b 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -407,7 +407,9 @@ class Snowflake(Dialect):
exp.Min: min_or_least,
exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
exp.RegexpILike: _regexpilike_sql,
- exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
+ exp.Select: transforms.preprocess(
+ [transforms.eliminate_distinct_on, transforms.explode_to_unnest]
+ ),
exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
exp.StartsWith: rename_func("STARTSWITH"),
exp.StrPosition: lambda self, e: self.func(
@@ -449,6 +451,16 @@ class Snowflake(Dialect):
exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}
+ def unnest_sql(self, expression: exp.Unnest) -> str:
+ subquery = exp.Subquery(
+ this=exp.select("value").from_(
+ f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
+ ),
+ )
+ alias = self.sql(expression, "alias")
+ alias = f" AS {alias}" if alias else ""
+ return f"{self.sql(subquery)}{alias}"
+
def show_sql(self, expression: exp.Show) -> str:
scope = self.sql(expression, "scope")
scope = f" {scope}" if scope else ""
|
tobymao/sqlglot
|
cd30eb765a4b733c1b12148609eb3bafa4983eab
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 36fca7c2..1415f8b4 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -61,6 +61,13 @@ class TestDuckDB(Validator):
self.validate_all("0x1010", write={"": "0 AS x1010"})
self.validate_all("x ~ y", write={"duckdb": "REGEXP_MATCHES(x, y)"})
self.validate_all("SELECT * FROM 'x.y'", write={"duckdb": 'SELECT * FROM "x.y"'})
+ self.validate_all(
+ "SELECT UNNEST([1, 2, 3])",
+ write={
+ "duckdb": "SELECT UNNEST([1, 2, 3])",
+ "snowflake": "SELECT col FROM (SELECT value FROM TABLE(FLATTEN(INPUT => [1, 2, 3]))) AS _u(col)",
+ },
+ )
self.validate_all(
"VAR_POP(x)",
read={
|
duckdb unnest to snowflake generates incorrect code
**Fully reproducible code snippet**
```
In [7]: import sqlglot as sg
In [8]: sg.__version__
Out[8]: '18.0.0'
In [9]: sg.parse_one("select unnest([1, 2, 3])", read="duckdb").sql(dialect="snowflake")
Out[9]: 'SELECT EXPLODE([1, 2, 3])'
```
As far as I can tell, `EXPLODE` is not a function that exists in Snowflake.
Array flattening operations are done with Snowflake's `FLATTEN` function.
**Official Documentation**
https://docs.snowflake.com/en/sql-reference/functions/flatten
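A sketch of the expected output once Snowflake rewrites `UNNEST` through `FLATTEN`, mirroring the DuckDB test in this record:
```python
import sqlglot

print(
    sqlglot.parse_one("SELECT UNNEST([1, 2, 3])", read="duckdb").sql(dialect="snowflake")
)
# SELECT col FROM (SELECT value FROM TABLE(FLATTEN(INPUT => [1, 2, 3]))) AS _u(col)
```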
|
0.0
|
cd30eb765a4b733c1b12148609eb3bafa4983eab
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-15 22:43:45+00:00
|
mit
| 5,974 |
|
tobymao__sqlglot-2249
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index b83d4eac..5473fb16 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2861,6 +2861,7 @@ class Select(Subqueryable):
prefix="LIMIT",
dialect=dialect,
copy=copy,
+ into_arg="expression",
**opts,
)
@@ -5130,10 +5131,11 @@ def _apply_builder(
prefix=None,
into=None,
dialect=None,
+ into_arg="this",
**opts,
):
if _is_wrong_expression(expression, into):
- expression = into(this=expression)
+ expression = into(**{into_arg: expression})
instance = maybe_copy(instance, copy)
expression = maybe_parse(
sql_or_expression=expression,
|
tobymao/sqlglot
|
ff19f4c4e1d735710bb945ced6d6c15af186c058
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 4165c5e9..6b496ecd 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -6,6 +6,11 @@ class TestDuckDB(Validator):
dialect = "duckdb"
def test_duckdb(self):
+ self.assertEqual(
+ parse_one("select * from t limit (select 5)").sql(dialect="duckdb"),
+ exp.select("*").from_("t").limit(exp.select("5").subquery()).sql(dialect="duckdb"),
+ )
+
for join_type in ("SEMI", "ANTI"):
exists = "EXISTS" if join_type == "SEMI" else "NOT EXISTS"
|
subquery in limit or offset is compiled to syntactically invalid sql
**Fully reproducible code snippet**
I might be using sqlglot incorrectly, but I am trying to create the sqlglot-builder-equivalent version of this DuckDB query:
```sql
SELECT *
FROM t LIMIT (SELECT 5)
```
I looked at the parse tree:
```
In [8]: import sqlglot as sg
In [9]: sg.__version__
Out[9]: '18.5.2.dev11'
In [10]: sg.parse_one("select * from t limit (select 5)")
Out[10]:
(SELECT expressions:
(STAR ), limit:
(LIMIT expression:
(SUBQUERY this:
(SELECT expressions:
(LITERAL this: 5, is_string: False)))), from:
(FROM this:
(TABLE this:
(IDENTIFIER this: t, quoted: False))))
```
and then tried:
```
In [43]: sg.select("*").from_("t").limit(sg.select("5").subquery()).sql("duckdb")
Out[43]: 'SELECT * FROM t(SELECT 5) LIMIT'
```
For now what I am doing is:
```
In [44]: sg.select("*").from_("t").limit(sg.select("5").subquery().sql('duckdb')).sql("duckdb")
Out[44]: 'SELECT * FROM t LIMIT (SELECT 5)'
```
which generates the desired output.
**Official Documentation**
N/A
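A sketch of the fixed builder behavior, mirroring the test in this record: `limit()` now places a subquery into the LIMIT's `expression` arg, so the string-rendering workaround is no longer needed.
```python
import sqlglot as sg

# Passing a subquery expression to limit() now generates valid SQL directly.
query = sg.select("*").from_("t").limit(sg.select("5").subquery())
print(query.sql(dialect="duckdb"))
# SELECT * FROM t LIMIT (SELECT 5)
```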
|
0.0
|
ff19f4c4e1d735710bb945ced6d6c15af186c058
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-09-17 18:37:41+00:00
|
mit
| 5,975 |
|
tobymao__sqlglot-2293
|
diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 3dd5c3f4..1349c568 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -178,6 +178,7 @@ class BigQuery(Dialect):
UNNEST_COLUMN_ONLY = True
SUPPORTS_USER_DEFINED_TYPES = False
SUPPORTS_SEMI_ANTI_JOIN = False
+ LOG_BASE_FIRST = False
# https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
@@ -265,7 +266,6 @@ class BigQuery(Dialect):
class Parser(parser.Parser):
PREFIXED_PIVOT_COLUMNS = True
- LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
FUNCTIONS = {
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index bf4ab305..fa62e789 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -208,6 +208,7 @@ class TSQL(Dialect):
NULL_ORDERING = "nulls_are_small"
TIME_FORMAT = "'yyyy-mm-dd hh:mm:ss'"
SUPPORTS_SEMI_ANTI_JOIN = False
+ LOG_BASE_FIRST = False
TIME_MAPPING = {
"year": "%Y",
@@ -400,7 +401,6 @@ class TSQL(Dialect):
TokenType.END: lambda self: self._parse_command(),
}
- LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
CONCAT_NULL_OUTPUTS_STRING = True
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 1ddcb2e4..f3b77219 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -99,6 +99,9 @@ class Generator:
exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
}
+ # Whether the base comes first
+ LOG_BASE_FIRST = True
+
# Whether or not null ordering is supported in order by
NULL_ORDERING_SUPPORTED = True
@@ -2524,6 +2527,12 @@ class Generator:
def trycast_sql(self, expression: exp.TryCast) -> str:
return self.cast_sql(expression, safe_prefix="TRY_")
+ def log_sql(self, expression: exp.Log) -> str:
+ args = list(expression.args.values())
+ if not self.LOG_BASE_FIRST:
+ args.reverse()
+ return self.func("LOG", *args)
+
def use_sql(self, expression: exp.Use) -> str:
kind = self.sql(expression, "kind")
kind = f" {kind}" if kind else ""
|
tobymao/sqlglot
|
06e0869e7aa5714d77e6ec763da38d6a422965fa
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 1da3da29..33d34a6a 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -710,6 +710,8 @@ WHERE
pretty=True,
)
+ self.validate_identity("LOG(n, b)")
+
def test_user_defined_functions(self):
self.validate_identity(
"CREATE TEMPORARY FUNCTION a(x FLOAT64, y FLOAT64) RETURNS FLOAT64 NOT DETERMINISTIC LANGUAGE js AS 'return x*y;'"
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index f5b43bb6..f76894d6 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -198,6 +198,7 @@ class TestTSQL(Validator):
},
)
self.validate_identity("HASHBYTES('MD2', 'x')")
+ self.validate_identity("LOG(n, b)")
def test_types(self):
self.validate_identity("CAST(x AS XML)")
|
should bigquery generation reverse the order of `log` arguments?
Wondering if this is the correct behavior:
**Fully reproducible code snippet**
```
In [3]: sg.parse_one("LOG(x, y)").sql("bigquery")
Out[3]: 'LOG(x, y)'
In [4]: sg.parse_one("LOG(x, y)", read="bigquery").sql("bigquery")
Out[4]: 'LOG(y, x)'
```
How can sqlglot know my intent here? Is it that sqlglot's behavior is to assume that the meaning of any `log` call is always base-first?
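A sketch of the post-fix round-trip: with `LOG_BASE_FIRST` handled in the generator as well as the parser, BigQuery `LOG` arguments come back out in source order.
```python
import sqlglot as sg

# The generator reverses the args again for base-second dialects, so the
# round-trip preserves the user's argument order.
print(sg.parse_one("LOG(x, y)", read="bigquery").sql("bigquery"))
# LOG(x, y)
```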
|
0.0
|
06e0869e7aa5714d77e6ec763da38d6a422965fa
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery"
] |
[
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_udf",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-22 11:09:35+00:00
|
mit
| 5,976 |
|
tobymao__sqlglot-2317
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 467ef523..b137a668 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1613,6 +1613,11 @@ class Identifier(Expression):
return self.name
+# https://www.postgresql.org/docs/current/indexes-opclass.html
+class Opclass(Expression):
+ arg_types = {"this": True, "expression": True}
+
+
class Index(Expression):
arg_types = {
"this": False,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index f5654284..7a6bd8bd 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2851,6 +2851,9 @@ class Generator:
def columnprefix_sql(self, expression: exp.ColumnPrefix) -> str:
return f"{self.sql(expression, 'this')}({self.sql(expression, 'expression')})"
+ def opclass_sql(self, expression: exp.Opclass) -> str:
+ return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
+
def cached_generator(
cache: t.Optional[t.Dict[int, str]] = None
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index bde52f92..84f7e46d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -850,6 +850,8 @@ class Parser(metaclass=_Parser):
CLONE_KEYWORDS = {"CLONE", "COPY"}
CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"}
+ OPCLASS_FOLLOW_KEYWORDS = {"ASC", "DESC", "NULLS"}
+
TABLE_INDEX_HINT_TOKENS = {TokenType.FORCE, TokenType.IGNORE, TokenType.USE}
WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}
@@ -2463,6 +2465,17 @@ class Parser(metaclass=_Parser):
comments = [c for token in (method, side, kind) if token for c in token.comments]
return self.expression(exp.Join, comments=comments, **kwargs)
+ def _parse_opclass(self) -> t.Optional[exp.Expression]:
+ this = self._parse_conjunction()
+ if self._match_texts(self.OPCLASS_FOLLOW_KEYWORDS, advance=False):
+ return this
+
+ opclass = self._parse_var(any_token=True)
+ if opclass:
+ return self.expression(exp.Opclass, this=this, expression=opclass)
+
+ return this
+
def _parse_index(
self,
index: t.Optional[exp.Expression] = None,
@@ -2489,7 +2502,7 @@ class Parser(metaclass=_Parser):
using = self._parse_var(any_token=True) if self._match(TokenType.USING) else None
if self._match(TokenType.L_PAREN, advance=False):
- columns = self._parse_wrapped_csv(self._parse_ordered)
+ columns = self._parse_wrapped_csv(lambda: self._parse_ordered(self._parse_opclass))
else:
columns = None
@@ -2968,8 +2981,8 @@ class Parser(metaclass=_Parser):
return None
return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered))
- def _parse_ordered(self) -> exp.Ordered:
- this = self._parse_conjunction()
+ def _parse_ordered(self, parse_method: t.Optional[t.Callable] = None) -> exp.Ordered:
+ this = parse_method() if parse_method else self._parse_conjunction()
asc = self._match(TokenType.ASC)
desc = self._match(TokenType.DESC) or (asc and False)
|
tobymao/sqlglot
|
cdcc564130a01188295948d5562f71d78dbffa12
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 290c6af7..74a3f1c9 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -9,6 +9,9 @@ class TestPostgres(Validator):
dialect = "postgres"
def test_ddl(self):
+ self.validate_identity(
+ "CREATE INDEX foo ON bar.baz USING btree(col1 varchar_pattern_ops ASC, col2)"
+ )
self.validate_identity(
"CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])",
"CREATE TABLE test (x TIMESTAMP[][])",
|
Support postgres Operator Classes and Operator Families
An index definition can specify an operator class for each column of an index.
Right now, the library does not support such cases.
**Fully reproducible code snippet**
```python
sql = """
CREATE INDEX index_routes_on_path_text_pattern_ops ON public.routes USING btree (path varchar_pattern_ops);
CREATE INDEX index_ci_pipelines_on_project_idandrefandiddesc ON public.ci_pipelines USING btree (project_id, ref, id DESC);
CREATE INDEX index_issues_on_title_trigram ON public.issues USING gin (title public.gin_trgm_ops);
"""
parsed_sql_exprs = sqlglot.parse(sql, read="postgres")
```
**Official Documentation**
https://www.postgresql.org/docs/current/indexes-opclass.html
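A sketch of the post-fix behavior, mirroring the DDL test in this record: operator classes on index columns parse and round-trip.
```python
import sqlglot

sql = "CREATE INDEX foo ON bar.baz USING btree(col1 varchar_pattern_ops ASC, col2)"
print(sqlglot.parse_one(sql, read="postgres").sql(dialect="postgres"))
# CREATE INDEX foo ON bar.baz USING btree(col1 varchar_pattern_ops ASC, col2)
```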
|
0.0
|
cdcc564130a01188295948d5562f71d78dbffa12
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-25 18:33:17+00:00
|
mit
| 5,977 |
|
tobymao__sqlglot-2328
|
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index 55095c88..03b116e4 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -43,6 +43,7 @@ class ClickHouse(Dialect):
STRING_ESCAPES = ["'", "\\"]
BIT_STRINGS = [("0b", "")]
HEX_STRINGS = [("0x", ""), ("0X", "")]
+ HEREDOC_STRINGS = ["$"]
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
@@ -75,6 +76,11 @@ class ClickHouse(Dialect):
"UINT8": TokenType.UTINYINT,
}
+ SINGLE_TOKENS = {
+ **tokens.Tokenizer.SINGLE_TOKENS,
+ "$": TokenType.HEREDOC_STRING,
+ }
+
class Parser(parser.Parser):
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index 342fd95a..c22a30b3 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -248,11 +248,10 @@ class Postgres(Dialect):
}
class Tokenizer(tokens.Tokenizer):
- QUOTES = ["'", "$$"]
-
BIT_STRINGS = [("b'", "'"), ("B'", "'")]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
+ HEREDOC_STRINGS = ["$"]
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
@@ -296,7 +295,7 @@ class Postgres(Dialect):
SINGLE_TOKENS = {
**tokens.Tokenizer.SINGLE_TOKENS,
- "$": TokenType.PARAMETER,
+ "$": TokenType.HEREDOC_STRING,
}
VAR_SINGLE_TOKENS = {"$"}
diff --git a/sqlglot/dialects/spark.py b/sqlglot/dialects/spark.py
index 2eaa2ae9..63924d43 100644
--- a/sqlglot/dialects/spark.py
+++ b/sqlglot/dialects/spark.py
@@ -96,3 +96,17 @@ class Spark(Spark2):
return self.func("DATEDIFF", unit, start, end)
return self.func("DATEDIFF", end, start)
+
+ def create_sql(self, expression: exp.Create) -> str:
+ kind = self.sql(expression, "kind").upper()
+ properties = expression.args.get("properties")
+ temporary = any(
+ isinstance(prop, exp.TemporaryProperty)
+ for prop in (properties.expressions if properties else [])
+ )
+ if kind == "TABLE" and temporary:
+ provider = exp.FileFormatProperty(this=exp.Literal.string("parquet"))
+ expression = expression.copy()
+ expression.args["properties"].append("expressions", provider)
+
+ return super().create_sql(expression)
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 84f7e46d..5f8a8448 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -590,6 +590,9 @@ class Parser(metaclass=_Parser):
exp.National, this=token.text
),
TokenType.RAW_STRING: lambda self, token: self.expression(exp.RawString, this=token.text),
+ TokenType.HEREDOC_STRING: lambda self, token: self.expression(
+ exp.RawString, this=token.text
+ ),
TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
}
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 4d5f1983..6b86a6b3 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -77,6 +77,7 @@ class TokenType(AutoName):
BYTE_STRING = auto()
NATIONAL_STRING = auto()
RAW_STRING = auto()
+ HEREDOC_STRING = auto()
# types
BIT = auto()
@@ -418,6 +419,7 @@ class _Tokenizer(type):
**_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS),
**_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
**_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
+ **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
}
klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
@@ -484,6 +486,7 @@ class Tokenizer(metaclass=_Tokenizer):
BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
IDENTIFIER_ESCAPES = ['"']
QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
@@ -997,9 +1000,11 @@ class Tokenizer(metaclass=_Tokenizer):
word = word.upper()
self._add(self.KEYWORDS[word], text=word)
return
+
if self._char in self.SINGLE_TOKENS:
self._add(self.SINGLE_TOKENS[self._char], text=self._char)
return
+
self._scan_var()
def _scan_comment(self, comment_start: str) -> bool:
@@ -1126,6 +1131,10 @@ class Tokenizer(metaclass=_Tokenizer):
base = 16
elif token_type == TokenType.BIT_STRING:
base = 2
+ elif token_type == TokenType.HEREDOC_STRING:
+ self._advance()
+ tag = "" if self._char == end else self._extract_string(end)
+ end = f"{start}{tag}{end}"
else:
return False
|
tobymao/sqlglot
|
180cd8e21713f01080a6b32c55739f5220f56526
|
diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index 3e0ffd5f..55c1f3b0 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -5,6 +5,7 @@ from sqlglot import (
Dialects,
ErrorLevel,
ParseError,
+ TokenError,
UnsupportedError,
parse_one,
)
@@ -308,6 +309,44 @@ class TestDialect(Validator):
read={"postgres": "INET '127.0.0.1/32'"},
)
+ def test_heredoc_strings(self):
+ for dialect in ("clickhouse", "postgres", "redshift"):
+ # Invalid matching tag
+ with self.assertRaises(TokenError):
+ parse_one("SELECT $tag1$invalid heredoc string$tag2$", dialect=dialect)
+
+ # Unmatched tag
+ with self.assertRaises(TokenError):
+ parse_one("SELECT $tag1$invalid heredoc string", dialect=dialect)
+
+ # Without tag
+ self.validate_all(
+ "SELECT 'this is a heredoc string'",
+ read={
+ dialect: "SELECT $$this is a heredoc string$$",
+ },
+ )
+ self.validate_all(
+ "SELECT ''",
+ read={
+ dialect: "SELECT $$$$",
+ },
+ )
+
+ # With tag
+ self.validate_all(
+ "SELECT 'this is also a heredoc string'",
+ read={
+ dialect: "SELECT $foo$this is also a heredoc string$foo$",
+ },
+ )
+ self.validate_all(
+ "SELECT ''",
+ read={
+ dialect: "SELECT $foo$$foo$",
+ },
+ )
+
def test_decode(self):
self.validate_identity("DECODE(bin, charset)")
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 74a3f1c9..0ddc1061 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -207,7 +207,6 @@ class TestPostgres(Validator):
self.validate_identity("SELECT ARRAY[1, 2, 3] @> ARRAY[1, 2]")
self.validate_identity("SELECT ARRAY[1, 2, 3] <@ ARRAY[1, 2]")
self.validate_identity("SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]")
- self.validate_identity("$x")
self.validate_identity("x$")
self.validate_identity("SELECT ARRAY[1, 2, 3]")
self.validate_identity("SELECT ARRAY(SELECT 1)")
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index c75654c8..5f337b05 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -226,7 +226,6 @@ class TestRedshift(Validator):
self.validate_identity("SELECT * FROM #x")
self.validate_identity("SELECT INTERVAL '5 day'")
self.validate_identity("foo$")
- self.validate_identity("$foo")
self.validate_identity("CAST('bla' AS SUPER)")
self.validate_identity("CREATE TABLE real1 (realcol REAL)")
self.validate_identity("CAST('foo' AS HLLSKETCH)")
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 2e43ba52..0148e553 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -8,6 +8,7 @@ class TestSpark(Validator):
dialect = "spark"
def test_ddl(self):
+ self.validate_identity("CREATE TEMPORARY VIEW test AS SELECT 1")
self.validate_identity("CREATE TABLE foo (col VARCHAR(50))")
self.validate_identity("CREATE TABLE foo (col STRUCT<struct_col_a: VARCHAR((50))>)")
self.validate_identity("CREATE TABLE foo (col STRING) CLUSTERED BY (col) INTO 10 BUCKETS")
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 10b1087d..960f65ef 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -564,7 +564,7 @@ class TestTSQL(Validator):
self.validate_all(
"CREATE TABLE #mytemp (a INTEGER, b CHAR(2), c TIME(4), d FLOAT(24))",
write={
- "spark": "CREATE TEMPORARY TABLE mytemp (a INT, b CHAR(2), c TIMESTAMP, d FLOAT)",
+ "spark": "CREATE TEMPORARY TABLE mytemp (a INT, b CHAR(2), c TIMESTAMP, d FLOAT) USING PARQUET",
"tsql": "CREATE TABLE #mytemp (a INTEGER, b CHAR(2), c TIME(4), d FLOAT(24))",
},
)
|
clickhouse: heredoc is not parsable
```python
import sqlglot
sql = """
SELECT $$this is a heredoc string$$
"""
print(sqlglot.parse_one(sql, read="clickhouse"))
```
```
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 2, Col: 18.
SELECT $$this is a heredoc string$$
```
docs: https://clickhouse.com/docs/en/sql-reference/syntax#heredoc
_Note: the tagged form `$tag$anything$tag$` is a heredoc too, e.g. `SELECT $aaa$this is a heredoc string$aaa$`, and it can be multiline, etc._
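A sketch of the post-fix behavior, mirroring the heredoc tests in this record; both the bare and tagged forms tokenize as raw strings:
```python
import sqlglot

print(sqlglot.parse_one("SELECT $$this is a heredoc string$$", read="clickhouse"))
# SELECT 'this is a heredoc string'
print(sqlglot.parse_one("SELECT $foo$tagged heredoc$foo$", read="clickhouse"))
# SELECT 'tagged heredoc'
```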
|
0.0
|
180cd8e21713f01080a6b32c55739f5220f56526
|
[
"tests/dialects/test_dialect.py::TestDialect::test_heredoc_strings",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl"
] |
[
"tests/dialects/test_dialect.py::TestDialect::test_alias",
"tests/dialects/test_dialect.py::TestDialect::test_array",
"tests/dialects/test_dialect.py::TestDialect::test_cast",
"tests/dialects/test_dialect.py::TestDialect::test_cast_to_user_defined_type",
"tests/dialects/test_dialect.py::TestDialect::test_count_if",
"tests/dialects/test_dialect.py::TestDialect::test_cross_join",
"tests/dialects/test_dialect.py::TestDialect::test_decode",
"tests/dialects/test_dialect.py::TestDialect::test_enum",
"tests/dialects/test_dialect.py::TestDialect::test_get_or_raise",
"tests/dialects/test_dialect.py::TestDialect::test_hash_comments",
"tests/dialects/test_dialect.py::TestDialect::test_if_null",
"tests/dialects/test_dialect.py::TestDialect::test_json",
"tests/dialects/test_dialect.py::TestDialect::test_lateral_subquery",
"tests/dialects/test_dialect.py::TestDialect::test_limit",
"tests/dialects/test_dialect.py::TestDialect::test_logarithm",
"tests/dialects/test_dialect.py::TestDialect::test_merge",
"tests/dialects/test_dialect.py::TestDialect::test_nullsafe_eq",
"tests/dialects/test_dialect.py::TestDialect::test_nullsafe_neq",
"tests/dialects/test_dialect.py::TestDialect::test_nvl2",
"tests/dialects/test_dialect.py::TestDialect::test_operators",
"tests/dialects/test_dialect.py::TestDialect::test_order_by",
"tests/dialects/test_dialect.py::TestDialect::test_set_operators",
"tests/dialects/test_dialect.py::TestDialect::test_substring",
"tests/dialects/test_dialect.py::TestDialect::test_time",
"tests/dialects/test_dialect.py::TestDialect::test_transactions",
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_redshift.py::TestRedshift::test_concat",
"tests/dialects/test_redshift.py::TestRedshift::test_create_table_like",
"tests/dialects/test_redshift.py::TestRedshift::test_identity",
"tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding",
"tests/dialects/test_redshift.py::TestRedshift::test_redshift",
"tests/dialects/test_redshift.py::TestRedshift::test_rename_table",
"tests/dialects/test_redshift.py::TestRedshift::test_values",
"tests/dialects/test_redshift.py::TestRedshift::test_varchar_max",
"tests/dialects/test_spark.py::TestSpark::test_bool_or",
"tests/dialects/test_spark.py::TestSpark::test_current_user",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_explode_to_unnest",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_iif",
"tests/dialects/test_spark.py::TestSpark::test_insert_cte",
"tests/dialects/test_spark.py::TestSpark::test_spark",
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/dialects/test_spark.py::TestSpark::test_transform_query",
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-26 17:19:40+00:00
|
mit
| 5,978 |
|
tobymao__sqlglot-2337
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 0ba6a113..5131726e 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -239,6 +239,8 @@ class Snowflake(Dialect):
class Parser(parser.Parser):
IDENTIFY_PIVOT_STRINGS = True
+ TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
+
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
"ARRAYAGG": exp.ArrayAgg.from_arg_list,
|
tobymao/sqlglot
|
f0e5eb6a904d8ee4420c6a9acf489db9b7fa108f
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index a85dc841..b96b7ee8 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -50,6 +50,10 @@ class TestSnowflake(Validator):
self.validate_identity(
"SELECT state, city, SUM(retail_price * quantity) AS gross_revenue FROM sales GROUP BY ALL"
)
+ self.validate_identity(
+ "SELECT * FROM foo window",
+ "SELECT * FROM foo AS window",
+ )
self.validate_identity(
r"SELECT RLIKE(a, $$regular expression with \ characters: \d{2}-\d{3}-\d{4}$$, 'i') FROM log_source",
r"SELECT REGEXP_LIKE(a, 'regular expression with \\ characters: \\d{2}-\\d{3}-\\d{4}', 'i') FROM log_source",
|
Parser error for Snowflake SQL when "window" is used as an alias
Hi All,
Trying to parse our Snowflake queries to perform some validations. The parser fails when the keyword "window" is used as a table alias, even though the SQL executes fine on Snowflake.
Code:
```
parsed_sql = parse_one(
"""
SELECT
dd.delivery_id
FROM prod.public.orders o
LEFT JOIN orders.public.order_assignment window on o.order_id = window.order_id
WHERE active_dt BETWEEN '2023-06-28' and '2023-09-27'
""",
read="snowflake"
)
print(parsed_sql)
```
Error:
```
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 5, Col: 57.
d.delivery_id
FROM prod.public.orders o
LEFT JOIN orders.public.order_assignment window on o.order_id = window.order_id
WHERE active_dt BETWEEN '2023-06-28' and '2023-09-27'
```
`sqlglot version - 18.8.0`
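A sketch of the post-fix behavior, mirroring the identity test in this record: `window` is accepted as a table alias and normalized to an explicit `AS`.
```python
import sqlglot

print(sqlglot.parse_one("SELECT * FROM foo window", read="snowflake"))
# SELECT * FROM foo AS window
```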
|
0.0
|
f0e5eb6a904d8ee4420c6a9acf489db9b7fa108f
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-09-27 23:16:46+00:00
|
mit
| 5,979 |
|
tobymao__sqlglot-2355
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index e1809a1b..9a347f3f 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2164,6 +2164,10 @@ class QueryTransform(Expression):
}
+class SampleProperty(Property):
+ arg_types = {"this": True}
+
+
class SchemaCommentProperty(Property):
arg_types = {"this": True}
@@ -2473,17 +2477,17 @@ class Table(Expression):
return []
@property
- def parts(self) -> t.List[Identifier]:
+ def parts(self) -> t.List[Expression]:
"""Return the parts of a table in order catalog, db, table."""
- parts: t.List[Identifier] = []
+ parts: t.List[Expression] = []
for arg in ("catalog", "db", "this"):
part = self.args.get(arg)
- if isinstance(part, Identifier):
- parts.append(part)
- elif isinstance(part, Dot):
+ if isinstance(part, Dot):
parts.extend(part.flatten())
+ elif isinstance(part, Expression):
+ parts.append(part)
return parts
@@ -6180,7 +6184,7 @@ def table_name(table: Table | str, dialect: DialectType = None) -> str:
The table name.
"""
- table = maybe_parse(table, into=Table)
+ table = maybe_parse(table, into=Table, dialect=dialect)
if not table:
raise ValueError(f"Cannot parse {table}")
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index ad513824..ab498e0d 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -86,6 +86,7 @@ class Generator:
exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}",
exp.PathColumnConstraint: lambda self, e: f"PATH {self.sql(e, 'this')}",
exp.ReturnsProperty: lambda self, e: self.naked_property(e),
+ exp.SampleProperty: lambda self, e: f"SAMPLE BY {self.sql(e, 'this')}",
exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}",
exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
@@ -294,6 +295,7 @@ class Generator:
exp.RowFormatProperty: exp.Properties.Location.POST_SCHEMA,
exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA,
exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA,
+ exp.SampleProperty: exp.Properties.Location.POST_SCHEMA,
exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA,
exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA,
exp.Set: exp.Properties.Location.POST_SCHEMA,
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index cf703231..e96ea8e0 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -672,6 +672,9 @@ class Parser(metaclass=_Parser):
"RETURNS": lambda self: self._parse_returns(),
"ROW": lambda self: self._parse_row(),
"ROW_FORMAT": lambda self: self._parse_property_assignment(exp.RowFormatProperty),
+ "SAMPLE": lambda self: self.expression(
+ exp.SampleProperty, this=self._match_text_seq("BY") and self._parse_bitwise()
+ ),
"SET": lambda self: self.expression(exp.SetProperty, multi=False),
"SETTINGS": lambda self: self.expression(
exp.SettingsProperty, expressions=self._parse_csv(self._parse_set_item)
|
tobymao/sqlglot
|
5fb71743d9274b7e0e825a761be3672c6299e453
|
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 2a2fd811..cddfe424 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -334,6 +334,9 @@ class TestClickhouse(Validator):
)
def test_ddl(self):
+ self.validate_identity(
+ 'CREATE TABLE data5 ("x" UInt32, "y" UInt32) ENGINE=MergeTree ORDER BY (round(y / 1000000000), cityHash64(x)) SAMPLE BY cityHash64(x)'
+ )
self.validate_identity(
"CREATE TABLE foo (x UInt32) TTL time_column + INTERVAL '1' MONTH DELETE WHERE column = 'value'"
)
diff --git a/tests/test_expressions.py b/tests/test_expressions.py
index b1b53601..832967c2 100644
--- a/tests/test_expressions.py
+++ b/tests/test_expressions.py
@@ -182,16 +182,21 @@ class TestExpressions(unittest.TestCase):
self.assertEqual(parse_one("a.b.c").name, "c")
def test_table_name(self):
+ bq_dashed_table = exp.to_table("a-1.b.c", dialect="bigquery")
+ self.assertEqual(exp.table_name(bq_dashed_table), '"a-1".b.c')
+ self.assertEqual(exp.table_name(bq_dashed_table, dialect="bigquery"), "`a-1`.b.c")
+ self.assertEqual(exp.table_name("a-1.b.c", dialect="bigquery"), "`a-1`.b.c")
self.assertEqual(exp.table_name(parse_one("a", into=exp.Table)), "a")
self.assertEqual(exp.table_name(parse_one("a.b", into=exp.Table)), "a.b")
self.assertEqual(exp.table_name(parse_one("a.b.c", into=exp.Table)), "a.b.c")
self.assertEqual(exp.table_name("a.b.c"), "a.b.c")
+ self.assertEqual(exp.table_name(exp.to_table("a.b.c.d.e", dialect="bigquery")), "a.b.c.d.e")
+ self.assertEqual(exp.table_name(exp.to_table("'@foo'", dialect="snowflake")), "'@foo'")
+ self.assertEqual(exp.table_name(exp.to_table("@foo", dialect="snowflake")), "@foo")
self.assertEqual(
exp.table_name(parse_one("foo.`{bar,er}`", read="databricks"), dialect="databricks"),
"foo.`{bar,er}`",
)
- self.assertEqual(exp.table_name(exp.to_table("a-1.b.c", dialect="bigquery")), '"a-1".b.c')
- self.assertEqual(exp.table_name(exp.to_table("a.b.c.d.e", dialect="bigquery")), "a.b.c.d.e")
def test_table(self):
self.assertEqual(exp.table_("a", alias="b"), parse_one("select * from a b").find(exp.Table))
|
support parsing and constructing clickhouse `SAMPLE BY` in create table
**Is your feature request related to a problem? Please describe.**
Not related to a problem.
**Describe the solution you'd like**
I'd like to be able to run the following, and ideally to construct the equivalent expression with sqlglot objects:
```python
import sqlglot as sg

sg.parse_one("CREATE TABLE ... SAMPLE BY expr")
```
**Describe alternatives you've considered**
I haven't considered any alternatives.
**Additional context**
ClickHouse docs for the feature: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#sample-by
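As a quick sketch of the feature once the `SAMPLE` property handler above is in place, the ClickHouse DDL from the added test should round-trip unchanged:
```python
import sqlglot

ddl = (
    'CREATE TABLE data5 ("x" UInt32, "y" UInt32) ENGINE=MergeTree '
    "ORDER BY (round(y / 1000000000), cityHash64(x)) SAMPLE BY cityHash64(x)"
)
# Parse and regenerate; SAMPLE BY should be preserved as a POST_SCHEMA property.
print(sqlglot.parse_one(ddl, read="clickhouse").sql(dialect="clickhouse"))
```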
|
0.0
|
5fb71743d9274b7e0e825a761be3672c6299e453
|
[
"tests/test_expressions.py::TestExpressions::test_table_name",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl"
] |
[
"tests/test_expressions.py::TestExpressions::test_to_dot",
"tests/test_expressions.py::TestExpressions::test_transform_no_infinite_recursion",
"tests/test_expressions.py::TestExpressions::test_unit",
"tests/test_expressions.py::TestExpressions::test_rename_table",
"tests/test_expressions.py::TestExpressions::test_function_building",
"tests/test_expressions.py::TestExpressions::test_eq",
"tests/test_expressions.py::TestExpressions::test_to_interval",
"tests/test_expressions.py::TestExpressions::test_replace_tables",
"tests/test_expressions.py::TestExpressions::test_alias_or_name",
"tests/test_expressions.py::TestExpressions::test_arg_key",
"tests/test_expressions.py::TestExpressions::test_transform_simple",
"tests/test_expressions.py::TestExpressions::test_column",
"tests/test_expressions.py::TestExpressions::test_to_table",
"tests/test_expressions.py::TestExpressions::test_find_all",
"tests/test_expressions.py::TestExpressions::test_transform_node_removal",
"tests/test_expressions.py::TestExpressions::test_named_selects",
"tests/test_expressions.py::TestExpressions::test_alias_column_names",
"tests/test_expressions.py::TestExpressions::test_convert",
"tests/test_expressions.py::TestExpressions::test_data_type_builder",
"tests/test_expressions.py::TestExpressions::test_transform_multiple_children",
"tests/test_expressions.py::TestExpressions::test_alias",
"tests/test_expressions.py::TestExpressions::test_arg_deletion",
"tests/test_expressions.py::TestExpressions::test_replace_placeholders",
"tests/test_expressions.py::TestExpressions::test_function_normalizer",
"tests/test_expressions.py::TestExpressions::test_unnest",
"tests/test_expressions.py::TestExpressions::test_values",
"tests/test_expressions.py::TestExpressions::test_is_star",
"tests/test_expressions.py::TestExpressions::test_to_column",
"tests/test_expressions.py::TestExpressions::test_find_ancestor",
"tests/test_expressions.py::TestExpressions::test_sql",
"tests/test_expressions.py::TestExpressions::test_union",
"tests/test_expressions.py::TestExpressions::test_selects",
"tests/test_expressions.py::TestExpressions::test_properties_from_dict",
"tests/test_expressions.py::TestExpressions::test_ctes",
"tests/test_expressions.py::TestExpressions::test_find",
"tests/test_expressions.py::TestExpressions::test_is_type",
"tests/test_expressions.py::TestExpressions::test_set_metadata",
"tests/test_expressions.py::TestExpressions::test_table",
"tests/test_expressions.py::TestExpressions::test_transform_with_arguments",
"tests/test_expressions.py::TestExpressions::test_replace",
"tests/test_expressions.py::TestExpressions::test_root",
"tests/test_expressions.py::TestExpressions::test_hash",
"tests/test_expressions.py::TestExpressions::test_text",
"tests/test_expressions.py::TestExpressions::test_functions",
"tests/test_expressions.py::TestExpressions::test_comment_alias",
"tests/test_expressions.py::TestExpressions::test_identifier",
"tests/test_expressions.py::TestExpressions::test_walk",
"tests/test_expressions.py::TestExpressions::test_depth",
"tests/test_expressions.py::TestExpressions::test_iter",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-02 10:24:41+00:00
|
mit
| 5,980 |
|
tobymao__sqlglot-2377
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 76b6ee51..58b9a1a5 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -3892,7 +3892,9 @@ class Parser(metaclass=_Parser):
def _parse_unnamed_constraint(
self, constraints: t.Optional[t.Collection[str]] = None
) -> t.Optional[exp.Expression]:
- if not self._match_texts(constraints or self.CONSTRAINT_PARSERS):
+ if self._match(TokenType.IDENTIFIER, advance=False) or not self._match_texts(
+ constraints or self.CONSTRAINT_PARSERS
+ ):
return None
constraint = self._prev.text.upper()
|
tobymao/sqlglot
|
2bcd3e733aa7dffa68e60a36a427e4baa3fe00a5
|
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index 11f921ce..14a864be 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -29,6 +29,9 @@ class TestMySQL(Validator):
self.validate_identity("CREATE TABLE foo (a BIGINT, INDEX USING BTREE (b))")
self.validate_identity("CREATE TABLE foo (a BIGINT, FULLTEXT INDEX (b))")
self.validate_identity("CREATE TABLE foo (a BIGINT, SPATIAL INDEX (b))")
+ self.validate_identity(
+ "CREATE TABLE `oauth_consumer` (`key` VARCHAR(32) NOT NULL, UNIQUE `OAUTH_CONSUMER_KEY` (`key`))"
+ )
self.validate_identity(
"CREATE TABLE `x` (`username` VARCHAR(200), PRIMARY KEY (`username`(16)))"
)
|
Parsing columns with reserved names
I often find cases where databases use reserved words as column names, for example `left` or `key`. Reserved words are permitted as identifiers if you quote them, but the library cannot parse such syntax.
**Fully reproducible code snippet**
```python
import sqlglot

sql = """
CREATE TABLE `oauth_consumer` (
`key` varchar(32) NOT NULL,
UNIQUE KEY `OAUTH_CONSUMER_KEY` (`key`),
);
"""
parsed_sql_exprs = sqlglot.parse(sql, read="mysql")
```
**Official Documentation**
[MySQL Keywords and Reserved Words](https://dev.mysql.com/doc/refman/8.0/en/keywords.html)
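A minimal sketch of the fixed behavior, using the statement from the test added in this patch (note the quoted reserved word `key` as both a column name and an index argument):
```python
import sqlglot

sqlglot.parse_one(
    "CREATE TABLE `oauth_consumer` (`key` VARCHAR(32) NOT NULL, "
    "UNIQUE `OAUTH_CONSUMER_KEY` (`key`))",
    read="mysql",
)  # should no longer raise a ParseError
```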
|
0.0
|
2bcd3e733aa7dffa68e60a36a427e4baa3fe00a5
|
[
"tests/dialects/test_mysql.py::TestMySQL::test_ddl"
] |
[
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_identity",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_is_null",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_monthname",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_types"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-05 13:40:42+00:00
|
mit
| 5,981 |
|
tobymao__sqlglot-2395
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 5627af8d..314995ea 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -4633,14 +4633,18 @@ class JSONArrayAgg(Func):
# https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/JSON_TABLE.html
# Note: parsing of JSON column definitions is currently incomplete.
class JSONColumnDef(Expression):
- arg_types = {"this": True, "kind": False, "path": False}
+ arg_types = {"this": False, "kind": False, "path": False, "nested_schema": False}
+
+
+class JSONSchema(Expression):
+ arg_types = {"expressions": True}
# # https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/JSON_TABLE.html
class JSONTable(Func):
arg_types = {
"this": True,
- "expressions": True,
+ "schema": True,
"path": False,
"error_handling": False,
"empty_handling": False,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 7a2879ce..8257adc8 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2179,13 +2179,21 @@ class Generator:
)
def jsoncolumndef_sql(self, expression: exp.JSONColumnDef) -> str:
+ path = self.sql(expression, "path")
+ path = f" PATH {path}" if path else ""
+ nested_schema = self.sql(expression, "nested_schema")
+
+ if nested_schema:
+ return f"NESTED{path} {nested_schema}"
+
this = self.sql(expression, "this")
kind = self.sql(expression, "kind")
kind = f" {kind}" if kind else ""
- path = self.sql(expression, "path")
- path = f" PATH {path}" if path else ""
return f"{this}{kind}{path}"
+ def jsonschema_sql(self, expression: exp.JSONSchema) -> str:
+ return self.func("COLUMNS", *expression.expressions)
+
def jsontable_sql(self, expression: exp.JSONTable) -> str:
this = self.sql(expression, "this")
path = self.sql(expression, "path")
@@ -2194,9 +2202,9 @@ class Generator:
error_handling = f" {error_handling}" if error_handling else ""
empty_handling = expression.args.get("empty_handling")
empty_handling = f" {empty_handling}" if empty_handling else ""
- columns = f" COLUMNS ({self.expressions(expression, skip_first=True)})"
+ schema = self.sql(expression, "schema")
return self.func(
- "JSON_TABLE", this, suffix=f"{path}{error_handling}{empty_handling}{columns})"
+ "JSON_TABLE", this, suffix=f"{path}{error_handling}{empty_handling} {schema})"
)
def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str:
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index b0eb3ff0..cf9820b2 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -4354,22 +4354,43 @@ class Parser(metaclass=_Parser):
# Note: this is currently incomplete; it only implements the "JSON_value_column" part
def _parse_json_column_def(self) -> exp.JSONColumnDef:
- this = self._parse_id_var()
- kind = self._parse_types(allow_identifiers=False)
+ if not self._match_text_seq("NESTED"):
+ this = self._parse_id_var()
+ kind = self._parse_types(allow_identifiers=False)
+ nested = None
+ else:
+ this = None
+ kind = None
+ nested = True
+
path = self._match_text_seq("PATH") and self._parse_string()
- return self.expression(exp.JSONColumnDef, this=this, kind=kind, path=path)
+ nested_schema = nested and self._parse_json_schema()
+
+ return self.expression(
+ exp.JSONColumnDef,
+ this=this,
+ kind=kind,
+ path=path,
+ nested_schema=nested_schema,
+ )
+
+ def _parse_json_schema(self) -> exp.JSONSchema:
+ self._match_text_seq("COLUMNS")
+ return self.expression(
+ exp.JSONSchema,
+ expressions=self._parse_wrapped_csv(self._parse_json_column_def, optional=True),
+ )
def _parse_json_table(self) -> exp.JSONTable:
this = self._parse_format_json(self._parse_bitwise())
path = self._match(TokenType.COMMA) and self._parse_string()
error_handling = self._parse_on_handling("ERROR", "ERROR", "NULL")
empty_handling = self._parse_on_handling("EMPTY", "ERROR", "NULL")
- self._match_text_seq("COLUMNS")
- expressions = self._parse_wrapped_csv(self._parse_json_column_def, optional=True)
+ schema = self._parse_json_schema()
return exp.JSONTable(
this=this,
- expressions=expressions,
+ schema=schema,
path=path,
error_handling=error_handling,
empty_handling=empty_handling,
|
tobymao/sqlglot
|
c7c3869b01e984a243c071660f27a2c6c4863892
|
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index df52bd46..b9d1d26a 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -66,7 +66,7 @@ class TestMySQL(Validator):
"INSERT INTO x VALUES (1, 'a', 2.0) ON DUPLICATE KEY UPDATE x.id = 1"
)
self.validate_identity(
- "CREATE OR REPLACE VIEW my_view AS SELECT column1 AS `boo`, column2 AS `foo` FROM my_table WHERE column3 = 'some_value' UNION SELECT q.* FROM fruits_table, JSON_TABLE(Fruits, '$[*]' COLUMNS (id VARCHAR(255) PATH '$.$id', value VARCHAR(255) PATH '$.value')) AS q",
+ "CREATE OR REPLACE VIEW my_view AS SELECT column1 AS `boo`, column2 AS `foo` FROM my_table WHERE column3 = 'some_value' UNION SELECT q.* FROM fruits_table, JSON_TABLE(Fruits, '$[*]' COLUMNS(id VARCHAR(255) PATH '$.$id', value VARCHAR(255) PATH '$.value')) AS q",
)
self.validate_all(
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 5572ec10..d92eea5f 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -234,21 +234,30 @@ MATCH_RECOGNIZE (
def test_json_table(self):
self.validate_identity(
- "SELECT * FROM JSON_TABLE(foo FORMAT JSON, 'bla' ERROR ON ERROR NULL ON EMPTY COLUMNS (foo PATH 'bar'))"
+ "SELECT * FROM JSON_TABLE(foo FORMAT JSON, 'bla' ERROR ON ERROR NULL ON EMPTY COLUMNS(foo PATH 'bar'))"
)
self.validate_identity(
"SELECT * FROM JSON_TABLE(foo FORMAT JSON, 'bla' ERROR ON ERROR NULL ON EMPTY COLUMNS foo PATH 'bar')",
- "SELECT * FROM JSON_TABLE(foo FORMAT JSON, 'bla' ERROR ON ERROR NULL ON EMPTY COLUMNS (foo PATH 'bar'))",
+ "SELECT * FROM JSON_TABLE(foo FORMAT JSON, 'bla' ERROR ON ERROR NULL ON EMPTY COLUMNS(foo PATH 'bar'))",
)
self.validate_identity(
"""SELECT
CASE WHEN DBMS_LOB.GETLENGTH(info) < 32000 THEN DBMS_LOB.SUBSTR(info) END AS info_txt,
info AS info_clob
FROM schemaname.tablename ar
-INNER JOIN JSON_TABLE(:emps, '$[*]' COLUMNS (empno NUMBER PATH '$')) jt
+INNER JOIN JSON_TABLE(:emps, '$[*]' COLUMNS(empno NUMBER PATH '$')) jt
ON ar.empno = jt.empno""",
pretty=True,
)
+ self.validate_identity(
+ """SELECT
+ *
+FROM JSON_TABLE(res, '$.info[*]' COLUMNS(
+ tempid NUMBER PATH '$.tempid',
+ NESTED PATH '$.calid[*]' COLUMNS(last_dt PATH '$.last_dt ')
+)) src""",
+ pretty=True,
+ )
def test_connect_by(self):
start = "START WITH last_name = 'King'"
|
Parsing oracle queries having SQL/JSON functions like JSON_*
I have been trying to parse our Oracle queries using sqlglot. It's been very useful so far.
I have observed that sqlglot parsing fails for a few SQL/JSON functions such as JSON_OBJECT, JSON_OBJECTAGG, JSON_ARRAY, JSON_TABLE, JSON_VALUE, etc.
**Sample Oracle query snippet:**
```
JSON_TABLE(res,'$.info[*]' COLUMNS (
tempid NUMBER PATH '$.tempid',
NESTED PATH '$.calid[*]' COLUMNS (
last_dt PATH '$.last_dt '))) src
```
For the above query, the sqlglot.parse_one method fails to parse it. Similarly, it doesn't parse the other JSON functions either.
**Official Documentation -**
https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/JSON_TABLE.html#GUID-09422D4A-936C-4D38-9991-C64101283D98
I'd request the team to look into this. Thank you!
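A sketch of the expected behavior with the `NESTED ... COLUMNS` support above; the input and pretty output follow the Oracle test added in this patch:
```python
import sqlglot

sql = """SELECT * FROM JSON_TABLE(res, '$.info[*]' COLUMNS(
  tempid NUMBER PATH '$.tempid',
  NESTED PATH '$.calid[*]' COLUMNS(last_dt PATH '$.last_dt ')
)) src"""
# Round-trips through the Oracle dialect instead of raising a ParseError.
print(sqlglot.parse_one(sql, read="oracle").sql(dialect="oracle", pretty=True))
```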
|
0.0
|
c7c3869b01e984a243c071660f27a2c6c4863892
|
[
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_oracle.py::TestOracle::test_json_table"
] |
[
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_types",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_is_null",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_monthname",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_identity",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_connect_by",
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table",
"tests/dialects/test_oracle.py::TestOracle::test_oracle"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-10 20:54:30+00:00
|
mit
| 5,982 |
|
tobymao__sqlglot-2443
|
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
index 51b91150..df70aa77 100644
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -175,6 +175,7 @@ class Redshift(Postgres):
exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
exp.JSONExtract: _json_sql,
exp.JSONExtractScalar: _json_sql,
+ exp.ParseJSON: rename_func("JSON_PARSE"),
exp.SafeConcat: concat_to_dpipe_sql,
exp.Select: transforms.preprocess(
[transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
|
tobymao/sqlglot
|
fdb166801144b721677d23c195e5bd3d35ee8841
|
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index bbe2de23..c8480101 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -310,6 +310,7 @@ ORDER BY
self.validate_identity(
"SELECT attr AS attr, JSON_TYPEOF(val) AS value_type FROM customer_orders_lineitem AS c, UNPIVOT c.c_orders AS val AT attr WHERE c_custkey = 9451"
)
+ self.validate_identity("SELECT JSON_PARSE('[]')")
def test_values(self):
# Test crazy-sized VALUES clause to UNION ALL conversion to ensure we don't get RecursionError
|
Erroneous handling of redshift's JSON_PARSE
**sqlglot version: 18.16.1**
**Fully reproducible code snippet**
```python
import sqlglot
sql = "SELECT JSON_PARSE('[10001,10002,\"abc\"]');"
parsed = sqlglot.parse_one(sql,dialect="redshift")
parsed.sql(dialect="redshift")
#'SELECT PARSE_JSON(\'[10001,10002,"abc"]\')'
```
The generated SQL triggers an error when executed in Redshift:
>Failed to execute query: ERROR: function parse_json("unknown") does not exist
Hint: No function matches the given name and argument types. You may need to add explicit type casts.
**Official Documentation**
https://docs.aws.amazon.com/redshift/latest/dg/JSON_PARSE.html
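A minimal sketch of the fix: with `exp.ParseJSON` rendered as `JSON_PARSE` for Redshift, the round trip keeps the Redshift function name (per the identity test added in this patch):
```python
import sqlglot

print(sqlglot.parse_one("SELECT JSON_PARSE('[]')", read="redshift").sql(dialect="redshift"))
# SELECT JSON_PARSE('[]')
```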
|
0.0
|
fdb166801144b721677d23c195e5bd3d35ee8841
|
[
"tests/dialects/test_redshift.py::TestRedshift::test_identity"
] |
[
"tests/dialects/test_redshift.py::TestRedshift::test_concat",
"tests/dialects/test_redshift.py::TestRedshift::test_create_table_like",
"tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding",
"tests/dialects/test_redshift.py::TestRedshift::test_redshift",
"tests/dialects/test_redshift.py::TestRedshift::test_rename_table",
"tests/dialects/test_redshift.py::TestRedshift::test_values",
"tests/dialects/test_redshift.py::TestRedshift::test_varchar_max"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-23 09:19:22+00:00
|
mit
| 5,983 |
|
tobymao__sqlglot-2476
|
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index 30e8b0a8..14fdcb26 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -134,7 +134,9 @@ def _auto_increment_to_serial(expression: exp.Expression) -> exp.Expression:
def _serial_to_generated(expression: exp.Expression) -> exp.Expression:
- kind = expression.args["kind"]
+ kind = expression.args.get("kind")
+ if not kind:
+ return expression
if kind.this == exp.DataType.Type.SERIAL:
data_type = exp.DataType(this=exp.DataType.Type.INT)
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 07be65b9..2d31051b 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -32,7 +32,7 @@ def _check_int(s: str) -> bool:
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
-def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime]:
+def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
if len(args) == 2:
first_arg, second_arg = args
if second_arg.is_string:
@@ -60,8 +60,8 @@ def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime]:
# reduce it using `simplify_literals` first and then check if it's a Literal.
first_arg = seq_get(args, 0)
if not isinstance(simplify_literals(first_arg, root=True), Literal):
- # case: <variant_expr>
- return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)
+ # case: <variant_expr> or other expressions such as columns
+ return exp.TimeStrToTime.from_arg_list(args)
if first_arg.is_string:
if _check_int(first_arg.this):
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index b4275b10..35c9a8eb 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2145,6 +2145,22 @@ class PartitionedByProperty(Property):
arg_types = {"this": True}
+# https://www.postgresql.org/docs/current/sql-createtable.html
+class PartitionBoundSpec(Expression):
+ # this -> IN / MODULUS, expression -> REMAINDER, from_expressions -> FROM (...), to_expressions -> TO (...)
+ arg_types = {
+ "this": False,
+ "expression": False,
+ "from_expressions": False,
+ "to_expressions": False,
+ }
+
+
+class PartitionedOfProperty(Property):
+ # this -> parent_table (schema), expression -> FOR VALUES ... / DEFAULT
+ arg_types = {"this": True, "expression": True}
+
+
class RemoteWithConnectionModelProperty(Property):
arg_types = {"this": True}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 6a3ba5eb..e88d2cd5 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -310,6 +310,7 @@ class Generator:
exp.Order: exp.Properties.Location.POST_SCHEMA,
exp.OutputModelProperty: exp.Properties.Location.POST_SCHEMA,
exp.PartitionedByProperty: exp.Properties.Location.POST_WITH,
+ exp.PartitionedOfProperty: exp.Properties.Location.POST_SCHEMA,
exp.PrimaryKey: exp.Properties.Location.POST_SCHEMA,
exp.Property: exp.Properties.Location.POST_WITH,
exp.RemoteWithConnectionModelProperty: exp.Properties.Location.POST_SCHEMA,
@@ -1262,6 +1263,29 @@ class Generator:
for_ = " FOR NONE"
return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
+ def partitionboundspec_sql(self, expression: exp.PartitionBoundSpec) -> str:
+ if isinstance(expression.this, list):
+ return f"IN ({self.expressions(expression, key='this', flat=True)})"
+ if expression.this:
+ modulus = self.sql(expression, "this")
+ remainder = self.sql(expression, "expression")
+ return f"WITH (MODULUS {modulus}, REMAINDER {remainder})"
+
+ from_expressions = self.expressions(expression, key="from_expressions", flat=True)
+ to_expressions = self.expressions(expression, key="to_expressions", flat=True)
+ return f"FROM ({from_expressions}) TO ({to_expressions})"
+
+ def partitionedofproperty_sql(self, expression: exp.PartitionedOfProperty) -> str:
+ this = self.sql(expression, "this")
+
+ for_values_or_default = expression.expression
+ if isinstance(for_values_or_default, exp.PartitionBoundSpec):
+ for_values_or_default = f" FOR VALUES {self.sql(for_values_or_default)}"
+ else:
+ for_values_or_default = " DEFAULT"
+
+ return f"PARTITION OF {this}{for_values_or_default}"
+
def lockingproperty_sql(self, expression: exp.LockingProperty) -> str:
kind = expression.args.get("kind")
this = f" {self.sql(expression, 'this')}" if expression.this else ""
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index f8256fe8..7ff0aabf 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -674,6 +674,7 @@ class Parser(metaclass=_Parser):
"ON": lambda self: self._parse_on_property(),
"ORDER BY": lambda self: self._parse_order(skip_order_token=True),
"OUTPUT": lambda self: self.expression(exp.OutputModelProperty, this=self._parse_schema()),
+ "PARTITION": lambda self: self._parse_partitioned_of(),
"PARTITION BY": lambda self: self._parse_partitioned_by(),
"PARTITIONED BY": lambda self: self._parse_partitioned_by(),
"PARTITIONED_BY": lambda self: self._parse_partitioned_by(),
@@ -1743,6 +1744,58 @@ class Parser(metaclass=_Parser):
return self._parse_csv(self._parse_conjunction)
return []
+ def _parse_partition_bound_spec(self) -> exp.PartitionBoundSpec:
+ def _parse_partition_bound_expr() -> t.Optional[exp.Expression]:
+ if self._match_text_seq("MINVALUE"):
+ return exp.var("MINVALUE")
+ if self._match_text_seq("MAXVALUE"):
+ return exp.var("MAXVALUE")
+ return self._parse_bitwise()
+
+ this: t.Optional[exp.Expression | t.List[exp.Expression]] = None
+ expression = None
+ from_expressions = None
+ to_expressions = None
+
+ if self._match(TokenType.IN):
+ this = self._parse_wrapped_csv(self._parse_bitwise)
+ elif self._match(TokenType.FROM):
+ from_expressions = self._parse_wrapped_csv(_parse_partition_bound_expr)
+ self._match_text_seq("TO")
+ to_expressions = self._parse_wrapped_csv(_parse_partition_bound_expr)
+ elif self._match_text_seq("WITH", "(", "MODULUS"):
+ this = self._parse_number()
+ self._match_text_seq(",", "REMAINDER")
+ expression = self._parse_number()
+ self._match_r_paren()
+ else:
+ self.raise_error("Failed to parse partition bound spec.")
+
+ return self.expression(
+ exp.PartitionBoundSpec,
+ this=this,
+ expression=expression,
+ from_expressions=from_expressions,
+ to_expressions=to_expressions,
+ )
+
+ # https://www.postgresql.org/docs/current/sql-createtable.html
+ def _parse_partitioned_of(self) -> t.Optional[exp.PartitionedOfProperty]:
+ if not self._match_text_seq("OF"):
+ self._retreat(self._index - 1)
+ return None
+
+ this = self._parse_table(schema=True)
+
+ if self._match(TokenType.DEFAULT):
+ expression: exp.Var | exp.PartitionBoundSpec = exp.var("DEFAULT")
+ elif self._match_text_seq("FOR", "VALUES"):
+ expression = self._parse_partition_bound_spec()
+ else:
+ self.raise_error("Expecting either DEFAULT or FOR VALUES clause.")
+
+ return self.expression(exp.PartitionedOfProperty, this=this, expression=expression)
+
def _parse_partitioned_by(self) -> exp.PartitionedByProperty:
self._match(TokenType.EQ)
return self.expression(
|
tobymao/sqlglot
|
a1252d8ba7d2394bbb14ccd42d835da8cd4eb740
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 343b0e16..0e5f1a1b 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -9,13 +9,6 @@ class TestPostgres(Validator):
dialect = "postgres"
def test_ddl(self):
- self.validate_identity(
- "CREATE INDEX foo ON bar.baz USING btree(col1 varchar_pattern_ops ASC, col2)"
- )
- self.validate_identity(
- "CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])",
- "CREATE TABLE test (x TIMESTAMP[][])",
- )
self.validate_identity("CREATE INDEX idx_x ON x USING BTREE(x, y) WHERE (NOT y IS NULL)")
self.validate_identity("CREATE TABLE test (elems JSONB[])")
self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)")
@@ -26,6 +19,29 @@ class TestPostgres(Validator):
self.validate_identity("INSERT INTO x VALUES (1, 'a', 2.0) RETURNING a")
self.validate_identity("INSERT INTO x VALUES (1, 'a', 2.0) RETURNING a, b")
self.validate_identity("INSERT INTO x VALUES (1, 'a', 2.0) RETURNING *")
+ self.validate_identity("UPDATE tbl_name SET foo = 123 RETURNING a")
+ self.validate_identity("CREATE TABLE cities_partdef PARTITION OF cities DEFAULT")
+ self.validate_identity(
+ "CREATE TABLE cust_part3 PARTITION OF customers FOR VALUES WITH (MODULUS 3, REMAINDER 2)"
+ )
+ self.validate_identity(
+ "CREATE TABLE measurement_y2016m07 PARTITION OF measurement (unitsales DEFAULT 0) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01')"
+ )
+ self.validate_identity(
+ "CREATE TABLE measurement_ym_older PARTITION OF measurement_year_month FOR VALUES FROM (MINVALUE, MINVALUE) TO (2016, 11)"
+ )
+ self.validate_identity(
+ "CREATE TABLE measurement_ym_y2016m11 PARTITION OF measurement_year_month FOR VALUES FROM (2016, 11) TO (2016, 12)"
+ )
+ self.validate_identity(
+ "CREATE TABLE cities_ab PARTITION OF cities (CONSTRAINT city_id_nonzero CHECK (city_id <> 0)) FOR VALUES IN ('a', 'b')"
+ )
+ self.validate_identity(
+ "CREATE TABLE cities_ab PARTITION OF cities (CONSTRAINT city_id_nonzero CHECK (city_id <> 0)) FOR VALUES IN ('a', 'b') PARTITION BY RANGE(population)"
+ )
+ self.validate_identity(
+ "CREATE INDEX foo ON bar.baz USING btree(col1 varchar_pattern_ops ASC, col2)"
+ )
self.validate_identity(
"INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT (id) DO NOTHING RETURNING *"
)
@@ -44,7 +60,10 @@ class TestPostgres(Validator):
self.validate_identity(
"DELETE FROM event USING sales AS s WHERE event.eventid = s.eventid RETURNING a"
)
- self.validate_identity("UPDATE tbl_name SET foo = 123 RETURNING a")
+ self.validate_identity(
+ "CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])",
+ "CREATE TABLE test (x TIMESTAMP[][])",
+ )
self.validate_all(
"CREATE OR REPLACE FUNCTION function_name (input_a character varying DEFAULT NULL::character varying)",
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 65b77ea0..9164e525 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -77,6 +77,10 @@ class TestSnowflake(Validator):
"SELECT {fn CEILING(5.3)}",
"SELECT CEIL(5.3)",
)
+ self.validate_identity(
+ "SELECT TO_TIMESTAMP(x) FROM t",
+ "SELECT CAST(x AS TIMESTAMPNTZ) FROM t",
+ )
self.validate_all("CAST(x AS BYTEINT)", write={"snowflake": "CAST(x AS INT)"})
self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
|
feat(postgres): create partition tables
**Is your feature request related to a problem? Please describe.**
Be able to parse statements like: `CREATE TABLE cust_part3 PARTITION OF customers FOR VALUES WITH (modulus 3, remainder 2)`
```python
>>> sqlglot.parse_one('CREATE TABLE cust_part3 PARTITION OF customers FOR VALUES WITH (modulus 3, remainder 2)', dialect='postgres')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".venv/lib/python3.11/site-packages/sqlglot/__init__.py", line 125, in parse_one
result = dialect.parse(sql, **opts)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.11/site-packages/sqlglot/dialects/dialect.py", line 311, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File ".venv/lib/python3.11/site-packages/sqlglot/parser.py", line 986, in parse
return self._parse(
^^^^^^^^^^^^
File ".venv/lib/python3.11/site-packages/sqlglot/parser.py", line 1055, in _parse
self.raise_error("Invalid expression / Unexpected token")
File ".venv/lib/python3.11/site-packages/sqlglot/parser.py", line 1096, in raise_error
raise error
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 1, Col: 33.
CREATE TABLE cust_part3 PARTITION OF customers FOR VALUES WITH (modulus 3, remainder 2)
```
**Describe the solution you'd like**
I'd like this syntax to be supported for the Postgres dialect.
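A sketch of the parsing behavior added above, using the statement from the issue; the normalized output (uppercased MODULUS/REMAINDER) follows the tests added in this patch:
```python
import sqlglot

sql = (
    "CREATE TABLE cust_part3 PARTITION OF customers "
    "FOR VALUES WITH (MODULUS 3, REMAINDER 2)"
)
print(sqlglot.parse_one(sql, read="postgres").sql(dialect="postgres"))
```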
|
0.0
|
a1252d8ba7d2394bbb14ccd42d835da8cd4eb740
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-28 02:01:40+00:00
|
mit
| 5,984 |
|
tobymao__sqlglot-2483
|
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index 13a5a6c5..a2812972 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -243,6 +243,7 @@ class TSQL(Dialect):
"MMM": "%b",
"MM": "%m",
"M": "%-m",
+ "dddd": "%A",
"dd": "%d",
"d": "%-d",
"HH": "%H",
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 35c9a8eb..08c04cf9 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -5049,7 +5049,7 @@ class FromBase(Func):
class Struct(Func):
- arg_types = {"expressions": True}
+ arg_types = {"expressions": False}
is_var_len_args = True
|
tobymao/sqlglot
|
23079105af1bdcbd849d813b402ee1a3b55fdacd
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 9164e525..2cad1d24 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -13,6 +13,7 @@ class TestSnowflake(Validator):
expr.selects[0].assert_is(exp.AggFunc)
self.assertEqual(expr.sql(dialect="snowflake"), "SELECT APPROX_TOP_K(C4, 3, 5) FROM t")
+ self.validate_identity("SELECT OBJECT_CONSTRUCT()")
self.validate_identity("SELECT DAYOFMONTH(CURRENT_TIMESTAMP())")
self.validate_identity("SELECT DAYOFYEAR(CURRENT_TIMESTAMP())")
self.validate_identity("LISTAGG(data['some_field'], ',')")
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 479f87b9..0ac94f2a 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -855,12 +855,18 @@ WHERE
def test_datename(self):
self.validate_all(
- "SELECT DATENAME(mm,'1970-01-01')",
- write={"spark": "SELECT DATE_FORMAT(CAST('1970-01-01' AS TIMESTAMP), 'MMMM')"},
+ "SELECT DATENAME(mm, '1970-01-01')",
+ write={
+ "spark": "SELECT DATE_FORMAT(CAST('1970-01-01' AS TIMESTAMP), 'MMMM')",
+ "tsql": "SELECT FORMAT(CAST('1970-01-01' AS DATETIME2), 'MMMM')",
+ },
)
self.validate_all(
- "SELECT DATENAME(dw,'1970-01-01')",
- write={"spark": "SELECT DATE_FORMAT(CAST('1970-01-01' AS TIMESTAMP), 'EEEE')"},
+ "SELECT DATENAME(dw, '1970-01-01')",
+ write={
+ "spark": "SELECT DATE_FORMAT(CAST('1970-01-01' AS TIMESTAMP), 'EEEE')",
+ "tsql": "SELECT FORMAT(CAST('1970-01-01' AS DATETIME2), 'dddd')",
+ },
)
def test_datepart(self):
|
[TSQL] DATENAME is replaced incorrectly
# Issue Description
sqlglot==13.3.0
**Code**
```python
query = "Select DATENAME(dw, some_date) FROM my_table"
query = parse_one(query, dialect='tsql')
print(query)
```
**Output**
```sql
Select FORMAT(some_date, '%A') AS day FROM my_table
```
**Expected Output**
```sql
Select FORMAT(some_date, 'dddd') AS day FROM my_table
```
## Analysis
> **format**
> nvarchar format pattern.
> The format argument must contain a valid .NET Framework format string, either as a standard format string (for example, "C" or "D"), or as a pattern of custom characters for dates and numeric values (for example, "MMMM DD, yyyy (dddd)"). Composite formatting is not supported. For a full explanation of these formatting patterns, consult the .NET Framework documentation on string formatting in general, custom date and time formats, and custom number formats. A good starting point is the topic, "[Formatting Types](https://learn.microsoft.com/en-us/dotnet/standard/base-types/formatting-types)."
The correct format should be `dddd`. See [here](https://learn.microsoft.com/en-us/dotnet/standard/base-types/custom-date-and-time-format-strings#ddddSpecifier) for more info.
## Additional Question
Could you please provide the rationale behind replacing DATENAME with FORMAT? Also, in general why are some functions replaced but not others?
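A minimal sketch of the corrected T-SQL round trip once the `dddd` mapping above is applied; the expected output is taken from the updated test:
```python
import sqlglot

sql = "SELECT DATENAME(dw, '1970-01-01')"
print(sqlglot.parse_one(sql, read="tsql").sql(dialect="tsql"))
# SELECT FORMAT(CAST('1970-01-01' AS DATETIME2), 'dddd')
```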
|
0.0
|
23079105af1bdcbd849d813b402ee1a3b55fdacd
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/dialects/test_tsql.py::TestTSQL::test_datename"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-30 14:56:27+00:00
|
mit
| 5,985 |
|
tobymao__sqlglot-2486
|
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index fba18f85..d6e874ef 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -246,6 +246,7 @@ class Generator:
exp.DataType.Type.LONGBLOB: "BLOB",
exp.DataType.Type.TINYBLOB: "BLOB",
exp.DataType.Type.INET: "INET",
+ exp.DataType.Type.UNKNOWN: "",
}
STAR_MAPPING = {
@@ -2400,7 +2401,9 @@ class Generator:
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
format_sql = self.sql(expression, "format")
format_sql = f" FORMAT {format_sql}" if format_sql else ""
- return f"{safe_prefix or ''}CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')}{format_sql})"
+ to_sql = self.sql(expression, "to")
+ to_sql = f" {to_sql}" if to_sql else ""
+ return f"{safe_prefix or ''}CAST({self.sql(expression, 'this')} AS{to_sql}{format_sql})"
def currentdate_sql(self, expression: exp.CurrentDate) -> str:
zone = self.sql(expression, "this")
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 7ff0aabf..1dab6000 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -4246,17 +4246,12 @@ class Parser(metaclass=_Parser):
fmt = None
to = self._parse_types()
- if not to:
- self.raise_error("Expected TYPE after CAST")
- elif isinstance(to, exp.Identifier):
- to = exp.DataType.build(to.name, udt=True)
- elif to.this == exp.DataType.Type.CHAR:
- if self._match(TokenType.CHARACTER_SET):
- to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())
- elif self._match(TokenType.FORMAT):
+ if self._match(TokenType.FORMAT):
fmt_string = self._parse_string()
fmt = self._parse_at_time_zone(fmt_string)
+ if not to:
+ to = exp.DataType.build(exp.DataType.Type.UNKNOWN)
if to.this in exp.DataType.TEMPORAL_TYPES:
this = self.expression(
exp.StrToDate if to.this == exp.DataType.Type.DATE else exp.StrToTime,
@@ -4272,8 +4267,14 @@ class Parser(metaclass=_Parser):
if isinstance(fmt, exp.AtTimeZone) and isinstance(this, exp.StrToTime):
this.set("zone", fmt.args["zone"])
-
return this
+ elif not to:
+ self.raise_error("Expected TYPE after CAST")
+ elif isinstance(to, exp.Identifier):
+ to = exp.DataType.build(to.name, udt=True)
+ elif to.this == exp.DataType.Type.CHAR:
+ if self._match(TokenType.CHARACTER_SET):
+ to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())
return self.expression(
exp.Cast if strict else exp.TryCast, this=this, to=to, format=fmt, safe=safe
|
tobymao/sqlglot
|
f06a67161b19eecd357801841117fcb0949ecbfa
|
diff --git a/tests/dialects/test_teradata.py b/tests/dialects/test_teradata.py
index 50d44229..7348806e 100644
--- a/tests/dialects/test_teradata.py
+++ b/tests/dialects/test_teradata.py
@@ -188,3 +188,4 @@ class TestTeradata(Validator):
"": "STR_TO_DATE('1992-01', '%Y-%d')",
},
)
+ self.validate_identity("CAST('1992-01' AS FORMAT 'YYYY-DD')")
diff --git a/tests/test_expressions.py b/tests/test_expressions.py
index 6c489433..2dae2886 100644
--- a/tests/test_expressions.py
+++ b/tests/test_expressions.py
@@ -443,7 +443,7 @@ class TestExpressions(unittest.TestCase):
return None
return node
- self.assertEqual(expression.transform(remove_non_list_arg).sql(), "CAST(x AS )")
+ self.assertEqual(expression.transform(remove_non_list_arg).sql(), "CAST(x AS)")
expression = parse_one("SELECT a, b FROM x")
@@ -855,16 +855,16 @@ FROM foo""",
self.assertEqual(exp.DataType.build("HSTORE", dialect="postgres").sql(), "HSTORE")
self.assertEqual(exp.DataType.build("NULL").sql(), "NULL")
self.assertEqual(exp.DataType.build("NULL", dialect="bigquery").sql(), "NULL")
- self.assertEqual(exp.DataType.build("UNKNOWN").sql(), "UNKNOWN")
- self.assertEqual(exp.DataType.build("UNKNOWN", dialect="bigquery").sql(), "UNKNOWN")
- self.assertEqual(exp.DataType.build("UNKNOWN", dialect="snowflake").sql(), "UNKNOWN")
+ self.assertEqual(exp.DataType.build("UNKNOWN").sql(), "")
+ self.assertEqual(exp.DataType.build("UNKNOWN", dialect="bigquery").sql(), "")
+ self.assertEqual(exp.DataType.build("UNKNOWN", dialect="snowflake").sql(), "")
self.assertEqual(exp.DataType.build("TIMESTAMP", dialect="bigquery").sql(), "TIMESTAMPTZ")
self.assertEqual(
exp.DataType.build("struct<x int>", dialect="spark").sql(), "STRUCT<x INT>"
)
self.assertEqual(exp.DataType.build("USER-DEFINED").sql(), "USER-DEFINED")
- self.assertEqual(exp.DataType.build("ARRAY<UNKNOWN>").sql(), "ARRAY<UNKNOWN>")
+ self.assertEqual(exp.DataType.build("ARRAY<UNKNOWN>").sql(), "ARRAY")
self.assertEqual(exp.DataType.build("ARRAY<NULL>").sql(), "ARRAY<NULL>")
self.assertEqual(exp.DataType.build("varchar(100) collate 'en-ci'").sql(), "VARCHAR(100)")
|
teradata `AS FORMAT` support
**Fully reproducible code snippet**
This query should parse properly.
```
select CAST(TimestampCol AS FORMAT 'YYYY-MM-DD') FROM dbc.table
```
There are a lot of other potential formats, e.g. '$$9.99' or 'E4'.
**Official Documentation**
- https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/SQL-Data-Types-and-Literals/Data-Type-Formats-and-Format-Phrases/FORMAT-Phrase-and-DateTime-Formats
- https://docs.teradata.com/r/Teradata-Warehouse-Miner-User-Guide-Volume-2ADS-Generation/February-2018/Analytic-Data-Sets/Variable-Creation/Initiating-a-Variable-Creation-Function/Variable-Creation-INPUT-Variables-SQL-Elements/Other/Cast-Function
- https://docs.teradata.com/r/SQL-Functions-Operators-Expressions-and-Predicates/June-2017/Data-Type-Conversions/Period-to-DATE-Conversion/CAST-Syntax
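A sketch of the fixed behavior: with the `UNKNOWN` type fallback above, a type-less `CAST ... AS FORMAT` parses and round-trips (per the identity test added for Teradata):
```python
import sqlglot

expr = sqlglot.parse_one("CAST('1992-01' AS FORMAT 'YYYY-DD')", read="teradata")
print(expr.sql(dialect="teradata"))  # CAST('1992-01' AS FORMAT 'YYYY-DD')
```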
|
0.0
|
f06a67161b19eecd357801841117fcb0949ecbfa
|
[
"tests/dialects/test_teradata.py::TestTeradata::test_cast",
"tests/test_expressions.py::TestExpressions::test_data_type_builder",
"tests/test_expressions.py::TestExpressions::test_transform_node_removal"
] |
[
"tests/dialects/test_teradata.py::TestTeradata::test_abbrev",
"tests/dialects/test_teradata.py::TestTeradata::test_create",
"tests/dialects/test_teradata.py::TestTeradata::test_datatype",
"tests/dialects/test_teradata.py::TestTeradata::test_insert",
"tests/dialects/test_teradata.py::TestTeradata::test_mod",
"tests/dialects/test_teradata.py::TestTeradata::test_statistics",
"tests/dialects/test_teradata.py::TestTeradata::test_teradata",
"tests/dialects/test_teradata.py::TestTeradata::test_translate",
"tests/dialects/test_teradata.py::TestTeradata::test_update",
"tests/test_expressions.py::TestExpressions::test_alias",
"tests/test_expressions.py::TestExpressions::test_alias_column_names",
"tests/test_expressions.py::TestExpressions::test_alias_or_name",
"tests/test_expressions.py::TestExpressions::test_arg_deletion",
"tests/test_expressions.py::TestExpressions::test_arg_key",
"tests/test_expressions.py::TestExpressions::test_column",
"tests/test_expressions.py::TestExpressions::test_comment_alias",
"tests/test_expressions.py::TestExpressions::test_convert",
"tests/test_expressions.py::TestExpressions::test_ctes",
"tests/test_expressions.py::TestExpressions::test_depth",
"tests/test_expressions.py::TestExpressions::test_eq",
"tests/test_expressions.py::TestExpressions::test_find",
"tests/test_expressions.py::TestExpressions::test_find_all",
"tests/test_expressions.py::TestExpressions::test_find_ancestor",
"tests/test_expressions.py::TestExpressions::test_function_building",
"tests/test_expressions.py::TestExpressions::test_function_normalizer",
"tests/test_expressions.py::TestExpressions::test_functions",
"tests/test_expressions.py::TestExpressions::test_hash",
"tests/test_expressions.py::TestExpressions::test_identifier",
"tests/test_expressions.py::TestExpressions::test_is_star",
"tests/test_expressions.py::TestExpressions::test_is_type",
"tests/test_expressions.py::TestExpressions::test_iter",
"tests/test_expressions.py::TestExpressions::test_named_selects",
"tests/test_expressions.py::TestExpressions::test_properties_from_dict",
"tests/test_expressions.py::TestExpressions::test_rename_table",
"tests/test_expressions.py::TestExpressions::test_replace",
"tests/test_expressions.py::TestExpressions::test_replace_placeholders",
"tests/test_expressions.py::TestExpressions::test_replace_tables",
"tests/test_expressions.py::TestExpressions::test_root",
"tests/test_expressions.py::TestExpressions::test_selects",
"tests/test_expressions.py::TestExpressions::test_set_meta",
"tests/test_expressions.py::TestExpressions::test_set_metadata",
"tests/test_expressions.py::TestExpressions::test_sql",
"tests/test_expressions.py::TestExpressions::test_table",
"tests/test_expressions.py::TestExpressions::test_table_name",
"tests/test_expressions.py::TestExpressions::test_text",
"tests/test_expressions.py::TestExpressions::test_to_column",
"tests/test_expressions.py::TestExpressions::test_to_dot",
"tests/test_expressions.py::TestExpressions::test_to_interval",
"tests/test_expressions.py::TestExpressions::test_to_table",
"tests/test_expressions.py::TestExpressions::test_transform_multiple_children",
"tests/test_expressions.py::TestExpressions::test_transform_no_infinite_recursion",
"tests/test_expressions.py::TestExpressions::test_transform_simple",
"tests/test_expressions.py::TestExpressions::test_transform_with_arguments",
"tests/test_expressions.py::TestExpressions::test_union",
"tests/test_expressions.py::TestExpressions::test_unit",
"tests/test_expressions.py::TestExpressions::test_unnest",
"tests/test_expressions.py::TestExpressions::test_values",
"tests/test_expressions.py::TestExpressions::test_walk"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-30 23:26:22+00:00
|
mit
| 5,986 |
|
tobymao__sqlglot-2499
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 9aa2313c..3d9602b0 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -879,6 +879,7 @@ class Parser(metaclass=_Parser):
CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"}
OPCLASS_FOLLOW_KEYWORDS = {"ASC", "DESC", "NULLS"}
+ OPTYPE_FOLLOW_TOKENS = {TokenType.COMMA, TokenType.R_PAREN}
TABLE_INDEX_HINT_TOKENS = {TokenType.FORCE, TokenType.IGNORE, TokenType.USE}
@@ -2565,9 +2566,8 @@ class Parser(metaclass=_Parser):
if self._match_texts(self.OPCLASS_FOLLOW_KEYWORDS, advance=False):
return this
- opclass = self._parse_var(any_token=True)
- if opclass:
- return self.expression(exp.Opclass, this=this, expression=opclass)
+ if not self._match_set(self.OPTYPE_FOLLOW_TOKENS, advance=False):
+ return self.expression(exp.Opclass, this=this, expression=self._parse_table_parts())
return this
|
tobymao/sqlglot
|
808b0bbc4781bd671f52169259434f7ad656e004
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 0e5f1a1b..89360376 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -42,6 +42,9 @@ class TestPostgres(Validator):
self.validate_identity(
"CREATE INDEX foo ON bar.baz USING btree(col1 varchar_pattern_ops ASC, col2)"
)
+ self.validate_identity(
+ "CREATE INDEX index_issues_on_title_trigram ON public.issues USING gin(title public.gin_trgm_ops)"
+ )
self.validate_identity(
"INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT (id) DO NOTHING RETURNING *"
)
|
Postgres opclass with schema prefix
**Before you file an issue**
In Postgres, an opclass may include a schema prefix. Currently, parsing such SQL raises an error.
**Fully reproducible code snippet**
```python
sql = """
CREATE INDEX index_issues_on_title_trigram ON public.issues USING gin (title public.gin_trgm_ops);
"""
parsed_sql_exprs = sqlglot.parse_one(sql, read="postgres")
```
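For reference, a minimal sketch (assuming the patch above) showing that the schema-prefixed opclass also round-trips through the Postgres generator, mirroring the added test:

```python
import sqlglot

sql = "CREATE INDEX index_issues_on_title_trigram ON public.issues USING gin(title public.gin_trgm_ops)"
# The opclass is parsed via _parse_table_parts into an exp.Opclass node,
# so generating Postgres SQL reproduces the original statement.
assert sqlglot.parse_one(sql, read="postgres").sql(dialect="postgres") == sql
```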
**Official Documentation**
https://www.postgresql.org/docs/current/sql-createopclass.html
|
0.0
|
808b0bbc4781bd671f52169259434f7ad656e004
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-01 16:17:24+00:00
|
mit
| 5,987 |
|
tobymao__sqlglot-2521
|
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index ee147437..c4d3f62b 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -245,6 +245,7 @@ class Parser(metaclass=_Parser):
CREATABLES = {
TokenType.COLUMN,
+ TokenType.CONSTRAINT,
TokenType.FUNCTION,
TokenType.INDEX,
TokenType.PROCEDURE,
|
tobymao/sqlglot
|
7ff5f254e755bfef02c694ca3920d10bc6e174cd
|
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 5c4c3a83..59f936e9 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -561,6 +561,13 @@ class TestTSQL(Validator):
)
def test_ddl(self):
+ expression = parse_one("ALTER TABLE dbo.DocExe DROP CONSTRAINT FK_Column_B", dialect="tsql")
+ self.assertIsInstance(expression, exp.AlterTable)
+ self.assertIsInstance(expression.args["actions"][0], exp.Drop)
+ self.assertEqual(
+ expression.sql(dialect="tsql"), "ALTER TABLE dbo.DocExe DROP CONSTRAINT FK_Column_B"
+ )
+
for clusterd_keyword in ("CLUSTERED", "NONCLUSTERED"):
self.validate_identity(
'CREATE TABLE "dbo"."benchmark" ('
@@ -630,7 +637,6 @@ class TestTSQL(Validator):
"tsql": "CREATE OR ALTER VIEW a.b AS SELECT 1",
},
)
-
self.validate_all(
"ALTER TABLE a ADD b INTEGER, c INTEGER",
read={
@@ -641,7 +647,6 @@ class TestTSQL(Validator):
"tsql": "ALTER TABLE a ADD b INTEGER, c INTEGER",
},
)
-
self.validate_all(
"CREATE TABLE #mytemp (a INTEGER, b CHAR(2), c TIME(4), d FLOAT(24))",
write={
|
ALTER TABLE statement with Drop constraint not parsing properly
I was trying to parse a DROP CONSTRAINT statement in Oracle and T-SQL, but sqlglot.parse_one does not parse it properly: it does not return metadata about the dropped constraint the way it does when a constraint is added. Specifically, I am testing this with a foreign key. ALTER TABLE ... ADD CONSTRAINT works perfectly, but DROP CONSTRAINT does not.
```python
import sqlglot

sqlglot.parse_one("ALTER TABLE dbo.DocExe DROP CONSTRAINT FK_Column_B;", "tsql")
```
The above example is copied from this documentation: https://learn.microsoft.com/en-us/sql/relational-databases/tables/delete-foreign-key-relationships?view=sql-server-ver16
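A minimal sketch of what the parse result should expose once `CONSTRAINT` is accepted as a creatable kind, mirroring the test added above:

```python
from sqlglot import exp, parse_one

expression = parse_one("ALTER TABLE dbo.DocExe DROP CONSTRAINT FK_Column_B", dialect="tsql")
# The statement parses to an AlterTable whose first action is a Drop node,
# and it round-trips back to the original T-SQL.
assert isinstance(expression, exp.AlterTable)
assert isinstance(expression.args["actions"][0], exp.Drop)
assert expression.sql(dialect="tsql") == "ALTER TABLE dbo.DocExe DROP CONSTRAINT FK_Column_B"
```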
|
0.0
|
7ff5f254e755bfef02c694ca3920d10bc6e174cd
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_ddl"
] |
[
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_temporal_table",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_udf",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-07 17:16:19+00:00
|
mit
| 5,988 |
|
tobymao__sqlglot-2523
|
diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
index 0505335b..0f27c4b2 100644
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -3,7 +3,13 @@ from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
-from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
+from sqlglot.dialects.dialect import (
+ Dialect,
+ format_time_lambda,
+ no_ilike_sql,
+ rename_func,
+ trim_sql,
+)
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType
@@ -70,6 +76,7 @@ class Oracle(Dialect):
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
"SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
+ "TO_CHAR": format_time_lambda(exp.TimeToStr, "oracle", default=True),
}
FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
|
tobymao/sqlglot
|
e9334eab7c79ef8eaabe7532faab740e1b27f050
|
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 0aa4311a..6b181153 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -62,6 +62,13 @@ class TestOracle(Validator):
"SELECT * FROM t SAMPLE (0.25)",
)
+ self.validate_all(
+ "SELECT TO_CHAR(TIMESTAMP '1999-12-01 10:00:00')",
+ write={
+ "oracle": "SELECT TO_CHAR(CAST('1999-12-01 10:00:00' AS TIMESTAMP), 'YYYY-MM-DD HH24:MI:SS')",
+ "postgres": "SELECT TO_CHAR(CAST('1999-12-01 10:00:00' AS TIMESTAMP), 'YYYY-MM-DD HH24:MI:SS')",
+ },
+ )
self.validate_all(
"SELECT CAST(NULL AS VARCHAR2(2328 CHAR)) AS COL1",
write={
|
[Bug] Transpiling built-in functions is not accurate.
**Before you file an issue**
- updated to 19.0.3
**Fully reproducible code snippet**
I am transpiling Oracle SQL to Postgres. There are multiple functions that fail when executed.
Here is an example.
```python
import sqlglot

sql_in_oracle = "to_char(123)"
sqlglot.transpile(sql_in_oracle, read='oracle', write='postgres')
```
Output: `['to_char(123)']`
Expected: `['123::text']` or whatever is valid in Postgres
Postgres supports `to_char(expr, format)`, but not `to_char(expr)`.
My query list contains many functions that cannot be transpiled correctly, such as:
Truncate datetime to day:
```
TRUNC(datetime)                        -- Valid Oracle
Output:   TRUNC(datetime)              -- Invalid Postgres
Expected: DATE_TRUNC('day', datetime)  -- Valid Postgres
```
**Official Documentation**
[sqlines](https://sqlines.com/oracle-to-postgresql) has an online tool I tried once. On its website, it lists many migration conversions for different databases. It might be a helpful resource for you to check out. Thanks.
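For the `TO_CHAR` case specifically, here is a sketch of the behavior the patch above targets; the expected strings come from the accompanying test:

```python
from sqlglot import transpile

# With TO_CHAR parsed as TimeToStr (with a default format), the one-argument
# Oracle form gains an explicit format string when written for Postgres.
result = transpile(
    "SELECT TO_CHAR(TIMESTAMP '1999-12-01 10:00:00')",
    read="oracle",
    write="postgres",
)
assert result == ["SELECT TO_CHAR(CAST('1999-12-01 10:00:00' AS TIMESTAMP), 'YYYY-MM-DD HH24:MI:SS')"]
```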
|
0.0
|
e9334eab7c79ef8eaabe7532faab740e1b27f050
|
[
"tests/dialects/test_oracle.py::TestOracle::test_oracle"
] |
[
"tests/dialects/test_oracle.py::TestOracle::test_connect_by",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table",
"tests/dialects/test_oracle.py::TestOracle::test_json_table",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_hints"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-07 17:57:03+00:00
|
mit
| 5,989 |
|
tobymao__sqlglot-2529
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 01f7512a..62b1cd0c 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -349,7 +349,7 @@ class Snowflake(Dialect):
table: t.Optional[exp.Expression] = None
if self._match_text_seq("@"):
table_name = "@"
- while True:
+ while self._curr:
self._advance()
table_name += self._prev.text
if not self._match_set(self.STAGED_FILE_SINGLE_TOKENS, advance=False):
|
tobymao/sqlglot
|
08d5d779c79736859471d52e4acf82ad77f16483
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index ede42f47..7452a8c9 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -552,6 +552,7 @@ class TestSnowflake(Validator):
staged_file.sql(dialect="snowflake"),
)
+ self.validate_identity("SELECT metadata$filename FROM @s1/")
self.validate_identity("SELECT * FROM @~")
self.validate_identity("SELECT * FROM @~/some/path/to/file.csv")
self.validate_identity("SELECT * FROM @mystage")
|
List index out of range for Snowflake SQL transpilation
Reproduced the issue using the latest `main` build, `sqlglot-19.0.4.dev18`.
Read: `snowflake`
Write: `spark`
When running the following query, taken from Snowflake's publicly available documentation, we get an `IndexError: list index out of range`. It's unclear which part of the query fails to tokenize.
Code snippet
https://docs.snowflake.com/en/sql-reference/sql/create-external-table
```
from sqlglot import transpile

query_1666 = 'SELECT metadata$filename FROM @s1/;'
transpiled_sqls = transpile(query_1666, read='snowflake', write='spark', pretty=True, error_level=None)
```
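A minimal sketch of the fix's effect, per the test added above: with the stage-name loop guarded by `self._curr`, the statement parses and round-trips instead of raising:

```python
from sqlglot import parse_one

# Previously the loop consuming the stage name ran past the end of input on
# the trailing "/", raising IndexError; with the guard it terminates cleanly.
sql = "SELECT metadata$filename FROM @s1/"
assert parse_one(sql, read="snowflake").sql(dialect="snowflake") == sql
```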
|
0.0
|
08d5d779c79736859471d52e4acf82ad77f16483
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-08 19:51:27+00:00
|
mit
| 5,990 |
|
tobymao__sqlglot-2534
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index af557b5c..3433cdf2 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -84,7 +84,8 @@ def _parse_date_diff(args: t.List) -> exp.Expression:
def _struct_sql(self: DuckDB.Generator, expression: exp.Struct) -> str:
args = [
- f"'{e.name or e.this.name}': {self.sql(e, 'expression')}" for e in expression.expressions
+ f"'{e.name or e.this.name}': {self.sql(e.expressions[0]) if isinstance(e, exp.Bracket) else self.sql(e, 'expression')}"
+ for e in expression.expressions
]
return f"{{{', '.join(args)}}}"
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index f0aa1dc8..554c7c11 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -375,8 +375,6 @@ class Presto(Dialect):
exp.Xor: bool_xor_sql,
}
- KEY_VALUE_DEFINITONS = (exp.EQ, exp.PropertyEQ, exp.Slice)
-
def struct_sql(self, expression: exp.Struct) -> str:
if any(isinstance(arg, self.KEY_VALUE_DEFINITONS) for arg in expression.expressions):
self.unsupported("Struct with key-value definitions is unsupported.")
diff --git a/sqlglot/dialects/spark2.py b/sqlglot/dialects/spark2.py
index da84bd86..38a06567 100644
--- a/sqlglot/dialects/spark2.py
+++ b/sqlglot/dialects/spark2.py
@@ -224,6 +224,19 @@ class Spark2(Hive):
WRAP_DERIVED_VALUES = False
CREATE_FUNCTION_RETURN_AS = False
+ def struct_sql(self, expression: exp.Struct) -> str:
+ args = []
+ for arg in expression.expressions:
+ if isinstance(arg, self.KEY_VALUE_DEFINITONS):
+ if isinstance(arg, exp.Bracket):
+ args.append(exp.alias_(arg.this, arg.expressions[0].name))
+ else:
+ args.append(exp.alias_(arg.expression, arg.this.name))
+ else:
+ args.append(arg)
+
+ return self.func("STRUCT", *args)
+
def temporary_storage_provider(self, expression: exp.Create) -> exp.Create:
# spark2, spark, Databricks require a storage provider for temporary tables
provider = exp.FileFormatProperty(this=exp.Literal.string("parquet"))
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index b89c30ad..f4193cda 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -378,6 +378,8 @@ class Generator:
exp.Paren,
)
+ KEY_VALUE_DEFINITONS = (exp.Bracket, exp.EQ, exp.PropertyEQ, exp.Slice)
+
SENTINEL_LINE_BREAK = "__SQLGLOT__LB__"
# Autofilled
|
tobymao/sqlglot
|
8f99d9bcf2b79a74ded6f0b5ddef3676904df2df
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index a02b5a35..c78d3efd 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -356,14 +356,14 @@ class TestDuckDB(Validator):
"STRUCT_PACK(x := 1, y := '2')",
write={
"duckdb": "{'x': 1, 'y': '2'}",
- "spark": "STRUCT(x = 1, y = '2')",
+ "spark": "STRUCT(1 AS x, '2' AS y)",
},
)
self.validate_all(
"STRUCT_PACK(key1 := 'value1', key2 := 42)",
write={
"duckdb": "{'key1': 'value1', 'key2': 42}",
- "spark": "STRUCT(key1 = 'value1', key2 = 42)",
+ "spark": "STRUCT('value1' AS key1, 42 AS key2)",
},
)
self.validate_all(
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 7452a8c9..edbdd005 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -87,6 +87,14 @@ class TestSnowflake(Validator):
self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
self.validate_all("CAST(x AS CHARACTER VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
self.validate_all("CAST(x AS NCHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
+ self.validate_all(
+ "SELECT { 'Manitoba': 'Winnipeg', 'foo': 'bar' } AS province_capital",
+ write={
+ "duckdb": "SELECT {'Manitoba': 'Winnipeg', 'foo': 'bar'} AS province_capital",
+ "snowflake": "SELECT OBJECT_CONSTRUCT('Manitoba', 'Winnipeg', 'foo', 'bar') AS province_capital",
+ "spark": "SELECT STRUCT('Manitoba' AS Winnipeg, 'foo' AS bar) AS province_capital",
+ },
+ )
self.validate_all(
"SELECT COLLATE('B', 'und:ci')",
write={
|
Conversion from Snowflake Object to Spark StructType does not produce syntactically correct result
Reproduced the issue using main (`sqlglot-19.0.4.dev18`).
Read: `snowflake`
Write: `spark`
We are trying to convert Snowflake SQL code that creates an `OBJECT` using `{foo: bar}` syntax. In Spark, we would like to create the corresponding data type, a `StructType`. The transpiled Spark code is not syntactically correct and throws an error `AnalysisException: [INVALID_EXTRACT_BASE_FIELD_TYPE] Can't extract a value from "Manitoba". Need a complex type [STRUCT, ARRAY, MAP] but got "STRING".`
This is presumably because the transpiled code is trying to make use of the `foo[bar]` syntax in Spark SQL to extract struct fields. However, that syntax doesn't work when _creating_ a struct.
Code snippet:
```
from pyspark.sql.session import SparkSession
from sqlglot import transpile
spark = SparkSession.builder.getOrCreate()
query = "SELECT { 'Manitoba': 'Winnipeg', 'foo': 'bar' } AS province_capital;"
transpiled_query = transpile(query, read='snowflake', write='spark', pretty=True, error_level=None)[0]
spark.sql(transpiled_query)
```
Produces the following string **(INCORRECT RESULT)**
```
SELECT
STRUCT('Manitoba'['Winnipeg'], 'foo'['bar']) AS province_capital
```
Properly converted code in Spark SQL:
```
SELECT STRUCT('Winnipeg' AS Manitoba, 'bar' AS foo) AS province_capital
```
Snowflake documentation for creating semi-structured data types using `{foo: bar}` syntax.
https://docs.snowflake.com/en/sql-reference/data-types-semistructured
Spark SQL `struct` function
https://spark.apache.org/docs/3.5.0/api/sql/#struct
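The accompanying test pins down the corrected Spark output; a minimal sketch:

```python
from sqlglot import transpile

query = "SELECT { 'Manitoba': 'Winnipeg', 'foo': 'bar' } AS province_capital"
# With Bracket treated as a key-value definition, Spark2's struct_sql emits
# aliased fields instead of subscript lookups.
assert transpile(query, read="snowflake", write="spark") == [
    "SELECT STRUCT('Manitoba' AS Winnipeg, 'foo' AS bar) AS province_capital"
]
```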
|
0.0
|
8f99d9bcf2b79a74ded6f0b5ddef3676904df2df
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_duckdb.py::TestDuckDB::test_timestamps_with_units",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-09 02:18:41+00:00
|
mit
| 5,991 |
|
tobymao__sqlglot-2576
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index bed638bd..ff3a72f8 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -117,7 +117,6 @@ class DuckDB(Dialect):
class Tokenizer(tokens.Tokenizer):
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
- ":=": TokenType.COLON_EQ,
"//": TokenType.DIV,
"ATTACH": TokenType.COMMAND,
"BINARY": TokenType.VARBINARY,
@@ -356,9 +355,6 @@ class DuckDB(Dialect):
exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}
- def propertyeq_sql(self, expression: exp.PropertyEQ) -> str:
- return self.binary(expression, ":=")
-
def interval_sql(self, expression: exp.Interval) -> str:
multiplier: t.Optional[int] = None
unit = expression.text("unit").lower()
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 35b84c38..24fc9eaf 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2648,7 +2648,7 @@ class Generator:
return self.binary(expression, "=")
def propertyeq_sql(self, expression: exp.PropertyEQ) -> str:
- return self.binary(expression, "=")
+ return self.binary(expression, ":=")
def escape_sql(self, expression: exp.Escape) -> str:
return self.binary(expression, "ESCAPE")
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 19be0367..4359184f 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -526,6 +526,7 @@ class Tokenizer(metaclass=_Tokenizer):
"<=": TokenType.LTE,
"<>": TokenType.NEQ,
"!=": TokenType.NEQ,
+ ":=": TokenType.COLON_EQ,
"<=>": TokenType.NULLSAFE_EQ,
"->": TokenType.ARROW,
"->>": TokenType.DARROW,
|
tobymao/sqlglot
|
f5899a1a0da096e012b7abd0627a372e4202a612
|
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index 97eb65b0..2cbcb9dc 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -123,6 +123,7 @@ class TestMySQL(Validator):
self.validate_identity("ALTER TABLE test_table ALTER COLUMN test_column SET DEFAULT 1")
def test_identity(self):
+ self.validate_identity("SELECT @var1 := 1, @var2")
self.validate_identity("UNLOCK TABLES")
self.validate_identity("LOCK TABLES `app_fields` WRITE")
self.validate_identity("SELECT 1 XOR 0")
|
sqlglot does not roundtrip assignment operator `:=`
There is no way to round-trip `:=` through sqlglot: it is parsed into `EQ` rather than `PropertyEQ`.
**Fully reproducible code snippet**
> Please include a fully reproducible code snippet or the input sql, dialect, and expected output.
```python
In [28]: sqlglot.parse_one('SELECT @x := 1', read='mysql')
Out[28]:
(SELECT expressions:
(EQ this:
(PARAMETER this:
(VAR this: x)), expression:
(LITERAL this: 1, is_string: False)))
In [29]: sqlglot.parse_one('SELECT @x := 1', read='mysql').sql()
Out[29]: 'SELECT @x = 1'
```
Expected output: `SELECT @x := 1`.
**Official Documentation**
> Please include links to official SQL documentation related to your issue.
I couldn't find anything meaningful about `PropertyEQ`.
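With the patch above (`:=` tokenized as COLON_EQ in the base tokenizer and `PropertyEQ` generated as `:=`), the round trip works; a sketch based on the added test:

```python
from sqlglot import parse_one

# The assignment operator survives parse -> generate unchanged.
sql = "SELECT @var1 := 1, @var2"
assert parse_one(sql, read="mysql").sql(dialect="mysql") == sql
```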
|
0.0
|
f5899a1a0da096e012b7abd0627a372e4202a612
|
[
"tests/dialects/test_mysql.py::TestMySQL::test_identity"
] |
[
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_safe_div",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_types",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_is_null",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_monthname",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-18 22:11:16+00:00
|
mit
| 5,992 |
|
tobymao__sqlglot-2582
|
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index 021843b2..51040ca5 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -354,6 +354,7 @@ class TSQL(Dialect):
IDENTIFIERS = ['"', ("[", "]")]
QUOTES = ["'", '"']
HEX_STRINGS = [("0x", ""), ("0X", "")]
+ VAR_SINGLE_TOKENS = {"@", "$", "#"}
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
|
tobymao/sqlglot
|
8cd7d1c0bb56aff6bcd08a3ae4e71f68022307b8
|
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 73bcc1ca..d94846cc 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -6,6 +6,10 @@ class TestTSQL(Validator):
dialect = "tsql"
def test_tsql(self):
+ self.validate_identity("SELECT TestSpecialChar.Test# FROM TestSpecialChar")
+ self.validate_identity("SELECT TestSpecialChar.Test@ FROM TestSpecialChar")
+ self.validate_identity("SELECT TestSpecialChar.Test$ FROM TestSpecialChar")
+ self.validate_identity("SELECT TestSpecialChar.Test_ FROM TestSpecialChar")
self.validate_identity("SELECT TOP (2 + 1) 1")
self.validate_identity("SELECT * FROM t WHERE NOT c", "SELECT * FROM t WHERE NOT c <> 0")
self.validate_identity("1 AND true", "1 <> 0 AND (1 = 1)")
|
`#` character at the end of a field name disappears when using parse_one with the tsql dialect
```python
from sqlglot import parse_one

Query = 'SELECT TestSpecialChar.Test# FROM TestSpecialChar'
sql = parse_one(sql=Query, read='tsql')
```
The returned value is:
```
SELECT TestSpecialChar.Test FROM TestSpecialChar
```
P.S. The query is perfectly valid in T-SQL; I used it to test how sqlglot behaves with unusual characters in identifiers. Test$ works, but Test# does not.
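The fix adds `#` (alongside `@` and `$`) to the T-SQL tokenizer's `VAR_SINGLE_TOKENS`; a sketch of the expected round trips, taken from the added tests:

```python
from sqlglot import parse_one

for name in ("Test#", "Test@", "Test$", "Test_"):
    sql = f"SELECT TestSpecialChar.{name} FROM TestSpecialChar"
    # The trailing special character stays part of the identifier.
    assert parse_one(sql, read="tsql").sql(dialect="tsql") == sql
```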
|
0.0
|
8cd7d1c0bb56aff6bcd08a3ae4e71f68022307b8
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_tsql"
] |
[
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_temporal_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-11-21 15:04:19+00:00
|
mit
| 5,993 |
|
tobymao__sqlglot-2591
|
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index 51040ca5..249d5211 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -153,6 +153,11 @@ def _format_sql(self: TSQL.Generator, expression: exp.NumberToStr | exp.TimeToSt
)
)
)
+
+ # There is no format for "quarter"
+ if fmt.name.lower() == "quarter":
+ return self.func("DATEPART", "QUARTER", expression.this)
+
return self.func("FORMAT", expression.this, fmt, expression.args.get("culture"))
@@ -245,9 +250,6 @@ class TSQL(Dialect):
TIME_MAPPING = {
"year": "%Y",
- "qq": "%q",
- "q": "%q",
- "quarter": "%q",
"dayofyear": "%j",
"day": "%d",
"dy": "%d",
@@ -684,9 +686,7 @@ class TSQL(Dialect):
exp.Subquery: transforms.preprocess([qualify_derived_table_outputs]),
exp.SHA: lambda self, e: self.func("HASHBYTES", exp.Literal.string("SHA1"), e.this),
exp.SHA2: lambda self, e: self.func(
- "HASHBYTES",
- exp.Literal.string(f"SHA2_{e.args.get('length', 256)}"),
- e.this,
+ "HASHBYTES", exp.Literal.string(f"SHA2_{e.args.get('length', 256)}"), e.this
),
exp.TemporaryProperty: lambda self, e: "",
exp.TimeStrToTime: timestrtotime_sql,
|
tobymao/sqlglot
|
426075fe17b60b419f38a8ef5735977b953b92af
|
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index d94846cc..6188fd3a 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -175,18 +175,6 @@ class TestTSQL(Validator):
"SELECT DISTINCT DepartmentName, PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY BaseRate) OVER (PARTITION BY DepartmentName) AS MedianCont FROM dbo.DimEmployee"
)
- self.validate_all(
- "SELECT DATEPART(year, CAST('2017-01-01' AS DATE))",
- read={"postgres": "SELECT DATE_PART('year', '2017-01-01'::DATE)"},
- )
- self.validate_all(
- "SELECT DATEPART(month, CAST('2017-03-01' AS DATE))",
- read={"postgres": "SELECT DATE_PART('month', '2017-03-01'::DATE)"},
- )
- self.validate_all(
- "SELECT DATEPART(day, CAST('2017-01-02' AS DATE))",
- read={"postgres": "SELECT DATE_PART('day', '2017-01-02'::DATE)"},
- )
self.validate_all(
"SELECT CAST([a].[b] AS SMALLINT) FROM foo",
write={
@@ -900,11 +888,50 @@ WHERE
)
def test_datepart(self):
+ self.validate_identity("DATEPART(QUARTER, x)", "DATEPART(QUARTER, CAST(x AS DATETIME2))")
+ self.validate_identity("DATEPART(YEAR, x)", "FORMAT(CAST(x AS DATETIME2), 'yyyy')")
+
self.validate_all(
"SELECT DATEPART(month,'1970-01-01')",
- write={"spark": "SELECT DATE_FORMAT(CAST('1970-01-01' AS TIMESTAMP), 'MM')"},
+ write={
+ "postgres": "SELECT TO_CHAR(CAST('1970-01-01' AS TIMESTAMP), 'MM')",
+ "spark": "SELECT DATE_FORMAT(CAST('1970-01-01' AS TIMESTAMP), 'MM')",
+ "tsql": "SELECT FORMAT(CAST('1970-01-01' AS DATETIME2), 'MM')",
+ },
+ )
+ self.validate_all(
+ "SELECT DATEPART(year, CAST('2017-01-01' AS DATE))",
+ read={
+ "postgres": "SELECT DATE_PART('year', '2017-01-01'::DATE)",
+ },
+ write={
+ "postgres": "SELECT TO_CHAR(CAST(CAST('2017-01-01' AS DATE) AS TIMESTAMP), 'YYYY')",
+ "spark": "SELECT DATE_FORMAT(CAST(CAST('2017-01-01' AS DATE) AS TIMESTAMP), 'yyyy')",
+ "tsql": "SELECT FORMAT(CAST(CAST('2017-01-01' AS DATE) AS DATETIME2), 'yyyy')",
+ },
+ )
+ self.validate_all(
+ "SELECT DATEPART(month, CAST('2017-03-01' AS DATE))",
+ read={
+ "postgres": "SELECT DATE_PART('month', '2017-03-01'::DATE)",
+ },
+ write={
+ "postgres": "SELECT TO_CHAR(CAST(CAST('2017-03-01' AS DATE) AS TIMESTAMP), 'MM')",
+ "spark": "SELECT DATE_FORMAT(CAST(CAST('2017-03-01' AS DATE) AS TIMESTAMP), 'MM')",
+ "tsql": "SELECT FORMAT(CAST(CAST('2017-03-01' AS DATE) AS DATETIME2), 'MM')",
+ },
+ )
+ self.validate_all(
+ "SELECT DATEPART(day, CAST('2017-01-02' AS DATE))",
+ read={
+ "postgres": "SELECT DATE_PART('day', '2017-01-02'::DATE)",
+ },
+ write={
+ "postgres": "SELECT TO_CHAR(CAST(CAST('2017-01-02' AS DATE) AS TIMESTAMP), 'DD')",
+ "spark": "SELECT DATE_FORMAT(CAST(CAST('2017-01-02' AS DATE) AS TIMESTAMP), 'dd')",
+ "tsql": "SELECT FORMAT(CAST(CAST('2017-01-02' AS DATE) AS DATETIME2), 'dd')",
+ },
)
- self.validate_identity("DATEPART(YEAR, x)", "FORMAT(CAST(x AS DATETIME2), 'yyyy')")
def test_convert_date_format(self):
self.validate_all(
|
Inaccurate sql generation for DATEPART in T-SQL
When reading T-SQL queries, the DATEPART function is converted to the FORMAT function. That would work fine, except that 'quarter' is not one of the supported [format types](https://learn.microsoft.com/en-us/dotnet/standard/base-types/custom-date-and-time-format-strings).
Code snippet:
```python
from sqlglot import parse_one

quarter_query = """
SELECT
    YEAR(sale_date) AS Year,
    DATEPART(QUARTER, sale_date) AS Quarter,
    SUM(sale_price) AS QuarterlySales
FROM
    Sales_AI
GROUP BY
    YEAR(sale_date),
    DATEPART(QUARTER, sale_date)
ORDER BY
    Year, Quarter
"""
print(parse_one(quarter_query, 'tsql').sql(dialect='tsql'))
```
Output: `"SELECT YEAR(sale_date) AS Year, FORMAT(CAST(sale_date AS DATETIME2), 'quarter') AS Quarter, SUM(sale_price) AS QuarterlySales FROM Sales_AI GROUP BY YEAR(sale_date), FORMAT(CAST(sale_date AS DATETIME2), 'quarter') ORDER BY Year, Quarter"`
This worked fine when the time period was month.
I would also like to know whether there are any resources on creating a custom dialect that lets DATEPART stay as DATEPART after parsing.
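Per the patch above, `quarter` now falls back to `DATEPART` instead of `FORMAT`; a sketch based on the added identity test:

```python
from sqlglot import parse_one

# .NET format strings have no "quarter" specifier, so the generator emits
# DATEPART for it (and keeps FORMAT for parts that do have a format code).
assert (
    parse_one("DATEPART(QUARTER, x)", read="tsql").sql(dialect="tsql")
    == "DATEPART(QUARTER, CAST(x AS DATETIME2))"
)
```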
|
0.0
|
426075fe17b60b419f38a8ef5735977b953b92af
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_datepart"
] |
[
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_temporal_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-23 00:52:11+00:00
|
mit
| 5,994 |
|
tobymao__sqlglot-2598
|
diff --git a/sqlglot/time.py b/sqlglot/time.py
index c286ec1e..50ec2ec3 100644
--- a/sqlglot/time.py
+++ b/sqlglot/time.py
@@ -42,6 +42,10 @@ def format_time(
end -= 1
chars = sym
sym = None
+ else:
+ chars = chars[0]
+ end = start + 1
+
start += len(chars)
chunks.append(chars)
current = trie
|
tobymao/sqlglot
|
dc783a8b723ca000e6ff2343675f4f0030716037
|
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index 45bb763b..ab246a3d 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -123,6 +123,7 @@ class TestMySQL(Validator):
self.validate_identity("ALTER TABLE test_table ALTER COLUMN test_column SET DEFAULT 1")
def test_identity(self):
+ self.validate_identity("SELECT DATE_FORMAT(NOW(), '%Y-%m-%d %H:%i:00.0000')")
self.validate_identity("SELECT @var1 := 1, @var2")
self.validate_identity("UNLOCK TABLES")
self.validate_identity("LOCK TABLES `app_fields` WRITE")
|
sqlglot corrupts date_format spec for MySQL
**Before you file an issue**
> - Make sure you specify the "read" dialect eg. parse_one(sql, read="spark")
Yes, `read='mysql'`
> - Check if the issue still exists on main
Yes
**Fully reproducible code snippet**
> Please include a fully reproducible code snippet or the input sql, dialect, and expected output.
```
In [19]: import sqlglot
In [20]: sqlglot.parse_one("date_format(now(), '%Y-%m-%d %H:%i:00.0000')", read='mysql').sql(dialect='mysql')
Out[20]: "DATE_FORMAT(NOW(), '%Y-%m-%d %H:%M:00.0000')"
```
sqlglot uses the `%M` specifier for minutes, but in MySQL `%i` should be used.
**Official Documentation**
> Please include links to official SQL documentation related to your issue.
https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format
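With the `format_time` fix above, the `%i` minute specifier survives the round trip; a sketch based on the added test:

```python
import sqlglot

# "%i" is no longer rewritten to "%M" when reading and re-emitting MySQL.
sql = "SELECT DATE_FORMAT(NOW(), '%Y-%m-%d %H:%i:00.0000')"
assert sqlglot.parse_one(sql, read="mysql").sql(dialect="mysql") == sql
```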
|
0.0
|
dc783a8b723ca000e6ff2343675f4f0030716037
|
[
"tests/dialects/test_mysql.py::TestMySQL::test_identity"
] |
[
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_is_null",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_monthname",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_safe_div",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_types"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-27 21:53:26+00:00
|
mit
| 5,995 |
|
tobymao__sqlglot-2610
|
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index 891596a6..880a6c2d 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -284,6 +284,7 @@ class Postgres(Dialect):
"TEMP": TokenType.TEMPORARY,
"CSTRING": TokenType.PSEUDO_TYPE,
"OID": TokenType.OBJECT_IDENTIFIER,
+ "OPERATOR": TokenType.OPERATOR,
"REGCLASS": TokenType.OBJECT_IDENTIFIER,
"REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
"REGCONFIG": TokenType.OBJECT_IDENTIFIER,
@@ -333,12 +334,13 @@ class Postgres(Dialect):
RANGE_PARSERS = {
**parser.Parser.RANGE_PARSERS,
+ TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
TokenType.DAT: lambda self, this: self.expression(
exp.MatchAgainst, this=self._parse_bitwise(), expressions=[this]
),
- TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
+ TokenType.OPERATOR: lambda self, this: self._parse_operator(this),
}
STATEMENT_PARSERS = {
@@ -346,6 +348,29 @@ class Postgres(Dialect):
TokenType.END: lambda self: self._parse_commit_or_rollback(),
}
+ def _parse_operator(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+ while True:
+ if not self._match(TokenType.L_PAREN):
+ break
+
+ op = ""
+ while self._curr and not self._match(TokenType.R_PAREN):
+ op += self._curr.text
+ self._advance()
+
+ this = self.expression(
+ exp.Operator,
+ comments=self._prev_comments,
+ this=this,
+ operator=op,
+ expression=self._parse_bitwise(),
+ )
+
+ if not self._match(TokenType.OPERATOR):
+ break
+
+ return this
+
def _parse_date_part(self) -> exp.Expression:
part = self._parse_type()
self._match(TokenType.COMMA)
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index aad679d0..5c276be8 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -4036,6 +4036,11 @@ class NEQ(Binary, Predicate):
pass
+# https://www.postgresql.org/docs/current/ddl-schemas.html#DDL-SCHEMAS-PATH
+class Operator(Binary):
+ arg_types = {"this": True, "operator": True, "expression": True}
+
+
class SimilarTo(Binary, Predicate):
pass
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 9e432716..14125733 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -3050,6 +3050,9 @@ class Generator:
table = "" if isinstance(expression.this, exp.Literal) else "TABLE "
return f"REFRESH {table}{this}"
+ def operator_sql(self, expression: exp.Operator) -> str:
+ return self.binary(expression, f"OPERATOR({self.sql(expression, 'operator')})")
+
def _simplify_unless_literal(self, expression: E) -> E:
if not isinstance(expression, exp.Literal):
from sqlglot.optimizer.simplify import simplify
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 1689c43f..60bd1a7b 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -293,6 +293,7 @@ class Parser(metaclass=_Parser):
TokenType.NATURAL,
TokenType.NEXT,
TokenType.OFFSET,
+ TokenType.OPERATOR,
TokenType.ORDINALITY,
TokenType.OVERLAPS,
TokenType.OVERWRITE,
@@ -3336,7 +3337,7 @@ class Parser(metaclass=_Parser):
return this
- def _parse_between(self, this: exp.Expression) -> exp.Between:
+ def _parse_between(self, this: t.Optional[exp.Expression]) -> exp.Between:
low = self._parse_bitwise()
self._match(TokenType.AND)
high = self._parse_bitwise()
@@ -5362,7 +5363,9 @@ class Parser(metaclass=_Parser):
self._match_r_paren()
return self.expression(exp.DictRange, this=this, min=min, max=max)
- def _parse_comprehension(self, this: exp.Expression) -> t.Optional[exp.Comprehension]:
+ def _parse_comprehension(
+ self, this: t.Optional[exp.Expression]
+ ) -> t.Optional[exp.Comprehension]:
index = self._index
expression = self._parse_column()
if not self._match(TokenType.IN):
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index e933929e..a8c7dfc4 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -276,6 +276,7 @@ class TokenType(AutoName):
OBJECT_IDENTIFIER = auto()
OFFSET = auto()
ON = auto()
+ OPERATOR = auto()
ORDER_BY = auto()
ORDERED = auto()
ORDINALITY = auto()
|
tobymao/sqlglot
|
4ec01d398535738a55c15202a5a88bae3f9a86dc
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index ef5fc99b..0435aad7 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -177,6 +177,20 @@ class TestPostgres(Validator):
},
)
+ def test_operator(self):
+ expr = parse_one("1 OPERATOR(+) 2 OPERATOR(*) 3", read="postgres")
+
+ expr.left.assert_is(exp.Operator)
+ expr.left.left.assert_is(exp.Literal)
+ expr.left.right.assert_is(exp.Literal)
+ expr.right.assert_is(exp.Literal)
+ self.assertEqual(expr.sql(dialect="postgres"), "1 OPERATOR(+) 2 OPERATOR(*) 3")
+
+ self.validate_identity("SELECT operator FROM t")
+ self.validate_identity("SELECT 1 OPERATOR(+) 2")
+ self.validate_identity("SELECT 1 OPERATOR(+) /* foo */ 2")
+ self.validate_identity("SELECT 1 OPERATOR(pg_catalog.+) 2")
+
def test_postgres(self):
expr = parse_one(
"SELECT * FROM r CROSS JOIN LATERAL UNNEST(ARRAY[1]) AS s(location)", read="postgres"
@@ -203,6 +217,14 @@ class TestPostgres(Validator):
self.assertIsInstance(expr, exp.AlterTable)
self.assertEqual(expr.sql(dialect="postgres"), alter_table_only)
+ self.validate_identity(
+ "SELECT c.oid, n.nspname, c.relname "
+ "FROM pg_catalog.pg_class AS c "
+ "LEFT JOIN pg_catalog.pg_namespace AS n ON n.oid = c.relnamespace "
+ "WHERE c.relname OPERATOR(pg_catalog.~) '^(courses)$' COLLATE pg_catalog.default AND "
+ "pg_catalog.PG_TABLE_IS_VISIBLE(c.oid) "
+ "ORDER BY 2, 3"
+ )
self.validate_identity(
"SELECT ARRAY[]::INT[] AS foo",
"SELECT CAST(ARRAY[] AS INT[]) AS foo",
|
Not able to parse this query with the postgres dialect
```python
from sqlglot import exp, parse_one

# query
sql5 = """SELECT c.oid, n.nspname, c.relname FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relname OPERATOR(pg_catalog.~) '^(courses)$' COLLATE pg_catalog.default AND pg_catalog.pg_table_is_visible(c.oid) ORDER BY 2, 3;"""

# Parse the SQL query
parsed_sql = parse_one(sql5, read="postgres")
select_clauses = parsed_sql.find_all(exp.Select)
```
This raises:
```
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 1, Col: 148.
  pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relname OPERATOR(pg_catalog.~) '^(courses)$' COLLATE pg_catalog.default AND pg_catalog.pg_table_is_visible(c.oid) OR
```
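A sketch of the parsing behavior the patch enables, mirroring the added tests:

```python
from sqlglot import exp, parse_one

expr = parse_one("1 OPERATOR(+) 2 OPERATOR(*) 3", read="postgres")
# OPERATOR(...) chains parse left-associatively into exp.Operator nodes
# and round-trip through the Postgres generator unchanged.
expr.left.assert_is(exp.Operator)
assert expr.sql(dialect="postgres") == "1 OPERATOR(+) 2 OPERATOR(*) 3"
```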
|
0.0
|
4ec01d398535738a55c15202a5a88bae3f9a86dc
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-30 14:21:25+00:00
|
mit
| 5,996 |
|
tobymao__sqlglot-2619
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 44f917f0..137cd367 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -127,6 +127,7 @@ class DuckDB(Dialect):
NULL_ORDERING = "nulls_are_last"
SUPPORTS_USER_DEFINED_TYPES = False
SAFE_DIVISION = True
+ INDEX_OFFSET = 1
# https://duckdb.org/docs/sql/introduction.html#creating-a-new-table
RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
|
tobymao/sqlglot
|
5575d71798ba0009cc9e606ded7918bccfe0bb1d
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index b053f355..687a807b 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -1,4 +1,5 @@
from sqlglot import ErrorLevel, UnsupportedError, exp, parse_one, transpile
+from sqlglot.helper import logger as helper_logger
from tests.dialects.test_dialect import Validator
@@ -96,7 +97,6 @@ class TestDuckDB(Validator):
)
self.validate_identity("SELECT i FROM RANGE(5) AS _(i) ORDER BY i ASC")
- self.validate_identity("[x.STRING_SPLIT(' ')[1] FOR x IN ['1', '2', 3] IF x.CONTAINS('1')]")
self.validate_identity("INSERT INTO x BY NAME SELECT 1 AS y")
self.validate_identity("SELECT 1 AS x UNION ALL BY NAME SELECT 2 AS x")
self.validate_identity("SELECT SUM(x) FILTER (x = 1)", "SELECT SUM(x) FILTER(WHERE x = 1)")
@@ -504,6 +504,35 @@ class TestDuckDB(Validator):
self.validate_identity("SELECT ISNAN(x)")
+ def test_array_index(self):
+ with self.assertLogs(helper_logger) as cm:
+ self.validate_all(
+ "SELECT some_arr[1] AS first FROM blah",
+ read={
+ "bigquery": "SELECT some_arr[0] AS first FROM blah",
+ },
+ write={
+ "bigquery": "SELECT some_arr[0] AS first FROM blah",
+ "duckdb": "SELECT some_arr[1] AS first FROM blah",
+ "presto": "SELECT some_arr[1] AS first FROM blah",
+ },
+ )
+ self.validate_identity(
+ "[x.STRING_SPLIT(' ')[1] FOR x IN ['1', '2', 3] IF x.CONTAINS('1')]"
+ )
+
+ self.assertEqual(
+ cm.output,
+ [
+ "WARNING:sqlglot:Applying array index offset (-1)",
+ "WARNING:sqlglot:Applying array index offset (1)",
+ "WARNING:sqlglot:Applying array index offset (1)",
+ "WARNING:sqlglot:Applying array index offset (1)",
+ "WARNING:sqlglot:Applying array index offset (-1)",
+ "WARNING:sqlglot:Applying array index offset (1)",
+ ],
+ )
+
def test_time(self):
self.validate_identity("SELECT CURRENT_DATE")
self.validate_identity("SELECT CURRENT_TIMESTAMP")
|
DuckDB bracket operator is 1-indexed
**Before you file an issue**
- Make sure you specify the "read" dialect, e.g. parse_one(sql, read="spark"). (BigQuery, in this case.)
- Check if the issue still exists on main. (Yes.)
**Fully reproducible code snippet**
Please include a fully reproducible code snippet or the input sql, dialect, and expected output.
DuckDB arrays are 1-indexed. When providing a literal to the subscript/bracket operator, the expected output for a transpilation of
```sql
SELECT some_arr[0] AS first FROM blah
```
should render to
```sql
SELECT some_arr[1] AS first FROM blah
```
in DuckDB. However, this is not the case:
```python
>>> transpile("SELECT some_arr[0] AS first FROM blah", read="bigquery", write="duckdb")
['SELECT some_arr[0] AS first FROM blah']
```
I attempted to patch this by incrementing in the case that `exp.Bracket.expressions` is of length 1 and type `Literal`, but this does not seem to be directionally correct (do you still increment in the case where the subscript is a column?). I think there are some semantics concerns here which I'm not well-versed enough to address.
**Official Documentation**
Please include links to official SQL documentation related to your issue.
[DuckDB array documentation](https://duckdb.org/docs/sql/data_types/array.html)
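With `INDEX_OFFSET = 1` on the DuckDB dialect, the transpiler applies the shift (and logs a warning when it does); a sketch based on the added test:

```python
from sqlglot import transpile

# Literal subscripts are shifted between 0-indexed BigQuery and 1-indexed
# DuckDB; sqlglot logs "Applying array index offset" when it adjusts them.
assert transpile("SELECT some_arr[0] AS first FROM blah", read="bigquery", write="duckdb") == [
    "SELECT some_arr[1] AS first FROM blah"
]
```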
|
0.0
|
5575d71798ba0009cc9e606ded7918bccfe0bb1d
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_array_index"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_isnan",
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialects/test_duckdb.py::TestDuckDB::test_isinf",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests/dialects/test_duckdb.py::TestDuckDB::test_timestamps_with_units",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-01 22:35:51+00:00
|
mit
| 5,997 |
|
tobymao__sqlglot-2632
|
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index 03f8537b..a60891fe 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -136,6 +136,9 @@ def _parse_hashbytes(args: t.List) -> exp.Expression:
return exp.func("HASHBYTES", *args)
+DATEPART_ONLY_FORMATS = {"dw", "hour", "quarter"}
+
+
def _format_sql(self: TSQL.Generator, expression: exp.NumberToStr | exp.TimeToStr) -> str:
fmt = (
expression.args["format"]
@@ -149,8 +152,8 @@ def _format_sql(self: TSQL.Generator, expression: exp.NumberToStr | exp.TimeToSt
)
# There is no format for "quarter"
- if fmt.name.lower() == "quarter":
- return self.func("DATEPART", "QUARTER", expression.this)
+ if fmt.name.lower() in DATEPART_ONLY_FORMATS:
+ return self.func("DATEPART", fmt.name, expression.this)
return self.func("FORMAT", expression.this, fmt, expression.args.get("culture"))
|
tobymao/sqlglot
|
7840393b3ad9c54ec6fed66b522f0f3f8e0edbf3
|
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 897aa24f..88d1ebcb 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -891,8 +891,26 @@ WHERE
)
def test_datepart(self):
- self.validate_identity("DATEPART(QUARTER, x)", "DATEPART(QUARTER, CAST(x AS DATETIME2))")
- self.validate_identity("DATEPART(YEAR, x)", "FORMAT(CAST(x AS DATETIME2), 'yyyy')")
+ self.validate_identity(
+ "DATEPART(QUARTER, x)",
+ "DATEPART(quarter, CAST(x AS DATETIME2))",
+ )
+ self.validate_identity(
+ "DATEPART(YEAR, x)",
+ "FORMAT(CAST(x AS DATETIME2), 'yyyy')",
+ )
+ self.validate_identity(
+ "DATEPART(HOUR, date_and_time)",
+ "DATEPART(hour, CAST(date_and_time AS DATETIME2))",
+ )
+ self.validate_identity(
+ "DATEPART(WEEKDAY, date_and_time)",
+ "DATEPART(dw, CAST(date_and_time AS DATETIME2))",
+ )
+ self.validate_identity(
+ "DATEPART(DW, date_and_time)",
+ "DATEPART(dw, CAST(date_and_time AS DATETIME2))",
+ )
self.validate_all(
"SELECT DATEPART(month,'1970-01-01')",
|
Inaccurate sql generation for DATEPART in T-SQL
Similar to [2586](https://github.com/tobymao/sqlglot/issues/2586), the DATEPART function fails for HOUR and WEEKDAY.
**Fully reproducible code snippet**
```
query = """SELECT DATEPART(HOUR, date_and_time) AS 'Hour' FROM table """
print(parse_one(query, 'tsql').sql(dialect='tsql'))
```
This gives `SELECT FORMAT(CAST(date_and_time AS DATETIME2), 'hour') AS "Hour" FROM table`, but the [format code](https://learn.microsoft.com/en-us/dotnet/standard/base-types/custom-date-and-time-format-strings) is 'h'. Similarly, for WEEKDAY it gives 'dw', which is not present in the format codes (though I couldn't find a corresponding code).
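For reference, a quick check against the expectations encoded in the test patch above (assuming the fix is applied):
```python
from sqlglot import parse_one

# Expected round-trips after the fix, per the test patch.
for sql in ("DATEPART(HOUR, date_and_time)", "DATEPART(WEEKDAY, date_and_time)"):
    print(parse_one(sql, read="tsql").sql(dialect="tsql"))
# DATEPART(hour, CAST(date_and_time AS DATETIME2))
# DATEPART(dw, CAST(date_and_time AS DATETIME2))
```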
|
0.0
|
7840393b3ad9c54ec6fed66b522f0f3f8e0edbf3
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_datepart"
] |
[
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_temporal_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-06 11:28:44+00:00
|
mit
| 5,998 |
|
tobymao__sqlglot-2640
|
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index 38f7b7a1..da182aae 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -108,6 +108,7 @@ class ClickHouse(Dialect):
FUNCTION_PARSERS = {
**parser.Parser.FUNCTION_PARSERS,
+ "ARRAYJOIN": lambda self: self.expression(exp.Explode, this=self._parse_expression()),
"QUANTILE": lambda self: self._parse_quantile(),
}
@@ -382,6 +383,7 @@ class ClickHouse(Dialect):
exp.DateDiff: lambda self, e: self.func(
"DATE_DIFF", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
),
+ exp.Explode: rename_func("arrayJoin"),
exp.Final: lambda self, e: f"{self.sql(e, 'this')} FINAL",
exp.IsNan: rename_func("isNaN"),
exp.Map: lambda self, e: _lower_func(var_map_sql(self, e)),
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index a746206b..c7e27a30 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -3335,7 +3335,8 @@ class Parser(metaclass=_Parser):
unnest = self._parse_unnest(with_alias=False)
if unnest:
this = self.expression(exp.In, this=this, unnest=unnest)
- elif self._match(TokenType.L_PAREN):
+ elif self._match_set((TokenType.L_PAREN, TokenType.L_BRACKET)):
+ matched_l_paren = self._prev.token_type == TokenType.L_PAREN
expressions = self._parse_csv(lambda: self._parse_select_or_expression(alias=alias))
if len(expressions) == 1 and isinstance(expressions[0], exp.Subqueryable):
@@ -3343,7 +3344,10 @@ class Parser(metaclass=_Parser):
else:
this = self.expression(exp.In, this=this, expressions=expressions)
- self._match_r_paren(this)
+ if matched_l_paren:
+ self._match_r_paren(this)
+ elif not self._match(TokenType.R_BRACKET, expression=this):
+ self.raise_error("Expecting ]")
else:
this = self.expression(exp.In, this=this, field=self._parse_field())
|
tobymao/sqlglot
|
1a231f7d83da43b66517359f0c13883f829dd2d5
|
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 03011f02..86ddb008 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -6,22 +6,6 @@ class TestClickhouse(Validator):
dialect = "clickhouse"
def test_clickhouse(self):
- self.validate_identity("x <> y")
-
- self.validate_all(
- "has([1], x)",
- read={
- "postgres": "x = any(array[1])",
- },
- )
- self.validate_all(
- "NOT has([1], x)",
- read={
- "postgres": "any(array[1]) <> x",
- },
- )
- self.validate_identity("x = y")
-
string_types = [
"BLOB",
"LONGBLOB",
@@ -40,6 +24,8 @@ class TestClickhouse(Validator):
self.assertEqual(expr.sql(dialect="clickhouse"), "COUNT(x)")
self.assertIsNone(expr._meta)
+ self.validate_identity("x = y")
+ self.validate_identity("x <> y")
self.validate_identity("SELECT * FROM (SELECT a FROM b SAMPLE 0.01)")
self.validate_identity("SELECT * FROM (SELECT a FROM b SAMPLE 1 / 10 OFFSET 1 / 2)")
self.validate_identity("SELECT sum(foo * bar) FROM bla SAMPLE 10000000")
@@ -81,7 +67,17 @@ class TestClickhouse(Validator):
self.validate_identity("position(haystack, needle, position)")
self.validate_identity("CAST(x AS DATETIME)")
self.validate_identity("CAST(x as MEDIUMINT)", "CAST(x AS Int32)")
-
+ self.validate_identity("SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src")
+ self.validate_identity(
+ "SELECT SUM(1) AS impressions, arrayJoin(cities) AS city, arrayJoin(browsers) AS browser FROM (SELECT ['Istanbul', 'Berlin', 'Bobruisk'] AS cities, ['Firefox', 'Chrome', 'Chrome'] AS browsers) GROUP BY 2, 3"
+ )
+ self.validate_identity(
+ "SELECT sum(1) AS impressions, (arrayJoin(arrayZip(cities, browsers)) AS t).1 AS city, t.2 AS browser FROM (SELECT ['Istanbul', 'Berlin', 'Bobruisk'] AS cities, ['Firefox', 'Chrome', 'Chrome'] AS browsers) GROUP BY 2, 3"
+ )
+ self.validate_identity(
+ "SELECT SUM(1) AS impressions FROM (SELECT ['Istanbul', 'Berlin', 'Bobruisk'] AS cities) WHERE arrayJoin(cities) IN ['Istanbul', 'Berlin']",
+ "SELECT SUM(1) AS impressions FROM (SELECT ['Istanbul', 'Berlin', 'Bobruisk'] AS cities) WHERE arrayJoin(cities) IN ('Istanbul', 'Berlin')",
+ )
self.validate_identity(
'SELECT CAST(tuple(1 AS "a", 2 AS "b", 3.0 AS "c").2 AS Nullable(String))'
)
@@ -101,6 +97,25 @@ class TestClickhouse(Validator):
"CREATE MATERIALIZED VIEW test_view (id UInt8) TO db.table1 AS SELECT * FROM test_data"
)
+ self.validate_all(
+ "SELECT arrayJoin([1,2,3])",
+ write={
+ "clickhouse": "SELECT arrayJoin([1, 2, 3])",
+ "postgres": "SELECT UNNEST(ARRAY[1, 2, 3])",
+ },
+ )
+ self.validate_all(
+ "has([1], x)",
+ read={
+ "postgres": "x = any(array[1])",
+ },
+ )
+ self.validate_all(
+ "NOT has([1], x)",
+ read={
+ "postgres": "any(array[1]) <> x",
+ },
+ )
self.validate_all(
"SELECT CAST('2020-01-01' AS TIMESTAMP) + INTERVAL '500' microsecond",
read={
|
clickhouse: exp.Unnest is not reported correctly, because ClickHouse uses `arrayJoin()` as its `unnest()` function
In ClickHouse
```sql
SELECT arrayJoin(a), b FROM t
```
should be interpreted as
```sql
SELECT unnest(a), b FROM t
```
See docs: https://clickhouse.com/docs/en/sql-reference/functions/array-join
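A transpilation example consistent with the test patch above (assuming the fix is applied):
```python
import sqlglot

# arrayJoin is ClickHouse's unnest; after the fix it parses to exp.Explode,
# which renders as UNNEST in dialects such as Postgres.
print(sqlglot.transpile("SELECT arrayJoin([1, 2, 3])", read="clickhouse", write="postgres")[0])
# SELECT UNNEST(ARRAY[1, 2, 3])
```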
|
0.0
|
1a231f7d83da43b66517359f0c13883f829dd2d5
|
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse"
] |
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-07 22:52:28+00:00
|
mit
| 5,999 |
|
tobymao__sqlglot-2658
|
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index ef431119..ef3dc237 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -587,6 +587,7 @@ class Tokenizer(metaclass=_Tokenizer):
# Ensures we don't count an extra line if we get a \r\n line break sequence
if self._char == "\r" and self._peek == "\n":
i = 2
+ self._start += 1
self._col = 1
self._line += 1
diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs
index eb800f09..f386ce63 100644
--- a/sqlglotrs/src/tokenizer.rs
+++ b/sqlglotrs/src/tokenizer.rs
@@ -140,6 +140,7 @@ impl<'a> TokenizerState<'a> {
// Ensures we don't count an extra line if we get a \r\n line break sequence.
if self.current_char == '\r' && self.peek_char == '\n' {
i = 2;
+ self.start += 1;
}
self.column = 1;
|
tobymao/sqlglot
|
bf5f14673a7ec592ef33b3c56516d686e67b2fe7
|
diff --git a/tests/test_tokens.py b/tests/test_tokens.py
index b97f54a6..970c1ac2 100644
--- a/tests/test_tokens.py
+++ b/tests/test_tokens.py
@@ -71,6 +71,20 @@ x"""
self.assertEqual(tokens[2].line, 2)
self.assertEqual(tokens[3].line, 3)
+ def test_crlf(self):
+ tokens = Tokenizer().tokenize("SELECT a\r\nFROM b")
+ tokens = [(token.token_type, token.text) for token in tokens]
+
+ self.assertEqual(
+ tokens,
+ [
+ (TokenType.SELECT, "SELECT"),
+ (TokenType.VAR, "a"),
+ (TokenType.FROM, "FROM"),
+ (TokenType.VAR, "b"),
+ ],
+ )
+
def test_command(self):
tokens = Tokenizer().tokenize("SHOW;")
self.assertEqual(tokens[0].token_type, TokenType.SHOW)
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index b732b459..fb8f8313 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -89,6 +89,7 @@ class TestTranspile(unittest.TestCase):
self.validate("SELECT MIN(3)>=MIN(2)", "SELECT MIN(3) >= MIN(2)")
self.validate("SELECT 1>0", "SELECT 1 > 0")
self.validate("SELECT 3>=3", "SELECT 3 >= 3")
+ self.validate("SELECT a\r\nFROM b", "SELECT a FROM b")
def test_comments(self):
self.validate(
|
\r\n sometimes causes a parsing error
Repro
```python
from sqlglot import parse_one
parse_one('select a\r\nfrom b')
```
It appears that when the fix for line numbers went in, the advance function started skipping the \n without advancing start, so the token text becomes \nfrom, which doesn't match.
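The test patch above verifies this by tokenizing a CRLF-separated statement; roughly:
```python
from sqlglot.tokens import Tokenizer

# With the fix, "\r\n" no longer leaks into the next token's text, so FROM
# is recognized as a keyword instead of becoming the text "\nfrom".
tokens = Tokenizer().tokenize("SELECT a\r\nFROM b")
print([(t.token_type, t.text) for t in tokens])
```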
|
0.0
|
bf5f14673a7ec592ef33b3c56516d686e67b2fe7
|
[
"tests/test_tokens.py::TestTokens::test_crlf",
"tests/test_transpile.py::TestTranspile::test_space"
] |
[
"tests/test_tokens.py::TestTokens::test_command",
"tests/test_tokens.py::TestTokens::test_comment_attachment",
"tests/test_tokens.py::TestTokens::test_error_msg",
"tests/test_tokens.py::TestTokens::test_jinja",
"tests/test_tokens.py::TestTokens::test_space_keywords",
"tests/test_tokens.py::TestTokens::test_token_line_col",
"tests/test_transpile.py::TestTranspile::test_alias",
"tests/test_transpile.py::TestTranspile::test_alter",
"tests/test_transpile.py::TestTranspile::test_comments",
"tests/test_transpile.py::TestTranspile::test_error_level",
"tests/test_transpile.py::TestTranspile::test_extract",
"tests/test_transpile.py::TestTranspile::test_identify_lambda",
"tests/test_transpile.py::TestTranspile::test_identity",
"tests/test_transpile.py::TestTranspile::test_if",
"tests/test_transpile.py::TestTranspile::test_index_offset",
"tests/test_transpile.py::TestTranspile::test_leading_comma",
"tests/test_transpile.py::TestTranspile::test_normalize_name",
"tests/test_transpile.py::TestTranspile::test_not_range",
"tests/test_transpile.py::TestTranspile::test_paren",
"tests/test_transpile.py::TestTranspile::test_partial",
"tests/test_transpile.py::TestTranspile::test_pretty",
"tests/test_transpile.py::TestTranspile::test_pretty_line_breaks",
"tests/test_transpile.py::TestTranspile::test_some",
"tests/test_transpile.py::TestTranspile::test_time",
"tests/test_transpile.py::TestTranspile::test_types",
"tests/test_transpile.py::TestTranspile::test_unary",
"tests/test_transpile.py::TestTranspile::test_unsupported_level",
"tests/test_transpile.py::TestTranspile::test_weird_chars",
"tests/test_transpile.py::TestTranspile::test_with"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-12 16:49:30+00:00
|
mit
| 6,000 |
|
tobymao__sqlglot-2659
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index d981ffd2..41afad80 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -84,6 +84,20 @@ def _parse_date_diff(args: t.List) -> exp.Expression:
return exp.DateDiff(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0))
+def _parse_make_timestamp(args: t.List) -> exp.Expression:
+ if len(args) == 1:
+ return exp.UnixToTime(this=seq_get(args, 0), scale=exp.UnixToTime.MICROS)
+
+ return exp.TimestampFromParts(
+ year=seq_get(args, 0),
+ month=seq_get(args, 1),
+ day=seq_get(args, 2),
+ hour=seq_get(args, 3),
+ min=seq_get(args, 4),
+ sec=seq_get(args, 5),
+ )
+
+
def _struct_sql(self: DuckDB.Generator, expression: exp.Struct) -> str:
args: t.List[str] = []
for expr in expression.expressions:
@@ -199,9 +213,7 @@ class DuckDB(Dialect):
"LIST_REVERSE_SORT": _sort_array_reverse,
"LIST_SORT": exp.SortArray.from_arg_list,
"LIST_VALUE": exp.Array.from_arg_list,
- "MAKE_TIMESTAMP": lambda args: exp.UnixToTime(
- this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
- ),
+ "MAKE_TIMESTAMP": _parse_make_timestamp,
"MEDIAN": lambda args: exp.PercentileCont(
this=seq_get(args, 0), expression=exp.Literal.number(0.5)
),
@@ -349,6 +361,7 @@ class DuckDB(Dialect):
exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
exp.Struct: _struct_sql,
exp.Timestamp: no_timestamp_sql,
+ exp.TimestampFromParts: rename_func("MAKE_TIMESTAMP"),
exp.TimestampTrunc: timestamptrunc_sql,
exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
exp.TimeStrToTime: timestrtotime_sql,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 99722be1..19a96df2 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -5233,6 +5233,19 @@ class UnixToTimeStr(Func):
pass
+class TimestampFromParts(Func):
+ """Constructs a timestamp given its constituent parts."""
+
+ arg_types = {
+ "year": True,
+ "month": True,
+ "day": True,
+ "hour": True,
+ "min": True,
+ "sec": True,
+ }
+
+
class Upper(Func):
_sql_names = ["UPPER", "UCASE"]
|
tobymao/sqlglot
|
238300381b53c232a7ad0fd9e5b8b2ceaf563f08
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 412f5c49..f9151681 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -116,6 +116,8 @@ class TestDuckDB(Validator):
parse_one("a // b", read="duckdb").assert_is(exp.IntDiv).sql(dialect="duckdb"), "a // b"
)
+ self.validate_identity("MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)")
+ self.validate_identity("MAKE_TIMESTAMP(1667810584123456)")
self.validate_identity("SELECT EPOCH_MS(10) AS t")
self.validate_identity("SELECT MAKE_TIMESTAMP(10) AS t")
self.validate_identity("SELECT TO_TIMESTAMP(10) AS t")
|
`make_timestamp` parsing for duckdb is incorrect, and it also doesn't accept the correct number of arguments (6)
**Fully reproducible code snippet**
Not sure what's happening here, but all inputs after the first are being thrown away:
```
In [6]: sg.parse_one("select make_timestamp(1,2,3,4,5)", read="duckdb")
Out[6]:
(SELECT expressions:
(UNIXTOTIME this:
(LITERAL this: 1, is_string: False), scale:
(LITERAL this: micros, is_string: True)))
In [8]: sg.parse_one("select make_timestamp(1,2,3,4,5)", read="duckdb").sql('duckdb')
Out[8]: 'SELECT MAKE_TIMESTAMP(1)'
```
Second, this particular overload of DuckDB's `make_timestamp` accepts 6 arguments per the documentation (see the links under **Official Documentation** below), yet parsing it fails:

```
In [9]: sg.parse_one("select make_timestamp(1,2,3,4,5,6)", read="duckdb")
...
ParseError: The number of provided arguments (6) is greater than the maximum number of supported arguments (5). Line 1, Col: 34.
select make_timestamp(1,2,3,4,5,6)
```
**Official Documentation**
https://duckdb.org/docs/sql/functions/timestamp#timestamp-functions
https://duckdb.org/docs/sql/functions/timestamptz#icu-timestamp-with-time-zone-functions
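Per the test patch, both overloads should round-trip after the fix; for example:
```python
import sqlglot

# The 6-argument constructor and the single epoch-micros overload.
for sql in (
    "MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)",
    "MAKE_TIMESTAMP(1667810584123456)",
):
    print(sqlglot.parse_one(sql, read="duckdb").sql(dialect="duckdb"))
# MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)
# MAKE_TIMESTAMP(1667810584123456)
```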
|
0.0
|
238300381b53c232a7ad0fd9e5b8b2ceaf563f08
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array_index",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests/dialects/test_duckdb.py::TestDuckDB::test_isinf",
"tests/dialects/test_duckdb.py::TestDuckDB::test_isnan",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_duckdb.py::TestDuckDB::test_timestamps_with_units"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-12 17:57:08+00:00
|
mit
| 6,001 |
|
tobymao__sqlglot-2674
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 42e8c661..fca42d48 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -369,12 +369,35 @@ class Snowflake(Dialect):
return lateral
+ def _parse_at_before(self, table: exp.Table) -> exp.Table:
+ # https://docs.snowflake.com/en/sql-reference/constructs/at-before
+ index = self._index
+ if self._match_texts(("AT", "BEFORE")):
+ this = self._prev.text.upper()
+ kind = (
+ self._match(TokenType.L_PAREN)
+ and self._match_texts(self.HISTORICAL_DATA_KIND)
+ and self._prev.text.upper()
+ )
+ expression = self._match(TokenType.FARROW) and self._parse_bitwise()
+
+ if expression:
+ self._match_r_paren()
+ when = self.expression(
+ exp.HistoricalData, this=this, kind=kind, expression=expression
+ )
+ table.set("when", when)
+ else:
+ self._retreat(index)
+
+ return table
+
def _parse_table_parts(self, schema: bool = False) -> exp.Table:
# https://docs.snowflake.com/en/user-guide/querying-stage
- if self._match_text_seq("@", advance=False):
- table: t.Optional[exp.Expression] = self._parse_location_path()
- elif self._match(TokenType.STRING, advance=False):
+ if self._match(TokenType.STRING, advance=False):
table = self._parse_string()
+ elif self._match_text_seq("@", advance=False):
+ table = self._parse_location_path()
else:
table = None
@@ -393,9 +416,11 @@ class Snowflake(Dialect):
self._match(TokenType.COMMA)
- return self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
+ table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
+ else:
+ table = super()._parse_table_parts(schema=schema)
- return super()._parse_table_parts(schema=schema)
+ return self._parse_at_before(table)
def _parse_id_var(
self,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 19a96df2..6990344e 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1105,14 +1105,7 @@ class Create(DDL):
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_clone_statement
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_copy
class Clone(Expression):
- arg_types = {
- "this": True,
- "when": False,
- "kind": False,
- "shallow": False,
- "expression": False,
- "copy": False,
- }
+ arg_types = {"this": True, "shallow": False, "copy": False}
class Describe(Expression):
@@ -2522,6 +2515,11 @@ class IndexTableHint(Expression):
arg_types = {"this": True, "expressions": False, "target": False}
+# https://docs.snowflake.com/en/sql-reference/constructs/at-before
+class HistoricalData(Expression):
+ arg_types = {"this": True, "kind": True, "expression": True}
+
+
class Table(Expression):
arg_types = {
"this": True,
@@ -2538,6 +2536,7 @@ class Table(Expression):
"pattern": False,
"index": False,
"ordinality": False,
+ "when": False,
}
@property
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index f3f90601..e03462d1 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -862,15 +862,7 @@ class Generator:
this = self.sql(expression, "this")
shallow = "SHALLOW " if expression.args.get("shallow") else ""
keyword = "COPY" if expression.args.get("copy") and self.SUPPORTS_TABLE_COPY else "CLONE"
- this = f"{shallow}{keyword} {this}"
- when = self.sql(expression, "when")
-
- if when:
- kind = self.sql(expression, "kind")
- expr = self.sql(expression, "expression")
- return f"{this} {when} ({kind} => {expr})"
-
- return this
+ return f"{shallow}{keyword} {this}"
def describe_sql(self, expression: exp.Describe) -> str:
return f"DESCRIBE {self.sql(expression, 'this')}"
@@ -1400,6 +1392,12 @@ class Generator:
target = f" FOR {target}" if target else ""
return f"{this}{target} ({self.expressions(expression, flat=True)})"
+ def historicaldata_sql(self, expression: exp.HistoricalData) -> str:
+ this = self.sql(expression, "this")
+ kind = self.sql(expression, "kind")
+ expr = self.sql(expression, "expression")
+ return f"{this} ({kind} => {expr})"
+
def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
table = ".".join(
self.sql(part)
@@ -1436,6 +1434,10 @@ class Generator:
ordinality = f" WITH ORDINALITY{alias}"
alias = ""
+ when = self.sql(expression, "when")
+ if when:
+ table = f"{table} {when}"
+
return f"{table}{version}{file_format}{alias}{index}{hints}{pivots}{joins}{laterals}{ordinality}"
def tablesample_sql(
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 1fae5959..5399b293 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -907,7 +907,7 @@ class Parser(metaclass=_Parser):
INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}
CLONE_KEYWORDS = {"CLONE", "COPY"}
- CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"}
+ HISTORICAL_DATA_KIND = {"TIMESTAMP", "OFFSET", "STATEMENT", "STREAM"}
OPCLASS_FOLLOW_KEYWORDS = {"ASC", "DESC", "NULLS"}
OPTYPE_FOLLOW_TOKENS = {TokenType.COMMA, TokenType.R_PAREN}
@@ -1411,23 +1411,8 @@ class Parser(metaclass=_Parser):
if self._match_texts(self.CLONE_KEYWORDS):
copy = self._prev.text.lower() == "copy"
- clone = self._parse_table(schema=True)
- when = self._match_texts(("AT", "BEFORE")) and self._prev.text.upper()
- clone_kind = (
- self._match(TokenType.L_PAREN)
- and self._match_texts(self.CLONE_KINDS)
- and self._prev.text.upper()
- )
- clone_expression = self._match(TokenType.FARROW) and self._parse_bitwise()
- self._match(TokenType.R_PAREN)
clone = self.expression(
- exp.Clone,
- this=clone,
- when=when,
- kind=clone_kind,
- shallow=shallow,
- expression=clone_expression,
- copy=copy,
+ exp.Clone, this=self._parse_table(schema=True), shallow=shallow, copy=copy
)
return self.expression(
|
tobymao/sqlglot
|
2ae0debec0b945b0ece250d8e1e29b072b05602a
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 29931323..13f32c13 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -72,6 +72,18 @@ WHERE
self.validate_identity(
'DESCRIBE TABLE "SNOWFLAKE_SAMPLE_DATA"."TPCDS_SF100TCL"."WEB_SITE" type=stage'
)
+ self.validate_identity(
+ "SELECT * FROM foo at",
+ "SELECT * FROM foo AS at",
+ )
+ self.validate_identity(
+ "SELECT * FROM foo before",
+ "SELECT * FROM foo AS before",
+ )
+ self.validate_identity(
+ "SELECT * FROM foo at (col)",
+ "SELECT * FROM foo AS at(col)",
+ )
self.validate_identity(
"SELECT * FROM unnest(x) with ordinality",
"SELECT * FROM TABLE(FLATTEN(INPUT => x)) AS _u(seq, key, path, index, value, this)",
@@ -779,6 +791,53 @@ WHERE
},
)
+ def test_historical_data(self):
+ self.validate_identity("SELECT * FROM my_table AT (STATEMENT => $query_id_var)")
+ self.validate_identity("SELECT * FROM my_table AT (OFFSET => -60 * 5)")
+ self.validate_identity("SELECT * FROM my_table BEFORE (STATEMENT => $query_id_var)")
+ self.validate_identity("SELECT * FROM my_table BEFORE (OFFSET => -60 * 5)")
+ self.validate_identity("CREATE SCHEMA restored_schema CLONE my_schema AT (OFFSET => -3600)")
+ self.validate_identity(
+ "CREATE TABLE restored_table CLONE my_table AT (TIMESTAMP => CAST('Sat, 09 May 2015 01:01:00 +0300' AS TIMESTAMPTZ))",
+ )
+ self.validate_identity(
+ "CREATE DATABASE restored_db CLONE my_db BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726')"
+ )
+ self.validate_identity(
+ "SELECT * FROM my_table AT (TIMESTAMP => TO_TIMESTAMP(1432669154242, 3))"
+ )
+ self.validate_identity(
+ "SELECT * FROM my_table AT (OFFSET => -60 * 5) AS T WHERE T.flag = 'valid'"
+ )
+ self.validate_identity(
+ "SELECT * FROM my_table AT (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726')"
+ )
+ self.validate_identity(
+ "SELECT * FROM my_table BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726')"
+ )
+ self.validate_identity(
+ "SELECT * FROM my_table AT (TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp)",
+ "SELECT * FROM my_table AT (TIMESTAMP => CAST('Fri, 01 May 2015 16:20:00 -0700' AS TIMESTAMPNTZ))",
+ )
+ self.validate_identity(
+ "SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz)",
+ "SELECT * FROM my_table AT (TIMESTAMP => CAST('Fri, 01 May 2015 16:20:00 -0700' AS TIMESTAMPTZ))",
+ )
+ self.validate_identity(
+ "SELECT * FROM my_table BEFORE (TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);",
+ "SELECT * FROM my_table BEFORE (TIMESTAMP => CAST('Fri, 01 May 2015 16:20:00 -0700' AS TIMESTAMPTZ))",
+ )
+ self.validate_identity(
+ """
+ SELECT oldt.* , newt.*
+ FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt
+ FULL OUTER JOIN my_table AT(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS newt
+ ON oldt.id = newt.id
+ WHERE oldt.id IS NULL OR newt.id IS NULL;
+ """,
+ "SELECT oldt.*, newt.* FROM my_table BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt FULL OUTER JOIN my_table AT (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS newt ON oldt.id = newt.id WHERE oldt.id IS NULL OR newt.id IS NULL",
+ )
+
def test_ddl(self):
self.validate_identity(
"""create external table et2(
|
Snowflake SELECT ... AT/BEFORE syntax (time travel) does not parse in 20.1.0
In 20.1.0, the SELECT AT/BEFORE (time travel) examples from the documentation raise a ParserError. The CREATE TABLE/SCHEMA/DATABASE examples transpile successfully.
Documentation/references:
* https://docs.snowflake.com/en/user-guide/data-time-travel#querying-historical-data
* https://docs.snowflake.com/en/sql-reference/constructs/at-before
* Issue for time travel for other databases: https://github.com/tobymao/sqlglot/issues/2128
```
examples = [
"SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp);",
"SELECT * FROM my_table AT(TIMESTAMP => TO_TIMESTAMP(1432669154242, 3));",
"SELECT * FROM my_table AT(OFFSET => -60*5) AS T WHERE T.flag = 'valid';",
"SELECT * FROM my_table AT (STATEMENT=>$query_id_var);",
"SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);",
"SELECT * FROM my_table AT (OFFSET => -60*5);",
"SELECT * FROM my_table AT(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');",
"SELECT * FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');",
"SELECT * FROM my_table BEFORE(STATEMENT=>$query_id_var);",
"SELECT * FROM my_table BEFORE (TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);",
"SELECT * FROM my_table BEFORE(OFFSET => -60*5);",
"SELECT * FROM my_table BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');",
"""
SELECT oldt.* , newt.*
FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt
FULL OUTER JOIN my_table AT(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS newt
ON oldt.id = newt.id
WHERE oldt.id IS NULL OR newt.id IS NULL;
""",
# these work today:
"CREATE TABLE restored_table CLONE my_table AT(TIMESTAMP => 'Sat, 09 May 2015 01:01:00 +0300'::timestamp_tz);",
"CREATE SCHEMA restored_schema CLONE my_schema AT(OFFSET => -3600);",
"CREATE DATABASE restored_db CLONE my_db BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');",
]
import sqlglot
import sqlglot.errors
print(sqlglot.__version__)
for i, s in enumerate(examples):
try:
t = sqlglot.transpile(
s.strip(), read="snowflake", write="snowflake", pretty=True
)
print(i, "okay", t)
except sqlglot.errors.ParseError as e:
print(i, "error", e)
```
Output (expected: all okay):
```
20.1.0
0 error Expecting ). Line 1, Col: 38.
SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp);
1 error Expecting ). Line 1, Col: 38.
SELECT * FROM my_table AT(TIMESTAMP => TO_TIMESTAMP(1432669154242, 3));
2 error Expecting ). Line 1, Col: 35.
SELECT * FROM my_table AT(OFFSET => -60*5) AS T WHERE T.flag = 'valid';
3 error Expecting ). Line 1, Col: 38.
SELECT * FROM my_table AT (STATEMENT=>$query_id_var);
4 error Expecting ). Line 1, Col: 38.
SELECT * FROM my_table AT(TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);
5 error Expecting ). Line 1, Col: 36.
SELECT * FROM my_table AT (OFFSET => -60*5);
6 error Expecting ). Line 1, Col: 38.
SELECT * FROM my_table AT(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');
7 error Expecting ). Line 1, Col: 42.
SELECT * FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');
8 error Expecting ). Line 1, Col: 41.
SELECT * FROM my_table BEFORE(STATEMENT=>$query_id_var);
9 error Expecting ). Line 1, Col: 43.
SELECT * FROM my_table BEFORE (TIMESTAMP => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp_tz);
10 error Expecting ). Line 1, Col: 39.
SELECT * FROM my_table BEFORE(OFFSET => -60*5);
11 error Expecting ). Line 1, Col: 43.
SELECT * FROM my_table BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726');
12 error Expecting ). Line 2, Col: 34.
SELECT oldt.* , newt.*
FROM my_table BEFORE(STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726') AS oldt
FULL OUTER JOIN my_table AT(STATEMENT => '8e5d0
13 okay ["CREATE TABLE restored_table CLONE my_table AT (TIMESTAMP => CAST('Sat, 09 May 2015 01:01:00 +0300' AS TIMESTAMPTZ))"]
14 okay ['CREATE SCHEMA restored_schema CLONE my_schema AT (OFFSET => -3600)']
15 okay ["CREATE DATABASE restored_db CLONE my_db BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726')"]
```
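After the fix, the same statements round-trip, e.g. (per the test patch above):
```python
import sqlglot

# AT/BEFORE now parses onto the table itself (exp.HistoricalData).
print(sqlglot.transpile(
    "SELECT * FROM my_table AT (OFFSET => -60*5);", read="snowflake", write="snowflake"
)[0])
# SELECT * FROM my_table AT (OFFSET => -60 * 5)
```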
|
0.0
|
2ae0debec0b945b0ece250d8e1e29b072b05602a
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-14 01:41:07+00:00
|
mit
| 6,002 |
|
tobymao__sqlglot-2700
|
diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index e9aa45db..b7eef451 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -126,6 +126,7 @@ class _Dialect(type):
klass.BIT_START, klass.BIT_END = get_start_end(TokenType.BIT_STRING)
klass.HEX_START, klass.HEX_END = get_start_end(TokenType.HEX_STRING)
klass.BYTE_START, klass.BYTE_END = get_start_end(TokenType.BYTE_STRING)
+ klass.UNICODE_START, klass.UNICODE_END = get_start_end(TokenType.UNICODE_STRING)
if enum not in ("", "bigquery"):
klass.generator_class.SELECT_KINDS = ()
@@ -240,13 +241,15 @@ class Dialect(metaclass=_Dialect):
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
- # Delimiters for bit, hex and byte literals
+ # Delimiters for bit, hex, byte and unicode literals
BIT_START: t.Optional[str] = None
BIT_END: t.Optional[str] = None
HEX_START: t.Optional[str] = None
HEX_END: t.Optional[str] = None
BYTE_START: t.Optional[str] = None
BYTE_END: t.Optional[str] = None
+ UNICODE_START: t.Optional[str] = None
+ UNICODE_END: t.Optional[str] = None
@classmethod
def get_or_raise(cls, dialect: DialectType) -> Dialect:
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 88f4f539..5e6d444d 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -222,6 +222,12 @@ class Presto(Dialect):
NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
class Tokenizer(tokens.Tokenizer):
+ UNICODE_STRINGS = [
+ (prefix + q, q)
+ for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES)
+ for prefix in ("U&", "u&")
+ ]
+
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
"START": TokenType.BEGIN,
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index fca42d48..36bbcc50 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -33,6 +33,21 @@ def _check_int(s: str) -> bool:
return s.isdigit()
+def _parse_to_array(args: t.List) -> exp.Expression:
+ arg = seq_get(args, 0)
+ if isinstance(arg, exp.Expression):
+ from sqlglot.optimizer.annotate_types import annotate_types
+
+ # https://docs.snowflake.com/en/sql-reference/functions/to_array
+ arg = annotate_types(arg)
+ if arg.is_type(exp.DataType.Type.ARRAY):
+ return arg
+ if arg.is_type(exp.DataType.Type.VARIANT):
+ return exp.Anonymous(this="TO_ARRAY", expressions=[arg])
+
+ return exp.Array.from_arg_list(args)
+
+
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
if len(args) == 2:
@@ -293,7 +308,7 @@ class Snowflake(Dialect):
"SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
"TIMEDIFF": _parse_datediff,
"TIMESTAMPDIFF": _parse_datediff,
- "TO_ARRAY": exp.Array.from_arg_list,
+ "TO_ARRAY": _parse_to_array,
"TO_TIMESTAMP": _parse_to_timestamp,
"TO_VARCHAR": exp.ToChar.from_arg_list,
"ZEROIFNULL": _zeroifnull_to_if,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 6990344e..e2839576 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1206,6 +1206,10 @@ class RawString(Condition):
pass
+class UnicodeString(Condition):
+ arg_types = {"this": True, "escape": False}
+
+
class Column(Condition):
arg_types = {"this": True, "table": False, "db": False, "catalog": False, "join_mark": False}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 665538eb..add02d06 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -915,6 +915,14 @@ class Generator:
return f"{self.dialect.BYTE_START}{this}{self.dialect.BYTE_END}"
return this
+ def unicodestring_sql(self, expression: exp.UnicodeString) -> str:
+ this = self.sql(expression, "this")
+ if self.dialect.UNICODE_START:
+ escape = self.sql(expression, "escape")
+ escape = f" UESCAPE {escape}" if escape else ""
+ return f"{self.dialect.UNICODE_START}{this}{self.dialect.UNICODE_END}{escape}"
+ return this
+
def rawstring_sql(self, expression: exp.RawString) -> str:
string = self.escape_str(expression.this.replace("\\", "\\\\"))
return f"{self.dialect.QUOTE_START}{string}{self.dialect.QUOTE_END}"
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index bee2cff8..c4062e1d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -635,6 +635,11 @@ class Parser(metaclass=_Parser):
TokenType.HEREDOC_STRING: lambda self, token: self.expression(
exp.RawString, this=token.text
),
+ TokenType.UNICODE_STRING: lambda self, token: self.expression(
+ exp.UnicodeString,
+ this=token.text,
+ escape=self._match_text_seq("UESCAPE") and self._parse_string(),
+ ),
TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
}
@@ -3599,7 +3604,7 @@ class Parser(metaclass=_Parser):
exp.DataType, this=exp.DataType.Type.INTERVAL, expressions=span
)
else:
- this = self.expression(exp.Interval, unit=unit)
+ this = self.expression(exp.DataType, this=self.expression(exp.Interval, unit=unit))
if maybe_func and check_func:
index2 = self._index
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index aaeafb1c..de9d4c4a 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -97,6 +97,7 @@ class TokenType(AutoName):
NATIONAL_STRING = auto()
RAW_STRING = auto()
HEREDOC_STRING = auto()
+ UNICODE_STRING = auto()
# types
BIT = auto()
@@ -450,6 +451,7 @@ class _Tokenizer(type):
**_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
**_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
**_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
+ **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS),
}
klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
@@ -557,6 +559,7 @@ class Tokenizer(metaclass=_Tokenizer):
HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
IDENTIFIER_ESCAPES = ['"']
QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
|
tobymao/sqlglot
|
0d6831aecfee75dcb4ed74dab37b7dd2b304c6f6
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 0332ae1a..91556960 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -8,6 +8,11 @@ class TestPostgres(Validator):
dialect = "postgres"
def test_ddl(self):
+ expr = parse_one("CREATE TABLE t (x INTERVAL day)", read="postgres")
+ cdef = expr.find(exp.ColumnDef)
+ cdef.args["kind"].assert_is(exp.DataType)
+ self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL day)")
+
self.validate_identity("CREATE INDEX idx_x ON x USING BTREE(x, y) WHERE (NOT y IS NULL)")
self.validate_identity("CREATE TABLE test (elems JSONB[])")
self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)")
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index ad85ddc6..a9d88cb0 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -544,26 +544,18 @@ class TestPresto(Validator):
},
)
- def test_presto(self):
- self.validate_identity("string_agg(x, ',')", "ARRAY_JOIN(ARRAY_AGG(x), ',')")
- self.validate_identity(
- "SELECT * FROM example.testdb.customer_orders FOR VERSION AS OF 8954597067493422955"
- )
- self.validate_identity(
- "SELECT * FROM example.testdb.customer_orders FOR TIMESTAMP AS OF CAST('2022-03-23 09:59:29.803 Europe/Vienna' AS TIMESTAMP)"
- )
-
- self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1")
- self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY")
- self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)")
- self.validate_identity("SELECT * FROM (VALUES (1))")
- self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
- self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
- self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
- self.validate_identity(
- "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))"
- )
+ def test_unicode_string(self):
+ for prefix in ("u&", "U&"):
+ self.validate_identity(
+ f"{prefix}'Hello winter \\2603 !'",
+ "U&'Hello winter \\2603 !'",
+ )
+ self.validate_identity(
+ f"{prefix}'Hello winter #2603 !' UESCAPE '#'",
+ "U&'Hello winter #2603 !' UESCAPE '#'",
+ )
+ def test_presto(self):
with self.assertLogs(helper_logger) as cm:
self.validate_all(
"SELECT COALESCE(ELEMENT_AT(MAP_FROM_ENTRIES(ARRAY[(51, '1')]), id), quantity) FROM my_table",
@@ -582,6 +574,24 @@ class TestPresto(Validator):
},
)
+ self.validate_identity("string_agg(x, ',')", "ARRAY_JOIN(ARRAY_AGG(x), ',')")
+ self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1")
+ self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY")
+ self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)")
+ self.validate_identity("SELECT * FROM (VALUES (1))")
+ self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
+ self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
+ self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
+ self.validate_identity(
+ "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))"
+ )
+ self.validate_identity(
+ "SELECT * FROM example.testdb.customer_orders FOR VERSION AS OF 8954597067493422955"
+ )
+ self.validate_identity(
+ "SELECT * FROM example.testdb.customer_orders FOR TIMESTAMP AS OF CAST('2022-03-23 09:59:29.803 Europe/Vienna' AS TIMESTAMP)"
+ )
+
self.validate_all(
"SELECT MAX_BY(a.id, a.timestamp) FROM a",
read={
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 13f32c13..4c2d7ca7 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -127,11 +127,45 @@ WHERE
"SELECT TO_TIMESTAMP(x) FROM t",
"SELECT CAST(x AS TIMESTAMPNTZ) FROM t",
)
+ self.validate_identity(
+ "CAST(x AS BYTEINT)",
+ "CAST(x AS INT)",
+ )
+ self.validate_identity(
+ "CAST(x AS CHAR VARYING)",
+ "CAST(x AS VARCHAR)",
+ )
+ self.validate_identity(
+ "CAST(x AS CHARACTER VARYING)",
+ "CAST(x AS VARCHAR)",
+ )
+ self.validate_identity(
+ "CAST(x AS NCHAR VARYING)",
+ "CAST(x AS VARCHAR)",
+ )
+ self.validate_identity(
+ "SELECT TO_ARRAY(x::ARRAY)",
+ "SELECT CAST(x AS ARRAY)",
+ )
+ self.validate_identity(
+ "SELECT TO_ARRAY(['test']::VARIANT)",
+ "SELECT TO_ARRAY(CAST(['test'] AS VARIANT))",
+ )
- self.validate_all("CAST(x AS BYTEINT)", write={"snowflake": "CAST(x AS INT)"})
- self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
- self.validate_all("CAST(x AS CHARACTER VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
- self.validate_all("CAST(x AS NCHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
+ self.validate_all(
+ "SELECT TO_ARRAY(['test'])",
+ write={
+ "snowflake": "SELECT ['test']",
+ "spark": "SELECT ARRAY('test')",
+ },
+ )
+ self.validate_all(
+ "SELECT TO_ARRAY(['test'])",
+ write={
+ "snowflake": "SELECT ['test']",
+ "spark": "SELECT ARRAY('test')",
+ },
+ )
self.validate_all(
# We need to qualify the columns in this query because "value" would be ambiguous
'WITH t(x, "value") AS (SELECT [1, 2, 3], 1) SELECT IFF(_u.pos = _u_2.pos_2, _u_2."value", NULL) AS "value" FROM t, TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (GREATEST(ARRAY_SIZE(t.x)) - 1) + 1))) AS _u(seq, key, path, index, pos, this) CROSS JOIN TABLE(FLATTEN(INPUT => t.x)) AS _u_2(seq, key, path, pos_2, "value", this) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > (ARRAY_SIZE(t.x) - 1) AND _u_2.pos_2 = (ARRAY_SIZE(t.x) - 1))',
|
Snowflake to_array implemented incorrectly
The docs for the Snowflake implementation of [to_array](https://docs.snowflake.com/en/sql-reference/functions/to_array) state:
> If the input is an ARRAY, or VARIANT containing an array value, the result is unchanged.
> For NULL or a JSON null input, returns NULL.
> For any other value, the result is a single-element array containing this value.
As we can see below, when the input is an array, the result is an array of arrays, which is not unchanged.
```
>>> parse_one("select to_array(['test']) from test;", dialect="snowflake").sql(pretty=True)
"SELECT\n ARRAY(ARRAY('test'))\nFROM test"
```
```
sqlglot==20.2.0
```
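Per the test patch above, the fixed behavior leaves array-typed input unchanged while no longer double-wrapping array literals:
```python
import sqlglot

# Input already cast to ARRAY: TO_ARRAY is a no-op.
print(sqlglot.parse_one("SELECT TO_ARRAY(x::ARRAY)", read="snowflake").sql(dialect="snowflake"))
# SELECT CAST(x AS ARRAY)

# Array literal: no ARRAY(ARRAY(...)) wrapping anymore.
print(sqlglot.parse_one("SELECT TO_ARRAY(['test'])", read="snowflake").sql(dialect="snowflake"))
# SELECT ['test']
```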
|
0.0
|
0d6831aecfee75dcb4ed74dab37b7dd2b304c6f6
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_unicode_string",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance",
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular",
"tests/dialects/test_presto.py::TestPresto::test_json",
"tests/dialects/test_presto.py::TestPresto::test_match_recognize",
"tests/dialects/test_presto.py::TestPresto::test_presto",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_presto.py::TestPresto::test_to_char",
"tests/dialects/test_presto.py::TestPresto::test_unnest",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-18 13:00:25+00:00
|
mit
| 6,003 |
|
tobymao__sqlglot-2701
|
diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index e9aa45db..b7eef451 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -126,6 +126,7 @@ class _Dialect(type):
klass.BIT_START, klass.BIT_END = get_start_end(TokenType.BIT_STRING)
klass.HEX_START, klass.HEX_END = get_start_end(TokenType.HEX_STRING)
klass.BYTE_START, klass.BYTE_END = get_start_end(TokenType.BYTE_STRING)
+ klass.UNICODE_START, klass.UNICODE_END = get_start_end(TokenType.UNICODE_STRING)
if enum not in ("", "bigquery"):
klass.generator_class.SELECT_KINDS = ()
@@ -240,13 +241,15 @@ class Dialect(metaclass=_Dialect):
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
- # Delimiters for bit, hex and byte literals
+ # Delimiters for bit, hex, byte and unicode literals
BIT_START: t.Optional[str] = None
BIT_END: t.Optional[str] = None
HEX_START: t.Optional[str] = None
HEX_END: t.Optional[str] = None
BYTE_START: t.Optional[str] = None
BYTE_END: t.Optional[str] = None
+ UNICODE_START: t.Optional[str] = None
+ UNICODE_END: t.Optional[str] = None
@classmethod
def get_or_raise(cls, dialect: DialectType) -> Dialect:
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 88f4f539..5e6d444d 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -222,6 +222,12 @@ class Presto(Dialect):
NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
class Tokenizer(tokens.Tokenizer):
+ UNICODE_STRINGS = [
+ (prefix + q, q)
+ for q in t.cast(t.List[str], tokens.Tokenizer.QUOTES)
+ for prefix in ("U&", "u&")
+ ]
+
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
"START": TokenType.BEGIN,
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index fca42d48..36bbcc50 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -33,6 +33,21 @@ def _check_int(s: str) -> bool:
return s.isdigit()
+def _parse_to_array(args: t.List) -> exp.Expression:
+ arg = seq_get(args, 0)
+ if isinstance(arg, exp.Expression):
+ from sqlglot.optimizer.annotate_types import annotate_types
+
+ # https://docs.snowflake.com/en/sql-reference/functions/to_array
+ arg = annotate_types(arg)
+ if arg.is_type(exp.DataType.Type.ARRAY):
+ return arg
+ if arg.is_type(exp.DataType.Type.VARIANT):
+ return exp.Anonymous(this="TO_ARRAY", expressions=[arg])
+
+ return exp.Array.from_arg_list(args)
+
+
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
if len(args) == 2:
@@ -293,7 +308,7 @@ class Snowflake(Dialect):
"SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
"TIMEDIFF": _parse_datediff,
"TIMESTAMPDIFF": _parse_datediff,
- "TO_ARRAY": exp.Array.from_arg_list,
+ "TO_ARRAY": _parse_to_array,
"TO_TIMESTAMP": _parse_to_timestamp,
"TO_VARCHAR": exp.ToChar.from_arg_list,
"ZEROIFNULL": _zeroifnull_to_if,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 6990344e..6179b0c7 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1206,6 +1206,10 @@ class RawString(Condition):
pass
+class UnicodeString(Condition):
+ arg_types = {"this": True, "escape": False}
+
+
class Column(Condition):
arg_types = {"this": True, "table": False, "db": False, "catalog": False, "join_mark": False}
@@ -1960,7 +1964,12 @@ class Offset(Expression):
class Order(Expression):
- arg_types = {"this": False, "expressions": True}
+ arg_types = {"this": False, "expressions": True, "interpolate": False}
+
+
+# https://clickhouse.com/docs/en/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier
+class WithFill(Expression):
+ arg_types = {"from": False, "to": False, "step": False}
# hive specific sorts
@@ -1978,7 +1987,7 @@ class Sort(Order):
class Ordered(Expression):
- arg_types = {"this": True, "desc": False, "nulls_first": True}
+ arg_types = {"this": True, "desc": False, "nulls_first": True, "with_fill": False}
class Property(Expression):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 665538eb..0aac498d 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -915,6 +915,14 @@ class Generator:
return f"{self.dialect.BYTE_START}{this}{self.dialect.BYTE_END}"
return this
+ def unicodestring_sql(self, expression: exp.UnicodeString) -> str:
+ this = self.sql(expression, "this")
+ if self.dialect.UNICODE_START:
+ escape = self.sql(expression, "escape")
+ escape = f" UESCAPE {escape}" if escape else ""
+ return f"{self.dialect.UNICODE_START}{this}{self.dialect.UNICODE_END}{escape}"
+ return this
+
def rawstring_sql(self, expression: exp.RawString) -> str:
string = self.escape_str(expression.this.replace("\\", "\\\\"))
return f"{self.dialect.QUOTE_START}{string}{self.dialect.QUOTE_END}"
@@ -1786,7 +1794,24 @@ class Generator:
def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
this = self.sql(expression, "this")
this = f"{this} " if this else this
- return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat) # type: ignore
+ order = self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat) # type: ignore
+ interpolated_values = [
+ f"{self.sql(named_expression, 'alias')} AS {self.sql(named_expression, 'this')}"
+ for named_expression in expression.args.get("interpolate") or []
+ ]
+ interpolate = (
+ f" INTERPOLATE ({', '.join(interpolated_values)})" if interpolated_values else ""
+ )
+ return f"{order}{interpolate}"
+
+ def withfill_sql(self, expression: exp.WithFill) -> str:
+ from_sql = self.sql(expression, "from")
+ from_sql = f" FROM {from_sql}" if from_sql else ""
+ to_sql = self.sql(expression, "to")
+ to_sql = f" TO {to_sql}" if to_sql else ""
+ step_sql = self.sql(expression, "step")
+ step_sql = f" STEP {step_sql}" if step_sql else ""
+ return f"WITH FILL{from_sql}{to_sql}{step_sql}"
def cluster_sql(self, expression: exp.Cluster) -> str:
return self.op_expressions("CLUSTER BY", expression)
@@ -1828,7 +1853,10 @@ class Generator:
this = f"CASE WHEN {this} IS NULL THEN 1 ELSE 0 END{null_sort_order}, {this}"
nulls_sort_change = ""
- return f"{this}{sort_order}{nulls_sort_change}"
+ with_fill = self.sql(expression, "with_fill")
+ with_fill = f" {with_fill}" if with_fill else ""
+
+ return f"{this}{sort_order}{nulls_sort_change}{with_fill}"
def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
partition = self.partition_by_sql(expression)
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index bee2cff8..e9e9cc56 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -635,6 +635,11 @@ class Parser(metaclass=_Parser):
TokenType.HEREDOC_STRING: lambda self, token: self.expression(
exp.RawString, this=token.text
),
+ TokenType.UNICODE_STRING: lambda self, token: self.expression(
+ exp.UnicodeString,
+ this=token.text,
+ escape=self._match_text_seq("UESCAPE") and self._parse_string(),
+ ),
TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
}
@@ -2463,13 +2468,7 @@ class Parser(metaclass=_Parser):
pattern = None
define = (
- self._parse_csv(
- lambda: self.expression(
- exp.Alias,
- alias=self._parse_id_var(any_token=True),
- this=self._match(TokenType.ALIAS) and self._parse_conjunction(),
- )
- )
+ self._parse_csv(self._parse_name_as_expression)
if self._match_text_seq("DEFINE")
else None
)
@@ -3116,6 +3115,18 @@ class Parser(metaclass=_Parser):
return self.expression(exp.Connect, start=start, connect=connect)
+ def _parse_name_as_expression(self) -> exp.Alias:
+ return self.expression(
+ exp.Alias,
+ alias=self._parse_id_var(any_token=True),
+ this=self._match(TokenType.ALIAS) and self._parse_conjunction(),
+ )
+
+ def _parse_interpolate(self) -> t.Optional[t.List[exp.Expression]]:
+ if self._match_text_seq("INTERPOLATE"):
+ return self._parse_wrapped_csv(self._parse_name_as_expression)
+ return None
+
def _parse_order(
self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False
) -> t.Optional[exp.Expression]:
@@ -3123,7 +3134,10 @@ class Parser(metaclass=_Parser):
return this
return self.expression(
- exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered)
+ exp.Order,
+ this=this,
+ expressions=self._parse_csv(self._parse_ordered),
+ interpolate=self._parse_interpolate(),
)
def _parse_sort(self, exp_class: t.Type[E], token: TokenType) -> t.Optional[E]:
@@ -3153,7 +3167,21 @@ class Parser(metaclass=_Parser):
):
nulls_first = True
- return self.expression(exp.Ordered, this=this, desc=desc, nulls_first=nulls_first)
+ if self._match_text_seq("WITH", "FILL"):
+ with_fill = self.expression(
+ exp.WithFill,
+ **{ # type: ignore
+ "from": self._match(TokenType.FROM) and self._parse_bitwise(),
+ "to": self._match_text_seq("TO") and self._parse_bitwise(),
+ "step": self._match_text_seq("STEP") and self._parse_bitwise(),
+ },
+ )
+ else:
+ with_fill = None
+
+ return self.expression(
+ exp.Ordered, this=this, desc=desc, nulls_first=nulls_first, with_fill=with_fill
+ )
def _parse_limit(
self, this: t.Optional[exp.Expression] = None, top: bool = False
@@ -3599,7 +3627,7 @@ class Parser(metaclass=_Parser):
exp.DataType, this=exp.DataType.Type.INTERVAL, expressions=span
)
else:
- this = self.expression(exp.Interval, unit=unit)
+ this = self.expression(exp.DataType, this=self.expression(exp.Interval, unit=unit))
if maybe_func and check_func:
index2 = self._index
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index aaeafb1c..de9d4c4a 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -97,6 +97,7 @@ class TokenType(AutoName):
NATIONAL_STRING = auto()
RAW_STRING = auto()
HEREDOC_STRING = auto()
+ UNICODE_STRING = auto()
# types
BIT = auto()
@@ -450,6 +451,7 @@ class _Tokenizer(type):
**_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
**_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
**_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
+ **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS),
}
klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
@@ -557,6 +559,7 @@ class Tokenizer(metaclass=_Tokenizer):
HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
IDENTIFIER_ESCAPES = ['"']
QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
|
tobymao/sqlglot
|
0d6831aecfee75dcb4ed74dab37b7dd2b304c6f6
|
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index e56bdabb..1f528b62 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -70,6 +70,18 @@ class TestClickhouse(Validator):
self.validate_identity("CAST(x AS DATETIME)")
self.validate_identity("CAST(x as MEDIUMINT)", "CAST(x AS Int32)")
self.validate_identity("SELECT arrayJoin([1, 2, 3] AS src) AS dst, 'Hello', src")
+ self.validate_identity(
+ "SELECT n, source FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL"
+ )
+ self.validate_identity(
+ "SELECT n, source FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5"
+ )
+ self.validate_identity(
+ "SELECT toDate((number * 10) * 86400) AS d1, toDate(number * 86400) AS d2, 'original' AS source FROM numbers(10) WHERE (number % 3) = 1 ORDER BY d2 WITH FILL, d1 WITH FILL STEP 5"
+ )
+ self.validate_identity(
+ "SELECT n, source, inter FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 INTERPOLATE (inter AS inter + 1)"
+ )
self.validate_identity(
"SELECT SUM(1) AS impressions, arrayJoin(cities) AS city, arrayJoin(browsers) AS browser FROM (SELECT ['Istanbul', 'Berlin', 'Bobruisk'] AS cities, ['Firefox', 'Chrome', 'Chrome'] AS browsers) GROUP BY 2, 3"
)
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 0332ae1a..91556960 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -8,6 +8,11 @@ class TestPostgres(Validator):
dialect = "postgres"
def test_ddl(self):
+ expr = parse_one("CREATE TABLE t (x INTERVAL day)", read="postgres")
+ cdef = expr.find(exp.ColumnDef)
+ cdef.args["kind"].assert_is(exp.DataType)
+ self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL day)")
+
self.validate_identity("CREATE INDEX idx_x ON x USING BTREE(x, y) WHERE (NOT y IS NULL)")
self.validate_identity("CREATE TABLE test (elems JSONB[])")
self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)")
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index ad85ddc6..a9d88cb0 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -544,26 +544,18 @@ class TestPresto(Validator):
},
)
- def test_presto(self):
- self.validate_identity("string_agg(x, ',')", "ARRAY_JOIN(ARRAY_AGG(x), ',')")
- self.validate_identity(
- "SELECT * FROM example.testdb.customer_orders FOR VERSION AS OF 8954597067493422955"
- )
- self.validate_identity(
- "SELECT * FROM example.testdb.customer_orders FOR TIMESTAMP AS OF CAST('2022-03-23 09:59:29.803 Europe/Vienna' AS TIMESTAMP)"
- )
-
- self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1")
- self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY")
- self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)")
- self.validate_identity("SELECT * FROM (VALUES (1))")
- self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
- self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
- self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
- self.validate_identity(
- "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))"
- )
+ def test_unicode_string(self):
+ for prefix in ("u&", "U&"):
+ self.validate_identity(
+ f"{prefix}'Hello winter \\2603 !'",
+ "U&'Hello winter \\2603 !'",
+ )
+ self.validate_identity(
+ f"{prefix}'Hello winter #2603 !' UESCAPE '#'",
+ "U&'Hello winter #2603 !' UESCAPE '#'",
+ )
+ def test_presto(self):
with self.assertLogs(helper_logger) as cm:
self.validate_all(
"SELECT COALESCE(ELEMENT_AT(MAP_FROM_ENTRIES(ARRAY[(51, '1')]), id), quantity) FROM my_table",
@@ -582,6 +574,24 @@ class TestPresto(Validator):
},
)
+ self.validate_identity("string_agg(x, ',')", "ARRAY_JOIN(ARRAY_AGG(x), ',')")
+ self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1")
+ self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY")
+ self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)")
+ self.validate_identity("SELECT * FROM (VALUES (1))")
+ self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE")
+ self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
+ self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
+ self.validate_identity(
+ "SELECT SPLIT_TO_MAP('a:1;b:2;a:3', ';', ':', (k, v1, v2) -> CONCAT(v1, v2))"
+ )
+ self.validate_identity(
+ "SELECT * FROM example.testdb.customer_orders FOR VERSION AS OF 8954597067493422955"
+ )
+ self.validate_identity(
+ "SELECT * FROM example.testdb.customer_orders FOR TIMESTAMP AS OF CAST('2022-03-23 09:59:29.803 Europe/Vienna' AS TIMESTAMP)"
+ )
+
self.validate_all(
"SELECT MAX_BY(a.id, a.timestamp) FROM a",
read={
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 13f32c13..4c2d7ca7 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -127,11 +127,45 @@ WHERE
"SELECT TO_TIMESTAMP(x) FROM t",
"SELECT CAST(x AS TIMESTAMPNTZ) FROM t",
)
+ self.validate_identity(
+ "CAST(x AS BYTEINT)",
+ "CAST(x AS INT)",
+ )
+ self.validate_identity(
+ "CAST(x AS CHAR VARYING)",
+ "CAST(x AS VARCHAR)",
+ )
+ self.validate_identity(
+ "CAST(x AS CHARACTER VARYING)",
+ "CAST(x AS VARCHAR)",
+ )
+ self.validate_identity(
+ "CAST(x AS NCHAR VARYING)",
+ "CAST(x AS VARCHAR)",
+ )
+ self.validate_identity(
+ "SELECT TO_ARRAY(x::ARRAY)",
+ "SELECT CAST(x AS ARRAY)",
+ )
+ self.validate_identity(
+ "SELECT TO_ARRAY(['test']::VARIANT)",
+ "SELECT TO_ARRAY(CAST(['test'] AS VARIANT))",
+ )
- self.validate_all("CAST(x AS BYTEINT)", write={"snowflake": "CAST(x AS INT)"})
- self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
- self.validate_all("CAST(x AS CHARACTER VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
- self.validate_all("CAST(x AS NCHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
+ self.validate_all(
+ "SELECT TO_ARRAY(['test'])",
+ write={
+ "snowflake": "SELECT ['test']",
+ "spark": "SELECT ARRAY('test')",
+ },
+ )
+ self.validate_all(
+ "SELECT TO_ARRAY(['test'])",
+ write={
+ "snowflake": "SELECT ['test']",
+ "spark": "SELECT ARRAY('test')",
+ },
+ )
self.validate_all(
# We need to qualify the columns in this query because "value" would be ambiguous
'WITH t(x, "value") AS (SELECT [1, 2, 3], 1) SELECT IFF(_u.pos = _u_2.pos_2, _u_2."value", NULL) AS "value" FROM t, TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (GREATEST(ARRAY_SIZE(t.x)) - 1) + 1))) AS _u(seq, key, path, index, pos, this) CROSS JOIN TABLE(FLATTEN(INPUT => t.x)) AS _u_2(seq, key, path, pos_2, "value", this) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > (ARRAY_SIZE(t.x) - 1) AND _u_2.pos_2 = (ARRAY_SIZE(t.x) - 1))',
|
Clickhouse: ORDER BY WITH FILL not working
Hey sqlglot team,
I am trying to integrate SQLglot further into one of my recent open source libraries, [SQL Mock](https://github.com/DeepLcom/sql-mock). It is already used for some functionality, but I want to extend its usage.
While testing locally, I identified that the Clickhouse `ORDER BY WITH FILL` [syntax](https://clickhouse.com/docs/en/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier) generates a parsing error:
**Fully reproducible code snippet**
```python
import sqlglot
query = """
SELECT
toLastDayOfMonth(date) AS observation_month
FROM (
SELECT
toStartOfMonth(toDate('2023-01-01') - INTERVAL 1 MONTH) AS date
ORDER BY
date WITH FILL
FROM toDate('2021-01-01')
STEP INTERVAL 1 MONTH
)
"""
ast = sqlglot.parse_one(query, dialect='clickhouse')
```
Will result in
```bash
ParseError: Expecting ). Line 8, Col: 10.
start date we care about and fill the rest from the period_start
ORDER BY
date WITH FILL
FROM toDate('2021-01-01') -- First day we will report on
STEP
```
**Official Documentation**
* [ORDER BY Expr WITH FILL Modifier Documentation](https://clickhouse.com/docs/en/sql-reference/statements/select/order-by#order-by-expr-with-fill-modifier)
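With the parser and generator changes above in place, the reported syntax round-trips. A minimal sketch (the table and column names are illustrative, and it assumes a sqlglot build that includes this patch; the expected output matches the test patch's `validate_identity` cases):

```python
import sqlglot

sql = "SELECT n FROM t ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5"
ast = sqlglot.parse_one(sql, dialect="clickhouse")
# The ordering expression now carries a WithFill node, and generation
# reproduces the modifier verbatim:
print(ast.sql(dialect="clickhouse"))
# SELECT n FROM t ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5
```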
|
0.0
|
0d6831aecfee75dcb4ed74dab37b7dd2b304c6f6
|
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_unicode_string",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary",
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance",
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular",
"tests/dialects/test_presto.py::TestPresto::test_json",
"tests/dialects/test_presto.py::TestPresto::test_match_recognize",
"tests/dialects/test_presto.py::TestPresto::test_presto",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_presto.py::TestPresto::test_to_char",
"tests/dialects/test_presto.py::TestPresto::test_unnest",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-18 15:11:32+00:00
|
mit
| 6,004 |
|
tobymao__sqlglot-2709
|
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
index b92925dc..fd418b6f 100644
--- a/.github/workflows/python-publish.yml
+++ b/.github/workflows/python-publish.yml
@@ -80,7 +80,6 @@ jobs:
with:
command: upload
args: --non-interactive --skip-existing *
- working-directory: ./sqlglot/sqlglotrs
deploy:
runs-on: ubuntu-latest
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index c571e8fb..b0e83d21 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import logging
+import re
import typing as t
from collections import defaultdict
from functools import reduce
@@ -17,6 +18,8 @@ if t.TYPE_CHECKING:
logger = logging.getLogger("sqlglot")
+ESCAPED_UNICODE_RE = re.compile(r"\\(\d+)")
+
class Generator:
"""
@@ -917,11 +920,19 @@ class Generator:
def unicodestring_sql(self, expression: exp.UnicodeString) -> str:
this = self.sql(expression, "this")
+ escape = expression.args.get("escape")
+
if self.dialect.UNICODE_START:
- escape = self.sql(expression, "escape")
- escape = f" UESCAPE {escape}" if escape else ""
+ escape = f" UESCAPE {self.sql(escape)}" if escape else ""
return f"{self.dialect.UNICODE_START}{this}{self.dialect.UNICODE_END}{escape}"
- return this
+
+ if escape:
+ pattern = re.compile(rf"{escape.name}(\d+)")
+ else:
+ pattern = ESCAPED_UNICODE_RE
+
+ this = pattern.sub(r"\\u\1", this)
+ return f"{self.dialect.QUOTE_START}{this}{self.dialect.QUOTE_END}"
def rawstring_sql(self, expression: exp.RawString) -> str:
string = self.escape_str(expression.this.replace("\\", "\\\\"))
|
tobymao/sqlglot
|
1c95b1e6fcd3c1de534266b379058a1bad85c29e
|
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index 97a387c6..8b5080c9 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -546,13 +546,21 @@ class TestPresto(Validator):
def test_unicode_string(self):
for prefix in ("u&", "U&"):
- self.validate_identity(
+ self.validate_all(
f"{prefix}'Hello winter \\2603 !'",
- "U&'Hello winter \\2603 !'",
+ write={
+ "presto": "U&'Hello winter \\2603 !'",
+ "snowflake": "'Hello winter \\u2603 !'",
+ "spark": "'Hello winter \\u2603 !'",
+ },
)
- self.validate_identity(
+ self.validate_all(
f"{prefix}'Hello winter #2603 !' UESCAPE '#'",
- "U&'Hello winter #2603 !' UESCAPE '#'",
+ write={
+ "presto": "U&'Hello winter #2603 !' UESCAPE '#'",
+ "snowflake": "'Hello winter \\u2603 !'",
+ "spark": "'Hello winter \\u2603 !'",
+ },
)
def test_presto(self):
|
Unicode character escape is not correctly converted from Trino
Trino uses a `U&` prefix to indicate that a string has unicode characters escaped with a `\`.
https://trino.io/docs/current/language/types.html#varchar
SparkSQL doesn't use a prefix but escapes unicode characters with `\u`.
https://spark.apache.org/docs/latest/sql-ref-literals.html#parameters
**Fully reproducible code snippet**
```
in_sql = '''select U&'n\00e3o' as no'''
out_sql = sqlglot.transpile(in_sql, read='trino', write='spark', pretty=True)[0]
print(out_sql)
```
**Output**
```
SELECT
U & 'ne3o' AS no
```
**Expected Output**
```
SELECT
'n\u00e3o' AS no
```
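A sketch of the expected behavior once the generator rewrite above is applied: for dialects without a `U&` literal, escaped code points are rewritten to the `\u` form (assuming a build that includes the fix):

```python
import sqlglot

# The default escape character is `\`; the generator substitutes `\uXXXX`:
out = sqlglot.transpile("select U&'n\\00e3o' as no", read="trino", write="spark")[0]
print(out)  # SELECT 'n\u00e3o' AS no
```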
|
0.0
|
1c95b1e6fcd3c1de534266b379058a1bad85c29e
|
[
"tests/dialects/test_presto.py::TestPresto::test_unicode_string"
] |
[
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular",
"tests/dialects/test_presto.py::TestPresto::test_json",
"tests/dialects/test_presto.py::TestPresto::test_match_recognize",
"tests/dialects/test_presto.py::TestPresto::test_presto",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_presto.py::TestPresto::test_to_char",
"tests/dialects/test_presto.py::TestPresto::test_unnest"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-19 19:05:11+00:00
|
mit
| 6,005 |
|
tobymao__sqlglot-2739
|
diff --git a/sqlglot/transforms.py b/sqlglot/transforms.py
index 03acc2b2..0da65b51 100644
--- a/sqlglot/transforms.py
+++ b/sqlglot/transforms.py
@@ -255,7 +255,7 @@ def explode_to_unnest(index_offset: int = 0) -> t.Callable[[exp.Expression], exp
if not arrays:
if expression.args.get("from"):
- expression.join(series, copy=False)
+ expression.join(series, copy=False, join_type="CROSS")
else:
expression.from_(series, copy=False)
|
tobymao/sqlglot
|
1ebfb3688975e420a70bac10c49ad127446c4c65
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 191f24d8..81219886 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -368,7 +368,7 @@ class TestBigQuery(Validator):
},
)
self.validate_all(
- "WITH cte AS (SELECT [1, 2, 3] AS arr) SELECT IF(pos = pos_2, col, NULL) AS col FROM cte, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(arr)) - 1)) AS pos CROSS JOIN UNNEST(arr) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(arr) - 1) AND pos_2 = (ARRAY_LENGTH(arr) - 1))",
+ "WITH cte AS (SELECT [1, 2, 3] AS arr) SELECT IF(pos = pos_2, col, NULL) AS col FROM cte CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(arr)) - 1)) AS pos CROSS JOIN UNNEST(arr) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(arr) - 1) AND pos_2 = (ARRAY_LENGTH(arr) - 1))",
read={
"spark": "WITH cte AS (SELECT ARRAY(1, 2, 3) AS arr) SELECT EXPLODE(arr) FROM cte"
},
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 543739a1..021349b1 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -103,8 +103,8 @@ class TestDuckDB(Validator):
self.validate_all(
"SELECT UNNEST(ARRAY[1, 2, 3]), UNNEST(ARRAY[4, 5]), UNNEST(ARRAY[6]) FROM x",
write={
- "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_3, col_2, NULL) AS col_2, IF(pos = pos_4, col_3, NULL) AS col_3 FROM x, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH([1, 2, 3]), ARRAY_LENGTH([4, 5]), ARRAY_LENGTH([6])) - 1)) AS pos CROSS JOIN UNNEST([1, 2, 3]) AS col WITH OFFSET AS pos_2 CROSS JOIN UNNEST([4, 5]) AS col_2 WITH OFFSET AS pos_3 CROSS JOIN UNNEST([6]) AS col_3 WITH OFFSET AS pos_4 WHERE ((pos = pos_2 OR (pos > (ARRAY_LENGTH([1, 2, 3]) - 1) AND pos_2 = (ARRAY_LENGTH([1, 2, 3]) - 1))) AND (pos = pos_3 OR (pos > (ARRAY_LENGTH([4, 5]) - 1) AND pos_3 = (ARRAY_LENGTH([4, 5]) - 1)))) AND (pos = pos_4 OR (pos > (ARRAY_LENGTH([6]) - 1) AND pos_4 = (ARRAY_LENGTH([6]) - 1)))",
- "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col, IF(_u.pos = _u_3.pos_3, _u_3.col_2) AS col_2, IF(_u.pos = _u_4.pos_4, _u_4.col_3) AS col_3 FROM x, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[1, 2, 3]), CARDINALITY(ARRAY[4, 5]), CARDINALITY(ARRAY[6])))) AS _u(pos) CROSS JOIN UNNEST(ARRAY[1, 2, 3]) WITH ORDINALITY AS _u_2(col, pos_2) CROSS JOIN UNNEST(ARRAY[4, 5]) WITH ORDINALITY AS _u_3(col_2, pos_3) CROSS JOIN UNNEST(ARRAY[6]) WITH ORDINALITY AS _u_4(col_3, pos_4) WHERE ((_u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(ARRAY[1, 2, 3]) AND _u_2.pos_2 = CARDINALITY(ARRAY[1, 2, 3]))) AND (_u.pos = _u_3.pos_3 OR (_u.pos > CARDINALITY(ARRAY[4, 5]) AND _u_3.pos_3 = CARDINALITY(ARRAY[4, 5])))) AND (_u.pos = _u_4.pos_4 OR (_u.pos > CARDINALITY(ARRAY[6]) AND _u_4.pos_4 = CARDINALITY(ARRAY[6])))",
+ "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_3, col_2, NULL) AS col_2, IF(pos = pos_4, col_3, NULL) AS col_3 FROM x CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH([1, 2, 3]), ARRAY_LENGTH([4, 5]), ARRAY_LENGTH([6])) - 1)) AS pos CROSS JOIN UNNEST([1, 2, 3]) AS col WITH OFFSET AS pos_2 CROSS JOIN UNNEST([4, 5]) AS col_2 WITH OFFSET AS pos_3 CROSS JOIN UNNEST([6]) AS col_3 WITH OFFSET AS pos_4 WHERE ((pos = pos_2 OR (pos > (ARRAY_LENGTH([1, 2, 3]) - 1) AND pos_2 = (ARRAY_LENGTH([1, 2, 3]) - 1))) AND (pos = pos_3 OR (pos > (ARRAY_LENGTH([4, 5]) - 1) AND pos_3 = (ARRAY_LENGTH([4, 5]) - 1)))) AND (pos = pos_4 OR (pos > (ARRAY_LENGTH([6]) - 1) AND pos_4 = (ARRAY_LENGTH([6]) - 1)))",
+ "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col, IF(_u.pos = _u_3.pos_3, _u_3.col_2) AS col_2, IF(_u.pos = _u_4.pos_4, _u_4.col_3) AS col_3 FROM x CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[1, 2, 3]), CARDINALITY(ARRAY[4, 5]), CARDINALITY(ARRAY[6])))) AS _u(pos) CROSS JOIN UNNEST(ARRAY[1, 2, 3]) WITH ORDINALITY AS _u_2(col, pos_2) CROSS JOIN UNNEST(ARRAY[4, 5]) WITH ORDINALITY AS _u_3(col_2, pos_3) CROSS JOIN UNNEST(ARRAY[6]) WITH ORDINALITY AS _u_4(col_3, pos_4) WHERE ((_u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(ARRAY[1, 2, 3]) AND _u_2.pos_2 = CARDINALITY(ARRAY[1, 2, 3]))) AND (_u.pos = _u_3.pos_3 OR (_u.pos > CARDINALITY(ARRAY[4, 5]) AND _u_3.pos_3 = CARDINALITY(ARRAY[4, 5])))) AND (_u.pos = _u_4.pos_4 OR (_u.pos > CARDINALITY(ARRAY[6]) AND _u_4.pos_4 = CARDINALITY(ARRAY[6])))",
},
)
self.validate_all(
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index ed5823b2..882b7f04 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -158,7 +158,7 @@ class TestPostgres(Validator):
write={
"hive": "SELECT EXPLODE(c) FROM t",
"postgres": "SELECT UNNEST(c) FROM t",
- "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col FROM t, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(c)))) AS _u(pos) CROSS JOIN UNNEST(c) WITH ORDINALITY AS _u_2(col, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(c) AND _u_2.pos_2 = CARDINALITY(c))",
+ "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col FROM t CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(c)))) AS _u(pos) CROSS JOIN UNNEST(c) WITH ORDINALITY AS _u_2(col, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(c) AND _u_2.pos_2 = CARDINALITY(c))",
},
)
self.validate_all(
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 4d8168ac..aa2cf363 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -162,7 +162,7 @@ WHERE
)
self.validate_all(
# We need to qualify the columns in this query because "value" would be ambiguous
- 'WITH t(x, "value") AS (SELECT [1, 2, 3], 1) SELECT IFF(_u.pos = _u_2.pos_2, _u_2."value", NULL) AS "value" FROM t, TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (GREATEST(ARRAY_SIZE(t.x)) - 1) + 1))) AS _u(seq, key, path, index, pos, this) CROSS JOIN TABLE(FLATTEN(INPUT => t.x)) AS _u_2(seq, key, path, pos_2, "value", this) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > (ARRAY_SIZE(t.x) - 1) AND _u_2.pos_2 = (ARRAY_SIZE(t.x) - 1))',
+ 'WITH t(x, "value") AS (SELECT [1, 2, 3], 1) SELECT IFF(_u.pos = _u_2.pos_2, _u_2."value", NULL) AS "value" FROM t CROSS JOIN TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (GREATEST(ARRAY_SIZE(t.x)) - 1) + 1))) AS _u(seq, key, path, index, pos, this) CROSS JOIN TABLE(FLATTEN(INPUT => t.x)) AS _u_2(seq, key, path, pos_2, "value", this) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > (ARRAY_SIZE(t.x) - 1) AND _u_2.pos_2 = (ARRAY_SIZE(t.x) - 1))',
read={
"duckdb": 'WITH t(x, "value") AS (SELECT [1,2,3], 1) SELECT UNNEST(t.x) AS "value" FROM t',
},
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 60c1a660..46e626cc 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -624,23 +624,23 @@ TBLPROPERTIES (
self.validate_all(
"SELECT EXPLODE(x) FROM tbl",
write={
- "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col FROM tbl, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(x)) - 1)) AS pos CROSS JOIN UNNEST(x) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(x) - 1) AND pos_2 = (ARRAY_LENGTH(x) - 1))",
- "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col FROM tbl, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(x)))) AS _u(pos) CROSS JOIN UNNEST(x) WITH ORDINALITY AS _u_2(col, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(x) AND _u_2.pos_2 = CARDINALITY(x))",
+ "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col FROM tbl CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(x)) - 1)) AS pos CROSS JOIN UNNEST(x) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(x) - 1) AND pos_2 = (ARRAY_LENGTH(x) - 1))",
+ "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col FROM tbl CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(x)))) AS _u(pos) CROSS JOIN UNNEST(x) WITH ORDINALITY AS _u_2(col, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(x) AND _u_2.pos_2 = CARDINALITY(x))",
"spark": "SELECT EXPLODE(x) FROM tbl",
},
)
self.validate_all(
"SELECT EXPLODE(col) FROM _u",
write={
- "bigquery": "SELECT IF(pos = pos_2, col_2, NULL) AS col_2 FROM _u, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(col)) - 1)) AS pos CROSS JOIN UNNEST(col) AS col_2 WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(col) - 1) AND pos_2 = (ARRAY_LENGTH(col) - 1))",
- "presto": "SELECT IF(_u_2.pos = _u_3.pos_2, _u_3.col_2) AS col_2 FROM _u, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(col)))) AS _u_2(pos) CROSS JOIN UNNEST(col) WITH ORDINALITY AS _u_3(col_2, pos_2) WHERE _u_2.pos = _u_3.pos_2 OR (_u_2.pos > CARDINALITY(col) AND _u_3.pos_2 = CARDINALITY(col))",
+ "bigquery": "SELECT IF(pos = pos_2, col_2, NULL) AS col_2 FROM _u CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(col)) - 1)) AS pos CROSS JOIN UNNEST(col) AS col_2 WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(col) - 1) AND pos_2 = (ARRAY_LENGTH(col) - 1))",
+ "presto": "SELECT IF(_u_2.pos = _u_3.pos_2, _u_3.col_2) AS col_2 FROM _u CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(col)))) AS _u_2(pos) CROSS JOIN UNNEST(col) WITH ORDINALITY AS _u_3(col_2, pos_2) WHERE _u_2.pos = _u_3.pos_2 OR (_u_2.pos > CARDINALITY(col) AND _u_3.pos_2 = CARDINALITY(col))",
"spark": "SELECT EXPLODE(col) FROM _u",
},
)
self.validate_all(
"SELECT EXPLODE(col) AS exploded FROM schema.tbl",
write={
- "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.exploded) AS exploded FROM schema.tbl, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(col)))) AS _u(pos) CROSS JOIN UNNEST(col) WITH ORDINALITY AS _u_2(exploded, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(col) AND _u_2.pos_2 = CARDINALITY(col))",
+ "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.exploded) AS exploded FROM schema.tbl CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(col)))) AS _u(pos) CROSS JOIN UNNEST(col) WITH ORDINALITY AS _u_2(exploded, pos_2) WHERE _u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(col) AND _u_2.pos_2 = CARDINALITY(col))",
},
)
self.validate_all(
@@ -666,13 +666,13 @@ TBLPROPERTIES (
self.validate_all(
"SELECT POSEXPLODE(ARRAY(2, 3)), EXPLODE(ARRAY(4, 5, 6)) FROM tbl",
write={
- "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_2, pos_2, NULL) AS pos_2, IF(pos = pos_3, col_2, NULL) AS col_2 FROM tbl, UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH([2, 3]), ARRAY_LENGTH([4, 5, 6])) - 1)) AS pos CROSS JOIN UNNEST([2, 3]) AS col WITH OFFSET AS pos_2 CROSS JOIN UNNEST([4, 5, 6]) AS col_2 WITH OFFSET AS pos_3 WHERE (pos = pos_2 OR (pos > (ARRAY_LENGTH([2, 3]) - 1) AND pos_2 = (ARRAY_LENGTH([2, 3]) - 1))) AND (pos = pos_3 OR (pos > (ARRAY_LENGTH([4, 5, 6]) - 1) AND pos_3 = (ARRAY_LENGTH([4, 5, 6]) - 1)))",
- "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col, IF(_u.pos = _u_2.pos_2, _u_2.pos_2) AS pos_2, IF(_u.pos = _u_3.pos_3, _u_3.col_2) AS col_2 FROM tbl, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[2, 3]), CARDINALITY(ARRAY[4, 5, 6])))) AS _u(pos) CROSS JOIN UNNEST(ARRAY[2, 3]) WITH ORDINALITY AS _u_2(col, pos_2) CROSS JOIN UNNEST(ARRAY[4, 5, 6]) WITH ORDINALITY AS _u_3(col_2, pos_3) WHERE (_u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(ARRAY[2, 3]) AND _u_2.pos_2 = CARDINALITY(ARRAY[2, 3]))) AND (_u.pos = _u_3.pos_3 OR (_u.pos > CARDINALITY(ARRAY[4, 5, 6]) AND _u_3.pos_3 = CARDINALITY(ARRAY[4, 5, 6])))",
+ "bigquery": "SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_2, pos_2, NULL) AS pos_2, IF(pos = pos_3, col_2, NULL) AS col_2 FROM tbl CROSS JOIN UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH([2, 3]), ARRAY_LENGTH([4, 5, 6])) - 1)) AS pos CROSS JOIN UNNEST([2, 3]) AS col WITH OFFSET AS pos_2 CROSS JOIN UNNEST([4, 5, 6]) AS col_2 WITH OFFSET AS pos_3 WHERE (pos = pos_2 OR (pos > (ARRAY_LENGTH([2, 3]) - 1) AND pos_2 = (ARRAY_LENGTH([2, 3]) - 1))) AND (pos = pos_3 OR (pos > (ARRAY_LENGTH([4, 5, 6]) - 1) AND pos_3 = (ARRAY_LENGTH([4, 5, 6]) - 1)))",
+ "presto": "SELECT IF(_u.pos = _u_2.pos_2, _u_2.col) AS col, IF(_u.pos = _u_2.pos_2, _u_2.pos_2) AS pos_2, IF(_u.pos = _u_3.pos_3, _u_3.col_2) AS col_2 FROM tbl CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[2, 3]), CARDINALITY(ARRAY[4, 5, 6])))) AS _u(pos) CROSS JOIN UNNEST(ARRAY[2, 3]) WITH ORDINALITY AS _u_2(col, pos_2) CROSS JOIN UNNEST(ARRAY[4, 5, 6]) WITH ORDINALITY AS _u_3(col_2, pos_3) WHERE (_u.pos = _u_2.pos_2 OR (_u.pos > CARDINALITY(ARRAY[2, 3]) AND _u_2.pos_2 = CARDINALITY(ARRAY[2, 3]))) AND (_u.pos = _u_3.pos_3 OR (_u.pos > CARDINALITY(ARRAY[4, 5, 6]) AND _u_3.pos_3 = CARDINALITY(ARRAY[4, 5, 6])))",
},
)
self.validate_all(
"SELECT col, pos, POSEXPLODE(ARRAY(2, 3)) FROM _u",
write={
- "presto": "SELECT col, pos, IF(_u_2.pos_2 = _u_3.pos_3, _u_3.col_2) AS col_2, IF(_u_2.pos_2 = _u_3.pos_3, _u_3.pos_3) AS pos_3 FROM _u, UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[2, 3])))) AS _u_2(pos_2) CROSS JOIN UNNEST(ARRAY[2, 3]) WITH ORDINALITY AS _u_3(col_2, pos_3) WHERE _u_2.pos_2 = _u_3.pos_3 OR (_u_2.pos_2 > CARDINALITY(ARRAY[2, 3]) AND _u_3.pos_3 = CARDINALITY(ARRAY[2, 3]))",
+ "presto": "SELECT col, pos, IF(_u_2.pos_2 = _u_3.pos_3, _u_3.col_2) AS col_2, IF(_u_2.pos_2 = _u_3.pos_3, _u_3.pos_3) AS pos_3 FROM _u CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(ARRAY[2, 3])))) AS _u_2(pos_2) CROSS JOIN UNNEST(ARRAY[2, 3]) WITH ORDINALITY AS _u_3(col_2, pos_3) WHERE _u_2.pos_2 = _u_3.pos_3 OR (_u_2.pos_2 > CARDINALITY(ARRAY[2, 3]) AND _u_3.pos_3 = CARDINALITY(ARRAY[2, 3]))",
},
)
|
`explode_to_unnest` transformation generates query that cannot be executed with trino
sqlglot code:
```
In [8]: import sqlglot as sg
In [9]: print(
...: sg.parse_one(
...: "select unnest(t.x) from (values [1, 2, 3] as t (x))", read="duckdb"
...: ).sql("trino", pretty=True)
...: )
SELECT
IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
FROM (VALUES
(ARRAY[1, 2, 3])) AS t(x), UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
WHERE
_u.pos = _u_2.pos_2
OR (
_u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
)
```
trino-cli:
```
trino:default> SELECT
-> IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
-> FROM (VALUES
-> (ARRAY[1, 2, 3])) AS t(x), UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
-> CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
-> WHERE
-> _u.pos = _u_2.pos_2
-> OR (
-> _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
-> );
Query 20231230_105739_28099_gh8pj failed: line 4:70: Column 't.x' cannot be resolved
```
Changing the first `,` to be `CROSS JOIN` instead fixes the issue:
```
trino:default> SELECT
-> IF(_u.pos = _u_2.pos_2, _u_2.col) AS col
-> FROM (VALUES
-> (ARRAY[1, 2, 3])) AS t(x) CROSS JOIN UNNEST(SEQUENCE(1, GREATEST(CARDINALITY(t.x)))) AS _u(pos)
-> CROSS JOIN UNNEST(t.x) WITH ORDINALITY AS _u_2(col, pos_2)
-> WHERE
-> _u.pos = _u_2.pos_2
-> OR (
-> _u.pos > CARDINALITY(t.x) AND _u_2.pos_2 = CARDINALITY(t.x)
-> );
col
-----
1
2
3
(3 rows)
Query 20231230_105747_28107_gh8pj, FINISHED, 1 node
Splits: 17 total, 17 done (100.00%)
0.08 [0 rows, 0B] [0 rows/s, 0B/s]
```
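The fix itself is the one-line `join_type="CROSS"` argument in `explode_to_unnest` above; a sketch of the builder behavior it relies on (table names are illustrative):

```python
import sqlglot

select = sqlglot.parse_one("SELECT a FROM t")
# Passing join_type="CROSS" emits an explicit CROSS JOIN instead of the
# comma join that Trino rejects when the right side references t:
select = select.join("u", join_type="CROSS")
print(select.sql())  # SELECT a FROM t CROSS JOIN u
```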
|
0.0
|
1ebfb3688975e420a70bac10c49ad127446c4c65
|
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/dialects/test_spark.py::TestSpark::test_explode_to_unnest"
] |
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array_index",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests/dialects/test_duckdb.py::TestDuckDB::test_isinf",
"tests/dialects/test_duckdb.py::TestDuckDB::test_isnan",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_duckdb.py::TestDuckDB::test_timestamps_with_units",
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_variance",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_spark.py::TestSpark::test_bool_or",
"tests/dialects/test_spark.py::TestSpark::test_current_user",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_iif",
"tests/dialects/test_spark.py::TestSpark::test_insert_cte",
"tests/dialects/test_spark.py::TestSpark::test_spark",
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/dialects/test_spark.py::TestSpark::test_transform_query"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-12-31 13:02:38+00:00
|
mit
| 6,006 |
|
tobymao__sqlglot-2769
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 230e529f..d89ac5f9 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -17,6 +17,7 @@ from sqlglot.dialects.dialect import (
encode_decode_sql,
format_time_lambda,
inline_array_sql,
+ json_keyvalue_comma_sql,
no_comment_column_constraint_sql,
no_properties_sql,
no_safe_divide_sql,
@@ -349,11 +350,12 @@ class DuckDB(Dialect):
exp.IntDiv: lambda self, e: self.binary(e, "//"),
exp.IsInf: rename_func("ISINF"),
exp.IsNan: rename_func("ISNAN"),
+ exp.JSONBExtract: arrow_json_extract_sql,
+ exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
exp.JSONExtract: arrow_json_extract_sql,
exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
exp.JSONFormat: _json_format_sql,
- exp.JSONBExtract: arrow_json_extract_sql,
- exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
+ exp.JSONKeyValue: json_keyvalue_comma_sql,
exp.LogicalOr: rename_func("BOOL_OR"),
exp.LogicalAnd: rename_func("BOOL_AND"),
exp.MonthsBetween: lambda self, e: self.func(
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 6b5f71b3..f6bc55d5 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -14,6 +14,7 @@ from sqlglot.dialects.dialect import (
format_time_lambda,
if_sql,
inline_array_sql,
+ json_keyvalue_comma_sql,
max_or_greatest,
min_or_least,
rename_func,
@@ -445,6 +446,7 @@ class Snowflake(Dialect):
FUNCTION_PARSERS = {
**parser.Parser.FUNCTION_PARSERS,
"DATE_PART": _parse_date_part,
+ "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
}
FUNCTION_PARSERS.pop("TRIM")
@@ -694,6 +696,8 @@ class Snowflake(Dialect):
exp.GroupConcat: rename_func("LISTAGG"),
exp.If: if_sql(name="IFF", false_value="NULL"),
exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
+ exp.JSONKeyValue: json_keyvalue_comma_sql,
+ exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
exp.LogicalAnd: rename_func("BOOLAND_AGG"),
exp.LogicalOr: rename_func("BOOLOR_AGG"),
exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
|
tobymao/sqlglot
|
a2abbc773fb330e669c81abc115a81e1055a060f
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index f42a3315..5de56573 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -174,6 +174,18 @@ WHERE
"CAST(x AS VARCHAR)",
)
+ self.validate_all(
+ "OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', 'key_2', NULL)",
+ read={
+ "bigquery": "JSON_OBJECT(['key_1', 'key_2'], ['one', NULL])",
+ "duckdb": "JSON_OBJECT('key_1', 'one', 'key_2', NULL)",
+ },
+ write={
+ "bigquery": "JSON_OBJECT('key_1', 'one', 'key_2', NULL)",
+ "duckdb": "JSON_OBJECT('key_1', 'one', 'key_2', NULL)",
+ "snowflake": "OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', 'key_2', NULL)",
+ },
+ )
self.validate_all(
"SELECT * FROM example TABLESAMPLE (3) SEED (82)",
read={
|
Support OBJECT_CONSTRUCT_KEEP_NULL (Snowflake)
A [Snowflake OBJECT_CONSTRUCT_KEEP_NULL](https://docs.snowflake.com/en/sql-reference/functions/object_construct_keep_null#examples) example, where the key is NULL:
```sql
SELECT OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', NULL, 'two') AS KEEP_NULL_2
{
  "key_1": "one"
}
```
which maps to JSON_OBJECT in duckdb, eg:
```sql
SELECT JSON_OBJECT('key_1', 'one', NULL, 'two') AS KEEP_NULL_2
{"key_1":"one"}
```
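A sketch of the round trip the patch enables, matching the expectations in the test patch above (assuming a build that includes it):

```python
import sqlglot

# OBJECT_CONSTRUCT_KEEP_NULL maps to JSON_OBJECT outside Snowflake:
print(sqlglot.transpile(
    "SELECT OBJECT_CONSTRUCT_KEEP_NULL('key_1', 'one', 'key_2', NULL)",
    read="snowflake",
    write="duckdb",
)[0])
# SELECT JSON_OBJECT('key_1', 'one', 'key_2', NULL)
```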
|
0.0
|
a2abbc773fb330e669c81abc115a81e1055a060f
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-04 15:47:47+00:00
|
mit
| 6,007 |
|
tobymao__sqlglot-2770
|
diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py
index aefe625b..a555f5c4 100644
--- a/sqlglot/dialects/tsql.py
+++ b/sqlglot/dialects/tsql.py
@@ -730,6 +730,17 @@ class TSQL(Dialect):
exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}
+ def lateral_op(self, expression: exp.Lateral) -> str:
+ cross_apply = expression.args.get("cross_apply")
+ if cross_apply is True:
+ return "CROSS APPLY"
+ if cross_apply is False:
+ return "OUTER APPLY"
+
+ # TODO: perhaps we can check if the parent is a Join and transpile it appropriately
+ self.unsupported("LATERAL clause is not supported.")
+ return "LATERAL"
+
def timefromparts_sql(self, expression: exp.TimeFromParts) -> str:
nano = expression.args.get("nano")
if nano is not None:
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 56580dcf..9609c332 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1925,7 +1925,13 @@ class Join(Expression):
class Lateral(UDTF):
- arg_types = {"this": True, "view": False, "outer": False, "alias": False}
+ arg_types = {
+ "this": True,
+ "view": False,
+ "outer": False,
+ "alias": False,
+ "cross_apply": False, # True -> CROSS APPLY, False -> OUTER APPLY
+ }
class MatchRecognize(Expression):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index c9adb835..98337fc6 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1686,7 +1686,8 @@ class Generator:
if not on_sql and using:
on_sql = csv(*(self.sql(column) for column in using))
- this_sql = self.sql(expression, "this")
+ this = expression.this
+ this_sql = self.sql(this)
if on_sql:
on_sql = self.indent(on_sql, skip_first=True)
@@ -1696,6 +1697,9 @@ class Generator:
else:
on_sql = f"{space}ON {on_sql}"
elif not op_sql:
+ if isinstance(this, exp.Lateral) and this.args.get("cross_apply") is not None:
+ return f" {this_sql}"
+
return f", {this_sql}"
op_sql = f"{op_sql} JOIN" if op_sql else "JOIN"
@@ -1706,6 +1710,19 @@ class Generator:
args = f"({args})" if len(args.split(",")) > 1 else args
return f"{args} {arrow_sep} {self.sql(expression, 'this')}"
+ def lateral_op(self, expression: exp.Lateral) -> str:
+ cross_apply = expression.args.get("cross_apply")
+
+ # https://www.mssqltips.com/sqlservertip/1958/sql-server-cross-apply-and-outer-apply/
+ if cross_apply is True:
+ op = "INNER JOIN "
+ elif cross_apply is False:
+ op = "LEFT JOIN "
+ else:
+ op = ""
+
+ return f"{op}LATERAL"
+
def lateral_sql(self, expression: exp.Lateral) -> str:
this = self.sql(expression, "this")
@@ -1719,7 +1736,7 @@ class Generator:
alias = self.sql(expression, "alias")
alias = f" AS {alias}" if alias else ""
- return f"LATERAL {this}{alias}"
+ return f"{self.lateral_op(expression)} {this}{alias}"
def limit_sql(self, expression: exp.Limit, top: bool = False) -> str:
this = self.sql(expression, "this")
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 89577cd5..40a71da5 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2493,13 +2493,14 @@ class Parser(metaclass=_Parser):
)
def _parse_lateral(self) -> t.Optional[exp.Lateral]:
- outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY)
cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY)
+ if not cross_apply and self._match_pair(TokenType.OUTER, TokenType.APPLY):
+ cross_apply = False
- if outer_apply or cross_apply:
+ if cross_apply is not None:
this = self._parse_select(table=True)
view = None
- outer = not cross_apply
+ outer = None
elif self._match(TokenType.LATERAL):
this = self._parse_select(table=True)
view = self._match(TokenType.VIEW)
@@ -2532,7 +2533,14 @@ class Parser(metaclass=_Parser):
else:
table_alias = self._parse_table_alias()
- return self.expression(exp.Lateral, this=this, view=view, outer=outer, alias=table_alias)
+ return self.expression(
+ exp.Lateral,
+ this=this,
+ view=view,
+ outer=outer,
+ alias=table_alias,
+ cross_apply=cross_apply,
+ )
def _parse_join_parts(
self,
@@ -2566,9 +2574,6 @@ class Parser(metaclass=_Parser):
if not skip_join_token and not join and not outer_apply and not cross_apply:
return None
- if outer_apply:
- side = Token(TokenType.LEFT, "LEFT")
-
kwargs: t.Dict[str, t.Any] = {"this": self._parse_table(parse_bracket=parse_bracket)}
if method:
|
tobymao/sqlglot
|
f65ed4d86517edd266ab9daf75570ea673a447af
|
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index e9c0a84d..fde88d70 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -1247,39 +1247,45 @@ WHERE
self.validate_all(
"SELECT x.a, x.b, t.v, t.y FROM x CROSS APPLY (SELECT v, y FROM t) t(v, y)",
write={
- "spark": "SELECT x.a, x.b, t.v, t.y FROM x, LATERAL (SELECT v, y FROM t) AS t(v, y)",
+ "spark": "SELECT x.a, x.b, t.v, t.y FROM x INNER JOIN LATERAL (SELECT v, y FROM t) AS t(v, y)",
+ "tsql": "SELECT x.a, x.b, t.v, t.y FROM x CROSS APPLY (SELECT v, y FROM t) AS t(v, y)",
},
)
self.validate_all(
"SELECT x.a, x.b, t.v, t.y FROM x OUTER APPLY (SELECT v, y FROM t) t(v, y)",
write={
"spark": "SELECT x.a, x.b, t.v, t.y FROM x LEFT JOIN LATERAL (SELECT v, y FROM t) AS t(v, y)",
+ "tsql": "SELECT x.a, x.b, t.v, t.y FROM x OUTER APPLY (SELECT v, y FROM t) AS t(v, y)",
},
)
self.validate_all(
"SELECT x.a, x.b, t.v, t.y, s.v, s.y FROM x OUTER APPLY (SELECT v, y FROM t) t(v, y) OUTER APPLY (SELECT v, y FROM t) s(v, y) LEFT JOIN z ON z.id = s.id",
write={
"spark": "SELECT x.a, x.b, t.v, t.y, s.v, s.y FROM x LEFT JOIN LATERAL (SELECT v, y FROM t) AS t(v, y) LEFT JOIN LATERAL (SELECT v, y FROM t) AS s(v, y) LEFT JOIN z ON z.id = s.id",
+ "tsql": "SELECT x.a, x.b, t.v, t.y, s.v, s.y FROM x OUTER APPLY (SELECT v, y FROM t) AS t(v, y) OUTER APPLY (SELECT v, y FROM t) AS s(v, y) LEFT JOIN z ON z.id = s.id",
},
)
def test_lateral_table_valued_function(self):
self.validate_all(
- "SELECT t.x, y.z FROM x CROSS APPLY tvfTest(t.x)y(z)",
+ "SELECT t.x, y.z FROM x CROSS APPLY tvfTest(t.x) y(z)",
write={
- "spark": "SELECT t.x, y.z FROM x, LATERAL TVFTEST(t.x) AS y(z)",
+ "spark": "SELECT t.x, y.z FROM x INNER JOIN LATERAL TVFTEST(t.x) AS y(z)",
+ "tsql": "SELECT t.x, y.z FROM x CROSS APPLY TVFTEST(t.x) AS y(z)",
},
)
self.validate_all(
"SELECT t.x, y.z FROM x OUTER APPLY tvfTest(t.x)y(z)",
write={
"spark": "SELECT t.x, y.z FROM x LEFT JOIN LATERAL TVFTEST(t.x) AS y(z)",
+ "tsql": "SELECT t.x, y.z FROM x OUTER APPLY TVFTEST(t.x) AS y(z)",
},
)
self.validate_all(
"SELECT t.x, y.z FROM x OUTER APPLY a.b.tvfTest(t.x)y(z)",
write={
"spark": "SELECT t.x, y.z FROM x LEFT JOIN LATERAL a.b.TVFTEST(t.x) AS y(z)",
+ "tsql": "SELECT t.x, y.z FROM x OUTER APPLY a.b.TVFTEST(t.x) AS y(z)",
},
)
|
CROSS APPLY is changed to LATERAL (tsql)
Code to reproduce:
```
import sqlglot
input_sql = """
SELECT
sd1.id,
sd1.item_id,
sd1.ds,
FROM
sqlmesh_example.seed_model AS sd1
CROSS APPLY (
SELECT TOP 1
sd2.id
FROM
sqlmesh_example.seed_model AS sd2
WHERE sd1.id = sd2.id
)
"""
sqlglot.transpile(input_sql, read="tsql")
```
Code output:
`'SELECT sd1.id, sd1.item_id, sd1.ds FROM sqlmesh_example.seed_model AS sd1, LATERAL (SELECT TOP 1 sd2.id FROM sqlmesh_example.seed_model AS sd2 WHERE sd1.id = sd2.id)'`
Error when executing the query, coming from the inner SELECT inside parentheses:
`SQL Error [156] [S0001]: Incorrect syntax near the keyword 'SELECT'.`
While LATERAL is a reserved keyword in tsql, it has no functional use there. I would expect CROSS APPLY to stay CROSS APPLY when transpiling back to tsql, and inner LATERAL joins from other dialects to be converted to CROSS APPLY when transpiling to tsql.
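With the `cross_apply` flag threaded through `exp.Lateral` as in the patch above, both the tsql identity and the lateral-join rewrite hold; a sketch matching the test patch's expectations (assuming a build that includes the fix):

```python
import sqlglot

sql = "SELECT t.x, y.z FROM x CROSS APPLY tvfTest(t.x) y(z)"
# CROSS APPLY survives a tsql round trip:
print(sqlglot.transpile(sql, read="tsql", write="tsql")[0])
# SELECT t.x, y.z FROM x CROSS APPLY TVFTEST(t.x) AS y(z)
# For dialects with LATERAL, it becomes an explicit inner lateral join:
print(sqlglot.transpile(sql, read="tsql", write="spark")[0])
# SELECT t.x, y.z FROM x INNER JOIN LATERAL TVFTEST(t.x) AS y(z)
```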
|
0.0
|
f65ed4d86517edd266ab9daf75570ea673a447af
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function"
] |
[
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_iif",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_jsonvalue",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_temporal_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-04 20:47:38+00:00
|
mit
| 6,008 |
|
tobymao__sqlglot-2795
|
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index eb3be0a9..795a04e5 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -23,16 +23,25 @@ def _lower_func(sql: str) -> str:
return sql[:index].lower() + sql[index:]
-def _quantile_sql(self, e):
+def _quantile_sql(self: ClickHouse.Generator, e: exp.Quantile) -> str:
quantile = e.args["quantile"]
args = f"({self.sql(e, 'this')})"
+
if isinstance(quantile, exp.Array):
func = self.func("quantiles", *quantile)
else:
func = self.func("quantile", quantile)
+
return func + args
+def _parse_count_if(args: t.List) -> exp.CountIf | exp.CombinedAggFunc:
+ if len(args) == 1:
+ return exp.CountIf(this=seq_get(args, 0))
+
+ return exp.CombinedAggFunc(this="countIf", expressions=args, parts=("count", "If"))
+
+
class ClickHouse(Dialect):
NORMALIZE_FUNCTIONS: bool | str = False
NULL_ORDERING = "nulls_are_last"
@@ -92,6 +101,7 @@ class ClickHouse(Dialect):
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
"ANY": exp.AnyValue.from_arg_list,
+ "COUNTIF": _parse_count_if,
"DATE_ADD": lambda args: exp.DateAdd(
this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
),
@@ -542,6 +552,7 @@ class ClickHouse(Dialect):
exp.ArgMin: arg_max_or_min_no_count("argMin"),
exp.Array: inline_array_sql,
exp.CastToStrType: rename_func("CAST"),
+ exp.CountIf: rename_func("countIf"),
exp.CurrentDate: lambda self, e: self.func("CURRENT_DATE"),
exp.DateAdd: date_delta_sql("DATE_ADD"),
exp.DateDiff: date_delta_sql("DATE_DIFF"),
|
tobymao/sqlglot
|
a2499f591eeb7538db86abd8cc9341c8d91e325d
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 6844239e..71f2c196 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -292,8 +292,13 @@ class TestBigQuery(Validator):
)
self.validate_all(
"SELECT COUNTIF(x)",
+ read={
+ "clickhouse": "SELECT countIf(x)",
+ "duckdb": "SELECT COUNT_IF(x)",
+ },
write={
"bigquery": "SELECT COUNTIF(x)",
+ "clickhouse": "SELECT countIf(x)",
"duckdb": "SELECT COUNT_IF(x)",
},
)
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index fa40264c..2dfcad5c 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -26,6 +26,7 @@ class TestClickhouse(Validator):
self.assertEqual(expr.sql(dialect="clickhouse"), "COUNT(x)")
self.assertIsNone(expr._meta)
+ self.validate_identity("countIf(x, y)")
self.validate_identity("x = y")
self.validate_identity("x <> y")
self.validate_identity("SELECT * FROM (SELECT a FROM b SAMPLE 0.01)")
|
two-argument version of clickhouse's `countIf` function incorrectly fails to parse
This used to work:
```
In [3]: import sqlglot as sg
In [4]: sg.__version__
Out[4]: '20.7.1'
In [5]: sg.parse_one("select countIf(x, y)", read="clickhouse")
...
ParseError: The number of provided arguments (2) is greater than the maximum number of supported arguments (1). Line 1, Col: 20.
select countIf(x, y)
```
ClickHouse CLI:
```
localhost :) select countIf(1, false), countIf(1, true);
SELECT
countIf(1, false),
countIf(1, true)
Query id: cd5dea26-fae2-4bdc-a18b-edbf5b910fda
┌─countIf(1, false)─┬─countIf(1, true)─┐
│ 0 │ 1 │
└───────────────────┴──────────────────┘
```
Unfortunately, the ClickHouse docs on `countIf`'s signature are nonexistent, with only a single mention of the function in the `If` combinators section here: https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators#-if
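For reference, a minimal sketch of the intended behavior, assuming a sqlglot build that includes the patch above: the one-argument form maps to `exp.CountIf`, while the two-argument form round-trips via `exp.CombinedAggFunc`.
```python
# Minimal sketch, assuming a sqlglot build that includes the countIf fix above.
import sqlglot

# One-argument form parses to exp.CountIf; the two-argument form is preserved
# as a combined aggregate ("count" + "If") and round-trips unchanged.
print(sqlglot.parse_one("SELECT countIf(x)", read="clickhouse").sql(dialect="clickhouse"))
print(sqlglot.parse_one("SELECT countIf(x, y)", read="clickhouse").sql(dialect="clickhouse"))
# SELECT countIf(x)
# SELECT countIf(x, y)
```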
|
0.0
|
a2499f591eeb7538db86abd8cc9341c8d91e325d
|
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse"
] |
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-08 17:00:53+00:00
|
mit
| 6,009 |
|
tobymao__sqlglot-2800
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index ad14e6ee..454df94c 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -328,6 +328,9 @@ def _parse_colon_get_path(
if not self._match(TokenType.COLON):
break
+ if self._match_set(self.RANGE_PARSERS):
+ this = self.RANGE_PARSERS[self._prev.token_type](self, this) or this
+
return this
|
tobymao/sqlglot
|
18e07d3353c1e11cc5b3ba2025e4440f48c2be02
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 602bc630..39963b28 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -78,6 +78,14 @@ WHERE
self.validate_identity(
"SELECT a FROM test PIVOT(SUM(x) FOR y IN ('z', 'q')) AS x TABLESAMPLE (0.1)"
)
+ self.validate_identity(
+ """SELECT PARSE_JSON('{"x": "hello"}'):x LIKE 'hello'""",
+ """SELECT GET_PATH(PARSE_JSON('{"x": "hello"}'), 'x') LIKE 'hello'""",
+ )
+ self.validate_identity(
+ """SELECT data:x LIKE 'hello' FROM some_table""",
+ """SELECT GET_PATH(data, 'x') LIKE 'hello' FROM some_table""",
+ )
self.validate_identity(
"SELECT SUM({ fn CONVERT(123, SQL_DOUBLE) })",
"SELECT SUM(CAST(123 AS DOUBLE))",
|
ParseError when using LIKE/ILIKE on an element in an object in Snowflake
I'm getting `ParseError: Invalid expression / Unexpected token` when using `LIKE` or `ILIKE` on an element within an object in Snowflake.
Example:
```
import sqlglot
sqlglot.parse(""" select parse_json('{"x": "hello"}'):x like 'hello' """, read="snowflake")
sqlglot.parse(""" select data:x like 'hello' from some_table """, read="snowflake")
```
Both of these cause the parsing error, but both are valid Snowflake statements.
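For reference, a minimal sketch of the behavior the fix above enables, assuming a sqlglot build that includes it: the colon accessor is rewritten to `GET_PATH`, and `LIKE` binds to the extracted value.
```python
# Minimal sketch, assuming a sqlglot build with the colon/RANGE_PARSERS fix above.
import sqlglot

expr = sqlglot.parse_one("select data:x like 'hello' from some_table", read="snowflake")
print(expr.sql(dialect="snowflake"))
# SELECT GET_PATH(data, 'x') LIKE 'hello' FROM some_table
```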
|
0.0
|
18e07d3353c1e11cc5b3ba2025e4440f48c2be02
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-01-09 16:42:25+00:00
|
mit
| 6,010 |
|
tobymao__sqlglot-2825
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index f88f3eb1..6ca8e9de 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -663,7 +663,9 @@ class Snowflake(Dialect):
"MINUS": TokenType.EXCEPT,
"NCHAR VARYING": TokenType.VARCHAR,
"PUT": TokenType.COMMAND,
+ "REMOVE": TokenType.COMMAND,
"RENAME": TokenType.REPLACE,
+ "RM": TokenType.COMMAND,
"SAMPLE": TokenType.TABLE_SAMPLE,
"SQL_DOUBLE": TokenType.DOUBLE,
"SQL_VARCHAR": TokenType.VARCHAR,
|
tobymao/sqlglot
|
7bce2f6abe79dfd8064c625294d94364042207c5
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 1f365bea..5dd81cdc 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -39,6 +39,8 @@ WHERE
)""",
)
+ self.validate_identity("RM @parquet_stage")
+ self.validate_identity("REMOVE @parquet_stage")
self.validate_identity("SELECT TIMESTAMP_FROM_PARTS(d, t)")
self.validate_identity("SELECT GET_PATH(v, 'attr[0].name') FROM vartab")
self.validate_identity("SELECT TO_ARRAY(CAST(x AS ARRAY))")
|
sqlglot 20.8.0 incorrectly transpiles or fails to parse Snowflake REMOVE / RM syntax
Snowflake RM / REMOVE syntax is incorrectly transpiled or causes errors for the cases from the documentation: `"RM @parquet_stage;" -> "RM AS $parquet_stage;"` is rejected by Snowflake at runtime, and the other cases cause ParseErrors.
## reference
https://docs.snowflake.com/en/sql-reference/sql/remove
## code
```
examples = [
"RM @parquet_stage;",
"REMOVE @parquet_stage;",
"RM @%mytable/myobject;",
"RM @%mytable/myobject/;",
"RM @~ pattern='.*jun.*';",
"REMOVE @%orders;",
"REMOVE @mystage/path1/subpath2;",
]
import sqlglot
import sqlglot.errors
print(sqlglot.__version__)
for i, s in enumerate(examples):
try:
t = sqlglot.transpile(
s.strip(), read="snowflake", write="snowflake", pretty=True
)
print(i, s, "->", t)
except sqlglot.errors.ParseError as e:
print(i, "error", e)
```
## output
```
20.8.0
0 RM @parquet_stage; -> ['RM AS $parquet_stage']
1 REMOVE @parquet_stage; -> ['REMOVE AS $parquet_stage']
2 error Required keyword: 'this' missing for <class 'sqlglot.expressions.Parameter'>. Line 1, Col: 5.
RM @%mytable/myobject;
3 error Required keyword: 'this' missing for <class 'sqlglot.expressions.Parameter'>. Line 1, Col: 5.
RM @%mytable/myobject/;
4 error Required keyword: 'this' missing for <class 'sqlglot.expressions.Parameter'>. Line 1, Col: 5.
RM @~ pattern='.*jun.*';
5 error Required keyword: 'this' missing for <class 'sqlglot.expressions.Parameter'>. Line 1, Col: 9.
REMOVE @%orders;
6 error Invalid expression / Unexpected token. Line 1, Col: 16.
REMOVE @mystage/path1/subpath2;
```
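For reference, a minimal sketch of the fixed behavior, assuming a build where `RM`/`REMOVE` are tokenized as commands as in the patch above: the simple cases round-trip unchanged instead of being rewritten to `RM AS $parquet_stage`.
```python
# Minimal sketch, assuming RM/REMOVE tokenize as commands per the patch above.
import sqlglot

print(sqlglot.transpile("RM @parquet_stage", read="snowflake", write="snowflake"))
# ['RM @parquet_stage']
```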
|
0.0
|
7bce2f6abe79dfd8064c625294d94364042207c5
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-13 00:42:31+00:00
|
mit
| 6,011 |
|
tobymao__sqlglot-2857
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index ddad8f83..2286d682 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1667,6 +1667,7 @@ class Index(Expression):
"unique": False,
"primary": False,
"amp": False, # teradata
+ "include": False,
"partition_by": False, # teradata
"where": False, # postgres partial indexes
}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 977185ff..79e4b898 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1111,7 +1111,10 @@ class Generator:
partition_by = self.expressions(expression, key="partition_by", flat=True)
partition_by = f" PARTITION BY {partition_by}" if partition_by else ""
where = self.sql(expression, "where")
- return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{partition_by}{where}"
+ include = self.expressions(expression, key="include", flat=True)
+ if include:
+ include = f" INCLUDE ({include})"
+ return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{include}{partition_by}{where}"
def identifier_sql(self, expression: exp.Identifier) -> str:
text = expression.name
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 5fb40b95..54d09715 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2681,6 +2681,8 @@ class Parser(metaclass=_Parser):
else:
columns = None
+ include = self._parse_wrapped_id_vars() if self._match_text_seq("INCLUDE") else None
+
return self.expression(
exp.Index,
this=index,
@@ -2690,6 +2692,7 @@ class Parser(metaclass=_Parser):
unique=unique,
primary=primary,
amp=amp,
+ include=include,
partition_by=self._parse_partition_by(),
where=self._parse_where(),
)
|
tobymao/sqlglot
|
bf03a45d8df9abd63b8102e431c13ca0eb0b0fb0
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index ed25315b..1c2a2286 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -582,6 +582,7 @@ class TestPostgres(Validator):
cdef.args["kind"].assert_is(exp.DataType)
self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL DAY)")
+ self.validate_identity("CREATE INDEX et_vid_idx ON et(vid) INCLUDE (fid)")
self.validate_identity("CREATE INDEX idx_x ON x USING BTREE(x, y) WHERE (NOT y IS NULL)")
self.validate_identity("CREATE TABLE test (elems JSONB[])")
self.validate_identity("CREATE TABLE public.y (x TSTZRANGE NOT NULL)")
|
INCLUDE keyword for indexes in PostgreSQL
As far as I understand, the INCLUDE keyword for indexes isn't supported. Not a big problem, but it would be better if it worked.
```text
Python 3.8.10 (default, Nov 22 2023, 10:22:35)
[GCC 9.4.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlglot
>>> sqlglot.parse_one("CREATE INDEX et_vid_idx ON et (vid) INCLUDE (fid)", read="postgres")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/__init__.py", line 125, in parse_one
result = dialect.parse(sql, **opts)
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/dialects/dialect.py", line 442, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1034, in parse
return self._parse(
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1103, in _parse
self.raise_error("Invalid expression / Unexpected token")
File "/opt/ngw/env/lib/python3.8/site-packages/sqlglot/parser.py", line 1144, in raise_error
raise error
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 1, Col: 43.
CREATE INDEX et_vid_idx ON et (vid) INCLUDE (fid)
>>> sqlglot.__version__
'20.9.0'
```
Without ``INCLUDE`` everything works:
```text
>>> sqlglot.parse_one("CREATE INDEX et_vid_idx ON et (vid)", read="postgres")
Create(
this=Index(
this=Identifier(this=et_vid_idx, quoted=False),
table=Table(
this=Identifier(this=et, quoted=False)),
columns=[
Ordered(
this=Column(
this=Identifier(this=vid, quoted=False)),
nulls_first=False)]),
kind=INDEX,
exists=False)
```
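For reference, a minimal sketch of the fixed behavior, assuming a build that parses the INCLUDE clause as in the patch above:
```python
# Minimal sketch, assuming INCLUDE columns are parsed as in the patch above.
import sqlglot

ast = sqlglot.parse_one("CREATE INDEX et_vid_idx ON et (vid) INCLUDE (fid)", read="postgres")
print(ast.sql(dialect="postgres"))
# CREATE INDEX et_vid_idx ON et(vid) INCLUDE (fid)
```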
|
0.0
|
bf03a45d8df9abd63b8102e431c13ca0eb0b0fb0
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-19 03:56:02+00:00
|
mit
| 6,012 |
|
tobymao__sqlglot-2861
|
diff --git a/sqlglot/dataframe/sql/functions.py b/sqlglot/dataframe/sql/functions.py
index 141a302e..a388cb4b 100644
--- a/sqlglot/dataframe/sql/functions.py
+++ b/sqlglot/dataframe/sql/functions.py
@@ -661,7 +661,7 @@ def from_utc_timestamp(timestamp: ColumnOrName, tz: ColumnOrName) -> Column:
def to_utc_timestamp(timestamp: ColumnOrName, tz: ColumnOrName) -> Column:
tz_column = tz if isinstance(tz, Column) else lit(tz)
- return Column.invoke_anonymous_function(timestamp, "TO_UTC_TIMESTAMP", tz_column)
+ return Column.invoke_expression_over_column(timestamp, expression.FromTimeZone, zone=tz_column)
def timestamp_seconds(col: ColumnOrName) -> Column:
diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 0151e6c8..83ae94ed 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -560,6 +560,9 @@ class BigQuery(Dialect):
exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
+ exp.FromTimeZone: lambda self, e: self.func(
+ "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
+ ),
exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
exp.GetPath: path_to_jsonpath(),
exp.GroupConcat: rename_func("STRING_AGG"),
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 9b421e7f..6cc6030c 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -356,6 +356,7 @@ class Presto(Dialect):
exp.Encode: lambda self, e: encode_decode_sql(self, e, "TO_UTF8"),
exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
exp.First: _first_last_sql,
+ exp.FromTimeZone: lambda self, e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'",
exp.GetPath: path_to_jsonpath(),
exp.Group: transforms.preprocess([transforms.unalias_group]),
exp.GroupConcat: lambda self, e: self.func(
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 3cf7f7d1..43a439da 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -687,6 +687,9 @@ class Snowflake(Dialect):
exp.DayOfYear: rename_func("DAYOFYEAR"),
exp.Explode: rename_func("FLATTEN"),
exp.Extract: rename_func("DATE_PART"),
+ exp.FromTimeZone: lambda self, e: self.func(
+ "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
+ ),
exp.GenerateSeries: lambda self, e: self.func(
"ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
),
diff --git a/sqlglot/dialects/spark2.py b/sqlglot/dialects/spark2.py
index e27ba185..7ecb06f0 100644
--- a/sqlglot/dialects/spark2.py
+++ b/sqlglot/dialects/spark2.py
@@ -133,6 +133,14 @@ class Spark2(Hive):
if len(args) == 1
else format_time_lambda(exp.StrToTime, "spark")(args),
"TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list,
+ "TO_UTC_TIMESTAMP": lambda args: exp.FromTimeZone(
+ this=exp.cast_unless(
+ seq_get(args, 0) or exp.Var(this=""),
+ exp.DataType.build("timestamp"),
+ exp.DataType.build("timestamp"),
+ ),
+ zone=seq_get(args, 1),
+ ),
"TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)),
"WEEKOFYEAR": lambda args: exp.WeekOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
}
@@ -188,6 +196,7 @@ class Spark2(Hive):
exp.DayOfYear: rename_func("DAYOFYEAR"),
exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
exp.From: transforms.preprocess([_unalias_pivot]),
+ exp.FromTimeZone: lambda self, e: f"TO_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
exp.LogicalAnd: rename_func("BOOL_AND"),
exp.LogicalOr: rename_func("BOOL_OR"),
exp.Map: _map_sql,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 597a37fe..7f68015e 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -4171,6 +4171,10 @@ class AtTimeZone(Expression):
arg_types = {"this": True, "zone": True}
+class FromTimeZone(Expression):
+ arg_types = {"this": True, "zone": True}
+
+
class Between(Predicate):
arg_types = {"this": True, "low": True, "high": True}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index bb26b385..704e9eec 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2555,6 +2555,11 @@ class Generator:
zone = self.sql(expression, "zone")
return f"{this} AT TIME ZONE {zone}"
+ def fromtimezone_sql(self, expression: exp.FromTimeZone) -> str:
+ this = self.sql(expression, "this")
+ zone = self.sql(expression, "zone")
+ return f"{this} AT TIME ZONE {zone} AT TIME ZONE 'UTC'"
+
def add_sql(self, expression: exp.Add) -> str:
return self.binary(expression, "+")
|
tobymao/sqlglot
|
90ffff83266b5714b1371a576d9484dfbe4be155
|
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 56a573a6..60440373 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -227,7 +227,6 @@ TBLPROPERTIES (
)
def test_spark(self):
- self.validate_identity("FROM_UTC_TIMESTAMP(CAST(x AS TIMESTAMP), 'utc')")
expr = parse_one("any_value(col, true)", read="spark")
self.assertIsInstance(expr.args.get("ignore_nulls"), exp.Boolean)
self.assertEqual(expr.sql(dialect="spark"), "ANY_VALUE(col, TRUE)")
@@ -276,6 +275,25 @@ TBLPROPERTIES (
"SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
)
+ self.validate_all(
+ "SELECT TO_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')",
+ write={
+ "bigquery": "SELECT DATETIME(TIMESTAMP(CAST('2016-08-31' AS DATETIME), 'Asia/Seoul'), 'UTC')",
+ "duckdb": "SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul' AT TIME ZONE 'UTC'",
+ "postgres": "SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul' AT TIME ZONE 'UTC'",
+ "presto": "SELECT WITH_TIMEZONE(CAST('2016-08-31' AS TIMESTAMP), 'Asia/Seoul') AT TIME ZONE 'UTC'",
+ "redshift": "SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul' AT TIME ZONE 'UTC'",
+ "snowflake": "SELECT CONVERT_TIMEZONE('Asia/Seoul', 'UTC', CAST('2016-08-31' AS TIMESTAMPNTZ))",
+ "spark": "SELECT TO_UTC_TIMESTAMP(CAST('2016-08-31' AS TIMESTAMP), 'Asia/Seoul')",
+ },
+ )
+ self.validate_all(
+ "SELECT FROM_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')",
+ write={
+ "presto": "SELECT CAST('2016-08-31' AS TIMESTAMP) AT TIME ZONE 'Asia/Seoul'",
+ "spark": "SELECT FROM_UTC_TIMESTAMP(CAST('2016-08-31' AS TIMESTAMP), 'Asia/Seoul')",
+ },
+ )
self.validate_all(
"foo.bar",
read={
|
Support for timezone conversion functions between Spark and Trino/Presto dialects
**Is your feature request related to a problem? Please describe.**
The Spark functions [`from_utc_timestamp`](https://spark.apache.org/docs/3.1.3/api/python/reference/api/pyspark.sql.functions.from_utc_timestamp.html) and [`to_utc_timestamp`](https://spark.apache.org/docs/3.1.2/api/python/reference/api/pyspark.sql.functions.to_utc_timestamp.html) are passed through unchanged when converting to Presto/Trino, but these functions don't exist in those dialects. I believe they should be converted to [`at_timezone`](https://trino.io/docs/current/functions/datetime.html#at_timezone) and [`with_timezone`](https://trino.io/docs/current/functions/datetime.html#with_timezone) respectively.
**Describe the solution you'd like**
I'd like support for `at_timezone` and `with_timezone` be added to Presto/Trino dialects such that they transpile `from_utc_timestamp` and `to_utc_timestamp`, from Spark. And vice versa.
**Describe alternatives you've considered**
I don't have any alternative if I want to use sqlglot.
**Additional context**
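A minimal sketch of the requested mapping, assuming a build that implements it via the `exp.FromTimeZone` node introduced in the patch above:
```python
# Minimal sketch: Spark's to_utc_timestamp becomes Presto's with_timezone,
# shifted back to UTC, assuming the exp.FromTimeZone mapping from the patch above.
import sqlglot

print(sqlglot.transpile(
    "SELECT TO_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')",
    read="spark",
    write="presto",
)[0])
# SELECT WITH_TIMEZONE(CAST('2016-08-31' AS TIMESTAMP), 'Asia/Seoul') AT TIME ZONE 'UTC'
```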
|
0.0
|
90ffff83266b5714b1371a576d9484dfbe4be155
|
[
"tests/dialects/test_spark.py::TestSpark::test_spark"
] |
[
"tests/dialects/test_spark.py::TestSpark::test_bool_or",
"tests/dialects/test_spark.py::TestSpark::test_current_user",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_explode_to_unnest",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_iif",
"tests/dialects/test_spark.py::TestSpark::test_insert_cte",
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/dialects/test_spark.py::TestSpark::test_transform_query"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-19 17:46:44+00:00
|
mit
| 6,013 |
|
tobymao__sqlglot-2873
|
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index 1ca0a781..6ee3bdec 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -282,6 +282,11 @@ class Postgres(Dialect):
VAR_SINGLE_TOKENS = {"$"}
class Parser(parser.Parser):
+ PROPERTY_PARSERS = {
+ **parser.Parser.PROPERTY_PARSERS,
+ "SET": lambda self: self.expression(exp.SetConfigProperty, this=self._parse_set()),
+ }
+
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
"DATE_TRUNC": parse_timestamp_trunc,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 0e6608e2..98114fbd 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2281,6 +2281,10 @@ class SetProperty(Property):
arg_types = {"multi": True}
+class SetConfigProperty(Property):
+ arg_types = {"this": True}
+
+
class SettingsProperty(Property):
arg_types = {"expressions": True}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index aa78aef2..75a61c22 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -97,6 +97,7 @@ class Generator:
exp.ReturnsProperty: lambda self, e: self.naked_property(e),
exp.SampleProperty: lambda self, e: f"SAMPLE BY {self.sql(e, 'this')}",
exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
+ exp.SetConfigProperty: lambda self, e: self.sql(e, "this"),
exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}",
exp.SqlReadWriteProperty: lambda self, e: e.name,
exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
@@ -355,6 +356,7 @@ class Generator:
exp.Set: exp.Properties.Location.POST_SCHEMA,
exp.SettingsProperty: exp.Properties.Location.POST_SCHEMA,
exp.SetProperty: exp.Properties.Location.POST_CREATE,
+ exp.SetConfigProperty: exp.Properties.Location.POST_SCHEMA,
exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA,
exp.SqlReadWriteProperty: exp.Properties.Location.POST_SCHEMA,
exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE,
|
tobymao/sqlglot
|
89b781b991ce264cd7f8c44fa67860eb9a587b07
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 7aafa37b..da3a2065 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -597,6 +597,8 @@ class TestPostgres(Validator):
self.validate_identity("CREATE TABLE cities_partdef PARTITION OF cities DEFAULT")
self.validate_identity("CREATE TABLE t (c CHAR(2) UNIQUE NOT NULL) INHERITS (t1)")
self.validate_identity("CREATE TABLE s.t (c CHAR(2) UNIQUE NOT NULL) INHERITS (s.t1, s.t2)")
+ self.validate_identity("CREATE FUNCTION x(INT) RETURNS INT SET search_path = 'public'")
+ self.validate_identity("CREATE FUNCTION x(INT) RETURNS INT SET foo FROM CURRENT")
self.validate_identity(
"CREATE CONSTRAINT TRIGGER my_trigger AFTER INSERT OR DELETE OR UPDATE OF col_a, col_b ON public.my_table DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION do_sth()"
)
@@ -642,16 +644,23 @@ class TestPostgres(Validator):
self.validate_identity(
"DELETE FROM event USING sales AS s WHERE event.eventid = s.eventid RETURNING a"
)
- self.validate_identity(
- "CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])",
- "CREATE TABLE test (x TIMESTAMP[][])",
- )
self.validate_identity(
"CREATE UNLOGGED TABLE foo AS WITH t(c) AS (SELECT 1) SELECT * FROM (SELECT c AS c FROM t) AS temp"
)
self.validate_identity(
"WITH t(c) AS (SELECT 1) SELECT * INTO UNLOGGED foo FROM (SELECT c AS c FROM t) AS temp"
)
+ self.validate_identity(
+ "CREATE FUNCTION add(INT, INT) RETURNS INT SET search_path TO 'public' AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE"
+ )
+ self.validate_identity(
+ "CREATE FUNCTION x(INT) RETURNS INT SET search_path TO 'public'",
+ "CREATE FUNCTION x(INT) RETURNS INT SET search_path = 'public'",
+ )
+ self.validate_identity(
+ "CREATE TABLE test (x TIMESTAMP WITHOUT TIME ZONE[][])",
+ "CREATE TABLE test (x TIMESTAMP[][])",
+ )
self.validate_all(
"CREATE OR REPLACE FUNCTION function_name (input_a character varying DEFAULT NULL::character varying)",
|
Failed to parse SET statement in Postgres CREATE FUNCTION
**Fully reproducible code snippet**
```py
from sqlglot import parse
parse(
"""
CREATE FUNCTION add(integer, integer) RETURNS integer
SET search_path TO 'public'
AS 'select $1 + $2;'
LANGUAGE SQL
IMMUTABLE;
""",
dialect="postgres",
)
```
Raises
```
Traceback (most recent call last):
File "<...>/create-function-repro.py", line 3, in <module>
parse(
File "<...>/python3.9/site-packages/sqlglot/__init__.py", line 86, in parse
return Dialect.get_or_raise(read or dialect).parse(sql, **opts)
File "<...>/python3.9/site-packages/sqlglot/dialects/dialect.py", line 442, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File "<...>/python3.9/site-packages/sqlglot/parser.py", line 1026, in parse
return self._parse(
File "<...>/python3.9/site-packages/sqlglot/parser.py", line 1095, in _parse
self.raise_error("Invalid expression / Unexpected token")
File "<...>/python3.9/site-packages/sqlglot/parser.py", line 1136, in raise_error
raise error
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 3, Col: 28.
CREATE FUNCTION add(integer, integer) RETURNS integer
SET search_path TO 'public'
AS 'select $1 + $2;'
LANGUAGE SQL
IMMUTABLE;
```
**Official Documentation**
- https://www.postgresql.org/docs/current/sql-createfunction.html
Notice the allowed statement `SET configuration_parameter { TO value | = value | FROM CURRENT }` in `CREATE FUNCTION`.
Unrelated, but `STRICT/RETURNS NULL ON NULL INPUT` also fails, thus I omitted it from the example above.
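For reference, a minimal sketch of the fixed behavior, assuming a build where `SET` is handled as a property parser as in the patch above: the `SET search_path ...` clause parses as a `SetConfigProperty` instead of failing.
```python
# Minimal sketch, assuming the SET property parser from the patch above.
import sqlglot

ast = sqlglot.parse_one(
    "CREATE FUNCTION add(INT, INT) RETURNS INT "
    "SET search_path TO 'public' AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE",
    read="postgres",
)
print(ast.sql(dialect="postgres"))
```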
|
0.0
|
89b781b991ce264cd7f8c44fa67860eb9a587b07
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-22 15:15:21+00:00
|
mit
| 6,014 |
|
tobymao__sqlglot-2935
|
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index 0404c78f..6a6825e4 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -232,6 +232,9 @@ class Postgres(Dialect):
BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
HEREDOC_STRINGS = ["$"]
+ HEREDOC_TAG_IS_IDENTIFIER = True
+ HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
+
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
"~~": TokenType.LIKE,
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 87a49240..b0649578 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -504,6 +504,7 @@ class _Tokenizer(type):
command_prefix_tokens={
_TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS
},
+ heredoc_tag_is_identifier=klass.HEREDOC_TAG_IS_IDENTIFIER,
)
token_types = RsTokenTypeSettings(
bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING],
@@ -517,6 +518,7 @@ class _Tokenizer(type):
semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON],
string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING],
var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR],
+ heredoc_string_alternative=_TOKEN_TYPE_TO_INDEX[klass.HEREDOC_STRING_ALTERNATIVE],
)
klass._RS_TOKENIZER = RsTokenizer(settings, token_types)
else:
@@ -573,6 +575,12 @@ class Tokenizer(metaclass=_Tokenizer):
STRING_ESCAPES = ["'"]
VAR_SINGLE_TOKENS: t.Set[str] = set()
+ # Whether or not the heredoc tags follow the same lexical rules as unquoted identifiers
+ HEREDOC_TAG_IS_IDENTIFIER = False
+
+ # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc
+ HEREDOC_STRING_ALTERNATIVE = TokenType.VAR
+
# Autofilled
_COMMENTS: t.Dict[str, str] = {}
_FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
@@ -1249,6 +1257,18 @@ class Tokenizer(metaclass=_Tokenizer):
elif token_type == TokenType.BIT_STRING:
base = 2
elif token_type == TokenType.HEREDOC_STRING:
+ if (
+ self.HEREDOC_TAG_IS_IDENTIFIER
+ and not self._peek.isidentifier()
+ and not self._peek == end
+ ):
+ if self.HEREDOC_STRING_ALTERNATIVE != token_type.VAR:
+ self._add(self.HEREDOC_STRING_ALTERNATIVE)
+ else:
+ self._scan_var()
+
+ return True
+
self._advance()
tag = "" if self._char == end else self._extract_string(end)
end = f"{start}{tag}{end}"
diff --git a/sqlglotrs/src/settings.rs b/sqlglotrs/src/settings.rs
index 32575c63..c6e76a70 100644
--- a/sqlglotrs/src/settings.rs
+++ b/sqlglotrs/src/settings.rs
@@ -17,6 +17,7 @@ pub struct TokenTypeSettings {
pub semicolon: TokenType,
pub string: TokenType,
pub var: TokenType,
+ pub heredoc_string_alternative: TokenType,
}
#[pymethods]
@@ -34,6 +35,7 @@ impl TokenTypeSettings {
semicolon: TokenType,
string: TokenType,
var: TokenType,
+ heredoc_string_alternative: TokenType,
) -> Self {
TokenTypeSettings {
bit_string,
@@ -47,6 +49,7 @@ impl TokenTypeSettings {
semicolon,
string,
var,
+ heredoc_string_alternative,
}
}
}
@@ -69,6 +72,7 @@ pub struct TokenizerSettings {
pub var_single_tokens: HashSet<char>,
pub commands: HashSet<TokenType>,
pub command_prefix_tokens: HashSet<TokenType>,
+ pub heredoc_tag_is_identifier: bool,
}
#[pymethods]
@@ -90,6 +94,7 @@ impl TokenizerSettings {
var_single_tokens: HashSet<String>,
commands: HashSet<TokenType>,
command_prefix_tokens: HashSet<TokenType>,
+ heredoc_tag_is_identifier: bool,
) -> Self {
let to_char = |v: &String| {
if v.len() == 1 {
@@ -138,6 +143,7 @@ impl TokenizerSettings {
var_single_tokens: var_single_tokens_native,
commands,
command_prefix_tokens,
+ heredoc_tag_is_identifier,
}
}
}
diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs
index 920a5b5c..94a8b084 100644
--- a/sqlglotrs/src/tokenizer.rs
+++ b/sqlglotrs/src/tokenizer.rs
@@ -399,6 +399,19 @@ impl<'a> TokenizerState<'a> {
} else if *token_type == self.token_types.bit_string {
(Some(2), *token_type, end.clone())
} else if *token_type == self.token_types.heredoc_string {
+ if self.settings.heredoc_tag_is_identifier
+ && !self.is_identifier(self.peek_char)
+ && self.peek_char.to_string() != *end
+ {
+ if self.token_types.heredoc_string_alternative != self.token_types.var {
+ self.add(self.token_types.heredoc_string_alternative, None)?
+ } else {
+ self.scan_var()?
+ };
+
+ return Ok(true)
+ };
+
self.advance(1)?;
let tag = if self.current_char.to_string() == *end {
String::from("")
@@ -469,7 +482,7 @@ impl<'a> TokenizerState<'a> {
} else if self.peek_char.to_ascii_uppercase() == 'E' && scientific == 0 {
scientific += 1;
self.advance(1)?;
- } else if self.peek_char.is_alphabetic() || self.peek_char == '_' {
+ } else if self.is_identifier(self.peek_char) {
let number_text = self.text();
let mut literal = String::from("");
@@ -643,6 +656,10 @@ impl<'a> TokenizerState<'a> {
Ok(text)
}
+ fn is_identifier(&mut self, name: char) -> bool {
+ name.is_alphabetic() || name == '_'
+ }
+
fn extract_value(&mut self) -> Result<String, TokenizerError> {
loop {
if !self.peek_char.is_whitespace()
|
tobymao/sqlglot
|
b8276262bdca57e358284fadfdd468d2bc957e84
|
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index dd73bae3..7351f6a0 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -77,6 +77,10 @@ class TestClickhouse(Validator):
self.validate_identity("""SELECT JSONExtractString('{"x": {"y": 1}}', 'x', 'y')""")
self.validate_identity("SELECT * FROM table LIMIT 1 BY a, b")
self.validate_identity("SELECT * FROM table LIMIT 2 OFFSET 1 BY a, b")
+ self.validate_identity(
+ "SELECT $1$foo$1$",
+ "SELECT 'foo'",
+ )
self.validate_identity(
"SELECT * FROM table LIMIT 1, 2 BY a, b",
"SELECT * FROM table LIMIT 2 OFFSET 1 BY a, b",
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 9c4246e5..61421e5f 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -33,6 +33,7 @@ class TestPostgres(Validator):
self.assertIsInstance(expr, exp.AlterTable)
self.assertEqual(expr.sql(dialect="postgres"), alter_table_only)
+ self.validate_identity("SELECT x FROM t WHERE CAST($1 AS TEXT) = 'ok'")
self.validate_identity("SELECT * FROM t TABLESAMPLE SYSTEM (50) REPEATABLE (55)")
self.validate_identity("x @@ y")
self.validate_identity("CAST(x AS MONEY)")
|
Parameter `$1` for `postgres` is not supported
In version `v18.8.0`, support for heredoc strings was added, and `"$": TokenType.PARAMETER` was replaced with `"$": TokenType.HEREDOC_STRING`. Since then, a SQL query with a parameter like `$1` cannot be parsed.
Here is the original PR [Feat!: add support for heredoc strings (Postgres, ClickHouse) #2328](https://github.com/tobymao/sqlglot/pull/2328)
**Fully reproducible code snippet**
```
import sqlglot
from sqlglot.optimizer.annotate_types import annotate_types
from sqlglot.optimizer.qualify import qualify
schema = {"t": {"x": "text"}}
sql = "select x from t where $1::text = 'ok'"
expression = sqlglot.parse_one(sql, dialect="postgres")
print(expression.sql(dialect="postgres"))
qualified_expr = qualify(expression, schema=schema, dialect="postgres")
annotated_expr = annotate_types(qualified_expr, schema=schema)
print(annotated_expr.selects[0].type)
```
An exception is raised; the expected output is `TEXT`.
```
Traceback (most recent call last):
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 836, in tokenize
self._scan()
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 859, in _scan
self._scan_keywords()
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 994, in _scan_keywords
if self._scan_string(word):
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 1136, in _scan_string
tag = "" if self._char == end else self._extract_string(end)
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 1203, in _extract_string
raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")
sqlglot.errors.TokenError: Missing $ from 1:22
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/eric/tmp/test.py", line 20, in <module>
expression = sqlglot.parse_one(sql, dialect="postgres")
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/__init__.py", line 125, in parse_one
result = dialect.parse(sql, **opts)
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/dialects/dialect.py", line 304, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/dialects/dialect.py", line 318, in tokenize
return self.tokenizer.tokenize(sql)
File "/Users/eric/venv/lib/python3.10/site-packages/sqlglot/tokens.py", line 841, in tokenize
raise TokenError(f"Error tokenizing '{context}'") from e
sqlglot.errors.TokenError: Error tokenizing 'select x from t where $1::text = 'ok'
```
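For reference, a minimal sketch of the fixed behavior, assuming a build where `$` falls back to a `PARAMETER` token when no heredoc tag follows, as in the patch above:
```python
# Minimal sketch, assuming the heredoc-tag fallback from the patch above.
import sqlglot

expr = sqlglot.parse_one("select x from t where $1::text = 'ok'", read="postgres")
print(expr.sql(dialect="postgres"))
# SELECT x FROM t WHERE CAST($1 AS TEXT) = 'ok'
```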
|
0.0
|
b8276262bdca57e358284fadfdd468d2bc957e84
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_postgres"
] |
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary",
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-08 19:03:10+00:00
|
mit
| 6,015 |
|
tobymao__sqlglot-2936
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index d7ba729c..e61ac4fd 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -333,6 +333,7 @@ class DuckDB(Dialect):
IGNORE_NULLS_IN_FUNC = True
JSON_PATH_BRACKETED_KEY_SUPPORTED = False
SUPPORTS_CREATE_TABLE_LIKE = False
+ MULTI_ARG_DISTINCT = False
TRANSFORMS = {
**generator.Generator.TRANSFORMS,
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index 0404c78f..68e2c6de 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -232,6 +232,9 @@ class Postgres(Dialect):
BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
HEREDOC_STRINGS = ["$"]
+ HEREDOC_TAG_IS_IDENTIFIER = True
+ HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
+
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
"~~": TokenType.LIKE,
@@ -381,6 +384,7 @@ class Postgres(Dialect):
JSON_TYPE_REQUIRED_FOR_EXTRACTION = True
SUPPORTS_UNLOGGED_TABLES = True
LIKE_PROPERTY_INSIDE_SCHEMA = True
+ MULTI_ARG_DISTINCT = False
SUPPORTED_JSON_PATH_PARTS = {
exp.JSONPathKey,
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 8691192b..609103e5 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -292,6 +292,7 @@ class Presto(Dialect):
LIMIT_ONLY_LITERALS = True
SUPPORTS_SINGLE_ARG_CONCAT = False
LIKE_PROPERTY_INSIDE_SCHEMA = True
+ MULTI_ARG_DISTINCT = False
PROPERTIES_LOCATION = {
**generator.Generator.PROPERTIES_LOCATION,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 81af56d8..eff8aaa2 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -296,6 +296,10 @@ class Generator(metaclass=_Generator):
# Whether or not the LikeProperty needs to be specified inside of the schema clause
LIKE_PROPERTY_INSIDE_SCHEMA = False
+ # Whether or not DISTINCT can be followed by multiple args in an AggFunc. If not, it will be
+    # transpiled into a series of CASE-WHEN-ELSE, ultimately using a tuple consisting of the args
+ MULTI_ARG_DISTINCT = True
+
# Whether or not the JSON extraction operators expect a value of type JSON
JSON_TYPE_REQUIRED_FOR_EXTRACTION = False
@@ -2837,6 +2841,13 @@ class Generator(metaclass=_Generator):
def distinct_sql(self, expression: exp.Distinct) -> str:
this = self.expressions(expression, flat=True)
+
+ if not self.MULTI_ARG_DISTINCT and len(expression.expressions) > 1:
+ case = exp.case()
+ for arg in expression.expressions:
+ case = case.when(arg.is_(exp.null()), exp.null())
+ this = self.sql(case.else_(f"({this})"))
+
this = f" {this}" if this else ""
on = self.sql(expression, "on")
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 87a49240..b0649578 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -504,6 +504,7 @@ class _Tokenizer(type):
command_prefix_tokens={
_TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS
},
+ heredoc_tag_is_identifier=klass.HEREDOC_TAG_IS_IDENTIFIER,
)
token_types = RsTokenTypeSettings(
bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING],
@@ -517,6 +518,7 @@ class _Tokenizer(type):
semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON],
string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING],
var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR],
+ heredoc_string_alternative=_TOKEN_TYPE_TO_INDEX[klass.HEREDOC_STRING_ALTERNATIVE],
)
klass._RS_TOKENIZER = RsTokenizer(settings, token_types)
else:
@@ -573,6 +575,12 @@ class Tokenizer(metaclass=_Tokenizer):
STRING_ESCAPES = ["'"]
VAR_SINGLE_TOKENS: t.Set[str] = set()
+ # Whether or not the heredoc tags follow the same lexical rules as unquoted identifiers
+ HEREDOC_TAG_IS_IDENTIFIER = False
+
+ # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc
+ HEREDOC_STRING_ALTERNATIVE = TokenType.VAR
+
# Autofilled
_COMMENTS: t.Dict[str, str] = {}
_FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
@@ -1249,6 +1257,18 @@ class Tokenizer(metaclass=_Tokenizer):
elif token_type == TokenType.BIT_STRING:
base = 2
elif token_type == TokenType.HEREDOC_STRING:
+ if (
+ self.HEREDOC_TAG_IS_IDENTIFIER
+ and not self._peek.isidentifier()
+ and not self._peek == end
+ ):
+ if self.HEREDOC_STRING_ALTERNATIVE != token_type.VAR:
+ self._add(self.HEREDOC_STRING_ALTERNATIVE)
+ else:
+ self._scan_var()
+
+ return True
+
self._advance()
tag = "" if self._char == end else self._extract_string(end)
end = f"{start}{tag}{end}"
diff --git a/sqlglotrs/src/settings.rs b/sqlglotrs/src/settings.rs
index 32575c63..c6e76a70 100644
--- a/sqlglotrs/src/settings.rs
+++ b/sqlglotrs/src/settings.rs
@@ -17,6 +17,7 @@ pub struct TokenTypeSettings {
pub semicolon: TokenType,
pub string: TokenType,
pub var: TokenType,
+ pub heredoc_string_alternative: TokenType,
}
#[pymethods]
@@ -34,6 +35,7 @@ impl TokenTypeSettings {
semicolon: TokenType,
string: TokenType,
var: TokenType,
+ heredoc_string_alternative: TokenType,
) -> Self {
TokenTypeSettings {
bit_string,
@@ -47,6 +49,7 @@ impl TokenTypeSettings {
semicolon,
string,
var,
+ heredoc_string_alternative,
}
}
}
@@ -69,6 +72,7 @@ pub struct TokenizerSettings {
pub var_single_tokens: HashSet<char>,
pub commands: HashSet<TokenType>,
pub command_prefix_tokens: HashSet<TokenType>,
+ pub heredoc_tag_is_identifier: bool,
}
#[pymethods]
@@ -90,6 +94,7 @@ impl TokenizerSettings {
var_single_tokens: HashSet<String>,
commands: HashSet<TokenType>,
command_prefix_tokens: HashSet<TokenType>,
+ heredoc_tag_is_identifier: bool,
) -> Self {
let to_char = |v: &String| {
if v.len() == 1 {
@@ -138,6 +143,7 @@ impl TokenizerSettings {
var_single_tokens: var_single_tokens_native,
commands,
command_prefix_tokens,
+ heredoc_tag_is_identifier,
}
}
}
diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs
index 920a5b5c..94a8b084 100644
--- a/sqlglotrs/src/tokenizer.rs
+++ b/sqlglotrs/src/tokenizer.rs
@@ -399,6 +399,19 @@ impl<'a> TokenizerState<'a> {
} else if *token_type == self.token_types.bit_string {
(Some(2), *token_type, end.clone())
} else if *token_type == self.token_types.heredoc_string {
+ if self.settings.heredoc_tag_is_identifier
+ && !self.is_identifier(self.peek_char)
+ && self.peek_char.to_string() != *end
+ {
+ if self.token_types.heredoc_string_alternative != self.token_types.var {
+ self.add(self.token_types.heredoc_string_alternative, None)?
+ } else {
+ self.scan_var()?
+ };
+
+ return Ok(true)
+ };
+
self.advance(1)?;
let tag = if self.current_char.to_string() == *end {
String::from("")
@@ -469,7 +482,7 @@ impl<'a> TokenizerState<'a> {
} else if self.peek_char.to_ascii_uppercase() == 'E' && scientific == 0 {
scientific += 1;
self.advance(1)?;
- } else if self.peek_char.is_alphabetic() || self.peek_char == '_' {
+ } else if self.is_identifier(self.peek_char) {
let number_text = self.text();
let mut literal = String::from("");
@@ -643,6 +656,10 @@ impl<'a> TokenizerState<'a> {
Ok(text)
}
+ fn is_identifier(&mut self, name: char) -> bool {
+ name.is_alphabetic() || name == '_'
+ }
+
fn extract_value(&mut self) -> Result<String, TokenizerError> {
loop {
if !self.peek_char.is_whitespace()
|
tobymao/sqlglot
|
b8276262bdca57e358284fadfdd468d2bc957e84
|
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index dd73bae3..7351f6a0 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -77,6 +77,10 @@ class TestClickhouse(Validator):
self.validate_identity("""SELECT JSONExtractString('{"x": {"y": 1}}', 'x', 'y')""")
self.validate_identity("SELECT * FROM table LIMIT 1 BY a, b")
self.validate_identity("SELECT * FROM table LIMIT 2 OFFSET 1 BY a, b")
+ self.validate_identity(
+ "SELECT $1$foo$1$",
+ "SELECT 'foo'",
+ )
self.validate_identity(
"SELECT * FROM table LIMIT 1, 2 BY a, b",
"SELECT * FROM table LIMIT 2 OFFSET 1 BY a, b",
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 9c4246e5..61421e5f 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -33,6 +33,7 @@ class TestPostgres(Validator):
self.assertIsInstance(expr, exp.AlterTable)
self.assertEqual(expr.sql(dialect="postgres"), alter_table_only)
+ self.validate_identity("SELECT x FROM t WHERE CAST($1 AS TEXT) = 'ok'")
self.validate_identity("SELECT * FROM t TABLESAMPLE SYSTEM (50) REPEATABLE (55)")
self.validate_identity("x @@ y")
self.validate_identity("CAST(x AS MONEY)")
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index a02a735c..75bb91af 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -277,6 +277,21 @@ TBLPROPERTIES (
"SELECT STR_TO_MAP('a:1,b:2,c:3', ',', ':')",
)
+ self.validate_all(
+ "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ write={
+ "clickhouse": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ "databricks": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ "doris": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ "duckdb": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl",
+ "hive": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ "mysql": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ "postgres": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl",
+ "presto": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl",
+ "snowflake": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ "spark": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ },
+ )
self.validate_all(
"SELECT TO_UTC_TIMESTAMP('2016-08-31', 'Asia/Seoul')",
write={
|
Count distinct syntax error while transpiling to trino sql
**Fully reproducible code snippet**
```python
import sqlglot
sql = """
with tbl as (
select 1 as id, 'eggy' as name
union all
select null as id, 'jake' as name
)
select count(distinct id, name) as cnt from tbl
"""
sqlglot.transpile(sql, read="hive", write="trino")[0]
```
**Details**
The result of the code snippet is ⬇️
```
"WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl"
```
When I run this SQL in the Trino CLI, it raises the error below. As far as I know, Trino/Presto does not support the syntax `count(distinct col_1, col_2, ...)`; instead, multiple columns must be wrapped into a **row** type, like `count(distinct (col_1, col_2, ...))`.
[Screenshot of the Trino error](https://github.com/tobymao/sqlglot/assets/60967034/82f76a45-1519-401a-9133-b749317ba0d5)
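For reference, a minimal sketch of the fixed transpilation, assuming a build where Presto sets `MULTI_ARG_DISTINCT = False` as in the patch above: the multi-column DISTINCT is rewritten into a CASE over a row.
```python
# Minimal sketch, assuming MULTI_ARG_DISTINCT = False for Presto per the patch above.
import sqlglot

sql = "select count(distinct id, name) as cnt from tbl"
print(sqlglot.transpile(sql, read="hive", write="presto")[0])
# SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl
```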
|
0.0
|
b8276262bdca57e358284fadfdd468d2bc957e84
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_spark.py::TestSpark::test_spark"
] |
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary",
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_variance",
"tests/dialects/test_spark.py::TestSpark::test_bool_or",
"tests/dialects/test_spark.py::TestSpark::test_current_user",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_explode_to_unnest",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_iif",
"tests/dialects/test_spark.py::TestSpark::test_insert_cte",
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/dialects/test_spark.py::TestSpark::test_transform_query"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-08 20:50:27+00:00
|
mit
| 6,016 |
|
tobymao__sqlglot-2938
|
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index eff8aaa2..e22c2975 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2861,12 +2861,20 @@ class Generator(metaclass=_Generator):
return self._embed_ignore_nulls(expression, "RESPECT NULLS")
def _embed_ignore_nulls(self, expression: exp.IgnoreNulls | exp.RespectNulls, text: str) -> str:
- if self.IGNORE_NULLS_IN_FUNC:
- this = expression.find(exp.AggFunc)
- if this:
- sql = self.sql(this)
- sql = sql[:-1] + f" {text})"
- return sql
+ if self.IGNORE_NULLS_IN_FUNC and not expression.meta.get("inline"):
+ for klass in (exp.Order, exp.Limit):
+ mod = expression.find(klass)
+
+ if mod:
+ this = expression.__class__(this=mod.this.copy())
+ this.meta["inline"] = True
+ mod.this.replace(this)
+ return self.sql(expression.this)
+
+ agg_func = expression.find(exp.AggFunc)
+
+ if agg_func:
+ return self.sql(agg_func)[:-1] + f" {text})"
return f"{self.sql(expression, 'this')} {text}"
|
tobymao/sqlglot
|
31e1908d33a7fa01727159a4ab38b7cc9962fcbd
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 340630c2..41b96980 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -18,6 +18,11 @@ class TestBigQuery(Validator):
maxDiff = None
def test_bigquery(self):
+ self.validate_identity("ARRAY_AGG(x IGNORE NULLS LIMIT 1)")
+ self.validate_identity("ARRAY_AGG(x IGNORE NULLS ORDER BY x LIMIT 1)")
+ self.validate_identity("ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY x LIMIT 1)")
+ self.validate_identity("ARRAY_AGG(x IGNORE NULLS)")
+
self.validate_all(
"SELECT SUM(x IGNORE NULLS) AS x",
read={
@@ -55,6 +60,7 @@ class TestBigQuery(Validator):
self.validate_all(
"SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
write={
+ "bigquery": "SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
"duckdb": "SELECT QUANTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
"spark": "SELECT PERCENTILE_CONT(x, 0.5) RESPECT NULLS OVER ()",
},
@@ -62,14 +68,16 @@ class TestBigQuery(Validator):
self.validate_all(
"SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x",
write={
- "duckdb": "SELECT ARRAY_AGG(DISTINCT x ORDER BY a NULLS FIRST, b DESC LIMIT 10 IGNORE NULLS) AS x",
+ "bigquery": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x",
+ "duckdb": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a NULLS FIRST, b DESC LIMIT 10) AS x",
"spark": "SELECT COLLECT_LIST(DISTINCT x ORDER BY a, b DESC LIMIT 10) IGNORE NULLS AS x",
},
)
self.validate_all(
"SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x",
write={
- "duckdb": "SELECT ARRAY_AGG(DISTINCT x ORDER BY a NULLS FIRST, b DESC LIMIT 1, 10 IGNORE NULLS) AS x",
+ "bigquery": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x",
+ "duckdb": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a NULLS FIRST, b DESC LIMIT 1, 10) AS x",
"spark": "SELECT COLLECT_LIST(DISTINCT x ORDER BY a, b DESC LIMIT 1, 10) IGNORE NULLS AS x",
},
)
|
Order of `IGNORE NULLS` in BigQuery generated SQL is incorrect
`IGNORE NULLS` needs to occur before `LIMIT`, otherwise it's an error.
**Fully reproducible code snippet**
```
In [7]: import sqlglot as sg
In [8]: sg.__version__
Out[8]: '21.0.1'
In [9]: sg.parse_one('select array_agg(x ignore nulls limit 1)', read='bigquery').sql('bigquery')
Out[9]: 'SELECT array_agg(x LIMIT 1 IGNORE NULLS)'
```
**Official Documentation**
https://cloud.google.com/bigquery/docs/reference/standard-sql/aggregate-function-calls
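For reference, a minimal sketch of the corrected ordering, assuming a build with the `_embed_ignore_nulls` fix above:
```python
# Minimal sketch, assuming the _embed_ignore_nulls fix above: IGNORE NULLS is
# emitted before ORDER BY / LIMIT inside the aggregate call.
import sqlglot as sg

ast = sg.parse_one("select array_agg(x ignore nulls limit 1)", read="bigquery")
print(ast.sql("bigquery"))
# SELECT ARRAY_AGG(x IGNORE NULLS LIMIT 1)
```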
|
0.0
|
31e1908d33a7fa01727159a4ab38b7cc9962fcbd
|
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery"
] |
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-08 23:35:03+00:00
|
mit
| 6,017 |
|
tobymao__sqlglot-2956
|
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
index a64c1d40..2ad9ac34 100644
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -136,11 +136,11 @@ class Redshift(Postgres):
refs.add(
(
this.args["from"] if i == 0 else this.args["joins"][i - 1]
- ).alias_or_name.lower()
+ ).this.alias.lower()
)
- table = join.this
- if isinstance(table, exp.Table):
+ table = join.this
+ if isinstance(table, exp.Table) and not join.args.get("on"):
if table.parts[0].name.lower() in refs:
table.replace(table.to_column())
return this
|
tobymao/sqlglot
|
78e6d0de83efbff1d3b61c8550db56c1819f7c22
|
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index b6b6ccc3..6925a64b 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -1,4 +1,4 @@
-from sqlglot import transpile
+from sqlglot import exp, parse_one, transpile
from tests.dialects.test_dialect import Validator
@@ -381,8 +381,6 @@ class TestRedshift(Validator):
"SELECT DATEADD(DAY, 1, DATE('2023-01-01'))",
)
- self.validate_identity("SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l")
-
self.validate_identity(
"""SELECT
c_name,
@@ -532,3 +530,26 @@ FROM (
"redshift": "CREATE OR REPLACE VIEW v1 AS SELECT cola, colb FROM t1 WITH NO SCHEMA BINDING",
},
)
+
+ def test_column_unnesting(self):
+ ast = parse_one("SELECT * FROM t.t JOIN t.c1 ON c1.c2 = t.c3", read="redshift")
+ ast.args["from"].this.assert_is(exp.Table)
+ ast.args["joins"][0].this.assert_is(exp.Table)
+ self.assertEqual(ast.sql("redshift"), "SELECT * FROM t.t JOIN t.c1 ON c1.c2 = t.c3")
+
+ ast = parse_one("SELECT * FROM t AS t CROSS JOIN t.c1", read="redshift")
+ ast.args["from"].this.assert_is(exp.Table)
+ ast.args["joins"][0].this.assert_is(exp.Column)
+ self.assertEqual(ast.sql("redshift"), "SELECT * FROM t AS t CROSS JOIN t.c1")
+
+ ast = parse_one(
+ "SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l", read="redshift"
+ )
+ joins = ast.args["joins"]
+ ast.args["from"].this.assert_is(exp.Table)
+ joins[0].this.this.assert_is(exp.Column)
+ joins[1].this.this.assert_is(exp.Column)
+ joins[2].this.this.assert_is(exp.Dot)
+ self.assertEqual(
+ ast.sql("redshift"), "SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l"
+ )
|
Table incorrectly parsed as column
# Description
A table of the form `<schema>.<table>` in a join gets mis-parsed as a column. In the example below, the table `"usage"."company_names"` is mis-parsed as a column.
### Version details
- Dialect is 'redshift'
- The issue exists on the latest pip version of sqlglot (21.0.2)
- The issue doesn't exist on an older pip version of sqlglot (20.4.0)
### Correct output with sqlglot==20.4.0
```
Python 3.11.7 (main, Dec 4 2023, 18:10:11) [Clang 15.0.0 (clang-1500.1.0.2.5)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlglot
>>> from sqlglot import parse_one
>>> sqlglot.__version__
'20.4.0'
>>> dialect = 'redshift'
>>> query = 'SELECT * FROM "usage"."usage" JOIN "usage"."company_names" ON "company_names"."id" = "usage"."customerid"'
>>> parse_one(query, dialect=dialect)
(SELECT expressions:
(STAR ), from:
(FROM this:
(TABLE this:
(IDENTIFIER this: usage, quoted: True), db:
(IDENTIFIER this: usage, quoted: True))), joins:
(JOIN this:
(TABLE this:
(IDENTIFIER this: company_names, quoted: True), db:
(IDENTIFIER this: usage, quoted: True)), on:
(EQ this:
(COLUMN this:
(IDENTIFIER this: id, quoted: True), table:
(IDENTIFIER this: company_names, quoted: True)), expression:
(COLUMN this:
(IDENTIFIER this: customerid, quoted: True), table:
(IDENTIFIER this: usage, quoted: True)))))
```
### Incorrect output with sqlglot==21.0.2
```
Python 3.11.7 (main, Dec 4 2023, 18:10:11) [Clang 15.0.0 (clang-1500.1.0.2.5)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlglot
>>> from sqlglot import parse_one
>>> sqlglot.__version__
'21.0.2'
>>> dialect = 'redshift'
>>> query = 'SELECT * FROM "usage"."usage" JOIN "usage"."company_names" ON "company_names"."id" = "usage"."customerid"'
>>> parse_one(query, dialect=dialect)
Select(
expressions=[
Star()],
from=From(
this=Table(
this=Identifier(this=usage, quoted=True),
db=Identifier(this=usage, quoted=True))),
joins=[
Join(
this=Column(
this=Identifier(this=company_names, quoted=True),
table=Identifier(this=usage, quoted=True)),
on=EQ(
this=Column(
this=Identifier(this=id, quoted=True),
table=Identifier(this=company_names, quoted=True)),
expression=Column(
this=Identifier(this=customerid, quoted=True),
table=Identifier(this=usage, quoted=True))))])
```
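As a quick sanity check against the query from this report, the new `test_column_unnesting` cases above imply that a qualified join target with an `ON` clause now stays a `Table`; a sketch, assuming a build that includes this patch:
```python
from sqlglot import exp, parse_one

ast = parse_one(
    'SELECT * FROM "usage"."usage" JOIN "usage"."company_names" '
    'ON "company_names"."id" = "usage"."customerid"',
    read="redshift",
)
# With the fix, the join target is no longer unnested into a Column.
ast.args["joins"][0].this.assert_is(exp.Table)
```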
|
0.0
|
78e6d0de83efbff1d3b61c8550db56c1819f7c22
|
[
"tests/dialects/test_redshift.py::TestRedshift::test_column_unnesting"
] |
[
"tests/dialects/test_redshift.py::TestRedshift::test_create_table_like",
"tests/dialects/test_redshift.py::TestRedshift::test_identity",
"tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding",
"tests/dialects/test_redshift.py::TestRedshift::test_redshift",
"tests/dialects/test_redshift.py::TestRedshift::test_rename_table",
"tests/dialects/test_redshift.py::TestRedshift::test_values",
"tests/dialects/test_redshift.py::TestRedshift::test_varchar_max"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-02-12 17:23:40+00:00
|
mit
| 6,018 |
|
tobymao__sqlglot-3011
|
diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 3c8bb5f7..d4344f69 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -302,6 +302,7 @@ class BigQuery(Dialect):
"BYTES": TokenType.BINARY,
"CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
"DECLARE": TokenType.COMMAND,
+ "ELSEIF": TokenType.COMMAND,
"EXCEPTION": TokenType.COMMAND,
"FLOAT64": TokenType.DOUBLE,
"FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
@@ -410,6 +411,7 @@ class BigQuery(Dialect):
STATEMENT_PARSERS = {
**parser.Parser.STATEMENT_PARSERS,
+ TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
TokenType.END: lambda self: self._parse_as_command(self._prev),
TokenType.FOR: lambda self: self._parse_for_in(),
}
|
tobymao/sqlglot
|
9079ead97701b32bde0b2d704bbf8f9b67f5a740
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 37846da3..cf8cb3b6 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -21,6 +21,7 @@ class TestBigQuery(Validator):
self.validate_identity("SELECT * FROM x.*")
self.validate_identity("SELECT * FROM x.y*")
+ self.validate_identity("CASE A WHEN 90 THEN 'red' WHEN 50 THEN 'blue' ELSE 'green' END")
self.validate_identity("CREATE SCHEMA x DEFAULT COLLATE 'en'")
self.validate_identity("CREATE TABLE x (y INT64) DEFAULT COLLATE 'en'")
self.validate_identity("PARSE_JSON('{}', wide_number_mode => 'exact')")
@@ -1091,6 +1092,35 @@ WHERE
self.assertIn("unsupported syntax", cm.output[0])
+ with self.assertLogs(helper_logger):
+ statements = parse(
+ """
+ BEGIN
+ DECLARE MY_VAR INT64 DEFAULT 1;
+ SET MY_VAR = (SELECT 0);
+
+ IF MY_VAR = 1 THEN SELECT 'TRUE';
+ ELSEIF MY_VAR = 0 THEN SELECT 'FALSE';
+ ELSE SELECT 'NULL';
+ END IF;
+ END
+ """,
+ read="bigquery",
+ )
+
+ expected_statements = (
+ "BEGIN DECLARE MY_VAR INT64 DEFAULT 1",
+ "SET MY_VAR = (SELECT 0)",
+ "IF MY_VAR = 1 THEN SELECT 'TRUE'",
+ "ELSEIF MY_VAR = 0 THEN SELECT 'FALSE'",
+ "ELSE SELECT 'NULL'",
+ "END IF",
+ "END",
+ )
+
+ for actual, expected in zip(statements, expected_statements):
+ self.assertEqual(actual.sql(dialect="bigquery"), expected)
+
with self.assertLogs(helper_logger) as cm:
self.validate_identity(
"SELECT * FROM t AS t(c1, c2)",
|
BigQuery parse error: unexpected token (`=`) found in ELSEIF clause
Code:
```python
import sqlglot
my_str = """BEGIN
DECLARE MY_VAR INT64 DEFAULT 1;
SET MY_VAR = (SELECT 0);
IF MY_VAR = 1 THEN
SELECT 'TRUE';
ELSEIF MY_VAR = 0 THEN
SELECT 'FALSE';
ELSE
SELECT 'NULL';
END IF;
END
"""
parsed_objects = sqlglot.parse(my_str, dialect='bigquery')
print(parsed_objects)
```
error:
```
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 9, Col: 15.
VAR INT64 DEFAULT 1;
SET MY_VAR = (SELECT 0);
IF MY_VAR = 1 THEN
SELECT 'TRUE';
ELSEIF MY_VAR = 0 THEN
SELECT 'FALSE';
ELSE
SELECT 'NULL';
END IF;
END
```
`=` is underlined
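For reference, the new test above shows the post-patch behavior: with `ELSEIF` registered as a command keyword, the script parses into individual statements (each falling back to a `Command`) instead of raising. Reusing `my_str` from the snippet above:
```python
# Requires a build that includes this patch.
for stmt in sqlglot.parse(my_str, dialect="bigquery"):
    print(stmt.sql(dialect="bigquery"))
# ...
# IF MY_VAR = 1 THEN SELECT 'TRUE'
# ELSEIF MY_VAR = 0 THEN SELECT 'FALSE'
# ELSE SELECT 'NULL'
# END IF
# END
```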
|
0.0
|
9079ead97701b32bde0b2d704bbf8f9b67f5a740
|
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_warnings"
] |
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_bigquery.py::TestBigQuery::test_errors",
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-02-22 14:44:19+00:00
|
mit
| 6,019 |
|
tobymao__sqlglot-3027
|
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 13f8b858..7a160c1d 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -391,6 +391,11 @@ class MySQL(Dialect):
"WARNINGS": _show_parser("WARNINGS"),
}
+ PROPERTY_PARSERS = {
+ **parser.Parser.PROPERTY_PARSERS,
+ "LOCK": lambda self: self._parse_property_assignment(exp.LockProperty),
+ }
+
SET_PARSERS = {
**parser.Parser.SET_PARSERS,
"PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 4fb3679a..3bec25ee 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2322,6 +2322,10 @@ class LocationProperty(Property):
arg_types = {"this": True}
+class LockProperty(Property):
+ arg_types = {"this": True}
+
+
class LockingProperty(Property):
arg_types = {
"this": False,
@@ -2505,6 +2509,7 @@ class Properties(Expression):
"FORMAT": FileFormatProperty,
"LANGUAGE": LanguageProperty,
"LOCATION": LocationProperty,
+ "LOCK": LockProperty,
"PARTITIONED_BY": PartitionedByProperty,
"RETURNS": ReturnsProperty,
"ROW_FORMAT": RowFormatProperty,
@@ -3923,7 +3928,13 @@ class Rollback(Expression):
class AlterTable(Expression):
- arg_types = {"this": True, "actions": True, "exists": False, "only": False}
+ arg_types = {
+ "this": True,
+ "actions": True,
+ "exists": False,
+ "only": False,
+ "options": False,
+ }
class AddConstraint(Expression):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index d8b6290d..66466734 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -388,6 +388,7 @@ class Generator(metaclass=_Generator):
exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA,
exp.LikeProperty: exp.Properties.Location.POST_SCHEMA,
exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,
+ exp.LockProperty: exp.Properties.Location.POST_SCHEMA,
exp.LockingProperty: exp.Properties.Location.POST_ALIAS,
exp.LogProperty: exp.Properties.Location.POST_NAME,
exp.MaterializedProperty: exp.Properties.Location.POST_CREATE,
@@ -2833,7 +2834,9 @@ class Generator(metaclass=_Generator):
exists = " IF EXISTS" if expression.args.get("exists") else ""
only = " ONLY" if expression.args.get("only") else ""
- return f"ALTER TABLE{exists}{only} {self.sql(expression, 'this')} {actions}"
+ options = self.expressions(expression, key="options")
+ options = f", {options}" if options else ""
+ return f"ALTER TABLE{exists}{only} {self.sql(expression, 'this')} {actions}{options}"
def add_column_sql(self, expression: exp.AlterTable) -> str:
if self.ALTER_TABLE_INCLUDE_COLUMN_KEYWORD:
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 322fa1ca..3465c56d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -5281,6 +5281,9 @@ class Parser(metaclass=_Parser):
def _parse_var_or_string(self) -> t.Optional[exp.Expression]:
return self._parse_var() or self._parse_string()
+ def _parse_primary_or_var(self) -> t.Optional[exp.Expression]:
+ return self._parse_primary() or self._parse_var(any_token=True)
+
def _parse_null(self) -> t.Optional[exp.Expression]:
if self._match_set(self.NULL_TOKENS):
return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev)
@@ -5299,16 +5302,12 @@ class Parser(metaclass=_Parser):
return self._parse_placeholder()
def _parse_parameter(self) -> exp.Parameter:
- def _parse_parameter_part() -> t.Optional[exp.Expression]:
- return (
- self._parse_identifier() or self._parse_primary() or self._parse_var(any_token=True)
- )
-
self._match(TokenType.L_BRACE)
- this = _parse_parameter_part()
- expression = self._match(TokenType.COLON) and _parse_parameter_part()
+ this = self._parse_identifier() or self._parse_primary_or_var()
+ expression = self._match(TokenType.COLON) and (
+ self._parse_identifier() or self._parse_primary_or_var()
+ )
self._match(TokenType.R_BRACE)
-
return self.expression(exp.Parameter, this=this, expression=expression)
def _parse_placeholder(self) -> t.Optional[exp.Expression]:
@@ -5551,6 +5550,7 @@ class Parser(metaclass=_Parser):
parser = self.ALTER_PARSERS.get(self._prev.text.upper()) if self._prev else None
if parser:
actions = ensure_list(parser(self))
+ options = self._parse_csv(self._parse_property)
if not self._curr and actions:
return self.expression(
@@ -5559,6 +5559,7 @@ class Parser(metaclass=_Parser):
exists=exists,
actions=actions,
only=only,
+ options=options,
)
return self._parse_as_command(start)
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index eab61052..004b2882 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -1187,7 +1187,7 @@ class Tokenizer(metaclass=_Tokenizer):
self._advance()
elif self._peek == "." and not decimal:
after = self.peek(1)
- if after.isdigit() or not after.isalpha():
+ if after.isdigit() or not (after.isalpha() or after == "_"):
decimal = True
self._advance()
else:
diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs
index 94a8b084..927f3d80 100644
--- a/sqlglotrs/src/tokenizer.rs
+++ b/sqlglotrs/src/tokenizer.rs
@@ -470,7 +470,7 @@ impl<'a> TokenizerState<'a> {
self.advance(1)?;
} else if self.peek_char == '.' && !decimal {
let after = self.peek(1)?;
- if after.is_digit(10) || !after.is_alphabetic() {
+ if after.is_digit(10) || !(after.is_alphabetic() || after == '_') {
decimal = true;
self.advance(1)?;
} else {
|
tobymao/sqlglot
|
c9eef99b8fe3367c22a8186fb397ad550ac11386
|
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 38c19a77..db8da304 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -7,6 +7,7 @@ from sqlglot import (
UnsupportedError,
exp,
parse,
+ parse_one,
transpile,
)
from sqlglot.helper import logger as helper_logger
@@ -40,6 +41,11 @@ class TestBigQuery(Validator):
},
)
+ table = parse_one("x-0._y.z", dialect="bigquery", into=exp.Table)
+ self.assertEqual(table.catalog, "x-0")
+ self.assertEqual(table.db, "_y")
+ self.assertEqual(table.name, "z")
+
self.validate_identity("SELECT * FROM x-0.y")
self.assertEqual(exp.to_table("`x.y.z`", dialect="bigquery").sql(), '"x"."y"."z"')
self.assertEqual(exp.to_table("`x.y.z`", dialect="bigquery").sql("bigquery"), "`x.y.z`")
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index fd27a1ee..5f23c440 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -29,6 +29,7 @@ class TestMySQL(Validator):
self.validate_identity("CREATE TABLE foo (a BIGINT, INDEX USING BTREE (b))")
self.validate_identity("CREATE TABLE foo (a BIGINT, FULLTEXT INDEX (b))")
self.validate_identity("CREATE TABLE foo (a BIGINT, SPATIAL INDEX (b))")
+ self.validate_identity("ALTER TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE")
self.validate_identity(
"CREATE TABLE `oauth_consumer` (`key` VARCHAR(32) NOT NULL, UNIQUE `OAUTH_CONSUMER_KEY` (`key`))"
)
@@ -68,6 +69,26 @@ class TestMySQL(Validator):
self.validate_identity(
"CREATE OR REPLACE VIEW my_view AS SELECT column1 AS `boo`, column2 AS `foo` FROM my_table WHERE column3 = 'some_value' UNION SELECT q.* FROM fruits_table, JSON_TABLE(Fruits, '$[*]' COLUMNS(id VARCHAR(255) PATH '$.$id', value VARCHAR(255) PATH '$.value')) AS q",
)
+ self.validate_identity(
+ "CREATE TABLE `foo` (`id` char(36) NOT NULL DEFAULT (uuid()), PRIMARY KEY (`id`), UNIQUE KEY `id` (`id`))",
+ "CREATE TABLE `foo` (`id` CHAR(36) NOT NULL DEFAULT (UUID()), PRIMARY KEY (`id`), UNIQUE `id` (`id`))",
+ )
+ self.validate_identity(
+ "CREATE TABLE IF NOT EXISTS industry_info (a BIGINT(20) NOT NULL AUTO_INCREMENT, b BIGINT(20) NOT NULL, c VARCHAR(1000), PRIMARY KEY (a), UNIQUE KEY d (b), KEY e (b))",
+ "CREATE TABLE IF NOT EXISTS industry_info (a BIGINT(20) NOT NULL AUTO_INCREMENT, b BIGINT(20) NOT NULL, c VARCHAR(1000), PRIMARY KEY (a), UNIQUE d (b), INDEX e (b))",
+ )
+ self.validate_identity(
+ "CREATE TABLE test (ts TIMESTAMP, ts_tz TIMESTAMPTZ, ts_ltz TIMESTAMPLTZ)",
+ "CREATE TABLE test (ts DATETIME, ts_tz TIMESTAMP, ts_ltz TIMESTAMP)",
+ )
+ self.validate_identity(
+ "ALTER TABLE test_table ALTER COLUMN test_column SET DATA TYPE LONGTEXT",
+ "ALTER TABLE test_table MODIFY COLUMN test_column LONGTEXT",
+ )
+ self.validate_identity(
+ "CREATE TABLE t (c DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC",
+ "CREATE TABLE t (c DATETIME DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP()) DEFAULT CHARACTER SET=utf8 ROW_FORMAT=DYNAMIC",
+ )
self.validate_all(
"CREATE TABLE z (a INT) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARACTER SET=utf8 COLLATE=utf8_bin COMMENT='x'",
@@ -78,12 +99,6 @@ class TestMySQL(Validator):
"sqlite": "CREATE TABLE z (a INTEGER)",
},
)
- self.validate_all(
- "CREATE TABLE t (c DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC",
- write={
- "mysql": "CREATE TABLE t (c DATETIME DEFAULT CURRENT_TIMESTAMP() ON UPDATE CURRENT_TIMESTAMP()) DEFAULT CHARACTER SET=utf8 ROW_FORMAT=DYNAMIC",
- },
- )
self.validate_all(
"CREATE TABLE x (id int not null auto_increment, primary key (id))",
write={
@@ -96,33 +111,9 @@ class TestMySQL(Validator):
"sqlite": "CREATE TABLE x (id INTEGER NOT NULL)",
},
)
- self.validate_all(
- "CREATE TABLE `foo` (`id` char(36) NOT NULL DEFAULT (uuid()), PRIMARY KEY (`id`), UNIQUE KEY `id` (`id`))",
- write={
- "mysql": "CREATE TABLE `foo` (`id` CHAR(36) NOT NULL DEFAULT (UUID()), PRIMARY KEY (`id`), UNIQUE `id` (`id`))",
- },
- )
- self.validate_all(
- "CREATE TABLE IF NOT EXISTS industry_info (a BIGINT(20) NOT NULL AUTO_INCREMENT, b BIGINT(20) NOT NULL, c VARCHAR(1000), PRIMARY KEY (a), UNIQUE KEY d (b), KEY e (b))",
- write={
- "mysql": "CREATE TABLE IF NOT EXISTS industry_info (a BIGINT(20) NOT NULL AUTO_INCREMENT, b BIGINT(20) NOT NULL, c VARCHAR(1000), PRIMARY KEY (a), UNIQUE d (b), INDEX e (b))",
- },
- )
- self.validate_all(
- "CREATE TABLE test (ts TIMESTAMP, ts_tz TIMESTAMPTZ, ts_ltz TIMESTAMPLTZ)",
- write={
- "mysql": "CREATE TABLE test (ts DATETIME, ts_tz TIMESTAMP, ts_ltz TIMESTAMP)",
- },
- )
- self.validate_all(
- "ALTER TABLE test_table ALTER COLUMN test_column SET DATA TYPE LONGTEXT",
- write={
- "mysql": "ALTER TABLE test_table MODIFY COLUMN test_column LONGTEXT",
- },
- )
- self.validate_identity("ALTER TABLE test_table ALTER COLUMN test_column SET DEFAULT 1")
def test_identity(self):
+ self.validate_identity("ALTER TABLE test_table ALTER COLUMN test_column SET DEFAULT 1")
self.validate_identity("SELECT DATE_FORMAT(NOW(), '%Y-%m-%d %H:%i:00.0000')")
self.validate_identity("SELECT @var1 := 1, @var2")
self.validate_identity("UNLOCK TABLES")
|
Online DDL in MySQL
Could support for MySQL's online DDL please be added to sqlglot? That is, DDL operations should support `ALGORITHM` and `LOCK` clauses.
See, e.g. https://dev.mysql.com/doc/refman/8.0/en/innodb-online-ddl.html
Here's an example of what I tried to do, the last step is what I'd like to see improved.
```
>>> import sys; sys.version_info
sys.version_info(major=3, minor=11, micro=7, releaselevel='final', serial=0)
>>> import sqlglot; sqlglot._version.version_tuple
(21, 1, 2)
>>> sqlglot.parse_one("ALTER TABLE t1 ADD COLUMN x INT;", dialect="mysql")
AlterTable(
this=Table(
this=Identifier(this=t1, quoted=False)),
actions=[
ColumnDef(
this=Identifier(this=x, quoted=False),
kind=DataType(this=Type.INT, nested=False))])
>>> sqlglot.parse_one("ALTER TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE;", dialect="mysql")
'ALTER TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE' contains unsupported syntax. Falling back to parsing as a 'Command'.
Command(this=ALTER, expression=TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE)
```
I'll take a look at the code myself, but I suspect it will take me some time to understand how sqlglot's modules are interrelated, and the authors can probably do it faster/better.
Thanks!
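For what it's worth, the patch above makes this statement parse into a proper `AlterTable`, with the `ALGORITHM`/`LOCK` clauses captured as `options`; a sketch of the post-patch behavior:
```python
import sqlglot

ast = sqlglot.parse_one(
    "ALTER TABLE t1 ADD COLUMN x INT, ALGORITHM=INPLACE, LOCK=EXCLUSIVE",
    dialect="mysql",
)
print(type(ast).__name__)        # AlterTable, no more Command fallback
print(ast.sql(dialect="mysql"))  # round-trips per the new validate_identity test
```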
|
0.0
|
c9eef99b8fe3367c22a8186fb397ad550ac11386
|
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery"
] |
[
"tests/dialects/test_bigquery.py::TestBigQuery::test_errors",
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_bigquery.py::TestBigQuery::test_warnings",
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_identity",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_is_null",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_monthname",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_safe_div",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_types"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-26 14:20:22+00:00
|
mit
| 6,020 |
|
tobymao__sqlglot-3045
|
diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
index fcb3aab9..c3888a3b 100644
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -93,6 +93,14 @@ class Oracle(Dialect):
"XMLTABLE": lambda self: self._parse_xml_table(),
}
+ PROPERTY_PARSERS = {
+ **parser.Parser.PROPERTY_PARSERS,
+ "GLOBAL": lambda self: self._match_text_seq("TEMPORARY")
+ and self.expression(exp.TemporaryProperty, this="GLOBAL"),
+ "PRIVATE": lambda self: self._match_text_seq("TEMPORARY")
+ and self.expression(exp.TemporaryProperty, this="PRIVATE"),
+ }
+
QUERY_MODIFIER_PARSERS = {
**parser.Parser.QUERY_MODIFIER_PARSERS,
TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()),
@@ -207,6 +215,7 @@ class Oracle(Dialect):
exp.Substring: rename_func("SUBSTR"),
exp.Table: lambda self, e: self.table_sql(e, sep=" "),
exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
+ exp.TemporaryProperty: lambda _, e: f"{e.name or 'GLOBAL'} TEMPORARY",
exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)),
exp.ToChar: lambda self, e: self.function_fallback_sql(e),
exp.Trim: trim_sql,
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index ee4cdde5..bfffe31d 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2463,7 +2463,7 @@ class StabilityProperty(Property):
class TemporaryProperty(Property):
- arg_types = {}
+ arg_types = {"this": False}
class TransformModelProperty(Property):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 66466734..e8307dfa 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -73,7 +73,7 @@ class Generator(metaclass=_Generator):
TRANSFORMS: t.Dict[t.Type[exp.Expression], t.Callable[..., str]] = {
**JSON_PATH_PART_TRANSFORMS,
exp.AutoRefreshProperty: lambda self, e: f"AUTO REFRESH {self.sql(e, 'this')}",
- exp.CaseSpecificColumnConstraint: lambda self,
+ exp.CaseSpecificColumnConstraint: lambda _,
e: f"{'NOT ' if e.args.get('not_') else ''}CASESPECIFIC",
exp.CharacterSetColumnConstraint: lambda self, e: f"CHARACTER SET {self.sql(e, 'this')}",
exp.CharacterSetProperty: lambda self,
@@ -82,7 +82,7 @@ class Generator(metaclass=_Generator):
e: f"CLUSTERED ({self.expressions(e, 'this', indent=False)})",
exp.CollateColumnConstraint: lambda self, e: f"COLLATE {self.sql(e, 'this')}",
exp.CommentColumnConstraint: lambda self, e: f"COMMENT {self.sql(e, 'this')}",
- exp.CopyGrantsProperty: lambda self, e: "COPY GRANTS",
+ exp.CopyGrantsProperty: lambda *_: "COPY GRANTS",
exp.DateAdd: lambda self, e: self.func(
"DATE_ADD", e.this, e.expression, exp.Literal.string(e.text("unit"))
),
@@ -90,8 +90,8 @@ class Generator(metaclass=_Generator):
exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}",
exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}",
exp.ExecuteAsProperty: lambda self, e: self.naked_property(e),
- exp.ExternalProperty: lambda self, e: "EXTERNAL",
- exp.HeapProperty: lambda self, e: "HEAP",
+ exp.ExternalProperty: lambda *_: "EXTERNAL",
+ exp.HeapProperty: lambda *_: "HEAP",
exp.InheritsProperty: lambda self, e: f"INHERITS ({self.expressions(e, flat=True)})",
exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}",
exp.InputModelProperty: lambda self, e: f"INPUT{self.sql(e, 'this')}",
@@ -104,13 +104,13 @@ class Generator(metaclass=_Generator):
),
exp.LanguageProperty: lambda self, e: self.naked_property(e),
exp.LocationProperty: lambda self, e: self.naked_property(e),
- exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG",
- exp.MaterializedProperty: lambda self, e: "MATERIALIZED",
+ exp.LogProperty: lambda _, e: f"{'NO ' if e.args.get('no') else ''}LOG",
+ exp.MaterializedProperty: lambda *_: "MATERIALIZED",
exp.NonClusteredColumnConstraint: lambda self,
e: f"NONCLUSTERED ({self.expressions(e, 'this', indent=False)})",
- exp.NoPrimaryIndexProperty: lambda self, e: "NO PRIMARY INDEX",
- exp.NotForReplicationColumnConstraint: lambda self, e: "NOT FOR REPLICATION",
- exp.OnCommitProperty: lambda self,
+ exp.NoPrimaryIndexProperty: lambda *_: "NO PRIMARY INDEX",
+ exp.NotForReplicationColumnConstraint: lambda *_: "NOT FOR REPLICATION",
+ exp.OnCommitProperty: lambda _,
e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS",
exp.OnProperty: lambda self, e: f"ON {self.sql(e, 'this')}",
exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}",
@@ -121,21 +121,21 @@ class Generator(metaclass=_Generator):
exp.ReturnsProperty: lambda self, e: self.naked_property(e),
exp.SampleProperty: lambda self, e: f"SAMPLE BY {self.sql(e, 'this')}",
exp.SetConfigProperty: lambda self, e: self.sql(e, "this"),
- exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
+ exp.SetProperty: lambda _, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}",
- exp.SqlReadWriteProperty: lambda self, e: e.name,
- exp.SqlSecurityProperty: lambda self,
+ exp.SqlReadWriteProperty: lambda _, e: e.name,
+ exp.SqlSecurityProperty: lambda _,
e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
- exp.StabilityProperty: lambda self, e: e.name,
- exp.TemporaryProperty: lambda self, e: "TEMPORARY",
+ exp.StabilityProperty: lambda _, e: e.name,
+ exp.TemporaryProperty: lambda *_: "TEMPORARY",
exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}",
exp.Timestamp: lambda self, e: self.func("TIMESTAMP", e.this, e.expression),
exp.ToTableProperty: lambda self, e: f"TO {self.sql(e.this)}",
exp.TransformModelProperty: lambda self, e: self.func("TRANSFORM", *e.expressions),
- exp.TransientProperty: lambda self, e: "TRANSIENT",
- exp.UppercaseColumnConstraint: lambda self, e: "UPPERCASE",
+ exp.TransientProperty: lambda *_: "TRANSIENT",
+ exp.UppercaseColumnConstraint: lambda *_: "UPPERCASE",
exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]),
- exp.VolatileProperty: lambda self, e: "VOLATILE",
+ exp.VolatileProperty: lambda *_: "VOLATILE",
exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
}
|
tobymao/sqlglot
|
7b2cff84f9a544435aa22954536eb7c9c2632816
|
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index d49f7e91..9028b031 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -1,4 +1,4 @@
-from sqlglot import exp, parse_one
+from sqlglot import exp
from sqlglot.errors import UnsupportedError
from tests.dialects.test_dialect import Validator
@@ -7,11 +7,11 @@ class TestOracle(Validator):
dialect = "oracle"
def test_oracle(self):
- self.validate_identity("REGEXP_REPLACE('source', 'search')")
- parse_one("ALTER TABLE tbl_name DROP FOREIGN KEY fk_symbol", dialect="oracle").assert_is(
- exp.AlterTable
- )
+ self.parse_one("ALTER TABLE tbl_name DROP FOREIGN KEY fk_symbol").assert_is(exp.AlterTable)
+ self.validate_identity("CREATE GLOBAL TEMPORARY TABLE t AS SELECT * FROM orders")
+ self.validate_identity("CREATE PRIVATE TEMPORARY TABLE t AS SELECT * FROM orders")
+ self.validate_identity("REGEXP_REPLACE('source', 'search')")
self.validate_identity("TIMESTAMP(3) WITH TIME ZONE")
self.validate_identity("CURRENT_TIMESTAMP(precision)")
self.validate_identity("ALTER TABLE tbl_name DROP FOREIGN KEY fk_symbol")
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index a615c190..a0df87f0 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -144,7 +144,7 @@ class TestTSQL(Validator):
"tsql": "CREATE TABLE #mytemptable (a INTEGER)",
"snowflake": "CREATE TEMPORARY TABLE mytemptable (a INT)",
"duckdb": "CREATE TEMPORARY TABLE mytemptable (a INT)",
- "oracle": "CREATE TEMPORARY TABLE mytemptable (a NUMBER)",
+ "oracle": "CREATE GLOBAL TEMPORARY TABLE mytemptable (a NUMBER)",
"hive": "CREATE TEMPORARY TABLE mytemptable (a INT)",
"spark2": "CREATE TEMPORARY TABLE mytemptable (a INT) USING PARQUET",
"spark": "CREATE TEMPORARY TABLE mytemptable (a INT) USING PARQUET",
|
feat: support for temporary tables in Oracle dialect
**Is your feature request related to a problem? Please describe.**
Oracle requires either the `PRIVATE` or the `GLOBAL` keyword when creating a temporary table -- the syntax `CREATE TEMPORARY TABLE` is invalid; it should be either `CREATE PRIVATE TEMPORARY TABLE` or `CREATE GLOBAL TEMPORARY TABLE`
**Describe the solution you'd like**
The `PRIVATE` temp tables have a bunch of restrictions and seem unlike most other temporary tables to me -- I think adding `GLOBAL` to the Oracle dialect would get most users where they're likely to want to go.
**Describe alternatives you've considered**
I've got a heinous hack in Ibis where I intercept the output of `self.create_table` and then do a string replace to add in `GLOBAL`, and that works fine, but it's brittle and doesn't help anyone else.
**Additional context**
https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/CREATE-TABLE.html#GUID-F9CE0CC3-13AE-4744-A43C-EAC7A71AAAB6
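Per the patch and updated tests above, both Oracle temporary-table forms now round-trip, and T-SQL temp tables transpile with the `GLOBAL` keyword; a sketch:
```python
import sqlglot

# Oracle identity (mirrors the new test cases):
print(sqlglot.transpile(
    "CREATE GLOBAL TEMPORARY TABLE t AS SELECT * FROM orders",
    read="oracle", write="oracle",
)[0])

# A T-SQL temp table now gains the GLOBAL keyword on the Oracle side:
print(sqlglot.transpile(
    "CREATE TABLE #mytemptable (a INTEGER)", read="tsql", write="oracle",
)[0])  # CREATE GLOBAL TEMPORARY TABLE mytemptable (a NUMBER)
```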
|
0.0
|
7b2cff84f9a544435aa22954536eb7c9c2632816
|
[
"tests/dialects/test_tsql.py::TestTSQL::test_tsql"
] |
[
"tests/dialects/test_oracle.py::TestOracle::test_connect_by",
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_json_table",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_oracle",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table",
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert_date_format",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_json",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_option",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_temporal_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-28 13:44:51+00:00
|
mit
| 6,021 |
|
tobymao__sqlglot-3073
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 1a248750..b3c63460 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1605,7 +1605,7 @@ class TitleColumnConstraint(ColumnConstraintKind):
class UniqueColumnConstraint(ColumnConstraintKind):
- arg_types = {"this": False, "index_type": False}
+ arg_types = {"this": False, "index_type": False, "on_conflict": False}
class UppercaseColumnConstraint(ColumnConstraintKind):
@@ -1883,8 +1883,8 @@ class OnConflict(Expression):
arg_types = {
"duplicate": False,
"expressions": False,
- "nothing": False,
- "key": False,
+ "action": False,
+ "conflict_keys": False,
"constraint": False,
}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index e6f5c4b0..753d4391 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -869,7 +869,9 @@ class Generator(metaclass=_Generator):
this = f" {this}" if this else ""
index_type = expression.args.get("index_type")
index_type = f" USING {index_type}" if index_type else ""
- return f"UNIQUE{this}{index_type}"
+ on_conflict = self.sql(expression, "on_conflict")
+ on_conflict = f" {on_conflict}" if on_conflict else ""
+ return f"UNIQUE{this}{index_type}{on_conflict}"
def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
return self.sql(expression, "this")
@@ -1457,14 +1459,15 @@ class Generator(metaclass=_Generator):
where = self.sql(expression, "where")
where = f"{self.sep()}REPLACE WHERE {where}" if where else ""
expression_sql = f"{self.sep()}{self.sql(expression, 'expression')}"
- conflict = self.sql(expression, "conflict")
+ on_conflict = self.sql(expression, "conflict")
+ on_conflict = f" {on_conflict}" if on_conflict else ""
by_name = " BY NAME" if expression.args.get("by_name") else ""
returning = self.sql(expression, "returning")
if self.RETURNING_END:
- expression_sql = f"{expression_sql}{conflict}{returning}"
+ expression_sql = f"{expression_sql}{on_conflict}{returning}"
else:
- expression_sql = f"{returning}{expression_sql}{conflict}"
+ expression_sql = f"{returning}{expression_sql}{on_conflict}"
sql = f"INSERT{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}"
return self.prepend_ctes(expression, sql)
@@ -1496,17 +1499,20 @@ class Generator(metaclass=_Generator):
def onconflict_sql(self, expression: exp.OnConflict) -> str:
conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT"
+
constraint = self.sql(expression, "constraint")
- if constraint:
- constraint = f"ON CONSTRAINT {constraint}"
- key = self.expressions(expression, key="key", flat=True)
- do = "" if expression.args.get("duplicate") else " DO "
- nothing = "NOTHING" if expression.args.get("nothing") else ""
+ constraint = f" ON CONSTRAINT {constraint}" if constraint else ""
+
+ conflict_keys = self.expressions(expression, key="conflict_keys", flat=True)
+ conflict_keys = f"({conflict_keys}) " if conflict_keys else " "
+ action = self.sql(expression, "action")
+
expressions = self.expressions(expression, flat=True)
- set_keyword = "SET " if self.DUPLICATE_KEY_UPDATE_WITH_SET else ""
if expressions:
- expressions = f"UPDATE {set_keyword}{expressions}"
- return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}"
+ set_keyword = "SET " if self.DUPLICATE_KEY_UPDATE_WITH_SET else ""
+ expressions = f" {set_keyword}{expressions}"
+
+ return f"{conflict}{constraint}{conflict_keys}{action}{expressions}"
def returning_sql(self, expression: exp.Returning) -> str:
return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}"
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 49dac2ea..ad2907b8 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -966,6 +966,11 @@ class Parser(metaclass=_Parser):
"READ": ("WRITE", "ONLY"),
}
+ CONFLICT_ACTIONS: OPTIONS_TYPE = dict.fromkeys(
+ ("ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK", "UPDATE"), tuple()
+ )
+ CONFLICT_ACTIONS["DO"] = ("NOTHING", "UPDATE")
+
USABLES: OPTIONS_TYPE = dict.fromkeys(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"), tuple())
INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}
@@ -2112,31 +2117,31 @@ class Parser(metaclass=_Parser):
if not conflict and not duplicate:
return None
- nothing = None
- expressions = None
- key = None
+ conflict_keys = None
constraint = None
if conflict:
if self._match_text_seq("ON", "CONSTRAINT"):
constraint = self._parse_id_var()
- else:
- key = self._parse_csv(self._parse_value)
+ elif self._match(TokenType.L_PAREN):
+ conflict_keys = self._parse_csv(self._parse_id_var)
+ self._match_r_paren()
- self._match_text_seq("DO")
- if self._match_text_seq("NOTHING"):
- nothing = True
- else:
- self._match(TokenType.UPDATE)
+ action = self._parse_var_from_options(
+ self.CONFLICT_ACTIONS,
+ )
+ if self._prev.token_type == TokenType.UPDATE:
self._match(TokenType.SET)
expressions = self._parse_csv(self._parse_equality)
+ else:
+ expressions = None
return self.expression(
exp.OnConflict,
duplicate=duplicate,
expressions=expressions,
- nothing=nothing,
- key=key,
+ action=action,
+ conflict_keys=conflict_keys,
constraint=constraint,
)
@@ -4417,9 +4422,7 @@ class Parser(metaclass=_Parser):
self._match_text_seq("LENGTH")
return self.expression(exp.InlineLengthColumnConstraint, this=self._parse_bitwise())
- def _parse_not_constraint(
- self,
- ) -> t.Optional[exp.Expression]:
+ def _parse_not_constraint(self) -> t.Optional[exp.Expression]:
if self._match_text_seq("NULL"):
return self.expression(exp.NotNullColumnConstraint)
if self._match_text_seq("CASESPECIFIC"):
@@ -4447,16 +4450,21 @@ class Parser(metaclass=_Parser):
if not self._match(TokenType.CONSTRAINT):
return self._parse_unnamed_constraint(constraints=self.SCHEMA_UNNAMED_CONSTRAINTS)
- this = self._parse_id_var()
- expressions = []
+ return self.expression(
+ exp.Constraint,
+ this=self._parse_id_var(),
+ expressions=self._parse_unnamed_constraints(),
+ )
+ def _parse_unnamed_constraints(self) -> t.List[exp.Expression]:
+ constraints = []
while True:
constraint = self._parse_unnamed_constraint() or self._parse_function()
if not constraint:
break
- expressions.append(constraint)
+ constraints.append(constraint)
- return self.expression(exp.Constraint, this=this, expressions=expressions)
+ return constraints
def _parse_unnamed_constraint(
self, constraints: t.Optional[t.Collection[str]] = None
@@ -4478,6 +4486,7 @@ class Parser(metaclass=_Parser):
exp.UniqueColumnConstraint,
this=self._parse_schema(self._parse_id_var(any_token=False)),
index_type=self._match(TokenType.USING) and self._advance_any() and self._prev.text,
+ on_conflict=self._parse_on_conflict(),
)
def _parse_key_constraint_options(self) -> t.List[str]:
|
tobymao/sqlglot
|
223a4751f88809710872fa7d757d22d9eeeb4f40
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 1d0ea8bd..fe4e3533 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -691,13 +691,13 @@ class TestPostgres(Validator):
"CREATE INDEX index_issues_on_title_trigram ON public.issues USING gin(title public.gin_trgm_ops)"
)
self.validate_identity(
- "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT (id) DO NOTHING RETURNING *"
+ "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT(id) DO NOTHING RETURNING *"
)
self.validate_identity(
- "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT (id) DO UPDATE SET x.id = 1 RETURNING *"
+ "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT(id) DO UPDATE SET x.id = 1 RETURNING *"
)
self.validate_identity(
- "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT (id) DO UPDATE SET x.id = excluded.id RETURNING *"
+ "INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT(id) DO UPDATE SET x.id = excluded.id RETURNING *"
)
self.validate_identity(
"INSERT INTO x VALUES (1, 'a', 2.0) ON CONFLICT ON CONSTRAINT pkey DO NOTHING RETURNING *"
diff --git a/tests/dialects/test_sqlite.py b/tests/dialects/test_sqlite.py
index 2421987b..e935c194 100644
--- a/tests/dialects/test_sqlite.py
+++ b/tests/dialects/test_sqlite.py
@@ -7,6 +7,10 @@ class TestSQLite(Validator):
dialect = "sqlite"
def test_ddl(self):
+ for conflict_action in ("ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"):
+ with self.subTest(f"ON CONFLICT {conflict_action}"):
+ self.validate_identity("CREATE TABLE a (b, c, UNIQUE (b, c) ON CONFLICT IGNORE)")
+
self.validate_identity("INSERT OR ABORT INTO foo (x, y) VALUES (1, 2)")
self.validate_identity("INSERT OR FAIL INTO foo (x, y) VALUES (1, 2)")
self.validate_identity("INSERT OR IGNORE INTO foo (x, y) VALUES (1, 2)")
|
SQLite ParseError: cannot parse ON CONFLICT clause in a UNIQUE table constraint
sqlglot version is 22.2.0
#### MVE
```python
import sqlglot
inputstring = """CREATE TABLE a (
b,
c,
UNIQUE (b, c) ON CONFLICT IGNORE
);"""
print(sqlglot.parse(inputstring, dialect='sqlite'))
```
#### Raises
```
sqlglot.errors.ParseError: Expecting ). Line 4, Col: 20.
CREATE TABLE a (
b,
c,
UNIQUE (b, c) ON CONFLICT IGNORE
);
```
(**ON** is underlined in terminal)
#### Official Docs
[SQLite Create Table Docs](https://www.sqlite.org/lang_createtable.html)
Tested on the [official fiddle](https://sqlite.org/fiddle/) to make sure the SQL was a valid statement, using
```sqlite
CREATE TABLE a (
b,
c,
UNIQUE (b, c) ON CONFLICT IGNORE
);
INSERT INTO a(b,c) VALUES (1,1), (2,1), (1,1);
SELECT * FROM a;
```
Output is two rows as expected: (1,1), (2,1)
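With the patch applied, the constraint parses and round-trips (the new `test_ddl` case above validates exactly this identity); a minimal sketch:
```python
import sqlglot

ast = sqlglot.parse_one(
    "CREATE TABLE a (b, c, UNIQUE (b, c) ON CONFLICT IGNORE)", dialect="sqlite"
)
print(ast.sql(dialect="sqlite"))  # no more "Expecting )" ParseError
```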
|
0.0
|
223a4751f88809710872fa7d757d22d9eeeb4f40
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_sqlite.py::TestSQLite::test_ddl"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest_json_array",
"tests/dialects/test_postgres.py::TestPostgres::test_variance",
"tests/dialects/test_sqlite.py::TestSQLite::test_datediff",
"tests/dialects/test_sqlite.py::TestSQLite::test_hexadecimal_literal",
"tests/dialects/test_sqlite.py::TestSQLite::test_longvarchar_dtype",
"tests/dialects/test_sqlite.py::TestSQLite::test_sqlite",
"tests/dialects/test_sqlite.py::TestSQLite::test_warnings",
"tests/dialects/test_sqlite.py::TestSQLite::test_window_null_treatment"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-03 14:23:49+00:00
|
mit
| 6,022 |
|
tobymao__sqlglot-3077
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index b3c63460..acf18ff1 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1828,6 +1828,7 @@ class Index(Expression):
class Insert(DDL, DML):
arg_types = {
+ "hint": False,
"with": False,
"this": True,
"expression": False,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 9e0f4da9..f0264197 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1451,6 +1451,7 @@ class Generator(metaclass=_Generator):
return f"{sql})"
def insert_sql(self, expression: exp.Insert) -> str:
+ hint = self.sql(expression, "hint")
overwrite = expression.args.get("overwrite")
if isinstance(expression.this, exp.Directory):
@@ -1481,7 +1482,7 @@ class Generator(metaclass=_Generator):
else:
expression_sql = f"{returning}{expression_sql}{on_conflict}"
- sql = f"INSERT{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}"
+ sql = f"INSERT{hint}{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}"
return self.prepend_ctes(expression, sql)
def intersect_sql(self, expression: exp.Intersect) -> str:
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 8da2eacd..8ea2dbab 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2061,6 +2061,7 @@ class Parser(metaclass=_Parser):
def _parse_insert(self) -> exp.Insert:
comments = ensure_list(self._prev_comments)
+ hint = self._parse_hint()
overwrite = self._match(TokenType.OVERWRITE)
ignore = self._match(TokenType.IGNORE)
local = self._match_text_seq("LOCAL")
@@ -2087,6 +2088,7 @@ class Parser(metaclass=_Parser):
return self.expression(
exp.Insert,
comments=comments,
+ hint=hint,
this=this,
by_name=self._match_text_seq("BY", "NAME"),
exists=self._parse_exists(),
|
tobymao/sqlglot
|
4173ea29bbd8944896c259fe45209de69fcbdc46
|
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 9438507b..67e18460 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -210,6 +210,8 @@ class TestOracle(Validator):
self.validate_identity(
"SELECT /*+ LEADING(e j) */ * FROM employees e, departments d, job_history j WHERE e.department_id = d.department_id AND e.hire_date = j.start_date"
)
+ self.validate_identity("INSERT /*+ APPEND */ INTO IAP_TBL (id, col1) VALUES (2, 'test2')")
+ self.validate_identity("INSERT /*+ APPEND_VALUES */ INTO dest_table VALUES (i, 'Value')")
def test_xml_table(self):
self.validate_identity("XMLTABLE('x')")
|
Oracle - insert append - error parsing hint
Error parsing an Oracle insert append hint.
# SQL
```SQL
-- Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
select banner from v$version;
-- create table
create table iap_tbl (id int, col1 varchar2(20 char));
/
-- normal insert
insert into iap_tbl (id, col1) values (1, 'test');
commit;
/
select * from iap_tbl;
/
-- insert with append hint
insert /*+ append */ into iap_tbl (id, col1) values (2, 'test2');
commit;
```
# Python
```Python
# pip show sqlglot
#Name: sqlglot
#Version: 22.2.0
from sqlglot import parse_one
# error parsing hint
sql_nok = "insert /*+ append */ into iap_tbl (id, col1) values (2, 'test2')"
# ok when + removed from comment, normal comment not hint
sql_ok = "insert /* append */ into iap_tbl (id, col1) values (2, 'test2')"
parse_one(sql_ok)
parse_one(sql_nok)
```
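After the patch, the hint is captured on the `Insert` node and the statement round-trips (mirroring the new Oracle test); a sketch:
```python
from sqlglot import parse_one

ast = parse_one(
    "INSERT /*+ APPEND */ INTO IAP_TBL (id, col1) VALUES (2, 'test2')",
    dialect="oracle",
)
print(ast.args.get("hint"))      # the parsed /*+ APPEND */ hint
print(ast.sql(dialect="oracle"))
```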
|
0.0
|
4173ea29bbd8944896c259fe45209de69fcbdc46
|
[
"tests/dialects/test_oracle.py::TestOracle::test_hints"
] |
[
"tests/dialects/test_oracle.py::TestOracle::test_connect_by",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_json_table",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_oracle",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-04 14:50:07+00:00
|
mit
| 6,023 |
|
tobymao__sqlglot-3089
|
diff --git a/sqlglot/dialects/__init__.py b/sqlglot/dialects/__init__.py
index 276ad59c..29c65800 100644
--- a/sqlglot/dialects/__init__.py
+++ b/sqlglot/dialects/__init__.py
@@ -61,6 +61,7 @@ dialect implementations in order to understand how their various components can
----
"""
+from sqlglot.dialects.athena import Athena
from sqlglot.dialects.bigquery import BigQuery
from sqlglot.dialects.clickhouse import ClickHouse
from sqlglot.dialects.databricks import Databricks
diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py
new file mode 100644
index 00000000..dc87d8dc
--- /dev/null
+++ b/sqlglot/dialects/athena.py
@@ -0,0 +1,12 @@
+from __future__ import annotations
+
+from sqlglot.dialects.trino import Trino
+from sqlglot.tokens import TokenType
+
+
+class Athena(Trino):
+ class Parser(Trino.Parser):
+ STATEMENT_PARSERS = {
+ **Trino.Parser.STATEMENT_PARSERS,
+ TokenType.USING: lambda self: self._parse_as_command(self._prev),
+ }
diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index f11c0da2..d2533ebc 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -31,6 +31,7 @@ class Dialects(str, Enum):
DIALECT = ""
+ ATHENA = "athena"
BIGQUERY = "bigquery"
CLICKHOUSE = "clickhouse"
DATABRICKS = "databricks"
|
tobymao/sqlglot
|
d898f559fac44789da08689e835619f978c05a3e
|
diff --git a/tests/dialects/test_athena.py b/tests/dialects/test_athena.py
new file mode 100644
index 00000000..99e36f21
--- /dev/null
+++ b/tests/dialects/test_athena.py
@@ -0,0 +1,16 @@
+from tests.dialects.test_dialect import Validator
+
+
+class TestAthena(Validator):
+ dialect = "athena"
+ maxDiff = None
+
+ def test_athena(self):
+ self.validate_identity(
+ """USING EXTERNAL FUNCTION some_function(input VARBINARY)
+ RETURNS VARCHAR
+ LAMBDA 'some-name'
+ SELECT
+ some_function(1)""",
+ check_command_warning=True,
+ )
|
Support User Defined Functions on Athena Dialect
It looks like sqlglot is not able to parse [AWS Athena's user defined functions syntax](https://docs.aws.amazon.com/athena/latest/ug/querying-udf.html):
```py
from sqlglot import parse
from sqlglot.dialects import Trino
parse("""
USING EXTERNAL FUNCTION some_function(input VARBINARY)
RETURNS VARCHAR
LAMBDA 'some-name'
SELECT
some_function(1)
""", dialect=Trino)
```
Exception:
```
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 2, Col: 9.
USING EXTERNAL FUNCTION some_function(input VARBINARY)
RETURNS VARCHAR
LAMBDA 'some-name'
```
We are using the `Trino` dialect since sqlglot does not have a dedicated one for Athena, as far as I understand, but Athena is based on Trino, so this dialect otherwise works perfectly for our codebase :slightly_smiling_face:
Am I missing something? Does it need a dedicated dialect for Athena?
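For reference, the patch above adds an `athena` dialect that parses this statement as a `Command` (emitting a warning) rather than raising; a sketch:
```python
from sqlglot import parse

# Parses with a "falling back to Command" warning instead of a ParseError.
parse("""
USING EXTERNAL FUNCTION some_function(input VARBINARY)
RETURNS VARCHAR
LAMBDA 'some-name'
SELECT
  some_function(1)
""", dialect="athena")
```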
|
0.0
|
d898f559fac44789da08689e835619f978c05a3e
|
[
"tests/dialects/test_athena.py::TestAthena::test_athena"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-06 16:35:36+00:00
|
mit
| 6,024 |
|
tobymao__sqlglot-3092
|
diff --git a/sqlglot/dataframe/sql/dataframe.py b/sqlglot/dataframe/sql/dataframe.py
index 0bacbf90..88295749 100644
--- a/sqlglot/dataframe/sql/dataframe.py
+++ b/sqlglot/dataframe/sql/dataframe.py
@@ -18,8 +18,6 @@ from sqlglot.dataframe.sql.transforms import replace_id_value
from sqlglot.dataframe.sql.util import get_tables_from_expression_with_join
from sqlglot.dataframe.sql.window import Window
from sqlglot.helper import ensure_list, object_to_dict, seq_get
-from sqlglot.optimizer import optimize as optimize_func
-from sqlglot.optimizer.qualify_columns import quote_identifiers
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import (
@@ -308,9 +306,8 @@ class DataFrame:
for expression_type, select_expression in select_expressions:
select_expression = select_expression.transform(replace_id_value, replacement_mapping)
if optimize:
- quote_identifiers(select_expression, dialect=dialect)
select_expression = t.cast(
- exp.Select, optimize_func(select_expression, dialect=dialect)
+ exp.Select, self.spark._optimize(select_expression, dialect=dialect)
)
select_expression = df._replace_cte_names_with_hashes(select_expression)
diff --git a/sqlglot/dataframe/sql/session.py b/sqlglot/dataframe/sql/session.py
index bfc022bd..4e47aaa9 100644
--- a/sqlglot/dataframe/sql/session.py
+++ b/sqlglot/dataframe/sql/session.py
@@ -12,6 +12,8 @@ from sqlglot.dataframe.sql.readwriter import DataFrameReader
from sqlglot.dataframe.sql.types import StructType
from sqlglot.dataframe.sql.util import get_column_mapping_from_schema_input
from sqlglot.helper import classproperty
+from sqlglot.optimizer import optimize
+from sqlglot.optimizer.qualify_columns import quote_identifiers
if t.TYPE_CHECKING:
from sqlglot.dataframe.sql._typing import ColumnLiterals, SchemaInput
@@ -104,8 +106,15 @@ class SparkSession:
sel_expression = exp.Select(**select_kwargs)
return DataFrame(self, sel_expression)
+ def _optimize(
+ self, expression: exp.Expression, dialect: t.Optional[Dialect] = None
+ ) -> exp.Expression:
+ dialect = dialect or self.dialect
+ quote_identifiers(expression, dialect=dialect)
+ return optimize(expression, dialect=dialect)
+
def sql(self, sqlQuery: str) -> DataFrame:
- expression = sqlglot.parse_one(sqlQuery, read=self.dialect)
+ expression = self._optimize(sqlglot.parse_one(sqlQuery, read=self.dialect))
if isinstance(expression, exp.Select):
df = DataFrame(self, expression)
df = df._convert_leaf_to_cte()
|
tobymao/sqlglot
|
21e4fca2b744a22981d8ff1696986061d3344d40
|
diff --git a/tests/dataframe/integration/test_session.py b/tests/dataframe/integration/test_session.py
index ec500340..3bb3e204 100644
--- a/tests/dataframe/integration/test_session.py
+++ b/tests/dataframe/integration/test_session.py
@@ -34,3 +34,10 @@ class TestSessionFunc(DataFrameValidator):
.agg(SF.countDistinct(SF.col("employee_id")))
)
self.compare_spark_with_sqlglot(df, dfs, skip_schema_compare=True)
+
+ def test_nameless_column(self):
+ query = "SELECT MAX(age) FROM employee"
+ df = self.spark.sql(query)
+ dfs = self.sqlglot.sql(query)
+ # Spark will alias the column to `max(age)` while sqlglot will alias to `_col_0` so their schemas will differ
+ self.compare_spark_with_sqlglot(df, dfs, skip_schema_compare=True)
diff --git a/tests/dataframe/unit/test_session.py b/tests/dataframe/unit/test_session.py
index e2ebae42..848c6032 100644
--- a/tests/dataframe/unit/test_session.py
+++ b/tests/dataframe/unit/test_session.py
@@ -79,7 +79,7 @@ class TestDataframeSession(DataFrameSQLValidator):
sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark")
df = self.spark.sql(query).groupBy(F.col("cola")).agg(F.sum("colb"))
self.assertEqual(
- "WITH t38189 AS (SELECT cola, colb FROM table), t42330 AS (SELECT cola, colb FROM t38189) SELECT cola, SUM(colb) FROM t42330 GROUP BY cola",
+ "WITH t26614 AS (SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`), t23454 AS (SELECT cola, colb FROM t26614) SELECT cola, SUM(colb) FROM t23454 GROUP BY cola",
df.sql(pretty=False, optimize=False)[0],
)
@@ -87,14 +87,14 @@ class TestDataframeSession(DataFrameSQLValidator):
query = "CREATE TABLE new_table AS WITH t1 AS (SELECT cola, colb FROM table) SELECT cola, colb, FROM t1"
sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark")
df = self.spark.sql(query)
- expected = "CREATE TABLE new_table AS SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`"
+ expected = "CREATE TABLE `new_table` AS SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`"
self.compare_sql(df, expected)
def test_sql_insert(self):
query = "WITH t1 AS (SELECT cola, colb FROM table) INSERT INTO new_table SELECT cola, colb FROM t1"
sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark")
df = self.spark.sql(query)
- expected = "INSERT INTO new_table SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`"
+ expected = "INSERT INTO `new_table` SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`"
self.compare_sql(df, expected)
def test_session_create_builder_patterns(self):
|
Spark SQL SELECT MAX without a column alias throws an error
**Fully reproducible code snippet**
```python
spark = SparkSession.builder.config("sqlframe.dialect", "spark").getOrCreate()
df = spark.sql("""
SELECT
MAX(col)
FROM (SELECT 1 as col) t
""")
```
throws `sqlglot.errors.ParseError: No expression was parsed from ''` because its `name` [here](https://github.com/tobymao/sqlglot/blob/main/sqlglot/dataframe/sql/dataframe.py#L173) is an empty string. This seems to be an issue with expressions that inherit from [Func](https://github.com/tobymao/sqlglot/blob/main/sqlglot/expressions.py#L4330) (MIN, MAX, ABS, etc.). Changing [Max](https://github.com/tobymao/sqlglot/blob/main/sqlglot/expressions.py#L5216) to inherit directly from [Condition](https://github.com/tobymao/sqlglot/blob/main/sqlglot/expressions.py#L903) fixes the issue. This isn't a problem for some more complex expressions that leverage multiple inheritance, like [DateAdd](https://github.com/tobymao/sqlglot/blob/main/sqlglot/expressions.py#L4700).
**Official Documentation**
https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select.html#parameters - aliases are optional in Spark SQL
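A hedged sketch of what the fix enables, using sqlglot's bundled DataFrame API (class and method names as in the test patch above): `SparkSession.sql` now optimizes the parsed query up front, so the nameless aggregate receives an alias instead of producing an empty `name` later.
```python
# Sketch under the fix: SparkSession.sql() routes the parsed query through
# _optimize(), which qualifies and aliases projections, so the nameless
# MAX(col) no longer yields an empty name downstream.
from sqlglot.dataframe.sql.session import SparkSession

spark = SparkSession()
df = spark.sql("SELECT MAX(col) FROM (SELECT 1 AS col) AS t")
print(df.sql(pretty=False)[0])  # the aggregate is aliased, e.g. as _col_0
```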
|
0.0
|
21e4fca2b744a22981d8ff1696986061d3344d40
|
[
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_sql_with_aggs",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_sql_create",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_sql_insert"
] |
[
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_sql_select_only",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_no_schema",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_one_row",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_row_mixed_primitives",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_dict_rows",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_session_create_builder_patterns",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_typed_schema_nested",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_typed_schema_basic",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_multiple_rows",
"tests/dataframe/unit/test_session.py::TestDataframeSession::test_cdf_str_schema"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-06 23:50:52+00:00
|
mit
| 6,025 |
|
tobymao__sqlglot-3111
|
diff --git a/README.md b/README.md
index 0bfedc66..e8eae258 100644
--- a/README.md
+++ b/README.md
@@ -150,7 +150,7 @@ sql = """
*/
SELECT
tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,
- CAST(x AS INT), # comment 3
+ CAST(x AS SIGNED), # comment 3
y -- comment 4
FROM
bar /* comment 5 */,
@@ -367,7 +367,9 @@ diff(parse_one("SELECT a + b, c, d"), parse_one("SELECT c, a - b, d"))
this=Identifier(this=a, quoted=False)),
expression=Column(
this=Identifier(this=b, quoted=False)))),
- Keep(source=Identifier(this=d, quoted=False), target=Identifier(this=d, quoted=False)),
+ Keep(
+ source=Column(this=Identifier(this=a, quoted=False)),
+ target=Column(this=Identifier(this=a, quoted=False))),
...
]
```
@@ -492,6 +494,7 @@ make docs-serve
```
make style # Only linter checks
make unit # Only unit tests
+make test # Unit and integration tests
make check # Full test suite & linter checks
```
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 4a7bd04b..4ba3ac34 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -199,6 +199,7 @@ class DuckDB(Dialect):
"LOGICAL": TokenType.BOOLEAN,
"ONLY": TokenType.ONLY,
"PIVOT_WIDER": TokenType.PIVOT,
+ "POSITIONAL": TokenType.POSITIONAL,
"SIGNED": TokenType.INT,
"STRING": TokenType.VARCHAR,
"UBIGINT": TokenType.UBIGINT,
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index fb808fda..5f000540 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -507,8 +507,9 @@ class Parser(metaclass=_Parser):
}
JOIN_METHODS = {
- TokenType.NATURAL,
TokenType.ASOF,
+ TokenType.NATURAL,
+ TokenType.POSITIONAL,
}
JOIN_SIDES = {
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 8676eee4..201a3c04 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -317,6 +317,7 @@ class TokenType(AutoName):
PERCENT = auto()
PIVOT = auto()
PLACEHOLDER = auto()
+ POSITIONAL = auto()
PRAGMA = auto()
PREWHERE = auto()
PRIMARY_KEY = auto()
|
tobymao/sqlglot
|
88033dad05550cde05dcb86cce61a621c071382c
|
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index b80d507c..35daff09 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -213,6 +213,7 @@ class TestDuckDB(Validator):
parse_one("a // b", read="duckdb").assert_is(exp.IntDiv).sql(dialect="duckdb"), "a // b"
)
+ self.validate_identity("SELECT df1.*, df2.* FROM df1 POSITIONAL JOIN df2")
self.validate_identity("MAKE_TIMESTAMP(1992, 9, 20, 13, 34, 27.123456)")
self.validate_identity("MAKE_TIMESTAMP(1667810584123456)")
self.validate_identity("SELECT EPOCH_MS(10) AS t")
|
DuckDB POSITIONAL JOIN syntax
DuckDB supports a unique positional join type that matches up the rows of equal-length tables, [documented here](https://duckdb.org/docs/sql/query_syntax/from#positional-joins). Currently sqlglot's duckdb dialect does not recognize this syntax: it parses `POSITIONAL` as a table alias on `df1` and emits the join as a plain comma join.
Here's a minimal repro, using the example from the documentation:
```python
from sqlglot import parse_one
parsed = parse_one("""
SELECT df1.*, df2.*
FROM df1
POSITIONAL JOIN df2;
""", dialect="duckdb")
print(f"""
{parsed!r}
--------------------
{parsed.sql(dialect="duckdb", pretty=True)}
""")
```
Which prints
```
Select(
expressions=[
Column(
this=Star(),
table=Identifier(this=df1, quoted=False)),
Column(
this=Star(),
table=Identifier(this=df2, quoted=False))],
from=From(
this=Table(
this=Identifier(this=df1, quoted=False),
alias=TableAlias(
this=Identifier(this=POSITIONAL, quoted=False)))),
joins=[
Join(
this=Table(
this=Identifier(this=df2, quoted=False)))])
--------------------
SELECT
df1.*,
df2.*
FROM df1 AS POSITIONAL, df2
```
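For comparison, a minimal sketch of the expected behavior once `POSITIONAL` is registered as a join method (as in the patch above): the query round-trips unchanged.
```python
# Sketch: the positional join survives a parse/generate round trip instead
# of degrading into a table alias plus a comma join.
from sqlglot import parse_one

sql = "SELECT df1.*, df2.* FROM df1 POSITIONAL JOIN df2"
assert parse_one(sql, dialect="duckdb").sql(dialect="duckdb") == sql
```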
|
0.0
|
88033dad05550cde05dcb86cce61a621c071382c
|
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_duckdb"
] |
[
"tests/dialects/test_duckdb.py::TestDuckDB::test_array",
"tests/dialects/test_duckdb.py::TestDuckDB::test_array_index",
"tests/dialects/test_duckdb.py::TestDuckDB::test_bool_or",
"tests/dialects/test_duckdb.py::TestDuckDB::test_cast",
"tests/dialects/test_duckdb.py::TestDuckDB::test_encode_decode",
"tests/dialects/test_duckdb.py::TestDuckDB::test_isinf",
"tests/dialects/test_duckdb.py::TestDuckDB::test_isnan",
"tests/dialects/test_duckdb.py::TestDuckDB::test_parameter_token",
"tests/dialects/test_duckdb.py::TestDuckDB::test_rename_table",
"tests/dialects/test_duckdb.py::TestDuckDB::test_sample",
"tests/dialects/test_duckdb.py::TestDuckDB::test_time",
"tests/dialects/test_duckdb.py::TestDuckDB::test_timestamps_with_units"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-09 10:02:02+00:00
|
mit
| 6,026 |
|
tobymao__sqlglot-3131
|
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 804775c3..18154779 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -20,8 +20,7 @@ from sqlglot.dialects.dialect import (
timestrtotime_sql,
var_map_sql,
)
-from sqlglot.expressions import Literal
-from sqlglot.helper import flatten, is_int, seq_get
+from sqlglot.helper import flatten, is_float, is_int, seq_get
from sqlglot.tokens import TokenType
if t.TYPE_CHECKING:
@@ -29,33 +28,28 @@ if t.TYPE_CHECKING:
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
-def _build_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
- if len(args) == 2:
- first_arg, second_arg = args
- if second_arg.is_string:
- # case: <string_expr> [ , <format> ]
- return build_formatted_time(exp.StrToTime, "snowflake")(args)
- return exp.UnixToTime(this=first_arg, scale=second_arg)
+def _build_timestamp(name: str, kind: exp.DataType.Type) -> t.Callable[[t.List], exp.Func]:
+ def _builder(args: t.List) -> exp.Func:
+ value = seq_get(args, 0)
- from sqlglot.optimizer.simplify import simplify_literals
+ if isinstance(value, exp.Literal):
+ int_value = is_int(value.this)
- # The first argument might be an expression like 40 * 365 * 86400, so we try to
- # reduce it using `simplify_literals` first and then check if it's a Literal.
- first_arg = seq_get(args, 0)
- if not isinstance(simplify_literals(first_arg, root=True), Literal):
- # case: <variant_expr> or other expressions such as columns
- return exp.TimeStrToTime.from_arg_list(args)
+ # Converts calls like `TO_TIME('01:02:03')` into casts
+ if len(args) == 1 and value.is_string and not int_value:
+ return exp.cast(value, kind)
- if first_arg.is_string:
- if is_int(first_arg.this):
- # case: <integer>
- return exp.UnixToTime.from_arg_list(args)
+ # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
+ # cases so we can transpile them, since they're relatively common
+ if kind == exp.DataType.Type.TIMESTAMP:
+ if int_value:
+ return exp.UnixToTime(this=value, scale=seq_get(args, 1))
+ if not is_float(value.this):
+ return build_formatted_time(exp.StrToTime, "snowflake")(args)
- # case: <date_expr>
- return build_formatted_time(exp.StrToTime, "snowflake", default=True)(args)
+ return exp.Anonymous(this=name, expressions=args)
- # case: <numeric_expr>
- return exp.UnixToTime.from_arg_list(args)
+ return _builder
def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
@@ -364,7 +358,13 @@ class Snowflake(Dialect):
precision=seq_get(args, 2),
scale=seq_get(args, 3),
),
- "TO_TIMESTAMP": _build_to_timestamp,
+ "TO_TIME": _build_timestamp("TO_TIME", exp.DataType.Type.TIME),
+ "TO_TIMESTAMP": _build_timestamp("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
+ "TO_TIMESTAMP_LTZ": _build_timestamp(
+ "TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ
+ ),
+ "TO_TIMESTAMP_NTZ": _build_timestamp("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
+ "TO_TIMESTAMP_TZ": _build_timestamp("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
"TO_VARCHAR": exp.ToChar.from_arg_list,
"ZEROIFNULL": _build_if_from_zeroifnull,
}
diff --git a/sqlglot/executor/env.py b/sqlglot/executor/env.py
index 0d0d4813..c51049bb 100644
--- a/sqlglot/executor/env.py
+++ b/sqlglot/executor/env.py
@@ -106,6 +106,13 @@ def cast(this, to):
return this
if isinstance(this, str):
return datetime.date.fromisoformat(this)
+ if to == exp.DataType.Type.TIME:
+ if isinstance(this, datetime.datetime):
+ return this.time()
+ if isinstance(this, datetime.time):
+ return this
+ if isinstance(this, str):
+ return datetime.time.fromisoformat(this)
if to in (exp.DataType.Type.DATETIME, exp.DataType.Type.TIMESTAMP):
if isinstance(this, datetime.datetime):
return this
diff --git a/sqlglot/helper.py b/sqlglot/helper.py
index 0d4547fa..bcc68c3c 100644
--- a/sqlglot/helper.py
+++ b/sqlglot/helper.py
@@ -317,8 +317,16 @@ def find_new_name(taken: t.Collection[str], base: str) -> str:
def is_int(text: str) -> bool:
+ return is_type(text, int)
+
+
+def is_float(text: str) -> bool:
+ return is_type(text, float)
+
+
+def is_type(text: str, target_type: t.Type) -> bool:
try:
- int(text)
+ target_type(text)
return True
except ValueError:
return False
|
tobymao/sqlglot
|
c01ff44b036526807624ba2d1f4b247081e8c56f
|
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index e4cec3a9..9d5a93be 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -40,6 +40,12 @@ WHERE
)""",
)
+ self.validate_identity("SELECT TO_TIMESTAMP(123.4)").selects[0].assert_is(exp.Anonymous)
+ self.validate_identity("SELECT TO_TIME(x) FROM t")
+ self.validate_identity("SELECT TO_TIMESTAMP(x) FROM t")
+ self.validate_identity("SELECT TO_TIMESTAMP_NTZ(x) FROM t")
+ self.validate_identity("SELECT TO_TIMESTAMP_LTZ(x) FROM t")
+ self.validate_identity("SELECT TO_TIMESTAMP_TZ(x) FROM t")
self.validate_identity("TO_DECIMAL(expr, fmt, precision, scale)")
self.validate_identity("ALTER TABLE authors ADD CONSTRAINT c1 UNIQUE (id, email)")
self.validate_identity("RM @parquet_stage", check_command_warning=True)
@@ -198,10 +204,6 @@ WHERE
"SELECT {fn CEILING(5.3)}",
"SELECT CEIL(5.3)",
)
- self.validate_identity(
- "SELECT TO_TIMESTAMP(x) FROM t",
- "SELECT CAST(x AS TIMESTAMPNTZ) FROM t",
- )
self.validate_identity(
"CAST(x AS BYTEINT)",
"CAST(x AS INT)",
@@ -632,9 +634,16 @@ WHERE
self.validate_all(
"SELECT TO_TIMESTAMP('2013-04-05 01:02:03')",
write={
- "bigquery": "SELECT PARSE_TIMESTAMP('%Y-%m-%d %H:%M:%S', '2013-04-05 01:02:03')",
- "snowflake": "SELECT TO_TIMESTAMP('2013-04-05 01:02:03', 'yyyy-mm-DD hh24:mi:ss')",
- "spark": "SELECT TO_TIMESTAMP('2013-04-05 01:02:03', 'yyyy-MM-dd HH:mm:ss')",
+ "bigquery": "SELECT CAST('2013-04-05 01:02:03' AS DATETIME)",
+ "snowflake": "SELECT CAST('2013-04-05 01:02:03' AS TIMESTAMPNTZ)",
+ "spark": "SELECT CAST('2013-04-05 01:02:03' AS TIMESTAMP)",
+ },
+ )
+ self.validate_all(
+ "SELECT TO_TIME('12:05:00')",
+ write={
+ "bigquery": "SELECT CAST('12:05:00' AS TIME)",
+ "snowflake": "SELECT CAST('12:05:00' AS TIME)",
},
)
self.validate_all(
diff --git a/tests/test_executor.py b/tests/test_executor.py
index 4b81359c..1eaca14f 100644
--- a/tests/test_executor.py
+++ b/tests/test_executor.py
@@ -1,7 +1,7 @@
import os
import datetime
import unittest
-from datetime import date
+from datetime import date, time
from multiprocessing import Pool
import duckdb
@@ -640,6 +640,7 @@ class TestExecutor(unittest.TestCase):
("CAST(1 AS TEXT)", "1"),
("CAST('1' AS LONG)", 1),
("CAST('1.1' AS FLOAT)", 1.1),
+ ("CAST('12:05:01' AS TIME)", time(12, 5, 1)),
("COALESCE(NULL)", None),
("COALESCE(NULL, NULL)", None),
("COALESCE(NULL, 'b')", "b"),
|
Support for Snowflake TO_TIME, TIME
**Is your feature request related to a problem? Please describe.**
The executor is not able to run queries containing `TO_TIME` or `TIME` calls when the reader is set to `sqlglot.dialects.Snowflake`.
Example:
```python
def test_time_function():
    query = "SELECT TO_TIME('13:30:00');"
    assert executor.execute(query, read=Snowflake)
```
raises:
```
> raise ExecuteError(f"Step '{node.id}' failed: {e}") from e
E sqlglot.errors.ExecuteError: Step 'Scan: (4541617904)' failed: name 'TO_TIME' is not defined
```
**Describe the solution you'd like**
I would like support for these functions.
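For instance, a hedged sketch of the desired behavior: under the patch above, `TO_TIME('13:30:00')` is lowered to `CAST('13:30:00' AS TIME)`, which the executor's `cast` helper can evaluate.
```python
# Sketch of the requested behavior; the exact result shape is an assumption
# based on the executor's Table interface.
from sqlglot import executor

result = executor.execute("SELECT TO_TIME('13:30:00') AS t", read="snowflake")
print(result.rows)  # expected: [(datetime.time(13, 30),)]
```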
**Describe alternatives you've considered**
I would gladly take on authoring a PR (including tests) if documentation were provided on how to implement this feature. Even if those steps were only provided in this GitHub issue, I could add them to the README.
**Additional context**
N/A
|
0.0
|
c01ff44b036526807624ba2d1f4b247081e8c56f
|
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/test_executor.py::TestExecutor::test_scalar_functions"
] |
[
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_columns",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_imported_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_objects",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_primary_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_schemas",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_sequences",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_tables",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_unique_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_users",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_views",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_storage_integration",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/test_executor.py::TestExecutor::test_aggregate_without_group_by",
"tests/test_executor.py::TestExecutor::test_case_sensitivity",
"tests/test_executor.py::TestExecutor::test_correlated_count",
"tests/test_executor.py::TestExecutor::test_execute_callable",
"tests/test_executor.py::TestExecutor::test_execute_catalog_db_table",
"tests/test_executor.py::TestExecutor::test_execute_subqueries",
"tests/test_executor.py::TestExecutor::test_execute_tables",
"tests/test_executor.py::TestExecutor::test_execute_tpcds",
"tests/test_executor.py::TestExecutor::test_execute_tpch",
"tests/test_executor.py::TestExecutor::test_group_by",
"tests/test_executor.py::TestExecutor::test_nested_table_reference",
"tests/test_executor.py::TestExecutor::test_nested_values",
"tests/test_executor.py::TestExecutor::test_optimized_tpch",
"tests/test_executor.py::TestExecutor::test_py_dialect",
"tests/test_executor.py::TestExecutor::test_set_operations",
"tests/test_executor.py::TestExecutor::test_static_queries",
"tests/test_executor.py::TestExecutor::test_table_depth_mismatch",
"tests/test_executor.py::TestExecutor::test_tables"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-12 20:41:15+00:00
|
mit
| 6,027 |
|
tobymao__sqlglot-3162
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index b19dc852..c0c39030 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1931,6 +1931,7 @@ class Insert(DDL, DML):
arg_types = {
"hint": False,
"with": False,
+ "is_function": False,
"this": True,
"expression": False,
"conflict": False,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index a61b4b75..077e5ff0 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1512,7 +1512,9 @@ class Generator(metaclass=_Generator):
alternative = expression.args.get("alternative")
alternative = f" OR {alternative}" if alternative else ""
ignore = " IGNORE" if expression.args.get("ignore") else ""
-
+ is_function = expression.args.get("is_function")
+ if is_function:
+ this = f"{this} FUNCTION"
this = f"{this} {self.sql(expression, 'this')}"
exists = " IF EXISTS" if expression.args.get("exists") else ""
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 60364141..d934b4c6 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2185,7 +2185,9 @@ class Parser(metaclass=_Parser):
self._match(TokenType.INTO)
comments += ensure_list(self._prev_comments)
self._match(TokenType.TABLE)
- this = self._parse_table(schema=True)
+ is_function = self._match(TokenType.FUNCTION)
+
+ this = self._parse_table(schema=True) if not is_function else self._parse_function()
returning = self._parse_returning()
@@ -2193,6 +2195,7 @@ class Parser(metaclass=_Parser):
exp.Insert,
comments=comments,
hint=hint,
+ is_function=is_function,
this=this,
by_name=self._match_text_seq("BY", "NAME"),
exists=self._parse_exists(),
|
tobymao/sqlglot
|
706fac382fbde6c1c6af8acd277291a3f18f94ee
|
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index edf3da12..8a40899a 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -390,6 +390,17 @@ class TestClickhouse(Validator):
)
self.validate_identity("SYSTEM STOP MERGES foo.bar", check_command_warning=True)
+ self.validate_identity(
+ "INSERT INTO FUNCTION s3('url', 'CSV', 'name String, value UInt32', 'gzip') SELECT name, value FROM existing_table"
+ )
+ self.validate_identity(
+ "INSERT INTO FUNCTION remote('localhost', default.simple_table) VALUES (100, 'inserted via remote()')"
+ )
+ self.validate_identity(
+ """INSERT INTO TABLE FUNCTION hdfs('hdfs://hdfs1:9000/test', 'TSV', 'name String, column2 UInt32, column3 UInt32') VALUES ('test', 1, 2)""",
+ """INSERT INTO FUNCTION hdfs('hdfs://hdfs1:9000/test', 'TSV', 'name String, column2 UInt32, column3 UInt32') VALUES ('test', 1, 2)""",
+ )
+
def test_cte(self):
self.validate_identity("WITH 'x' AS foo SELECT foo")
self.validate_identity("WITH ['c'] AS field_names SELECT field_names")
|
Clickhouse INSERT INTO FUNCTION s3 raises ParseError
**Fully reproducible code snippet**
```
import unittest

from sqlglot import parse_one
from sqlglot.dialects import ClickHouse


class TestClickhouseInsertIntoS3Select(unittest.TestCase):
    def test_parse_one_insert_into_s3_select(self):
        sql = """
            INSERT INTO FUNCTION s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip')
            SELECT name, value FROM existing_table;
        """
        ast = parse_one(sql=sql, dialect=ClickHouse)
        self.assertIsNotNone(ast)


if __name__ == '__main__':
    unittest.main()
```
**Exception**
```
sqlglot.errors.ParseError: Invalid expression / Unexpected token. Line 2, Col: 31.
INSERT INTO FUNCTION s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/test-data.csv.gz', 'CSV', '
```
**Official Documentation**
SQL statement example taken from: https://clickhouse.com/docs/en/sql-reference/table-functions/s3 -
> Insert data into file test-data.csv.gz from existing table
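A short sketch of the behavior the patch above enables; the `FUNCTION` keyword is parsed and preserved on output.
```python
# Sketch: INSERT INTO FUNCTION now round-trips through the clickhouse dialect.
from sqlglot import parse_one

sql = (
    "INSERT INTO FUNCTION s3('url', 'CSV', 'name String, value UInt32', 'gzip') "
    "SELECT name, value FROM existing_table"
)
assert parse_one(sql, dialect="clickhouse").sql(dialect="clickhouse") == sql
```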
Thanks in advance.
|
0.0
|
706fac382fbde6c1c6af8acd277291a3f18f94ee
|
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse"
] |
[
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-18 14:17:33+00:00
|
mit
| 6,028 |
|
tobymao__sqlglot-3166
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 7a8fef4e..21269329 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -621,7 +621,7 @@ class Expression(metaclass=_Expression):
return expression
key = self.arg_key
- value = parent.args[key]
+ value = parent.args.get(key)
if isinstance(value, list):
index = self.index
@@ -639,7 +639,7 @@ class Expression(metaclass=_Expression):
else:
value[index] = expression
parent._set_parent(key, expression, index=index)
- else:
+ elif value is not None:
if expression is None:
parent.args.pop(key)
else:
|
tobymao/sqlglot
|
cdbe39e7ec36b30e211942b1f62ae86946c7b520
|
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
index 71c65fae..984ec22b 100644
--- a/tests/test_optimizer.py
+++ b/tests/test_optimizer.py
@@ -342,6 +342,9 @@ class TestOptimizer(unittest.TestCase):
def test_simplify(self):
self.check_file("simplify", simplify, set_dialect=True)
+ expression = parse_one("SELECT a, c, b FROM table1 WHERE 1 = 1")
+ self.assertEqual(simplify(simplify(expression.find(exp.Where))).sql(), "WHERE TRUE")
+
expression = parse_one("TRUE AND TRUE AND TRUE")
self.assertEqual(exp.true(), optimizer.simplify.simplify(expression))
self.assertEqual(exp.true(), optimizer.simplify.simplify(expression.this))
|
Running `simplify` multiple times fails
**Fully reproducible code snippet**
Please include a fully reproducible code snippet or the input sql, dialect, and expected output.
```
>>> from sqlglot.optimizer.scope import exp
>>> from sqlglot.optimizer.simplify import simplify
>>> from sqlglot import parse_one
>>> ast=parse_one("SELECT a,c,b from table1 where 1 = 1;")
>>> w = ast.find(exp.Where)
>>> w2 = simplify(w)
>>> w2 = simplify(w)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "lib/python3.9/site-packages/sqlglot/optimizer/simplify.py", line 109, in simplify
expression = while_changing(expression, _simplify)
File "lib/python3.9/site-packages/sqlglot/helper.py", line 211, in while_changing
expression = func(expression)
File "lib/python3.9/site-packages/sqlglot/optimizer/simplify.py", line 106, in _simplify
expression.replace(node)
File "lib/python3.9/site-packages/sqlglot/expressions.py", line 624, in replace
value = parent.args[key]
KeyError: 'where'
>>>
```
In my code I run `simplify` multiple times as I reduce some elements of a query; in this case, elements in the WHERE clause.
As the sample code shows, the first call works fine, but the WHERE clause gets removed and the second call fails.
This used to work before, so I guess some check that the key exists has been removed.
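A minimal sketch of the fixed behavior, mirroring the test patch above: once `replace()` tolerates a parent arg that was already removed, repeated `simplify` calls are safe.
```python
from sqlglot import exp, parse_one
from sqlglot.optimizer.simplify import simplify

where = parse_one("SELECT a, c, b FROM table1 WHERE 1 = 1").find(exp.Where)
# Both calls succeed; the second no longer raises KeyError: 'where'.
print(simplify(simplify(where)).sql())  # WHERE TRUE
```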
|
0.0
|
cdbe39e7ec36b30e211942b1f62ae86946c7b520
|
[
"tests/test_optimizer.py::TestOptimizer::test_simplify"
] |
[
"tests/test_optimizer.py::TestOptimizer::test_aggfunc_annotation",
"tests/test_optimizer.py::TestOptimizer::test_binary_annotation",
"tests/test_optimizer.py::TestOptimizer::test_boolean_type_annotation",
"tests/test_optimizer.py::TestOptimizer::test_bracket_annotation",
"tests/test_optimizer.py::TestOptimizer::test_cache_annotation",
"tests/test_optimizer.py::TestOptimizer::test_canonicalize",
"tests/test_optimizer.py::TestOptimizer::test_cast_type_annotation",
"tests/test_optimizer.py::TestOptimizer::test_concat_annotation",
"tests/test_optimizer.py::TestOptimizer::test_cte_column_annotation",
"tests/test_optimizer.py::TestOptimizer::test_derived_tables_column_annotation",
"tests/test_optimizer.py::TestOptimizer::test_eliminate_ctes",
"tests/test_optimizer.py::TestOptimizer::test_eliminate_joins",
"tests/test_optimizer.py::TestOptimizer::test_eliminate_subqueries",
"tests/test_optimizer.py::TestOptimizer::test_expand_alias_refs",
"tests/test_optimizer.py::TestOptimizer::test_file_schema",
"tests/test_optimizer.py::TestOptimizer::test_function_annotation",
"tests/test_optimizer.py::TestOptimizer::test_interval_math_annotation",
"tests/test_optimizer.py::TestOptimizer::test_isolate_table_selects",
"tests/test_optimizer.py::TestOptimizer::test_lateral_annotation",
"tests/test_optimizer.py::TestOptimizer::test_literal_type_annotation",
"tests/test_optimizer.py::TestOptimizer::test_nested_type_annotation",
"tests/test_optimizer.py::TestOptimizer::test_no_pseudocolumn_expansion",
"tests/test_optimizer.py::TestOptimizer::test_normalize",
"tests/test_optimizer.py::TestOptimizer::test_normalize_identifiers",
"tests/test_optimizer.py::TestOptimizer::test_null_annotation",
"tests/test_optimizer.py::TestOptimizer::test_nullable_annotation",
"tests/test_optimizer.py::TestOptimizer::test_optimize",
"tests/test_optimizer.py::TestOptimizer::test_optimize_joins",
"tests/test_optimizer.py::TestOptimizer::test_predicate_annotation",
"tests/test_optimizer.py::TestOptimizer::test_pushdown_cte_alias_columns",
"tests/test_optimizer.py::TestOptimizer::test_pushdown_predicates",
"tests/test_optimizer.py::TestOptimizer::test_pushdown_projection",
"tests/test_optimizer.py::TestOptimizer::test_qualify_columns",
"tests/test_optimizer.py::TestOptimizer::test_qualify_columns__invalid",
"tests/test_optimizer.py::TestOptimizer::test_qualify_columns__with_invisible",
"tests/test_optimizer.py::TestOptimizer::test_qualify_tables",
"tests/test_optimizer.py::TestOptimizer::test_quote_identifiers",
"tests/test_optimizer.py::TestOptimizer::test_quotes",
"tests/test_optimizer.py::TestOptimizer::test_recursive_cte",
"tests/test_optimizer.py::TestOptimizer::test_root_subquery_annotation",
"tests/test_optimizer.py::TestOptimizer::test_schema_with_spaces",
"tests/test_optimizer.py::TestOptimizer::test_scope",
"tests/test_optimizer.py::TestOptimizer::test_scope_warning",
"tests/test_optimizer.py::TestOptimizer::test_semistructured",
"tests/test_optimizer.py::TestOptimizer::test_struct_type_annotation",
"tests/test_optimizer.py::TestOptimizer::test_tpcds",
"tests/test_optimizer.py::TestOptimizer::test_tpch",
"tests/test_optimizer.py::TestOptimizer::test_type_annotation_cache",
"tests/test_optimizer.py::TestOptimizer::test_typeddiv_annotation",
"tests/test_optimizer.py::TestOptimizer::test_unknown_annotation",
"tests/test_optimizer.py::TestOptimizer::test_unnest_annotation",
"tests/test_optimizer.py::TestOptimizer::test_unnest_subqueries",
"tests/test_optimizer.py::TestOptimizer::test_user_defined_type_annotation"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-03-19 00:00:29+00:00
|
mit
| 6,029 |
|
tobymao__sqlglot-3171
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 7ef75ac3..6f2d7603 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -1404,7 +1404,12 @@ class WithinGroup(Expression):
# clickhouse supports scalar ctes
# https://clickhouse.com/docs/en/sql-reference/statements/select/with
class CTE(DerivedTable):
- arg_types = {"this": True, "alias": True, "scalar": False}
+ arg_types = {
+ "this": True,
+ "alias": True,
+ "scalar": False,
+ "materialized": False,
+ }
class TableAlias(Expression):
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index a6fa9a2a..804df019 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1049,7 +1049,14 @@ class Generator(metaclass=_Generator):
def cte_sql(self, expression: exp.CTE) -> str:
alias = self.sql(expression, "alias")
- return f"{alias} AS {self.wrap(expression)}"
+
+ materialized = expression.args.get("materialized")
+ if materialized is False:
+ materialized = "NOT MATERIALIZED "
+ elif materialized:
+ materialized = "MATERIALIZED "
+
+ return f"{alias} AS {materialized or ''}{self.wrap(expression)}"
def tablealias_sql(self, expression: exp.TableAlias) -> str:
alias = self.sql(expression, "this")
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 0c7e9957..208f3364 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2546,8 +2546,19 @@ class Parser(metaclass=_Parser):
self.raise_error("Expected CTE to have alias")
self._match(TokenType.ALIAS)
+
+ if self._match_text_seq("NOT", "MATERIALIZED"):
+ materialized = False
+ elif self._match_text_seq("MATERIALIZED"):
+ materialized = True
+ else:
+ materialized = None
+
return self.expression(
- exp.CTE, this=self._parse_wrapped(self._parse_statement), alias=alias
+ exp.CTE,
+ this=self._parse_wrapped(self._parse_statement),
+ alias=alias,
+ materialized=materialized,
)
def _parse_table_alias(
|
tobymao/sqlglot
|
d859fc0f6eeb0971dab5b22748d1e84425829444
|
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 77c42731..e2a153f5 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -40,13 +40,6 @@ class TestPostgres(Validator):
self.validate_identity("CAST(x AS DATEMULTIRANGE)")
self.validate_identity("SELECT ARRAY[1, 2, 3] @> ARRAY[1, 2]")
self.validate_identity("SELECT ARRAY[1, 2, 3] <@ ARRAY[1, 2]")
- self.validate_all(
- "SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]",
- write={
- "": "SELECT ARRAY_OVERLAPS(ARRAY(1, 2, 3), ARRAY(1, 2))",
- "postgres": "SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]",
- },
- )
self.validate_identity("x$")
self.validate_identity("SELECT ARRAY[1, 2, 3]")
self.validate_identity("SELECT ARRAY(SELECT 1)")
@@ -70,6 +63,9 @@ class TestPostgres(Validator):
self.validate_identity("EXEC AS myfunc @id = 123", check_command_warning=True)
self.validate_identity("SELECT CURRENT_USER")
self.validate_identity("SELECT * FROM ONLY t1")
+ self.validate_identity(
+ "WITH t1 AS MATERIALIZED (SELECT 1), t2 AS NOT MATERIALIZED (SELECT 2) SELECT * FROM t1, t2"
+ )
self.validate_identity(
"""LAST_VALUE("col1") OVER (ORDER BY "col2" RANGE BETWEEN INTERVAL '1 DAY' PRECEDING AND '1 month' FOLLOWING)"""
)
@@ -310,6 +306,13 @@ class TestPostgres(Validator):
)
self.validate_identity("SELECT * FROM t1*", "SELECT * FROM t1")
+ self.validate_all(
+ "SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]",
+ write={
+ "": "SELECT ARRAY_OVERLAPS(ARRAY(1, 2, 3), ARRAY(1, 2))",
+ "postgres": "SELECT ARRAY[1, 2, 3] && ARRAY[1, 2]",
+ },
+ )
self.validate_all(
"SELECT JSON_EXTRACT_PATH_TEXT(x, k1, k2, k3) FROM t",
read={
|
`WITH foo AS MATERIALIZED (` fails in Postgres
This is a valid Postgres query:
```sql
with data as materialized (
select a.n/100 as x, 10*a.n/30 as y from generate_series(1, 1000) as a(n)
) select * from data
```
But it fails to parse because of `materialized`:
```python
from sqlglot import parse_one # using 23.0.1
p = parse_one(
"""
with data as materialized (
select a.n/100 as x, 10*a.n/30 as y from generate_series(1, 1000) as a(n)
) select * from data
""",
dialect="postgres",
)
```
```bash
$ python test.py
Traceback (most recent call last):
File "/Users/beto/Projects/github/superset/test.py", line 3, in <module>
p = parse_one(
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/__init__.py", line 124, in parse_one
result = dialect.parse(sql, **opts)
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/dialects/dialect.py", line 490, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 1153, in parse
return self._parse(
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 1219, in _parse
expressions.append(parse_method(self))
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 1427, in _parse_statement
expression = self._parse_set_operations(expression) if expression else self._parse_select()
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 2426, in _parse_select
cte = self._parse_with()
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 2532, in _parse_with
expressions.append(self._parse_cte())
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 2550, in _parse_cte
exp.CTE, this=self._parse_wrapped(self._parse_statement), alias=alias
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 5548, in _parse_wrapped
self.raise_error("Expecting (")
File "/Users/beto/.pyenv/versions/superset-3.9.2/lib/python3.9/site-packages/sqlglot/parser.py", line 1263, in raise_error
raise error
sqlglot.errors.ParseError: Expecting (. Line 2, Col: 25.
with data as materialized (
select a.n/100 as x, 10*a.n/30 as y from generate_series(1, 1000) as a(n)
) select * from data
```
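Under the patch above, the hint parses and round-trips; a minimal sketch:
```python
from sqlglot import parse_one

sql = (
    "WITH t1 AS MATERIALIZED (SELECT 1), "
    "t2 AS NOT MATERIALIZED (SELECT 2) "
    "SELECT * FROM t1, t2"
)
# MATERIALIZED / NOT MATERIALIZED are kept on the CTE nodes and re-emitted.
assert parse_one(sql, dialect="postgres").sql(dialect="postgres") == sql
```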
|
0.0
|
d859fc0f6eeb0971dab5b22748d1e84425829444
|
[
"tests/dialects/test_postgres.py::TestPostgres::test_postgres"
] |
[
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest_json_array",
"tests/dialects/test_postgres.py::TestPostgres::test_variance"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-19 18:55:41+00:00
|
mit
| 6,030 |
|
tobymao__sqlglot-3203
|
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
index a41b6ea8..70066677 100644
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -171,6 +171,8 @@ class Redshift(Postgres):
),
exp.SortKeyProperty: lambda self,
e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ exp.StartsWith: lambda self,
+ e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
exp.TableSample: no_tablesample_sql,
exp.TsOrDsAdd: date_delta_sql("DATEADD"),
exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index da206544..0cbaf20e 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2394,13 +2394,7 @@ class OutputModelProperty(Property):
class IsolatedLoadingProperty(Property):
- arg_types = {
- "no": False,
- "concurrent": False,
- "for_all": False,
- "for_insert": False,
- "for_none": False,
- }
+ arg_types = {"no": False, "concurrent": False, "target": False}
class JournalProperty(Property):
@@ -2608,6 +2602,11 @@ class UnloggedProperty(Property):
arg_types = {}
+# https://learn.microsoft.com/en-us/sql/t-sql/statements/create-view-transact-sql?view=sql-server-ver16
+class ViewAttributeProperty(Property):
+ arg_types = {"this": True}
+
+
class VolatileProperty(Property):
arg_types = {"this": False}
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 804df019..721efb61 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -46,9 +46,11 @@ class Generator(metaclass=_Generator):
'safe': Only quote identifiers that are case insensitive.
normalize: Whether to normalize identifiers to lowercase.
Default: False.
- pad: The pad size in a formatted string.
+ pad: The pad size in a formatted string. For example, this affects the indentation of
+ a projection in a query, relative to its nesting level.
Default: 2.
- indent: The indentation size in a formatted string.
+ indent: The indentation size in a formatted string. For example, this affects the
+ indentation of subqueries and filters under a `WHERE` clause.
Default: 2.
normalize_functions: How to normalize function names. Possible values are:
"upper" or True (default): Convert names to uppercase.
@@ -141,6 +143,7 @@ class Generator(metaclass=_Generator):
exp.UppercaseColumnConstraint: lambda *_: "UPPERCASE",
exp.UnloggedProperty: lambda *_: "UNLOGGED",
exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]),
+ exp.ViewAttributeProperty: lambda self, e: f"WITH {self.sql(e, 'this')}",
exp.VolatileProperty: lambda *_: "VOLATILE",
exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
exp.WithOperator: lambda self, e: f"{self.sql(e, 'this')} WITH {self.sql(e, 'op')}",
@@ -451,6 +454,7 @@ class Generator(metaclass=_Generator):
exp.TransformModelProperty: exp.Properties.Location.POST_SCHEMA,
exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA,
exp.UnloggedProperty: exp.Properties.Location.POST_CREATE,
+ exp.ViewAttributeProperty: exp.Properties.Location.POST_SCHEMA,
exp.VolatileProperty: exp.Properties.Location.POST_CREATE,
exp.WithDataProperty: exp.Properties.Location.POST_EXPRESSION,
exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME,
@@ -1442,15 +1446,9 @@ class Generator(metaclass=_Generator):
no = " NO" if no else ""
concurrent = expression.args.get("concurrent")
concurrent = " CONCURRENT" if concurrent else ""
-
- for_ = ""
- if expression.args.get("for_all"):
- for_ = " FOR ALL"
- elif expression.args.get("for_insert"):
- for_ = " FOR INSERT"
- elif expression.args.get("for_none"):
- for_ = " FOR NONE"
- return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
+ target = self.sql(expression, "target")
+ target = f" {target}" if target else ""
+ return f"WITH{no}{concurrent} ISOLATED LOADING{target}"
def partitionboundspec_sql(self, expression: exp.PartitionBoundSpec) -> str:
if isinstance(expression.this, list):
@@ -3221,7 +3219,7 @@ class Generator(metaclass=_Generator):
num_sqls = len(expressions)
# These are calculated once in case we have the leading_comma / pretty option set, correspondingly
- pad = " " * self.pad
+ pad = " " * len(sep)
stripped_sep = sep.strip()
result_sqls = []
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index b33af74a..be0b1084 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1026,6 +1026,8 @@ class Parser(metaclass=_Parser):
),
}
+ ISOLATED_LOADING_OPTIONS: OPTIONS_TYPE = {"FOR": ("ALL", "INSERT", "NONE")}
+
USABLES: OPTIONS_TYPE = dict.fromkeys(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"), tuple())
CAST_ACTIONS: OPTIONS_TYPE = dict.fromkeys(("RENAME", "ADD"), ("FIELDS",))
@@ -1041,6 +1043,8 @@ class Parser(metaclass=_Parser):
TABLE_INDEX_HINT_TOKENS = {TokenType.FORCE, TokenType.IGNORE, TokenType.USE}
+ VIEW_ATTRIBUTES = {"ENCRYPTION", "SCHEMABINDING", "VIEW_METADATA"}
+
WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}
WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER}
WINDOW_SIDES = {"FOLLOWING", "PRECEDING"}
@@ -1798,15 +1802,16 @@ class Parser(metaclass=_Parser):
return prop
- def _parse_with_property(
- self,
- ) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
+ def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
if self._match(TokenType.L_PAREN, advance=False):
return self._parse_wrapped_properties()
if self._match_text_seq("JOURNAL"):
return self._parse_withjournaltable()
+ if self._match_texts(self.VIEW_ATTRIBUTES):
+ return self.expression(exp.ViewAttributeProperty, this=self._prev.text.upper())
+
if self._match_text_seq("DATA"):
return self._parse_withdata(no=False)
elif self._match_text_seq("NO", "DATA"):
@@ -1954,20 +1959,18 @@ class Parser(metaclass=_Parser):
autotemp=autotemp,
)
- def _parse_withisolatedloading(self) -> exp.IsolatedLoadingProperty:
+ def _parse_withisolatedloading(self) -> t.Optional[exp.IsolatedLoadingProperty]:
+ index = self._index
no = self._match_text_seq("NO")
concurrent = self._match_text_seq("CONCURRENT")
- self._match_text_seq("ISOLATED", "LOADING")
- for_all = self._match_text_seq("FOR", "ALL")
- for_insert = self._match_text_seq("FOR", "INSERT")
- for_none = self._match_text_seq("FOR", "NONE")
+
+ if not self._match_text_seq("ISOLATED", "LOADING"):
+ self._retreat(index)
+ return None
+
+ target = self._parse_var_from_options(self.ISOLATED_LOADING_OPTIONS, raise_unmatched=False)
return self.expression(
- exp.IsolatedLoadingProperty,
- no=no,
- concurrent=concurrent,
- for_all=for_all,
- for_insert=for_insert,
- for_none=for_none,
+ exp.IsolatedLoadingProperty, no=no, concurrent=concurrent, target=target
)
def _parse_locking(self) -> exp.LockingProperty:
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 824ebe74..1ba8b2ad 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -565,8 +565,7 @@ class Tokenizer(metaclass=_Tokenizer):
"~": TokenType.TILDA,
"?": TokenType.PLACEHOLDER,
"@": TokenType.PARAMETER,
- # used for breaking a var like x'y' but nothing else
- # the token type doesn't matter
+ # Used for breaking a var like x'y' but nothing else the token type doesn't matter
"'": TokenType.QUOTE,
"`": TokenType.IDENTIFIER,
'"': TokenType.IDENTIFIER,
@@ -892,7 +891,7 @@ class Tokenizer(metaclass=_Tokenizer):
COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
- # handle numeric literals like in hive (3L = BIGINT)
+ # Handle numeric literals like in hive (3L = BIGINT)
NUMERIC_LITERALS: t.Dict[str, str] = {}
COMMENTS = ["--", ("/*", "*/")]
@@ -965,8 +964,7 @@ class Tokenizer(metaclass=_Tokenizer):
while self.size and not self._end:
current = self._current
- # skip spaces inline rather than iteratively call advance()
- # for performance reasons
+ # Skip spaces here rather than iteratively calling advance() for performance reasons
while current < self.size:
char = self.sql[current]
@@ -975,12 +973,10 @@ class Tokenizer(metaclass=_Tokenizer):
else:
break
- n = current - self._current
- self._start = current
- self._advance(n if n > 1 else 1)
+ offset = current - self._current if current > self._current else 1
- if self._char is None:
- break
+ self._start = current
+ self._advance(offset)
if not self._char.isspace():
if self._char.isdigit():
@@ -1008,12 +1004,9 @@ class Tokenizer(metaclass=_Tokenizer):
def _advance(self, i: int = 1, alnum: bool = False) -> None:
if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
# Ensures we don't count an extra line if we get a \r\n line break sequence
- if self._char == "\r" and self._peek == "\n":
- i = 2
- self._start += 1
-
- self._col = 1
- self._line += 1
+ if not (self._char == "\r" and self._peek == "\n"):
+ self._col = 1
+ self._line += 1
else:
self._col += i
diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs
index 2c90a650..881417e5 100644
--- a/sqlglotrs/src/tokenizer.rs
+++ b/sqlglotrs/src/tokenizer.rs
@@ -118,8 +118,27 @@ impl<'a> TokenizerState<'a> {
fn scan(&mut self, until_peek_char: Option<char>) -> Result<(), TokenizerError> {
while self.size > 0 && !self.is_end {
- self.start = self.current;
- self.advance(1)?;
+ let mut current = self.current;
+
+ // Skip spaces here rather than iteratively calling advance() for performance reasons
+ while current < self.size {
+ let ch = self.char_at(current)?;
+
+ if ch == ' ' || ch == '\t' {
+ current += 1;
+ } else {
+ break;
+ }
+ }
+
+ let offset = if current > self.current {
+ current - self.current
+ } else {
+ 1
+ };
+
+ self.start = current;
+ self.advance(offset as isize)?;
if self.current_char == '\0' {
break;
@@ -153,16 +172,12 @@ impl<'a> TokenizerState<'a> {
}
fn advance(&mut self, i: isize) -> Result<(), TokenizerError> {
- let mut i = i;
if Some(&self.token_types.break_) == self.settings.white_space.get(&self.current_char) {
// Ensures we don't count an extra line if we get a \r\n line break sequence.
- if self.current_char == '\r' && self.peek_char == '\n' {
- i = 2;
- self.start += 1;
+ if ! (self.current_char == '\r' && self.peek_char == '\n') {
+ self.column = 1;
+ self.line += 1;
}
-
- self.column = 1;
- self.line += 1;
} else {
self.column = self.column.wrapping_add_signed(i);
}
|
tobymao/sqlglot
|
3620b9974c28df7d4d189ebd5fdcb675f41a275d
|
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index 896ee451..7affe31f 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -139,6 +139,15 @@ class TestRedshift(Validator):
"presto": "LENGTH(x)",
},
)
+ self.validate_all(
+ "x LIKE 'abc' || '%'",
+ read={
+ "duckdb": "STARTS_WITH(x, 'abc')",
+ },
+ write={
+ "redshift": "x LIKE 'abc' || '%'",
+ },
+ )
self.validate_all(
"SELECT SYSDATE",
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 4efd7b91..aefd8575 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -742,6 +742,9 @@ class TestTSQL(Validator):
)
def test_ddl(self):
+ for view_attr in ("ENCRYPTION", "SCHEMABINDING", "VIEW_METADATA"):
+ self.validate_identity(f"CREATE VIEW a.b WITH {view_attr} AS SELECT * FROM x")
+
expression = parse_one("ALTER TABLE dbo.DocExe DROP CONSTRAINT FK_Column_B", dialect="tsql")
self.assertIsInstance(expression, exp.AlterTable)
self.assertIsInstance(expression.args["actions"][0], exp.Drop)
diff --git a/tests/test_tokens.py b/tests/test_tokens.py
index 970c1ac2..29ef5b61 100644
--- a/tests/test_tokens.py
+++ b/tests/test_tokens.py
@@ -85,6 +85,18 @@ x"""
],
)
+ for simple_query in ("SELECT 1\r\n", "\r\nSELECT 1"):
+ tokens = Tokenizer().tokenize(simple_query)
+ tokens = [(token.token_type, token.text) for token in tokens]
+
+ self.assertEqual(
+ tokens,
+ [
+ (TokenType.SELECT, "SELECT"),
+ (TokenType.NUMBER, "1"),
+ ],
+ )
+
def test_command(self):
tokens = Tokenizer().tokenize("SHOW;")
self.assertEqual(tokens[0].token_type, TokenType.SHOW)
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index 0170e230..f6fd2f9a 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -66,6 +66,24 @@ class TestTranspile(unittest.TestCase):
)
def test_leading_comma(self):
+ self.validate(
+ "SELECT a, b, c FROM (SELECT a, b, c FROM t)",
+ "SELECT\n"
+ " a\n"
+ " , b\n"
+ " , c\n"
+ "FROM (\n"
+ " SELECT\n"
+ " a\n"
+ " , b\n"
+ " , c\n"
+ " FROM t\n"
+ ")",
+ leading_comma=True,
+ pretty=True,
+ pad=4,
+ indent=4,
+ )
self.validate(
"SELECT FOO, BAR, BAZ",
"SELECT\n FOO\n , BAR\n , BAZ",
|
Unable to parse view definition with SCHEMABINDING
While parsing MSSQL view definitions for lineage information, I found that this common syntax was a source of parser errors:
```
from sqlglot import parse_one, exp
# find all tables (x, y, z)
print(repr(parse_one("CREATE VIEW a.b WITH SCHEMABINDING AS SELECT * FROM x JOIN y JOIN z")))
```
Removing the "WITH SCHEMABINDING" element makes it parse correctly.
https://learn.microsoft.com/en-us/sql/t-sql/statements/create-view-transact-sql?view=sql-server-ver16
The library is already finding the WITH token and looking for properties, but it does not expect the SCHEMABINDING property at that point.
Adding it to the property parsing is pretty simple (happy to submit a PR + test), and with that change the statement is parsed properly.
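A minimal sketch of the fixed behavior; the lineage-extraction step is illustrative rather than part of the patch.
```python
from sqlglot import exp, parse_one

ast = parse_one(
    "CREATE VIEW a.b WITH SCHEMABINDING AS SELECT * FROM x JOIN y JOIN z",
    dialect="tsql",
)
# The view body parses again, so its source tables can be collected.
print(sorted(t.name for t in ast.expression.find_all(exp.Table)))  # ['x', 'y', 'z']
```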
|
0.0
|
3620b9974c28df7d4d189ebd5fdcb675f41a275d
|
[
"tests/dialects/test_redshift.py::TestRedshift::test_redshift",
"tests/test_tokens.py::TestTokens::test_crlf",
"tests/test_transpile.py::TestTranspile::test_leading_comma"
] |
[
"tests/dialects/test_redshift.py::TestRedshift::test_column_unnesting",
"tests/dialects/test_redshift.py::TestRedshift::test_create_table_like",
"tests/dialects/test_redshift.py::TestRedshift::test_identity",
"tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding",
"tests/dialects/test_redshift.py::TestRedshift::test_rename_table",
"tests/dialects/test_redshift.py::TestRedshift::test_values",
"tests/dialects/test_redshift.py::TestRedshift::test_varchar_max",
"tests/dialects/test_tsql.py::TestTSQL::test__types_ints",
"tests/dialects/test_tsql.py::TestTSQL::test_add_date",
"tests/dialects/test_tsql.py::TestTSQL::test_charindex",
"tests/dialects/test_tsql.py::TestTSQL::test_commit",
"tests/dialects/test_tsql.py::TestTSQL::test_convert",
"tests/dialects/test_tsql.py::TestTSQL::test_current_user",
"tests/dialects/test_tsql.py::TestTSQL::test_date_diff",
"tests/dialects/test_tsql.py::TestTSQL::test_datefromparts",
"tests/dialects/test_tsql.py::TestTSQL::test_datename",
"tests/dialects/test_tsql.py::TestTSQL::test_datepart",
"tests/dialects/test_tsql.py::TestTSQL::test_ddl",
"tests/dialects/test_tsql.py::TestTSQL::test_eomonth",
"tests/dialects/test_tsql.py::TestTSQL::test_format",
"tests/dialects/test_tsql.py::TestTSQL::test_fullproc",
"tests/dialects/test_tsql.py::TestTSQL::test_hints",
"tests/dialects/test_tsql.py::TestTSQL::test_identifier_prefixes",
"tests/dialects/test_tsql.py::TestTSQL::test_insert_cte",
"tests/dialects/test_tsql.py::TestTSQL::test_isnull",
"tests/dialects/test_tsql.py::TestTSQL::test_json",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_subquery",
"tests/dialects/test_tsql.py::TestTSQL::test_lateral_table_valued_function",
"tests/dialects/test_tsql.py::TestTSQL::test_len",
"tests/dialects/test_tsql.py::TestTSQL::test_openjson",
"tests/dialects/test_tsql.py::TestTSQL::test_option",
"tests/dialects/test_tsql.py::TestTSQL::test_procedure_keywords",
"tests/dialects/test_tsql.py::TestTSQL::test_qualify_derived_table_outputs",
"tests/dialects/test_tsql.py::TestTSQL::test_replicate",
"tests/dialects/test_tsql.py::TestTSQL::test_rollback",
"tests/dialects/test_tsql.py::TestTSQL::test_set",
"tests/dialects/test_tsql.py::TestTSQL::test_string",
"tests/dialects/test_tsql.py::TestTSQL::test_system_time",
"tests/dialects/test_tsql.py::TestTSQL::test_temp_table",
"tests/dialects/test_tsql.py::TestTSQL::test_temporal_table",
"tests/dialects/test_tsql.py::TestTSQL::test_top",
"tests/dialects/test_tsql.py::TestTSQL::test_transaction",
"tests/dialects/test_tsql.py::TestTSQL::test_tsql",
"tests/dialects/test_tsql.py::TestTSQL::test_types",
"tests/dialects/test_tsql.py::TestTSQL::test_types_bin",
"tests/dialects/test_tsql.py::TestTSQL::test_types_date",
"tests/dialects/test_tsql.py::TestTSQL::test_types_decimals",
"tests/dialects/test_tsql.py::TestTSQL::test_types_string",
"tests/dialects/test_tsql.py::TestTSQL::test_udf",
"tests/test_tokens.py::TestTokens::test_command",
"tests/test_tokens.py::TestTokens::test_comment_attachment",
"tests/test_tokens.py::TestTokens::test_error_msg",
"tests/test_tokens.py::TestTokens::test_jinja",
"tests/test_tokens.py::TestTokens::test_space_keywords",
"tests/test_tokens.py::TestTokens::test_token_line_col",
"tests/test_transpile.py::TestTranspile::test_alias",
"tests/test_transpile.py::TestTranspile::test_alter",
"tests/test_transpile.py::TestTranspile::test_command_identity",
"tests/test_transpile.py::TestTranspile::test_comments",
"tests/test_transpile.py::TestTranspile::test_error_level",
"tests/test_transpile.py::TestTranspile::test_extract",
"tests/test_transpile.py::TestTranspile::test_identify_lambda",
"tests/test_transpile.py::TestTranspile::test_identity",
"tests/test_transpile.py::TestTranspile::test_if",
"tests/test_transpile.py::TestTranspile::test_index_offset",
"tests/test_transpile.py::TestTranspile::test_normalize_name",
"tests/test_transpile.py::TestTranspile::test_not_range",
"tests/test_transpile.py::TestTranspile::test_paren",
"tests/test_transpile.py::TestTranspile::test_partial",
"tests/test_transpile.py::TestTranspile::test_pretty",
"tests/test_transpile.py::TestTranspile::test_pretty_line_breaks",
"tests/test_transpile.py::TestTranspile::test_recursion",
"tests/test_transpile.py::TestTranspile::test_some",
"tests/test_transpile.py::TestTranspile::test_space",
"tests/test_transpile.py::TestTranspile::test_time",
"tests/test_transpile.py::TestTranspile::test_types",
"tests/test_transpile.py::TestTranspile::test_unary",
"tests/test_transpile.py::TestTranspile::test_unsupported_level",
"tests/test_transpile.py::TestTranspile::test_weird_chars",
"tests/test_transpile.py::TestTranspile::test_with"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-22 16:17:57+00:00
|
mit
| 6,031 |
|
tobymao__sqlglot-3204
|
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
index a41b6ea8..70066677 100644
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -171,6 +171,8 @@ class Redshift(Postgres):
),
exp.SortKeyProperty: lambda self,
e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ exp.StartsWith: lambda self,
+ e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
exp.TableSample: no_tablesample_sql,
exp.TsOrDsAdd: date_delta_sql("DATEADD"),
exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 804df019..3186becf 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -46,9 +46,11 @@ class Generator(metaclass=_Generator):
'safe': Only quote identifiers that are case insensitive.
normalize: Whether to normalize identifiers to lowercase.
Default: False.
- pad: The pad size in a formatted string.
+ pad: The pad size in a formatted string. For example, this affects the indentation of
+ a projection in a query, relative to its nesting level.
Default: 2.
- indent: The indentation size in a formatted string.
+ indent: The indentation size in a formatted string. For example, this affects the
+ indentation of subqueries and filters under a `WHERE` clause.
Default: 2.
normalize_functions: How to normalize function names. Possible values are:
"upper" or True (default): Convert names to uppercase.
@@ -3221,7 +3223,7 @@ class Generator(metaclass=_Generator):
num_sqls = len(expressions)
# These are calculated once in case we have the leading_comma / pretty option set, correspondingly
- pad = " " * self.pad
+ pad = " " * len(sep)
stripped_sep = sep.strip()
result_sqls = []
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 824ebe74..1ba8b2ad 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -565,8 +565,7 @@ class Tokenizer(metaclass=_Tokenizer):
"~": TokenType.TILDA,
"?": TokenType.PLACEHOLDER,
"@": TokenType.PARAMETER,
- # used for breaking a var like x'y' but nothing else
- # the token type doesn't matter
+ # Used for breaking a var like x'y' but nothing else the token type doesn't matter
"'": TokenType.QUOTE,
"`": TokenType.IDENTIFIER,
'"': TokenType.IDENTIFIER,
@@ -892,7 +891,7 @@ class Tokenizer(metaclass=_Tokenizer):
COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
- # handle numeric literals like in hive (3L = BIGINT)
+ # Handle numeric literals like in hive (3L = BIGINT)
NUMERIC_LITERALS: t.Dict[str, str] = {}
COMMENTS = ["--", ("/*", "*/")]
@@ -965,8 +964,7 @@ class Tokenizer(metaclass=_Tokenizer):
while self.size and not self._end:
current = self._current
- # skip spaces inline rather than iteratively call advance()
- # for performance reasons
+ # Skip spaces here rather than iteratively calling advance() for performance reasons
while current < self.size:
char = self.sql[current]
@@ -975,12 +973,10 @@ class Tokenizer(metaclass=_Tokenizer):
else:
break
- n = current - self._current
- self._start = current
- self._advance(n if n > 1 else 1)
+ offset = current - self._current if current > self._current else 1
- if self._char is None:
- break
+ self._start = current
+ self._advance(offset)
if not self._char.isspace():
if self._char.isdigit():
@@ -1008,12 +1004,9 @@ class Tokenizer(metaclass=_Tokenizer):
def _advance(self, i: int = 1, alnum: bool = False) -> None:
if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
# Ensures we don't count an extra line if we get a \r\n line break sequence
- if self._char == "\r" and self._peek == "\n":
- i = 2
- self._start += 1
-
- self._col = 1
- self._line += 1
+ if not (self._char == "\r" and self._peek == "\n"):
+ self._col = 1
+ self._line += 1
else:
self._col += i
diff --git a/sqlglotrs/src/tokenizer.rs b/sqlglotrs/src/tokenizer.rs
index 2c90a650..881417e5 100644
--- a/sqlglotrs/src/tokenizer.rs
+++ b/sqlglotrs/src/tokenizer.rs
@@ -118,8 +118,27 @@ impl<'a> TokenizerState<'a> {
fn scan(&mut self, until_peek_char: Option<char>) -> Result<(), TokenizerError> {
while self.size > 0 && !self.is_end {
- self.start = self.current;
- self.advance(1)?;
+ let mut current = self.current;
+
+ // Skip spaces here rather than iteratively calling advance() for performance reasons
+ while current < self.size {
+ let ch = self.char_at(current)?;
+
+ if ch == ' ' || ch == '\t' {
+ current += 1;
+ } else {
+ break;
+ }
+ }
+
+ let offset = if current > self.current {
+ current - self.current
+ } else {
+ 1
+ };
+
+ self.start = current;
+ self.advance(offset as isize)?;
if self.current_char == '\0' {
break;
@@ -153,16 +172,12 @@ impl<'a> TokenizerState<'a> {
}
fn advance(&mut self, i: isize) -> Result<(), TokenizerError> {
- let mut i = i;
if Some(&self.token_types.break_) == self.settings.white_space.get(&self.current_char) {
// Ensures we don't count an extra line if we get a \r\n line break sequence.
- if self.current_char == '\r' && self.peek_char == '\n' {
- i = 2;
- self.start += 1;
+ if ! (self.current_char == '\r' && self.peek_char == '\n') {
+ self.column = 1;
+ self.line += 1;
}
-
- self.column = 1;
- self.line += 1;
} else {
self.column = self.column.wrapping_add_signed(i);
}
|
tobymao/sqlglot
|
3620b9974c28df7d4d189ebd5fdcb675f41a275d
|
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index 896ee451..7affe31f 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -139,6 +139,15 @@ class TestRedshift(Validator):
"presto": "LENGTH(x)",
},
)
+ self.validate_all(
+ "x LIKE 'abc' || '%'",
+ read={
+ "duckdb": "STARTS_WITH(x, 'abc')",
+ },
+ write={
+ "redshift": "x LIKE 'abc' || '%'",
+ },
+ )
self.validate_all(
"SELECT SYSDATE",
diff --git a/tests/test_tokens.py b/tests/test_tokens.py
index 970c1ac2..29ef5b61 100644
--- a/tests/test_tokens.py
+++ b/tests/test_tokens.py
@@ -85,6 +85,18 @@ x"""
],
)
+ for simple_query in ("SELECT 1\r\n", "\r\nSELECT 1"):
+ tokens = Tokenizer().tokenize(simple_query)
+ tokens = [(token.token_type, token.text) for token in tokens]
+
+ self.assertEqual(
+ tokens,
+ [
+ (TokenType.SELECT, "SELECT"),
+ (TokenType.NUMBER, "1"),
+ ],
+ )
+
def test_command(self):
tokens = Tokenizer().tokenize("SHOW;")
self.assertEqual(tokens[0].token_type, TokenType.SHOW)
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index 0170e230..f6fd2f9a 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -66,6 +66,24 @@ class TestTranspile(unittest.TestCase):
)
def test_leading_comma(self):
+ self.validate(
+ "SELECT a, b, c FROM (SELECT a, b, c FROM t)",
+ "SELECT\n"
+ " a\n"
+ " , b\n"
+ " , c\n"
+ "FROM (\n"
+ " SELECT\n"
+ " a\n"
+ " , b\n"
+ " , c\n"
+ " FROM t\n"
+ ")",
+ leading_comma=True,
+ pretty=True,
+ pad=4,
+ indent=4,
+ )
self.validate(
"SELECT FOO, BAR, BAZ",
"SELECT\n FOO\n , BAR\n , BAZ",
|
Windows line endings cause IndexOutOfRange during tokenization
I am parsing view and stored procedure definitions from a Windows-based MSSQL server.
I am getting IndexOutOfRange failures, and strange parsing contexts where the leading character has been removed.
I think I have identified the problem and a solution - are you open to a PR (with a unit test)?
The issue is this, in `_advance`:
https://github.com/tobymao/sqlglot/blob/a18444dbd7ccfc05b189dcb2005c85a1048cc8de/sqlglot/tokens.py#L1008
- If it sees `\r\n`, it sets `i` to 2.
- `_current` is increased by `i`.
- `_char` always looks at position `_current - 1`.
- This can lead to it looking past the end of the source text, since `_current - 1` can end up greater than `size`.
I believe the fix is something like changing:
https://github.com/tobymao/sqlglot/blob/a18444dbd7ccfc05b189dcb2005c85a1048cc8de/sqlglot/tokens.py#L1022
to
```
if self._end:
self._peek = ""
self._char = self.sql[self.size - 1]
else:
self._char = self.sql[self._current - 1]
self._peek = self.sql[self._current]
```
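A minimal repro sketch of the failure mode, using the same CRLF inputs that the test patch above exercises (it mirrors the new assertions in tests/test_tokens.py; on an affected build these inputs drive the tokenizer past the end of the source text):
```
# Repro sketch: tokenize queries containing a \r\n line break sequence.
# Mirrors the inputs added in tests/test_tokens.py; on a fixed build both
# queries tokenize to [(TokenType.SELECT, 'SELECT'), (TokenType.NUMBER, '1')].
from sqlglot.tokens import Tokenizer

for sql in ("SELECT 1\r\n", "\r\nSELECT 1"):
    tokens = Tokenizer().tokenize(sql)
    print([(token.token_type, token.text) for token in tokens])
```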
|
0.0
|
3620b9974c28df7d4d189ebd5fdcb675f41a275d
|
[
"tests/dialects/test_redshift.py::TestRedshift::test_redshift",
"tests/test_tokens.py::TestTokens::test_crlf",
"tests/test_transpile.py::TestTranspile::test_leading_comma"
] |
[
"tests/dialects/test_redshift.py::TestRedshift::test_column_unnesting",
"tests/dialects/test_redshift.py::TestRedshift::test_create_table_like",
"tests/dialects/test_redshift.py::TestRedshift::test_identity",
"tests/dialects/test_redshift.py::TestRedshift::test_no_schema_binding",
"tests/dialects/test_redshift.py::TestRedshift::test_rename_table",
"tests/dialects/test_redshift.py::TestRedshift::test_values",
"tests/dialects/test_redshift.py::TestRedshift::test_varchar_max",
"tests/test_tokens.py::TestTokens::test_command",
"tests/test_tokens.py::TestTokens::test_comment_attachment",
"tests/test_tokens.py::TestTokens::test_error_msg",
"tests/test_tokens.py::TestTokens::test_jinja",
"tests/test_tokens.py::TestTokens::test_space_keywords",
"tests/test_tokens.py::TestTokens::test_token_line_col",
"tests/test_transpile.py::TestTranspile::test_alias",
"tests/test_transpile.py::TestTranspile::test_alter",
"tests/test_transpile.py::TestTranspile::test_command_identity",
"tests/test_transpile.py::TestTranspile::test_comments",
"tests/test_transpile.py::TestTranspile::test_error_level",
"tests/test_transpile.py::TestTranspile::test_extract",
"tests/test_transpile.py::TestTranspile::test_identify_lambda",
"tests/test_transpile.py::TestTranspile::test_identity",
"tests/test_transpile.py::TestTranspile::test_if",
"tests/test_transpile.py::TestTranspile::test_index_offset",
"tests/test_transpile.py::TestTranspile::test_normalize_name",
"tests/test_transpile.py::TestTranspile::test_not_range",
"tests/test_transpile.py::TestTranspile::test_paren",
"tests/test_transpile.py::TestTranspile::test_partial",
"tests/test_transpile.py::TestTranspile::test_pretty",
"tests/test_transpile.py::TestTranspile::test_pretty_line_breaks",
"tests/test_transpile.py::TestTranspile::test_recursion",
"tests/test_transpile.py::TestTranspile::test_some",
"tests/test_transpile.py::TestTranspile::test_space",
"tests/test_transpile.py::TestTranspile::test_time",
"tests/test_transpile.py::TestTranspile::test_types",
"tests/test_transpile.py::TestTranspile::test_unary",
"tests/test_transpile.py::TestTranspile::test_unsupported_level",
"tests/test_transpile.py::TestTranspile::test_weird_chars",
"tests/test_transpile.py::TestTranspile::test_with"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-22 17:58:41+00:00
|
mit
| 6,032 |
|
tobymao__sqlglot-3223
|
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 4ea89b21..aef2a759 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -291,6 +291,7 @@ class MySQL(Dialect):
"DAYOFWEEK": lambda args: exp.DayOfWeek(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
"DAYOFYEAR": lambda args: exp.DayOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
"INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
+ "FROM_UNIXTIME": build_formatted_time(exp.UnixToTime, "mysql"),
"ISNULL": isnull_to_is_null,
"LOCATE": locate_to_strposition,
"MAKETIME": exp.TimeFromParts.from_arg_list,
@@ -720,6 +721,7 @@ class MySQL(Dialect):
exp.TsOrDsAdd: _date_add_sql("ADD"),
exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression),
exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+ exp.UnixToTime: lambda self, e: self.func("FROM_UNIXTIME", e.this, self.format_time(e)),
exp.Week: _remove_ts_or_ds_to_date(),
exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")),
exp.Year: _remove_ts_or_ds_to_date(),
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
index 70066677..1f0c411e 100644
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -176,6 +176,8 @@ class Redshift(Postgres):
exp.TableSample: no_tablesample_sql,
exp.TsOrDsAdd: date_delta_sql("DATEADD"),
exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
+ exp.UnixToTime: lambda self,
+ e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')",
}
# Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 0cbaf20e..2ec0c3f2 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -5707,7 +5707,14 @@ class UnixToStr(Func):
# https://prestodb.io/docs/current/functions/datetime.html
# presto has weird zone/hours/minutes
class UnixToTime(Func):
- arg_types = {"this": True, "scale": False, "zone": False, "hours": False, "minutes": False}
+ arg_types = {
+ "this": True,
+ "scale": False,
+ "zone": False,
+ "hours": False,
+ "minutes": False,
+ "format": False,
+ }
SECONDS = Literal.number(0)
DECIS = Literal.number(1)
|
tobymao/sqlglot
|
e7c91584ac7fb35082ebd1d4873f13307ea848af
|
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index 23607da6..49552bf5 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -513,9 +513,8 @@ class TestMySQL(Validator):
)
def test_mysql_time(self):
- self.validate_identity("FROM_UNIXTIME(a, b)")
- self.validate_identity("FROM_UNIXTIME(a, b, c)")
self.validate_identity("TIME_STR_TO_UNIX(x)", "UNIX_TIMESTAMP(x)")
+ self.validate_identity("SELECT FROM_UNIXTIME(1711366265, '%Y %D %M')")
self.validate_all(
"SELECT TO_DAYS(x)",
write={
@@ -581,6 +580,17 @@ class TestMySQL(Validator):
self.validate_all(
"STR_TO_DATE(x, '%Y-%m-%dT%T')", write={"presto": "DATE_PARSE(x, '%Y-%m-%dT%T')"}
)
+ self.validate_all(
+ "SELECT FROM_UNIXTIME(col)",
+ read={
+ "postgres": "SELECT TO_TIMESTAMP(col)",
+ },
+ write={
+ "mysql": "SELECT FROM_UNIXTIME(col)",
+ "postgres": "SELECT TO_TIMESTAMP(col)",
+ "redshift": "SELECT (TIMESTAMP 'epoch' + col * INTERVAL '1 SECOND')",
+ },
+ )
def test_mysql(self):
self.validate_all(
|
Function FROM_UNIXTIME transpile error
Source code:
```
from sqlglot import transpile

print(transpile("select from_unixtime(1711366265)", read="mysql", write="postgres"))
print(transpile("select from_unixtime(1711366265)", read="mysql", write="redshift"))
```
Output:
```
['SELECT FROM_UNIXTIME(1711366265)']
['SELECT FROM_UNIXTIME(1711366265)']
```
But Postgres and Redshift have no FROM_UNIXTIME function, so the transpiled SQL raises an error when executed.
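For reference, the behavior the patch establishes, mirroring the assertions added in tests/dialects/test_mysql.py above (the dialect names and target strings come straight from that test; treat this as a sketch of the mapping, not an exhaustive one):
```
# After the fix, MySQL's FROM_UNIXTIME is parsed as exp.UnixToTime and
# generated using each target dialect's native epoch-to-timestamp construct.
from sqlglot import transpile

print(transpile("SELECT FROM_UNIXTIME(col)", read="mysql", write="postgres"))
# ['SELECT TO_TIMESTAMP(col)']
print(transpile("SELECT FROM_UNIXTIME(col)", read="mysql", write="redshift"))
# ["SELECT (TIMESTAMP 'epoch' + col * INTERVAL '1 SECOND')"]
```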
|
0.0
|
e7c91584ac7fb35082ebd1d4873f13307ea848af
|
[
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time"
] |
[
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_identity",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_is_null",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_monthname",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_safe_div",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_types"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-03-26 10:44:21+00:00
|
mit
| 6,033 |
|
tobymao__sqlglot-3326
|
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 5adbb1e5..d97807a3 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2063,6 +2063,7 @@ class Insert(DDL, DML):
"where": False,
"ignore": False,
"by_name": False,
+ "stored": False,
}
def with_(
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index b7da18b3..23b8d9c6 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -1520,6 +1520,8 @@ class Generator(metaclass=_Generator):
else:
this = self.INSERT_OVERWRITE if overwrite else " INTO"
+ stored = self.sql(expression, "stored")
+ stored = f" {stored}" if stored else ""
alternative = expression.args.get("alternative")
alternative = f" OR {alternative}" if alternative else ""
ignore = " IGNORE" if expression.args.get("ignore") else ""
@@ -1545,7 +1547,7 @@ class Generator(metaclass=_Generator):
else:
expression_sql = f"{returning}{expression_sql}{on_conflict}"
- sql = f"INSERT{hint}{alternative}{ignore}{this}{by_name}{exists}{partition_sql}{where}{expression_sql}"
+ sql = f"INSERT{hint}{alternative}{ignore}{this}{stored}{by_name}{exists}{partition_sql}{where}{expression_sql}"
return self.prepend_ctes(expression, sql)
def intersect_sql(self, expression: exp.Intersect) -> str:
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 2aaba600..9c075dc7 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2248,6 +2248,7 @@ class Parser(metaclass=_Parser):
hint=hint,
is_function=is_function,
this=this,
+ stored=self._match_text_seq("STORED") and self._parse_stored(),
by_name=self._match_text_seq("BY", "NAME"),
exists=self._parse_exists(),
partition=self._parse_partition(),
|
tobymao/sqlglot
|
83cff79633225fe3d8606ec3a5a9e8c1081edd0c
|
diff --git a/tests/dialects/test_hive.py b/tests/dialects/test_hive.py
index 33294ee0..d52510d2 100644
--- a/tests/dialects/test_hive.py
+++ b/tests/dialects/test_hive.py
@@ -428,6 +428,9 @@ class TestHive(Validator):
self.validate_identity(
"INSERT OVERWRITE TABLE zipcodes PARTITION(state = 0) VALUES (896, 'US', 'TAMPA', 33607)"
)
+ self.validate_identity(
+ "INSERT OVERWRITE DIRECTORY 'x' ROW FORMAT DELIMITED FIELDS TERMINATED BY '\001' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY '' STORED AS TEXTFILE SELECT * FROM `a`.`b`"
+ )
self.validate_identity(
"SELECT a, b, SUM(c) FROM tabl AS t GROUP BY a, b, GROUPING SETS ((a, b), a)"
)
|
Parsing of INSERT OVERWRITE DIRECTORY fails in the Hive dialect
**Before you file an issue**
[x] Make sure you specify the "read" dialect eg. `parse_one(sql, read="spark")`
[x] Make sure you specify the "write" dialect eg. `ast.sql(dialect="duckdb")`
[x] Check if the issue still exists on main
**Fully reproducible code snippet**
```
import sqlglot

q = """INSERT OVERWRITE DIRECTORY
's3a://path' ROW FORMAT DELIMITED FIELDS TERMINATED BY '\001' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY '
'
STORED AS TEXTFILE
SELECT * FROM `a`.`b`"""
asts = sqlglot.parse(q, dialect='hive')
```
Exception:
```
File /opt/miniconda3/envs/databricks-repos/lib/python3.10/site-packages/sqlglot/parser.py:1170, in Parser.parse(self, raw_tokens, sql)
1156 def parse(
1157 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
1158 ) -> t.List[t.Optional[exp.Expression]]:
1159 """
1160 Parses a list of tokens and returns a list of syntax trees, one tree
1161 per parsed SQL statement.
(...)
1168 The list of the produced syntax trees.
1169 """
-> 1170 return self._parse(
1171 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
1172 )
File /opt/miniconda3/envs/databricks-repos/lib/python3.10/site-packages/sqlglot/parser.py:1239, in Parser._parse(self, parse_method, raw_tokens, sql)
1236 expressions.append(parse_method(self))
1238 if self._index < len(self._tokens):
-> 1239 self.raise_error("Invalid expression / Unexpected token")
1241 self.check_errors()
1243 return expressions
File /opt/miniconda3/envs/databricks-repos/lib/python3.10/site-packages/sqlglot/parser.py:1280, in Parser.raise_error(self, message, token)
1268 error = ParseError.new(
1269 f"{message}. Line {token.line}, Col: {token.col}.\n"
1270 f" {start_context}\033[4m{highlight}\033[0m{end_context}",
(...)
1276 end_context=end_context,
1277 )
1279 if self.error_level == ErrorLevel.IMMEDIATE:
-> 1280 raise error
1282 self.errors.append(error)
ParseError: Invalid expression / Unexpected token. Line 4, Col: 6.
INATED BY '' COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY '
'
STORED AS TEXTFILE
SELECT * FROM `a`.`b`
```
**Official Documentation**
https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML
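For reference, a sketch of what the patch enables, based on the `validate_identity` case added in tests/dialects/test_hive.py above (the input here is a slightly simplified variant of the test's statement; the point is that the `STORED AS TEXTFILE` clause now parses and survives a round trip instead of raising "Unexpected token"):
```
# After the fix, the STORED clause is captured on the Insert expression
# and regenerated when the statement is turned back into Hive SQL.
import sqlglot

sql = (
    "INSERT OVERWRITE DIRECTORY 'x' "
    "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' "
    "STORED AS TEXTFILE SELECT * FROM `a`.`b`"
)
ast = sqlglot.parse_one(sql, read="hive")
print(ast.sql(dialect="hive"))
```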
|
0.0
|
83cff79633225fe3d8606ec3a5a9e8c1081edd0c
|
[
"tests/dialects/test_hive.py::TestHive::test_hive"
] |
[
"tests/dialects/test_hive.py::TestHive::test_bits",
"tests/dialects/test_hive.py::TestHive::test_cast",
"tests/dialects/test_hive.py::TestHive::test_data_type",
"tests/dialects/test_hive.py::TestHive::test_ddl",
"tests/dialects/test_hive.py::TestHive::test_escapes",
"tests/dialects/test_hive.py::TestHive::test_lateral_view",
"tests/dialects/test_hive.py::TestHive::test_order_by",
"tests/dialects/test_hive.py::TestHive::test_quotes",
"tests/dialects/test_hive.py::TestHive::test_regex",
"tests/dialects/test_hive.py::TestHive::test_time"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-04-16 11:45:58+00:00
|
mit
| 6,034 |