def test_priority_keyword_merge() -> None:
"""Test merging on keyword lists works as expected."""
kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")]
kw_list_2 = [("A", "reserved"), ("C", "non-reserved")]
result = priority_keyword_merge(kw_list_1, kw_list_2)
expected_result = [("A", "reserved"), ("B", "non-reserved"), ("C", "non-reserved")]
assert sorted(result) == sorted(expected_result)
kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")]
kw_list_2 = [("A", "reserved"), ("C", "non-reserved")]
result_2 = priority_keyword_merge(kw_list_2, kw_list_1)
expected_result_2 = [
("A", "not-keyword"),
("B", "non-reserved"),
("C", "non-reserved"),
]
assert sorted(result_2) == sorted(expected_result_2)
kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")]
kw_list_2 = [("A", "reserved"), ("C", "non-reserved")]
kw_list_3 = [("B", "reserved")]
result_3 = priority_keyword_merge(kw_list_2, kw_list_1, kw_list_3)
expected_result_3 = [("A", "not-keyword"), ("B", "reserved"), ("C", "non-reserved")]
assert sorted(result_3) == sorted(expected_result_3)
kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")]
result_4 = priority_keyword_merge(kw_list_1)
expected_result_4 = kw_list_1
    assert sorted(result_4) == sorted(expected_result_4)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/postgres_test.py (MIT)
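For orientation, here is a plausible reference implementation consistent with the assertions above (an inference from the test, not the actual sqlfluff source): later lists take priority on duplicate keywords.

def priority_keyword_merge(*kw_lists):
    """Merge keyword lists; entries in later lists override earlier ones."""
    merged = {}
    for kw_list in kw_lists:
        for keyword, status in kw_list:
            merged[keyword] = status
    return list(merged.items())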
def test_get_keywords() -> None:
"""Test keyword filtering works as expected."""
kw_list = [
("A", "not-keyword"),
("B", "reserved"),
("C", "non-reserved"),
("D", "not-keyword"),
("E", "non-reserved-(cannot-be-function-or-type)"),
]
expected_result = ["A", "D"]
assert sorted(get_keywords(kw_list, "not-keyword")) == sorted(expected_result)
expected_result_2 = ["C", "E"]
assert sorted(get_keywords(kw_list, "non-reserved")) == sorted(expected_result_2)
expected_result_3 = ["B"]
    assert sorted(get_keywords(kw_list, "reserved")) == sorted(expected_result_3)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/postgres_test.py (MIT)
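Likewise, a plausible sketch of get_keywords consistent with these assertions; note the prefix match, which is why "non-reserved-(cannot-be-function-or-type)" is returned when filtering on "non-reserved".

def get_keywords(kw_list, keyword_type):
    """Return keywords whose status starts with the given type."""
    return [keyword for keyword, status in kw_list if status.startswith(keyword_type)]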
def lex(raw, config):
"""Basic parsing for the tests below."""
# Set up the lexer
lex = Lexer(config=config)
# Lex the string for matching. For a good test, this would
# arguably happen as a fixture, but it's easier to pass strings
# as parameters than pre-lexed segment strings.
segments, vs = lex.lex(raw)
assert not vs
print(segments)
    return segments

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py (MIT)
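A minimal usage sketch for this helper, assuming the imports available from sqlfluff.core:

from sqlfluff.core import FluffConfig

config = FluffConfig(overrides={"dialect": "ansi"})
segments = lex("SELECT 1 FROM tbl", config)
assert "".join(seg.raw for seg in segments) == "SELECT 1 FROM tbl"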
def validate_segment(segmentref, config):
"""Get and validate segment for tests below."""
Seg = config.get("dialect_obj").ref(segmentref)
if isinstance(Seg, Matchable):
return Seg
try:
if issubclass(Seg, BaseSegment):
return Seg
except TypeError:
pass
raise TypeError(
"{} is not of type Segment or Matchable. Test is invalid.".format(segmentref)
    )

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py (MIT)
def _validate_dialect_specific_statements(dialect, segment_cls, raw, stmt_count):
"""This validates one or multiple statements against specified segment class.
It even validates the number of parsed statements with the number of expected
statements.
"""
lnt = Linter(dialect=dialect)
parsed = lnt.parse_string(raw)
assert len(parsed.violations) == 0
# Find any unparsable statements
typs = parsed.tree.type_set()
assert "unparsable" not in typs
# Find the expected type in the parsed segment
child_segments = [seg for seg in parsed.tree.recursive_crawl(segment_cls.type)]
assert len(child_segments) == stmt_count
# Check if all child segments are the correct type
for c in child_segments:
        assert isinstance(c, segment_cls)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py (MIT)
def dialect_specific_segment_parses():
"""Fixture to check specific segments of a dialect."""
    return _dialect_specific_segment_parses

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py (MIT)
def dialect_specific_segment_not_match():
"""Check specific segments of a dialect which will not match to a segment."""
    return _dialect_specific_segment_not_match

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py (MIT)
def test_non_selects_unparseable(raw: str) -> None:
"""Test that non-SELECT commands are not parseable."""
cfg = FluffConfig(configs={"core": {"dialect": "soql"}})
lnt = Linter(config=cfg)
result = lnt.lint_string(raw)
assert len(result.violations) == 1
    assert isinstance(result.violations[0], SQLParseError)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/soql_test.py (MIT)
def test_bigquery_relational_operator_parsing(data):
"""Tests queries with a diverse mixture of relational operators."""
# Generate a simple SELECT query with relational operators and conjunctions
# as specified in 'data'. Note the conjunctions are used as separators
    # between comparisons, so the conjunction in the first item is not used.
filter = []
for i, (relation, conjunction) in enumerate(data):
if i:
filter.append(f" {conjunction} ")
filter.append(f"a {relation} b")
raw = f'SELECT * FROM t WHERE {"".join(filter)}'
note(f"query: {raw}")
# Load the right dialect
config = FluffConfig(overrides=dict(dialect="bigquery"))
tokens, lex_vs = Lexer(config=config).lex(raw)
# From just the initial parse, check we're all there
assert "".join(token.raw for token in tokens) == raw
# Check we don't have lexing issues
assert not lex_vs
# Do the parse WITHOUT lots of logging
# The logs get too long here to be useful. We should use
# specific segment tests if we want to debug logs.
parsed = Parser(config=config).parse(tokens)
print(f"Post-parse structure: {parsed.to_tuple(show_raw=True)}")
print(f"Post-parse structure: {parsed.stringify()}")
# Check we're all there.
assert parsed.raw == raw
    # Check that there's nothing unparsable
typs = parsed.type_set()
assert "unparsable" not in typs | Tests queries with a diverse mixture of relational operators. | test_bigquery_relational_operator_parsing | python | sqlfluff/sqlfluff | test/dialects/bigquery_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/bigquery_test.py | MIT |
def test_bigquery_table_reference_segment_iter_raw_references(
table_reference, reference_parts
):
"""Tests BigQuery override of TableReferenceSegment.iter_raw_references().
The BigQuery implementation is more complex, handling:
- hyphenated table references
- quoted or not quoted table references
"""
query = f"SELECT bar.user_id FROM {table_reference}"
config = FluffConfig(overrides=dict(dialect="bigquery"))
tokens, lex_vs = Lexer(config=config).lex(query)
parsed = Parser(config=config).parse(tokens)
for table_reference in parsed.recursive_crawl("table_reference"):
actual_reference_parts = [
orp.part for orp in table_reference.iter_raw_references()
]
        assert reference_parts == actual_reference_parts

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/bigquery_test.py (MIT)
def test_dialect_unparsable(
segmentref: Optional[str], dialect: str, raw: str, structure: Any
):
"""Test the structure of unparsables."""
config = FluffConfig(overrides=dict(dialect=dialect))
# Get the referenced object (if set, otherwise root)
if segmentref:
Seg = config.get("dialect_obj").ref(segmentref)
else:
Seg = config.get("dialect_obj").get_root_segment()
# We only allow BaseSegments as matchables in this test.
assert issubclass(Seg, BaseSegment)
assert not issubclass(Seg, RawSegment)
# Lex the raw string.
lex = Lexer(config=config)
segments, vs = lex.lex(raw)
assert not vs
# Strip the end of file token if it's there. It will
# confuse most segments.
if segmentref and segments[-1].is_type("end_of_file"):
segments = segments[:-1]
ctx = ParseContext.from_config(config)
# Match against the segment.
match = Seg.match(segments, 0, ctx)
result = match.apply(segments)
assert len(result) == 1
parsed = result[0]
assert isinstance(parsed, Seg)
    assert parsed.to_tuple(show_raw=True) == structure

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/unparsable_test.py (MIT)
def test_snowflake_queries(segment_cls, raw, caplog):
"""Test snowflake specific queries parse."""
lnt = Linter(dialect="snowflake")
parsed = lnt.parse_string(raw)
print(parsed.violations)
assert len(parsed.violations) == 0
# Find any unparsable statements
typs = parsed.tree.type_set()
assert "unparsable" not in typs
# Find the expected type in the parsed segment
seg_type = dialect_selector("snowflake").get_segment(segment_cls).type
child_segments = [seg for seg in parsed.tree.recursive_crawl(seg_type)]
    assert len(child_segments) > 0

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/snowflake_test.py (MIT)
def test_dialect_exasol_specific_segment_parses(
segmentref, raw, caplog, dialect_specific_segment_parses
):
"""Test exasol specific segments."""
    dialect_specific_segment_parses(TEST_DIALECT, segmentref, raw, caplog)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/exasol_test.py (MIT)
def lex_and_parse(config_overrides: Dict[str, Any], raw: str) -> Optional[ParsedString]:
"""Performs a Lex and Parse, with cacheable inputs within fixture."""
# Load the right dialect
config = FluffConfig(overrides=config_overrides)
# Construct rendered file (to skip the templater)
templated_file = TemplatedFile.from_string(raw)
rendered_file = RenderedFile(
[templated_file],
[],
config,
{},
templated_file.fname,
"utf8",
raw,
)
# Parse (which includes lexing)
linter = Linter(config=config)
parsed_file = linter.parse_rendered(rendered_file)
if not raw: # Empty file case
# We're just checking there aren't exceptions in this case.
return None
# Check we managed to parse
assert parsed_file.tree
# From just the initial parse, check we're all there
assert "".join(token.raw for token in parsed_file.tree.raw_segments) == raw
# Check we don't have lexing or parsing issues
assert not parsed_file.violations
    return parsed_file

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/dialects_test.py (MIT)
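Typical usage is then a one-liner per test; a hedged example:

parsed = lex_and_parse({"dialect": "ansi"}, "SELECT 1 FROM tbl\n")
if parsed is not None:
    print(parsed.tree.stringify())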
def test__dialect__base_file_parse(dialect, file):
"""For given test examples, check successful parsing."""
raw = load_file(dialect, file)
config_overrides = dict(dialect=dialect)
# Use the helper function to avoid parsing twice
parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw)
if not parsed: # Empty file case
return
print(f"Post-parse structure: {parsed.tree.to_tuple(show_raw=True)}")
print(f"Post-parse structure: {parsed.tree.stringify()}")
# Check we're all there.
assert parsed.tree.raw == raw
# Check that there's nothing unparsable
types = parsed.tree.type_set()
assert "unparsable" not in types
# When testing the validity of fixes we re-parse sections of the file.
# To ensure this is safe - here we re-parse the unfixed file to ensure
# it's still valid even in the case that no fixes have been applied.
    assert parsed.tree.validate_segment_with_reparse(parsed.config.get("dialect_obj"))

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/dialects_test.py (MIT)
def test__dialect__base_broad_fix(
dialect, file, raise_critical_errors_after_fix, caplog
):
"""Run a full fix with all rules, in search of critical errors.
NOTE: This suite does all of the same things as the above test
suite (the `parse_suite`), but also runs fix. In CI, we run
the above tests _with_ coverage tracking, but these we run
_without_.
The purpose of this test is as a more stretching run through
a wide range of test sql examples, and the full range of rules
to find any potential critical errors raised by any interactions
between different dialects and rules.
We also do not use DEBUG logging here because it gets _very_
noisy.
"""
raw = load_file(dialect, file)
config_overrides = dict(dialect=dialect)
parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw)
if not parsed: # Empty file case
return
print(parsed.tree.stringify())
config = FluffConfig(overrides=config_overrides)
linter = Linter(config=config)
rule_pack = linter.get_rulepack()
# Due to "raise_critical_errors_after_fix" fixture "fix",
# will now throw.
linter.lint_parsed(
parsed,
rule_pack,
fix=True,
    )

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/dialects_test.py (MIT)
def test__dialect__base_parse_struct(
dialect,
sqlfile,
code_only,
yamlfile,
yaml_loader,
):
"""For given test examples, check parsed structure against yaml."""
parsed: Optional[BaseSegment] = parse_example_file(dialect, sqlfile)
actual_hash = compute_parse_tree_hash(parsed)
# Load the YAML
expected_hash, res = yaml_loader(make_dialect_path(dialect, yamlfile))
if not parsed:
assert parsed == res
return
# Verify the current parse tree matches the historic parse tree.
parsed_tree = parsed.to_tuple(code_only=code_only, show_raw=True)
# The parsed tree consists of a tuple of "File:", followed by the
# statements. So only compare when there is at least one statement.
if parsed_tree[1] or res[1]:
assert parsed_tree == res
# Verify the current hash matches the historic hash. The main purpose of
# this check is to force contributors to use the generator script to
# create these files. New contributors have sometimes been unaware of
# this tool and have attempted to craft the YAML files manually. This
# can lead to slight differences, confusion, and errors.
assert expected_hash == actual_hash, (
"Parse tree hash does not match. Please run "
"'python test/generate_parse_fixture_yml.py' to create YAML files "
"in test/fixtures/dialects."
    )

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/dialects_test.py (MIT)
def test__rules__std_LT02_LT09_LT01():
"""Verify that double indents don't flag LT01."""
sql = """
WITH example AS (
SELECT my_id,
other_thing,
one_more
FROM
my_table
)
SELECT my_id
FROM example\n"""
fixed_sql = """
WITH example AS (
SELECT
my_id,
other_thing,
one_more
FROM
my_table
)
SELECT my_id
FROM example\n"""
result = sqlfluff.fix(sql, exclude_rules=["LT13"])
    assert result == fixed_sql

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT01_LT02_LT09_combo_test.py (MIT)
def test__rules__std_ST05_LT09_4137() -> None:
"""Tests observed conflict between ST05 & LT09.
In this case, the moved `t2` table was created after the first usage.
https://github.com/sqlfluff/sqlfluff/issues/4137
"""
sql = """
with
cte1 as (
select t1.x, t2.y
from tbl1 t1
join (select x, y from tbl2) t2
on t1.x = t2.x
)
, cte2 as (
select x, y from tbl2 t2
)
select x, y from cte1
union all
select x, y from cte2
;
"""
fixed_sql = """
with t2 as (select
x,
y
from tbl2),
cte1 as (
select
t1.x,
t2.y
from tbl1 t1
join t2
on t1.x = t2.x
),
cte2 as (
select
x,
y
from tbl2 t2
)
select
x,
y
from cte1
union all
select
x,
y
from cte2
;
"""
cfg = FluffConfig.from_kwargs(
dialect="ansi",
rules=["ST05", "LT09"],
)
result = Linter(config=cfg).lint_string(sql, fix=True)
    assert result.fix_string()[0] == fixed_sql

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_ST05_LT09_test.py (MIT)
def test__rules__std_ST03_multiple_unused_ctes():
"""Verify that ST03 returns multiple lint issues, one per unused CTE."""
sql = """
WITH
cte_1 AS (
SELECT 1
),
cte_2 AS (
SELECT 2
),
cte_3 AS (
SELECT 3
),
cte_4 AS (
SELECT 4
)
SELECT var_bar
FROM cte_3
"""
result = sqlfluff.lint(sql, rules=["ST03"])
assert result == [
{
"code": "ST03",
"description": 'Query defines CTE "cte_1" but does not use it.',
"name": "structure.unused_cte",
"warning": False,
"fixes": [],
"start_line_no": 3,
"start_line_pos": 5,
"start_file_pos": 14,
"end_line_no": 3,
"end_line_pos": 10,
"end_file_pos": 19,
},
{
"code": "ST03",
"description": 'Query defines CTE "cte_2" but does not use it.',
"name": "structure.unused_cte",
"warning": False,
"fixes": [],
"start_line_no": 6,
"start_line_pos": 5,
"start_file_pos": 53,
"end_line_no": 6,
"end_line_pos": 10,
"end_file_pos": 58,
},
{
"code": "ST03",
"description": 'Query defines CTE "cte_4" but does not use it.',
"name": "structure.unused_cte",
"warning": False,
"fixes": [],
"start_line_no": 12,
"start_line_pos": 5,
"start_file_pos": 131,
"end_line_no": 12,
"end_line_pos": 10,
"end_file_pos": 136,
},
    ]

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_ST03_test.py (MIT)
def test__rules__std_LT05_LT09_long_line_lint():
"""Verify a long line that causes a clash between LT05 and LT09 is not changed."""
sql = (
"SELECT\n1000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000\n"
)
result = sqlfluff.lint(sql)
assert "LT05" in [r["code"] for r in result]
assert "LT09" in [r["code"] for r in result] | Verify a long line that causes a clash between LT05 and LT09 is not changed. | test__rules__std_LT05_LT09_long_line_lint | python | sqlfluff/sqlfluff | test/rules/std_LT05_LT09_combo_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT05_LT09_combo_test.py | MIT |
def test__rules__std_LT05_LT09_long_line_fix():
"""Verify clash between LT05 & LT09 does not add multiple newlines (see #1424)."""
sql = (
"SELECT 10000000000000000000000000000000000000000000000000000000000000000000000"
"00000000000000000000000000000\n"
)
result = sqlfluff.fix(sql)
assert result == (
"SELECT\n 100000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000\n"
    )

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT05_LT09_combo_test.py (MIT)
def test__rules__std_AL04_one_aliases_one_duplicate():
"""Verify correct error message for one duplicate table aliases occur one times."""
sql = """
SELECT
a.pk
FROM table_1 AS a
JOIN table_2 AS a ON a.pk = a.pk
"""
result = sqlfluff.lint(sql)
assert "AL04" in [r["code"] for r in result]
assert [r["code"] for r in result].count("AL04") == 1 | Verify correct error message for one duplicate table aliases occur one times. | test__rules__std_AL04_one_aliases_one_duplicate | python | sqlfluff/sqlfluff | test/rules/std_AL04_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AL04_test.py | MIT |
def test__rules__std_AL04_one_aliases_two_duplicate():
"""Verify correct error message for one duplicate table aliases occur two times."""
sql = """
SELECT
a.pk
FROM table_1 AS a
JOIN table_2 AS a ON a.pk = a.pk
JOIN table_3 AS a ON a.pk = a.pk
"""
result = sqlfluff.lint(sql)
result_filter = [r for r in result if r["code"] == "AL04"]
    # The error message appears only twice, not three times.
assert len(result_filter) == 2
assert (
len(
[
r
for r in result_filter
if "Duplicate table alias 'a'" in r["description"]
]
)
== 2
)
# Test specific line number
assert result_filter[0]["start_line_no"] == 5
assert result_filter[1]["start_line_no"] == 6 | Verify correct error message for one duplicate table aliases occur two times. | test__rules__std_AL04_one_aliases_two_duplicate | python | sqlfluff/sqlfluff | test/rules/std_AL04_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AL04_test.py | MIT |
def test__rules__std_AL04_complex():
"""Verify that AL04 returns the correct error message for complex example."""
sql = """
SELECT
a.pk,
b.pk
FROM table_1 AS a
JOIN table_2 AS a ON a.pk = a.pk
JOIN table_3 AS b ON a.pk = b.pk
JOIN table_4 AS b ON b.pk = b.pk
JOIN table_5 AS a ON b.pk = a.pk
"""
result = sqlfluff.lint(sql)
result_filter = [r for r in result if r["code"] == "AL04"]
    # Three AL04 errors in total: two for 'a' and one for 'b'.
assert len(result_filter) == 3
assert (
len(
[
r
for r in result_filter
if "Duplicate table alias 'a'" in r["description"]
]
)
== 2
)
assert (
len(
[
r
for r in result_filter
if "Duplicate table alias 'b'" in r["description"]
]
)
== 1
)
# Test specific line number
assert result_filter[0]["start_line_no"] == 6
assert result_filter[1]["start_line_no"] == 8
assert result_filter[2]["start_line_no"] == 9 | Verify that AL04 returns the correct error message for complex example. | test__rules__std_AL04_complex | python | sqlfluff/sqlfluff | test/rules/std_AL04_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AL04_test.py | MIT |
def test__rules__std_CV02_raised() -> None:
"""CV02 is raised for use of ``IFNULL`` or ``NVL``."""
sql = "SELECT\n\tIFNULL(NULL, 100),\n\tNVL(NULL,100);"
result = sqlfluff.lint(sql, rules=["CV02"])
assert len(result) == 2
assert result[0]["description"] == "Use 'COALESCE' instead of 'IFNULL'."
assert result[1]["description"] == "Use 'COALESCE' instead of 'NVL'." | CV02 is raised for use of ``IFNULL`` or ``NVL``. | test__rules__std_CV02_raised | python | sqlfluff/sqlfluff | test/rules/std_CV02_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_CV02_test.py | MIT |
def generic_roundtrip_test(source_file, rulestring):
"""Run a roundtrip test given a sql file and a rule.
We take a file buffer, lint, fix and lint, finally checking that
the file fails initially but not after fixing.
"""
if isinstance(source_file, str):
# If it's a string, treat it as a path so lets load it.
with open(source_file) as f:
source_file = StringIO(f.read())
filename = "testing.sql"
# Lets get the path of a file to use
tempdir_path = tempfile.mkdtemp()
filepath = os.path.join(tempdir_path, filename)
# Open the example file and write the content to it
with open(filepath, mode="w") as dest_file:
for line in source_file:
dest_file.write(line)
runner = CliRunner()
# Check that we first detect the issue
result = runner.invoke(lint, ["--rules", rulestring, "--dialect=ansi", filepath])
assert result.exit_code == 1
# Fix the file (in force mode)
result = runner.invoke(
fix, ["--rules", rulestring, "--dialect=ansi", "-f", filepath]
)
assert result.exit_code == 0
# Now lint the file and check for exceptions
result = runner.invoke(lint, ["--rules", rulestring, "--dialect=ansi", filepath])
assert result.exit_code == 0
    shutil.rmtree(tempdir_path)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_roundtrip_test.py (MIT)
def jinja_roundtrip_test(
source_path, rulestring, sqlfile="test.sql", cfgfile=".sqlfluff"
):
"""Run a roundtrip test path and rule.
We take a file buffer, lint, fix and lint, finally checking that
the file fails initially but not after fixing. Additionally
we also check that we haven't messed up the templating tags
in the process.
"""
tempdir_path = tempfile.mkdtemp()
sql_filepath = os.path.join(tempdir_path, sqlfile)
cfg_filepath = os.path.join(tempdir_path, cfgfile)
# Copy the SQL file
with open(sql_filepath, mode="w") as dest_file:
with open(os.path.join(source_path, sqlfile)) as source_file:
for line in source_file:
dest_file.write(line)
# Copy the Config file
with open(cfg_filepath, mode="w") as dest_file:
with open(os.path.join(source_path, cfgfile)) as source_file:
for line in source_file:
dest_file.write(line)
with open(sql_filepath) as f:
# Get a record of the pre-existing jinja tags
tags = re.findall(r"{{[^}]*}}|{%[^}%]*%}", f.read(), flags=0)
runner = CliRunner()
# Check that we first detect the issue
result = runner.invoke(
lint, ["--rules", rulestring, "--dialect=ansi", sql_filepath]
)
assert result.exit_code == 1
# Fix the file (in force mode)
result = runner.invoke(
fix, ["--rules", rulestring, "-f", "--dialect=ansi", sql_filepath]
)
assert result.exit_code == 0
# Now lint the file and check for exceptions
result = runner.invoke(
lint, ["--rules", rulestring, "--dialect=ansi", sql_filepath]
)
if result.exit_code != 0:
# Output the file content for debugging
print("File content:")
with open(sql_filepath) as f:
print(repr(f.read()))
print("Command output:")
print(result.output)
assert result.exit_code == 0
with open(sql_filepath) as f:
# Check that the tags are all still there!
new_tags = re.findall(r"{{[^}]*}}|{%[^}%]*%}", f.read(), flags=0)
# Clear up the temp dir
shutil.rmtree(tempdir_path)
# Assert that the tags are the same
    assert tags == new_tags

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_roundtrip_test.py (MIT)
def test__cli__command__fix(rule, path):
"""Test the round trip of detecting, fixing and then not detecting given rule."""
    generic_roundtrip_test(path, rule)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_roundtrip_test.py (MIT)
def test__cli__command__fix_templated(rule):
"""Roundtrip test, making sure that we don't drop tags while templating."""
jinja_roundtrip_test("test/fixtures/templater/jinja_d_roundtrip", rule) | Roundtrip test, making sure that we don't drop tags while templating. | test__cli__command__fix_templated | python | sqlfluff/sqlfluff | test/rules/std_roundtrip_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_roundtrip_test.py | MIT |
def test__rules__std_AM06_raised() -> None:
"""Test case for multiple AM06 errors raised with 'consistent' setting."""
sql = """
SELECT
foo,
bar,
sum(baz) AS sum_value
FROM (
SELECT
foo,
bar,
sum(baz) AS baz
FROM fake_table
GROUP BY
foo, bar
)
GROUP BY
1, 2
ORDER BY
1, 2;
"""
result = sqlfluff.lint(sql)
results_AM06 = [r for r in result if r["code"] == "AM06"]
assert len(results_AM06) == 2
assert (
results_AM06[0]["description"]
== "Inconsistent column references in 'GROUP BY/ORDER BY' clauses."
    )

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AM06_test.py (MIT)
def test__rules__std_AM06_unparsable() -> None:
"""Test unparsable group by doesn't result in bad rule AM06 error."""
sql = """
SELECT foo.set.barr
FROM foo
GROUP BY
foo.set.barr
"""
result = sqlfluff.lint(sql)
results_AM06 = [r for r in result if r["code"] == "AM06"]
results_prs = [r for r in result if r["code"] == "PRS"]
assert len(results_AM06) == 0
    assert len(results_prs) > 0

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AM06_test.py (MIT)
def test__rules__std_AM06_noqa() -> None:
"""Test unparsable group by with no qa doesn't result in bad rule AM06 error."""
sql = """
SELECT foo.set.barr --noqa: PRS
FROM foo
GROUP BY
foo.set.barr --noqa: PRS
"""
result = sqlfluff.lint(sql)
results_AM06 = [r for r in result if r["code"] == "AM06"]
results_prs = [r for r in result if r["code"] == "PRS"]
assert len(results_AM06) == 0
    assert len(results_prs) == 0

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AM06_test.py (MIT)
def pytest_generate_tests(metafunc):
"""Generate yaml test cases from file.
This is a predefined pytest hook which allows us to parametrize all
other test cases defined in this module.
https://docs.pytest.org/en/stable/how-to/parametrize.html#pytest-generate-tests
"""
ids, test_cases = load_test_cases(
test_cases_path="test/fixtures/rules/std_rule_cases/*.yml"
)
# Only parametrize methods which include `test_case` in their
# list of required fixtures.
if "test_case" in metafunc.fixturenames:
metafunc.parametrize("test_case", test_cases, ids=ids) | Generate yaml test cases from file.
This is a predefined pytest hook which allows us to parametrize all
other test cases defined in this module.
https://docs.pytest.org/en/stable/how-to/parametrize.html#pytest-generate-tests | pytest_generate_tests | python | sqlfluff/sqlfluff | test/rules/yaml_test_cases_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/yaml_test_cases_test.py | MIT |
def test__rule_test_case(test_case: RuleTestCase, caplog):
"""Execute each of the rule test cases.
The cases themselves are parametrized using the above
`pytest_generate_tests` function, which both loads them
    from the yaml files to generate `RuleTestCase` objects, and also
    sets appropriate IDs for each test to improve the
user feedback.
"""
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules"):
with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"):
            test_case.evaluate()

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/yaml_test_cases_test.py (MIT)
def test__rule_test_global_config():
"""Test global config in rule test cases."""
ids, test_cases = load_test_cases(
os.path.join("test/fixtures/rules/R001_global_config_test.yml")
)
assert len(test_cases) == 2
# tc1: overwrites global config
assert test_cases[0].configs["core"]["dialect"] == "ansi"
# tc2: global config is used
assert test_cases[1].configs["core"]["dialect"] == "exasol" | Test global config in rule test cases. | test__rule_test_global_config | python | sqlfluff/sqlfluff | test/rules/yaml_test_cases_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/yaml_test_cases_test.py | MIT |
def test__rules__std_file(rule, path, violations):
"""Test the linter finds the given errors in (and only in) the right places."""
assert_rule_raises_violations_in_file(
rule=rule,
fpath="test/fixtures/linter/" + path,
violations=violations,
fluff_config=FluffConfig(overrides=dict(rules=rule, dialect="ansi")),
    )

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_test.py (MIT)
def test_improper_configs_are_rejected(rule_config_dict):
"""Ensure that unsupported configs raise a ValueError."""
config = FluffConfig(
configs={"rules": rule_config_dict}, overrides={"dialect": "ansi"}
)
with pytest.raises(ValueError):
        get_ruleset().get_rulepack(config)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_test.py (MIT)
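The parametrized rule_config_dict values are presumably dicts naming a parameter no rule supports; a hypothetical example (the parameter name is invented for illustration):

test_improper_configs_are_rejected({"capitalisation.keywords": {"not_a_real_param": True}})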
def test__rules__std_RF02_wildcard_single_count():
"""Verify that RF02 is only raised once for wildcard (see issue #1973)."""
sql = """
SELECT *
FROM foo
INNER JOIN bar;
"""
result = sqlfluff.lint(sql)
assert "RF02" in [r["code"] for r in result]
assert [r["code"] for r in result].count("RF02") == 1 | Verify that RF02 is only raised once for wildcard (see issue #1973). | test__rules__std_RF02_wildcard_single_count | python | sqlfluff/sqlfluff | test/rules/std_RF02_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_RF02_test.py | MIT |
def test__rules__std_AL09_CP02_RF06(rules, dialect, fixed_sql, post_fix_errors):
"""Test interactions between AL09, CP02 & RF06."""
print(f"Running with rules: {rules}")
linter = Linter(dialect=dialect, rules=rules)
result = linter.lint_string(input_query, fix=True)
fixed, _ = result.fix_string()
assert fixed == fixed_sql
# Check violations after fix.
# NOTE: We should really use the rules testing utilities here
# but they don't yet support multiple rules.
post_fix_result = linter.lint_string(fixed, fix=False)
    assert post_fix_result.check_tuples() == post_fix_errors

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AL09_CP02_RF06_combo_test.py (MIT)
def test_rules_std_LT01_and_ST02_interaction(in_sql, out_sql) -> None:
"""Test interaction between LT04 and ST06.
Test sql with two newlines with leading commas expecting trailing.
"""
# Lint expected rules.
cfg = FluffConfig.from_string(
"""[sqlfluff]
dialect = ansi
rules = LT01,ST02
"""
)
linter = Linter(config=cfg)
# Return linted/fixed file.
linted_file = linter.lint_string(in_sql, fix=True)
# Check expected lint errors are raised.
assert set([v.rule.code for v in linted_file.violations]) == {"ST02"}
# Check file is fixed.
    assert linted_file.fix_string()[0] == out_sql

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT01_ST02_test.py (MIT)
def test__std_fix_auto(dialect, folder, caplog):
"""Automated Fixing Tests."""
    auto_fix_test(dialect=dialect, folder=folder, caplog=caplog)

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_fix_auto_test.py (MIT)
def test__rules__std_ST05_LT08_5265() -> None:
"""Tests observed conflict between ST05 & LT08.
    In this case, the moved `oops` and `another` tables were created after
    the first usage. The `oops` in `cte2` is no longer deleted.
https://github.com/sqlfluff/sqlfluff/issues/5265
"""
sql = """
WITH
cte1 AS (
SELECT COUNT(*) AS qty
FROM some_table AS st
LEFT JOIN (
SELECT 'first' AS id
) AS oops
ON st.id = oops.id
),
cte2 AS (
SELECT COUNT(*) AS other_qty
FROM other_table AS sot
LEFT JOIN (
SELECT 'middle' AS id
) AS another
ON sot.id = another.id
LEFT JOIN (
SELECT 'last' AS id
) AS oops
ON sot.id = oops.id
)
SELECT CURRENT_DATE();
"""
fixed_sql = """
WITH oops AS (
SELECT 'first' AS id
),
cte1 AS (
SELECT COUNT(*) AS qty
FROM some_table AS st
LEFT JOIN oops
ON st.id = oops.id
),
another AS (
SELECT 'middle' AS id
),
cte2 AS (
SELECT COUNT(*) AS other_qty
FROM other_table AS sot
LEFT JOIN another
ON sot.id = another.id
LEFT JOIN (
SELECT 'last' AS id
) AS oops
ON sot.id = oops.id
)
SELECT CURRENT_DATE();
"""
cfg = FluffConfig.from_kwargs(
dialect="ansi",
rules=["ST05", "LT08"],
)
result = Linter(config=cfg).lint_string(sql, fix=True)
    assert result.fix_string()[0] == fixed_sql

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_ST05_LT08_test.py (MIT)
def test__rules__std_LT02_LT11_union_all_in_subquery_lint():
"""Verify a that LT11 reports lint errors in subqueries."""
sql = (
"SELECT * FROM (\n"
" SELECT 'g' UNION ALL\n"
" SELECT 'h'\n"
" UNION ALL SELECT 'j'\n"
")\n"
)
result = sqlfluff.lint(sql)
assert "LT11" in [r["code"] for r in result] | Verify a that LT11 reports lint errors in subqueries. | test__rules__std_LT02_LT11_union_all_in_subquery_lint | python | sqlfluff/sqlfluff | test/rules/std_LT02_LT11_combo_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT02_LT11_combo_test.py | MIT |
def test__rules__std_LT02_LT11_union_all_in_subquery_fix():
"""Verify combination of rules LT02 and LT11 produces a correct indentation."""
sql = (
"SELECT c FROM (\n"
" SELECT 'g' UNION ALL\n"
" SELECT 'h'\n"
" UNION ALL SELECT 'j'\n"
")\n"
)
fixed_sql = (
"SELECT c FROM (\n"
" SELECT 'g'\n"
" UNION ALL\n"
" SELECT 'h'\n"
" UNION ALL\n"
" SELECT 'j'\n"
")\n"
)
result = sqlfluff.fix(sql)
    assert result == fixed_sql

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT02_LT11_combo_test.py (MIT)
def test__rules__std_CV09_raised() -> None:
"""CV09 is raised for use of blocked words with correct error message."""
sql = "SELECT MYOLDFUNCTION(col1) FROM deprecated_table;\n"
cfg = FluffConfig(overrides={"dialect": "ansi"})
cfg.set_value(
config_path=["rules", "convention.blocked_words", "blocked_words"],
val="myoldfunction,deprecated_table",
)
linter = Linter(config=cfg)
result_records = linter.lint_string_wrapped(sql).as_records()
result = result_records[0]["violations"]
assert len(result) == 2
assert result[0]["description"] == "Use of blocked word 'MYOLDFUNCTION'."
assert result[1]["description"] == "Use of blocked word 'deprecated_table'." | CV09 is raised for use of blocked words with correct error message. | test__rules__std_CV09_raised | python | sqlfluff/sqlfluff | test/rules/std_CV09_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_CV09_test.py | MIT |
def test__rules__std_LT04_unparseable():
"""Verify that LT04 doesn't try to fix queries with parse errors.
This has been observed to frequently cause syntax errors, especially in
combination with Jinja templating, e.g. undefined template variables.
"""
# This example comes almost directly from a real-world example. The user
# accidentally ran "sqlfluff fix" without defining
# "readability_features_numeric" and "readability_features_count_list", and
# doing so corrupted their query.
sql = """
SELECT
user_id,
campaign_id,
business_type,
SPLIT(intents, ",") AS intent_list,
{% for feature in readability_features_numeric %}
CAST(JSON_EXTRACT(readability_scores,
'$.data.{{feature}}') AS float64) AS {{feature}} {% if not loop.last %} ,
{% endif %}
{% endfor %},
{% for feature in readability_features_count_list %}
CAST(JSON_EXTRACT(asset_structure,
'$.{{feature}}') AS float64) AS {{feature}}_count {% if not loop.last %} ,
{% endif %}
{% endfor %},
track_clicks_text,
track_clicks_html
FROM
t
"""
result = sqlfluff.lint(sql)
assert "LT04" not in [r["code"] for r in result] | Verify that LT04 doesn't try to fix queries with parse errors.
This has been observed to frequently cause syntax errors, especially in
combination with Jinja templating, e.g. undefined template variables. | test__rules__std_LT04_unparseable | python | sqlfluff/sqlfluff | test/rules/std_LT04_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT04_test.py | MIT |
def test__rules__std_LT12_and_CV06_interaction() -> None:
"""Test interaction between LT12 and CV06 doesn't stop CV06 from being applied."""
# Test sql with no final newline and no final semicolon.
sql = "SELECT foo FROM bar"
# Ensure final semicolon requirement is active.
cfg = FluffConfig(overrides={"dialect": "ansi"})
cfg.set_value(
config_path=["rules", "convention.terminator", "require_final_semicolon"],
val=True,
)
linter = Linter(config=cfg)
# Return linted/fixed file.
linted_file = linter.lint_string(sql, fix=True)
# Check expected lint errors are raised.
assert set([v.rule.code for v in linted_file.violations]) == {"LT12", "CV06"}
# Check file is fixed.
assert linted_file.fix_string()[0] == "SELECT foo FROM bar;\n" | Test interaction between LT12 and CV06 doesn't stop CV06 from being applied. | test__rules__std_LT12_and_CV06_interaction | python | sqlfluff/sqlfluff | test/rules/std_LT12_CV06_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT12_CV06_test.py | MIT |
def test_rules_std_LT02_LT04_interaction_indentation_leading(in_sql, out_sql) -> None:
"""Test interaction between LT02 and LT04.
    The input SQL spans multiple lines with trailing commas where leading commas are expected.
"""
# Lint expected rules.
cfg = FluffConfig.from_string(
"""[sqlfluff]
dialect = snowflake
rules = LT02, LT04
[sqlfluff:layout:type:comma]
spacing_before = touch
line_position = leading
"""
)
linter = Linter(config=cfg)
# Return linted/fixed file.
linted_file = linter.lint_string(in_sql, fix=True)
# Check expected lint errors are raised.
assert set([v.rule.code for v in linted_file.violations]) == {"LT04"}
# Check file is fixed.
    assert linted_file.fix_string()[0] == out_sql

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT02_LT04_test.py (MIT)
def test__rules__std_RF01_LT09_copy() -> None:
"""Tests observed conflict between RF01 & LT09.
https://github.com/sqlfluff/sqlfluff/issues/5203
"""
sql = """
SELECT
DISTINCT `FIELD`
FROM `TABLE`;
"""
cfg = FluffConfig.from_kwargs(
dialect="mysql",
rules=["RF01", "LT09"],
)
result = Linter(config=cfg).lint_string(sql)
for violation in result.violations:
assert "Unexpected exception" not in violation.description
assert len(result.violations) == 1
only_violation = result.violations[0]
    assert only_violation.rule_code() == "LT09"

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_RF01_LT09_test.py (MIT)
def test__rules__std_LT01_single_raise() -> None:
"""Test case for multiple LT01 errors raised when no post comma whitespace."""
# This query used to triple count LT01. Added memory to log previously fixed commas
# (issue #2001).
sql = """
SELECT
col_a AS a
,col_b AS b
FROM foo;
"""
result = sqlfluff.lint(sql, rules=["LT01", "LT04"])
results_LT01 = [r for r in result if r["code"] == "LT01"]
results_LT04 = [r for r in result if r["code"] == "LT04"]
assert len(results_LT01) == 1
    assert len(results_LT04) == 1

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT01_LT04_test.py (MIT)
def test__rules__std_LT03_default():
"""Verify that LT03 returns the correct error message for default (trailing)."""
sql = """
SELECT
a,
b
FROM foo
WHERE
a = 1 AND
b = 2
"""
result = sqlfluff.lint(sql)
assert "LT03" in [r["code"] for r in result]
    assert EXPECTED_LEADING_MESSAGE in [r["description"] for r in result]

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT03_test.py (MIT)
def test__rules__std_LT03_leading():
"""Verify correct error message when leading is used."""
sql = """
SELECT
a,
b
FROM foo
WHERE
a = 1 AND
b = 2
"""
config = FluffConfig(
configs={"layout": {"type": {"binary_operator": {"line_position": "leading"}}}},
overrides={"dialect": "ansi"},
)
# The sqlfluff.lint API doesn't allow us to pass config so need to do what it does
linter = Linter(config=config)
result_records = linter.lint_string_wrapped(sql).as_records()
result = result_records[0]["violations"]
assert "LT03" in [r["code"] for r in result]
    assert EXPECTED_LEADING_MESSAGE in [r["description"] for r in result]

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT03_test.py (MIT)
def test__rules__std_LT03_trailing():
"""Verify correct error message when trailing is used."""
sql = """
SELECT
a,
b
FROM foo
WHERE
a = 1
AND b = 2
"""
config = FluffConfig(
configs={
"layout": {"type": {"binary_operator": {"line_position": "trailing"}}}
},
overrides={"dialect": "ansi"},
)
# The sqlfluff.lint API doesn't allow us to pass config so need to do what it does
linter = Linter(config=config)
result_records = linter.lint_string_wrapped(sql).as_records()
result = result_records[0]["violations"]
assert "LT03" in [r["code"] for r in result]
    assert EXPECTED_TRAILING_MESSAGE in [r["description"] for r in result]

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT03_test.py (MIT)
def test__api__lint_string_without_violations():
"""Check lint functionality when there is no violation."""
result = sqlfluff.lint("select column from table\n")
    assert result == []

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py (MIT)
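By contrast, when violations are found, each list element is a dict; a hedged illustration of inspecting them:

import sqlfluff

result = sqlfluff.lint("SELECT 1+2")
print([r["code"] for r in result])  # e.g. spacing codes such as "LT01"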
def test__api__fix_string_specific():
"""Basic checking of lint functionality with a specific rule."""
result = sqlfluff.fix(my_bad_query, rules=["CP01"])
# Check actual result
assert result == "SELECT *, 1, blah AS fOO FROM myTable" | Basic checking of lint functionality with a specific rule. | test__api__fix_string_specific | python | sqlfluff/sqlfluff | test/api/simple_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py | MIT |
def test__api__fix_string_specific_exclude():
"""Basic checking of lint functionality with a specific rule exclusion."""
result = sqlfluff.fix(my_bad_query, exclude_rules=["LT09"])
# Check actual result
assert result == "SELECT *, 1, blah AS foo FROM mytable\n" | Basic checking of lint functionality with a specific rule exclusion. | test__api__fix_string_specific_exclude | python | sqlfluff/sqlfluff | test/api/simple_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py | MIT |
def test__api__fix_string_unparsable():
"""Test behavior with parse errors."""
bad_query = """SELECT my_col
FROM my_schema.my_table
where processdate ! 3"""
result = sqlfluff.fix(bad_query, rules=["CP01"])
# Check fix result: should be unchanged because of the parse error.
    assert result == bad_query

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py (MIT)
def test__api__parse_string():
"""Basic checking of parse functionality."""
parsed = sqlfluff.parse(my_bad_query)
# Check a JSON object is returned.
assert isinstance(parsed, dict)
# Load in expected result.
with open("test/fixtures/api/parse_test/parse_test.json", "r") as f:
expected_parsed = json.load(f)
# Compare JSON from parse to expected result.
    assert parsed == expected_parsed

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py (MIT)
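The returned dict is a JSON-friendly rendering of the parse tree, so it can be inspected directly; a small sketch:

import json

import sqlfluff

parsed = sqlfluff.parse("SELECT 1")
print(json.dumps(parsed, indent=2)[:120])  # peek at the top of the serialised tree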
def test__api__parse_fail():
"""Basic failure mode of parse functionality."""
try:
sqlfluff.parse("Select (1 + 2 +++) FROM mytable as blah blah")
pytest.fail("sqlfluff.parse should have raised an exception.")
except Exception as err:
# Check it's the right kind of exception
assert isinstance(err, sqlfluff.api.APIParsingError)
# Check there are two violations in there.
assert len(err.violations) == 2
# Check it prints nicely.
assert (
str(err)
== """Found 2 issues while parsing string.
Line 1, Position 15: Found unparsable section: '+++'
Line 1, Position 41: Found unparsable section: 'blah'"""
    )

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py (MIT)
def test__api__config_path():
"""Test that we can load a specified config file in the Simple API."""
# Load test SQL file.
with open("test/fixtures/api/config_path_test/config_path_test.sql", "r") as f:
sql = f.read()
# Pass a config path to the Simple API.
parsed = sqlfluff.parse(
sql,
config_path="test/fixtures/api/config_path_test/extra_configs/.sqlfluff",
)
# Load in expected result.
with open("test/fixtures/api/config_path_test/config_path_test.json", "r") as f:
expected_parsed = json.load(f)
# Compare JSON from parse to expected result.
    assert parsed == expected_parsed

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py (MIT)
def test__api__config_override(kwargs, expected, tmpdir):
"""Test that parameters to lint() override .sqlfluff correctly (or not)."""
config_path = "test/fixtures/api/config_override/.sqlfluff"
sql = "SELECT TRIM(name) AS name FROM some_table"
lint_results = sqlfluff.lint(sql, config_path=config_path, **kwargs)
assert expected == {"RF02", "RF04"}.intersection(
{lr["code"] for lr in lint_results}
    )

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py (MIT)
def test__api__invalid_dialect():
"""Test that SQLFluffUserError is raised for a bad dialect."""
# Load test SQL file.
with open("test/fixtures/api/config_path_test/config_path_test.sql", "r") as f:
sql = f.read()
# Pass a fake dialect to the API and test the correct error is raised.
with pytest.raises(SQLFluffUserError) as err:
sqlfluff.parse(
sql,
dialect="not_a_real_dialect",
config_path="test/fixtures/api/config_path_test/extra_configs/.sqlfluff",
)
assert str(err.value) == "Error: Unknown dialect 'not_a_real_dialect'" | Test that SQLFluffUserError is raised for a bad dialect. | test__api__invalid_dialect | python | sqlfluff/sqlfluff | test/api/simple_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py | MIT |
def test__api__parse_exceptions():
"""Test parse behaviour with errors."""
# Parsable content
result = sqlfluff.parse("SELECT 1")
assert result
# Templater fail
with pytest.raises(APIParsingError):
sqlfluff.parse('SELECT {{ 1 > "a"}}')
# Templater success but parsing fail
with pytest.raises(APIParsingError):
sqlfluff.parse("THIS IS NOT SQL") | Test parse behaviour with errors. | test__api__parse_exceptions | python | sqlfluff/sqlfluff | test/api/simple_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py | MIT |
def test__api__lexer():
"""Basic checking of lexing functionality."""
tokens, violations = Lexer(dialect="ansi").lex(test_query)
assert violations == []
assert isinstance(tokens, tuple)
# The last element is the file end marker.
    assert [elem.raw for elem in tokens] == ["SELECt", " ", "1", ""]

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/api/classes_test.py (MIT)
def test__api__parser():
"""Basic checking of parsing functionality."""
tokens, _ = Lexer(dialect="ansi").lex(test_query)
parsed = Parser(dialect="ansi").parse(tokens)
    assert parsed.raw == test_query

Source: https://github.com/sqlfluff/sqlfluff/blob/master/test/api/classes_test.py (MIT)
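Chaining the two classes end to end looks like this; a minimal sketch:

from sqlfluff.core import Lexer, Parser

tokens, violations = Lexer(dialect="ansi").lex("SELECT 1")
assert not violations
tree = Parser(dialect="ansi").parse(tokens)
print(tree.stringify())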
def test__api__info_dialects():
"""Basic linting of dialects."""
dialects = sqlfluff.list_dialects()
assert isinstance(dialects, list)
# Turn it into a dict so we can look for items in there.
dialect_dict = {dialect.label: dialect for dialect in dialects}
# Check the ansi dialect works
assert "ansi" in dialect_dict
ansi = dialect_dict["ansi"]
assert ansi.label == "ansi"
assert ansi.name == "ANSI"
assert ansi.inherits_from == "nothing"
assert "This is the base dialect" in ansi.docstring
# Check one other works
assert "postgres" in dialect_dict
postgres = dialect_dict["postgres"]
assert postgres.label == "postgres"
assert postgres.name == "PostgreSQL"
assert postgres.inherits_from == "ansi"
assert "this is often the dialect to use" in postgres.docstring | Basic linting of dialects. | test__api__info_dialects | python | sqlfluff/sqlfluff | test/api/info_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/api/info_test.py | MIT |
def default_config():
"""Return the default config for reflow tests."""
return FluffConfig(overrides={"dialect": "ansi"}) | Return the default config for reflow tests. | default_config | python | sqlfluff/sqlfluff | test/utils/reflow/conftest.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/conftest.py | MIT |
def parse_ansi_string(sql, config):
"""Parse an ansi sql string for testing."""
linter = Linter(config=config)
    return linter.parse_string(sql).tree | Parse an ANSI SQL string for testing. | parse_ansi_string | python | sqlfluff/sqlfluff | test/utils/reflow/rebreak_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/rebreak_test.py | MIT |
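The helper in isolation, for reference; a sketch using only the API shown in these tests.

from sqlfluff.core import FluffConfig, Linter

config = FluffConfig(overrides={"dialect": "ansi"})
tree = Linter(config=config).parse_string("SELECT 1\n").tree
print(tree.stringify())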
def test_reflow__sequence_rebreak_root(raw_sql_in, raw_sql_out, default_config, caplog):
"""Test the ReflowSequence.rebreak() method directly.
Focused around a whole segment.
"""
root = parse_ansi_string(raw_sql_in, default_config)
print(root.stringify())
seq = ReflowSequence.from_root(root, config=default_config)
for idx, elem in enumerate(seq.elements):
print(idx, elem)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
new_seq = seq.rebreak()
print(new_seq.get_fixes())
assert new_seq.get_raw() == raw_sql_out | Test the ReflowSequence.rebreak() method directly.
Focused around a whole segment. | test_reflow__sequence_rebreak_root | python | sqlfluff/sqlfluff | test/utils/reflow/rebreak_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/rebreak_test.py | MIT |
def test_reflow__sequence_rebreak_target(
raw_sql_in, target_idx, seq_sql_in, seq_sql_out, default_config, caplog
):
"""Test the ReflowSequence.rebreak() method directly.
Focused around a target segment. This intentionally
stretches some of the span logic.
"""
root = parse_ansi_string(raw_sql_in, default_config)
print(root.stringify())
target = root.raw_segments[target_idx]
print("Target: ", target)
seq = ReflowSequence.from_around_target(target, root, config=default_config)
for idx, elem in enumerate(seq.elements):
print(idx, elem)
assert seq.get_raw() == seq_sql_in
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
new_seq = seq.rebreak()
print(new_seq.get_fixes())
assert new_seq.get_raw() == seq_sql_out | Test the ReflowSequence.rebreak() method directly.
Focused around a target segment. This intentionally
stretches some of the span logic. | test_reflow__sequence_rebreak_target | python | sqlfluff/sqlfluff | test/utils/reflow/rebreak_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/rebreak_test.py | MIT |
def test_reflow__sequence_respace(
raw_sql_in, kwargs, raw_sql_out, default_config, caplog
):
"""Test the ReflowSequence.respace() method directly."""
root = parse_ansi_string(raw_sql_in, default_config)
seq = ReflowSequence.from_root(root, config=default_config)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
new_seq = seq.respace(**kwargs)
assert new_seq.get_raw() == raw_sql_out | Test the ReflowSequence.respace() method directly. | test_reflow__sequence_respace | python | sqlfluff/sqlfluff | test/utils/reflow/respace_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/respace_test.py | MIT |
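A hedged end-to-end sketch of `respace()`, assuming `ReflowSequence` is importable from `sqlfluff.utils.reflow` (the path implied by the test location).

from sqlfluff.core import FluffConfig, Linter
from sqlfluff.utils.reflow import ReflowSequence  # import path assumed

config = FluffConfig(overrides={"dialect": "ansi"})
root = Linter(config=config).parse_string("SELECT 1  ,  2\n").tree
seq = ReflowSequence.from_root(root, config=config)
# Expect spacing to be normalised around the comma (illustrative, not verified).
print(seq.respace().get_raw())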
def test_reflow__point_respace_point(
raw_sql_in, point_idx, kwargs, raw_point_sql_out, fixes_out, default_config, caplog
):
"""Test the ReflowPoint.respace_point() method directly.
NOTE: This doesn't check any pre-existing fixes.
That should be a separate more specific test.
"""
root = parse_ansi_string(raw_sql_in, default_config)
seq = ReflowSequence.from_root(root, config=default_config)
pnt = seq.elements[point_idx]
assert isinstance(pnt, ReflowPoint)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
results, new_pnt = pnt.respace_point(
prev_block=seq.elements[point_idx - 1],
next_block=seq.elements[point_idx + 1],
root_segment=root,
lint_results=[],
**kwargs,
)
assert new_pnt.raw == raw_point_sql_out
# NOTE: We use set comparison, because ordering isn't important for fixes.
assert {
(fix.edit_type, fix.anchor.raw) for fix in fixes_from_results(results)
} == fixes_out | Test the ReflowPoint.respace_point() method directly.
NOTE: This doesn't check any pre-existing fixes.
That should be a separate more specific test. | test_reflow__point_respace_point | python | sqlfluff/sqlfluff | test/utils/reflow/respace_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/respace_test.py | MIT |
def assert_reflow_structure(sequence, StartClass, raw_elems):
"""Assert a ReflowSequence has the defined structure."""
assert [
[seg.raw for seg in elem.segments] for elem in sequence.elements
] == raw_elems
# We can assert all the classes just by knowing which we should start with
assert all(type(elem) is StartClass for elem in sequence.elements[::2])
OtherClass = ReflowBlock if StartClass is ReflowPoint else ReflowPoint
assert all(type(elem) is OtherClass for elem in sequence.elements[1::2]) | Assert a ReflowSequence has the defined structure. | assert_reflow_structure | python | sqlfluff/sqlfluff | test/utils/reflow/sequence_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/sequence_test.py | MIT |
def test_reflow_sequence_from_segments(
raw_sql, StartClass, raw_elems, default_config, caplog
):
"""Test direct sequence construction from segments."""
root = parse_ansi_string(raw_sql, default_config)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
result = ReflowSequence.from_raw_segments(
root.raw_segments, root, config=default_config
)
assert_reflow_structure(result, StartClass, raw_elems) | Test direct sequence construction from segments. | test_reflow_sequence_from_segments | python | sqlfluff/sqlfluff | test/utils/reflow/sequence_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/sequence_test.py | MIT |
def test_reflow_sequence_from_around_target(
raw_sql,
sides,
target_idx,
target_raw,
StartClass,
raw_elems,
default_config,
caplog,
):
"""Test direct sequence construction from a target."""
root = parse_ansi_string(raw_sql, default_config)
print("Raw Segments:", root.raw_segments)
target = root.raw_segments[target_idx]
# Check we're aiming at the right place
assert target.raw == target_raw
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
result = ReflowSequence.from_around_target(
target, root, config=default_config, sides=sides
)
assert_reflow_structure(result, StartClass, raw_elems) | Test direct sequence construction from a target. | test_reflow_sequence_from_around_target | python | sqlfluff/sqlfluff | test/utils/reflow/sequence_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/sequence_test.py | MIT |
def test_reflow_sequence_from_around_target_non_raw(default_config, caplog):
"""Test direct sequence construction from a target.
This time we use a target which isn't a RawSegment.
"""
sql = " SELECT 1 "
root = parse_ansi_string(sql, default_config)
# We should have a statement as a first level child.
statement = root.segments[1]
assert statement.is_type("statement")
assert statement.raw == "SELECT 1"
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
result = ReflowSequence.from_around_target(
statement, root, config=default_config
)
# We should start with a point, because we hit the start of the file.
# It should also hit the end of the file and effectively cover all
# the raw segments of the file.
assert_reflow_structure(
result,
ReflowPoint,
[
[" "],
["SELECT"],
["", " "],
["1"],
# dedent - ws
["", " "],
# end of file
[""],
],
) | Test direct sequence construction from a target.
This time we use a target which isn't a RawSegment. | test_reflow_sequence_from_around_target_non_raw | python | sqlfluff/sqlfluff | test/utils/reflow/sequence_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/sequence_test.py | MIT |
def test_reflow_sequence_respace_filter(
raw_sql, filter, delete_indices, edit_indices, default_config, caplog
):
"""Test iteration of trailing whitespace fixes."""
root = parse_ansi_string(raw_sql, default_config)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
sequence = ReflowSequence.from_root(root, config=default_config)
fixes = sequence.respace(filter=filter).get_fixes()
# assert deletes
assert [fix for fix in fixes if fix.edit_type == "delete"] == [
LintFix("delete", root.raw_segments[idx]) for idx in delete_indices
]
# assert edits (with slightly less detail)
assert [
root.raw_segments.index(fix.anchor)
for fix in fixes
if fix.edit_type == "replace"
] == edit_indices | Test iteration of trailing whitespace fixes. | test_reflow_sequence_respace_filter | python | sqlfluff/sqlfluff | test/utils/reflow/sequence_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/sequence_test.py | MIT |
def test_reflow_depthmap_from_parent(default_config):
"""Test map construction from a root segment."""
sql = "SELECT 1"
root = parse_ansi_string(sql, default_config)
dm = DepthMap.from_parent(root)
# We use UUIDS in the depth map so we can't assert their value.
# What we can do is use them.
# Check that we get the right depths.
assert [dm.depth_info[seg.uuid].stack_depth for seg in root.raw_segments] == [
4,
4,
4,
5,
4,
1,
]
# Check they all share the same first three hash and
# class type elements (except the end of file marker at the end).
# These should be the file, statement and select statement.
expected = ({"file", "base"}, {"statement", "base"}, {"select_statement", "base"})
assert all(
dm.depth_info[seg.uuid].stack_class_types[:3] == expected
for seg in root.raw_segments[:-1]
)
first_hashes = dm.depth_info[root.raw_segments[0].uuid].stack_hashes[:3]
assert all(
dm.depth_info[seg.uuid].stack_hashes[:3] == first_hashes
for seg in root.raw_segments[:-1]
)
# While we're here, test the DepthInfo.common_with method
select_keyword_di = dm.depth_info[root.raw_segments[0].uuid]
numeric_one_di = dm.depth_info[root.raw_segments[3].uuid]
assert len(select_keyword_di.common_with(numeric_one_di)) == 4 | Test map construction from a root segment. | test_reflow_depthmap_from_parent | python | sqlfluff/sqlfluff | test/utils/reflow/depthmap_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/depthmap_test.py | MIT |
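A small sketch of building and querying a depth map directly; the import path is assumed from the test location.

from sqlfluff.core import FluffConfig, Linter
from sqlfluff.utils.reflow.depthmap import DepthMap  # import path assumed

config = FluffConfig(overrides={"dialect": "ansi"})
root = Linter(config=config).parse_string("SELECT 1").tree
depth_map = DepthMap.from_parent(root)
for seg in root.raw_segments:
    # Depth grows with nesting; the end-of-file marker sits at depth 1.
    print(repr(seg.raw), depth_map.get_depth_info(seg).stack_depth)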
def test_reflow_depthmap_from_raws_and_root(default_config):
"""Test that the indirect route is equivalent to the direct route."""
sql = "SELECT 1"
root = parse_ansi_string(sql, default_config)
# Direct route
dm_direct = DepthMap.from_parent(root)
# Indirect route.
dm_indirect = DepthMap.from_raws_and_root(root.raw_segments, root)
# The depth info dict depends on the sequence so we only need
# to check those are equal.
assert dm_direct.depth_info == dm_indirect.depth_info | Test that the indirect route is equivalent to the direct route. | test_reflow_depthmap_from_raws_and_root | python | sqlfluff/sqlfluff | test/utils/reflow/depthmap_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/depthmap_test.py | MIT |
def test_reflow_depthmap_order_by(default_config):
"""Test depth mapping of an order by clause."""
sql = "SELECT * FROM foo ORDER BY bar DESC\n"
root = parse_ansi_string(sql, default_config)
# Get the `ORDER` and `DESC` segments.
order_seg = None
desc_seg = None
for raw in root.raw_segments:
if raw.raw_upper == "ORDER":
order_seg = raw
elif raw.raw_upper == "DESC":
desc_seg = raw
# Make sure we find them
assert order_seg
assert desc_seg
# Generate a depth map
depth_map = DepthMap.from_parent(root)
# Check their depth info
order_seg_di = depth_map.get_depth_info(order_seg)
desc_seg_di = depth_map.get_depth_info(desc_seg)
# Make sure they both contain an order by clause.
assert frozenset({"base", "orderby_clause"}) in order_seg_di.stack_class_types
assert frozenset({"base", "orderby_clause"}) in desc_seg_di.stack_class_types
# Get the ID of one and make sure it's in the other
order_by_hash = order_seg_di.stack_hashes[
order_seg_di.stack_class_types.index(frozenset({"base", "orderby_clause"}))
]
assert order_by_hash in order_seg_di.stack_hashes
assert order_by_hash in desc_seg_di.stack_hashes
# Get the position information
order_stack_pos = order_seg_di.stack_positions[order_by_hash]
desc_stack_pos = desc_seg_di.stack_positions[order_by_hash]
# Make sure the position information is correct
print(order_stack_pos)
print(desc_stack_pos)
assert order_stack_pos == StackPosition(idx=0, len=9, type="start")
# NOTE: Even though idx 7 is not the end, the _type_ of this location
# is still an "end" because the following elements are non-code.
assert desc_stack_pos == StackPosition(idx=7, len=9, type="end") | Test depth mapping of an order by clause. | test_reflow_depthmap_order_by | python | sqlfluff/sqlfluff | test/utils/reflow/depthmap_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/depthmap_test.py | MIT |
def slice_file(
self, raw_str: str, render_func: Callable[[str], str], config=None
) -> Tuple[List[RawFileSlice], List[TemplatedFileSlice], str]:
"""Patch a sliced file returned by the superclass."""
raw_sliced, sliced_file, templated_str = super().slice_file(
raw_str, render_func, config
)
patched_sliced_file = []
for templated_slice in sliced_file:
patched_sliced_file.append(templated_slice)
# Add an EMPTY special_marker slice after every block_start.
if templated_slice.slice_type == "block_start":
# Note that both the source_slice AND the templated_slice are empty.
source_pos = templated_slice.source_slice.stop
templated_pos = templated_slice.templated_slice.stop
patched_sliced_file.append(
TemplatedFileSlice(
"special_marker",
slice(source_pos, source_pos),
slice(templated_pos, templated_pos),
)
)
return raw_sliced, patched_sliced_file, templated_str | Patch a sliced file returned by the superclass. | slice_file | python | sqlfluff/sqlfluff | test/utils/reflow/reindent_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/reindent_test.py | MIT |
def get_templaters() -> List[Type[RawTemplater]]:
"""Return templaters provided by this test module."""
return [SpecialMarkerInserter] | Return templaters provided by this test module. | get_templaters | python | sqlfluff/sqlfluff | test/utils/reflow/reindent_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/reindent_test.py | MIT |
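The `get_templaters()` hook is how sqlfluff discovers templaters on registered plugin modules; a sketch of the registration pattern, mirroring the call used later in `test_reflow__crawl_indent_points` (import paths assumed).

import sys
from sqlfluff.core.plugin.host import get_plugin_manager, purge_plugin_manager  # paths assumed

# Register the current module (which exposes get_templaters) as a plugin.
purge_plugin_manager()
get_plugin_manager().register(sys.modules[__name__], name="my_templater_plugin")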
def test_reflow__point_indent_to(
raw_sql_in, elem_idx, indent_to, point_sql_out, default_config, caplog
):
"""Test the ReflowPoint.indent_to() method directly."""
root = parse_ansi_string(raw_sql_in, default_config)
print(root.stringify())
seq = ReflowSequence.from_root(root, config=default_config)
elem = seq.elements[elem_idx]
print("Element: ", elem)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
new_fixes, new_point = elem.indent_to(
indent_to,
before=seq.elements[elem_idx - 1].segments[-1],
after=seq.elements[elem_idx + 1].segments[0],
)
print(new_fixes)
assert new_point.raw == point_sql_out | Test the ReflowPoint.indent_to() method directly. | test_reflow__point_indent_to | python | sqlfluff/sqlfluff | test/utils/reflow/reindent_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/reindent_test.py | MIT |
def test_reflow__point_get_indent(
raw_sql_in, elem_idx, indent_out, default_config, caplog
):
"""Test the ReflowPoint.get_indent() method directly."""
root = parse_ansi_string(raw_sql_in, default_config)
print(root.stringify())
seq = ReflowSequence.from_root(root, config=default_config)
elem = seq.elements[elem_idx]
print("Element: ", elem)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
result = elem.get_indent()
assert result == indent_out | Test the ReflowPoint.get_indent() method directly. | test_reflow__point_get_indent | python | sqlfluff/sqlfluff | test/utils/reflow/reindent_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/reindent_test.py | MIT |
def test_reflow__deduce_line_indent(
raw_sql_in, target_raw, indent_out, default_config, caplog
):
"""Test the deduce_line_indent() method directly."""
root = parse_ansi_string(raw_sql_in, default_config)
print(root.stringify())
for target_seg in root.raw_segments:
if target_seg.raw == target_raw:
break
else:
raise ValueError("Target Raw Not Found")
print("Target: ", target_seg)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
result = deduce_line_indent(target_seg, root)
assert result == indent_out | Test the deduce_line_indent() method directly. | test_reflow__deduce_line_indent | python | sqlfluff/sqlfluff | test/utils/reflow/reindent_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/reindent_test.py | MIT |
def test_reflow__crawl_indent_points(raw_sql_in, templater, points_out, caplog):
"""Test _crawl_indent_points directly."""
# Register the mock templater in this module.
purge_plugin_manager()
get_plugin_manager().register(sys.modules[__name__], name="reindent_test")
config = FluffConfig(overrides={"dialect": "ansi", "templater": templater})
root = parse_ansi_string(raw_sql_in, config)
print(root.stringify())
seq = ReflowSequence.from_root(root, config=config)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
points = list(_crawl_indent_points(seq.elements))
assert points == points_out | Test _crawl_indent_points directly. | test_reflow__crawl_indent_points | python | sqlfluff/sqlfluff | test/utils/reflow/reindent_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/reindent_test.py | MIT |
def test_reflow__lint_indent_points(raw_sql_in, raw_sql_out, default_config, caplog):
"""Test the lint_indent_points() method directly.
    Rather than asserting on the returned results directly, for brevity
    we check the raw output the method produces. This results in a more
    compact test.
"""
root = parse_ansi_string(raw_sql_in, default_config)
print(root.stringify())
seq = ReflowSequence.from_root(root, config=default_config)
with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"):
elements, results = lint_indent_points(seq.elements, single_indent=" ")
result_raw = "".join(elem.raw for elem in elements)
assert result_raw == raw_sql_out, "Raw Element Check Failed!"
# Now we've checked the elements - check that applying the fixes gets us to
# the same place.
print("Results:", results)
anchor_info = compute_anchor_edit_info(fixes_from_results(results))
fixed_tree, _, _, valid = apply_fixes(
root, default_config.get("dialect_obj"), "TEST", anchor_info
)
assert valid, f"Reparse check failed: {fixed_tree.raw!r}"
assert fixed_tree.raw == raw_sql_out, "Element check passed - but fix check failed!" | Test the lint_indent_points() method directly.
Rather than asserting on the returned results directly, for brevity
we check the raw output the method produces. This results in a more
compact test. | test_reflow__lint_indent_points | python | sqlfluff/sqlfluff | test/utils/reflow/reindent_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/reindent_test.py | MIT |
def test_reflow__desired_indent_units(indent_line, forced_indents, expected_units):
"""Test _IndentLine.desired_indent_units() directly."""
assert indent_line.desired_indent_units(forced_indents) == expected_units | Test _IndentLine.desired_indent_units() directly. | test_reflow__desired_indent_units | python | sqlfluff/sqlfluff | test/utils/reflow/reindent_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/reflow/reindent_test.py | MIT |
def _parse_and_crawl_outer(sql):
"""Helper function for select crawlers.
Given a SQL statement this crawls the SQL and instantiates
a Query on the outer relevant segment.
"""
linter = Linter(dialect="ansi")
parsed = linter.parse_string(sql)
# Make sure it's fully parsable.
assert "unparsable" not in parsed.tree.descendant_type_set
# Create a crawler from the root segment.
query = Query.from_root(parsed.tree, linter.dialect)
# Analyse the segment.
return query, linter | Helper function for select crawlers.
Given a SQL statement this crawls the SQL and instantiates
a Query on the outer relevant segment. | _parse_and_crawl_outer | python | sqlfluff/sqlfluff | test/utils/analysis/query_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/analysis/query_test.py | MIT |
def test_select_crawler_constructor(sql, expected_json):
"""Test Query when created using constructor."""
query, _ = _parse_and_crawl_outer(sql)
assert all(cte.cte_definition_segment is not None for cte in query.ctes.values())
query_dict = query.as_dict()
assert expected_json == query_dict | Test Query when created using constructor. | test_select_crawler_constructor | python | sqlfluff/sqlfluff | test/utils/analysis/query_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/analysis/query_test.py | MIT |
def test_select_crawler_nested():
"""Test invoking with an outer from_expression_segment."""
sql = """
select
a.x, a.y, b.z
from a
join (
with d as (
select x, z from b
)
select * from d
) using (x)
"""
query, linter = _parse_and_crawl_outer(sql)
inner_from = (
query.selectables[0].select_info.table_aliases[1].from_expression_element
)
inner_select = next(inner_from.recursive_crawl("with_compound_statement"))
inner_query = Query.from_segment(inner_select, linter.dialect)
assert inner_query.as_dict() == {
"selectables": [
"select * from d",
],
"ctes": {"D": {"selectables": ["select x, z from b"]}},
"query_type": "WithCompound",
} | Test invoking with an outer from_expression_segment. | test_select_crawler_nested | python | sqlfluff/sqlfluff | test/utils/analysis/query_test.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/utils/analysis/query_test.py | MIT |
def table(name):
"""Return the parameter with foo_ in front of it."""
return f"foo_{name}" | Return the parameter with foo_ in front of it. | table | python | sqlfluff/sqlfluff | test/fixtures/templater/jinja_r_library_in_macro/libs/foo.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/fixtures/templater/jinja_r_library_in_macro/libs/foo.py | MIT |
def equals(col, val):
"""Return a string that has col = val."""
return f"{col} = {val}" | Return a string that has col = val. | equals | python | sqlfluff/sqlfluff | test/fixtures/templater/jinja_r_library_in_macro/libs/bar.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/fixtures/templater/jinja_r_library_in_macro/libs/bar.py | MIT |
def ds_filter(value: datetime.date | datetime.time | None) -> str | None:
"""Date filter."""
if value is None:
return None
return value.strftime("%Y-%m-%d") | Date filter. | ds_filter | python | sqlfluff/sqlfluff | test/fixtures/templater/jinja_s_filters_in_library/libs/__init__.py | https://github.com/sqlfluff/sqlfluff/blob/master/test/fixtures/templater/jinja_s_filters_in_library/libs/__init__.py | MIT |
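A hypothetical standalone use of the filter above, registered on a plain Jinja environment; sqlfluff's jinja templater consumes such filters from the configured library path, which is what this fixture exercises.

import datetime
from jinja2 import Environment

env = Environment()
env.filters["ds"] = ds_filter  # the library function defined above
print(env.from_string("{{ d | ds }}").render(d=datetime.date(2024, 1, 31)))
# 2024-01-31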
def time_function(func, name, iterations=20):
"""A basic timing function."""
# Do the timing
time = timeit.timeit(func, number=iterations) / iterations
# Output the result
print(
"{:<35} {:.6}s [{} iterations]".format(
f"Time to {name}:",
time,
iterations,
)
) | A basic timing function. | time_function | python | sqlfluff/sqlfluff | examples/02_timing_api_steps.py | https://github.com/sqlfluff/sqlfluff/blob/master/examples/02_timing_api_steps.py | MIT |
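For example, timing the simple linting API with the `time_function` helper above; the query and label are illustrative.

import sqlfluff

time_function(lambda: sqlfluff.lint("SELECT 1"), "lint a trivial query", iterations=5)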
def get_json_segment(
parse_result: Dict[str, Any], segment_type: str
) -> Iterator[Union[str, Dict[str, Any], List[Dict[str, Any]]]]:
"""Recursively search JSON parse result for specified segment type.
Args:
        parse_result (Dict[str, Any]): JSON parse result from `sqlfluff.parse`.
segment_type (str): The segment type to search for.
Yields:
Iterator[Union[str, Dict[str, Any], List[Dict[str, Any]]]]:
Retrieves children of specified segment type as either a string for a raw
segment or as JSON or an array of JSON for non-raw segments.
"""
for k, v in parse_result.items():
if k == segment_type:
yield v
elif isinstance(v, dict):
yield from get_json_segment(v, segment_type)
elif isinstance(v, list):
for s in v:
yield from get_json_segment(s, segment_type) | Recursively search JSON parse result for specified segment type.
Args:
parse_result (Dict[str, Any]): JSON parse result from `sqlfluff.parse`.
segment_type (str): The segment type to search for.
Yields:
Iterator[Union[str, Dict[str, Any], List[Dict[str, Any]]]]:
Retrieves children of specified segment type as either a string for a raw
segment or as JSON or an array of JSON for non-raw segments. | get_json_segment | python | sqlfluff/sqlfluff | examples/01_basic_api_usage.py | https://github.com/sqlfluff/sqlfluff/blob/master/examples/01_basic_api_usage.py | MIT |
def add(self, *args, **kwargs):
"""The actual handler of the signal."""
self.heard.append((args, kwargs)) | The actual handler of the signal. | listen_to.add | python | maxcountryman/flask-login | tests/test_login.py | https://github.com/maxcountryman/flask-login/blob/master/tests/test_login.py | MIT |
def assert_heard_one(self, *args, **kwargs):
"""The signal fired once, and with the arguments given"""
if len(self.heard) == 0:
raise AssertionError("No signals were fired")
elif len(self.heard) > 1:
msg = f"{len(self.heard)} signals were fired"
raise AssertionError(msg)
elif self.heard[0] != (args, kwargs):
raise AssertionError(
"One signal was heard, but with incorrect"
f" arguments: Got ({self.heard[0]}) expected"
f" ({args}, {kwargs})"
) | The signal fired once, and with the arguments given | listen_to.assert_heard_one | python | maxcountryman/flask-login | tests/test_login.py | https://github.com/maxcountryman/flask-login/blob/master/tests/test_login.py | MIT |
def assert_heard_none(self, *args, **kwargs):
"""The signal fired no times"""
if len(self.heard) >= 1:
msg = f"{len(self.heard)} signals were fired"
raise AssertionError(msg) | The signal fired no times | listen_to.assert_heard_none | python | maxcountryman/flask-login | tests/test_login.py | https://github.com/maxcountryman/flask-login/blob/master/tests/test_login.py | MIT |
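A minimal sketch with a plain blinker signal (flask-login's signals are blinker signals under the hood); `listen_to` is assumed to be the context-manager test helper that owns the methods above.

from blinker import Signal

demo = Signal()

with listen_to(demo) as listener:
    demo.send("sender", user="alice")
    listener.assert_heard_one("sender", user="alice")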