code
stringlengths
26
870k
docstring
stringlengths
1
65.6k
func_name
stringlengths
1
194
language
stringclasses
1 value
repo
stringlengths
8
68
path
stringlengths
5
194
url
stringlengths
46
254
license
stringclasses
4 values
def test__cli__helpers__colorize(tmpdir):
    """Test ANSI colouring."""
    output = FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt"))
    formatter = OutputStreamFormatter(output, False)
    # Force color output for this test.
    formatter.plain_output = False
    assert formatter.colorize("foo", Color.red) == "\u001b[31mfoo\u001b[0m"
Test ANSI colouring.
test__cli__helpers__colorize
python
sqlfluff/sqlfluff
test/cli/formatters_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/cli/formatters_test.py
MIT
# NOTE(review): This line was flattened by extraction. With col_width=7 the
# expected-value string presumably contained padded column spaces that have
# been collapsed to single spaces — confirm against the upstream test before
# reformatting, so the code is left byte-identical here.
def test__cli__helpers__cli_table(tmpdir): """Test making tables.""" vals = [("a", 3), ("b", "c"), ("d", 4.7654), ("e", 9)] formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) txt = formatter.cli_table(vals, col_width=7, divider_char="|", label_color=None) # NB: No trailing newline assert txt == "a: 3|b: c\nd: 4.77|e: 9"
Test making tables.
test__cli__helpers__cli_table
python
sqlfluff/sqlfluff
test/cli/formatters_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/cli/formatters_test.py
MIT
def test__cli__fix_no_corrupt_file_contents(sql, fix_args, expected, tmpdir):
    """Test how the fix cli command creates files.

    Ensure there is no incorrect output from stderr
    that makes it to the file.
    """
    workdir = pathlib.Path(str(tmpdir))
    target = workdir / "testing.sql"
    target.write_text(textwrap.dedent(sql))
    # Run the fix command from inside the temporary directory; the cli
    # entrypoint always exits via SystemExit.
    with tmpdir.as_cwd():
        with pytest.raises(SystemExit):
            fix(fix_args)
    actual = (workdir / "testing.sql").read_text()
    # Ensure no corruption in formatted file.
    assert actual.strip() == expected.strip()
Test how the fix cli command creates files. Ensure there is no incorrect output from stderr that makes it to the file.
test__cli__fix_no_corrupt_file_contents
python
sqlfluff/sqlfluff
test/cli/formatters_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/cli/formatters_test.py
MIT
def test_dialect_click_type_shell_complete(incomplete, expected):
    """Check that autocomplete returns dialects as expected."""
    items = dialect_shell_complete(
        ctx="dummy_not_used", param="dummy_not_used", incomplete=incomplete
    )
    assert [item.value for item in items] == expected
Check that autocomplete returns dialects as expected.
test_dialect_click_type_shell_complete
python
sqlfluff/sqlfluff
test/cli/autocomplete_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/cli/autocomplete_test.py
MIT
def test__dialect__ansi__file_lex(raw, res, caplog):
    """Test we don't drop bits on simple examples."""
    lexer = Lexer(config=FluffConfig(overrides=dict(dialect="ansi")))
    with caplog.at_level(logging.DEBUG):
        tokens, _ = lexer.lex(raw)
    # From just the initial lex, check nothing was dropped: the raw
    # strings of all tokens must reassemble the input exactly.
    token_strings = [token.raw for token in tokens]
    assert "".join(token_strings) == raw
    assert token_strings == res
Test we don't drop bits on simple examples.
test__dialect__ansi__file_lex
python
sqlfluff/sqlfluff
test/dialects/ansi_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/ansi_test.py
MIT
def test__dialect__ansi_specific_segment_parses(
    segmentref, raw, caplog, dialect_specific_segment_parses
):
    """Test that specific segments parse as expected.

    NB: We're testing the PARSE function not the MATCH function,
    although this will be a recursive parse and so the match function
    of SUBSECTIONS will be tested if present. The match function of
    the parent will not be tested.
    """
    dialect_specific_segment_parses("ansi", segmentref, raw, caplog)
Test that specific segments parse as expected. NB: We're testing the PARSE function not the MATCH function although this will be a recursive parse and so the match function of SUBSECTIONS will be tested if present. The match function of the parent will not be tested.
test__dialect__ansi_specific_segment_parses
python
sqlfluff/sqlfluff
test/dialects/ansi_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/ansi_test.py
MIT
def test__dialect__ansi_specific_segment_not_match(
    segmentref, raw, caplog, dialect_specific_segment_not_match
):
    """Test that specific segments do not match.

    NB: We're testing the MATCH function not the PARSE function.
    This is the opposite to the above.
    """
    dialect_specific_segment_not_match("ansi", segmentref, raw, caplog)
Test that specific segments do not match. NB: We're testing the MATCH function not the PARSE function. This is the opposite to the above.
test__dialect__ansi_specific_segment_not_match
python
sqlfluff/sqlfluff
test/dialects/ansi_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/ansi_test.py
MIT
def test__dialect__ansi_specific_segment_not_parse(raw, err_locations):
    """Test queries do not parse, with parsing errors raised properly."""
    linter = Linter(dialect="ansi")
    parsed = linter.parse_string(raw)
    # There must be at least one violation, at exactly the expected spots.
    assert parsed.violations
    print(parsed.violations)
    actual_locations = [(v.line_no, v.line_pos) for v in parsed.violations]
    assert actual_locations == err_locations
Test queries do not parse, with parsing errors raised properly.
test__dialect__ansi_specific_segment_not_parse
python
sqlfluff/sqlfluff
test/dialects/ansi_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/ansi_test.py
MIT
def test__dialect__ansi_is_whitespace():
    """Test proper tagging with is_whitespace."""
    linter = Linter(dialect="ansi")
    with open("test/fixtures/dialects/ansi/select_in_multiline_comment.sql") as f:
        parsed = linter.parse_string(f.read())
    # Check all the segments that *should* be whitespace, ARE.
    for raw_seg in parsed.tree.get_raw_segments():
        if raw_seg.is_type("whitespace", "newline"):
            assert raw_seg.is_whitespace
Test proper tagging with is_whitespace.
test__dialect__ansi_is_whitespace
python
sqlfluff/sqlfluff
test/dialects/ansi_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/ansi_test.py
MIT
def test__dialect__ansi_parse_indented_joins(sql_string, indented_joins, meta_loc):
    """Test parsing of meta segments using Conditional works with indented_joins."""
    config = FluffConfig(
        configs={"indentation": {"indented_joins": indented_joins}},
        overrides={"dialect": "ansi"},
    )
    tree = Linter(config=config).parse_string(sql_string).tree
    # Check that there's nothing unparsable.
    assert "unparsable" not in tree.type_set()
    # Check all the segments that *should* be metas, ARE.
    # NOTE: This includes the end of file marker.
    actual_meta_locs = tuple(
        idx
        for idx, raw_seg in enumerate(tree.get_raw_segments())
        if raw_seg.is_meta
    )
    assert actual_meta_locs == meta_loc
Test parsing of meta segments using Conditional works with indented_joins.
test__dialect__ansi_parse_indented_joins
python
sqlfluff/sqlfluff
test/dialects/ansi_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/ansi_test.py
MIT
def test_dialect_postgres_specific_segment_parses(
    segment_reference: str,
    raw: str,
    caplog: LogCaptureFixture,
    dialect_specific_segment_parses: Callable,
) -> None:
    """Test that specific segments parse as expected.

    NB: We're testing the PARSE function not the MATCH function,
    although this will be a recursive parse and so the match function
    of SUBSECTIONS will be tested if present. The match function of
    the parent will not be tested.
    """
    dialect_specific_segment_parses("postgres", segment_reference, raw, caplog)
Test that specific segments parse as expected. NB: We're testing the PARSE function not the MATCH function although this will be a recursive parse and so the match function of SUBSECTIONS will be tested if present. The match function of the parent will not be tested.
test_dialect_postgres_specific_segment_parses
python
sqlfluff/sqlfluff
test/dialects/postgres_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/postgres_test.py
MIT
def test_epoch_datetime_unit(raw: str) -> None:
    """Test the EPOCH keyword for postgres dialect."""
    # Don't test for new lines or capitalisation.
    config = FluffConfig(
        configs={"core": {"exclude_rules": "LT12,LT05,LT09", "dialect": "postgres"}}
    )
    lint_result = Linter(config=config).lint_string(raw)
    assert lint_result.num_violations() == 0
Test the EPOCH keyword for postgres dialect.
test_epoch_datetime_unit
python
sqlfluff/sqlfluff
test/dialects/postgres_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/postgres_test.py
MIT
def test_space_is_not_reserved(raw: str) -> None:
    """Ensure that SPACE is not treated as reserved."""
    config = FluffConfig(
        configs={"core": {"exclude_rules": "LT12,LT05,AL07", "dialect": "postgres"}}
    )
    lint_result = Linter(config=config).lint_string(raw)
    assert lint_result.num_violations() == 0
Ensure that SPACE is not treated as reserved.
test_space_is_not_reserved
python
sqlfluff/sqlfluff
test/dialects/postgres_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/postgres_test.py
MIT
def test_priority_keyword_merge() -> None:
    """Test merging on keyword lists works as expected."""
    # The later list takes priority for "A".
    kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")]
    kw_list_2 = [("A", "reserved"), ("C", "non-reserved")]
    assert sorted(priority_keyword_merge(kw_list_1, kw_list_2)) == sorted(
        [("A", "reserved"), ("B", "non-reserved"), ("C", "non-reserved")]
    )

    # Reversing the arguments reverses the priority for "A".
    kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")]
    kw_list_2 = [("A", "reserved"), ("C", "non-reserved")]
    assert sorted(priority_keyword_merge(kw_list_2, kw_list_1)) == sorted(
        [("A", "not-keyword"), ("B", "non-reserved"), ("C", "non-reserved")]
    )

    # The last of three lists wins for "B".
    kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")]
    kw_list_2 = [("A", "reserved"), ("C", "non-reserved")]
    kw_list_3 = [("B", "reserved")]
    assert sorted(priority_keyword_merge(kw_list_2, kw_list_1, kw_list_3)) == sorted(
        [("A", "not-keyword"), ("B", "reserved"), ("C", "non-reserved")]
    )

    # A single list merges to itself.
    kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")]
    assert sorted(priority_keyword_merge(kw_list_1)) == sorted(kw_list_1)
Test merging on keyword lists works as expected.
test_priority_keyword_merge
python
sqlfluff/sqlfluff
test/dialects/postgres_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/postgres_test.py
MIT
def test_get_keywords() -> None:
    """Test keyword filtering works as expected."""
    kw_list = [
        ("A", "not-keyword"),
        ("B", "reserved"),
        ("C", "non-reserved"),
        ("D", "not-keyword"),
        ("E", "non-reserved-(cannot-be-function-or-type)"),
    ]
    assert sorted(get_keywords(kw_list, "not-keyword")) == ["A", "D"]
    # "non-reserved" also matches the qualified non-reserved variant.
    assert sorted(get_keywords(kw_list, "non-reserved")) == ["C", "E"]
    assert sorted(get_keywords(kw_list, "reserved")) == ["B"]
Test keyword filtering works as expected.
test_get_keywords
python
sqlfluff/sqlfluff
test/dialects/postgres_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/postgres_test.py
MIT
def lex(raw, config):
    """Basic parsing for the tests below."""
    # Set up the lexer.
    lexer = Lexer(config=config)
    # Lex the string for matching. For a good test, this would
    # arguably happen as a fixture, but it's easier to pass strings
    # as parameters than pre-lexed segment strings.
    segments, violations = lexer.lex(raw)
    assert not violations
    print(segments)
    return segments
Basic parsing for the tests below.
lex
python
sqlfluff/sqlfluff
test/dialects/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py
MIT
def validate_segment(segmentref, config):
    """Get and validate segment for tests below."""
    Seg = config.get("dialect_obj").ref(segmentref)
    # A Matchable instance is acceptable as-is.
    if isinstance(Seg, Matchable):
        return Seg
    # Otherwise it must be a segment class. issubclass raises
    # TypeError when Seg isn't a class at all.
    try:
        is_segment_class = issubclass(Seg, BaseSegment)
    except TypeError:
        is_segment_class = False
    if is_segment_class:
        return Seg
    raise TypeError(
        "{} is not of type Segment or Matchable. Test is invalid.".format(segmentref)
    )
Get and validate segment for tests below.
validate_segment
python
sqlfluff/sqlfluff
test/dialects/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py
MIT
def _dialect_specific_segment_parses(dialect, segmentref, raw, caplog):
    """Test that specific segments parse as expected.

    NB: We're testing the PARSE function not the MATCH function,
    although this will be a recursive parse and so the match function
    of SUBSECTIONS will be tested if present. The match function of
    the parent will not be tested.
    """
    config = FluffConfig(overrides=dict(dialect=dialect))
    segments = lex(raw, config=config)
    Seg = validate_segment(segmentref, config=config)

    # Most segments won't handle the end of file marker. We should strip it.
    if segments[-1].is_type("end_of_file"):
        segments = segments[:-1]

    parse_context = ParseContext.from_config(config)
    with caplog.at_level(logging.DEBUG):
        match_result = Seg.match(segments, 0, parse_context=parse_context)

    assert isinstance(match_result, MatchResult)
    applied = match_result.apply(segments)
    assert len(applied) == 1
    print(applied)
    parsed = applied[0]

    # Check we get a good response.
    print(parsed)
    print(type(parsed))
    print(type(parsed.raw))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable.
    assert "unparsable" not in parsed.type_set()
Test that specific segments parse as expected. NB: We're testing the PARSE function not the MATCH function although this will be a recursive parse and so the match function of SUBSECTIONS will be tested if present. The match function of the parent will not be tested.
_dialect_specific_segment_parses
python
sqlfluff/sqlfluff
test/dialects/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py
MIT
def _dialect_specific_segment_not_match(dialect, segmentref, raw, caplog):
    """Test that specific segments do not match.

    NB: We're testing the MATCH function not the PARSE function.
    This is the opposite to the above.
    """
    config = FluffConfig(overrides=dict(dialect=dialect))
    segments = lex(raw, config=config)
    Seg = validate_segment(segmentref, config=config)
    parse_context = ParseContext.from_config(config)
    with caplog.at_level(logging.DEBUG):
        match = Seg.match(segments, 0, parse_context=parse_context)
    assert not match
Test that specific segments do not match. NB: We're testing the MATCH function not the PARSE function. This is the opposite to the above.
_dialect_specific_segment_not_match
python
sqlfluff/sqlfluff
test/dialects/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py
MIT
def _validate_dialect_specific_statements(dialect, segment_cls, raw, stmt_count):
    """Validate one or multiple statements against a specified segment class.

    It even validates the number of parsed statements with the number of
    expected statements.

    Args:
        dialect: Name of the dialect to parse with.
        segment_cls: Segment class each statement should parse as.
        raw: The raw SQL string to parse.
        stmt_count: Expected number of matching statements.
    """
    lnt = Linter(dialect=dialect)
    parsed = lnt.parse_string(raw)
    assert len(parsed.violations) == 0

    # Find any unparsable statements.
    typs = parsed.tree.type_set()
    assert "unparsable" not in typs

    # Find the expected type in the parsed segment.
    # NOTE: list(...) rather than a redundant pass-through comprehension.
    child_segments = list(parsed.tree.recursive_crawl(segment_cls.type))
    assert len(child_segments) == stmt_count

    # Check if all child segments are the correct type.
    for c in child_segments:
        assert isinstance(c, segment_cls)
This validates one or multiple statements against specified segment class. It even validates the number of parsed statements with the number of expected statements.
_validate_dialect_specific_statements
python
sqlfluff/sqlfluff
test/dialects/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py
MIT
def dialect_specific_segment_parses():
    """Fixture to check specific segments of a dialect.

    Returns the module-level helper so tests can call it directly.
    """
    return _dialect_specific_segment_parses
Fixture to check specific segments of a dialect.
dialect_specific_segment_parses
python
sqlfluff/sqlfluff
test/dialects/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py
MIT
def dialect_specific_segment_not_match():
    """Check specific segments of a dialect which will not match to a segment.

    Returns the module-level helper so tests can call it directly.
    """
    return _dialect_specific_segment_not_match
Check specific segments of a dialect which will not match to a segment.
dialect_specific_segment_not_match
python
sqlfluff/sqlfluff
test/dialects/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py
MIT
def validate_dialect_specific_statements():
    """Fixture validating statements against a specified segment class.

    It even validates the number of parsed statements with the number
    of expected statements.
    """
    return _validate_dialect_specific_statements
This validates one or multiple statements against specified segment class. It even validates the number of parsed statements with the number of expected statements.
validate_dialect_specific_statements
python
sqlfluff/sqlfluff
test/dialects/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/conftest.py
MIT
def test_non_selects_unparseable(raw: str) -> None:
    """Test that non-SELECT commands are not parseable."""
    config = FluffConfig(configs={"core": {"dialect": "soql"}})
    result = Linter(config=config).lint_string(raw)
    # Exactly one violation, and it must be the parse error.
    assert len(result.violations) == 1
    assert isinstance(result.violations[0], SQLParseError)
Test that non-SELECT commands are not parseable.
test_non_selects_unparseable
python
sqlfluff/sqlfluff
test/dialects/soql_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/soql_test.py
MIT
def test_bigquery_relational_operator_parsing(data):
    """Tests queries with a diverse mixture of relational operators."""
    # Generate a simple SELECT query with relational operators and
    # conjunctions as specified in 'data'. Note the conjunctions are used as
    # separators between comparisons, so the conjunction in the first item
    # is not used.
    # NOTE: Renamed from `filter` to avoid shadowing the builtin.
    where_parts = []
    for i, (relation, conjunction) in enumerate(data):
        if i:
            where_parts.append(f" {conjunction} ")
        where_parts.append(f"a {relation} b")
    raw = f'SELECT * FROM t WHERE {"".join(where_parts)}'
    note(f"query: {raw}")
    # Load the right dialect.
    config = FluffConfig(overrides=dict(dialect="bigquery"))
    tokens, lex_vs = Lexer(config=config).lex(raw)
    # From just the initial lex, check we're all there.
    assert "".join(token.raw for token in tokens) == raw
    # Check we don't have lexing issues.
    assert not lex_vs

    # Do the parse WITHOUT lots of logging
    # The logs get too long here to be useful. We should use
    # specific segment tests if we want to debug logs.
    parsed = Parser(config=config).parse(tokens)
    print(f"Post-parse structure: {parsed.to_tuple(show_raw=True)}")
    print(f"Post-parse structure: {parsed.stringify()}")
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable.
    typs = parsed.type_set()
    assert "unparsable" not in typs
Tests queries with a diverse mixture of relational operators.
test_bigquery_relational_operator_parsing
python
sqlfluff/sqlfluff
test/dialects/bigquery_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/bigquery_test.py
MIT
def test_bigquery_table_reference_segment_iter_raw_references(
    table_reference, reference_parts
):
    """Tests BigQuery override of TableReferenceSegment.iter_raw_references().

    The BigQuery implementation is more complex, handling:
    - hyphenated table references
    - quoted or not quoted table references
    """
    query = f"SELECT bar.user_id FROM {table_reference}"
    config = FluffConfig(overrides=dict(dialect="bigquery"))
    # NOTE: The lexing violations were previously bound but unused.
    tokens, _ = Lexer(config=config).lex(query)
    parsed = Parser(config=config).parse(tokens)
    # NOTE: Use a distinct loop variable; the original shadowed the
    # `table_reference` parameter.
    for reference_segment in parsed.recursive_crawl("table_reference"):
        actual_reference_parts = [
            orp.part for orp in reference_segment.iter_raw_references()
        ]
        assert reference_parts == actual_reference_parts
Tests BigQuery override of TableReferenceSegment.iter_raw_references(). The BigQuery implementation is more complex, handling: - hyphenated table references - quoted or not quoted table references
test_bigquery_table_reference_segment_iter_raw_references
python
sqlfluff/sqlfluff
test/dialects/bigquery_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/bigquery_test.py
MIT
def test_dialect_unparsable(
    segmentref: Optional[str], dialect: str, raw: str, structure: Any
):
    """Test the structure of unparsables."""
    config = FluffConfig(overrides=dict(dialect=dialect))

    # Get the referenced object (if set, otherwise root).
    dialect_obj = config.get("dialect_obj")
    Seg = dialect_obj.ref(segmentref) if segmentref else dialect_obj.get_root_segment()

    # We only allow BaseSegments as matchables in this test.
    assert issubclass(Seg, BaseSegment)
    assert not issubclass(Seg, RawSegment)

    # Lex the raw string.
    segments, vs = Lexer(config=config).lex(raw)
    assert not vs

    # Strip the end of file token if it's there. It will
    # confuse most segments.
    if segmentref and segments[-1].is_type("end_of_file"):
        segments = segments[:-1]

    # Match against the segment.
    parse_context = ParseContext.from_config(config)
    match = Seg.match(segments, 0, parse_context)
    result = match.apply(segments)
    assert len(result) == 1
    parsed = result[0]
    assert isinstance(parsed, Seg)
    assert parsed.to_tuple(show_raw=True) == structure
Test the structure of unparsables.
test_dialect_unparsable
python
sqlfluff/sqlfluff
test/dialects/unparsable_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/unparsable_test.py
MIT
def test_snowflake_queries(segment_cls, raw, caplog):
    """Test snowflake specific queries parse."""
    lnt = Linter(dialect="snowflake")
    parsed = lnt.parse_string(raw)
    print(parsed.violations)
    assert len(parsed.violations) == 0

    # Find any unparsable statements.
    typs = parsed.tree.type_set()
    assert "unparsable" not in typs

    # Find the expected type in the parsed segment.
    seg_type = dialect_selector("snowflake").get_segment(segment_cls).type
    # NOTE: list(...) rather than a redundant pass-through comprehension.
    child_segments = list(parsed.tree.recursive_crawl(seg_type))
    assert len(child_segments) > 0
Test snowflake specific queries parse.
test_snowflake_queries
python
sqlfluff/sqlfluff
test/dialects/snowflake_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/snowflake_test.py
MIT
def test_dialect_exasol_specific_segment_parses(
    segmentref, raw, caplog, dialect_specific_segment_parses
):
    """Test exasol specific segments.

    Delegates to the shared dialect-parsing fixture.
    """
    dialect_specific_segment_parses(TEST_DIALECT, segmentref, raw, caplog)
Test exasol specific segments.
test_dialect_exasol_specific_segment_parses
python
sqlfluff/sqlfluff
test/dialects/exasol_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/exasol_test.py
MIT
def lex_and_parse(config_overrides: Dict[str, Any], raw: str) -> Optional[ParsedString]:
    """Performs a Lex and Parse, with cacheable inputs within fixture."""
    # Load the right dialect.
    config = FluffConfig(overrides=config_overrides)
    # Construct rendered file (to skip the templater).
    templated = TemplatedFile.from_string(raw)
    rendered = RenderedFile(
        [templated],
        [],
        config,
        {},
        templated.fname,
        "utf8",
        raw,
    )
    # Parse (which includes lexing).
    parsed_file = Linter(config=config).parse_rendered(rendered)
    if not raw:  # Empty file case
        # We're just checking there aren't exceptions in this case.
        return None
    # Check we managed to parse.
    assert parsed_file.tree
    # From just the initial parse, check we're all there.
    assert "".join(token.raw for token in parsed_file.tree.raw_segments) == raw
    # Check we don't have lexing or parsing issues.
    assert not parsed_file.violations
    return parsed_file
Performs a Lex and Parse, with cacheable inputs within fixture.
lex_and_parse
python
sqlfluff/sqlfluff
test/dialects/dialects_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/dialects_test.py
MIT
def test__dialect__base_file_parse(dialect, file):
    """For given test examples, check successful parsing."""
    raw = load_file(dialect, file)
    # Use the helper function to avoid parsing twice.
    parsed: Optional[ParsedString] = lex_and_parse(dict(dialect=dialect), raw)
    if not parsed:  # Empty file case
        return
    print(f"Post-parse structure: {parsed.tree.to_tuple(show_raw=True)}")
    print(f"Post-parse structure: {parsed.tree.stringify()}")
    # Check we're all there.
    assert parsed.tree.raw == raw
    # Check that there's nothing unparsable.
    assert "unparsable" not in parsed.tree.type_set()
    # When testing the validity of fixes we re-parse sections of the file.
    # To ensure this is safe - here we re-parse the unfixed file to ensure
    # it's still valid even in the case that no fixes have been applied.
    assert parsed.tree.validate_segment_with_reparse(parsed.config.get("dialect_obj"))
For given test examples, check successful parsing.
test__dialect__base_file_parse
python
sqlfluff/sqlfluff
test/dialects/dialects_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/dialects_test.py
MIT
def test__dialect__base_broad_fix(
    dialect, file, raise_critical_errors_after_fix, caplog
):
    """Run a full fix with all rules, in search of critical errors.

    NOTE: This suite does all of the same things as the above test
    suite (the `parse_suite`), but also runs fix. In CI, we run the
    above tests _with_ coverage tracking, but these we run _without_.

    The purpose of this test is as a more stretching run through a wide
    range of test sql examples, and the full range of rules, to find any
    potential critical errors raised by any interactions between
    different dialects and rules.

    We also do not use DEBUG logging here because it gets _very_ noisy.
    """
    raw = load_file(dialect, file)
    config_overrides = dict(dialect=dialect)

    parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw)
    if not parsed:  # Empty file case
        return
    print(parsed.tree.stringify())

    config = FluffConfig(overrides=config_overrides)
    linter = Linter(config=config)
    rule_pack = linter.get_rulepack()
    # Due to "raise_critical_errors_after_fix" fixture "fix",
    # will now throw.
    linter.lint_parsed(parsed, rule_pack, fix=True)
Run a full fix with all rules, in search of critical errors. NOTE: This suite does all of the same things as the above test suite (the `parse_suite`), but also runs fix. In CI, we run the above tests _with_ coverage tracking, but these we run _without_. The purpose of this test is as a more stretching run through a wide range of test sql examples, and the full range of rules to find any potential critical errors raised by any interactions between different dialects and rules. We also do not use DEBUG logging here because it gets _very_ noisy.
test__dialect__base_broad_fix
python
sqlfluff/sqlfluff
test/dialects/dialects_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/dialects_test.py
MIT
def test__dialect__base_parse_struct(
    dialect,
    sqlfile,
    code_only,
    yamlfile,
    yaml_loader,
):
    """For given test examples, check parsed structure against yaml."""
    parsed: Optional[BaseSegment] = parse_example_file(dialect, sqlfile)
    actual_hash = compute_parse_tree_hash(parsed)
    # Load the YAML.
    expected_hash, res = yaml_loader(make_dialect_path(dialect, yamlfile))
    if not parsed:
        assert parsed == res
        return

    # Verify the current parse tree matches the historic parse tree.
    parsed_tree = parsed.to_tuple(code_only=code_only, show_raw=True)
    # The parsed tree consists of a tuple of "File:", followed by the
    # statements. So only compare when there is at least one statement.
    if parsed_tree[1] or res[1]:
        assert parsed_tree == res
    # Verify the current hash matches the historic hash. The main purpose
    # of this check is to force contributors to use the generator script
    # to create these files. New contributors have sometimes been unaware
    # of this tool and have attempted to craft the YAML files manually.
    # This can lead to slight differences, confusion, and errors.
    assert expected_hash == actual_hash, (
        "Parse tree hash does not match. Please run "
        "'python test/generate_parse_fixture_yml.py' to create YAML files "
        "in test/fixtures/dialects."
    )
For given test examples, check parsed structure against yaml.
test__dialect__base_parse_struct
python
sqlfluff/sqlfluff
test/dialects/dialects_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/dialects/dialects_test.py
MIT
# NOTE(review): This line was flattened by extraction. The triple-quoted SQL
# literals originally contained significant newlines/indentation that cannot
# be recovered here, so the code is left byte-identical rather than reformatted.
def test__rules__std_LT02_LT09_LT01(): """Verify that double indents don't flag LT01.""" sql = """ WITH example AS ( SELECT my_id, other_thing, one_more FROM my_table ) SELECT my_id FROM example\n""" fixed_sql = """ WITH example AS ( SELECT my_id, other_thing, one_more FROM my_table ) SELECT my_id FROM example\n""" result = sqlfluff.fix(sql, exclude_rules=["LT13"]) assert result == fixed_sql
Verify that double indents don't flag LT01.
test__rules__std_LT02_LT09_LT01
python
sqlfluff/sqlfluff
test/rules/std_LT01_LT02_LT09_combo_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT01_LT02_LT09_combo_test.py
MIT
# NOTE(review): This line was flattened by extraction. The triple-quoted SQL
# literals originally contained significant newlines/indentation that cannot
# be recovered here, so the code is left byte-identical rather than reformatted.
def test__rules__std_ST05_LT09_4137() -> None: """Tests observed conflict between ST05 & LT09. In this case, the moved `t2` table was created after the first usage. https://github.com/sqlfluff/sqlfluff/issues/4137 """ sql = """ with cte1 as ( select t1.x, t2.y from tbl1 t1 join (select x, y from tbl2) t2 on t1.x = t2.x ) , cte2 as ( select x, y from tbl2 t2 ) select x, y from cte1 union all select x, y from cte2 ; """ fixed_sql = """ with t2 as (select x, y from tbl2), cte1 as ( select t1.x, t2.y from tbl1 t1 join t2 on t1.x = t2.x ), cte2 as ( select x, y from tbl2 t2 ) select x, y from cte1 union all select x, y from cte2 ; """ cfg = FluffConfig.from_kwargs( dialect="ansi", rules=["ST05", "LT09"], ) result = Linter(config=cfg).lint_string(sql, fix=True) assert result.fix_string()[0] == fixed_sql
Tests observed conflict between ST05 & LT09. In this case, the moved `t2` table was created after the first usage. https://github.com/sqlfluff/sqlfluff/issues/4137
test__rules__std_ST05_LT09_4137
python
sqlfluff/sqlfluff
test/rules/std_ST05_LT09_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_ST05_LT09_test.py
MIT
# NOTE(review): This line was flattened by extraction; the triple-quoted SQL
# literals' original newlines cannot be recovered, so the code is left
# byte-identical. Also: the docstring cites issue 4137 but the test name says
# 5265 and the body duplicates the 4137 test — confirm the intended issue link
# and fixture SQL against the upstream repository.
def test__rules__std_ST05_LT09_5265() -> None: """Tests observed conflict between ST05 & LT09. In this case, the moved `t2` table was created after the first usage. https://github.com/sqlfluff/sqlfluff/issues/4137 """ sql = """ with cte1 as ( select t1.x, t2.y from tbl1 t1 join (select x, y from tbl2) t2 on t1.x = t2.x ) , cte2 as ( select x, y from tbl2 t2 ) select x, y from cte1 union all select x, y from cte2 ; """ fixed_sql = """ with t2 as (select x, y from tbl2), cte1 as ( select t1.x, t2.y from tbl1 t1 join t2 on t1.x = t2.x ), cte2 as ( select x, y from tbl2 t2 ) select x, y from cte1 union all select x, y from cte2 ; """ cfg = FluffConfig.from_kwargs( dialect="ansi", rules=["ST05", "LT09"], ) result = Linter(config=cfg).lint_string(sql, fix=True) assert result.fix_string()[0] == fixed_sql
Tests observed conflict between ST05 & LT09. In this case, the moved `t2` table was created after the first usage. https://github.com/sqlfluff/sqlfluff/issues/4137
test__rules__std_ST05_LT09_5265
python
sqlfluff/sqlfluff
test/rules/std_ST05_LT09_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_ST05_LT09_test.py
MIT
def test__rules__std_ST03_multiple_unused_ctes():
    """Verify that ST03 returns multiple lint issues, one per unused CTE."""
    # NOTE: the exact layout of this string matters — the assertions below
    # pin line numbers, column positions *and* absolute file offsets of
    # each CTE name (4-space indent for the CTE names, 8 for the bodies).
    sql = """
    WITH
    cte_1 AS (
        SELECT 1
    ),
    cte_2 AS (
        SELECT 2
    ),
    cte_3 AS (
        SELECT 3
    ),
    cte_4 AS (
        SELECT 4
    )

    SELECT var_bar
    FROM cte_3
    """
    result = sqlfluff.lint(sql, rules=["ST03"])
    # Only ``cte_3`` is consumed by the final SELECT, so ``cte_1``,
    # ``cte_2`` and ``cte_4`` should each raise their own ST03 violation,
    # anchored on the CTE name token.
    assert result == [
        {
            "code": "ST03",
            "description": 'Query defines CTE "cte_1" but does not use it.',
            "name": "structure.unused_cte",
            "warning": False,
            "fixes": [],
            "start_line_no": 3,
            "start_line_pos": 5,
            "start_file_pos": 14,
            "end_line_no": 3,
            "end_line_pos": 10,
            "end_file_pos": 19,
        },
        {
            "code": "ST03",
            "description": 'Query defines CTE "cte_2" but does not use it.',
            "name": "structure.unused_cte",
            "warning": False,
            "fixes": [],
            "start_line_no": 6,
            "start_line_pos": 5,
            "start_file_pos": 53,
            "end_line_no": 6,
            "end_line_pos": 10,
            "end_file_pos": 58,
        },
        {
            "code": "ST03",
            "description": 'Query defines CTE "cte_4" but does not use it.',
            "name": "structure.unused_cte",
            "warning": False,
            "fixes": [],
            "start_line_no": 12,
            "start_line_pos": 5,
            "start_file_pos": 131,
            "end_line_no": 12,
            "end_line_pos": 10,
            "end_file_pos": 136,
        },
    ]
Verify that ST03 returns multiple lint issues, one per unused CTE.
test__rules__std_ST03_multiple_unused_ctes
python
sqlfluff/sqlfluff
test/rules/std_ST03_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_ST03_test.py
MIT
def test__rules__std_LT05_LT09_long_line_lint():
    """Verify a long line that causes a clash between LT05 and LT09 is not changed."""
    # A single numeric select target long enough to breach the line-length
    # limit. The literal is built from two adjacent string fragments.
    sql = (
        "SELECT\n1000000000000000000000000000000000000000000000000000000000000000000000"
        "000000000000000000000000000000\n"
    )
    result = sqlfluff.lint(sql)
    # Both the line-length rule (LT05) and the select-targets rule (LT09)
    # should be reported for this input.
    assert "LT05" in [r["code"] for r in result]
    assert "LT09" in [r["code"] for r in result]
Verify a long line that causes a clash between LT05 and LT09 is not changed.
test__rules__std_LT05_LT09_long_line_lint
python
sqlfluff/sqlfluff
test/rules/std_LT05_LT09_combo_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT05_LT09_combo_test.py
MIT
def test__rules__std_LT05_LT09_long_line_fix():
    """Verify clash between LT05 & LT09 does not add multiple newlines (see #1424)."""
    # Single-line form: the select target shares a line with SELECT.
    sql = (
        "SELECT 10000000000000000000000000000000000000000000000000000000000000000000000"
        "00000000000000000000000000000\n"
    )
    result = sqlfluff.fix(sql)
    # The fix should insert exactly ONE newline (plus indent) after
    # SELECT — the regression in #1424 inserted several.
    assert result == (
        "SELECT\n    100000000000000000000000000000000000000000000000000000000000000000"
        "0000000000000000000000000000000000\n"
    )
Verify clash between LT05 & LT09 does not add multiple newlines (see #1424).
test__rules__std_LT05_LT09_long_line_fix
python
sqlfluff/sqlfluff
test/rules/std_LT05_LT09_combo_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT05_LT09_combo_test.py
MIT
def test__rules__std_LT05_LT09_long_line_fix2():
    """Verify clash between LT05 & LT09 does not add multiple newlines (see #1424)."""
    # Reverse direction of the previous test: start from the already-split
    # form and expect it to be re-joined onto a single line.
    # NOTE(review): presumably the joined form fits within the configured
    # line length in this case — confirm against LT05's default settings.
    sql = (
        "SELECT\n    100000000000000000000000000000000000000000000000000000000000000000"
        "0000000000000000000000000000000000\n"
    )
    result = sqlfluff.fix(sql)
    assert result == (
        "SELECT 10000000000000000000000000000000000000000000000000000000000000000000000"
        "00000000000000000000000000000\n"
    )
Verify clash between LT05 & LT09 does not add multiple newlines (see #1424).
test__rules__std_LT05_LT09_long_line_fix2
python
sqlfluff/sqlfluff
test/rules/std_LT05_LT09_combo_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT05_LT09_combo_test.py
MIT
def test__rules__std_AL04_one_aliases_one_duplicate():
    """Verify correct error message for one duplicate table aliases occur one times."""
    sql = """
        SELECT
            a.pk
        FROM table_1 AS a
        JOIN table_2 AS a ON a.pk = a.pk
    """
    violations = sqlfluff.lint(sql)
    # Filter down to just the duplicate-alias rule; the single re-use of
    # alias ``a`` should be flagged exactly once.
    al04_codes = [v["code"] for v in violations if v["code"] == "AL04"]
    assert al04_codes == ["AL04"]
Verify correct error message for one duplicate table aliases occur one times.
test__rules__std_AL04_one_aliases_one_duplicate
python
sqlfluff/sqlfluff
test/rules/std_AL04_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AL04_test.py
MIT
def test__rules__std_AL04_one_aliases_two_duplicate():
    """Verify correct error message for one duplicate table aliases occur two times."""
    sql = """
        SELECT
            a.pk
        FROM table_1 AS a
        JOIN table_2 AS a ON a.pk = a.pk
        JOIN table_3 AS a ON a.pk = a.pk
    """
    result = sqlfluff.lint(sql)
    result_filter = [r for r in result if r["code"] == "AL04"]
    # Error message only show two times, not three
    # (the first definition of alias ``a`` is the "original"; only the
    # two re-uses are flagged).
    assert len(result_filter) == 2
    assert (
        len(
            [
                r
                for r in result_filter
                if "Duplicate table alias 'a'" in r["description"]
            ]
        )
        == 2
    )
    # Test specific line number
    # (1-indexed, counting the leading blank line of the SQL string, so
    # the two JOIN lines are lines 5 and 6).
    assert result_filter[0]["start_line_no"] == 5
    assert result_filter[1]["start_line_no"] == 6
Verify correct error message for one duplicate table aliases occur two times.
test__rules__std_AL04_one_aliases_two_duplicate
python
sqlfluff/sqlfluff
test/rules/std_AL04_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AL04_test.py
MIT
def test__rules__std_AL04_complex():
    """Verify that AL04 returns the correct error message for complex example."""
    sql = """
        SELECT
            a.pk,
            b.pk
        FROM table_1 AS a
        JOIN table_2 AS a ON a.pk = a.pk
        JOIN table_3 AS b ON a.pk = b.pk
        JOIN table_4 AS b ON b.pk = b.pk
        JOIN table_5 AS a ON b.pk = a.pk
    """
    result = sqlfluff.lint(sql)
    result_filter = [r for r in result if r["code"] == "AL04"]
    # Error message only show two times, not three
    # (the first definition of each alias is not flagged: ``a`` is re-used
    # twice, ``b`` once, giving three violations total).
    assert len(result_filter) == 3
    assert (
        len(
            [
                r
                for r in result_filter
                if "Duplicate table alias 'a'" in r["description"]
            ]
        )
        == 2
    )
    assert (
        len(
            [
                r
                for r in result_filter
                if "Duplicate table alias 'b'" in r["description"]
            ]
        )
        == 1
    )
    # Test specific line number
    # (1-indexed, counting the leading blank line: the offending JOINs
    # sit on lines 6, 8 and 9 of the string above).
    assert result_filter[0]["start_line_no"] == 6
    assert result_filter[1]["start_line_no"] == 8
    assert result_filter[2]["start_line_no"] == 9
Verify that AL04 returns the correct error message for complex example.
test__rules__std_AL04_complex
python
sqlfluff/sqlfluff
test/rules/std_AL04_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AL04_test.py
MIT
def test__rules__std_CV02_raised() -> None:
    """CV02 is raised for use of ``IFNULL`` or ``NVL``."""
    sql = "SELECT\n\tIFNULL(NULL, 100),\n\tNVL(NULL,100);"
    # Lint with only CV02 enabled; one violation per discouraged function,
    # reported in source order.
    violations = sqlfluff.lint(sql, rules=["CV02"])
    descriptions = [v["description"] for v in violations]
    assert descriptions == [
        "Use 'COALESCE' instead of 'IFNULL'.",
        "Use 'COALESCE' instead of 'NVL'.",
    ]
CV02 is raised for use of ``IFNULL`` or ``NVL``.
test__rules__std_CV02_raised
python
sqlfluff/sqlfluff
test/rules/std_CV02_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_CV02_test.py
MIT
def test_lint_jj01_pickled_config():
    """Tests the error catching behavior of _lint_path_parallel_wrapper().

    Test on MultiThread runner because otherwise we have pickling issues.
    """
    fname = "test/fixtures/linter/jinja_spacing.sql"
    fresh_cfg = FluffConfig(overrides={"dialect": "ansi", "rules": "JJ01"})
    # Parse the file with the fresh config.
    linter = Linter(config=fresh_cfg)
    parsed = next(linter.parse_path(fname))
    rule_pack = linter.get_rulepack(config=fresh_cfg)
    rule = rule_pack.rules[0]
    # Check we got the right rule.
    assert rule.code == "JJ01"
    # Pickle the config and rehydrate to simulate threaded operation
    # (this round-trip is what a parallel runner does to ship the config
    # to worker processes).
    pickled = pickle.dumps(fresh_cfg)
    unpickled_cfg = pickle.loads(pickled)
    # Crawl with the pickled config. Check we don't get an error.
    linting_errors, _, fixes, _ = rule.crawl(
        parsed.tree,
        dialect=unpickled_cfg.get("dialect_obj"),
        fix=True,
        templated_file=parsed.parsed_variants[0].templated_file,
        ignore_mask=None,
        fname=fname,
        config=unpickled_cfg,  # <- NOTE: This is the important part.
    )
    # Check we successfully got the right results.
    assert len(linting_errors) == 1
    assert linting_errors[0].check_tuple() == ("JJ01", 3, 15)
Tests the error catching behavior of _lint_path_parallel_wrapper(). Test on MultiThread runner because otherwise we have pickling issues.
test_lint_jj01_pickled_config
python
sqlfluff/sqlfluff
test/rules/std_JJ01_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_JJ01_test.py
MIT
def generic_roundtrip_test(source_file, rulestring):
    """Run a roundtrip test given a sql file and a rule.

    We take a file buffer, lint, fix and lint, finally checking that
    the file fails initially but not after fixing.

    Args:
        source_file: Either a path to a sql file or a file-like object
            containing the sql to test.
        rulestring: The rule code(s) to enable for the roundtrip.
    """
    if isinstance(source_file, str):
        # If it's a string, treat it as a path so lets load it.
        with open(source_file) as f:
            source_file = StringIO(f.read())

    filename = "testing.sql"
    # Use a context manager for the working directory so that it is
    # removed even when one of the assertions below fails. (Previously
    # this used mkdtemp() with a trailing rmtree(), which leaked the
    # directory on any test failure.)
    with tempfile.TemporaryDirectory() as tempdir_path:
        filepath = os.path.join(tempdir_path, filename)
        # Open the example file and write the content to it
        with open(filepath, mode="w") as dest_file:
            for line in source_file:
                dest_file.write(line)

        runner = CliRunner()
        # Check that we first detect the issue
        result = runner.invoke(
            lint, ["--rules", rulestring, "--dialect=ansi", filepath]
        )
        assert result.exit_code == 1
        # Fix the file (in force mode)
        result = runner.invoke(
            fix, ["--rules", rulestring, "--dialect=ansi", "-f", filepath]
        )
        assert result.exit_code == 0
        # Now lint the file and check for exceptions
        result = runner.invoke(
            lint, ["--rules", rulestring, "--dialect=ansi", filepath]
        )
        assert result.exit_code == 0
Run a roundtrip test given a sql file and a rule. We take a file buffer, lint, fix and lint, finally checking that the file fails initially but not after fixing.
generic_roundtrip_test
python
sqlfluff/sqlfluff
test/rules/std_roundtrip_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_roundtrip_test.py
MIT
def jinja_roundtrip_test(
    source_path, rulestring, sqlfile="test.sql", cfgfile=".sqlfluff"
):
    """Run a roundtrip test path and rule.

    We take a file buffer, lint, fix and lint, finally checking that
    the file fails initially but not after fixing. Additionally
    we also check that we haven't messed up the templating tags
    in the process.
    """
    # NOTE(review): if any assertion before the rmtree() below fails, the
    # temporary directory is leaked — consider TemporaryDirectory().
    tempdir_path = tempfile.mkdtemp()
    sql_filepath = os.path.join(tempdir_path, sqlfile)
    cfg_filepath = os.path.join(tempdir_path, cfgfile)

    # Copy the SQL file
    with open(sql_filepath, mode="w") as dest_file:
        with open(os.path.join(source_path, sqlfile)) as source_file:
            for line in source_file:
                dest_file.write(line)
    # Copy the Config file
    with open(cfg_filepath, mode="w") as dest_file:
        with open(os.path.join(source_path, cfgfile)) as source_file:
            for line in source_file:
                dest_file.write(line)

    with open(sql_filepath) as f:
        # Get a record of the pre-existing jinja tags
        # (both expressions ``{{ ... }}`` and statements ``{% ... %}``).
        tags = re.findall(r"{{[^}]*}}|{%[^}%]*%}", f.read(), flags=0)

    runner = CliRunner()
    # Check that we first detect the issue
    result = runner.invoke(
        lint, ["--rules", rulestring, "--dialect=ansi", sql_filepath]
    )
    assert result.exit_code == 1
    # Fix the file (in force mode)
    result = runner.invoke(
        fix, ["--rules", rulestring, "-f", "--dialect=ansi", sql_filepath]
    )
    assert result.exit_code == 0
    # Now lint the file and check for exceptions
    result = runner.invoke(
        lint, ["--rules", rulestring, "--dialect=ansi", sql_filepath]
    )
    if result.exit_code != 0:
        # Output the file content for debugging
        print("File content:")
        with open(sql_filepath) as f:
            print(repr(f.read()))
        print("Command output:")
        print(result.output)
    assert result.exit_code == 0

    with open(sql_filepath) as f:
        # Check that the tags are all still there!
        new_tags = re.findall(r"{{[^}]*}}|{%[^}%]*%}", f.read(), flags=0)

    # Clear up the temp dir
    shutil.rmtree(tempdir_path)

    # Assert that the tags are the same
    assert tags == new_tags
Run a roundtrip test path and rule. We take a file buffer, lint, fix and lint, finally checking that the file fails initially but not after fixing. Additionally we also check that we haven't messed up the templating tags in the process.
jinja_roundtrip_test
python
sqlfluff/sqlfluff
test/rules/std_roundtrip_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_roundtrip_test.py
MIT
def test__cli__command__fix(rule, path):
    """Test the round trip of detecting, fixing and then not detecting given rule."""
    # Delegate to the shared roundtrip helper, with explicit keywords.
    generic_roundtrip_test(source_file=path, rulestring=rule)
Test the round trip of detecting, fixing and then not detecting given rule.
test__cli__command__fix
python
sqlfluff/sqlfluff
test/rules/std_roundtrip_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_roundtrip_test.py
MIT
def test__cli__command__fix_templated(rule):
    """Roundtrip test, making sure that we don't drop tags while templating."""
    # Delegate to the jinja-aware roundtrip helper, with explicit keywords.
    jinja_roundtrip_test(
        source_path="test/fixtures/templater/jinja_d_roundtrip",
        rulestring=rule,
    )
Roundtrip test, making sure that we don't drop tags while templating.
test__cli__command__fix_templated
python
sqlfluff/sqlfluff
test/rules/std_roundtrip_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_roundtrip_test.py
MIT
def test__rules__std_AM06_raised() -> None:
    """Test case for multiple AM06 errors raised with 'consistent' setting."""
    sql = """
    SELECT
        foo,
        bar,
        sum(baz) AS sum_value
    FROM (
        SELECT
            foo,
            bar,
            sum(baz) AS baz
        FROM fake_table
        GROUP BY
            foo, bar
    )
    GROUP BY
        1, 2
    ORDER BY
        1, 2;
    """
    # The inner query groups by column name while the outer query groups
    # and orders by index — an inconsistent mix.
    violations = sqlfluff.lint(sql)
    am06_violations = [v for v in violations if v["code"] == "AM06"]
    # One violation each for the outer GROUP BY and ORDER BY clauses.
    assert len(am06_violations) == 2
    expected_message = (
        "Inconsistent column references in 'GROUP BY/ORDER BY' clauses."
    )
    assert am06_violations[0]["description"] == expected_message
Test case for multiple AM06 errors raised with 'consistent' setting.
test__rules__std_AM06_raised
python
sqlfluff/sqlfluff
test/rules/std_AM06_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AM06_test.py
MIT
def test__rules__std_AM06_unparsable() -> None:
    """Test unparsable group by doesn't result in bad rule AM06 error."""
    sql = """
    SELECT foo.set.barr
    FROM foo
    GROUP BY
      foo.set.barr
    """
    violations = sqlfluff.lint(sql)
    codes = [v["code"] for v in violations]
    # The parse failure surfaces as PRS, but must not be mis-reported
    # as an AM06 violation.
    assert "AM06" not in codes
    assert "PRS" in codes
Test unparsable group by doesn't result in bad rule AM06 error.
test__rules__std_AM06_unparsable
python
sqlfluff/sqlfluff
test/rules/std_AM06_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AM06_test.py
MIT
def test__rules__std_AM06_noqa() -> None:
    """Test unparsable group by with no qa doesn't result in bad rule AM06 error."""
    # NOTE(review): the "[email protected]" token in the GROUP BY below looks
    # like an email-obfuscation artifact from an extraction step rather
    # than intentional SQL — confirm the literal against the upstream
    # test. The intent is simply an unparsable expression with a noqa.
    sql = """
    SELECT foo.set.barr --noqa: PRS
    FROM foo
    GROUP BY
      [email protected] --noqa: PRS
    """
    result = sqlfluff.lint(sql)
    # With the parse errors suppressed by the inline noqa directives,
    # neither PRS nor AM06 should be reported.
    results_AM06 = [r for r in result if r["code"] == "AM06"]
    results_prs = [r for r in result if r["code"] == "PRS"]
    assert len(results_AM06) == 0
    assert len(results_prs) == 0
Test unparsable group by with no qa doesn't result in bad rule AM06 error.
test__rules__std_AM06_noqa
python
sqlfluff/sqlfluff
test/rules/std_AM06_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AM06_test.py
MIT
def pytest_generate_tests(metafunc):
    """Generate yaml test cases from file.

    This is a predefined pytest hook which allows us to parametrize all
    other test cases defined in this module.
    https://docs.pytest.org/en/stable/how-to/parametrize.html#pytest-generate-tests
    """
    ids, test_cases = load_test_cases(
        test_cases_path="test/fixtures/rules/std_rule_cases/*.yml"
    )
    # Guard clause: only parametrize methods which request the
    # `test_case` fixture.
    if "test_case" not in metafunc.fixturenames:
        return
    metafunc.parametrize("test_case", test_cases, ids=ids)
Generate yaml test cases from file. This is a predefined pytest hook which allows us to parametrize all other test cases defined in this module. https://docs.pytest.org/en/stable/how-to/parametrize.html#pytest-generate-tests
pytest_generate_tests
python
sqlfluff/sqlfluff
test/rules/yaml_test_cases_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/yaml_test_cases_test.py
MIT
def test__rule_test_case(test_case: RuleTestCase, caplog):
    """Execute each of the rule test cases.

    The cases themselves are parametrized using the above
    `pytest_generate_tests` function, which both loads them from the yaml
    files do generate `RuleTestCase` objects, but also sets appropriate
    IDs for each test to improve the user feedback.
    """
    # Capture debug-level logs from both the rules and linter loggers
    # while the case runs, combining the managers into a single `with`.
    with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules"), caplog.at_level(
        logging.DEBUG, logger="sqlfluff.linter"
    ):
        test_case.evaluate()
Execute each of the rule test cases. The cases themselves are parametrized using the above `pytest_generate_tests` function, which both loads them from the yaml files do generate `RuleTestCase` objects, but also sets appropriate IDs for each test to improve the user feedback.
test__rule_test_case
python
sqlfluff/sqlfluff
test/rules/yaml_test_cases_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/yaml_test_cases_test.py
MIT
def test__rule_test_global_config():
    """Test global config in rule test cases."""
    # The fixture path is already a plain relative path, so no
    # os.path.join is needed here.
    ids, test_cases = load_test_cases(
        "test/fixtures/rules/R001_global_config_test.yml"
    )
    assert len(test_cases) == 2
    # The first case overrides the global dialect; the second inherits
    # the global value from the file.
    assert test_cases[0].configs["core"]["dialect"] == "ansi"
    assert test_cases[1].configs["core"]["dialect"] == "exasol"
Test global config in rule test cases.
test__rule_test_global_config
python
sqlfluff/sqlfluff
test/rules/yaml_test_cases_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/yaml_test_cases_test.py
MIT
def test__rules__std_file(rule, path, violations):
    """Test the linter finds the given errors in (and only in) the right places."""
    # Build the config with only the rule under test enabled.
    config = FluffConfig(overrides={"rules": rule, "dialect": "ansi"})
    assert_rule_raises_violations_in_file(
        rule=rule,
        fpath=f"test/fixtures/linter/{path}",
        violations=violations,
        fluff_config=config,
    )
Test the linter finds the given errors in (and only in) the right places.
test__rules__std_file
python
sqlfluff/sqlfluff
test/rules/std_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_test.py
MIT
def test_improper_configs_are_rejected(rule_config_dict):
    """Ensure that unsupported configs raise a ValueError."""
    # Build a config carrying the unsupported rule settings.
    config = FluffConfig(
        configs={"rules": rule_config_dict},
        overrides={"dialect": "ansi"},
    )
    # Constructing the rule pack from that config should fail outright.
    with pytest.raises(ValueError):
        get_ruleset().get_rulepack(config)
Ensure that unsupported configs raise a ValueError.
test_improper_configs_are_rejected
python
sqlfluff/sqlfluff
test/rules/std_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_test.py
MIT
def test__rules__std_RF02_wildcard_single_count():
    """Verify that RF02 is only raised once for wildcard (see issue #1973)."""
    sql = """
        SELECT *
        FROM foo
        INNER JOIN bar;
    """
    violations = sqlfluff.lint(sql)
    # The ambiguous wildcard should be reported exactly once, not once
    # per joined table.
    rf02_codes = [v["code"] for v in violations if v["code"] == "RF02"]
    assert rf02_codes == ["RF02"]
Verify that RF02 is only raised once for wildcard (see issue #1973).
test__rules__std_RF02_wildcard_single_count
python
sqlfluff/sqlfluff
test/rules/std_RF02_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_RF02_test.py
MIT
def test__rules__std_AL09_CP02_RF06(rules, dialect, fixed_sql, post_fix_errors):
    """Test interactions between AL09, CP02 & RF06."""
    print(f"Running with rules: {rules}")
    linter = Linter(dialect=dialect, rules=rules)
    # Fix the shared input query and compare against the expected output.
    fix_result = linter.lint_string(input_query, fix=True)
    fixed_query = fix_result.fix_string()[0]
    assert fixed_query == fixed_sql
    # Re-lint the fixed output (without fixing) and check the residual
    # violations match expectations.
    # NOTE: We should really use the rules testing utilities here
    # but they don't yet support multiple rules.
    relint_result = linter.lint_string(fixed_query, fix=False)
    assert relint_result.check_tuples() == post_fix_errors
Test interactions between AL09, CP02 & RF06.
test__rules__std_AL09_CP02_RF06
python
sqlfluff/sqlfluff
test/rules/std_AL09_CP02_RF06_combo_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_AL09_CP02_RF06_combo_test.py
MIT
def test_rules_std_LT01_and_ST02_interaction(in_sql, out_sql) -> None:
    """Test interaction between LT01 and ST02.

    NOTE(review): the original docstring referred to LT04 and ST06
    (copied from a sibling test), but the config below enables LT01 &
    ST02 — updated to match the code.
    """
    # Lint with only the two interacting rules enabled.
    cfg = FluffConfig.from_string(
        """[sqlfluff]
dialect = ansi
rules = LT01,ST02
"""
    )
    linter = Linter(config=cfg)
    # Return linted/fixed file.
    linted_file = linter.lint_string(in_sql, fix=True)
    # Check expected lint errors are raised.
    assert set([v.rule.code for v in linted_file.violations]) == {"ST02"}
    # Check file is fixed.
    assert linted_file.fix_string()[0] == out_sql
Test interaction between LT04 and ST06. Test sql with two newlines with leading commas expecting trailing.
test_rules_std_LT01_and_ST02_interaction
python
sqlfluff/sqlfluff
test/rules/std_LT01_ST02_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT01_ST02_test.py
MIT
def make_dialect_path(dialect, fname):
    """Work out how to find paths given a dialect and a file name."""
    # Dialect fixture files live under test/fixtures/dialects/<dialect>/.
    path_parts = ("test", "fixtures", "dialects", dialect, fname)
    return os.path.join(*path_parts)
Work out how to find paths given a dialect and a file name.
make_dialect_path
python
sqlfluff/sqlfluff
test/rules/std_fix_auto_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_fix_auto_test.py
MIT
def auto_fix_test(dialect, folder, caplog):
    """A test for roundtrip testing, take a file buffer, lint, fix and lint.

    This is explicitly different from the linter version of this, in that
    it uses the command line rather than the direct api.

    NOTE(review): despite the docstring above, the body below drives the
    ``Linter`` API directly (``lint_path``/``persist_changes``) rather
    than the CLI — confirm whether the docstring is stale.
    """
    # Log just the rules logger for this test.
    # NOTE: In debugging it may be instructive to enable some of
    # the other loggers listed here to debug particular issues.
    # Enabling all of them results in very long logs so use
    # wisely.
    # caplog.set_level(logging.DEBUG, logger="sqlfluff.templater")
    # caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer")
    caplog.set_level(logging.DEBUG, logger="sqlfluff.linter")
    caplog.set_level(logging.DEBUG, logger="sqlfluff.rules")

    filename = "testing.sql"
    # Lets get the path of a file to use
    # NOTE(review): the temp directory is only removed after the fixed
    # file is read below, so an earlier failure leaks it.
    tempdir_path = tempfile.mkdtemp()
    filepath = os.path.join(tempdir_path, filename)
    cfgpath = os.path.join(tempdir_path, ".sqlfluff")
    src_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "before.sql")
    cmp_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "after.sql")
    vio_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "violations.json")
    cfg_filepath = os.path.join(*base_auto_fix_path, dialect, folder, ".sqlfluff")
    test_conf_filepath = os.path.join(
        *base_auto_fix_path, dialect, folder, "test-config.yml"
    )
    # Load the config file for the test:
    with open(test_conf_filepath) as cfg_file:
        cfg = yaml.safe_load(cfg_file)
    print("## Config: ", cfg)
    rules: Optional[str] = ",".join(cfg["test-config"].get("rules")).upper()
    if "ALL" in rules:
        rules = None
    raise_on_non_linting_violations = cfg["test-config"].get(
        "raise_on_non_linting_violations", True
    )
    # Open the example file and write the content to it
    print_buff = ""
    with open(filepath, mode="w") as dest_file:
        with open(src_filepath) as source_file:
            for line in source_file:
                dest_file.write(line)
                print_buff += line
    # Copy the config file too
    try:
        with open(cfgpath, mode="w") as dest_file:
            with open(cfg_filepath) as source_file:
                print("## Config File Found.")
                for line in source_file:
                    dest_file.write(line)
    except FileNotFoundError:
        # No config file? No big deal
        print("## No Config File Found.")
        pass
    print(f"## Input file:\n{print_buff}")
    # Do we need to do a violations check?
    try:
        with open(vio_filepath) as vio_file:
            violations = json.load(vio_file)
    except FileNotFoundError:
        # No violations file. Let's not worry
        violations = None

    # Run the fix command
    overrides = {"dialect": dialect}
    if rules:
        overrides["rules"] = rules
    # Clear config caches before loading. The way we move files around
    # makes the filepath based caching inaccurate, which leads to unstable
    # test cases unless we regularly clear the cache.
    clear_config_caches()
    # NOTE: ``cfg`` is rebound here from the yaml dict to a FluffConfig.
    cfg = FluffConfig.from_root(overrides=overrides)
    lnt = Linter(config=cfg)
    res = lnt.lint_path(filepath, fix=True)
    if not res.files:
        raise ValueError("LintedDir empty: Parsing likely failed.")
    print(f"## Templated file:\n{res.tree.raw}")
    # We call the check_tuples here, even to makes sure any non-linting
    # violations are raised, and the test fails.
    vs = set(
        res.check_tuples(
            raise_on_non_linting_violations=raise_on_non_linting_violations
        )
    )
    # If we have a violations structure, let's enforce it.
    if violations:
        # Format the violations file
        expected_vs = set()
        for rule_key in violations["violations"]["linting"]:
            for elem in violations["violations"]["linting"][rule_key]:
                expected_vs.add((rule_key, *elem))
        assert expected_vs == vs
    # Actually do the fixes
    res = res.persist_changes()
    # Read the fixed file
    with open(filepath) as fixed_file:
        fixed_buff = fixed_file.read()
    # Clear up once read
    shutil.rmtree(tempdir_path)
    # Also clear the config cache again so it's not polluted for later tests.
    clear_config_caches()
    # Read the comparison file
    with open(cmp_filepath) as comp_file:
        comp_buff = comp_file.read()
    # Make sure we were successful
    assert res
    # Assert that we fixed as expected
    assert fixed_buff == comp_buff
A test for roundtrip testing, take a file buffer, lint, fix and lint. This is explicitly different from the linter version of this, in that it uses the command line rather than the direct api.
auto_fix_test
python
sqlfluff/sqlfluff
test/rules/std_fix_auto_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_fix_auto_test.py
MIT
def test__std_fix_auto(dialect, folder, caplog):
    """Automated Fixing Tests."""
    # Positional delegation to the shared auto-fix helper.
    auto_fix_test(dialect, folder, caplog)
Automated Fixing Tests.
test__std_fix_auto
python
sqlfluff/sqlfluff
test/rules/std_fix_auto_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_fix_auto_test.py
MIT
def test__rules__std_ST05_LT08_5265() -> None:
    """Tests observed conflict between ST05 & LT08.

    In this case, the moved `oops` and `another` table was created after
    the first usage. The `oops` from the `cte2` is no longer deleted.
    https://github.com/sqlfluff/sqlfluff/issues/5265
    """
    # Input: two CTEs each joining inline subqueries; the alias ``oops``
    # is used in *both* CTEs.
    sql = """
WITH
cte1 AS (
    SELECT COUNT(*) AS qty
    FROM some_table AS st
    LEFT JOIN (
        SELECT 'first' AS id
    ) AS oops
        ON st.id = oops.id
),
cte2 AS (
    SELECT COUNT(*) AS other_qty
    FROM other_table AS sot
    LEFT JOIN (
        SELECT 'middle' AS id
    ) AS another
        ON sot.id = another.id
    LEFT JOIN (
        SELECT 'last' AS id
    ) AS oops
        ON sot.id = oops.id
)

SELECT CURRENT_DATE();
"""
    # Expected output: ST05 hoists the first ``oops`` and ``another``
    # before their first usage, but leaves the second (conflicting)
    # ``oops`` subquery inline rather than deleting it.
    fixed_sql = """
WITH oops AS (
    SELECT 'first' AS id
),

cte1 AS (
    SELECT COUNT(*) AS qty
    FROM some_table AS st
    LEFT JOIN oops
        ON st.id = oops.id
),

another AS (
    SELECT 'middle' AS id
),

cte2 AS (
    SELECT COUNT(*) AS other_qty
    FROM other_table AS sot
    LEFT JOIN another
        ON sot.id = another.id
    LEFT JOIN (
        SELECT 'last' AS id
    ) AS oops
        ON sot.id = oops.id
)

SELECT CURRENT_DATE();
"""
    cfg = FluffConfig.from_kwargs(
        dialect="ansi",
        rules=["ST05", "LT08"],
    )
    result = Linter(config=cfg).lint_string(sql, fix=True)
    assert result.fix_string()[0] == fixed_sql
Tests observed conflict between ST05 & LT08. In this case, the moved `oops` and `another` table was created after the first usage. The `oops` from the `cte2` is no longer deleted. https://github.com/sqlfluff/sqlfluff/issues/5265
test__rules__std_ST05_LT08_5265
python
sqlfluff/sqlfluff
test/rules/std_ST05_LT08_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_ST05_LT08_test.py
MIT
def test__rules__std_LT02_LT11_union_all_in_subquery_lint():
    """Verify that LT11 reports lint errors in subqueries.

    (Docstring typo "Verify a that" fixed.)
    """
    # Set operators placed mid-line inside the subquery should trigger
    # LT11 (set operators on their own line).
    sql = (
        "SELECT * FROM (\n"
        "    SELECT 'g' UNION ALL\n"
        "    SELECT 'h'\n"
        "    UNION ALL SELECT 'j'\n"
        ")\n"
    )
    result = sqlfluff.lint(sql)
    assert "LT11" in [r["code"] for r in result]
Verify a that LT11 reports lint errors in subqueries.
test__rules__std_LT02_LT11_union_all_in_subquery_lint
python
sqlfluff/sqlfluff
test/rules/std_LT02_LT11_combo_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT02_LT11_combo_test.py
MIT
def test__rules__std_LT02_LT11_union_all_in_subquery_fix():
    """Verify combination of rules LT02 and LT11 produces a correct indentation."""
    sql = (
        "SELECT c FROM (\n"
        "    SELECT 'g' UNION ALL\n"
        "    SELECT 'h'\n"
        "    UNION ALL SELECT 'j'\n"
        ")\n"
    )
    # LT11 moves each UNION ALL onto its own line; LT02 keeps the
    # subquery body indentation consistent while doing so.
    fixed_sql = (
        "SELECT c FROM (\n"
        "    SELECT 'g'\n"
        "    UNION ALL\n"
        "    SELECT 'h'\n"
        "    UNION ALL\n"
        "    SELECT 'j'\n"
        ")\n"
    )
    result = sqlfluff.fix(sql)
    assert result == fixed_sql
Verify combination of rules LT02 and LT11 produces a correct indentation.
test__rules__std_LT02_LT11_union_all_in_subquery_fix
python
sqlfluff/sqlfluff
test/rules/std_LT02_LT11_combo_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT02_LT11_combo_test.py
MIT
def test__rules__std_CV09_raised() -> None:
    """CV09 is raised for use of blocked words with correct error message."""
    sql = "SELECT MYOLDFUNCTION(col1) FROM deprecated_table;\n"
    # Configure the blocked word list for the convention.blocked_words
    # rule (matching is case-insensitive, lowercase in config).
    cfg = FluffConfig(overrides={"dialect": "ansi"})
    cfg.set_value(
        config_path=["rules", "convention.blocked_words", "blocked_words"],
        val="myoldfunction,deprecated_table",
    )
    linter = Linter(config=cfg)
    records = linter.lint_string_wrapped(sql).as_records()
    violations = records[0]["violations"]
    # One violation per blocked word, reporting the original casing.
    descriptions = [v["description"] for v in violations]
    assert descriptions == [
        "Use of blocked word 'MYOLDFUNCTION'.",
        "Use of blocked word 'deprecated_table'.",
    ]
CV09 is raised for use of blocked words with correct error message.
test__rules__std_CV09_raised
python
sqlfluff/sqlfluff
test/rules/std_CV09_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_CV09_test.py
MIT
def test__rules__std_LT04_unparseable(): """Verify that LT04 doesn't try to fix queries with parse errors. This has been observed to frequently cause syntax errors, especially in combination with Jinja templating, e.g. undefined template variables. """ # This example comes almost directly from a real-world example. The user # accidentally ran "sqlfluff fix" without defining # "readability_features_numeric" and "readability_features_count_list", and # doing so corrupted their query. sql = """ SELECT user_id, campaign_id, business_type, SPLIT(intents, ",") AS intent_list, {% for feature in readability_features_numeric %} CAST(JSON_EXTRACT(readability_scores, '$.data.{{feature}}') AS float64) AS {{feature}} {% if not loop.last %} , {% endif %} {% endfor %}, {% for feature in readability_features_count_list %} CAST(JSON_EXTRACT(asset_structure, '$.{{feature}}') AS float64) AS {{feature}}_count {% if not loop.last %} , {% endif %} {% endfor %}, track_clicks_text, track_clicks_html FROM t """ result = sqlfluff.lint(sql) assert "LT04" not in [r["code"] for r in result]
Verify that LT04 doesn't try to fix queries with parse errors. This has been observed to frequently cause syntax errors, especially in combination with Jinja templating, e.g. undefined template variables.
test__rules__std_LT04_unparseable
python
sqlfluff/sqlfluff
test/rules/std_LT04_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT04_test.py
MIT
def test__rules__std_LT12_and_CV06_interaction() -> None:
    """Test interaction between LT12 and CV06 doesn't stop CV06 from being applied."""
    # Source with neither a trailing newline (LT12) nor a final
    # semicolon (CV06).
    sql = "SELECT foo FROM bar"
    # Ensure final semicolon requirement is active.
    cfg = FluffConfig(overrides={"dialect": "ansi"})
    cfg.set_value(
        config_path=["rules", "convention.terminator", "require_final_semicolon"],
        val=True,
    )
    linted_file = Linter(config=cfg).lint_string(sql, fix=True)
    # Both rules should fire...
    observed_codes = {violation.rule.code for violation in linted_file.violations}
    assert observed_codes == {"LT12", "CV06"}
    # ...and the combined fix applies the semicolon AND the newline.
    assert linted_file.fix_string()[0] == "SELECT foo FROM bar;\n"
Test interaction between LT12 and CV06 doesn't stop CV06 from being applied.
test__rules__std_LT12_and_CV06_interaction
python
sqlfluff/sqlfluff
test/rules/std_LT12_CV06_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT12_CV06_test.py
MIT
def test_rules_std_LT04_and_ST06_interaction_trailing(in_sql, out_sql) -> None:
    """Check LT04 and ST06 cooperate when converting to trailing commas.

    The parametrised input uses leading commas across two newlines; the
    fixed output is expected to use trailing commas.
    """
    config = FluffConfig.from_string(
        """[sqlfluff]
dialect = ansi
rules = LT04, ST06
"""
    )
    linted_file = Linter(config=config).lint_string(in_sql, fix=True)
    # Both rules should trigger on the input...
    assert {violation.rule.code for violation in linted_file.violations} == {
        "LT04",
        "ST06",
    }
    # ...and the fixed string should match the expected output exactly.
    assert linted_file.fix_string()[0] == out_sql
Test interaction between LT04 and ST06. Test sql with two newlines with leading commas expecting trailing.
test_rules_std_LT04_and_ST06_interaction_trailing
python
sqlfluff/sqlfluff
test/rules/std_LT04_ST06_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT04_ST06_test.py
MIT
def test_rules_std_LT02_LT04_interaction_indentation_leading(in_sql, out_sql) -> None:
    """Check LT02 and LT04 cooperate when converting to leading commas.

    The parametrised input uses trailing commas across two newlines; the
    fixed output is expected to use leading commas.
    """
    config = FluffConfig.from_string(
        """[sqlfluff]
dialect = snowflake
rules = LT02, LT04

[sqlfluff:layout:type:comma]
spacing_before = touch
line_position = leading
"""
    )
    linted_file = Linter(config=config).lint_string(in_sql, fix=True)
    # Only the comma-position rule should fire...
    assert {violation.rule.code for violation in linted_file.violations} == {"LT04"}
    # ...and the fixed string should match the expected output exactly.
    assert linted_file.fix_string()[0] == out_sql
Test interaction between LT02 and LT04. Test sql with two newlines with trailing commas expecting leading.
test_rules_std_LT02_LT04_interaction_indentation_leading
python
sqlfluff/sqlfluff
test/rules/std_LT02_LT04_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT02_LT04_test.py
MIT
def test__rules__std_RF01_LT09_copy() -> None:
    """Check RF01 and LT09 no longer conflict on DISTINCT queries.

    Regression test for https://github.com/sqlfluff/sqlfluff/issues/5203
    where the combination raised an unexpected exception.
    """
    sql = """
SELECT DISTINCT `FIELD`
FROM `TABLE`;
"""
    cfg = FluffConfig.from_kwargs(
        dialect="mysql",
        rules=["RF01", "LT09"],
    )
    lint_result = Linter(config=cfg).lint_string(sql)
    # No rule crash may leak into the violation descriptions.
    for violation in lint_result.violations:
        assert "Unexpected exception" not in violation.description
    # Exactly one genuine violation remains, and it is LT09.
    assert len(lint_result.violations) == 1
    assert lint_result.violations[0].rule_code() == "LT09"
Tests observed conflict between RF01 & LT09. https://github.com/sqlfluff/sqlfluff/issues/5203
test__rules__std_RF01_LT09_copy
python
sqlfluff/sqlfluff
test/rules/std_RF01_LT09_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_RF01_LT09_test.py
MIT
def test__rules__std_LT01_single_raise() -> None:
    """Check LT01 fires exactly once for a comma missing whitespace.

    This query used to triple-count LT01 before previously fixed commas
    were memoised (issue #2001).
    """
    sql = """
SELECT
    col_a AS a
    ,col_b AS b
FROM foo;
"""
    lint_results = sqlfluff.lint(sql, rules=["LT01", "LT04"])
    codes = [record["code"] for record in lint_results]
    assert codes.count("LT01") == 1
    assert codes.count("LT04") == 1
Test case for multiple LT01 errors raised when no post comma whitespace.
test__rules__std_LT01_single_raise
python
sqlfluff/sqlfluff
test/rules/std_LT01_LT04_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT01_LT04_test.py
MIT
def test__rules__std_LT03_default():
    """Check LT03's error message under the default operator position.

    The trailing "AND" should be flagged, with the message asking for a
    leading operator (the default).
    """
    sql = """
SELECT
    a,
    b
FROM foo
WHERE
    a = 1 AND
    b = 2
"""
    lint_results = sqlfluff.lint(sql)
    assert "LT03" in [record["code"] for record in lint_results]
    assert EXPECTED_LEADING_MESSAGE in [
        record["description"] for record in lint_results
    ]
Verify that LT03 returns the correct error message for default (trailing).
test__rules__std_LT03_default
python
sqlfluff/sqlfluff
test/rules/std_LT03_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT03_test.py
MIT
def test__rules__std_LT03_leading():
    """Check LT03's error message when leading operators are configured."""
    sql = """
SELECT
    a,
    b
FROM foo
WHERE
    a = 1 AND
    b = 2
"""
    config = FluffConfig(
        configs={"layout": {"type": {"binary_operator": {"line_position": "leading"}}}},
        overrides={"dialect": "ansi"},
    )
    # The simple sqlfluff.lint API can't take a config object, so mirror
    # what it does internally with a Linter.
    linter = Linter(config=config)
    violations = linter.lint_string_wrapped(sql).as_records()[0]["violations"]
    assert "LT03" in [record["code"] for record in violations]
    assert EXPECTED_LEADING_MESSAGE in [record["description"] for record in violations]
Verify correct error message when leading is used.
test__rules__std_LT03_leading
python
sqlfluff/sqlfluff
test/rules/std_LT03_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT03_test.py
MIT
def test__rules__std_LT03_trailing():
    """Check LT03's error message when trailing operators are configured."""
    sql = """
SELECT
    a,
    b
FROM foo
WHERE
    a = 1
    AND b = 2
"""
    config = FluffConfig(
        configs={
            "layout": {"type": {"binary_operator": {"line_position": "trailing"}}}
        },
        overrides={"dialect": "ansi"},
    )
    # The simple sqlfluff.lint API can't take a config object, so mirror
    # what it does internally with a Linter.
    linter = Linter(config=config)
    violations = linter.lint_string_wrapped(sql).as_records()[0]["violations"]
    assert "LT03" in [record["code"] for record in violations]
    assert EXPECTED_TRAILING_MESSAGE in [record["description"] for record in violations]
Verify correct error message when trailing is used.
test__rules__std_LT03_trailing
python
sqlfluff/sqlfluff
test/rules/std_LT03_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/rules/std_LT03_test.py
MIT
def test__api__lint_string_without_violations():
    """A clean query should lint with an empty result list."""
    assert sqlfluff.lint("select column from table\n") == []
Check lint functionality when there is no violation.
test__api__lint_string_without_violations
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__lint_string():
    """Lint a known-bad query and check the type and content of the result."""
    result = sqlfluff.lint(my_bad_query)
    # The simple API returns a list of plain dicts...
    assert isinstance(result, list)
    assert all(isinstance(record, dict) for record in result)
    # ...matching the expected violations exactly.
    assert result == lint_result
Basic checking of lint functionality.
test__api__lint_string
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__lint_string_specific():
    """Lint with an explicit rule allow-list; only those rules may fire."""
    rules = ["CP02", "LT12"]
    result = sqlfluff.lint(my_bad_query, rules=rules)
    # Every returned violation must come from one of the requested rules.
    assert all(elem["code"] in rules for elem in result)
Basic checking of lint functionality.
test__api__lint_string_specific
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__lint_string_specific_single():
    """Lint with a single-rule allow-list; only that rule may fire."""
    rules = ["CP02"]
    result = sqlfluff.lint(my_bad_query, rules=rules)
    # Every returned violation must come from the single requested rule.
    assert all(elem["code"] in rules for elem in result)
Basic checking of lint functionality.
test__api__lint_string_specific_single
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__lint_string_specific_exclude():
    """Lint with exclusions covering all but one failing rule."""
    exclude_rules = ["LT12", "CP01", "AL03", "CP02", "LT09", "LT01"]
    result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules)
    # With everything else excluded, only AM04 should remain.
    assert len(result) == 1
    assert result[0]["code"] == "AM04"
Basic checking of lint functionality.
test__api__lint_string_specific_exclude
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__lint_string_specific_exclude_single():
    """Lint with a single rule excluded; all other failing rules remain."""
    exclude_rules = ["LT01"]
    result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules)
    # Excluding only LT01 leaves nine violations spread across the six
    # remaining rule codes checked below.
    assert len(result) == 9
    assert set(["LT12", "CP01", "AL03", "CP02", "LT09", "AM04"]) == set(
        [r["code"] for r in result]
    )
Basic checking of lint functionality.
test__api__lint_string_specific_exclude_single
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__lint_string_specific_exclude_all_failed_rules():
    """Excluding every failing rule should yield a clean lint result."""
    exclude_rules = ["LT12", "CP01", "AL03", "CP02", "LT09", "LT01", "AM04"]
    assert sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules) == []
Basic checking of lint functionality.
test__api__lint_string_specific_exclude_all_failed_rules
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__fix_string():
    """Fix a known-bad query and check the type and content returned."""
    result = sqlfluff.fix(my_bad_query)
    # The simple API returns the fixed SQL as a plain string...
    assert isinstance(result, str)
    # ...fully reformatted across multiple lines.
    assert (
        result
        == """SELECT
    *,
    1,
    blah AS foo
FROM mytable
"""
    )
Basic checking of lint functionality.
test__api__fix_string
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__fix_string_specific():
    """Fix with a single-rule allow-list; only CP01 fixes are applied."""
    fixed = sqlfluff.fix(my_bad_query, rules=["CP01"])
    # Keywords are uppercased; nothing else is touched.
    assert fixed == "SELECT *, 1, blah AS fOO FROM myTable"
Basic checking of lint functionality with a specific rule.
test__api__fix_string_specific
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__fix_string_specific_exclude():
    """Fix with LT09 excluded; the query should stay on a single line."""
    fixed = sqlfluff.fix(my_bad_query, exclude_rules=["LT09"])
    assert fixed == "SELECT *, 1, blah AS foo FROM mytable\n"
Basic checking of lint functionality with a specific rule exclusion.
test__api__fix_string_specific_exclude
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__fix_string_unparsable():
    """Fixing an unparsable query should leave it unchanged by default."""
    # "! 3" is a parse error, so with the default fix_even_unparsable=False
    # no fixes (not even CP01 keyword capitalisation) may be applied.
    bad_query = """SELECT my_col
FROM my_schema.my_table
where processdate ! 3"""
    result = sqlfluff.fix(bad_query, rules=["CP01"])
    # Check fix result: should be unchanged because of the parse error.
    assert result == bad_query
Test behavior with parse errors.
test__api__fix_string_unparsable
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__fix_string_unparsable_fix_even_unparsable():
    """With fix_even_unparsable=True, fixes apply despite parse errors."""
    bad_query = """SELECT my_col
FROM my_schema.my_table
where processdate ! 3"""
    result = sqlfluff.fix(bad_query, rules=["CP01"], fix_even_unparsable=True)
    # Check fix result: should be fixed because we overrode fix_even_unparsable.
    # CP01 uppercases "where" even though "! 3" still fails to parse.
    assert (
        result
        == """SELECT my_col
FROM my_schema.my_table
WHERE processdate ! 3"""
    )
Test behavior with parse errors.
test__api__fix_string_unparsable_fix_even_unparsable
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__parse_string():
    """Parse a query and compare the JSON tree to a stored fixture."""
    parsed = sqlfluff.parse(my_bad_query)
    # The simple API returns a JSON-style dict.
    assert isinstance(parsed, dict)
    # Compare against the expected parse tree stored on disk.
    with open("test/fixtures/api/parse_test/parse_test.json", "r") as f:
        expected_parsed = json.load(f)
    assert parsed == expected_parsed
Basic checking of parse functionality.
test__api__parse_string
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__parse_fail():
    """Parsing invalid SQL raises APIParsingError with readable details."""
    try:
        sqlfluff.parse("Select (1 + 2 +++) FROM mytable as blah blah")
        pytest.fail("sqlfluff.parse should have raised an exception.")
    except Exception as err:
        # Check it's the right kind of exception
        assert isinstance(err, sqlfluff.api.APIParsingError)
        # Check there are two violations in there.
        assert len(err.violations) == 2
        # Check it prints nicely: one summary line, then one line per
        # unparsable section with its position.
        assert (
            str(err)
            == """Found 2 issues while parsing string.
Line 1, Position 15: Found unparsable section: '+++'
Line 1, Position 41: Found unparsable section: 'blah'"""
        )
Basic failure mode of parse functionality.
test__api__parse_fail
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__config_path():
    """The simple API should honour an explicit config_path argument."""
    # Load the test SQL fixture.
    with open("test/fixtures/api/config_path_test/config_path_test.sql", "r") as f:
        sql = f.read()
    # Parse using the extra config file.
    parsed = sqlfluff.parse(
        sql,
        config_path="test/fixtures/api/config_path_test/extra_configs/.sqlfluff",
    )
    # Compare against the expected parse tree stored on disk.
    with open("test/fixtures/api/config_path_test/config_path_test.json", "r") as f:
        expected_parsed = json.load(f)
    assert parsed == expected_parsed
Test that we can load a specified config file in the Simple API.
test__api__config_path
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__config_override(kwargs, expected, tmpdir):
    """Keyword arguments to lint() should override .sqlfluff (or not)."""
    config_path = "test/fixtures/api/config_override/.sqlfluff"
    sql = "SELECT TRIM(name) AS name FROM some_table"
    lint_results = sqlfluff.lint(sql, config_path=config_path, **kwargs)
    # Only the two reference rules are of interest here.
    observed_codes = {record["code"] for record in lint_results}
    assert expected == {"RF02", "RF04"} & observed_codes
Test that parameters to lint() override .sqlfluff correctly (or not).
test__api__config_override
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__invalid_dialect():
    """An unknown dialect name should raise SQLFluffUserError."""
    # Load the test SQL fixture.
    with open("test/fixtures/api/config_path_test/config_path_test.sql", "r") as f:
        sql = f.read()
    # A made-up dialect should be rejected with a clear message.
    with pytest.raises(SQLFluffUserError) as excinfo:
        sqlfluff.parse(
            sql,
            dialect="not_a_real_dialect",
            config_path="test/fixtures/api/config_path_test/extra_configs/.sqlfluff",
        )
    assert str(excinfo.value) == "Error: Unknown dialect 'not_a_real_dialect'"
Test that SQLFluffUserError is raised for a bad dialect.
test__api__invalid_dialect
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__parse_exceptions():
    """Templater and parser failures both surface as APIParsingError."""
    # Parsable content returns a truthy result.
    assert sqlfluff.parse("SELECT 1")
    # A templater failure raises...
    with pytest.raises(APIParsingError):
        sqlfluff.parse('SELECT {{ 1 > "a"}}')
    # ...and so does a template success followed by a parse failure.
    with pytest.raises(APIParsingError):
        sqlfluff.parse("THIS IS NOT SQL")
Test parse behaviour with errors.
test__api__parse_exceptions
python
sqlfluff/sqlfluff
test/api/simple_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/simple_test.py
MIT
def test__api__lexer():
    """Lex a simple query and check the tokens produced."""
    tokens, violations = Lexer(dialect="ansi").lex(test_query)
    assert violations == []
    assert isinstance(tokens, tuple)
    # The final empty string is the end-of-file marker.
    assert [token.raw for token in tokens] == ["SELECt", " ", "1", ""]
Basic checking of lexing functionality.
test__api__lexer
python
sqlfluff/sqlfluff
test/api/classes_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/classes_test.py
MIT
def test__api__parser():
    """Parsing lexed tokens should round-trip the raw query text."""
    lexed_tokens, _ = Lexer(dialect="ansi").lex(test_query)
    tree = Parser(dialect="ansi").parse(lexed_tokens)
    assert tree.raw == test_query
Basic checking of parsing functionality.
test__api__parser
python
sqlfluff/sqlfluff
test/api/classes_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/classes_test.py
MIT
def test__api__linter_lint():
    """Linting a parsed tree should flag the expected rule codes."""
    lexed_tokens, _ = Lexer(dialect="ansi").lex(test_query)
    tree = Parser(dialect="ansi").parse(lexed_tokens)
    violations = Linter(dialect="ansi").lint(tree)
    assert [violation.rule.code for violation in violations] == ["CP01", "LT12"]
Basic checking of parsing functionality.
test__api__linter_lint
python
sqlfluff/sqlfluff
test/api/classes_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/classes_test.py
MIT
def test__api__linter_fix():
    """Fixing a parsed tree should return the corrected SQL."""
    lexed_tokens, _ = Lexer(dialect="ansi").lex(test_query)
    tree = Parser(dialect="ansi").parse(lexed_tokens)
    fixed_tree, _ = Linter(dialect="ansi").fix(tree)
    assert fixed_tree.raw == "SELECT 1\n"
Basic checking of parsing functionality.
test__api__linter_fix
python
sqlfluff/sqlfluff
test/api/classes_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/classes_test.py
MIT
def test__api__info_dialects():
    """Check list_dialects() returns sensible dialect metadata."""
    dialects = sqlfluff.list_dialects()
    assert isinstance(dialects, list)
    # Index by label so individual dialects can be looked up.
    by_label = {dialect.label: dialect for dialect in dialects}
    # The ansi dialect is the root of the hierarchy.
    assert "ansi" in by_label
    ansi = by_label["ansi"]
    assert ansi.label == "ansi"
    assert ansi.name == "ANSI"
    assert ansi.inherits_from == "nothing"
    assert "This is the base dialect" in ansi.docstring
    # Spot-check one dialect which inherits from ansi.
    assert "postgres" in by_label
    postgres = by_label["postgres"]
    assert postgres.label == "postgres"
    assert postgres.name == "PostgreSQL"
    assert postgres.inherits_from == "ansi"
    assert "this is often the dialect to use" in postgres.docstring
Basic checking of dialect listing functionality.
test__api__info_dialects
python
sqlfluff/sqlfluff
test/api/info_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/info_test.py
MIT
def test__api__info_rules():
    """Check list_rules() includes the expected LT01 rule tuple."""
    rules = sqlfluff.list_rules()
    assert isinstance(rules, list)
    # LT01 should be present with its name, groups and legacy aliases.
    assert (
        RuleTuple(
            code="LT01",
            name="layout.spacing",
            description="Inappropriate Spacing.",
            groups=("all", "core", "layout"),
            aliases=(
                "L001",
                "L005",
                "L006",
                "L008",
                "L023",
                "L024",
                "L039",
                "L048",
                "L071",
            ),
        )
        in rules
    )
Basic checking of rule listing functionality.
test__api__info_rules
python
sqlfluff/sqlfluff
test/api/info_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/api/info_test.py
MIT