Dataset columns: code, docstring, func_name, language, repo, path, url, license.
def respace_point( self, prev_block: Optional[ReflowBlock], next_block: Optional[ReflowBlock], root_segment: BaseSegment, lint_results: List[LintResult], strip_newlines: bool = False, anchor_on: str = "before", ) -> Tuple[List[LintResult], "ReflowPoint"]: """Respace a point based on given constraints. NB: This effectively includes trailing whitespace fixes. Deletion and edit fixes are generated immediately, but creations are paused to the end and done in bulk so as not to generate conflicts. Note that the `strip_newlines` functionality exists here as a slight exception to pure respacing, but as a very simple case of positioning line breaks. The default operation of `respace` does not enable it, however it exists as a convenience for rules which wish to use it. """ existing_results = lint_results[:] pre_constraint, post_constraint, strip_newlines = determine_constraints( prev_block, next_block, strip_newlines ) reflow_logger.debug("* Respacing: %r @ %s", self.raw, self.pos_marker) # The buffer is used to create the new reflow point to return segment_buffer, last_whitespace, new_results = process_spacing( list(self.segments), strip_newlines ) # Check for final trailing whitespace (which otherwise looks like an indent). if next_block and "end_of_file" in next_block.class_types and last_whitespace: new_results.append( LintResult( last_whitespace, [LintFix.delete(last_whitespace)], description="Unnecessary trailing whitespace at end of file.", ) ) segment_buffer.remove(last_whitespace) last_whitespace = None # Is there a newline? # NOTE: We do this based on the segment buffer rather than self.class_types # because we may have just removed any present newlines in the buffer. if ( any(seg.is_type("newline") for seg in segment_buffer) and not strip_newlines ) or (next_block and "end_of_file" in next_block.class_types): # Most of this section should be handled as _Indentation_. # BUT: There is one case we should handle here. # If we find that the last whitespace has a newline # before it, and the position markers imply there was # a removal between them, then remove the whitespace. # This ensures a consistent indent. if last_whitespace: ws_idx = self.segments.index(last_whitespace) if ws_idx > 0: # NOTE: Iterate by index so that we don't slice the full range. for prev_seg_idx in range(ws_idx - 1, -1, -1): prev_seg = self.segments[prev_seg_idx] # Skip past any indents if not prev_seg.is_type("indent"): break if ( prev_seg.is_type("newline") # Not just unequal. Must be actively _before_. # NOTE: Based on working locations and prev_seg.get_end_loc() < last_whitespace.get_start_loc() ): reflow_logger.debug( " Removing non-contiguous whitespace post removal." ) segment_buffer.remove(last_whitespace) # Ideally we should attach to an existing result. # To do that effectively, we should look for the removed # segment in the existing results. temp_idx = last_whitespace.pos_marker.templated_slice.start for res in existing_results: if ( res.anchor and res.anchor.pos_marker and res.anchor.pos_marker.templated_slice.stop == temp_idx ): break else: # pragma: no cover raise NotImplementedError("Could not find removal result.") existing_results.remove(res) new_results.append( LintResult( res.anchor, fixes=res.fixes + [LintFix("delete", last_whitespace)], description=res.description, ) ) # Return the results. return existing_results + new_results, ReflowPoint(tuple(segment_buffer)) # Otherwise is this an inline case? (i.e. no newline) reflow_logger.debug( " Inline case. Constraints: %s <-> %s.", pre_constraint, post_constraint, ) # Do we at least have _some_ whitespace? if last_whitespace: # We do - is it the right size? segment_buffer, results = handle_respace__inline_with_space( pre_constraint, post_constraint, prev_block, next_block, root_segment, segment_buffer, last_whitespace, ) new_results.extend(results) else: # No. Should we insert some? # NOTE: This method operates on the existing fix buffer. segment_buffer, new_results, edited = handle_respace__inline_without_space( pre_constraint, post_constraint, prev_block, next_block, segment_buffer, existing_results + new_results, anchor_on=anchor_on, ) existing_results = [] if edited: reflow_logger.debug(" Modified result buffer: %s", new_results) # Only log if we actually made a change. if new_results: reflow_logger.debug(" New Results: %s", new_results) return existing_results + new_results, ReflowPoint(tuple(segment_buffer))
Respace a point based on given constraints. NB: This effectively includes trailing whitespace fixes. Deletion and edit fixes are generated immediately, but creations are paused to the end and done in bulk so as not to generate conflicts. Note that the `strip_newlines` functionality exists here as a slight exception to pure respacing, but as a very simple case of positioning line breaks. The default operation of `respace` does not enable it, however it exists as a convenience for rules which wish to use it.
respace_point
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def as_str(self) -> str: """String representation for logging/testing.""" return self.selectable.raw
String representation for logging/testing.
as_str
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def select_info(self) -> Optional[SelectStatementColumnsAndTables]: """Returns SelectStatementColumnsAndTables on the SELECT.""" if self.selectable.is_type("select_statement"): return get_select_statement_info( self.selectable, self.dialect, early_exit=False ) else: # DML or values_clause # This is a bit dodgy, but a very useful abstraction. Here, we # interpret a DML or values_clause segment as if it were a SELECT. # Someday, we may need to tweak this, e.g. perhaps add a separate # QueryType for this (depending on the needs of the rules that use # it). # # For more info on the syntax and behavior of VALUES and its # similarity to a SELECT statement with literal values (no table # source), see the "Examples" section of the Postgres docs page: # (https://www.postgresql.org/docs/8.2/sql-values.html). values = Segments(self.selectable) alias_expression = values.children().first(sp.is_type("alias_expression")) name = alias_expression.children().first( sp.is_type("naked_identifier", "quoted_identifier") ) alias_info = AliasInfo( name[0].raw if name else "", name[0] if name else None, bool(name), self.selectable, alias_expression[0] if alias_expression else None, None, ) return SelectStatementColumnsAndTables( select_statement=self.selectable, table_aliases=[alias_info], standalone_aliases=[], reference_buffer=[], select_targets=[], col_aliases=[], using_cols=[], table_reference_buffer=[], )
Returns SelectStatementColumnsAndTables on the SELECT.
select_info
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def get_wildcard_info(self) -> List[WildcardInfo]: """Find wildcard (*) targets in the SELECT.""" buff: List[WildcardInfo] = [] # Some select-like statements don't have select_info # (e.g. test_exasol_invalid_foreign_key_from) if not self.select_info: # pragma: no cover # TODO: Review whether to remove this. # Restructure of Exasol dialect means it no longer applies. return buff for seg in self.select_info.select_targets: if seg.get_child("wildcard_expression"): if "." in seg.raw: # The wildcard specifies a target table. table = seg.raw.rsplit(".", 1)[0] buff.append(WildcardInfo(seg, [table])) else: # The wildcard is unqualified (i.e. does not specify a # table). This means to include all columns from all the # tables in the query. buff.append( WildcardInfo( seg, [ ( alias_info.ref_str if alias_info.aliased else alias_info.from_expression_element.raw ) for alias_info in self.select_info.table_aliases if alias_info.ref_str ], ) ) return buff
Find wildcard (*) targets in the SELECT.
get_wildcard_info
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def find_alias(self, table: str) -> Optional[AliasInfo]: """Find corresponding table_aliases entry (if any) matching "table".""" alias_info = [ t for t in (self.select_info.table_aliases if self.select_info else []) if t.aliased and t.ref_str == table ] assert len(alias_info) <= 1 return alias_info[0] if alias_info else None
Find corresponding table_aliases entry (if any) matching "table".
find_alias
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def children(self: T) -> List[T]: """Children could be CTEs, subselects or Others.""" return list(self.ctes.values()) + self.subqueries
Children could be CTEs, subselects or Others.
children
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def as_dict(self: T) -> Dict: """Dict representation for logging/testing.""" result: Dict[str, Union[str, List[str], Dict, List[Dict]]] = {} if self.query_type != QueryType.Simple: result["query_type"] = self.query_type.name if self.selectables: result["selectables"] = [s.as_str() for s in self.selectables] if self.ctes: result["ctes"] = {k: v.as_dict() for k, v in self.ctes.items()} if self.subqueries: result["subqueries"] = [q.as_dict() for q in self.subqueries] return result
Dict representation for logging/testing.
as_dict
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def lookup_cte(self: T, name: str, pop: bool = True) -> Optional[T]: """Look up a CTE by name, in the current or any parent scope.""" cte = self.ctes.get(name.upper()) if cte: if pop: del self.ctes[name.upper()] return cte if self.parent: return self.parent.lookup_cte(name, pop) else: return None
Look up a CTE by name, in the current or any parent scope.
lookup_cte
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
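The parent fallback in `lookup_cte` above is a plain chain-of-responsibility walk up enclosing scopes. A minimal standalone sketch of the same pattern, with a hypothetical `Scope` class standing in for `Query` (not part of sqlfluff):

from typing import Dict, Optional

class Scope:
    # Hypothetical scope holding named entries plus an optional parent scope.
    def __init__(self, entries: Dict[str, str], parent: Optional["Scope"] = None):
        self.entries = entries
        self.parent = parent

    def lookup(self, name: str, pop: bool = True) -> Optional[str]:
        key = name.upper()  # names are stored upper-cased, as in lookup_cte
        if key in self.entries:
            # Optionally consume the entry so it can't be matched twice.
            return self.entries.pop(key) if pop else self.entries[key]
        # Not found here: defer to the enclosing scope, if any.
        return self.parent.lookup(name, pop) if self.parent else None

outer = Scope({"CTE_A": "select 1"})
inner = Scope({"CTE_B": "select 2"}, parent=outer)
assert inner.lookup("cte_a") == "select 1"  # resolved via the parent scope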
def crawl_sources( self: T, segment: BaseSegment, recurse_into: bool = True, pop: bool = False, lookup_cte: bool = True, ) -> Iterator[Union[str, T]]: """Find SELECTs, table refs, or value table function calls in segment. For each SELECT, yield a list of Query objects. As we find table references or function call strings, yield those. """ found_nested_select = False for seg in segment.recursive_crawl( "table_reference", "set_expression", "select_statement", "values_clause", recurse_into=False, allow_self=False, ): # Crawl efficiently, don't recurse here. We do that later. # What do we have? # 1. If it's a table reference, work out whether it's to a CTE # or to an external table. if seg.is_type("table_reference"): _seg = cast(ObjectReferenceSegment, seg) if not _seg.is_qualified() and lookup_cte: cte = self.lookup_cte(_seg.raw, pop=pop) if cte: # It's a CTE. yield cte # It's an external table reference. yield _seg.raw # 2. If it's some kind of more complex expression which is still # valid in this position, generate an appropriate sub-select. else: assert seg.is_type( "set_expression", "select_statement", "values_clause" ) found_nested_select = True # Generate a subquery, referencing the current query # as the parent. yield self.__class__.from_segment(seg, self.dialect, parent=self) if not found_nested_select: # If we reach here, the SELECT may be querying from a value table # function, e.g. UNNEST(). For our purposes, this is basically the # same as an external table. Return the "table" part as a string. table_expr = segment.get_child("table_expression") if table_expr: yield table_expr.raw
Find SELECTs, table refs, or value table function calls in segment. For each SELECT, yield a list of Query objects. As we find table references or function call strings, yield those.
crawl_sources
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def _extract_subqueries( cls: Type[T], selectable: Selectable, dialect: Dialect ) -> Iterator[T]: """Given a Selectable, extract subqueries.""" assert selectable.selectable.is_type( *SELECTABLE_TYPES, *SUBSELECT_TYPES, ), f"Found unexpected {selectable.selectable}" # For MERGE, UPDATE & DELETE, we should expect to find a sub select. for subselect in selectable.selectable.recursive_crawl( *SELECTABLE_TYPES, recurse_into=False, allow_self=False, ): # NOTE: We don't need to set the parent here, because it will # be set when attached to the parent later. yield cls.from_segment(subselect, dialect=dialect)
Given a Selectable, extract subqueries.
_extract_subqueries
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def from_root(cls: Type[T], root_segment: BaseSegment, dialect: Dialect) -> T: """Given a root segment, find the first appropriate selectable and analyse.""" selectable_segment = next( # Could be a Selectable or a MERGE root_segment.recursive_crawl(*SELECTABLE_TYPES, "merge_statement"), None, ) assert selectable_segment, f"No selectable found in {root_segment.raw!r}." return cls.from_segment(selectable_segment, dialect=dialect)
Given a root segment, find the first appropriate selectable and analyse.
from_root
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def from_segment( cls: Type[T], segment: BaseSegment, dialect: Dialect, parent: Optional[T] = None, ) -> T: """Recursively generate a query from an appropriate segment.""" assert segment.is_type( *SELECTABLE_TYPES, *SUBSELECT_TYPES ), f"Invalid segment for `from_segment`: {segment}" selectables = [] subqueries = [] cte_defs: List[BaseSegment] = [] query_type = QueryType.Simple if segment.is_type("select_statement", *SUBSELECT_TYPES): # It's a select. Instantiate a Query. selectables = [Selectable(segment, dialect=dialect)] elif segment.is_type("set_expression"): # It's a set expression. There may be multiple selectables. for _seg in segment.recursive_crawl("select_statement", recurse_into=False): selectables.append(Selectable(_seg, dialect=dialect)) else: # Otherwise it's a WITH statement. assert segment.is_type("with_compound_statement") query_type = QueryType.WithCompound for _seg in segment.recursive_crawl( # NOTE: We don't _specify_ set expressions here, because # all set expressions are made of selects, and we # want to look straight through to those child # expressions. "select_statement", recurse_into=False, no_recursive_seg_type="common_table_expression", ): selectables.append(Selectable(_seg, dialect=dialect)) # We also need to handle CTEs for _seg in segment.recursive_crawl( "common_table_expression", recurse_into=False, # Don't recurse into any other WITH statements. no_recursive_seg_type="with_compound_statement", ): # Just store the segments for now. cte_defs.append(_seg) # Extract subqueries from any selectables. for selectable in selectables: # NOTE: If any VALUES clauses are present, they pass through here # safely without Exception. They won't yield any subqueries. subqueries += list(cls._extract_subqueries(selectable, dialect)) # Instantiate the query outer_query = cls( query_type, dialect, selectables, parent=parent, subqueries=subqueries, ) # If we don't have any CTEs, we can stop now. if not cte_defs: return outer_query # Otherwise build up the CTE map. ctes = {} for cte in cte_defs: # NOTE: This feels a little risky to just assume the first segment # is the name, but it's the same functionality we've run with for # a while. name_seg = cte.segments[0] name = name_seg.raw_normalized(False).upper() # Get the query out of it, just stop on the first one we find. try: inner_qry = next( cte.recursive_crawl( *SELECTABLE_TYPES, "values_clause", # Very rarely, we might find things like update # clauses in here, handle them accordingly. *SUBSELECT_TYPES, ), ) # If this fails it's because we didn't find anything "selectable" # in the CTE. Flag this up, but then carry on. It's likely something # strange (e.g. a Clickhouse WITH clause setting a with). except StopIteration: # pragma: no cover # Log it as an issue, but otherwise skip this one. analysis_logger.info(f"Skipping unexpected CTE structure: {cte.raw!r}") continue qry = cls.from_segment(inner_qry, dialect=dialect, parent=outer_query) assert qry # Populate the CTE specific args. qry.cte_definition_segment = cte qry.cte_name_segment = name_seg # File it in the dictionary. ctes[name] = qry # Set the CTEs attribute on the outer. # NOTE: Because we're setting this after instantiation, it's important # that we've already set the `parent` value of the cte queries. outer_query.ctes = ctes return outer_query
Recursively generate a query from an appropriate segment.
from_segment
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def get_select_statement_info( segment: BaseSegment, dialect: Optional[Dialect], early_exit: bool = True ) -> Optional[SelectStatementColumnsAndTables]: """Analyze a select statement: targets, aliases, etc. Return info.""" assert segment.is_type("select_statement") table_aliases, standalone_aliases = get_aliases_from_select(segment, dialect) if early_exit and not table_aliases and not standalone_aliases: return None # Iterate through all the references, both in the select clause, but also # potential others. sc = segment.get_child("select_clause") # Sometimes there is no select clause (e.g. "SELECT *" is a select_clause_element) if not sc: # pragma: no cover # TODO: Review whether this clause should be removed. It might only # have existed for an old way of structuring the Exasol dialect. return None # NOTE: In this first crawl, don't crawl inside any sub-selects, that's very # important for both isolation and performance reasons. reference_buffer = _get_object_references(sc) table_reference_buffer = [] for potential_clause in ( "where_clause", "groupby_clause", "having_clause", "orderby_clause", "qualify_clause", ): clause = segment.get_child(potential_clause) if clause: reference_buffer += _get_object_references(clause) # Get all select targets. _select_clause = segment.get_child("select_clause") assert _select_clause, "Select statement found without select clause." select_targets = cast( List[SelectClauseElementSegment], _select_clause.get_children("select_clause_element"), ) # Get all column aliases. NOTE: In two steps so mypy can follow. _pre_aliases = [s.get_alias() for s in select_targets] col_aliases = [_alias for _alias in _pre_aliases if _alias is not None] # Get any columns referred to in a using clause, and extract anything # from ON clauses. using_cols = [] fc = segment.get_child("from_clause") if fc: for table_expression in fc.recursive_crawl( "table_expression", no_recursive_seg_type="select_statement" ): for seg in table_expression.iter_segments(): # table references can get tricky with what is a schema, table, # project, or column. It may be best for now to use the redshift # unnest logic for dialects that support arrays or objects/structs # in AL05. However, this solves finding other types of references # in functions such as LATERAL FLATTEN. if not seg.is_type("table_reference"): reference_buffer += _get_object_references(seg) elif cast(ObjectReferenceSegment, seg).is_qualified(): table_reference_buffer += _get_object_references(seg) for join_clause in fc.recursive_crawl( "join_clause", no_recursive_seg_type="select_statement" ): seen_using = False for seg in join_clause.iter_segments(): if seg.is_type("keyword") and seg.raw_upper == "USING": seen_using = True elif seg.is_type("join_on_condition"): for on_seg in seg.segments: if on_seg.is_type("bracketed", "expression"): # Deal with expressions reference_buffer += _get_object_references(seg) elif seen_using and seg.is_type("bracketed"): for subseg in seg.segments: if subseg.is_type("identifier"): using_cols.append(subseg) seen_using = False return SelectStatementColumnsAndTables( select_statement=segment, table_aliases=table_aliases or [], standalone_aliases=standalone_aliases or [], reference_buffer=reference_buffer, select_targets=select_targets, col_aliases=col_aliases, using_cols=using_cols, table_reference_buffer=table_reference_buffer, )
Analyze a select statement: targets, aliases, etc. Return info.
get_select_statement_info
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/select.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/select.py
MIT
def get_aliases_from_select( segment: BaseSegment, dialect: Optional[Dialect] = None ) -> Tuple[Optional[List[AliasInfo]], Optional[List[BaseSegment]]]: """Gets the aliases referred to in the FROM clause. Returns a tuple of two lists: - Table aliases - Value table function aliases """ fc = segment.get_child("from_clause") if not fc: # If there's no from clause then just abort. return None, None assert isinstance(fc, (FromClauseSegment, JoinClauseSegment)) aliases = fc.get_eventual_aliases() # We only want table aliases, so filter out aliases for value table # functions, lambda parameters and pivot columns. standalone_aliases: List[BaseSegment] = [] standalone_aliases += _get_pivot_table_columns(segment, dialect) standalone_aliases += _get_lambda_argument_columns(segment, dialect) table_aliases = [] for table_expr, alias_info in aliases: if _has_value_table_function(table_expr, dialect): if alias_info.segment and alias_info.segment not in standalone_aliases: standalone_aliases.append(alias_info.segment) elif alias_info not in table_aliases: table_aliases.append(alias_info) return table_aliases, standalone_aliases
Gets the aliases referred to in the FROM clause. Returns a tuple of two lists: - Table aliases - Value table function aliases
get_aliases_from_select
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/select.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/select.py
MIT
def get_parse_fixtures( fail_on_missing_yml=False, ) -> Tuple[List[ParseExample], List[Tuple[str, str, bool, str]]]: """Search for all parsing fixtures.""" parse_success_examples = [] parse_structure_examples = [] # Generate the filenames for each dialect from the parser test directory for d in os.listdir(os.path.join("test", "fixtures", "dialects")): # Ignore documentation if d.endswith(".md"): continue # assume that d is now the name of a dialect dirlist = os.listdir(os.path.join("test", "fixtures", "dialects", d)) for f in dirlist: has_yml = False if f.endswith(".sql"): root = f[:-4] # only look for sql files parse_success_examples.append(ParseExample(d, f)) # Look for the code_only version of the structure y = root + ".yml" if y in dirlist: parse_structure_examples.append((d, f, True, y)) has_yml = True # Look for the non-code included version of the structure y = root + "_nc.yml" if y in dirlist: parse_structure_examples.append((d, f, False, y)) has_yml = True if not has_yml and fail_on_missing_yml: raise ( Exception( f"Missing .yml file for {os.path.join(d, f)}. Run the " "test/generate_parse_fixture_yml.py script!" ) ) return parse_success_examples, parse_structure_examples
Search for all parsing fixtures.
get_parse_fixtures
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def make_dialect_path(dialect, fname): """Work out how to find paths given a dialect and a file name.""" return os.path.join("test", "fixtures", "dialects", dialect, fname)
Work out how to find paths given a dialect and a file name.
make_dialect_path
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
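A quick usage example of the helper above (the fixture name is illustrative):

make_dialect_path("ansi", "select_a.sql")
# -> os.path.join("test", "fixtures", "dialects", "ansi", "select_a.sql"),
#    i.e. 'test/fixtures/dialects/ansi/select_a.sql' on POSIX systems.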
def load_file(dialect, fname): """Load a file.""" with open(make_dialect_path(dialect, fname), encoding="utf8") as f: raw = f.read() return raw
Load a file.
load_file
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def process_struct(obj): """Process a nested dict or dict-like into a check tuple.""" if isinstance(obj, dict): return tuple((k, process_struct(obj[k])) for k in obj) elif isinstance(obj, list): # If empty list, return empty tuple if not len(obj): return tuple() # We'll assume that it's a list of dicts if isinstance(obj[0], dict): buff = [process_struct(elem) for elem in obj] if any(len(elem) > 1 for elem in buff): raise ValueError(f"Not sure how to deal with multi key dict: {buff!r}") return tuple(elem[0] for elem in buff) else: raise TypeError(f"Did not expect a list of {type(obj[0])}: {obj[0]!r}") elif isinstance(obj, (str, int, float)): return str(obj) elif obj is None: return None else: raise TypeError(f"Not sure how to deal with type {type(obj)}: {obj!r}")
Process a nested dict or dict-like into a check tuple.
process_struct
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
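A worked example of the check-tuple shape this produces, assuming `process_struct` from the row above is in scope (the input dict is illustrative):

struct = {"select_statement": [{"keyword": "SELECT"}, {"literal": "1"}]}
# Dicts become tuples of (key, value) pairs; a list of single-key dicts
# collapses to a tuple of those (key, value) pairs.
assert process_struct(struct) == (
    ("select_statement", (("keyword", "SELECT"), ("literal", "1"))),
)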
def parse_example_file(dialect: str, sqlfile: str): """Parse example SQL file, return parse tree.""" config = FluffConfig(overrides=dict(dialect=dialect)) # Load the SQL raw = load_file(dialect, sqlfile) # Lex and parse the file tokens, _ = Lexer(config=config).lex(raw) tree = Parser(config=config).parse(tokens, fname=dialect + "/" + sqlfile) return tree
Parse example SQL file, return parse tree.
parse_example_file
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def compute_parse_tree_hash(tree): """Given a parse tree, compute a consistent hash value for it.""" if tree: r = tree.as_record(code_only=True, show_raw=True) if r: r_io = io.StringIO() yaml.dump(r, r_io, sort_keys=False, allow_unicode=True) result = hashlib.blake2s(r_io.getvalue().encode("utf-8")).hexdigest() return result return None
Given a parse tree, compute a consistent hash value for it.
compute_parse_tree_hash
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
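The hashing recipe in `compute_parse_tree_hash` (a deterministic YAML dump fed into blake2s) generalises to any YAML-serialisable record; a minimal standalone sketch:

import hashlib
import io

import yaml

def stable_hash(record) -> str:
    # Dump with sort_keys=False so insertion order is preserved and equal
    # records always serialise, and therefore hash, identically.
    r_io = io.StringIO()
    yaml.dump(record, r_io, sort_keys=False, allow_unicode=True)
    return hashlib.blake2s(r_io.getvalue().encode("utf-8")).hexdigest()

assert stable_hash({"a": 1}) == stable_hash({"a": 1})
assert stable_hash({"a": 1}) != stable_hash({"a": 2})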
def load_yaml(fpath): """Load a yaml structure and process it into a tuple.""" # Load raw file with open(fpath, encoding="utf8") as f: raw = f.read() # Parse the yaml obj = yaml.safe_load(raw) # Return the parsed and structured object _hash = None if obj: _hash = obj.pop("_hash", None) processed = process_struct(obj) if processed: return _hash, processed[0] else: return None, None
Load a yaml structure and process it into a tuple.
load_yaml
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def yaml_loader(): """Return a yaml loading function.""" # Return a function return load_yaml
Return a yaml loading function.
yaml_loader
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def _generate_test_segments_func(elems): """Roughly generate test segments. This function isn't totally robust, but good enough for testing. Use with caution. """ buff = [] raw_file = "".join(elems) templated_file = TemplatedFile.from_string(raw_file) idx = 0 for elem in elems: if elem == "<indent>": buff.append( Indent(pos_marker=PositionMarker.from_point(idx, idx, templated_file)) ) continue elif elem == "<dedent>": buff.append( Dedent(pos_marker=PositionMarker.from_point(idx, idx, templated_file)) ) continue seg_kwargs = {} if set(elem) <= {" ", "\t"}: SegClass = WhitespaceSegment elif set(elem) <= {"\n"}: SegClass = NewlineSegment elif elem == "(": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("start_bracket",)} elif elem == ")": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("end_bracket",)} elif elem == "[": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("start_square_bracket",)} elif elem == "]": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("end_square_bracket",)} elif elem.startswith("--"): SegClass = CommentSegment seg_kwargs = {"instance_types": ("inline_comment",)} elif elem.startswith('"'): SegClass = CodeSegment seg_kwargs = {"instance_types": ("double_quote",)} elif elem.startswith("'"): SegClass = CodeSegment seg_kwargs = {"instance_types": ("single_quote",)} else: SegClass = CodeSegment # Set a none position marker which we'll realign at the end. buff.append( SegClass( raw=elem, pos_marker=PositionMarker( slice(idx, idx + len(elem)), slice(idx, idx + len(elem)), templated_file, ), **seg_kwargs, ) ) idx += len(elem) return tuple(buff)
Roughly generate test segments. This function isn't totally robust, but good enough for testing. Use with caution.
_generate_test_segments_func
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def generate_test_segments(): """Roughly generate test segments. This is a factory function so that it works as a fixture, but when actually used, this will return the inner function which is what you actually need. """ return _generate_test_segments_func
Roughly generate test segments. This is a factory function so that it works as a fixture, but when actually used, this will return the inner function which is what you actually need.
generate_test_segments
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def raise_critical_errors_after_fix(monkeypatch): """Raises errors that break the Fix process. These errors are otherwise swallowed to allow the lint messages to reach the end user. """ @staticmethod def _log_critical_errors(error: Exception): raise error monkeypatch.setattr(BaseRule, "_log_critical_errors", _log_critical_errors)
Raises errors that break the Fix process. These errors are otherwise swallowed to allow the lint messages to reach the end user.
raise_critical_errors_after_fix
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
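The fixture above swaps a swallow-and-log staticmethod for one that re-raises. The same pattern in isolation, against a hypothetical `Runner` class rather than sqlfluff's `BaseRule`:

import pytest

class Runner:
    # Hypothetical stand-in for BaseRule.
    @staticmethod
    def _log_critical_errors(error: Exception):
        pass  # production behaviour: swallow the error and log it

    def run(self):
        try:
            raise ValueError("boom")
        except ValueError as err:
            self._log_critical_errors(err)

@pytest.fixture
def strict_errors(monkeypatch):
    @staticmethod
    def _raise(error: Exception):
        raise error

    monkeypatch.setattr(Runner, "_log_critical_errors", _raise)

def test_run_raises(strict_errors):
    # With the patch applied, the swallowed error now surfaces.
    with pytest.raises(ValueError, match="boom"):
        Runner().run()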
def fail_on_parse_error_after_fix(monkeypatch): """Cause tests to fail if a lint fix introduces a parse error. In production, we have a couple of functions that, upon detecting a bug in a lint rule, just log a warning. To catch bugs in new or modified rules, we want to be more strict during dev and CI/CD testing. Here, we patch in different functions which raise runtime errors, causing tests to fail if this happens. """ @staticmethod def raise_error_apply_fixes_check_issue(message, *args): # pragma: no cover raise ValueError(message % args) @staticmethod def raise_error_conflicting_fixes_same_anchor(message: str): # pragma: no cover raise ValueError(message) monkeypatch.setattr( BaseSegment, "_log_apply_fixes_check_issue", raise_error_apply_fixes_check_issue ) monkeypatch.setattr( Linter, "_report_conflicting_fixes_same_anchor", raise_error_conflicting_fixes_same_anchor, )
Cause tests to fail if a lint fix introduces a parse error. In production, we have a couple of functions that, upon detecting a bug in a lint rule, just log a warning. To catch bugs in new or modified rules, we want to be more strict during dev and CI/CD testing. Here, we patch in different functions which raise runtime errors, causing tests to fail if this happens.
fail_on_parse_error_after_fix
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def test_verbosity_level(request): """Report the verbosity level for a given pytest run. For example: $ pytest -vv Has a verbosity level of 2 While: $ pytest Has a verbosity level of 0 """ return request.config.getoption("verbose")
Report the verbosity level for a given pytest run. For example: $ pytest -vv Has a verbosity level of 2 While: $ pytest Has a verbosity level of 0
test_verbosity_level
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def distribute_work(work_items: List[S], work_fn: Callable[[S], None]) -> None: """Distribute work and keep track of progress.""" # Build up a dict of sets, where the key is the dialect and the set # contains all the expected cases. As cases return we'll check them # off. success_map = {} expected_cases = defaultdict(set) for case in work_items: expected_cases[case.dialect].add(case) errors = [] with multiprocessing.Pool(multiprocessing.cpu_count()) as pool: for example, result in pool.imap_unordered(work_fn, work_items): if result is not None: errors.append(result) success_map[example] = False else: success_map[example] = True expected_cases[example.dialect].remove(example) # Check to see whether a dialect is complete if not expected_cases[example.dialect]: # It's done. Report success rate. local_success_map = { k: v for k, v in success_map.items() if k.dialect == example.dialect } if all(local_success_map.values()): print(f"{example.dialect!r} complete.\t\tAll Success ✅") else: fail_files = [ k.sqlfile for k, v in local_success_map.items() if not v ] print( f"{example.dialect!r} complete.\t\t{len(fail_files)} fails. ⚠️" ) for fname in fail_files: print(f" - {fname!r}") if errors: print(errors) print("FAILED TO GENERATE ALL CASES") sys.exit(1)
Distribute work and keep track of progress.
distribute_work
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
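The fan-out pattern in `distribute_work` (`Pool.imap_unordered` plus per-group bookkeeping, so each dialect can report as soon as its last case returns) reduced to a minimal sketch with hypothetical work items:

import multiprocessing
from collections import defaultdict
from typing import Optional, Tuple

Item = Tuple[str, int]  # (group, value)

def work_fn(item: Item) -> Tuple[Item, Optional[str]]:
    group, value = item
    # Return (item, error); None signals success, as in distribute_work.
    return item, None if value % 2 == 0 else f"odd value in {group}: {value}"

if __name__ == "__main__":
    items = [("a", 2), ("a", 3), ("b", 4)]
    pending = defaultdict(set)
    for item in items:
        pending[item[0]].add(item)
    with multiprocessing.Pool(2) as pool:
        # Results arrive as they complete, not in submission order.
        for item, error in pool.imap_unordered(work_fn, items):
            pending[item[0]].discard(item)
            if error:
                print(error)
            if not pending[item[0]]:
                print(f"group {item[0]!r} complete")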
def _is_matching_new_criteria(example: ParseExample): """Return True if the YAML doesn't exist or is older than the SQL.""" yaml_path = _create_file_path(example) if not os.path.exists(yaml_path): return True sql_path = os.path.join( "test", "fixtures", "dialects", example.dialect, example.sqlfile, ) return os.path.getmtime(yaml_path) < os.path.getmtime(sql_path)
Return True if the YAML doesn't exist or is older than the SQL.
_is_matching_new_criteria
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
def generate_one_parse_fixture( example: ParseExample, ) -> Tuple[ParseExample, Optional[SQLParseError]]: """Parse example SQL file, write parse tree to YAML file.""" dialect, sqlfile = example sql_path = _create_file_path(example, ".sql") try: tree = parse_example_file(dialect, sqlfile) except Exception as err: # Catch parsing errors, and wrap the file path in with the error. return example, SQLParseError(f"Fatal parsing error: {sql_path}: {err}") # Check we don't have any base types or unparsable sections types = tree.type_set() if "base" in types: return example, SQLParseError(f"Unnamed base section when parsing: {sql_path}") if "unparsable" in types: return example, SQLParseError(f"Could not parse: {sql_path}") _hash = compute_parse_tree_hash(tree) # Remove the .sql file extension path = _create_file_path(example) with open(path, "w", newline="\n", encoding="utf8") as f: r: Optional[Dict[str, Optional[str]]] = None if not tree: f.write("") return example, None records = tree.as_record(code_only=True, show_raw=True) assert records, "TypeGuard" r = dict([("_hash", _hash), *list(records.items())]) print( "# YML test files are auto-generated from SQL files and should not be " "edited by", '# hand. To help enforce this, the "hash" field in the file must match ' "a hash", "# computed by SQLFluff when running the tests. Please run", "# `python test/generate_parse_fixture_yml.py` to generate them after " "adding or", "# altering SQL files.", file=f, sep="\n", ) yaml.dump( data=r, stream=f, default_flow_style=False, sort_keys=False, allow_unicode=True, ) return example, None
Parse example SQL file, write parse tree to YAML file.
generate_one_parse_fixture
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
def gather_file_list( dialect: Optional[str] = None, glob_match_pattern: Optional[str] = None, new_only: bool = False, ) -> List[ParseExample]: """Gather the list of files to generate fixtures for. Apply filters as required.""" parse_success_examples, _ = get_parse_fixtures() if new_only: parse_success_examples = [ example for example in parse_success_examples if _is_matching_new_criteria(example) ] if dialect: dialect = dialect.lower() parse_success_examples = [ example for example in parse_success_examples if example[0] == dialect ] if len(parse_success_examples) == 0: raise ValueError(f'Unknown Dialect "{dialect}"') if not glob_match_pattern: return parse_success_examples regex = re.compile(fnmatch.translate(glob_match_pattern)) return [ example for example in parse_success_examples if regex.match(example[1]) is not None ]
Gather the list of files to generate fixtures for. Apply filters as required.
gather_file_list
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
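The glob filter above leans on `fnmatch.translate`, which converts a shell-style pattern into an anchored regular expression; a small standalone demonstration:

import fnmatch
import re

regex = re.compile(fnmatch.translate("select_*.sql"))
files = ["select_simple.sql", "insert_basic.sql", "select_nested.sql"]
# Only the names matching the glob survive.
assert [f for f in files if regex.match(f)] == [
    "select_simple.sql",
    "select_nested.sql",
]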
def generate_parse_fixtures( filter: Optional[str], dialect: Optional[str], new_only: bool ): """Generate fixture or a subset based on dialect or filename glob match.""" filter_str = filter or "*" dialect_str = dialect or "all" print("Match Pattern Received:") print(f"\tfilter={filter_str} dialect={dialect_str} new-only={new_only}") parse_success_examples = gather_file_list(dialect, filter, new_only) print(f"Found {len(parse_success_examples)} file(s) to generate") t0 = time.monotonic() try: distribute_work(parse_success_examples, generate_one_parse_fixture) except SQLParseError as err: # If one fails, exit early and cleanly. print(f"PARSING FAILED: {err}") sys.exit(1) dt = time.monotonic() - t0 print(f"Built {len(parse_success_examples)} fixtures in {dt:.2f}s.")
Generate fixture or a subset based on dialect or filename glob match.
generate_parse_fixtures
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
def main(): """Find all example SQL files, parse and create YAML files.""" generate_parse_fixtures()
Find all example SQL files, parse and create YAML files.
main
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
def test_diff_quality_plugin(sql_paths, expected_violations_lines, monkeypatch): """Test the plugin at least finds errors on the expected lines.""" def execute(command, exit_codes): printable_command_parts = [ c.decode(sys.getfilesystemencoding()) if isinstance(c, bytes) else c for c in command ] result = invoke_assert_code( ret_code=1 if expected_violations_lines else 0, args=[ lint, printable_command_parts[2:], ], ) return result.output, "" # Mock the execute function -- this is an attempt to prevent the CircleCI # coverage check from hanging. (We've seen issues in the past where using # subprocesses caused things to occasionally hang.) monkeypatch.setattr(diff_quality_plugin, "execute", execute) monkeypatch.chdir("test/fixtures/") violation_reporter = diff_quality_plugin.diff_cover_report_quality( options="--processes=1" ) assert len(sql_paths) in (0, 1) sql_paths = [str(Path(sql_path)) for sql_path in sql_paths] violations_dict = violation_reporter.violations_batch(sql_paths) assert isinstance(violations_dict, dict) if expected_violations_lines: assert len(violations_dict[sql_paths[0]]) > 0 violations_lines = {v.line for v in violations_dict[sql_paths[0]]} for expected_line in expected_violations_lines: assert expected_line in violations_lines else: assert ( len(violations_dict[sql_paths[0]]) == 0 if sql_paths else len(violations_dict) == 0 )
Test the plugin at least finds errors on the expected lines.
test_diff_quality_plugin
python
sqlfluff/sqlfluff
test/diff_quality_plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/diff_quality_plugin_test.py
MIT
def test_assert_rule_fail_in_sql_handle_parse_error(): """Util assert_rule_fail_in_sql should handle parse errors.""" with pytest.raises(Failed) as failed_test: assert_rule_fail_in_sql(code="L000", sql="select from") failed_test.match("Found the following parse errors in test case:")
Util assert_rule_fail_in_sql should handle parse errors.
test_assert_rule_fail_in_sql_handle_parse_error
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_assert_rule_fail_in_sql_should_fail_queries_that_unexpectedly_pass(): """Util assert_rule_fail_in_sql should fail if no failure.""" with pytest.raises(Failed) as failed_test: assert_rule_fail_in_sql(code="LT01", sql="select 1") failed_test.match("No LT01 failures found in query which should fail")
Util assert_rule_fail_in_sql should fail if no failure.
test_assert_rule_fail_in_sql_should_fail_queries_that_unexpectedly_pass
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_assert_rule_pass_in_sql_should_handle_parse_error(): """Util assert_rule_pass_in_sql should handle parse errors.""" with pytest.raises(Failed) as failed_test: assert_rule_pass_in_sql(code="LT01", sql="select from") failed_test.match("Found unparsable section:")
Util assert_rule_pass_in_sql should handle parse errors.
test_assert_rule_pass_in_sql_should_handle_parse_error
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_assert_rule_pass_in_sql_should_fail_when_there_are_violations(): """Util assert_rule_pass_in_sql should fail when there are violations.""" with pytest.raises(Failed) as failed_test: assert_rule_pass_in_sql(code="LT01", sql="select a , b from t") failed_test.match("Found LT01 failures in query which should pass")
Util assert_rule_pass_in_sql should fail when there are violations.
test_assert_rule_pass_in_sql_should_fail_when_there_are_violations
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_rules_test_case_skipped_when_test_case_skipped(): """Test functionality of the `RuleTestCase` skip attribute.""" rule_test_case = RuleTestCase(rule="CP01", skip="Skip this one for now") with pytest.raises(Skipped) as skipped_test: rule_test_case.evaluate() skipped_test.match("Skip this one for now")
Test functionality of the `RuleTestCase` skip attribute.
test_rules_test_case_skipped_when_test_case_skipped
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_rules_test_case_has_variable_introspection(test_verbosity_level): """Make sure the helper gives variable introspection information on failure.""" rule_test_case = RuleTestCase( rule="LT02", fail_str=""" select a, b from table """, # extra comma on purpose fix_str=""" select a, b, from table """, ) with pytest.raises(AssertionError) as skipped_test: rule_test_case.evaluate() if test_verbosity_level >= 2: # Enough to check that a query diff is displayed skipped_test.match("select")
Make sure the helper gives variable introspection information on failure.
test_rules_test_case_has_variable_introspection
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def assert_pickle_robust(err: SQLBaseError): """Test that the class remains the same through copying and pickling.""" # First try copying (and make sure they still compare equal) err_copy = copy.copy(err) assert err_copy == err # Then try pickling (and make sure they also still compare equal) pickled = pickle.dumps(err) pickle_copy = pickle.loads(pickled) assert pickle_copy == err
Test that the class remains the same through copying and pickling.
assert_pickle_robust
python
sqlfluff/sqlfluff
test/core/errors_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/errors_test.py
MIT
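The copy/pickle round-trip check generalises to any value type with a meaningful `__eq__`; a minimal sketch using a hypothetical dataclass in place of `SQLBaseError`:

import copy
import pickle
from dataclasses import dataclass

@dataclass
class Marker:
    # Hypothetical error-like value; @dataclass supplies __eq__ for free.
    message: str
    ignore: bool = False

def assert_round_trip_robust(obj) -> None:
    assert copy.copy(obj) == obj  # survives shallow copying
    assert pickle.loads(pickle.dumps(obj)) == obj  # survives pickling

assert_round_trip_robust(Marker("Foo", ignore=True))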
def test__lex_error_pickle(ignore): """Test lexing error pickling.""" template = TemplatedFile.from_string("foobar") err = SQLLexError("Foo", pos=PositionMarker(slice(0, 6), slice(0, 6), template)) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. err.ignore = ignore assert_pickle_robust(err)
Test lexing error pickling.
test__lex_error_pickle
python
sqlfluff/sqlfluff
test/core/errors_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/errors_test.py
MIT
def test__parse_error_pickle(ignore): """Test parse error pickling.""" template = TemplatedFile.from_string("foobar") segment = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) err = SQLParseError("Foo", segment=segment) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. err.ignore = ignore assert_pickle_robust(err)
Test parse error pickling.
test__parse_error_pickle
python
sqlfluff/sqlfluff
test/core/errors_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/errors_test.py
MIT
def test__lint_error_pickle(ignore): """Test lint error pickling.""" template = TemplatedFile.from_string("foobar") segment = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) err = SQLLintError("Foo", segment=segment, rule=Rule_T078) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. err.ignore = ignore assert_pickle_robust(err)
Test lint error pickling.
test__lint_error_pickle
python
sqlfluff/sqlfluff
test/core/errors_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/errors_test.py
MIT
def test__plugin_manager_registers_example_plugin(): """Test that the example plugin is registered. This test also tests that warnings are raised on the import of plugins which have their imports in the wrong place (e.g. the example plugin). That means we need to make sure the plugin is definitely reimported at the start of this test, so we can see any warnings raised on imports. To do this we clear the plugin manager cache and also forcibly unload the example plugin modules if they are already loaded. This ensures that we can capture any warnings raised by importing the module. """ purge_plugin_manager() # We still do a try/except here, even though it's only run within # the context of a test because the module may or may not already # be imported depending on the order that the tests run in. try: del sys.modules["sqlfluff_plugin_example"] except KeyError: pass try: del sys.modules["sqlfluff_plugin_example.rules"] except KeyError: pass with fluff_log_catcher(logging.WARNING, "sqlfluff.rules") as caplog: plugin_manager = get_plugin_manager() # The plugin import order is non-deterministic. # Use sets in case the dbt plugin (or other plugins) are # already installed too. installed_plugins = set( plugin_module.__name__ for plugin_module in plugin_manager.get_plugins() ) print(f"Installed plugins: {installed_plugins}") assert installed_plugins.issuperset( { "sqlfluff_plugin_example", "sqlfluff.core.plugin.lib", } ) # At this stage we should also check that the example plugin # also raises a warning for its import location. assert ( "Rule 'Rule_Example_L001' has been imported before all plugins " "have been fully loaded" ) in caplog.text
Test that the example plugin is registered. This test also tests that warnings are raised on the import of plugins which have their imports in the wrong place (e.g. the example plugin). That means we need to make sure the plugin is definitely reimported at the start of this test, so we can see any warnings raised on imports. To do this we clear the plugin manager cache and also forcibly unload the example plugin modules if they are already loaded. This ensures that we can capture any warnings raised by importing the module.
test__plugin_manager_registers_example_plugin
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__plugin_example_rules_returned(rule_ref): """Test that the example rules from the plugin are returned.""" plugin_manager = get_plugin_manager() # The plugin import order is non-deterministic rule_names = [ rule.__name__ for rules in plugin_manager.hook.get_rules() for rule in rules ] print(f"Rule names: {rule_names}") assert rule_ref in rule_names
Test that the example rules from the plugin are returned.
test__plugin_example_rules_returned
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__plugin_default_config_read(rule_ref, config_option): """Test that the example plugin default config is merged into FluffConfig.""" fluff_config = FluffConfig(overrides={"dialect": "ansi"}) # The plugin import order is non-deterministic print(f"Detected config sections: {fluff_config._configs['rules'].keys()}") # Check V1 assert config_option in fluff_config._configs["rules"][rule_ref]
Test that the example plugin default config is merged into FluffConfig.
test__plugin_default_config_read
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def load(self): """Raise an exception on load.""" raise ValueError("TEST ERROR")
Raise an exception on load.
load
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__plugin_handle_bad_load(): """Test that we can safely survive a plugin which fails to load.""" # Mock fake plugin ep = MockEntryPoint("test_name", "test_value", "sqlfluff") plugin_manager = get_plugin_manager() with fluff_log_catcher(logging.WARNING, "sqlfluff.plugin") as caplog: _load_plugin(plugin_manager, ep, "plugin_name", "v1.2.3") # Assert that there was a warning assert "ERROR: Failed to load SQLFluff plugin" in caplog.text assert "plugin_name" in caplog.text assert "TEST ERROR" in caplog.text
Test that we can safely survive a plugin which fails to load.
test__plugin_handle_bad_load
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__plugin_get_version(): """Test the plugin method of getting the version gets the right version.""" assert _get_sqlfluff_version() == pkg_version
Test the plugin method of getting the version gets the right version.
test__plugin_get_version
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__templater_python(): """Test the python templater.""" t = PythonTemplater(override_context=dict(blah="foo")) instr = PYTHON_STRING outstr, _ = t.process(in_str=instr, fname="test") assert str(outstr) == "SELECT * FROM foo"
Test the python templater.
test__templater_python
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_error(): """Test error handling in the python templater.""" t = PythonTemplater(override_context=dict(noblah="foo")) instr = PYTHON_STRING with pytest.raises(SQLTemplaterError): t.process(in_str=instr, fname="test")
Test error handling in the python templater.
test__templater_python_error
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_intermediate__trim( int_slice, templated_str, head_test, tail_test, int_test ): """Test trimming IntermediateFileSlice.""" h, i, t = int_slice.trim_ends(templated_str=templated_str) assert h == head_test assert t == tail_test assert i == int_test
Test trimming IntermediateFileSlice.
test__templater_python_intermediate__trim
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_substring_occurrences(mainstr, substrings, positions): """Test _substring_occurrences.""" occurrences = PythonTemplater._substring_occurrences(mainstr, substrings) assert isinstance(occurrences, dict) pos_test = [occurrences[substring] for substring in substrings] assert pos_test == positions
Test _substring_occurrences.
test__templater_python_substring_occurrences
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_sorted_occurrence_tuples(test, result): """Test _sorted_occurrence_tuples.""" assert PythonTemplater._sorted_occurrence_tuples(test) == result
Test _sorted_occurrence_tuples.
test__templater_python_sorted_occurrence_tuples
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_slice_template(test, result): """Test _slice_template.""" resp = list(PythonTemplater._slice_template(test)) # check contiguous assert "".join(elem.raw for elem in resp) == test # check indices idx = 0 for raw_file_slice in resp: assert raw_file_slice.source_idx == idx idx += len(raw_file_slice.raw) # Check total result assert resp == result
Test _slice_template.
test__templater_python_slice_template
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_length, result, ): """Test _split_invariants.""" resp = list( PythonTemplater._split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_length, ) ) # check result assert resp == result
Test _split_invariants.
test__templater_python_split_invariants
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_split_uniques_coalesce_rest( split_file, raw_occurrences, templated_occurrences, templated_str, result, caplog ): """Test _split_uniques_coalesce_rest.""" with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"): resp = list( PythonTemplater._split_uniques_coalesce_rest( split_file, raw_occurrences, templated_occurrences, templated_str, ) ) # Check contiguous prev_slice = None for elem in result: if prev_slice: assert elem[1].start == prev_slice[0].stop assert elem[2].start == prev_slice[1].stop prev_slice = (elem[1], elem[2]) # check result assert resp == result
Test _split_uniques_coalesce_rest.
test__templater_python_split_uniques_coalesce_rest
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_slice_file(raw_file, templated_file, unwrap_wrapped, result): """Test slice_file.""" _, resp, _ = PythonTemplater().slice_file( raw_file, # For the render_func we just use a function which just returns the # templated file from the test case. (lambda x: templated_file), config=FluffConfig( configs={"templater": {"unwrap_wrapped_queries": unwrap_wrapped}}, overrides={"dialect": "ansi"}, ), ) # Check contiguous prev_slice = None for templated_slice in resp: if prev_slice: assert templated_slice.source_slice.start == prev_slice[0].stop assert templated_slice.templated_slice.start == prev_slice[1].stop prev_slice = (templated_slice.source_slice, templated_slice.templated_slice) # check result assert resp == result
Test slice_file.
test__templater_python_slice_file
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_large_file_check(): """Test large file skipping. The check is separately called on each .process() method so it makes sense to test a few templaters. """ # First check we can process the file normally without config. PythonTemplater().process(in_str="SELECT 1", fname="<string>") # Then check we raise a skip exception when config is set low. with pytest.raises(SQLFluffSkipFile) as excinfo: PythonTemplater().process( in_str="SELECT 1", fname="<string>", config=FluffConfig( overrides={"dialect": "ansi", "large_file_skip_char_limit": 2}, ), ) assert "Length of file" in str(excinfo.value)
Test large file skipping. The check is separately called on each .process() method so it makes sense to test a few templaters.
test__templater_python_large_file_check
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
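A minimal sketch of triggering the same skip outside of pytest, assuming the public import paths sqlfluff.core.FluffConfig, sqlfluff.core.errors.SQLFluffSkipFile and sqlfluff.core.templaters.PythonTemplater:

from sqlfluff.core import FluffConfig
from sqlfluff.core.errors import SQLFluffSkipFile
from sqlfluff.core.templaters import PythonTemplater

config = FluffConfig(
    overrides={"dialect": "ansi", "large_file_skip_char_limit": 2},
)
try:
    # "SELECT 1" is 8 characters, which exceeds the limit of 2.
    PythonTemplater().process(in_str="SELECT 1", fname="<string>", config=config)
except SQLFluffSkipFile as err:
    print(f"Skipped: {err}")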
def test__templater_python_dot_notation_variables(raw_str, result):
    """Test template variables that contain a dot character (`.`)."""
    context = {
        "foo": "bar",
        "num": 123,
        "sqlfluff": {
            "foo.bar": "foobar",
            "self.number": 42,
            "obj.schema": "my_schema",
            "obj.table": "my_table",
        },
    }
    t = PythonTemplater(override_context=context)
    instr = raw_str
    outstr, _ = t.process(in_str=instr, fname="test")
    assert str(outstr) == result
Test template variables that contain a dot character (`.`).
test__templater_python_dot_notation_variables
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_dot_notation_fail(context, error_string):
    """Test failures with template variables that contain a dot character (`.`)."""
    t = PythonTemplater(override_context=context)
    with pytest.raises(SQLTemplaterError) as excinfo:
        outstr, _ = t.process(in_str="SELECT * FROM {foo.bar}", fname="test")
    assert error_string in excinfo.value.desc()
Test failures with template variables that contain a dot character (`.`).
test__templater_python_dot_notation_fail
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__indices_of_newlines(raw_str, positions):
    """Test iter_indices_of_newlines."""
    assert list(iter_indices_of_newlines(raw_str)) == positions
Test iter_indices_of_newlines.
test__indices_of_newlines
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def test__templater_raw():
    """Test the raw templater."""
    t = RawTemplater()
    instr = "SELECT * FROM {{blah}}"
    outstr, _ = t.process(in_str=instr, fname="test")
    assert instr == str(outstr)
Test the raw templater.
test__templater_raw
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
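The raw templater is the identity transform: useful when a file contains {{ }}-style text which should not be rendered. A minimal sketch mirroring the process call used in the test above:

from sqlfluff.core.templaters import RawTemplater

templated_file, violations = RawTemplater().process(
    in_str="SELECT * FROM {{blah}}", fname="example"
)
# The output matches the input character for character.
assert str(templated_file) == "SELECT * FROM {{blah}}"
assert not violations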
def test__templated_file_get_line_pos_of_char_pos(
    tf_kwargs,
    in_charpos,
    out_line_no,
    out_line_pos,
):
    """Test TemplatedFile.get_line_pos_of_char_pos."""
    file = TemplatedFile(**tf_kwargs)
    res_line_no, res_line_pos = file.get_line_pos_of_char_pos(in_charpos)
    assert res_line_no == out_line_no
    assert res_line_pos == out_line_pos
Test TemplatedFile.get_line_pos_of_char_pos.
test__templated_file_get_line_pos_of_char_pos
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def test__templated_file_find_slice_indices_of_templated_pos(
    templated_position,
    inclusive,
    tf_kwargs,
    sliced_idx_start,
    sliced_idx_stop,
):
    """Test TemplatedFile._find_slice_indices_of_templated_pos."""
    file = TemplatedFile(**tf_kwargs)
    res_start, res_stop = file._find_slice_indices_of_templated_pos(
        templated_position, inclusive=inclusive
    )
    assert res_start == sliced_idx_start
    assert res_stop == sliced_idx_stop
Test TemplatedFile._find_slice_indices_of_templated_pos.
test__templated_file_find_slice_indices_of_templated_pos
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def test__templated_file_templated_slice_to_source_slice(
    in_slice, out_slice, is_literal, tf_kwargs
):
    """Test TemplatedFile.templated_slice_to_source_slice."""
    file = TemplatedFile(**tf_kwargs)
    source_slice = file.templated_slice_to_source_slice(in_slice)
    literal_test = file.is_source_slice_literal(source_slice)
    assert (is_literal, source_slice) == (literal_test, out_slice)
Test TemplatedFile.templated_slice_to_source_slice.
test__templated_file_templated_slice_to_source_slice
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def test__templated_file_source_only_slices(file, expected_result):
    """Test TemplatedFile.source_only_slices."""
    assert file.source_only_slices() == expected_result
Test TemplatedFile.source_only_slices.
test__templated_file_source_only_slices
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def get_parsed(path: str) -> BaseSegment:
    """Testing helper to parse paths."""
    linter = Linter()
    # Get the first file matching the path string
    first_path = next(linter.parse_path(path))
    # Delegate parse assertions to the `.tree` property
    return first_path.tree
Testing helper to parse paths.
get_parsed
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja(instr, expected_outstr):
    """Test jinja templating and the treatment of whitespace."""
    t = JinjaTemplater(override_context=dict(blah="foo", condition="a < 10"))
    outstr, _ = t.process(
        in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"})
    )
    assert str(outstr) == expected_outstr
Test jinja templating and the treatment of whitespace.
test__templater_jinja
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
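The instr/expected_outstr pairs for this test come from a parametrize list which is not shown here. A minimal sketch with one illustrative pair, assuming the same imports as the surrounding tests:

from sqlfluff.core import FluffConfig
from sqlfluff.core.templaters import JinjaTemplater

t = JinjaTemplater(override_context=dict(blah="foo"))
outstr, _ = t.process(
    in_str="SELECT * FROM {{ blah }}",
    fname="test",
    config=FluffConfig(overrides={"dialect": "ansi"}),
)
# The context value is substituted for the expression.
assert str(outstr) == "SELECT * FROM foo"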
def test__templater_jinja_slices(case: RawTemplatedTestCase):
    """Test that Jinja templater slices raw and templated file correctly."""
    t = JinjaTemplater()
    templated_file, _ = t.process(
        in_str=case.instr,
        fname="test",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    assert templated_file is not None
    assert templated_file.source_str == case.instr
    assert templated_file.templated_str == case.templated_str
    # Build and check the list of source strings referenced by "sliced_file".
    actual_ts_source_list = [
        case.instr[ts.source_slice] for ts in templated_file.sliced_file
    ]
    assert actual_ts_source_list == case.expected_templated_sliced__source_list
    # Build and check the list of templated strings referenced by "sliced_file".
    actual_ts_templated_list = [
        templated_file.templated_str[ts.templated_slice]
        for ts in templated_file.sliced_file
    ]
    assert actual_ts_templated_list == case.expected_templated_sliced__templated_list
    # Build and check the list of source strings referenced by "raw_sliced".
    previous_rs = None
    actual_rs_source_list: List[str] = []
    for rs in templated_file.raw_sliced + [None]:  # type: ignore
        if previous_rs:
            if rs:
                actual_source = case.instr[previous_rs.source_idx : rs.source_idx]
            else:
                actual_source = case.instr[previous_rs.source_idx :]
            actual_rs_source_list.append(actual_source)
        previous_rs = rs
    assert actual_rs_source_list == case.expected_raw_sliced__source_list
Test that Jinja templater slices raw and templated file correctly.
test__templater_jinja_slices
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test_templater_set_block_handling():
    """Test handling of literals in {% set %} blocks.

    Specifically, verify they are not modified in the alternate template.
    """

    def run_query(sql):
        # Prior to the bug fix, this assertion failed. This was bad because,
        # inside JinjaTracer, dbt templates similar to the one in this test
        # would call the database with funky SQL (including weird strings it
        # uses internally like: 00000000000000000000000000000002.
        assert sql == "\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n"
        return sql

    t = JinjaTemplater(override_context=dict(run_query=run_query))
    instr = """{% set my_query1 %}
select 1 from foobarfoobarfoobarfoobar_{{ "dev" }}
{% endset %}
{% set my_query2 %}
{{ my_query1 }}
{% endset %}

{{ run_query(my_query2) }}
"""
    outstr, vs = t.process(
        in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"})
    )
    assert str(outstr) == "\n\n\n\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n\n"
    assert len(vs) == 0
Test handling of literals in {% set %} blocks. Specifically, verify they are not modified in the alternate template.
test_templater_set_block_handling
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_variable():
    """Test missing variable error handling in the jinja templater."""
    t = JinjaTemplater(override_context=dict(blah="foo"))
    instr = JINJA_STRING
    outstr, vs = t.process(
        in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"})
    )
    assert str(outstr) == "SELECT * FROM f, o, o WHERE \n\n"
    # Check we have violations.
    assert len(vs) > 0
    # Check one of them is a templating error on line 1
    assert any(v.rule_code() == "TMP" and v.line_no == 1 for v in vs)
Test missing variable error handling in the jinja templater.
test__templater_jinja_error_variable
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_dynamic_variable_no_violations():
    """Test no templater violation for variable defined within template."""
    t = JinjaTemplater(override_context=dict(blah="foo"))
    instr = """{% if True %}
    {% set some_var %}1{% endset %}
    SELECT {{some_var}}
{% endif %}
"""
    outstr, vs = t.process(
        in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"})
    )
    assert str(outstr) == "\n    \n    SELECT 1\n\n"
    # Check we have no violations.
    assert len(vs) == 0
Test no templater violation for variable defined within template.
test__templater_jinja_dynamic_variable_no_violations
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_syntax():
    """Test syntax problems in the jinja templater."""
    t = JinjaTemplater()
    instr = "SELECT {{foo} FROM jinja_error\n"
    with pytest.raises(SQLTemplaterError) as excinfo:
        t.process(
            in_str=instr,
            fname="test",
            config=FluffConfig(overrides={"dialect": "ansi"}),
        )
    templater_exception = excinfo.value
    assert templater_exception.rule_code() == "TMP"
    assert templater_exception.line_no == 1
    assert "Failed to parse Jinja syntax" in str(templater_exception)
Test syntax problems in the jinja templater.
test__templater_jinja_error_syntax
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_catastrophic():
    """Test error handling in the jinja templater."""
    t = JinjaTemplater(override_context=dict(blah=7))
    instr = JINJA_STRING
    with pytest.raises(SQLTemplaterError) as excinfo:
        t.process(
            in_str=instr,
            fname="test",
            config=FluffConfig(overrides={"dialect": "ansi"}),
        )
    templater_exception = excinfo.value
    assert templater_exception.rule_code() == "TMP"
    assert templater_exception.line_no == 1
    assert "Unrecoverable failure in Jinja templating" in str(templater_exception)
Test error handling in the jinja templater.
test__templater_jinja_error_catastrophic
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_macro_path_does_not_exist():
    """Tests that an error is raised if macro path doesn't exist."""
    with pytest.raises(ValueError) as e:
        JinjaTemplater().construct_render_func(
            config=FluffConfig.from_path(
                "test/fixtures/templater/jinja_macro_path_does_not_exist"
            )
        )
    assert str(e.value).startswith("Path does not exist")
Tests that an error is raised if macro path doesn't exist.
test__templater_jinja_error_macro_path_does_not_exist
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_macro_invalid():
    """Tests that an error is raised if a macro is invalid."""
    invalid_macro_config_string = (
        "[sqlfluff]\n"
        "templater = jinja\n"
        "dialect = ansi\n"
        "[sqlfluff:templater:jinja:macros]\n"
        "a_macro_def = {% macro pkg.my_macro() %}pass{% endmacro %}\n"
    )
    config = FluffConfig.from_string(invalid_macro_config_string)
    with pytest.raises(SQLFluffUserError) as e:
        JinjaTemplater().construct_render_func(config=config)
    error_string = str(e.value)
    assert error_string.startswith("Error loading user provided macro")
    assert "{% macro pkg.my_macro() %}pass{% endmacro %}" in error_string
Tests that an error is raised if a macro is invalid.
test__templater_jinja_error_macro_invalid
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
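For contrast with the invalid config above, a macro name without a dot should load cleanly. A minimal sketch under that assumption (the macro name my_macro is illustrative):

from sqlfluff.core import FluffConfig
from sqlfluff.core.templaters import JinjaTemplater

valid_macro_config_string = (
    "[sqlfluff]\n"
    "templater = jinja\n"
    "dialect = ansi\n"
    "[sqlfluff:templater:jinja:macros]\n"
    "a_macro_def = {% macro my_macro() %}pass{% endmacro %}\n"
)
config = FluffConfig.from_string(valid_macro_config_string)
# No SQLFluffUserError expected here, because `my_macro` is a valid
# Jinja macro name (no package-style dot).
JinjaTemplater().construct_render_func(config=config)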
def test__templater_jinja_lint_empty():
    """Check parsing of a file which renders to an empty string.

    No exception should be raised, and we should get a single
    templated element.
    """
    lntr = Linter(dialect="ansi")
    parsed = lntr.parse_string(in_str='{{ "" }}')
    parsed_variant = parsed.parsed_variants[0]
    assert parsed_variant.templated_file.source_str == '{{ "" }}'
    assert parsed_variant.templated_file.templated_str == ""
    # Get the types of the segments
    print(f"Segments: {parsed_variant.tree.raw_segments}")
    seg_types = [seg.get_type() for seg in parsed_variant.tree.raw_segments]
    assert seg_types == ["placeholder", "end_of_file"]
Check parsing of a file which renders to an empty string. No exception should be raised, and we should get a single templated element.
test__templater_jinja_lint_empty
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
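By comparison, a template which renders to non-empty SQL parses to real code segments rather than a lone placeholder. A minimal sketch using the same Linter API:

from sqlfluff.core import Linter

lntr = Linter(dialect="ansi")
parsed = lntr.parse_string(in_str='SELECT {{ "1" }}')
variant = parsed.parsed_variants[0]
assert variant.templated_file.templated_str == "SELECT 1"
seg_types = [seg.get_type() for seg in variant.tree.raw_segments]
# A rendered expression produces real code segments; the file still
# ends with an end_of_file marker, as in the empty case above.
assert seg_types[-1] == "end_of_file"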
def assert_structure(yaml_loader, path, code_only=True, include_meta=False):
    """Check that a parsed sql file matches the yaml file with the same name."""
    parsed = get_parsed(path + ".sql")
    # Whitespace is important here to test how that's treated
    tpl = parsed.to_tuple(code_only=code_only, show_raw=True, include_meta=include_meta)
    # Check nothing unparsable
    if "unparsable" in parsed.type_set():
        print(parsed.stringify())
        raise ValueError("Input file is unparsable.")
    _, expected = yaml_loader(path + ".yml")
    assert tpl == expected
Check that a parsed sql file matches the yaml file with the same name.
assert_structure
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_full(subpath, code_only, include_meta, yaml_loader, caplog):
    """Check structure can be parsed from jinja templated files."""
    # Log the templater and lexer throughout this test
    caplog.set_level(logging.DEBUG, logger="sqlfluff.templater")
    caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer")
    assert_structure(
        yaml_loader,
        "test/fixtures/templater/" + subpath,
        code_only=code_only,
        include_meta=include_meta,
    )
Check structure can be parsed from jinja templated files.
test__templater_full
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_block_matching(caplog):
    """Test the block UUID matching works with a complicated case."""
    caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer")
    path = "test/fixtures/templater/jinja_l_metas/002.sql"
    # Parse the file.
    parsed = get_parsed(path)
    # We only care about the template elements
    template_segments = [
        seg
        for seg in parsed.raw_segments
        if seg.is_type("template_loop")
        or (
            seg.is_type("placeholder")
            and seg.block_type in ("block_start", "block_end", "block_mid")
        )
    ]
    # Group them together by block UUID
    assert all(
        seg.block_uuid for seg in template_segments
    ), "All templated segments should have a block uuid!"
    grouped = defaultdict(list)
    for seg in template_segments:
        grouped[seg.block_uuid].append(seg.pos_marker.working_loc)
    print(grouped)
    # Now the matching block IDs should be found at the following positions.
    # NOTE: These are working locations in the rendered file.
    groups = {
        "for actions clause 1": [(6, 5), (9, 5), (12, 5), (15, 5)],
        "for actions clause 2": [(17, 5), (21, 5), (29, 5), (37, 5)],
        # NOTE: all the if loop clauses are grouped together.
        "if loop.first": [
            (18, 9),
            (20, 9),
            (20, 9),
            (22, 9),
            (22, 9),
            (28, 9),
            (30, 9),
            (30, 9),
            (36, 9),
        ],
    }
    # Check all are accounted for:
    for clause in groups.keys():
        for block_uuid, locations in grouped.items():
            if groups[clause] == locations:
                print(f"Found {clause}, locations with UUID: {block_uuid}")
                break
        else:
            raise ValueError(f"Couldn't find appropriate grouping of blocks: {clause}")
Test the block UUID matching works with a complicated case.
test__templater_jinja_block_matching
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_slice_template(test, result, analyzer_class):
    """Test _slice_template."""
    templater = JinjaTemplater()
    env, _, render_func = templater.construct_render_func()
    analyzer = analyzer_class(test, env)
    analyzer.analyze(render_func=render_func)
    resp = analyzer.raw_sliced
    # check contiguous (unless there's a comment in it)
    if "{#" not in test:
        assert "".join(elem.raw for elem in resp) == test
    # check indices
    idx = 0
    for raw_slice in resp:
        assert raw_slice.source_idx == idx
        idx += len(raw_slice.raw)
    # Check total result
    assert resp == [RawFileSlice(*args) for args in result]
Test _slice_template.
test__templater_jinja_slice_template
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def parse(self, parser: Parser) -> Union[Node, List[Node]]:
    """Parse the up/down blocks."""
    # {% up 'migration name' %}
    next(parser.stream)  # skip the "up" token
    parser.parse_expression()  # skip the name of this migration
    up_body = parser.parse_statements(("name:down",))
    # {% down %}
    next(parser.stream)  # skip the "down" token
    down_body = parser.parse_statements(("name:end",))
    # {% end %}
    next(parser.stream)
    # This is just a test, so output the blocks verbatim one after the other:
    return [nodes.Scope(up_body), nodes.Scope(down_body)]
Parse the up/down blocks.
parse
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_slice_file(
    raw_file, override_context, result, templater_class, caplog
):
    """Test slice_file."""
    templater = templater_class(override_context=override_context)
    _, _, render_func = templater.construct_render_func(
        config=FluffConfig.from_path(
            "test/fixtures/templater/jinja_slice_template_macros"
        )
    )
    with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"):
        raw_sliced, sliced_file, templated_str = templater.slice_file(
            raw_file,
            render_func=render_func,
        )
    # Create a TemplatedFile from the results. This runs some useful sanity
    # checks.
    _ = TemplatedFile(raw_file, "<<DUMMY>>", templated_str, sliced_file, raw_sliced)
    # Check contiguous on the TEMPLATED VERSION
    print(sliced_file)
    prev_slice = None
    for elem in sliced_file:
        print(elem)
        if prev_slice:
            assert elem[2].start == prev_slice.stop
        prev_slice = elem[2]
    # Check that all literal segments have a raw slice
    for elem in sliced_file:
        if elem[0] == "literal":
            assert elem[1] is not None
    # check result
    actual = [
        (
            templated_file_slice.slice_type,
            templated_file_slice.source_slice,
            templated_file_slice.templated_slice,
        )
        for templated_file_slice in sliced_file
    ]
    assert actual == result
Test slice_file.
test__templater_jinja_slice_file
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_large_file_check():
    """Test large file skipping.

    The check is separately called on each .process() method
    so it makes sense to test a few templaters.
    """
    # First check we can process the file normally without specific config.
    # i.e. check the defaults work and the default is high.
    JinjaTemplater().process(
        in_str="SELECT 1",
        fname="<string>",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    # Second check that setting the value to zero disables the check.
    JinjaTemplater().process(
        in_str="SELECT 1",
        fname="<string>",
        config=FluffConfig(
            overrides={"dialect": "ansi", "large_file_skip_char_limit": 0}
        ),
    )
    # Finally check we raise a skip exception when config is set low.
    with pytest.raises(SQLFluffSkipFile) as excinfo:
        JinjaTemplater().process(
            in_str="SELECT 1",
            fname="<string>",
            config=FluffConfig(
                overrides={"dialect": "ansi", "large_file_skip_char_limit": 2},
            ),
        )
    assert "Length of file" in str(excinfo.value)
Test large file skipping. The check is separately called on each .process() method so it makes sense to test a few templaters.
test__templater_jinja_large_file_check
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test_jinja_undefined_callable(in_str, ignore, expected_violation):
    """Test undefined callable returns TemplatedFile and sensible error."""
    templater = JinjaTemplater()
    templated_file, violations = templater.process(
        in_str=in_str,
        fname="test.sql",
        config=FluffConfig(overrides={"dialect": "ansi", "ignore": ignore}),
    )
    # This was previously failing to process, due to UndefinedRecorder not
    # supporting __call__(), also Jinja thinking it was not *safe* to call.
    assert templated_file is not None
    if expected_violation:
        assert len(violations) == 1
        assert isinstance(violations[0], type(expected_violation))
        assert str(violations[0]) == str(expected_violation)
    else:
        assert len(violations) == 0
Test undefined callable returns TemplatedFile and sensible error.
test_jinja_undefined_callable
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
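The ignore parameter here maps onto the standard ignore config key. A minimal sketch of silencing templating violations for an undefined callable; the callable name my_udf is illustrative and the no-violations outcome is an assumption based on the parametrized cases above:

from sqlfluff.core import FluffConfig
from sqlfluff.core.templaters import JinjaTemplater

templated_file, violations = JinjaTemplater().process(
    in_str="SELECT {{ my_udf(1) }}",
    fname="test.sql",
    config=FluffConfig(overrides={"dialect": "ansi", "ignore": "templating"}),
)
# Processing still yields a TemplatedFile, and with templating errors
# ignored, no violations should be reported.
assert templated_file is not None
assert not violations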
def test_dummy_undefined_fail_with_undefined_error():
    """Tests that a recursion error bug no longer occurs."""
    ud = DummyUndefined("name")
    with pytest.raises(UndefinedError):
        # This was previously causing a recursion error.
        ud._fail_with_undefined_error()
Tests that a recursion error bug no longer occurs.
test_dummy_undefined_fail_with_undefined_error
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test_undefined_magic_methods():
    """Test all the magic methods defined on DummyUndefined."""
    ud = DummyUndefined("name")
    # _self_impl
    assert ud + ud is ud
    assert ud - ud is ud
    assert ud / ud is ud
    assert ud // ud is ud
    assert ud % ud is ud
    assert ud**ud is ud
    assert +ud is ud
    assert -ud is ud
    assert ud << ud is ud
    assert ud[ud] is ud
    assert ~ud is ud
    assert ud(ud) is ud
    # _bool_impl
    assert ud and ud
    assert ud or ud
    assert ud ^ ud
    assert bool(ud)
    assert ud < ud
    assert ud <= ud
    assert ud == ud
    assert ud != ud
    assert ud >= ud
    assert ud > ud
    assert ud + ud is ud
Test all the magic methods defined on DummyUndefined.
test_undefined_magic_methods
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_lint_unreached_code(sql_path: str, expected_renderings):
    """Test that Jinja templater slices raw and templated file correctly."""
    test_dir = Path("test/fixtures/templater/jinja_lint_unreached_code")
    t = JinjaTemplater()
    renderings = []
    raw_slicings = []
    final_source_slices = []
    for templated_file, _ in t.process_with_variants(
        in_str=(test_dir / sql_path).read_text(),
        fname=str(sql_path),
        config=FluffConfig.from_path(str(test_dir)),
    ):
        renderings.append(templated_file.templated_str)
        raw_slicings.append(templated_file.raw_sliced)
        # Capture the final slice for all of them.
        final_source_slices.append(templated_file.sliced_file[-1].source_slice)

    assert renderings == expected_renderings
    # Compare all of the additional raw slicings to make sure they're the
    # same as the root.
    root_slicing = raw_slicings[0]
    for additional_slicing in raw_slicings[1:]:
        assert additional_slicing == root_slicing
    # Check that the final source slices also line up in the templated files.
    # NOTE: Clearly the `templated_slice` values _won't_ be the same.
    # We're doing the _final_ slice, because it's very likely to be the same
    # _type_ and if it's in the right place, we can assume that all of the
    # others probably are.
    root_final_slice = final_source_slices[0]
    for additional_final_slice in final_source_slices[1:]:
        assert additional_final_slice == root_final_slice
Test that Jinja templater slices raw and templated file correctly.
test__templater_lint_unreached_code
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
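A minimal sketch of calling process_with_variants directly; the input template and context are illustrative assumptions rather than the fixture contents, and the number of variants yielded depends on the templater's unreached-code analysis:

from sqlfluff.core import FluffConfig
from sqlfluff.core.templaters import JinjaTemplater

t = JinjaTemplater(override_context=dict(flag=False))
variants = [
    templated_file.templated_str
    for templated_file, _ in t.process_with_variants(
        in_str="SELECT {% if flag %}1{% else %}2{% endif %}",
        fname="test.sql",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
]
# The first rendering follows the real control flow; any further
# variants cover the otherwise-unreached branch.
print(variants)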
def test__templater_raw():
    """Test the templaters when nothing has to be replaced."""
    t = PlaceholderTemplater(override_context=dict(param_style="colon"))
    instr = "SELECT * FROM {{blah}} WHERE %(gnepr)s OR e~':'"
    outstr, _ = t.process(in_str=instr, fname="test")
    assert str(outstr) == instr
Test the templaters when nothing has to be replaced.
test__templater_raw
python
sqlfluff/sqlfluff
test/core/templaters/placeholder_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/placeholder_test.py
MIT
def test__templater_param_style(instr, expected_outstr, param_style, values):
    """Test different param_style templating."""
    t = PlaceholderTemplater(override_context={**values, "param_style": param_style})
    outstr, _ = t.process(
        in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"})
    )
    assert str(outstr) == expected_outstr
Test different param_style templating.
test__templater_param_style
python
sqlfluff/sqlfluff
test/core/templaters/placeholder_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/placeholder_test.py
MIT
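A minimal sketch of one concrete param_style from the parametrized cases above; the colon style and the parameter name user_id are illustrative:

from sqlfluff.core.templaters import PlaceholderTemplater

t = PlaceholderTemplater(
    override_context=dict(param_style="colon", user_id="42")
)
outstr, _ = t.process(
    in_str="SELECT * FROM users WHERE id = :user_id", fname="test"
)
# The named placeholder is substituted with the context value.
assert str(outstr) == "SELECT * FROM users WHERE id = 42"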
def test__templater_custom_regex():
    """Test custom regex templating."""
    t = PlaceholderTemplater(
        override_context=dict(param_regex="__(?P<param_name>[\\w_]+)__", my_name="john")
    )
    outstr, _ = t.process(
        in_str="SELECT bla FROM blob WHERE id = __my_name__",
        fname="test",
        config=FluffConfig(overrides={"dialect": "ansi"}),
    )
    assert str(outstr) == "SELECT bla FROM blob WHERE id = john"
Test custom regex templating.
test__templater_custom_regex
python
sqlfluff/sqlfluff
test/core/templaters/placeholder_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/placeholder_test.py
MIT
def test__templater_setup():
    """Test the exception raised when config is incomplete or ambiguous."""
    t = PlaceholderTemplater(override_context=dict(name="'john'"))
    with pytest.raises(
        ValueError,
        match=(
            "No param_regex nor param_style was provided to the placeholder templater"
        ),
    ):
        t.process(in_str="SELECT 2+2", fname="test")

    t = PlaceholderTemplater(
        override_context=dict(param_style="bla", param_regex="bli")
    )
    with pytest.raises(
        ValueError,
        match=r"Either param_style or param_regex must be provided, not both",
    ):
        t.process(in_str="SELECT 2+2", fname="test")
Test the exception raised when config is incomplete or ambiguous.
test__templater_setup
python
sqlfluff/sqlfluff
test/core/templaters/placeholder_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/placeholder_test.py
MIT
def test__templater_styles():
    """Test the exception raised when parameter style is unknown."""
    t = PlaceholderTemplater(override_context=dict(param_style="pperccent"))
    with pytest.raises(ValueError, match=r"Unknown param_style"):
        t.process(in_str="SELECT 2+2", fname="test")
Test the exception raised when parameter style is unknown.
test__templater_styles
python
sqlfluff/sqlfluff
test/core/templaters/placeholder_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/placeholder_test.py
MIT
def test_function_emulator():
    """Make sure the function wrapper works as expected."""

    def func(x):
        return "foo" + x

    wrapped = FunctionWrapper("test_name", func)
    assert str(wrapped("bar")) == "foobar"
    with pytest.raises(SQLTemplaterError):
        str(wrapped)
Make sure the function wrapper works as expected.
test_function_emulator
python
sqlfluff/sqlfluff
test/core/templaters/builtins_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/builtins_test.py
MIT
def test_relation_emulator_magic_methods():
    """Test all the magic methods defined on RelationEmulator."""
    # tests for 'this'
    t = DBT_BUILTINS["this"]
    assert str(t) == "this_model"
    assert t.something is t
    assert str(t.database) == "this_database"
    assert str(t.schema) == "this_schema"
    assert str(t.name) == "this_model"
    assert str(t.identifier) == "this_model"
    assert str(t.type) == "this_model"
    assert str(t.something_new) == "this_model"
    assert t.is_table is True
    assert t.is_view is True
    assert t.is_materialized_view is True
    assert t.is_cte is True
    assert t.is_dynamic_table is True
    assert t.is_iceberg_format is True
    assert t.is_something_new is True
    assert t.something() is t
    assert t.something().something() is t
    assert t.something().something is t
    assert str(t.include()) == "this_model"
    assert str(t.include(database=False)) == "this_model"
    assert str(t.some_new_method()) == "this_model"
    assert str(t.something().something) == "this_model"
    # tests for 'ref'
    r = DBT_BUILTINS["ref"]("ref_model")
    assert str(r) == "ref_model"
    assert r.something is r
    assert str(r.database) == "this_database"
    assert str(r.schema) == "this_schema"
    assert str(r.name) == "ref_model"
    assert str(r.identifier) == "ref_model"
    assert str(r.type) == "ref_model"
    assert str(r.something_new) == "ref_model"
    assert r.is_table is True
    assert r.is_view is True
    assert r.is_materialized_view is True
    assert r.is_cte is True
    assert r.is_dynamic_table is True
    assert r.is_iceberg_format is True
    assert r.is_something_new is True
    assert r.something() is r
    assert r.something().something() is r
    assert r.something().something is r
    assert str(r.include()) == "ref_model"
    assert str(r.include(database=False)) == "ref_model"
    assert str(r.some_new_method()) == "ref_model"
    assert str(r.something().something) == "ref_model"
    # tests for versioned 'ref'
    r = DBT_BUILTINS["ref"]("ref_model", version=2)
    assert str(r) == "ref_model"
    assert r.something is r
    assert str(r.database) == "this_database"
    assert str(r.schema) == "this_schema"
    assert str(r.name) == "ref_model"
    assert str(r.identifier) == "ref_model"
    assert str(r.type) == "ref_model"
    assert str(r.something_new) == "ref_model"
    assert r.is_table is True
    assert r.is_view is True
    assert r.is_materialized_view is True
    assert r.is_cte is True
    assert r.is_dynamic_table is True
    assert r.is_iceberg_format is True
    assert r.is_something_new is True
    assert r.something() is r
    assert r.something().something() is r
    assert r.something().something is r
    assert str(r.include()) == "ref_model"
    assert str(r.include(database=False)) == "ref_model"
    assert str(r.some_new_method()) == "ref_model"
    assert str(r.something().something) == "ref_model"
    # tests for 'ref' from project/package
    r = DBT_BUILTINS["ref"]("package", "ref_model")
    assert str(r) == "ref_model"
    assert r.something is r
    assert str(r.database) == "this_database"
    assert str(r.schema) == "this_schema"
    assert str(r.name) == "ref_model"
    assert str(r.identifier) == "ref_model"
    assert str(r.type) == "ref_model"
    assert str(r.something_new) == "ref_model"
    assert r.is_table is True
    assert r.is_view is True
    assert r.is_materialized_view is True
    assert r.is_cte is True
    assert r.is_dynamic_table is True
    assert r.is_iceberg_format is True
    assert r.is_something_new is True
    assert r.something() is r
    assert r.something().something() is r
    assert r.something().something is r
    assert str(r.include()) == "ref_model"
    assert str(r.include(database=False)) == "ref_model"
    assert str(r.some_new_method()) == "ref_model"
    assert str(r.something().something) == "ref_model"
    # tests for versioned 'ref' from project/package
    r = DBT_BUILTINS["ref"]("package", "ref_model", version=2)
    assert str(r) == "ref_model"
    assert r.something is r
    assert str(r.database) == "this_database"
    assert str(r.schema) == "this_schema"
    assert str(r.name) == "ref_model"
    assert str(r.identifier) == "ref_model"
    assert str(r.type) == "ref_model"
    assert str(r.something_new) == "ref_model"
    assert r.is_table is True
    assert r.is_view is True
    assert r.is_materialized_view is True
    assert r.is_cte is True
    assert r.is_dynamic_table is True
    assert r.is_iceberg_format is True
    assert r.is_something_new is True
    assert r.something() is r
    assert r.something().something() is r
    assert r.something().something is r
    assert str(r.include()) == "ref_model"
    assert str(r.include(database=False)) == "ref_model"
    assert str(r.some_new_method()) == "ref_model"
    assert str(r.something().something) == "ref_model"
    # tests for 'source'
    s = DBT_BUILTINS["source"]("sourcename", "tablename")
    assert str(s) == "sourcename_tablename"
    assert s.something is s
    assert str(s.database) == "this_database"
    assert str(s.schema) == "this_schema"
    assert str(s.name) == "sourcename_tablename"
    assert str(s.identifier) == "sourcename_tablename"
    assert str(s.type) == "sourcename_tablename"
    assert str(s.something_new) == "sourcename_tablename"
    assert s.is_table is True
    assert s.is_view is True
    assert s.is_materialized_view is True
    assert s.is_cte is True
    assert s.is_dynamic_table is True
    assert s.is_iceberg_format is True
    assert s.is_something_new is True
    assert s.something() is s
    assert s.something().something() is s
    assert s.something().something is s
    assert str(s.include()) == "sourcename_tablename"
    assert str(s.include(database=False)) == "sourcename_tablename"
    assert str(s.some_new_method()) == "sourcename_tablename"
    assert str(s.something().something) == "sourcename_tablename"
Test all the magic methods defined on RelationEmulator.
test_relation_emulator_magic_methods
python
sqlfluff/sqlfluff
test/core/templaters/builtins_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/builtins_test.py
MIT
def test_dialect():
    """A stripped back test dialect for testing brackets."""
    test_dialect = Dialect("test", root_segment_name="FileSegment")
    test_dialect.bracket_sets("bracket_pairs").update(
        [("round", "StartBracketSegment", "EndBracketSegment", True)]
    )
    test_dialect.set_lexer_matchers(
        [
            RegexLexer("whitespace", r"[^\S\r\n]+", WhitespaceSegment),
            RegexLexer(
                "code", r"[0-9a-zA-Z_]+", CodeSegment, segment_kwargs={"type": "code"}
            ),
        ]
    )
    test_dialect.add(
        StartBracketSegment=StringParser("(", SymbolSegment, type="start_bracket"),
        EndBracketSegment=StringParser(")", SymbolSegment, type="end_bracket"),
    )
    # Return the expanded copy.
    return test_dialect.expand()
A stripped back test dialect for testing brackets.
test_dialect
python
sqlfluff/sqlfluff
test/core/parser/match_algorithms_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/parser/match_algorithms_test.py
MIT
def make_result_tuple(result_slice, matcher_keywords, test_segments):
    """Make a comparison tuple for test matching."""
    # No result slice means no match.
    if not result_slice:
        return ()
    return tuple(
        (
            KeywordSegment(elem.raw, pos_marker=elem.pos_marker)
            if elem.raw in matcher_keywords
            else elem
        )
        for elem in test_segments[result_slice]
    )
Make a comparison tuple for test matching.
make_result_tuple
python
sqlfluff/sqlfluff
test/core/parser/match_algorithms_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/parser/match_algorithms_test.py
MIT
def test__parser__algorithms__next_match(
    matcher_keywords,
    result_slice,
    winning_matcher,
    test_segments,
):
    """Test the `next_match()` method."""
    # Make the string parsers for testing.
    matchers = [StringParser(keyword, KeywordSegment) for keyword in matcher_keywords]
    # Fetch the matching keyword from above (because it will have the same position)
    if winning_matcher:
        winning_matcher = matchers[matcher_keywords.index(winning_matcher)]

    ctx = ParseContext(dialect=None)
    match, matcher = next_match(
        test_segments,
        0,
        matchers,
        ctx,
    )

    # Check the right matcher was successful.
    if winning_matcher:
        assert matcher is winning_matcher
    else:
        # If no designated winning matcher, assert that it wasn't successful.
        assert matcher is None
        assert not match
    assert match.matched_slice == result_slice
Test the `next_match()` method.
test__parser__algorithms__next_match
python
sqlfluff/sqlfluff
test/core/parser/match_algorithms_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/parser/match_algorithms_test.py
MIT