Dataset columns:

  code        string, lengths 26 to 870k
  docstring   string, lengths 1 to 65.6k
  func_name   string, lengths 1 to 194
  language    string, 1 class
  repo        string, lengths 8 to 68
  path        string, lengths 5 to 182
  url         string, lengths 46 to 251
  license     string, 4 classes
def rebreak(
    self, rebreak_type: Literal["lines", "keywords"] = "lines"
) -> "ReflowSequence":
    """Returns a new :obj:`ReflowSequence` with corrected line breaks.

    This intentionally **does not handle indentation**,
    as the existing indents are assumed to be correct.

    .. note::

        Currently this only *moves* existing segments
        around line breaks (e.g. for operators and commas),
        but eventually this method will also handle line
        length considerations too.
    """
    if self.lint_results:
        raise NotImplementedError(  # pragma: no cover
            "rebreak cannot currently handle pre-existing embodied fixes."
        )

    # Delegate to the rebreak algorithm
    if rebreak_type == "lines":
        elem_buff, lint_results = rebreak_sequence(self.elements, self.root_segment)
    elif rebreak_type == "keywords":
        elem_buff, lint_results = rebreak_keywords_sequence(
            self.elements, self.root_segment
        )
    else:  # pragma: no cover
        raise NotImplementedError(
            f"Rebreak type of `{rebreak_type}` is not supported."
        )

    return ReflowSequence(
        elements=elem_buff,
        root_segment=self.root_segment,
        reflow_config=self.reflow_config,
        depth_map=self.depth_map,
        lint_results=lint_results,
    )
Returns a new :obj:`ReflowSequence` with corrected line breaks. This intentionally **does not handle indentation**, as the existing indents are assumed to be correct. .. note:: Currently this only *moves* existing segments around line breaks (e.g. for operators and commas), but eventually this method will also handle line length considerations too.
rebreak
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def reindent(self) -> "ReflowSequence":
    """Reindent lines within a sequence."""
    if self.lint_results:
        raise NotImplementedError(  # pragma: no cover
            "reindent cannot currently handle pre-existing embodied fixes."
        )

    single_indent = construct_single_indent(
        indent_unit=self.reflow_config.indent_unit,
        tab_space_size=self.reflow_config.tab_space_size,
    )

    reflow_logger.info("# Evaluating indents.")
    elements, indent_results = lint_indent_points(
        self.elements,
        single_indent=single_indent,
        skip_indentation_in=self.reflow_config.skip_indentation_in,
        allow_implicit_indents=self.reflow_config.allow_implicit_indents,
        ignore_comment_lines=self.reflow_config.ignore_comment_lines,
    )

    return ReflowSequence(
        elements=elements,
        root_segment=self.root_segment,
        reflow_config=self.reflow_config,
        depth_map=self.depth_map,
        lint_results=indent_results,
    )
Reindent lines within a sequence.
reindent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def break_long_lines(self) -> "ReflowSequence":
    """Rebreak any remaining long lines in a sequence.

    This assumes that reindent() has already been applied.
    """
    if self.lint_results:
        raise NotImplementedError(  # pragma: no cover
            "break_long_lines cannot currently handle pre-existing "
            "embodied fixes."
        )

    single_indent = construct_single_indent(
        indent_unit=self.reflow_config.indent_unit,
        tab_space_size=self.reflow_config.tab_space_size,
    )

    reflow_logger.info("# Evaluating line lengths.")
    elements, length_results = lint_line_length(
        self.elements,
        self.root_segment,
        single_indent=single_indent,
        line_length_limit=self.reflow_config.max_line_length,
        allow_implicit_indents=self.reflow_config.allow_implicit_indents,
        trailing_comments=self.reflow_config.trailing_comments,
    )

    return ReflowSequence(
        elements=elements,
        root_segment=self.root_segment,
        reflow_config=self.reflow_config,
        depth_map=self.depth_map,
        lint_results=length_results,
    )
Rebreak any remaining long lines in a sequence. This assumes that reindent() has already been applied.
break_long_lines
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def get_consumed_whitespace(segment: Optional[RawSegment]) -> Optional[str]:
    """A helper function to extract possible consumed whitespace.

    Args:
        segment (:obj:`RawSegment`, optional): A segment to test for
            suitability and extract the source representation of if
            appropriate. If passed None, then returns None.

    Returns:
        Returns the :code:`source_str` if the segment is of type
        :code:`placeholder` and has a :code:`block_type` of
        :code:`literal`. Otherwise None.
    """
    if not segment or not segment.is_type("placeholder"):
        return None
    placeholder = cast(TemplateSegment, segment)
    if placeholder.block_type != "literal":
        return None
    return placeholder.source_str
A helper function to extract possible consumed whitespace. Args: segment (:obj:`RawSegment`, optional): A segment to test for suitability and extract the source representation of if appropriate. If passed None, then returns None. Returns: Returns the :code:`source_str` if the segment is of type :code:`placeholder` and has a :code:`block_type` of :code:`literal`. Otherwise None.
get_consumed_whitespace
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
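The two-step filter described in the docstring above (the segment must be a placeholder, and its block_type must be "literal") is easy to see with a stand-in object. A minimal sketch, using a hypothetical FakePlaceholder in place of sqlfluff's real TemplateSegment:

from dataclasses import dataclass
from typing import Optional


@dataclass
class FakePlaceholder:
    """Hypothetical stand-in for TemplateSegment; models only what's used."""

    block_type: str
    source_str: str

    def is_type(self, *types: str) -> bool:
        return "placeholder" in types


def get_consumed_whitespace_sketch(segment) -> Optional[str]:
    # Mirrors the logic above: placeholder + literal block -> source_str.
    if not segment or not segment.is_type("placeholder"):
        return None
    if segment.block_type != "literal":
        return None
    return segment.source_str


assert get_consumed_whitespace_sketch(None) is None
assert get_consumed_whitespace_sketch(FakePlaceholder("templated", "x")) is None
assert get_consumed_whitespace_sketch(FakePlaceholder("literal", "\n  ")) == "\n  "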
def class_types(self) -> Set[str]:
    """Get the set of contained class types.

    Parallel to `BaseSegment.class_types`
    """
    return self._class_types(self.segments)
Get the set of contained class types. Parallel to `BaseSegment.class_types`
class_types
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def pos_marker(self) -> Optional[PositionMarker]:
    """Get the first position marker of the element."""
    for seg in self.segments:
        if seg.pos_marker:
            return seg.pos_marker
    return None
Get the first position marker of the element.
pos_marker
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def num_newlines(self) -> int:
    """Return the number of newlines in this element.

    These newlines are either newline segments or contained
    within consumed sections of whitespace. This counts both.
    """
    return sum(
        bool("newline" in seg.class_types)
        + (get_consumed_whitespace(seg) or "").count("\n")
        for seg in self.segments
    )
Return the number of newlines in this element. These newlines are either newline segments or contained within consumed sections of whitespace. This counts both.
num_newlines
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
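The sum above adds two counts per segment: one if the segment is itself a newline, plus any "\n" characters hidden inside consumed whitespace. A tiny illustration of that arithmetic with a plain string standing in for the consumed whitespace:

# One newline segment plus a consumed-whitespace string holding two
# newlines gives a total of three.
consumed = "\n    \n"
assert 1 + (consumed or "").count("\n") == 3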
def is_all_unrendered(self) -> bool:
    """Return whether this element is all unrendered.

    Returns True if it contains only whitespace, indents, template
    loops or placeholders.

    Note:
        * ReflowBlocks will contain the placeholders and loops
        * ReflowPoints will contain whitespace, indents and newlines.
    """
    for seg in self.segments:
        if not seg.is_type(
            "whitespace", "placeholder", "newline", "indent", "template_loop"
        ):
            return False
    return True
Return whether this element is all unrendered. Returns True if it contains only whitespace, indents, template loops or placeholders. Note: * ReflowBlocks will contain the placeholders and loops * ReflowPoints will contain whitespace, indents and newlines.
is_all_unrendered
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def from_config(
    cls: Type["ReflowBlock"],
    segments: Tuple[RawSegment, ...],
    config: ReflowConfig,
    depth_info: DepthInfo,
) -> "ReflowBlock":
    """Construct a ReflowBlock while extracting relevant configuration.

    This is the primary route to construct a ReflowBlock, as it
    allows all of the inference of the spacing and position
    configuration from the segments it contains and the
    appropriate config objects.
    """
    block_config = config.get_block_config(cls._class_types(segments), depth_info)
    stack_spacing_configs = {}
    line_position_configs = {}
    keyword_line_position_configs = {}
    for hash, class_types in zip(
        depth_info.stack_hashes, depth_info.stack_class_types
    ):
        cfg = config.get_block_config(class_types)
        if cfg.spacing_within:
            stack_spacing_configs[hash] = cfg.spacing_within
        if cfg.line_position:
            line_position_configs[hash] = cfg.line_position
        if cfg.keyword_line_position:
            keyword_line_position_configs[hash] = cfg.keyword_line_position
    return cls(
        segments=segments,
        spacing_before=block_config.spacing_before,
        spacing_after=block_config.spacing_after,
        line_position=block_config.line_position,
        depth_info=depth_info,
        stack_spacing_configs=stack_spacing_configs,
        line_position_configs=line_position_configs,
        keyword_line_position=block_config.keyword_line_position,
        keyword_line_position_configs=keyword_line_position_configs,
    )
Construct a ReflowBlock while extracting relevant configuration. This is the primary route to construct a ReflowBlock, as it allows all of the inference of the spacing and position configuration from the segments it contains and the appropriate config objects.
from_config
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def _indent_description(indent: str) -> str:
    """Construct a human readable description of the indent.

    NOTE: We operate assuming that the "correct" indent is
    never a mix of tabs and spaces. That means if the provided
    indent *does* contain both that this description is likely
    a case where we are matching a pre-existing indent, and can
    assume that the *description* of that indent is non-critical.
    To handle that situation gracefully we just return "Mixed Indent".

    See: https://github.com/sqlfluff/sqlfluff/issues/4255
    """
    if indent == "":
        return "no indent"
    elif " " in indent and "\t" in indent:
        return "mixed indent"
    elif indent[0] == " ":
        assert all(c == " " for c in indent)
        return f"indent of {len(indent)} spaces"
    elif indent[0] == "\t":  # pragma: no cover
        assert all(c == "\t" for c in indent)
        return f"indent of {len(indent)} tabs"
    else:  # pragma: no cover
        raise NotImplementedError(f"Invalid indent construction: {indent!r}")
Construct a human readable description of the indent. NOTE: We operate assuming that the "correct" indent is never a mix of tabs and spaces. That means if the provided indent *does* contain both that this description is likely a case where we are matching a pre-existing indent, and can assume that the *description* of that indent is non-critical. To handle that situation gracefully we just return "Mixed Indent". See: https://github.com/sqlfluff/sqlfluff/issues/4255
_indent_description
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
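Illustrative inputs and outputs for the helper above. Note that _indent_description is a private helper, so calling it directly like this is a sketch rather than supported API usage:

# Each branch of the helper, exercised in turn.
assert _indent_description("") == "no indent"
assert _indent_description("    ") == "indent of 4 spaces"
assert _indent_description("\t\t") == "indent of 2 tabs"
assert _indent_description(" \t") == "mixed indent"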
def from_combination(
    cls, first: Optional["IndentStats"], second: "IndentStats"
) -> "IndentStats":
    """Create IndentStats from two consecutive IndentStats.

    This is mostly used for combining the effects of indent and dedent
    tokens either side of a comment.

    NOTE: The *first* is considered optional, because if we're
    calling this function, we're assuming that there's always
    a second.
    """
    # First check for the trivial case that we only have one.
    if not first:
        return second

    # Otherwise, combine the two into one.
    return cls(
        first.impulse + second.impulse,
        min(first.trough, first.impulse + second.trough),
        second.implicit_indents,
    )
Create IndentStats from two consecutive IndentStats. This is mostly used for combining the effects of indent and dedent tokens either side of a comment. NOTE: The *first* is considered optional, because if we're calling this function, we're assuming that there's always a second.
from_combination
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
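The combination arithmetic above is worth a worked example. Using plain (impulse, trough) tuples rather than real IndentStats objects: an indent of (1, 0) followed by a double dedent of (-2, -2) combines to a net impulse of 1 + (-2) == -1, and a trough of min(0, 1 + (-2)) == -1, the lowest balance reached across both:

first = (1, 0)  # (impulse, trough)
second = (-2, -2)
combined = (first[0] + second[0], min(first[1], first[0] + second[1]))
assert combined == (-1, -1)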
def __init__(self, segments: Tuple[RawSegment, ...]):
    """Override the init method to calculate indent stats."""
    object.__setattr__(self, "segments", segments)
    object.__setattr__(self, "_stats", self._generate_indent_stats(segments))
Override the init method to calculate indent stats.
__init__
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def _get_indent_segment(self) -> Optional[RawSegment]:
    """Get the current indent segment (if there).

    NOTE: This only returns _untemplated_ indents. If templated
    newline or whitespace segments are found they are skipped.
    """
    indent: Optional[RawSegment] = None
    for seg in reversed(self.segments):
        if seg.pos_marker and not seg.pos_marker.is_literal():
            # Skip any templated elements.
            # NOTE: It must _have_ a position marker at this
            # point however to take this route. A segment
            # without a position marker at all, is an edit
            # or insertion, and so should still be considered.
            continue
        elif seg.is_type("newline"):
            return indent
        elif seg.is_type("whitespace"):
            indent = seg
        elif "\n" in (get_consumed_whitespace(seg) or ""):
            # Consumed whitespace case.
            # NOTE: In this situation, we're not looking for
            # separate newline and indent segments, we're
            # making the assumption that they'll be together
            # which I think is a safe one for now.
            return seg
    # i.e. if we never find a newline, it's not an indent.
    return None
Get the current indent segment (if there). NOTE: This only returns _untemplated_ indents. If templated newline or whitespace segments are found they are skipped.
_get_indent_segment
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def get_indent(self) -> Optional[str]:
    """Get the current indent (if there)."""
    # If no newlines, it's not an indent. Return None.
    if not self.num_newlines():
        return None
    # If there are newlines but no indent segment, return "".
    seg = self._get_indent_segment()
    consumed_whitespace = get_consumed_whitespace(seg)
    if consumed_whitespace:  # pragma: no cover
        # Return last bit after newline.
        # NOTE: Not tested, because usually this would happen
        # directly via _get_indent_segment.
        return consumed_whitespace.split("\n")[-1]
    return seg.raw if seg else ""
Get the current indent (if there).
get_indent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def get_indent_segment_vals(self, exclude_block_indents=False) -> List[int]:
    """Iterate through any indent segments and extract their values."""
    values = []
    for seg in self.segments:
        if seg.is_type("indent"):
            indent_seg = cast(Indent, seg)
            if exclude_block_indents and indent_seg.block_uuid:
                continue
            values.append(indent_seg.indent_val)
    return values
Iterate through any indent segments and extract their values.
get_indent_segment_vals
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def _generate_indent_stats(
    segments: Sequence[RawSegment],
) -> IndentStats:
    """Generate the change in intended indent balance.

    This is the main logic which powers .get_indent_impulse()
    """
    trough = 0
    running_sum = 0
    implicit_indents = []
    for seg in segments:
        if seg.is_type("indent"):
            indent_seg = cast(Indent, seg)
            running_sum += indent_seg.indent_val
            # Do we need to add a new implicit indent?
            if indent_seg.is_implicit:
                implicit_indents.append(running_sum)
            # NOTE: We don't check for removal of implicit indents
            # because it's unlikely that one would be opened, and then
            # closed within the same point. That would probably be the
            # sign of a bug in the dialect.
        if running_sum < trough:
            trough = running_sum
    return IndentStats(running_sum, trough, tuple(implicit_indents))
Generate the change in intended indent balance. This is the main logic which powers .get_indent_impulse()
_generate_indent_stats
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
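A trace of the running-balance logic above, with plain integers standing in for indent_val on successive indent segments:

# Indent values +1, -1, -1, +1: the net impulse is 0, but the balance
# dips to -1 partway through, which is what `trough` records.
running, trough = 0, 0
for val in [1, -1, -1, 1]:
    running += val
    trough = min(trough, running)
assert (running, trough) == (0, -1)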
def get_indent_impulse(self) -> IndentStats:
    """Get the change in intended indent balance from this point."""
    return self._stats
Get the change in intended indent balance from this point.
get_indent_impulse
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def indent_to(
    self,
    desired_indent: str,
    after: Optional[BaseSegment] = None,
    before: Optional[BaseSegment] = None,
    description: Optional[str] = None,
    source: Optional[str] = None,
) -> Tuple[List[LintResult], "ReflowPoint"]:
    """Coerce a point to have a particular indent.

    If the point currently contains no newlines, one will
    be introduced and any trailing whitespace will be effectively
    removed. More specifically, the newline is *inserted before*
    the existing whitespace, with the new indent being a
    *replacement* for that same whitespace.

    For placeholder newlines or indents we generate appropriate
    source fixes.
    """
    assert "\n" not in desired_indent, "Newline found in desired indent."

    # Get the indent (or in the case of no newline, the last whitespace)
    indent_seg = self._get_indent_segment()
    reflow_logger.debug(
        "Coercing indent %s to %r. (newlines: %s)",
        indent_seg,
        desired_indent,
        self.num_newlines(),
    )

    if indent_seg and indent_seg.is_type("placeholder"):
        # Handle the placeholder case.
        indent_seg = cast(TemplateSegment, indent_seg)
        # There should always be a newline, so assert that.
        assert "\n" in indent_seg.source_str
        # We should always replace the section _containing_ the
        # newline, rather than just bluntly inserting. This
        # makes slicing later easier.
        current_indent = indent_seg.source_str.split("\n")[-1]
        source_slice = slice(
            indent_seg.pos_marker.source_slice.stop - len(current_indent),
            indent_seg.pos_marker.source_slice.stop,
        )
        for existing_source_fix in indent_seg.source_fixes:  # pragma: no cover
            if slice_overlaps(existing_source_fix.source_slice, source_slice):
                reflow_logger.warning(
                    "Creating overlapping source fix. Results may be "
                    "unpredictable and this might be a sign of a bug. "
                    "Please report this along with your query.\n"
                    f"({existing_source_fix.source_slice} overlaps "
                    f"{source_slice})"
                )

        new_source_fix = SourceFix(
            desired_indent,
            source_slice,
            # The templated slice is going to be a zero slice _anyway_.
            indent_seg.pos_marker.templated_slice,
        )

        if new_source_fix in indent_seg.source_fixes:  # pragma: no cover
            # NOTE: If we're trying to reapply the same fix, don't.
            # Just return an error without the fixes. This is probably
            # a bug if we're taking this route, but this clause will help
            # catch bugs faster if they occur.
            reflow_logger.warning(
                "Attempted to apply a duplicate source fix to %r. "
                "Returning this time without fix.",
                indent_seg.pos_marker.source_str(),
            )
            fixes = []
            new_segments = self.segments
        else:
            if current_indent:
                new_source_str = (
                    indent_seg.source_str[: -len(current_indent)] + desired_indent
                )
            else:
                new_source_str = indent_seg.source_str + desired_indent
            assert "\n" in new_source_str
            new_placeholder = indent_seg.edit(
                source_fixes=[new_source_fix],
                source_str=new_source_str,
            )
            fixes = [LintFix.replace(indent_seg, [new_placeholder])]
            new_segments = tuple(
                new_placeholder if seg is indent_seg else seg
                for seg in self.segments
            )

        return [
            LintResult(
                indent_seg,
                fixes,
                description=description
                or f"Expected {_indent_description(desired_indent)}.",
                source=source,
            )
        ], ReflowPoint(new_segments)

    elif self.num_newlines():
        # There is already a newline. Is there an indent?
        if indent_seg:
            # Coerce existing indent to desired.
            if indent_seg.raw == desired_indent:
                # Trivial case. Indent already correct
                return [], self
            elif desired_indent == "":
                idx = self.segments.index(indent_seg)
                return [
                    LintResult(
                        indent_seg,
                        # Coerce to no indent. We don't want the indent. Delete it.
                        [LintFix.delete(indent_seg)],
                        description=description or "Line should not be indented.",
                        source=source,
                    )
                ], ReflowPoint(self.segments[:idx] + self.segments[idx + 1 :])

            # Standard case of an indent change.
            new_indent = indent_seg.edit(desired_indent)
            idx = self.segments.index(indent_seg)
            return [
                LintResult(
                    indent_seg,
                    [LintFix.replace(indent_seg, [new_indent])],
                    description=description
                    or f"Expected {_indent_description(desired_indent)}.",
                    source=source,
                )
            ], ReflowPoint(
                self.segments[:idx] + (new_indent,) + self.segments[idx + 1 :]
            )
        else:
            # There is a newline, but no indent. Make one after the newline
            # Find the index of the last newline (there _will_ be one because
            # we checked self.num_newlines() above).

            # Before going further, check we have a non-zero indent.
            if not desired_indent:
                # We're trying to coerce a non-existent indent to zero. This
                # means we're already ok.
                return [], self

            for idx in range(len(self.segments) - 1, -1, -1):
                # NOTE: Must be a _literal_ newline, not a templated one.
                # https://github.com/sqlfluff/sqlfluff/issues/4367
                if self.segments[idx].is_type("newline"):
                    if self.segments[idx].pos_marker.is_literal():
                        break

            new_indent = WhitespaceSegment(desired_indent)
            return [
                LintResult(
                    # The anchor for the *result* should be the segment
                    # *after* the newline, otherwise the location of the fix
                    # is confusing.
                    # For this method, `before` is optional, but normally
                    # passed. If it is there, use that as the anchor
                    # instead. We fall back to the last newline if not.
                    before if before else self.segments[idx],
                    # Rather than doing a `create_after` here, we're
                    # going to do a replace. This is effectively to give a hint
                    # to the linter that this is safe to do before a templated
                    # placeholder. This solves some potential bugs - although
                    # it feels a bit like a workaround.
                    [
                        LintFix.replace(
                            self.segments[idx],
                            [self.segments[idx], new_indent],
                        )
                    ],
                    description=description
                    or f"Expected {_indent_description(desired_indent)}.",
                    source=source,
                )
            ], ReflowPoint(
                self.segments[: idx + 1] + (new_indent,) + self.segments[idx + 1 :]
            )

    else:
        # There isn't currently a newline.
        new_newline = NewlineSegment()
        new_segs: List[RawSegment]
        # Check for whitespace
        ws_seg = None
        for seg in self.segments[::-1]:
            if seg.is_type("whitespace"):
                ws_seg = seg

        if not ws_seg:
            # Work out the new segments. Always a newline, only whitespace if
            # there's a non zero indent.
            new_segs = [new_newline] + (
                [WhitespaceSegment(desired_indent)] if desired_indent else []
            )
            # There isn't a whitespace segment either. We need to insert one.
            # Do we have an anchor?
            if not before and not after:  # pragma: no cover
                raise NotImplementedError(
                    "Not set up to handle empty points in this "
                    "scenario without provided before/after "
                    f"anchor: {self.segments}"
                )
            # Otherwise make a new indent, attached to the relevant anchor.
            # Prefer anchoring before because it makes the labelling better.
            elif before:
                before_raw = (
                    cast(TemplateSegment, before).source_str
                    if before.is_type("placeholder")
                    else before.raw
                )
                fix = LintFix.create_before(before, new_segs)
                description = description or (
                    "Expected line break and "
                    f"{_indent_description(desired_indent)} "
                    f"before {before_raw!r}."
                )
            else:
                assert after  # mypy hint
                after_raw = (
                    cast(TemplateSegment, after).source_str
                    if after.is_type("placeholder")
                    else after.raw
                )
                fix = LintFix.create_after(after, new_segs)
                description = description or (
                    "Expected line break and "
                    f"{_indent_description(desired_indent)} "
                    f"after {after_raw!r}."
                )
            new_point = ReflowPoint(tuple(new_segs))
            anchor = before
        else:
            # There is whitespace. Coerce it to the right indent and add
            # a newline _before_. In the edge case that we're coercing to
            # _no indent_, edit existing indent to be the newline and leave
            # it there.
            if desired_indent == "":
                new_segs = [new_newline]
            else:
                new_segs = [new_newline, ws_seg.edit(desired_indent)]
            idx = self.segments.index(ws_seg)
            if not description:
                # Prefer before, because it makes the anchoring better.
                if before:
                    description = (
                        "Expected line break and "
                        f"{_indent_description(desired_indent)} "
                        f"before {before.raw!r}."
                    )
                elif after:
                    description = (
                        "Expected line break and "
                        f"{_indent_description(desired_indent)} "
                        f"after {after.raw!r}."
                    )
                else:  # pragma: no cover
                    # NOTE: Doesn't have test coverage because there's
                    # normally an `after` or `before` value, so this
                    # clause is unused.
                    description = (
                        "Expected line break and "
                        f"{_indent_description(desired_indent)}."
                    )
            fix = LintFix.replace(ws_seg, new_segs)
            new_point = ReflowPoint(
                self.segments[:idx] + tuple(new_segs) + self.segments[idx + 1 :]
            )
            anchor = ws_seg

        return [
            LintResult(anchor, fixes=[fix], description=description, source=source)
        ], new_point
Coerce a point to have a particular indent. If the point currently contains no newlines, one will be introduced and any trailing whitespace will be effectively removed. More specifically, the newline is *inserted before* the existing whitespace, with the new indent being a *replacement* for that same whitespace. For placeholder newlines or indents we generate appropriate source fixes.
indent_to
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def respace_point(
    self,
    prev_block: Optional[ReflowBlock],
    next_block: Optional[ReflowBlock],
    root_segment: BaseSegment,
    lint_results: List[LintResult],
    strip_newlines: bool = False,
    anchor_on: str = "before",
) -> Tuple[List[LintResult], "ReflowPoint"]:
    """Respace a point based on given constraints.

    NB: This effectively includes trailing whitespace fixes.

    Deletion and edit fixes are generated immediately, but creations
    are paused to the end and done in bulk so as not to generate
    conflicts.

    Note that the `strip_newlines` functionality exists here as a slight
    exception to pure respacing, but as a very simple case of positioning
    line breaks. The default operation of `respace` does not enable it,
    however it exists as a convenience for rules which wish to use it.
    """
    existing_results = lint_results[:]
    pre_constraint, post_constraint, strip_newlines = determine_constraints(
        prev_block, next_block, strip_newlines
    )

    reflow_logger.debug("* Respacing: %r @ %s", self.raw, self.pos_marker)

    # The buffer is used to create the new reflow point to return
    segment_buffer, last_whitespace, new_results = process_spacing(
        list(self.segments), strip_newlines
    )

    # Check for final trailing whitespace (which otherwise looks like an indent).
    if next_block and "end_of_file" in next_block.class_types and last_whitespace:
        new_results.append(
            LintResult(
                last_whitespace,
                [LintFix.delete(last_whitespace)],
                description="Unnecessary trailing whitespace at end of file.",
            )
        )
        segment_buffer.remove(last_whitespace)
        last_whitespace = None

    # Is there a newline?
    # NOTE: We do this based on the segment buffer rather than self.class_types
    # because we may have just removed any present newlines in the buffer.
    if (
        any(seg.is_type("newline") for seg in segment_buffer) and not strip_newlines
    ) or (next_block and "end_of_file" in next_block.class_types):
        # Most of this section should be handled as _Indentation_.
        # BUT: There is one case we should handle here.
        # If we find that the last whitespace has a newline
        # before it, and the position markers imply there was
        # a removal between them, then remove the whitespace.
        # This ensures a consistent indent.
        if last_whitespace:
            ws_idx = self.segments.index(last_whitespace)
            if ws_idx > 0:
                # NOTE: Iterate by index so that we don't slice the full range.
                for prev_seg_idx in range(ws_idx - 1, -1, -1):
                    prev_seg = self.segments[prev_seg_idx]
                    # Skip past any indents
                    if not prev_seg.is_type("indent"):
                        break

                if (
                    prev_seg.is_type("newline")
                    # Not just unequal. Must be actively _before_.
                    # NOTE: Based on working locations
                    and prev_seg.get_end_loc() < last_whitespace.get_start_loc()
                ):
                    reflow_logger.debug(
                        "    Removing non-contiguous whitespace post removal."
                    )
                    segment_buffer.remove(last_whitespace)
                    # Ideally we should attach to an existing result.
                    # To do that effectively, we should look for the removed
                    # segment in the existing results.
                    temp_idx = last_whitespace.pos_marker.templated_slice.start
                    for res in existing_results:
                        if (
                            res.anchor
                            and res.anchor.pos_marker
                            and res.anchor.pos_marker.templated_slice.stop
                            == temp_idx
                        ):
                            break
                    else:  # pragma: no cover
                        raise NotImplementedError("Could not find removal result.")
                    existing_results.remove(res)
                    new_results.append(
                        LintResult(
                            res.anchor,
                            fixes=res.fixes + [LintFix("delete", last_whitespace)],
                            description=res.description,
                        )
                    )
        # Return the results.
        return existing_results + new_results, ReflowPoint(tuple(segment_buffer))

    # Otherwise is this an inline case? (i.e. no newline)
    reflow_logger.debug(
        "    Inline case. Constraints: %s <-> %s.",
        pre_constraint,
        post_constraint,
    )

    # Do we at least have _some_ whitespace?
    if last_whitespace:
        # We do - is it the right size?
        segment_buffer, results = handle_respace__inline_with_space(
            pre_constraint,
            post_constraint,
            prev_block,
            next_block,
            root_segment,
            segment_buffer,
            last_whitespace,
        )
        new_results.extend(results)
    else:
        # No. Should we insert some?
        # NOTE: This method operates on the existing fix buffer.
        segment_buffer, new_results, edited = handle_respace__inline_without_space(
            pre_constraint,
            post_constraint,
            prev_block,
            next_block,
            segment_buffer,
            existing_results + new_results,
            anchor_on=anchor_on,
        )
        existing_results = []
        if edited:
            reflow_logger.debug("    Modified result buffer: %s", new_results)

    # Only log if we actually made a change.
    if new_results:
        reflow_logger.debug("    New Results: %s", new_results)

    return existing_results + new_results, ReflowPoint(tuple(segment_buffer))
Respace a point based on given constraints. NB: This effectively includes trailing whitespace fixes. Deletion and edit fixes are generated immediately, but creations are paused to the end and done in bulk so as not to generate conflicts. Note that the `strip_newlines` functionality exists here as a slight exception to pure respacing, but as a very simple case of positioning line breaks. The default operation of `respace` does not enable it, however it exists as a convenience for rules which wish to use it.
respace_point
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def as_str(self) -> str:
    """String representation for logging/testing."""
    return self.selectable.raw
String representation for logging/testing.
as_str
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def select_info(self) -> Optional[SelectStatementColumnsAndTables]:
    """Returns SelectStatementColumnsAndTables on the SELECT."""
    if self.selectable.is_type("select_statement"):
        return get_select_statement_info(
            self.selectable, self.dialect, early_exit=False
        )
    else:  # DML or values_clause
        # This is a bit dodgy, but a very useful abstraction. Here, we
        # interpret a DML or values_clause segment as if it were a SELECT.
        # Someday, we may need to tweak this, e.g. perhaps add a separate
        # QueryType for this (depending on the needs of the rules that use
        # it).
        #
        # For more info on the syntax and behavior of VALUES and its
        # similarity to a SELECT statement with literal values (no table
        # source), see the "Examples" section of the Postgres docs page:
        # (https://www.postgresql.org/docs/8.2/sql-values.html).
        values = Segments(self.selectable)
        alias_expression = values.children().first(sp.is_type("alias_expression"))
        name = alias_expression.children().first(
            sp.is_type("naked_identifier", "quoted_identifier")
        )
        alias_info = AliasInfo(
            name[0].raw if name else "",
            name[0] if name else None,
            bool(name),
            self.selectable,
            alias_expression[0] if alias_expression else None,
            None,
        )
        return SelectStatementColumnsAndTables(
            select_statement=self.selectable,
            table_aliases=[alias_info],
            standalone_aliases=[],
            reference_buffer=[],
            select_targets=[],
            col_aliases=[],
            using_cols=[],
            table_reference_buffer=[],
        )
Returns SelectStatementColumnsAndTables on the SELECT.
select_info
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def get_wildcard_info(self) -> List[WildcardInfo]:
    """Find wildcard (*) targets in the SELECT."""
    buff: List[WildcardInfo] = []
    # Some select-like statements don't have select_info
    # (e.g. test_exasol_invalid_foreign_key_from)
    if not self.select_info:  # pragma: no cover
        # TODO: Review whether to remove this.
        # Restructure of Exasol dialect means it no longer applies.
        return buff
    for seg in self.select_info.select_targets:
        if seg.get_child("wildcard_expression"):
            if "." in seg.raw:
                # The wildcard specifies a target table.
                table = seg.raw.rsplit(".", 1)[0]
                buff.append(WildcardInfo(seg, [table]))
            else:
                # The wildcard is unqualified (i.e. does not specify a
                # table). This means to include all columns from all the
                # tables in the query.
                buff.append(
                    WildcardInfo(
                        seg,
                        [
                            (
                                alias_info.ref_str
                                if alias_info.aliased
                                else alias_info.from_expression_element.raw
                            )
                            for alias_info in self.select_info.table_aliases
                            if alias_info.ref_str
                        ],
                    )
                )
    return buff
Find wildcard (*) targets in the SELECT.
get_wildcard_info
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def find_alias(self, table: str) -> Optional[AliasInfo]:
    """Find corresponding table_aliases entry (if any) matching "table"."""
    alias_info = [
        t
        for t in (self.select_info.table_aliases if self.select_info else [])
        if t.aliased and t.ref_str == table
    ]
    assert len(alias_info) <= 1
    return alias_info[0] if alias_info else None
Find corresponding table_aliases entry (if any) matching "table".
find_alias
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def children(self: T) -> List[T]:
    """Children could be CTEs, subselects or Others."""
    return list(self.ctes.values()) + self.subqueries
Children could be CTEs, subselects or Others.
children
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def as_dict(self: T) -> Dict:
    """Dict representation for logging/testing."""
    result: Dict[str, Union[str, List[str], Dict, List[Dict]]] = {}
    if self.query_type != QueryType.Simple:
        result["query_type"] = self.query_type.name
    if self.selectables:
        result["selectables"] = [s.as_str() for s in self.selectables]
    if self.ctes:
        result["ctes"] = {k: v.as_dict() for k, v in self.ctes.items()}
    if self.subqueries:
        result["subqueries"] = [q.as_dict() for q in self.subqueries]
    return result
Dict representation for logging/testing.
as_dict
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def lookup_cte(self: T, name: str, pop: bool = True) -> Optional[T]:
    """Look up a CTE by name, in the current or any parent scope."""
    cte = self.ctes.get(name.upper())
    if cte:
        if pop:
            del self.ctes[name.upper()]
        return cte
    if self.parent:
        return self.parent.lookup_cte(name, pop)
    else:
        return None
Look up a CTE by name, in the current or any parent scope.
lookup_cte
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
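The scope-chain behaviour described above (case-insensitive lookup, fallback to the parent scope, and pop-on-hit) can be sketched without the rest of the Query machinery. A minimal sketch using a hypothetical Scope class in place of Query:

from typing import Dict, Optional


class Scope:
    """Hypothetical stand-in for Query, modelling only ctes and parent."""

    def __init__(self, ctes: Dict[str, str], parent: Optional["Scope"] = None):
        self.ctes = {k.upper(): v for k, v in ctes.items()}
        self.parent = parent

    def lookup_cte(self, name: str, pop: bool = True) -> Optional[str]:
        cte = self.ctes.get(name.upper())
        if cte:
            if pop:
                del self.ctes[name.upper()]
            return cte
        return self.parent.lookup_cte(name, pop) if self.parent else None


outer = Scope({"base": "SELECT 1"})
inner = Scope({}, parent=outer)
assert inner.lookup_cte("Base") == "SELECT 1"  # found via the parent scope
assert inner.lookup_cte("base") is None  # popped by the first lookup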
def crawl_sources(
    self: T,
    segment: BaseSegment,
    recurse_into: bool = True,
    pop: bool = False,
    lookup_cte: bool = True,
) -> Iterator[Union[str, T]]:
    """Find SELECTs, table refs, or value table function calls in segment.

    For each SELECT, yield a list of Query objects. As we find table
    references or function call strings, yield those.
    """
    found_nested_select = False
    for seg in segment.recursive_crawl(
        "table_reference",
        "set_expression",
        "select_statement",
        "values_clause",
        recurse_into=False,
        allow_self=False,
    ):
        # Crawl efficiently, don't recurse here. We do that later.
        # What do we have?
        # 1. If it's a table reference, work out whether it's to a CTE
        #    or to an external table.
        if seg.is_type("table_reference"):
            _seg = cast(ObjectReferenceSegment, seg)
            if not _seg.is_qualified() and lookup_cte:
                cte = self.lookup_cte(_seg.raw, pop=pop)
                if cte:
                    # It's a CTE.
                    yield cte
            # It's an external table reference.
            yield _seg.raw
        # 2. If it's some kind of more complex expression which is still
        #    valid in this position, generate an appropriate sub-select.
        else:
            assert seg.is_type(
                "set_expression", "select_statement", "values_clause"
            )
            found_nested_select = True
            # Generate a subquery, referencing the current query
            # as the parent.
            yield self.__class__.from_segment(seg, self.dialect, parent=self)
    if not found_nested_select:
        # If we reach here, the SELECT may be querying from a value table
        # function, e.g. UNNEST(). For our purposes, this is basically the
        # same as an external table. Return the "table" part as a string.
        table_expr = segment.get_child("table_expression")
        if table_expr:
            yield table_expr.raw
Find SELECTs, table refs, or value table function calls in segment. For each SELECT, yield a list of Query objects. As we find table references or function call strings, yield those.
crawl_sources
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def _extract_subqueries(
    cls: Type[T], selectable: Selectable, dialect: Dialect
) -> Iterator[T]:
    """Given a Selectable, extract subqueries."""
    assert selectable.selectable.is_type(
        *SELECTABLE_TYPES,
        *SUBSELECT_TYPES,
    ), f"Found unexpected {selectable.selectable}"

    # For MERGE, UPDATE & DELETE, we should expect to find a sub select.
    for subselect in selectable.selectable.recursive_crawl(
        *SELECTABLE_TYPES,
        recurse_into=False,
        allow_self=False,
    ):
        # NOTE: We don't need to set the parent here, because it will
        # be set when attached to the parent later.
        yield cls.from_segment(subselect, dialect=dialect)
Given a Selectable, extract subqueries.
_extract_subqueries
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def from_root(cls: Type[T], root_segment: BaseSegment, dialect: Dialect) -> T:
    """Given a root segment, find the first appropriate selectable and analyse."""
    selectable_segment = next(
        # Could be a Selectable or a MERGE
        root_segment.recursive_crawl(*SELECTABLE_TYPES, "merge_statement"),
        None,
    )
    assert selectable_segment, f"No selectable found in {root_segment.raw!r}."
    return cls.from_segment(selectable_segment, dialect=dialect)
Given a root segment, find the first appropriate selectable and analyse.
from_root
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def from_segment(
    cls: Type[T],
    segment: BaseSegment,
    dialect: Dialect,
    parent: Optional[T] = None,
) -> T:
    """Recursively generate a query from an appropriate segment."""
    assert segment.is_type(
        *SELECTABLE_TYPES, *SUBSELECT_TYPES
    ), f"Invalid segment for `from_segment`: {segment}"

    selectables = []
    subqueries = []
    cte_defs: List[BaseSegment] = []
    query_type = QueryType.Simple

    if segment.is_type("select_statement", *SUBSELECT_TYPES):
        # It's a select. Instantiate a Query.
        selectables = [Selectable(segment, dialect=dialect)]
    elif segment.is_type("set_expression"):
        # It's a set expression. There may be multiple selectables.
        for _seg in segment.recursive_crawl("select_statement", recurse_into=False):
            selectables.append(Selectable(_seg, dialect=dialect))
    else:
        # Otherwise it's a WITH statement.
        assert segment.is_type("with_compound_statement")
        query_type = QueryType.WithCompound
        for _seg in segment.recursive_crawl(
            # NOTE: We don't _specify_ set expressions here, because
            # all set expressions are made of selects, and we
            # want to look straight through to those child
            # expressions.
            "select_statement",
            recurse_into=False,
            no_recursive_seg_type="common_table_expression",
        ):
            selectables.append(Selectable(_seg, dialect=dialect))

        # We also need to handle CTEs
        for _seg in segment.recursive_crawl(
            "common_table_expression",
            recurse_into=False,
            # Don't recurse into any other WITH statements.
            no_recursive_seg_type="with_compound_statement",
        ):
            # Just store the segments for now.
            cte_defs.append(_seg)

    # Extract subqueries from any selectables.
    for selectable in selectables:
        # NOTE: If any VALUES clauses are present, they pass through here
        # safely without Exception. They won't yield any subqueries.
        subqueries += list(cls._extract_subqueries(selectable, dialect))

    # Instantiate the query
    outer_query = cls(
        query_type,
        dialect,
        selectables,
        parent=parent,
        subqueries=subqueries,
    )

    # If we don't have any CTEs, we can stop now.
    if not cte_defs:
        return outer_query

    # Otherwise build up the CTE map.
    ctes = {}
    for cte in cte_defs:
        # NOTE: This feels a little risky to just assume the first segment
        # is the name, but it's the same functionality we've run with for
        # a while.
        name_seg = cte.segments[0]
        name = name_seg.raw_normalized(False).upper()
        # Get the query out of it, just stop on the first one we find.
        try:
            inner_qry = next(
                cte.recursive_crawl(
                    *SELECTABLE_TYPES,
                    "values_clause",
                    # Very rarely, we might find things like update
                    # clauses in here, handle them accordingly.
                    *SUBSELECT_TYPES,
                ),
            )
        # If this fails it's because we didn't find anything "selectable"
        # in the CTE. Flag this up, but then carry on. It's likely something
        # strange (e.g. a Clickhouse WITH clause setting a variable).
        except StopIteration:  # pragma: no cover
            # Log it as an issue, but otherwise skip this one.
            analysis_logger.info(f"Skipping unexpected CTE structure: {cte.raw!r}")
            continue
        qry = cls.from_segment(inner_qry, dialect=dialect, parent=outer_query)
        assert qry
        # Populate the CTE specific args.
        qry.cte_definition_segment = cte
        qry.cte_name_segment = name_seg
        # File it in the dictionary.
        ctes[name] = qry

    # Set the CTEs attribute on the outer.
    # NOTE: Because we're setting this after instantiation, it's important
    # that we've already set the `parent` value of the cte queries.
    outer_query.ctes = ctes
    return outer_query
Recursively generate a query from an appropriate segment.
from_segment
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/query.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/query.py
MIT
def get_select_statement_info(
    segment: BaseSegment, dialect: Optional[Dialect], early_exit: bool = True
) -> Optional[SelectStatementColumnsAndTables]:
    """Analyze a select statement: targets, aliases, etc. Return info."""
    assert segment.is_type("select_statement")
    table_aliases, standalone_aliases = get_aliases_from_select(segment, dialect)
    if early_exit and not table_aliases and not standalone_aliases:
        return None

    # Iterate through all the references, both in the select clause, but also
    # potential others.
    sc = segment.get_child("select_clause")
    # Sometimes there is no select clause (e.g. "SELECT *" is a select_clause_element)
    if not sc:  # pragma: no cover
        # TODO: Review whether this clause should be removed. It might only
        # have existed for an old way of structuring the Exasol dialect.
        return None

    # NOTE: In this first crawl, don't crawl inside any sub-selects, that's very
    # important for both isolation and performance reasons.
    reference_buffer = _get_object_references(sc)
    table_reference_buffer = []
    for potential_clause in (
        "where_clause",
        "groupby_clause",
        "having_clause",
        "orderby_clause",
        "qualify_clause",
    ):
        clause = segment.get_child(potential_clause)
        if clause:
            reference_buffer += _get_object_references(clause)

    # Get all select targets.
    _select_clause = segment.get_child("select_clause")
    assert _select_clause, "Select statement found without select clause."
    select_targets = cast(
        List[SelectClauseElementSegment],
        _select_clause.get_children("select_clause_element"),
    )

    # Get all column aliases. NOTE: In two steps so mypy can follow.
    _pre_aliases = [s.get_alias() for s in select_targets]
    col_aliases = [_alias for _alias in _pre_aliases if _alias is not None]

    # Get any columns referred to in a using clause, and extract anything
    # from ON clauses.
    using_cols = []
    fc = segment.get_child("from_clause")
    if fc:
        for table_expression in fc.recursive_crawl(
            "table_expression", no_recursive_seg_type="select_statement"
        ):
            for seg in table_expression.iter_segments():
                # table references can get tricky with what is a schema, table,
                # project, or column. It may be best for now to use the redshift
                # unnest logic for dialects that support arrays or objects/structs
                # in AL05. However, this solves finding other types of references
                # in functions such as LATERAL FLATTEN.
                if not seg.is_type("table_reference"):
                    reference_buffer += _get_object_references(seg)
                elif cast(ObjectReferenceSegment, seg).is_qualified():
                    table_reference_buffer += _get_object_references(seg)
        for join_clause in fc.recursive_crawl(
            "join_clause", no_recursive_seg_type="select_statement"
        ):
            seen_using = False
            for seg in join_clause.iter_segments():
                if seg.is_type("keyword") and seg.raw_upper == "USING":
                    seen_using = True
                elif seg.is_type("join_on_condition"):
                    for on_seg in seg.segments:
                        if on_seg.is_type("bracketed", "expression"):
                            # Deal with expressions
                            reference_buffer += _get_object_references(seg)
                elif seen_using and seg.is_type("bracketed"):
                    for subseg in seg.segments:
                        if subseg.is_type("identifier"):
                            using_cols.append(subseg)
                    seen_using = False

    return SelectStatementColumnsAndTables(
        select_statement=segment,
        table_aliases=table_aliases or [],
        standalone_aliases=standalone_aliases or [],
        reference_buffer=reference_buffer,
        select_targets=select_targets,
        col_aliases=col_aliases,
        using_cols=using_cols,
        table_reference_buffer=table_reference_buffer,
    )
Analyze a select statement: targets, aliases, etc. Return info.
get_select_statement_info
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/select.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/select.py
MIT
def get_aliases_from_select(
    segment: BaseSegment, dialect: Optional[Dialect] = None
) -> Tuple[Optional[List[AliasInfo]], Optional[List[BaseSegment]]]:
    """Gets the aliases referred to in the FROM clause.

    Returns a tuple of two lists:
    - Table aliases
    - Value table function aliases
    """
    fc = segment.get_child("from_clause")
    if not fc:
        # If there's no from clause then just abort.
        return None, None
    assert isinstance(fc, (FromClauseSegment, JoinClauseSegment))
    aliases = fc.get_eventual_aliases()

    # We only want table aliases, so filter out aliases for value table
    # functions, lambda parameters and pivot columns.
    standalone_aliases: List[BaseSegment] = []
    standalone_aliases += _get_pivot_table_columns(segment, dialect)
    standalone_aliases += _get_lambda_argument_columns(segment, dialect)

    table_aliases = []
    for table_expr, alias_info in aliases:
        if _has_value_table_function(table_expr, dialect):
            if alias_info.segment and alias_info.segment not in standalone_aliases:
                standalone_aliases.append(alias_info.segment)
        elif alias_info not in table_aliases:
            table_aliases.append(alias_info)

    return table_aliases, standalone_aliases
Gets the aliases referred to in the FROM clause. Returns a tuple of two lists: - Table aliases - Value table function aliases
get_aliases_from_select
python
sqlfluff/sqlfluff
src/sqlfluff/utils/analysis/select.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/analysis/select.py
MIT
def get_parse_fixtures(
    fail_on_missing_yml=False,
) -> Tuple[List[ParseExample], List[Tuple[str, str, bool, str]]]:
    """Search for all parsing fixtures."""
    parse_success_examples = []
    parse_structure_examples = []
    # Generate the filenames for each dialect from the parser test directory
    for d in os.listdir(os.path.join("test", "fixtures", "dialects")):
        # Ignore documentation
        if d.endswith(".md"):
            continue
        # assume that d is now the name of a dialect
        dirlist = os.listdir(os.path.join("test", "fixtures", "dialects", d))
        for f in dirlist:
            has_yml = False
            if f.endswith(".sql"):
                root = f[:-4]  # only look for sql files
                parse_success_examples.append(ParseExample(d, f))
                # Look for the code_only version of the structure
                y = root + ".yml"
                if y in dirlist:
                    parse_structure_examples.append((d, f, True, y))
                    has_yml = True
                # Look for the non-code included version of the structure
                y = root + "_nc.yml"
                if y in dirlist:
                    parse_structure_examples.append((d, f, False, y))
                    has_yml = True
                if not has_yml and fail_on_missing_yml:
                    raise (
                        Exception(
                            f"Missing .yml file for {os.path.join(d, f)}. Run the "
                            "test/generate_parse_fixture_yml.py script!"
                        )
                    )
    return parse_success_examples, parse_structure_examples
Search for all parsing fixtures.
get_parse_fixtures
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def make_dialect_path(dialect, fname):
    """Work out how to find paths given a dialect and a file name."""
    return os.path.join("test", "fixtures", "dialects", dialect, fname)
Work out how to find paths given a dialect and a file name.
make_dialect_path
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def load_file(dialect, fname):
    """Load a file."""
    with open(make_dialect_path(dialect, fname), encoding="utf8") as f:
        raw = f.read()
    return raw
Load a file.
load_file
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def process_struct(obj):
    """Process a nested dict or dict-like into a check tuple."""
    if isinstance(obj, dict):
        return tuple((k, process_struct(obj[k])) for k in obj)
    elif isinstance(obj, list):
        # If empty list, return empty tuple
        if not len(obj):
            return tuple()
        # We'll assume that it's a list of dicts
        if isinstance(obj[0], dict):
            buff = [process_struct(elem) for elem in obj]
            if any(len(elem) > 1 for elem in buff):
                raise ValueError(f"Not sure how to deal with multi key dict: {buff!r}")
            return tuple(elem[0] for elem in buff)
        else:
            raise TypeError(f"Did not expect a list of {type(obj[0])}: {obj[0]!r}")
    elif isinstance(obj, (str, int, float)):
        return str(obj)
    elif obj is None:
        return None
    else:
        raise TypeError(f"Not sure how to deal with type {type(obj)}: {obj!r}")
Process a nested dict or dict-like into a check tuple.
process_struct
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
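A worked example of the check-tuple conversion above, assuming process_struct is in scope: dicts become tuples of (key, processed_value) pairs, scalars are stringified, and a list of single-key dicts flattens to a tuple of those pairs:

nested = {"select_statement": [{"keyword": "SELECT"}, {"numeric_literal": 1}]}
assert process_struct(nested) == (
    ("select_statement", (("keyword", "SELECT"), ("numeric_literal", "1"))),
)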
def parse_example_file(dialect: str, sqlfile: str):
    """Parse example SQL file, return parse tree."""
    config = FluffConfig(overrides=dict(dialect=dialect))
    # Load the SQL
    raw = load_file(dialect, sqlfile)
    # Lex and parse the file
    tokens, _ = Lexer(config=config).lex(raw)
    tree = Parser(config=config).parse(tokens, fname=dialect + "/" + sqlfile)
    return tree
Parse example SQL file, return parse tree.
parse_example_file
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def compute_parse_tree_hash(tree):
    """Given a parse tree, compute a consistent hash value for it."""
    if tree:
        r = tree.as_record(code_only=True, show_raw=True)
        if r:
            r_io = io.StringIO()
            yaml.dump(r, r_io, sort_keys=False, allow_unicode=True)
            result = hashlib.blake2s(r_io.getvalue().encode("utf-8")).hexdigest()
            return result
    return None
Given a parse tree, compute a consistent hash value for it.
compute_parse_tree_hash
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
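The hashing scheme above is simple to reproduce standalone: serialise a record to YAML with a stable key order, then hash the bytes with blake2s. A self-contained sketch (requires PyYAML; the dict is a stand-in for tree.as_record(...)):

import hashlib
import io

import yaml

record = {"keyword": "SELECT"}  # stand-in for a real parse-tree record
r_io = io.StringIO()
yaml.dump(record, r_io, sort_keys=False, allow_unicode=True)
digest = hashlib.blake2s(r_io.getvalue().encode("utf-8")).hexdigest()
# The same record always yields the same digest, which is what lets the
# test suite detect hand-edited YAML fixtures.
print(digest)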
def load_yaml(fpath):
    """Load a yaml structure and process it into a tuple."""
    # Load raw file
    with open(fpath, encoding="utf8") as f:
        raw = f.read()
    # Parse the yaml
    obj = yaml.safe_load(raw)
    # Return the parsed and structured object
    _hash = None
    if obj:
        _hash = obj.pop("_hash", None)
        processed = process_struct(obj)
        if processed:
            return _hash, processed[0]
        else:
            return None, None
Load a yaml structure and process it into a tuple.
load_yaml
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def yaml_loader():
    """Return a yaml loading function."""
    # Return a function
    return load_yaml
Return a yaml loading function.
yaml_loader
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def _generate_test_segments_func(elems):
    """Roughly generate test segments.

    This function isn't totally robust, but good enough for
    testing. Use with caution.
    """
    buff = []
    raw_file = "".join(elems)
    templated_file = TemplatedFile.from_string(raw_file)
    idx = 0

    for elem in elems:
        if elem == "<indent>":
            buff.append(
                Indent(pos_marker=PositionMarker.from_point(idx, idx, templated_file))
            )
            continue
        elif elem == "<dedent>":
            buff.append(
                Dedent(pos_marker=PositionMarker.from_point(idx, idx, templated_file))
            )
            continue

        seg_kwargs = {}

        if set(elem) <= {" ", "\t"}:
            SegClass = WhitespaceSegment
        elif set(elem) <= {"\n"}:
            SegClass = NewlineSegment
        elif elem == "(":
            SegClass = SymbolSegment
            seg_kwargs = {"instance_types": ("start_bracket",)}
        elif elem == ")":
            SegClass = SymbolSegment
            seg_kwargs = {"instance_types": ("end_bracket",)}
        elif elem == "[":
            SegClass = SymbolSegment
            seg_kwargs = {"instance_types": ("start_square_bracket",)}
        elif elem == "]":
            SegClass = SymbolSegment
            seg_kwargs = {"instance_types": ("end_square_bracket",)}
        elif elem.startswith("--"):
            SegClass = CommentSegment
            seg_kwargs = {"instance_types": ("inline_comment",)}
        elif elem.startswith('"'):
            SegClass = CodeSegment
            seg_kwargs = {"instance_types": ("double_quote",)}
        elif elem.startswith("'"):
            SegClass = CodeSegment
            seg_kwargs = {"instance_types": ("single_quote",)}
        else:
            SegClass = CodeSegment

        # Set a none position marker which we'll realign at the end.
        buff.append(
            SegClass(
                raw=elem,
                pos_marker=PositionMarker(
                    slice(idx, idx + len(elem)),
                    slice(idx, idx + len(elem)),
                    templated_file,
                ),
                **seg_kwargs,
            )
        )
        idx += len(elem)

    return tuple(buff)
Roughly generate test segments. This function isn't totally robust, but good enough for testing. Use with caution.
_generate_test_segments_func
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def generate_test_segments():
    """Roughly generate test segments.

    This is a factory function so that it works as a fixture,
    but when actually used, this will return the inner function
    which is what you actually need.
    """
    return _generate_test_segments_func
Roughly generate test segments. This is a factory function so that it works as a fixture, but when actually used, this will return the inner function which is what you actually need.
generate_test_segments
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def raise_critical_errors_after_fix(monkeypatch):
    """Raises errors that break the Fix process.

    These errors are otherwise swallowed to allow the lint messages
    to reach the end user.
    """

    @staticmethod
    def _log_critical_errors(error: Exception):
        raise error

    monkeypatch.setattr(BaseRule, "_log_critical_errors", _log_critical_errors)
Raises errors that break the Fix process. These errors are otherwise swallowed to allow the lint messages to reach the end user.
raise_critical_errors_after_fix
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def fail_on_parse_error_after_fix(monkeypatch):
    """Cause tests to fail if a lint fix introduces a parse error.

    In production, we have a couple of functions that, upon detecting a
    bug in a lint rule, just log a warning. To catch bugs in new or
    modified rules, we want to be more strict during dev and CI/CD
    testing. Here, we patch in different functions which raise runtime
    errors, causing tests to fail if this happens.
    """

    @staticmethod
    def raise_error_apply_fixes_check_issue(message, *args):  # pragma: no cover
        raise ValueError(message % args)

    @staticmethod
    def raise_error_conflicting_fixes_same_anchor(message: str):  # pragma: no cover
        raise ValueError(message)

    monkeypatch.setattr(
        BaseSegment, "_log_apply_fixes_check_issue", raise_error_apply_fixes_check_issue
    )
    monkeypatch.setattr(
        Linter,
        "_report_conflicting_fixes_same_anchor",
        raise_error_conflicting_fixes_same_anchor,
    )
Cause tests to fail if a lint fix introduces a parse error. In production, we have a couple of functions that, upon detecting a bug in a lint rule, just log a warning. To catch bugs in new or modified rules, we want to be more strict during dev and CI/CD testing. Here, we patch in different functions which raise runtime errors, causing tests to fail if this happens.
fail_on_parse_error_after_fix
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
def test_verbosity_level(request): """Report the verbosity level for a given pytest run. For example: $ pytest -vv Has a verbosity level of 2 While: $ pytest Has a verbosity level of 0 """ return request.config.getoption("verbose")
Report the verbosity level for a given pytest run. For example: $ pytest -vv Has a verbosity level of 2 While: $ pytest Has a verbosity level of 0
test_verbosity_level
python
sqlfluff/sqlfluff
test/conftest.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/conftest.py
MIT
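As a sketch (with a hypothetical test name), other tests can consume this fixture to gate extra output on verbosity, as the `RuleTestCase` test later in this file does:

def test_sketch_respects_verbosity(test_verbosity_level):
    # Only emit extra diagnostics under `pytest -vv` or higher.
    if test_verbosity_level >= 2:
        print("verbose diagnostics enabled")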
def distribute_work(
    work_items: List[S],
    work_fn: Callable[[S], Tuple[S, Optional[SQLParseError]]],
) -> None:
    """Distribute work and keep track of progress."""
    # Build up a dict of sets, where the key is the dialect and the set
    # contains all the expected cases. As cases return we'll check them
    # off.
    success_map = {}
    expected_cases = defaultdict(set)
    for case in work_items:
        expected_cases[case.dialect].add(case)

    errors = []
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        for example, result in pool.imap_unordered(work_fn, work_items):
            if result is not None:
                errors.append(result)
                success_map[example] = False
            else:
                success_map[example] = True

            expected_cases[example.dialect].remove(example)
            # Check to see whether a dialect is complete
            if not expected_cases[example.dialect]:
                # It's done. Report success rate.
                local_success_map = {
                    k: v for k, v in success_map.items() if k.dialect == example.dialect
                }
                if all(local_success_map.values()):
                    print(f"{example.dialect!r} complete.\t\tAll Success ✅")
                else:
                    fail_files = [
                        k.sqlfile for k, v in local_success_map.items() if not v
                    ]
                    print(
                        f"{example.dialect!r} complete.\t\t{len(fail_files)} fails. ⚠️"
                    )
                    for fname in fail_files:
                        print(f" - {fname!r}")
    if errors:
        print(errors)
        print("FAILED TO GENERATE ALL CASES")
        sys.exit(1)
Distribute work and keep track of progress.
distribute_work
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
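A hedged sketch of driving this function with a trivial worker; `Case` and `noop_work_fn` are stand-ins, but the return shape (the item plus an optional error) matches how results are unpacked above:

from collections import namedtuple

# Stand-in for the ParseExample tuples the real script passes in.
Case = namedtuple("Case", ["dialect", "sqlfile"])

def noop_work_fn(case):
    # A trivial worker: report success by returning no error.
    return case, None

# Hypothetical invocation (run from within the script's context):
# distribute_work([Case("ansi", "select_simple.sql")], noop_work_fn)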
def _is_matching_new_criteria(example: ParseExample):
    """Check whether the YAML doesn't exist or is older than the SQL."""
    yaml_path = _create_file_path(example)
    if not os.path.exists(yaml_path):
        return True

    sql_path = os.path.join(
        "test",
        "fixtures",
        "dialects",
        example.dialect,
        example.sqlfile,
    )
    return os.path.getmtime(yaml_path) < os.path.getmtime(sql_path)
Check whether the YAML doesn't exist or is older than the SQL.
_is_matching_new_criteria
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
def generate_one_parse_fixture(
    example: ParseExample,
) -> Tuple[ParseExample, Optional[SQLParseError]]:
    """Parse example SQL file, write parse tree to YAML file."""
    dialect, sqlfile = example
    sql_path = _create_file_path(example, ".sql")
    try:
        tree = parse_example_file(dialect, sqlfile)
    except Exception as err:
        # Catch parsing errors, and wrap the file path into the error.
        return example, SQLParseError(f"Fatal parsing error: {sql_path}: {err}")

    # Check we don't have any base types or unparsable sections
    types = tree.type_set()
    if "base" in types:
        return example, SQLParseError(f"Unnamed base section when parsing: {sql_path}")
    if "unparsable" in types:
        return example, SQLParseError(f"Could not parse: {sql_path}")

    _hash = compute_parse_tree_hash(tree)
    # Remove the .sql file extension
    path = _create_file_path(example)
    with open(path, "w", newline="\n", encoding="utf8") as f:
        r: Optional[Dict[str, Optional[str]]] = None

        if not tree:
            f.write("")
            return example, None

        records = tree.as_record(code_only=True, show_raw=True)
        assert records, "TypeGuard"
        r = dict([("_hash", _hash), *list(records.items())])
        print(
            "# YML test files are auto-generated from SQL files and should not be "
            "edited by",
            '# hand. To help enforce this, the "hash" field in the file must match '
            "a hash",
            "# computed by SQLFluff when running the tests. Please run",
            "# `python test/generate_parse_fixture_yml.py` to generate them after "
            "adding or",
            "# altering SQL files.",
            file=f,
            sep="\n",
        )
        yaml.dump(
            data=r,
            stream=f,
            default_flow_style=False,
            sort_keys=False,
            allow_unicode=True,
        )
    return example, None
Parse example SQL file, write parse tree to YAML file.
generate_one_parse_fixture
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
def gather_file_list( dialect: Optional[str] = None, glob_match_pattern: Optional[str] = None, new_only: bool = False, ) -> List[ParseExample]: """Gather the list of files to generate fixtures for. Apply filters as required.""" parse_success_examples, _ = get_parse_fixtures() if new_only: parse_success_examples = [ example for example in parse_success_examples if _is_matching_new_criteria(example) ] if dialect: dialect = dialect.lower() parse_success_examples = [ example for example in parse_success_examples if example[0] == dialect ] if len(parse_success_examples) == 0: raise ValueError(f'Unknown Dialect "{dialect}"') if not glob_match_pattern: return parse_success_examples regex = re.compile(fnmatch.translate(glob_match_pattern)) return [ example for example in parse_success_examples if regex.match(example[1]) is not None ]
Gather the list of files to generate fixtures for. Apply filters as required.
gather_file_list
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
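For illustration, the filters compose as follows (a sketch; it assumes the calls run from the repo root so the fixture directories resolve):

# All fixtures for one dialect:
cases = gather_file_list(dialect="postgres")
# Only files matching a glob, across all dialects:
subset = gather_file_list(glob_match_pattern="select_*.sql")
# Only fixtures whose YAML is missing or stale:
stale = gather_file_list(new_only=True)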
def generate_parse_fixtures(
    filter: Optional[str], dialect: Optional[str], new_only: bool
):
    """Generate fixtures, or a subset based on dialect or filename glob match."""
    filter_str = filter or "*"
    dialect_str = dialect or "all"
    print("Match Pattern Received:")
    print(f"\tfilter={filter_str} dialect={dialect_str} new-only={new_only}")
    parse_success_examples = gather_file_list(dialect, filter, new_only)
    print(f"Found {len(parse_success_examples)} file(s) to generate")
    t0 = time.monotonic()
    try:
        distribute_work(parse_success_examples, generate_one_parse_fixture)
    except SQLParseError as err:
        # If one fails, exit early and cleanly.
        print(f"PARSING FAILED: {err}")
        sys.exit(1)
    dt = time.monotonic() - t0
    print(f"Built {len(parse_success_examples)} fixtures in {dt:.2f}s.")
Generate fixtures, or a subset based on dialect or filename glob match.
generate_parse_fixtures
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
def main(): """Find all example SQL files, parse and create YAML files.""" generate_parse_fixtures()
Find all example SQL files, parse and create YAML files.
main
python
sqlfluff/sqlfluff
test/generate_parse_fixture_yml.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/generate_parse_fixture_yml.py
MIT
def test_diff_quality_plugin(sql_paths, expected_violations_lines, monkeypatch): """Test the plugin at least finds errors on the expected lines.""" def execute(command, exit_codes): printable_command_parts = [ c.decode(sys.getfilesystemencoding()) if isinstance(c, bytes) else c for c in command ] result = invoke_assert_code( ret_code=1 if expected_violations_lines else 0, args=[ lint, printable_command_parts[2:], ], ) return result.output, "" # Mock the execute function -- this is an attempt to prevent the CircleCI # coverage check from hanging. (We've seen issues in the past where using # subprocesses caused things to occasionally hang.) monkeypatch.setattr(diff_quality_plugin, "execute", execute) monkeypatch.chdir("test/fixtures/") violation_reporter = diff_quality_plugin.diff_cover_report_quality( options="--processes=1" ) assert len(sql_paths) in (0, 1) sql_paths = [str(Path(sql_path)) for sql_path in sql_paths] violations_dict = violation_reporter.violations_batch(sql_paths) assert isinstance(violations_dict, dict) if expected_violations_lines: assert len(violations_dict[sql_paths[0]]) > 0 violations_lines = {v.line for v in violations_dict[sql_paths[0]]} for expected_line in expected_violations_lines: assert expected_line in violations_lines else: assert ( len(violations_dict[sql_paths[0]]) == 0 if sql_paths else len(violations_dict) == 0 )
Test the plugin at least finds errors on the expected lines.
test_diff_quality_plugin
python
sqlfluff/sqlfluff
test/diff_quality_plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/diff_quality_plugin_test.py
MIT
def test_assert_rule_fail_in_sql_handle_parse_error(): """Util assert_rule_fail_in_sql should handle parse errors.""" with pytest.raises(Failed) as failed_test: assert_rule_fail_in_sql(code="L000", sql="select from") failed_test.match("Found the following parse errors in test case:")
Util assert_rule_fail_in_sql should handle parse errors.
test_assert_rule_fail_in_sql_handle_parse_error
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_assert_rule_fail_in_sql_should_fail_queries_that_unexpectedly_pass(): """Util assert_rule_fail_in_sql should fail if no failure.""" with pytest.raises(Failed) as failed_test: assert_rule_fail_in_sql(code="LT01", sql="select 1") failed_test.match("No LT01 failures found in query which should fail")
Util assert_rule_fail_in_sql should fail if no failure.
test_assert_rule_fail_in_sql_should_fail_queries_that_unexpectedly_pass
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_assert_rule_pass_in_sql_should_handle_parse_error(): """Util assert_rule_pass_in_sql should handle parse errors.""" with pytest.raises(Failed) as failed_test: assert_rule_pass_in_sql(code="LT01", sql="select from") failed_test.match("Found unparsable section:")
Util assert_rule_pass_in_sql should handle parse errors.
test_assert_rule_pass_in_sql_should_handle_parse_error
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_assert_rule_pass_in_sql_should_fail_when_there_are_violations(): """Util assert_rule_pass_in_sql should fail when there are violations.""" with pytest.raises(Failed) as failed_test: assert_rule_pass_in_sql(code="LT01", sql="select a , b from t") failed_test.match("Found LT01 failures in query which should pass")
Util assert_rule_pass_in_sql should fail when there are violations.
test_assert_rule_pass_in_sql_should_fail_when_there_are_violations
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_rules_test_case_skipped_when_test_case_skipped(): """Test functionality of the `RuleTestCase` skip attribute.""" rule_test_case = RuleTestCase(rule="CP01", skip="Skip this one for now") with pytest.raises(Skipped) as skipped_test: rule_test_case.evaluate() skipped_test.match("Skip this one for now")
Test functionality of the `RuleTestCase` skip attribute.
test_rules_test_case_skipped_when_test_case_skipped
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def test_rules_test_case_has_variable_introspection(test_verbosity_level): """Make sure the helper gives variable introspection information on failure.""" rule_test_case = RuleTestCase( rule="LT02", fail_str=""" select a, b from table """, # extra comma on purpose fix_str=""" select a, b, from table """, ) with pytest.raises(AssertionError) as skipped_test: rule_test_case.evaluate() if test_verbosity_level >= 2: # Enough to check that a query diff is displayed skipped_test.match("select")
Make sure the helper gives variable introspection information on failure.
test_rules_test_case_has_variable_introspection
python
sqlfluff/sqlfluff
test/testing_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/testing_test.py
MIT
def assert_pickle_robust(err: SQLBaseError):
    """Test that the class remains the same through copying and pickling."""
    # First try copying (and make sure they still compare equal)
    err_copy = copy.copy(err)
    assert err_copy == err
    # Then try pickling (and make sure they also still compare equal)
    pickled = pickle.dumps(err)
    pickle_copy = pickle.loads(pickled)
    assert pickle_copy == err
Test that the class remains the same through copying and pickling.
assert_pickle_robust
python
sqlfluff/sqlfluff
test/core/errors_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/errors_test.py
MIT
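A minimal sketch of the helper in use, mirroring the error construction in the tests below:

from sqlfluff.core.errors import SQLLexError
from sqlfluff.core.parser.markers import PositionMarker
from sqlfluff.core.templaters import TemplatedFile

template = TemplatedFile.from_string("foobar")
err = SQLLexError("Foo", pos=PositionMarker(slice(0, 6), slice(0, 6), template))
err.ignore = True  # mutated attribute state should survive copy/pickle too
assert_pickle_robust(err)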
def test__lex_error_pickle(ignore):
    """Test lexing error pickling."""
    template = TemplatedFile.from_string("foobar")
    err = SQLLexError("Foo", pos=PositionMarker(slice(0, 6), slice(0, 6), template))
    # Set ignore to true if configured.
    # NOTE: This attribute not being copied was one of the reasons for this test.
    err.ignore = ignore
    assert_pickle_robust(err)
Test lexing error pickling.
test__lex_error_pickle
python
sqlfluff/sqlfluff
test/core/errors_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/errors_test.py
MIT
def test__parse_error_pickle(ignore):
    """Test parse error pickling."""
    template = TemplatedFile.from_string("foobar")
    segment = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template))
    err = SQLParseError("Foo", segment=segment)
    # Set ignore to true if configured.
    # NOTE: This attribute not being copied was one of the reasons for this test.
    err.ignore = ignore
    assert_pickle_robust(err)
Test parse error pickling.
test__parse_error_pickle
python
sqlfluff/sqlfluff
test/core/errors_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/errors_test.py
MIT
def test__lint_error_pickle(ignore):
    """Test lint error pickling."""
    template = TemplatedFile.from_string("foobar")
    segment = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template))
    err = SQLLintError("Foo", segment=segment, rule=Rule_T078)
    # Set ignore to true if configured.
    # NOTE: This attribute not being copied was one of the reasons for this test.
    err.ignore = ignore
    assert_pickle_robust(err)
Test lint error pickling.
test__lint_error_pickle
python
sqlfluff/sqlfluff
test/core/errors_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/errors_test.py
MIT
def test__plugin_manager_registers_example_plugin():
    """Test that the example plugin is registered.

    This test also tests that warnings are raised on the import of
    plugins which have their imports in the wrong place (e.g. the
    example plugin). That means we need to make sure the plugin is
    definitely reimported at the start of this test, so we can see
    any warnings raised on imports.

    To do this we clear the plugin manager cache and also forcibly
    unload the example plugin modules if they are already loaded.
    This ensures that we can capture any warnings raised by importing
    the module.
    """
    purge_plugin_manager()
    # We still do a try/except here, even though it's only run within
    # the context of a test, because the module may or may not already
    # be imported depending on the order that the tests run in.
    try:
        del sys.modules["sqlfluff_plugin_example"]
    except KeyError:
        pass
    try:
        del sys.modules["sqlfluff_plugin_example.rules"]
    except KeyError:
        pass

    with fluff_log_catcher(logging.WARNING, "sqlfluff.rules") as caplog:
        plugin_manager = get_plugin_manager()
        # The plugin import order is non-deterministic.
        # Use sets in case the dbt plugin (or other plugins) are
        # already installed too.
        installed_plugins = set(
            plugin_module.__name__ for plugin_module in plugin_manager.get_plugins()
        )
    print(f"Installed plugins: {installed_plugins}")
    assert installed_plugins.issuperset(
        {
            "sqlfluff_plugin_example",
            "sqlfluff.core.plugin.lib",
        }
    )

    # At this stage we should also check that the example plugin
    # also raises a warning for its import location.
    assert (
        "Rule 'Rule_Example_L001' has been imported before all plugins "
        "have been fully loaded"
    ) in caplog.text
Test that the example plugin is registered. This test also tests that warnings are raised on the import of plugins which have their imports in the wrong place (e.g. the example plugin). That means we need to make sure the plugin is definitely reimported at the start of this test, so we can see any warnings raised on imports. To do this we clear the plugin manager cache and also forcibly unload the example plugin modules if they are already loaded. This ensures that we can capture any warnings raised by importing the module.
test__plugin_manager_registers_example_plugin
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__plugin_example_rules_returned(rule_ref): """Test that the example rules from the plugin are returned.""" plugin_manager = get_plugin_manager() # The plugin import order is non-deterministic rule_names = [ rule.__name__ for rules in plugin_manager.hook.get_rules() for rule in rules ] print(f"Rule names: {rule_names}") assert rule_ref in rule_names
Test that the example rules from the plugin are returned.
test__plugin_example_rules_returned
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
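As a sketch, the same hook can be queried outside a test to list every registered rule class name:

from sqlfluff.core.plugin.host import get_plugin_manager

plugin_manager = get_plugin_manager()
rule_names = sorted(
    rule.__name__
    for rules in plugin_manager.hook.get_rules()
    for rule in rules
)
print(rule_names)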
def test__plugin_default_config_read(rule_ref, config_option): """Test that the example plugin default config is merged into FluffConfig.""" fluff_config = FluffConfig(overrides={"dialect": "ansi"}) # The plugin import order is non-deterministic print(f"Detected config sections: {fluff_config._configs['rules'].keys()}") # Check V1 assert config_option in fluff_config._configs["rules"][rule_ref]
Test that the example plugin default config is merged into FluffConfig.
test__plugin_default_config_read
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def load(self): """Raise an exception on load.""" raise ValueError("TEST ERROR")
Raise an exception on load.
load
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__plugin_handle_bad_load(): """Test that we can safely survive a plugin which fails to load.""" # Mock fake plugin ep = MockEntryPoint("test_name", "test_value", "sqlfluff") plugin_manager = get_plugin_manager() with fluff_log_catcher(logging.WARNING, "sqlfluff.plugin") as caplog: _load_plugin(plugin_manager, ep, "plugin_name", "v1.2.3") # Assert that there was a warning assert "ERROR: Failed to load SQLFluff plugin" in caplog.text assert "plugin_name" in caplog.text assert "TEST ERROR" in caplog.text
Test that we can safely survive a plugin which fails to load.
test__plugin_handle_bad_load
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__plugin_get_version():
    """Test that the plugin method of getting the version gets the right version."""
    assert _get_sqlfluff_version() == pkg_version
Test that the plugin method of getting the version gets the right version.
test__plugin_get_version
python
sqlfluff/sqlfluff
test/core/plugin_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/plugin_test.py
MIT
def test__templater_python(): """Test the python templater.""" t = PythonTemplater(override_context=dict(blah="foo")) instr = PYTHON_STRING outstr, _ = t.process(in_str=instr, fname="test") assert str(outstr) == "SELECT * FROM foo"
Test the python templater.
test__templater_python
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
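PYTHON_STRING is defined elsewhere in the module; from the assertion above it behaves like the format string "SELECT * FROM {blah}" (an inference, not shown in this excerpt). A standalone sketch:

from sqlfluff.core.templaters import PythonTemplater

t = PythonTemplater(override_context=dict(blah="foo"))
outstr, _ = t.process(in_str="SELECT * FROM {blah}", fname="<sketch>")
assert str(outstr) == "SELECT * FROM foo"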
def test__templater_python_error(): """Test error handling in the python templater.""" t = PythonTemplater(override_context=dict(noblah="foo")) instr = PYTHON_STRING with pytest.raises(SQLTemplaterError): t.process(in_str=instr, fname="test")
Test error handling in the python templater.
test__templater_python_error
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_intermediate__trim( int_slice, templated_str, head_test, tail_test, int_test ): """Test trimming IntermediateFileSlice.""" h, i, t = int_slice.trim_ends(templated_str=templated_str) assert h == head_test assert t == tail_test assert i == int_test
Test trimming IntermediateFileSlice.
test__templater_python_intermediate__trim
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_substring_occurrences(mainstr, substrings, positions): """Test _substring_occurrences.""" occurrences = PythonTemplater._substring_occurrences(mainstr, substrings) assert isinstance(occurrences, dict) pos_test = [occurrences[substring] for substring in substrings] assert pos_test == positions
Test _substring_occurrences.
test__templater_python_substring_occurrences
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
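A hedged sketch of the private helper's contract as exercised above: it appears to return a dict keyed by substring, with each value listing the zero-indexed positions of that substring (assumed behavior, inferred from the test):

from sqlfluff.core.templaters import PythonTemplater

occurrences = PythonTemplater._substring_occurrences("foo bar foo", ["foo", "bar"])
# "foo" occurs at offsets 0 and 8; "bar" at offset 4.
assert occurrences["foo"] == [0, 8]
assert occurrences["bar"] == [4]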
def test__templater_python_sorted_occurrence_tuples(test, result): """Test _sorted_occurrence_tuples.""" assert PythonTemplater._sorted_occurrence_tuples(test) == result
Test _sorted_occurrence_tuples.
test__templater_python_sorted_occurrence_tuples
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_slice_template(test, result): """Test _slice_template.""" resp = list(PythonTemplater._slice_template(test)) # check contiguous assert "".join(elem.raw for elem in resp) == test # check indices idx = 0 for raw_file_slice in resp: assert raw_file_slice.source_idx == idx idx += len(raw_file_slice.raw) # Check total result assert resp == result
Test _slice_template.
test__templater_python_slice_template
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_length, result, ): """Test _split_invariants.""" resp = list( PythonTemplater._split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_length, ) ) # check result assert resp == result
Test _split_invariants.
test__templater_python_split_invariants
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_split_uniques_coalesce_rest( split_file, raw_occurrences, templated_occurrences, templated_str, result, caplog ): """Test _split_uniques_coalesce_rest.""" with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"): resp = list( PythonTemplater._split_uniques_coalesce_rest( split_file, raw_occurrences, templated_occurrences, templated_str, ) ) # Check contiguous prev_slice = None for elem in result: if prev_slice: assert elem[1].start == prev_slice[0].stop assert elem[2].start == prev_slice[1].stop prev_slice = (elem[1], elem[2]) # check result assert resp == result
Test _split_uniques_coalesce_rest.
test__templater_python_split_uniques_coalesce_rest
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_slice_file(raw_file, templated_file, unwrap_wrapped, result): """Test slice_file.""" _, resp, _ = PythonTemplater().slice_file( raw_file, # For the render_func we just use a function which just returns the # templated file from the test case. (lambda x: templated_file), config=FluffConfig( configs={"templater": {"unwrap_wrapped_queries": unwrap_wrapped}}, overrides={"dialect": "ansi"}, ), ) # Check contiguous prev_slice = None for templated_slice in resp: if prev_slice: assert templated_slice.source_slice.start == prev_slice[0].stop assert templated_slice.templated_slice.start == prev_slice[1].stop prev_slice = (templated_slice.source_slice, templated_slice.templated_slice) # check result assert resp == result
Test slice_file.
test__templater_python_slice_file
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_large_file_check(): """Test large file skipping. The check is separately called on each .process() method so it makes sense to test a few templaters. """ # First check we can process the file normally without config. PythonTemplater().process(in_str="SELECT 1", fname="<string>") # Then check we raise a skip exception when config is set low. with pytest.raises(SQLFluffSkipFile) as excinfo: PythonTemplater().process( in_str="SELECT 1", fname="<string>", config=FluffConfig( overrides={"dialect": "ansi", "large_file_skip_char_limit": 2}, ), ) assert "Length of file" in str(excinfo.value)
Test large file skipping. The check is separately called on each .process() method so it makes sense to test a few templaters.
test__templater_python_large_file_check
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_dot_notation_variables(raw_str, result): """Test template variables that contain a dot character (`.`).""" context = { "foo": "bar", "num": 123, "sqlfluff": { "foo.bar": "foobar", "self.number": 42, "obj.schema": "my_schema", "obj.table": "my_table", }, } t = PythonTemplater(override_context=context) instr = raw_str outstr, _ = t.process(in_str=instr, fname="test") assert str(outstr) == result
Test template variables that contain a dot character (`.`).
test__templater_python_dot_notation_variables
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__templater_python_dot_notation_fail(context, error_string): """Test failures with template variables that contain a dot character (`.`).""" t = PythonTemplater(override_context=context) with pytest.raises(SQLTemplaterError) as excinfo: outstr, _ = t.process(in_str="SELECT * FROM {foo.bar}", fname="test") assert error_string in excinfo.value.desc()
Test failures with template variables that contain a dot character (`.`).
test__templater_python_dot_notation_fail
python
sqlfluff/sqlfluff
test/core/templaters/python_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/python_test.py
MIT
def test__indices_of_newlines(raw_str, positions): """Test iter_indices_of_newlines.""" assert list(iter_indices_of_newlines(raw_str)) == positions
Test iter_indices_of_newlines.
test__indices_of_newlines
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def test__templater_raw(): """Test the raw templater.""" t = RawTemplater() instr = "SELECT * FROM {{blah}}" outstr, _ = t.process(in_str=instr, fname="test") assert instr == str(outstr)
Test the raw templater.
test__templater_raw
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def test__templated_file_get_line_pos_of_char_pos( tf_kwargs, in_charpos, out_line_no, out_line_pos, ): """Test TemplatedFile.get_line_pos_of_char_pos.""" file = TemplatedFile(**tf_kwargs) res_line_no, res_line_pos = file.get_line_pos_of_char_pos(in_charpos) assert res_line_no == out_line_no assert res_line_pos == out_line_pos
Test TemplatedFile.get_line_pos_of_char_pos.
test__templated_file_get_line_pos_of_char_pos
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
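A minimal sketch using a trivially templated file (built via TemplatedFile.from_string, as elsewhere in these tests); both line number and line position are one-indexed:

from sqlfluff.core.templaters import TemplatedFile

file = TemplatedFile.from_string("select 1\nfrom tbl\n")
# Character 10 is the "r" of "from": line 2, position 2.
assert file.get_line_pos_of_char_pos(10) == (2, 2)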
def test__templated_file_find_slice_indices_of_templated_pos( templated_position, inclusive, tf_kwargs, sliced_idx_start, sliced_idx_stop, ): """Test TemplatedFile._find_slice_indices_of_templated_pos.""" file = TemplatedFile(**tf_kwargs) res_start, res_stop = file._find_slice_indices_of_templated_pos( templated_position, inclusive=inclusive ) assert res_start == sliced_idx_start assert res_stop == sliced_idx_stop
Test TemplatedFile._find_slice_indices_of_templated_pos.
test__templated_file_find_slice_indices_of_templated_pos
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def test__templated_file_templated_slice_to_source_slice( in_slice, out_slice, is_literal, tf_kwargs ): """Test TemplatedFile.templated_slice_to_source_slice.""" file = TemplatedFile(**tf_kwargs) source_slice = file.templated_slice_to_source_slice(in_slice) literal_test = file.is_source_slice_literal(source_slice) assert (is_literal, source_slice) == (literal_test, out_slice)
Test TemplatedFile.templated_slice_to_source_slice.
test__templated_file_templated_slice_to_source_slice
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def test__templated_file_source_only_slices(file, expected_result): """Test TemplatedFile.source_only_slices.""" assert file.source_only_slices() == expected_result
Test TemplatedFile.source_only_slices.
test__templated_file_source_only_slices
python
sqlfluff/sqlfluff
test/core/templaters/base_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/base_test.py
MIT
def get_parsed(path: str) -> BaseSegment: """Testing helper to parse paths.""" linter = Linter() # Get the first file matching the path string first_path = next(linter.parse_path(path)) # Delegate parse assertions to the `.tree` property return first_path.tree
Testing helper to parse paths.
get_parsed
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
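A sketch of the helper in use against one of the fixture paths referenced later in this file:

tree = get_parsed("test/fixtures/templater/jinja_l_metas/002.sql")
print(tree.stringify())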
def test__templater_jinja(instr, expected_outstr): """Test jinja templating and the treatment of whitespace.""" t = JinjaTemplater(override_context=dict(blah="foo", condition="a < 10")) outstr, _ = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == expected_outstr
Test jinja templating and the treatment of whitespace.
test__templater_jinja
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_slices(case: RawTemplatedTestCase): """Test that Jinja templater slices raw and templated file correctly.""" t = JinjaTemplater() templated_file, _ = t.process( in_str=case.instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) assert templated_file is not None assert templated_file.source_str == case.instr assert templated_file.templated_str == case.templated_str # Build and check the list of source strings referenced by "sliced_file". actual_ts_source_list = [ case.instr[ts.source_slice] for ts in templated_file.sliced_file ] assert actual_ts_source_list == case.expected_templated_sliced__source_list # Build and check the list of templated strings referenced by "sliced_file". actual_ts_templated_list = [ templated_file.templated_str[ts.templated_slice] for ts in templated_file.sliced_file ] assert actual_ts_templated_list == case.expected_templated_sliced__templated_list # Build and check the list of source strings referenced by "raw_sliced". previous_rs = None actual_rs_source_list: List[RawFileSlice] = [] for rs in templated_file.raw_sliced + [None]: # type: ignore if previous_rs: if rs: actual_source = case.instr[previous_rs.source_idx : rs.source_idx] else: actual_source = case.instr[previous_rs.source_idx :] actual_rs_source_list.append(actual_source) previous_rs = rs assert actual_rs_source_list == case.expected_raw_sliced__source_list
Test that Jinja templater slices raw and templated file correctly.
test__templater_jinja_slices
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test_templater_set_block_handling(): """Test handling of literals in {% set %} blocks. Specifically, verify they are not modified in the alternate template. """ def run_query(sql): # Prior to the bug fix, this assertion failed. This was bad because, # inside JinjaTracer, dbt templates similar to the one in this test # would call the database with funky SQL (including weird strings it # uses internally like: 00000000000000000000000000000002. assert sql == "\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n" return sql t = JinjaTemplater(override_context=dict(run_query=run_query)) instr = """{% set my_query1 %} select 1 from foobarfoobarfoobarfoobar_{{ "dev" }} {% endset %} {% set my_query2 %} {{ my_query1 }} {% endset %} {{ run_query(my_query2) }} """ outstr, vs = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == "\n\n\n\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n\n" assert len(vs) == 0
Test handling of literals in {% set %} blocks. Specifically, verify they are not modified in the alternate template.
test_templater_set_block_handling
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_variable(): """Test missing variable error handling in the jinja templater.""" t = JinjaTemplater(override_context=dict(blah="foo")) instr = JINJA_STRING outstr, vs = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == "SELECT * FROM f, o, o WHERE \n\n" # Check we have violations. assert len(vs) > 0 # Check one of them is a templating error on line 1 assert any(v.rule_code() == "TMP" and v.line_no == 1 for v in vs)
Test missing variable error handling in the jinja templater.
test__templater_jinja_error_variable
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_dynamic_variable_no_violations(): """Test no templater violation for variable defined within template.""" t = JinjaTemplater(override_context=dict(blah="foo")) instr = """{% if True %} {% set some_var %}1{% endset %} SELECT {{some_var}} {% endif %} """ outstr, vs = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == "\n \n SELECT 1\n\n" # Check we have no violations. assert len(vs) == 0
Test no templater violation for variable defined within template.
test__templater_jinja_dynamic_variable_no_violations
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_syntax(): """Test syntax problems in the jinja templater.""" t = JinjaTemplater() instr = "SELECT {{foo} FROM jinja_error\n" with pytest.raises(SQLTemplaterError) as excinfo: t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) templater_exception = excinfo.value assert templater_exception.rule_code() == "TMP" assert templater_exception.line_no == 1 assert "Failed to parse Jinja syntax" in str(templater_exception)
Test syntax problems in the jinja templater.
test__templater_jinja_error_syntax
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_catastrophic(): """Test error handling in the jinja templater.""" t = JinjaTemplater(override_context=dict(blah=7)) instr = JINJA_STRING with pytest.raises(SQLTemplaterError) as excinfo: t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) templater_exception = excinfo.value assert templater_exception.rule_code() == "TMP" assert templater_exception.line_no == 1 assert "Unrecoverable failure in Jinja templating" in str(templater_exception)
Test error handling in the jinja templater.
test__templater_jinja_error_catastrophic
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_macro_path_does_not_exist(): """Tests that an error is raised if macro path doesn't exist.""" with pytest.raises(ValueError) as e: JinjaTemplater().construct_render_func( config=FluffConfig.from_path( "test/fixtures/templater/jinja_macro_path_does_not_exist" ) ) assert str(e.value).startswith("Path does not exist")
Tests that an error is raised if macro path doesn't exist.
test__templater_jinja_error_macro_path_does_not_exist
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_error_macro_invalid(): """Tests that an error is raised if a macro is invalid.""" invalid_macro_config_string = ( "[sqlfluff]\n" "templater = jinja\n" "dialect = ansi\n" "[sqlfluff:templater:jinja:macros]\n" "a_macro_def = {% macro pkg.my_macro() %}pass{% endmacro %}\n" ) config = FluffConfig.from_string(invalid_macro_config_string) with pytest.raises(SQLFluffUserError) as e: JinjaTemplater().construct_render_func(config=config) error_string = str(e.value) assert error_string.startswith("Error loading user provided macro") assert "{% macro pkg.my_macro() %}pass{% endmacro %}" in error_string
Tests that an error is raised if a macro is invalid.
test__templater_jinja_error_macro_invalid
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_lint_empty():
    """Check that we can parse a file which renders to an empty string.

    No exception should be raised, and we should get a single templated element.
    """
    lntr = Linter(dialect="ansi")
    parsed = lntr.parse_string(in_str='{{ "" }}')
    parsed_variant = parsed.parsed_variants[0]
    assert parsed_variant.templated_file.source_str == '{{ "" }}'
    assert parsed_variant.templated_file.templated_str == ""
    # Get the types of the segments
    print(f"Segments: {parsed_variant.tree.raw_segments}")
    seg_types = [seg.get_type() for seg in parsed_variant.tree.raw_segments]
    assert seg_types == ["placeholder", "end_of_file"]
Check that we can parse a file which renders to an empty string. No exception should be raised, and we should get a single templated element.
test__templater_jinja_lint_empty
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def assert_structure(yaml_loader, path, code_only=True, include_meta=False): """Check that a parsed sql file matches the yaml file with the same name.""" parsed = get_parsed(path + ".sql") # Whitespace is important here to test how that's treated tpl = parsed.to_tuple(code_only=code_only, show_raw=True, include_meta=include_meta) # Check nothing unparsable if "unparsable" in parsed.type_set(): print(parsed.stringify()) raise ValueError("Input file is unparsable.") _, expected = yaml_loader(path + ".yml") assert tpl == expected
Check that a parsed sql file matches the yaml file with the same name.
assert_structure
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_full(subpath, code_only, include_meta, yaml_loader, caplog): """Check structure can be parsed from jinja templated files.""" # Log the templater and lexer throughout this test caplog.set_level(logging.DEBUG, logger="sqlfluff.templater") caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer") assert_structure( yaml_loader, "test/fixtures/templater/" + subpath, code_only=code_only, include_meta=include_meta, )
Check structure can be parsed from jinja templated files.
test__templater_full
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT
def test__templater_jinja_block_matching(caplog):
    """Test that the block UUID matching works with a complicated case."""
    caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer")
    path = "test/fixtures/templater/jinja_l_metas/002.sql"
    # Parse the file.
    parsed = get_parsed(path)
    # We only care about the template elements
    template_segments = [
        seg
        for seg in parsed.raw_segments
        if seg.is_type("template_loop")
        or (
            seg.is_type("placeholder")
            and seg.block_type in ("block_start", "block_end", "block_mid")
        )
    ]
    # Group them together by block UUID
    assert all(
        seg.block_uuid for seg in template_segments
    ), "All templated segments should have a block uuid!"
    grouped = defaultdict(list)
    for seg in template_segments:
        grouped[seg.block_uuid].append(seg.pos_marker.working_loc)
    print(grouped)
    # Now the matching block IDs should be found at the following positions.
    # NOTE: These are working locations in the rendered file.
    groups = {
        "for actions clause 1": [(6, 5), (9, 5), (12, 5), (15, 5)],
        "for actions clause 2": [(17, 5), (21, 5), (29, 5), (37, 5)],
        # NOTE: all the if loop clauses are grouped together.
        "if loop.first": [
            (18, 9),
            (20, 9),
            (20, 9),
            (22, 9),
            (22, 9),
            (28, 9),
            (30, 9),
            (30, 9),
            (36, 9),
        ],
    }
    # Check all are accounted for:
    for clause in groups.keys():
        for block_uuid, locations in grouped.items():
            if groups[clause] == locations:
                print(f"Found {clause}, locations with UUID: {block_uuid}")
                break
        else:
            raise ValueError(f"Couldn't find appropriate grouping of blocks: {clause}")
Test that the block UUID matching works with a complicated case.
test__templater_jinja_block_matching
python
sqlfluff/sqlfluff
test/core/templaters/jinja_test.py
https://github.com/sqlfluff/sqlfluff/blob/master/test/core/templaters/jinja_test.py
MIT