code
stringlengths
26
870k
docstring
stringlengths
1
65.6k
func_name
stringlengths
1
194
language
stringclasses
1 value
repo
stringlengths
8
68
path
stringlengths
5
194
url
stringlengths
46
254
license
stringclasses
4 values
def assert_rule_raises_violations_in_file(
    rule: str,
    fpath: str,
    violations: List[Tuple[int, int]],
    fluff_config: FluffConfig,
) -> None:
    """Assert that a given rule raises given errors in specific positions of a file.

    Args:
        rule (str): The rule we're looking for.
        fpath (str): The path to the sql file to check.
        violations (:obj:`list` of :obj:`tuple`): A list of tuples, each with
            the line number and line position of the expected violation.
        fluff_config (:obj:`FluffConfig`): A config object to use while linting.
    """
    linter = Linter(config=fluff_config)
    linted = linter.lint_path(fpath)
    # Compare as sets: ordering is irrelevant, and if a violation is missing
    # we don't care about the ordering of the ones which did match.
    expected = {(rule, line_no, line_pos) for line_no, line_pos in violations}
    assert set(linted.check_tuples()) == expected
Assert that a given rule raises given errors in specific positions of a file. Args: rule (str): The rule we're looking for. fpath (str): The path to the sql file to check. violations (:obj:`list` of :obj:`tuple`): A list of tuples, each with the line number and line position of the expected violation. fluff_config (:obj:`FluffConfig`): A config object to use while linting.
assert_rule_raises_violations_in_file
python
sqlfluff/sqlfluff
src/sqlfluff/utils/testing/rules.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/rules.py
MIT
def prep_violations(
    rule: str, violations: Collection[ViolationDictType]
) -> Collection[ViolationDictType]:
    """Default to test rule if code is omitted.

    Mutates the supplied violation dicts in place, filling in the
    ``code`` key with *rule* wherever it is absent, and returns the
    same collection for convenience.
    """
    for violation in violations:
        # Only fill in the code when the test author left it out.
        violation.setdefault("code", rule)
    return violations
Default to test rule if code is omitted.
prep_violations
python
sqlfluff/sqlfluff
src/sqlfluff/utils/testing/rules.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/rules.py
MIT
def assert_violations_before_fix(
    test_case: RuleTestCase, violations_before_fix: List[SQLBaseError]
) -> None:
    """Assert that the given violations are found in the given sql."""
    print("# Asserting Violations Before Fix")
    actual = [violation.to_dict() for violation in violations_before_fix]
    # The test case must declare expected violations for this check to be valid.
    msg = "Test case must have `violations` to call `assert_violations_before_fix()`"
    assert test_case.violations, msg
    expected = prep_violations(test_case.rule, test_case.violations)
    try:
        assert actual == expected
    except AssertionError:  # pragma: no cover
        # Dump what we actually saw to make the failure diagnosable.
        print(
            "Actual violations:\n",
            yaml.dump(actual, allow_unicode=True),
            sep="",
        )
        raise
Assert that the given violations are found in the given sql.
assert_violations_before_fix
python
sqlfluff/sqlfluff
src/sqlfluff/utils/testing/rules.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/rules.py
MIT
def assert_violations_after_fix(test_case: RuleTestCase) -> None:
    """Assert that the given violations are found in the fixed sql."""
    print("# Asserting Violations After Fix")
    # Both `fix_str` and `violations_after_fix` must be present for this
    # assertion to make sense.
    assert (
        test_case.fix_str
    ), "Test case must have `fix_str` to call `assert_violations_after_fix()`"
    assert test_case.violations_after_fix, (
        "Test case must have `violations_after_fix` to call "
        "`assert_violations_after_fix()`"
    )
    # Re-lint the *fixed* SQL and gather what remains.
    _, remaining_violations = assert_rule_fail_in_sql(
        test_case.rule,
        test_case.fix_str,
        configs=test_case.configs,
        line_numbers=test_case.line_numbers,
    )
    actual = [violation.to_dict() for violation in remaining_violations]
    expected = prep_violations(test_case.rule, test_case.violations_after_fix)
    try:
        assert actual == expected
    except AssertionError:  # pragma: no cover
        # Dump what we actually saw to make the failure diagnosable.
        print(
            "Actual violations_after_fix:\n",
            yaml.dump(actual, allow_unicode=True),
            sep="",
        )
        raise
Assert that the given violations are found in the fixed sql.
assert_violations_after_fix
python
sqlfluff/sqlfluff
src/sqlfluff/utils/testing/rules.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/rules.py
MIT
def rules__test_helper(test_case: RuleTestCase) -> None:
    """Test that a rule passes/fails on a set of test_cases.

    Optionally, also test the fixed string if provided in the test case.
    """
    if test_case.skip:
        pytest.skip(test_case.skip)

    if test_case.pass_str:
        assert_rule_pass_in_sql(
            test_case.rule,
            test_case.pass_str,
            configs=test_case.configs,
        )
    # Guard clause: everything below only applies to failing cases.
    if not test_case.fail_str:
        return

    fixed_sql, violations_before_fix = assert_rule_fail_in_sql(
        test_case.rule,
        test_case.fail_str,
        configs=test_case.configs,
        line_numbers=test_case.line_numbers,
    )
    if test_case.violations:
        assert_violations_before_fix(test_case, violations_before_fix)
    # If a `fixed` value is provided then check it matches
    if test_case.fix_str:
        assert fixed_sql == test_case.fix_str
        if test_case.violations_after_fix:
            assert_violations_after_fix(test_case)
        else:
            assert_rule_pass_in_sql(
                test_case.rule,
                test_case.fix_str,
                configs=test_case.configs,
                msg="The SQL after fix is applied still contains rule violations. "
                "To accept a partial fix, violations_after_fix must be set "
                "listing the remaining, expected, violations.",
            )
    else:
        # Check that tests without a fix_str do not apply any fixes.
        assert fixed_sql == test_case.fail_str, (
            "No fix_str was provided, but the rule modified the SQL. Where a fix "
            "can be applied by a rule, a fix_str must be supplied in the test."
        )
Test that a rule passes/fails on a set of test_cases. Optionally, also test the fixed string if provided in the test case.
rules__test_helper
python
sqlfluff/sqlfluff
src/sqlfluff/utils/testing/rules.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/rules.py
MIT
def invoke_assert_code(
    ret_code: int = 0,
    args: Optional[List[Any]] = None,
    kwargs: Optional[Dict[str, Any]] = None,
    cli_input: Optional[str] = None,
    assert_stdout_contains: str = "",
    assert_stderr_contains: str = "",
    raise_exceptions: bool = True,
) -> Result:
    """Invoke a command and check return code."""
    args = args or []
    kwargs = kwargs or {}
    if cli_input:
        kwargs["input"] = cli_input
    # Older click versions need `mix_stderr=False` to keep stderr separate;
    # newer ones dropped the parameter entirely.
    runner_params = inspect.signature(CliRunner).parameters
    if "mix_stderr" in runner_params:  # pragma: no cover
        runner = CliRunner(mix_stderr=False)
    else:  # pragma: no cover
        runner = CliRunner()
    result = runner.invoke(*args, **kwargs)
    # Echo the CLI output for debugging.
    print(result.output)
    # The replace calls just account for cross platform (path separator) testing.
    if assert_stdout_contains:
        assert assert_stdout_contains in result.stdout.replace("\\", "/")
    if assert_stderr_contains:
        assert assert_stderr_contains in result.stderr.replace("\\", "/")
    # Check return codes, and unless we specifically want to pass back exceptions,
    # we should raise any exceptions which aren't `SystemExit` ones (i.e. ones
    # raised by `sys.exit()`).
    if raise_exceptions and result.exception:
        if not isinstance(result.exception, SystemExit):
            raise result.exception  # pragma: no cover
    assert ret_code == result.exit_code
    return result
Invoke a command and check return code.
invoke_assert_code
python
sqlfluff/sqlfluff
src/sqlfluff/utils/testing/cli.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/cli.py
MIT
def text(self) -> str:
    """The formatted log text."""
    # Pull the raw captured stream, then strip ANSI colour codes.
    captured = self.stream.getvalue()
    return _remove_ansi_escape_sequences(captured)
The formatted log text.
text
python
sqlfluff/sqlfluff
src/sqlfluff/utils/testing/logging.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/logging.py
MIT
def fluff_log_catcher(level: int, logger_name: str) -> Iterator[FluffLogHandler]:
    """Context manager that sets the level for capturing of logs.

    After the end of the 'with' statement the level is restored to its
    original value.

    Args:
        level (int): The lowest logging level to capture.
        logger_name (str): The name of the logger to capture.
    """
    assert logger_name.startswith(
        "sqlfluff"
    ), "This should only be used with a SQLFluff logger."
    target_logger = logging.getLogger(logger_name)
    capture_handler = FluffLogHandler()
    saved_level = target_logger.level
    target_logger.setLevel(level)
    target_logger.addHandler(capture_handler)
    try:
        yield capture_handler
    finally:
        # Restore the logger to exactly how we found it.
        target_logger.setLevel(saved_level)
        target_logger.removeHandler(capture_handler)
Context manager that sets the level for capturing of logs. After the end of the 'with' statement the level is restored to its original value. Args: level (int): The lowest logging level to capture. logger_name (str): The name of the logger to capture.
fluff_log_catcher
python
sqlfluff/sqlfluff
src/sqlfluff/utils/testing/logging.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/logging.py
MIT
def from_elements(
    cls: Type["_RebreakIndices"],
    elements: ReflowSequenceType,
    start_idx: int,
    dir: int,
) -> "_RebreakIndices":
    """Iterate through the elements to deduce important point indices.

    Walks outward from `start_idx` in the given direction to find the
    adjacent point, the nearest newline point, and the point preceding
    the next code element.
    """
    assert dir in (1, -1), "Direction must be a unit direction (i.e. 1 or -1)."
    # Limit depends on the direction: scan down to 0 going backwards,
    # or up to the end of the sequence going forwards.
    limit = 0 if dir == -1 else len(elements)
    # The adjacent point is just the next one.
    adj_point_idx = start_idx + dir
    # The newline point is next. We hop in 2s because we're checking
    # only points, which alternate with blocks.
    for newline_point_idx in range(adj_point_idx, limit, 2 * dir):
        if "newline" in elements[newline_point_idx].class_types or any(
            seg.is_code for seg in elements[newline_point_idx + dir].segments
        ):
            break
    # NOTE: If the range above is empty (or never breaks before exhausting),
    # `newline_point_idx` may be unbound here and referencing it raises
    # UnboundLocalError. Callers of `_RebreakLocation.from_span` deliberately
    # catch UnboundLocalError to skip incomplete spans, so this is relied upon.
    # Finally we look for the point preceding the next code element.
    for pre_code_point_idx in range(newline_point_idx, limit, 2 * dir):
        if any(seg.is_code for seg in elements[pre_code_point_idx + dir].segments):
            break
    return cls(dir, adj_point_idx, newline_point_idx, pre_code_point_idx)
Iterate through the elements to deduce important point indices.
from_elements
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def from_span(
    cls: Type["_RebreakLocation"], span: _RebreakSpan, elements: ReflowSequenceType
) -> "_RebreakLocation":
    """Expand a span to a location."""
    # Resolve the important point indices on each side of the span.
    backward_indices = _RebreakIndices.from_elements(elements, span.start_idx, -1)
    forward_indices = _RebreakIndices.from_elements(elements, span.end_idx, 1)
    return cls(
        span.target,
        backward_indices,
        forward_indices,
        span.line_position,
        span.strict,
    )
Expand a span to a location.
from_span
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def pretty_target_name(self) -> str:
    """Get a nicely formatted name of the target."""
    # Delegate to the shared segment naming helper.
    target_segment = self.target
    return pretty_segment_name(target_segment)
Get a nicely formatted name of the target.
pretty_target_name
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def has_templated_newline(self, elements: ReflowSequenceType) -> bool:
    """Is either side a templated newline?

    If either side has a templated newline, then that's ok too.
    The intent here is that if the next newline is a _templated_
    one, then in the source there will be a tag ({{ tag }}), which
    acts like _not having a newline_.
    """
    # Check the _last_ newline of the previous point (so scan in
    # reverse), and the _first_ newline of the next point.
    for point_idx, reverse in (
        (self.prev.newline_pt_idx, True),
        (self.next.newline_pt_idx, False),
    ):
        segments = elements[point_idx].segments
        if reverse:
            segments = segments[::-1]
        for seg in segments:
            if seg.is_type("newline"):
                # A non-literal position marker means it's templated.
                if not seg.pos_marker.is_literal():
                    return True
                # Only the first newline found on each side matters.
                break
    return False
Is either side a templated newline? If either side has a templated newline, then that's ok too. The intent here is that if the next newline is a _templated_ one, then in the source there will be a tag ({{ tag }}), which acts like _not having a newline_.
has_templated_newline
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def has_inappropriate_newlines(
    self, elements: ReflowSequenceType, strict: bool = False
) -> bool:
    """Is the span surrounded by one (but not two) line breaks?

    Args:
        elements: The elements of the ReflowSequence this element
            is taken from to allow comparison.
        strict (:obj:`bool`): If set to true, this will not allow
            the case where there aren't newlines on either side.
    """
    # Here we use the newline index, not just the adjacent point,
    # so that we can see past comments.
    n_before = elements[self.prev.newline_pt_idx].num_newlines()
    n_after = elements[self.next.newline_pt_idx].num_newlines()
    # Newlines on BOTH sides is always fine.
    if n_before and n_after:
        return True
    # No newlines on either side is fine too - unless we're strict.
    if not n_before and not n_after:
        return not strict
    # Exactly one side has a newline: that's the inappropriate case.
    return False
Is the span surrounded by one (but not two) line breaks? Args: elements: The elements of the ReflowSequence this element is taken from to allow comparison. strict (:obj:`bool`): If set to true, this will not allow the case where there aren't newlines on either side.
has_inappropriate_newlines
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def first_create_anchor(
    elem_buff: ReflowSequenceType, loc_range: range
) -> Tuple[RawSegment, ...]:
    """Handle the potential case of an empty point with the next point with segments.

    While a reflow element's segments are empty, search for the next
    available element with segments to anchor new element creation.
    """
    # https://github.com/sqlfluff/sqlfluff/issues/4184
    try:
        anchor_segments = next(
            elem_buff[idx].segments for idx in loc_range if elem_buff[idx].segments
        )
    except StopIteration as exc:  # pragma: no cover
        # NOTE: We don't test this because we *should* always find
        # _something_ to anchor the creation on, even if we're
        # unlucky enough not to find it on the first pass.
        raise NotImplementedError("Could not find anchor for creation.") from exc
    return anchor_segments
Handle the potential case of an empty point with the next point with segments. While a reflow element's segments are empty, search for the next available element with segments to anchor new element creation.
first_create_anchor
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def identify_rebreak_spans(
    element_buffer: ReflowSequenceType, root_segment: BaseSegment
) -> List[_RebreakSpan]:
    """Identify areas in file to rebreak.

    A span here is a block, or group of blocks which have explicit
    configs for their line position, either directly as raw segments
    themselves or by virtue of one of their parent segments.
    """
    spans: List[_RebreakSpan] = []
    # We'll need at least two elements each side, so constrain
    # our range accordingly.
    for idx in range(2, len(element_buffer) - 2):
        # Only evaluate blocks (points alternate with them and carry no config).
        elem = element_buffer[idx]
        if not isinstance(elem, ReflowBlock):
            continue
        # Does the element itself have config? (The easy case)
        if elem.line_position:
            # We should check whether this is a valid place to break based
            # on whether it's in a templated tag. If it's not a literal, then skip
            # it.
            # TODO: We probably only care if the side of the element that we would
            # break at (i.e. the start if it's `leading` or the end if it's
            # `trailing`), but we'll go with the blunt logic for simplicity first.
            if not elem.segments[0].pos_marker.is_literal():
                reflow_logger.debug(
                    " ! Skipping rebreak span on %s because "
                    "non-literal location.",
                    elem.segments[0],
                )
                continue
            # Blocks should only have one segment so it's easy to pick it.
            spans.append(
                _RebreakSpan(
                    elem.segments[0],
                    idx,
                    idx,
                    # NOTE: this isn't pretty but until it needs to be more
                    # complex, this works. The config is e.g. "leading:strict";
                    # the prefix is the position, the suffix flags strictness.
                    elem.line_position.split(":")[0],
                    elem.line_position.endswith("strict"),
                )
            )
        # Do any of its parents have config, and are we at the start
        # of them?
        for key in elem.line_position_configs.keys():
            # If we're not at the start of the segment, then pass.
            if elem.depth_info.stack_positions[key].idx != 0:
                continue
            # Can we find the end?
            # NOTE: It's safe to look right to the end here rather than up to
            # -2 because we're going to end up stepping back by two in the
            # complicated cases.
            for end_idx in range(idx, len(element_buffer)):
                end_elem = element_buffer[end_idx]
                final_idx = None
                if not isinstance(end_elem, ReflowBlock):
                    continue
                elif key not in end_elem.depth_info.stack_positions:
                    # If we get here, it means the last block was the end.
                    # NOTE: This feels a little hacky, but it's because of a
                    # limitation in detecting the "end" and "solo" markers
                    # effectively in larger sections.
                    final_idx = end_idx - 2  # pragma: no cover
                elif end_elem.depth_info.stack_positions[key].type in ("end", "solo"):
                    final_idx = end_idx

                if final_idx is not None:
                    # Found the end. Add it to the stack.
                    # We reference the appropriate element from the parent stack.
                    target_depth = elem.depth_info.stack_hashes.index(key)
                    target = root_segment.path_to(element_buffer[idx].segments[0])[
                        target_depth
                    ].segment
                    spans.append(
                        _RebreakSpan(
                            target,
                            idx,
                            final_idx,
                            # NOTE: this isn't pretty but until it needs to be more
                            # complex, this works.
                            elem.line_position_configs[key].split(":")[0],
                            elem.line_position_configs[key].endswith("strict"),
                        )
                    )
                    break
            # If we find the start, but not the end, it's not a problem, but
            # we won't be rebreaking this span. This is important so that we
            # don't rebreak part of something without the context of what's
            # in the rest of it. We continue without adding it to the buffer.
    return spans
Identify areas in file to rebreak. A span here is a block, or group of blocks which have explicit configs for their line position, either directly as raw segments themselves or by virtue of one of their parent segments.
identify_rebreak_spans
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def identify_keyword_rebreak_spans(
    element_buffer: ReflowSequenceType,
) -> List[_RebreakSpan]:
    """Identify keyword areas in file to rebreak.

    A span here is a block, or group of blocks which have explicit
    configs for their keyword's line position.
    """
    spans: List[_RebreakSpan] = []
    # We'll need at least two elements each side, so constrain
    # our range accordingly.
    for idx in range(2, len(element_buffer) - 2):
        elem = element_buffer[idx]
        # Only evaluate blocks
        if not isinstance(elem, ReflowBlock):
            continue
        # Do any of its parents have config, and are we at the start
        # of them?
        for key in elem.keyword_line_position_configs.keys():
            # If the element has been unset by using "None" then we want to skip
            # adding it here.
            if elem.keyword_line_position_configs[key].lower() == "none":
                continue
            # If we're not at the start of the segment, then pass. Some keywords
            # might be at an index of 1 due to a leading indent so check for both
            # 0 and 1.
            if elem.depth_info.stack_positions[key].idx > 1:
                continue
            # Next check how deep the current element is with respect to the
            # element which is configured. If we're operating at a deeper depth
            # than the configuration is applied to, then this keyword cannot be
            # the leading keyword for that segment. In that case continue,
            # because we're not looking at the trigger keyword.
            configured_depth = elem.depth_info.stack_hashes.index(key)
            if elem.depth_info.stack_depth > configured_depth + 1:
                continue
            # Then make sure it's actually a keyword.
            if not element_buffer[idx].segments or not element_buffer[idx].segments[
                0
            ].is_type("keyword"):
                continue
            # Can we find the end?
            # NOTE: It's safe to look right to the end here rather than up to
            # -2 because we're going to end up stepping back by two in the
            # complicated cases.
            for end_idx in range(idx, len(element_buffer)):
                end_elem = element_buffer[end_idx]
                final_idx = None
                if not isinstance(end_elem, ReflowBlock):
                    # A point containing an indent marks the end of the clause.
                    if any(seg.is_type("indent") for seg in end_elem.segments):
                        final_idx = end_idx - 1
                    else:
                        continue
                elif end_elem.depth_info.stack_positions[key].type in ("end", "solo"):
                    final_idx = end_idx

                if final_idx is not None:
                    # Found the end. Add it to the stack.
                    # We reference the appropriate element from the parent stack.
                    target = element_buffer[idx].segments[0]
                    spans.append(
                        _RebreakSpan(
                            target,
                            idx,
                            final_idx,
                            # NOTE: this isn't pretty but until it needs to be more
                            # complex, this works.
                            elem.keyword_line_position_configs[key].split(":")[0],
                            elem.keyword_line_position_configs[key].endswith("strict"),
                        )
                    )
                    break
            # If we find the start, but not the end, it's not a problem, but
            # we won't be rebreaking this span. This is important so that we
            # don't rebreak part of something without the context of what's
            # in the rest of it. We continue without adding it to the buffer.
    return spans
Identify keyword areas in file to rebreak. A span here is a block, or group of blocks which have explicit configs for their keyword's line position.
identify_keyword_rebreak_spans
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def rebreak_sequence(
    elements: ReflowSequenceType,
    root_segment: BaseSegment,
) -> Tuple[ReflowSequenceType, List[LintResult]]:
    """Reflow line breaks within a sequence.

    Initially this only _moves_ existing segments around line breaks
    (e.g. for operators and commas), but eventually this method
    should also handle line length considerations too.

    This intentionally does *not* handle indentation, as the existing
    indents are assumed to be correct.
    """
    lint_results: List[LintResult] = []
    fixes: List[LintFix] = []
    elem_buff: ReflowSequenceType = elements.copy()

    # Given a sequence we should identify the objects which
    # make sense to rebreak. That includes any raws with config,
    # but also and parent segments which have config and we can
    # find both ends for. Given those spans, we then need to find
    # the points either side of them and then the blocks either
    # side to respace them at the same time.

    # 1. First find appropriate spans.
    spans = identify_rebreak_spans(elem_buff, root_segment)
    # The spans give us the edges of operators, but for line positioning we need
    # to handle comments differently. There are two other important points:
    # 1. The next newline outward before code (but passing over comments).
    # 2. The point before the next _code_ segment (ditto comments).
    locations: List[_RebreakLocation] = []
    for span in spans:
        try:
            locations.append(_RebreakLocation.from_span(span, elem_buff))
        # If we try and create a location from an incomplete span (i.e. one
        # where we're unable to find the next newline effectively), then
        # we'll get an exception. If we do - skip that one - we won't be
        # able to effectively work with it even if we could construct it.
        # NOTE: The UnboundLocalError comes from `_RebreakIndices.from_elements`
        # when its search loops exhaust without binding their loop variable.
        except UnboundLocalError:
            pass

    # Handle each span:
    for loc in locations:
        reflow_logger.debug(
            "Handing Rebreak Span (%r: %s): %r",
            loc.line_position,
            loc.target,
            "".join(
                elem.raw
                for elem in elem_buff[
                    loc.prev.pre_code_pt_idx - 1 : loc.next.pre_code_pt_idx + 2
                ]
            ),
        )

        if loc.has_inappropriate_newlines(elem_buff, strict=loc.strict):
            continue

        if loc.has_templated_newline(elem_buff):
            continue

        # Points and blocks either side are just offsets from the indices.
        prev_point = cast(ReflowPoint, elem_buff[loc.prev.adj_pt_idx])
        next_point = cast(ReflowPoint, elem_buff[loc.next.adj_pt_idx])

        # So we know we have a preference, is it ok?
        if loc.line_position == "leading":
            if elem_buff[loc.prev.newline_pt_idx].num_newlines():
                # We're good. It's already leading.
                continue
            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            if loc.strict:  # pragma: no cover
                # TODO: The 'strict' option isn't widely tested yet.
                desc = f"{pretty_name.capitalize()} should always start a new line."
            else:
                desc = (
                    f"Found trailing {pretty_name}. Expected only leading "
                    "near line breaks."
                )

            # Is it the simple case with no comments between the
            # old and new desired locations and only a single following
            # whitespace?
            if (
                loc.next.adj_pt_idx == loc.next.pre_code_pt_idx
                and elem_buff[loc.next.newline_pt_idx].num_newlines() == 1
            ):
                reflow_logger.debug(" Trailing Easy Case")
                # Simple case. No comments.
                # Strip newlines from the next point. Apply the indent to
                # the previous point.
                new_results, prev_point = prev_point.indent_to(
                    next_point.get_indent() or "", before=loc.target
                )
                new_results, next_point = next_point.respace_point(
                    cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx - 1]),
                    cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx + 1]),
                    root_segment=root_segment,
                    lint_results=new_results,
                    strip_newlines=True,
                )
                # Update the points in the buffer
                elem_buff[loc.prev.adj_pt_idx] = prev_point
                elem_buff[loc.next.adj_pt_idx] = next_point
            else:
                reflow_logger.debug(" Trailing Tricky Case")
                # Otherwise we've got a tricky scenario where there are comments
                # to negotiate around. In this case, we _move the target_
                # rather than just adjusting the whitespace.

                # Delete the existing position of the target, and
                # the _preceding_ point.
                fixes.append(LintFix.delete(loc.target))
                for seg in elem_buff[loc.prev.adj_pt_idx].segments:
                    if not seg.is_type("dedent"):
                        fixes.append(LintFix.delete(seg))

                # We always reinsert after the first point, but respace
                # the inserted point to ensure it's the right size given
                # configs.
                new_results, new_point = ReflowPoint(()).respace_point(
                    cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx - 1]),
                    cast(ReflowBlock, elem_buff[loc.next.pre_code_pt_idx + 1]),
                    root_segment=root_segment,
                    lint_results=[],
                    anchor_on="after",
                )
                create_anchor = first_create_anchor(
                    elem_buff,
                    range(loc.next.pre_code_pt_idx, loc.next.adj_pt_idx - 1, -1),
                )[-1]

                fixes.append(
                    LintFix.create_after(
                        create_anchor,
                        [loc.target],
                    )
                )

                # Splice the buffer: move the target (and its point) to after
                # the code which previously followed it.
                elem_buff = (
                    elem_buff[: loc.prev.adj_pt_idx]
                    + elem_buff[loc.next.adj_pt_idx : loc.next.pre_code_pt_idx + 1]
                    + elem_buff[
                        loc.prev.adj_pt_idx + 1 : loc.next.adj_pt_idx
                    ]  # the target
                    + [new_point]
                    + elem_buff[loc.next.pre_code_pt_idx + 1 :]
                )

        elif loc.line_position == "trailing":
            if elem_buff[loc.next.newline_pt_idx].num_newlines():
                # We're good, it's already trailing.
                continue
            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            if loc.strict:  # pragma: no cover
                # TODO: The 'strict' option isn't widely tested yet.
                desc = (
                    f"{pretty_name.capitalize()} should always be at the end of a line."
                )
            else:
                desc = (
                    f"Found leading {pretty_name}. Expected only trailing "
                    "near line breaks."
                )

            # Is it the simple case with no comments between the
            # old and new desired locations and only one previous newline?
            if (
                loc.prev.adj_pt_idx == loc.prev.pre_code_pt_idx
                and elem_buff[loc.prev.newline_pt_idx].num_newlines() == 1
            ):
                reflow_logger.debug(" Leading Easy Case")
                # Simple case. No comments.
                # Strip newlines from the previous point. Apply the indent
                # to the next point.
                new_results, next_point = next_point.indent_to(
                    prev_point.get_indent() or "", after=loc.target
                )
                new_results, prev_point = prev_point.respace_point(
                    cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx - 1]),
                    cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx + 1]),
                    root_segment=root_segment,
                    lint_results=new_results,
                    strip_newlines=True,
                )
                # Update the points in the buffer
                elem_buff[loc.prev.adj_pt_idx] = prev_point
                elem_buff[loc.next.adj_pt_idx] = next_point
            else:
                reflow_logger.debug(" Leading Tricky Case")
                # Otherwise we've got a tricky scenario where there are comments
                # to negotiate around. In this case, we _move the target_
                # rather than just adjusting the whitespace.

                # Delete the existing position of the target, and
                # the _following_ point.
                fixes.append(LintFix.delete(loc.target))
                for seg in elem_buff[loc.next.adj_pt_idx].segments:
                    fixes.append(LintFix.delete(seg))

                # We always reinsert before the first point, but respace
                # the inserted point to ensure it's the right size given
                # configs.
                new_results, new_point = ReflowPoint(()).respace_point(
                    cast(ReflowBlock, elem_buff[loc.prev.pre_code_pt_idx - 1]),
                    cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx + 1]),
                    root_segment=root_segment,
                    lint_results=[],
                    anchor_on="before",
                )
                lead_create_anchor = first_create_anchor(
                    elem_buff, range(loc.prev.pre_code_pt_idx, loc.prev.adj_pt_idx + 1)
                )
                # Attempt to skip dedent elements on reinsertion. These are typically
                # found at the end of segments, but we don't want to include the
                # reinserted segment as part of prior code segment's parent segment.
                prev_code_anchor = next(
                    (
                        prev_code_segment
                        for prev_code_segment in lead_create_anchor
                        if not prev_code_segment.is_type("dedent")
                    ),
                    None,
                )
                if prev_code_anchor:
                    fixes.append(
                        LintFix.create_before(
                            prev_code_anchor,
                            [loc.target],
                        )
                    )
                else:
                    # All segments were dedents, append to the end instead.
                    fixes.append(
                        LintFix.create_after(
                            lead_create_anchor[-1],
                            [loc.target],
                        )
                    )

                # Splice the buffer: move the target (and its point) to before
                # the code which previously preceded it.
                elem_buff = (
                    elem_buff[: loc.prev.pre_code_pt_idx]
                    + [new_point]
                    + elem_buff[
                        loc.prev.adj_pt_idx + 1 : loc.next.adj_pt_idx
                    ]  # the target
                    + elem_buff[loc.prev.pre_code_pt_idx : loc.prev.adj_pt_idx + 1]
                    + elem_buff[loc.next.adj_pt_idx + 1 :]
                )

        elif loc.line_position == "alone":
            # If we get here we can assume that the element is currently
            # either leading or trailing and needs to be moved onto its
            # own line.

            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            desc = (
                f"{pretty_name.capitalize()}s should always have a line break "
                "both before and after."
            )

            # First handle the following newlines first (easy).
            if not elem_buff[loc.next.newline_pt_idx].num_newlines():
                reflow_logger.debug(" Found missing newline after in alone case")
                new_results, next_point = next_point.indent_to(
                    deduce_line_indent(loc.target.raw_segments[-1], root_segment),
                    after=loc.target,
                )
                # Update the point in the buffer
                elem_buff[loc.next.adj_pt_idx] = next_point

            # Then handle newlines before. (hoisting past comments if needed).
            if not elem_buff[loc.prev.adj_pt_idx].num_newlines():
                reflow_logger.debug(" Found missing newline before in alone case")
                # NOTE: In the case that there are comments _after_ the
                # target, they will be moved with it. This might break things
                # but there isn't an unambiguous way to do this, because we
                # can't be sure what the comments are referring to.
                # Given that, we take the simple option.
                new_results, prev_point = prev_point.indent_to(
                    deduce_line_indent(loc.target.raw_segments[0], root_segment),
                    before=loc.target,
                )
                # Update the point in the buffer
                elem_buff[loc.prev.adj_pt_idx] = prev_point

        else:
            raise NotImplementedError(  # pragma: no cover
                f"Unexpected line_position config: {loc.line_position}"
            )

        # Consolidate results and consume fix buffer
        lint_results.append(
            LintResult(
                loc.target,
                fixes=fixes_from_results(new_results) + fixes,
                description=desc,
            )
        )
        fixes = []

    return elem_buff, lint_results
Reflow line breaks within a sequence. Initially this only _moves_ existing segments around line breaks (e.g. for operators and commas), but eventually this method should also handle line length considerations too. This intentionally does *not* handle indentation, as the existing indents are assumed to be correct.
rebreak_sequence
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def rebreak_keywords_sequence(
    elements: ReflowSequenceType,
    root_segment: BaseSegment,
) -> Tuple[ReflowSequenceType, List[LintResult]]:
    """Reflow line breaks within a sequence.

    Initially this only _moves_ existing segments
    around line breaks (e.g. for operators and commas),
    but eventually this method should also handle line
    length considerations too.

    This intentionally does *not* handle indentation,
    as the existing indents are assumed to be correct.

    Args:
        elements: The reflow sequence to evaluate and rework.
        root_segment: The root of the parsed tree, used for
            locating segments when respacing.

    Returns:
        A tuple of the (possibly modified) element buffer and any
        lint results generated along the way.
    """
    lint_results: List[LintResult] = []
    fixes: List[LintFix] = []
    elem_buff: ReflowSequenceType = elements.copy()

    # Given a sequence we should identify the objects which
    # make sense to rebreak. That includes any raws with config,
    # but also and parent segments which have config and we can
    # find both ends for. Given those spans, we then need to find
    # the points either side of them and then the blocks either
    # side to respace them at the same time.

    # 1. First find appropriate spans.
    spans = identify_keyword_rebreak_spans(elem_buff)
    # The spans give us the edges of operators, but for line positioning we need
    # to handle comments differently. There are two other important points:
    # 1. The next newline outward before code (but passing over comments).
    # 2. The point before the next _code_ segment (ditto comments).
    locations: List[_RebreakLocation] = []
    for span in spans:
        try:
            locations.append(_RebreakLocation.from_span(span, elem_buff))
        # If we try and create a location from an incomplete span (i.e. one
        # where we're unable to find the next newline effectively), then
        # we'll get an exception. If we do - skip that one - we won't be
        # able to effectively work with it even if we could construct it.
        # This would be unlikely to happen when breaking on only keywords,
        # but was left in that unlikely event.
        except UnboundLocalError:  # pragma: no cover
            pass

    # Handle each span:
    for loc in locations:
        # NOTE: Fixed typo in the debug message ("Handing" -> "Handling").
        reflow_logger.debug(
            "Handling Rebreak Span (%r: %s): %r",
            loc.line_position,
            loc.target,
            "".join(
                elem.raw
                for elem in elem_buff[
                    loc.prev.pre_code_pt_idx - 1 : loc.next.pre_code_pt_idx + 2
                ]
            ),
        )

        if loc.has_inappropriate_newlines(elem_buff, True):
            continue

        if loc.has_templated_newline(elem_buff):
            continue

        # Points and blocks either side are just offsets from the indices.
        prev_point = cast(ReflowPoint, elem_buff[loc.prev.adj_pt_idx])
        next_point = cast(ReflowPoint, elem_buff[loc.next.adj_pt_idx])

        # So we know we have a preference, is it ok?
        if loc.line_position == "leading":
            if elem_buff[loc.prev.newline_pt_idx].num_newlines():
                # We're good. It's already leading.
                continue
            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            desc = f"The {pretty_name} should always start a new line."

            # Is it the simple case with no comments between the
            # old and new desired locations and only a single following
            # whitespace?
            # (The element is currently *trailing*, hence the log label.)
            reflow_logger.debug("  Trailing Easy Case")
            # Strip newlines from the next point. Apply the indent to
            # the previous point.
            new_results, prev_point = prev_point.indent_to(
                next_point.get_indent() or "",
                before=elem_buff[loc.prev.adj_pt_idx + 1].segments[0],
            )
            new_results, next_point = next_point.respace_point(
                cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx - 1]),
                cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx + 1]),
                root_segment=root_segment,
                lint_results=new_results,
                strip_newlines=True,
            )
            # Update the points in the buffer
            elem_buff[loc.prev.adj_pt_idx] = prev_point
            elem_buff[loc.next.adj_pt_idx] = next_point

        elif loc.line_position == "trailing":
            if elem_buff[loc.next.newline_pt_idx].num_newlines():
                # We're good, it's already trailing.
                continue
            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            desc = f"The {pretty_name} should always be at the end of a line."

            # Is it the simple case with no comments between the
            # old and new desired locations and only one previous newline?
            # (The element is currently *leading*, hence the log label.)
            reflow_logger.debug("  Leading Easy Case")
            # Simple case. No comments.
            # Strip newlines from the previous point. Apply the indent
            # to the next point.
            new_results, next_point = next_point.indent_to(
                prev_point.get_indent() or "",
                after=elem_buff[loc.next.adj_pt_idx - 1].segments[-1],
            )
            new_results, prev_point = prev_point.respace_point(
                cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx - 1]),
                cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx + 1]),
                root_segment=root_segment,
                lint_results=new_results,
                strip_newlines=True,
            )
            # Update the points in the buffer
            elem_buff[loc.prev.adj_pt_idx] = prev_point
            elem_buff[loc.next.adj_pt_idx] = next_point

        elif loc.line_position == "alone":
            # If we get here we can assume that the element is currently
            # either leading or trailing and needs to be moved onto its
            # own line.
            # NOTE(review): if *both* surrounding points already contain
            # newlines, neither branch below assigns `new_results`, which
            # would raise at the append further down. It looks like
            # `has_inappropriate_newlines` is expected to have filtered
            # that case earlier — confirm against the caller.

            # Generate the text for any issues.
            pretty_name = loc.pretty_target_name()
            desc = (
                f"The {pretty_name} should always have a line break "
                "both before and after."
            )

            # First handle the following newlines first (easy).
            if not elem_buff[loc.next.newline_pt_idx].num_newlines():
                reflow_logger.debug("  Found missing newline after in alone case")
                new_results, next_point = next_point.indent_to(
                    prev_point.get_indent() or "",
                    after=elem_buff[loc.next.adj_pt_idx - 1].segments[-1],
                )
                # Update the point in the buffer
                elem_buff[loc.next.adj_pt_idx] = next_point

            # Then handle newlines before. (hoisting past comments if needed).
            if not elem_buff[loc.prev.adj_pt_idx].num_newlines():
                reflow_logger.debug("  Found missing newline before in alone case")
                # NOTE: In the case that there are comments _after_ the
                # target, they will be moved with it. This might break things
                # but there isn't an unambiguous way to do this, because we
                # can't be sure what the comments are referring to.
                # Given that, we take the simple option.
                new_results, prev_point = prev_point.indent_to(
                    next_point.get_indent() or "",
                    before=elem_buff[loc.prev.adj_pt_idx + 1].segments[0],
                )
                # Update the point in the buffer
                elem_buff[loc.prev.adj_pt_idx] = prev_point

        else:
            raise NotImplementedError(  # pragma: no cover
                f"Unexpected line_position config: {loc.line_position}"
            )

        # Consolidate results and consume fix buffer
        lint_results.append(
            LintResult(
                loc.target,
                fixes=fixes_from_results(new_results) + fixes,
                description=desc,
            )
        )
        fixes = []

    return elem_buff, lint_results
Reflow line breaks within a sequence. Initially this only _moves_ existing segments around line breaks (e.g. for operators and commas), but eventually this method should also handle line length considerations too. This intentionally does *not* handle indentation, as the existing indents are assumed to be correct.
rebreak_keywords_sequence
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/rebreak.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/rebreak.py
MIT
def _stack_pos_interpreter(path_step: PathStep) -> str:
    """Classify where a step sits among its siblings' code elements."""
    code_idxs = path_step.code_idxs
    # No code elements at all -> no meaningful position.
    if not code_idxs:
        return ""
    if len(code_idxs) == 1:
        # The only code element must be this one.
        return "solo"
    # code_idxs is constructed sorted, so the ends are the first
    # and last entries.
    if path_step.idx == code_idxs[0]:
        return "start"
    if path_step.idx == code_idxs[-1]:
        return "end"
    # Falsy sentinel for "somewhere in the middle".
    return ""
Interpret a path step for stack_positions.
_stack_pos_interpreter
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def from_path_step(
    cls: Type["StackPosition"], path_step: PathStep
) -> "StackPosition":
    """Interpret a PathStep to construct a StackPosition.

    The reason we don't just use the same object is partly
    to interpret it a little more, but also to drop the reference
    to a specific segment which could induce bugs at a later
    stage if used.
    """
    # Carry over the index and length; derive the positional label
    # ("solo"/"start"/"end"/"") from the step's code indices.
    return cls(path_step.idx, path_step.len, cls._stack_pos_interpreter(path_step))
Interpret a PathStep to construct a StackPosition. The reason we don't just use the same object is partly to interpret it a little more, but also to drop the reference to a specific segment which could induce bugs at a later stage if used.
from_path_step
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def from_raw_and_stack(
    cls, raw: RawSegment, stack: Sequence[PathStep]
) -> "DepthInfo":
    """Construct from a raw and its stack.

    Pre-computes the hashes, class types and positions of every
    ancestor so later comparisons don't need the segments themselves.
    """
    stack_hashes = tuple(hash(ps.segment) for ps in stack)
    return cls(
        stack_depth=len(stack),
        stack_hashes=stack_hashes,
        # Frozenset copy enables fast membership tests (e.g. common_with).
        stack_hash_set=frozenset(stack_hashes),
        stack_class_types=tuple(ps.segment.class_types for ps in stack),
        stack_positions={
            # Reuse the hash first calculated above.
            stack_hashes[idx]: StackPosition.from_path_step(ps)
            for idx, ps in enumerate(stack)
        },
    )
Construct from a raw and its stack.
from_raw_and_stack
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def common_with(self, other: "DepthInfo") -> Tuple[int, ...]:
    """Return the hashes of the ancestors shared with ``other``."""
    # Set intersection is fast, and hashes are assumed unique.
    shared = self.stack_hash_set.intersection(other.stack_hashes)
    # Both stacks hang off the same file segment, so at least one
    # common ancestor must exist; anything else indicates a bug.
    assert shared, "DepthInfo comparison shares no common ancestor!"
    return self.stack_hashes[: len(shared)]
Get the common depth and hashes with the other.
common_with
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def trim(self, amount: int) -> "DepthInfo":
    """Return a DepthInfo object with ``amount`` levels removed from the end."""
    if not amount:
        # Trivial case: nothing to remove, return self unchanged.
        return self
    kept_set = self.stack_hash_set.difference(self.stack_hashes[-amount:])
    return self.__class__(
        stack_depth=self.stack_depth - amount,
        stack_hashes=self.stack_hashes[:-amount],
        stack_hash_set=kept_set,
        stack_class_types=self.stack_class_types[:-amount],
        # Only keep positions for hashes which survive the trim.
        stack_positions={
            key: pos
            for key, pos in self.stack_positions.items()
            if key in kept_set
        },
    )
Return a DepthInfo object with some amount trimmed.
trim
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def from_parent(cls: Type["DepthMap"], parent: BaseSegment) -> "DepthMap":
    """Generate a DepthMap from all the children of a segment.

    NOTE: This is the most efficient way to construct a DepthMap
    due to caching in the BaseSegment.
    """
    # The parent exposes a cached (raw, ancestor-stack) pairing.
    pairs = parent.raw_segments_with_ancestors
    return cls(raws_with_stack=pairs)
Generate a DepthMap from all the children of a segment. NOTE: This is the most efficient way to construct a DepthMap due to caching in the BaseSegment.
from_parent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def from_raws_and_root(
    cls: Type["DepthMap"],
    raw_segments: Sequence[RawSegment],
    root_segment: BaseSegment,
) -> "DepthMap":
    """Generate a DepthMap a sequence of raws and a root.

    NOTE: This is the less efficient way to construct a DepthMap
    as it doesn't take advantage of caching in the same way as
    `from_parent`.
    """
    # Resolve each raw's ancestor path from the root and pair them up.
    pairs = [(raw, root_segment.path_to(raw)) for raw in raw_segments]
    return cls(raws_with_stack=pairs)
Generate a DepthMap a sequence of raws and a root. NOTE: This is the less efficient way to construct a DepthMap as it doesn't take advantage of caching in the same way as `from_parent`.
from_raws_and_root
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def get_depth_info(self, raw: RawSegment) -> DepthInfo:
    """Look up the depth info stored for a given segment (keyed by UUID)."""
    key = raw.uuid
    try:
        return self.depth_info[key]
    except KeyError as err:  # pragma: no cover
        # Log the known keys to aid debugging before re-raising with
        # a more descriptive message.
        reflow_logger.exception("Available UUIDS: %s", self.depth_info.keys())
        raise KeyError(
            "Tried to get depth info for unknown "
            f"segment {raw} with UUID {raw.uuid}"
        ) from err
Get the depth info for a given segment.
get_depth_info
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def copy_depth_info(
    self, anchor: RawSegment, new_segment: RawSegment, trim: int = 0
) -> None:
    """Copy the depth info for one segment and apply to another.

    This mutates the existing depth map. That's ok because it's
    an idempotent operation and uuids should be unique.

    This is used in edits to a reflow sequence when new segments are
    inserted and can't infer their own depth info.

    NOTE: we don't remove the old one because it causes no harm.
    """
    # Fetch the anchor's info, optionally trim it, and record it
    # under the new segment's UUID.
    source_info = self.get_depth_info(anchor)
    self.depth_info[new_segment.uuid] = source_info.trim(trim)
Copy the depth info for one segment and apply to another. This mutates the existing depth map. That's ok because it's an idempotent operation and uuids should be unique. This is used in edits to a reflow sequence when new segments are inserted and can't infer their own depth info. NOTE: we don't remove the old one because it causes no harm.
copy_depth_info
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/depthmap.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/depthmap.py
MIT
def incorporate(
    self,
    before: Optional[str] = None,
    after: Optional[str] = None,
    within: Optional[str] = None,
    line_position: Optional[str] = None,
    config: Optional[ConfigElementType] = None,
    keyword_line_position: Optional[str] = None,
) -> None:
    """Mutate the config based on additional information.

    Precedence for each field: the explicit argument first, then the
    matching key in ``config``, then the existing value.
    """
    cfg = config or {}
    # Each attribute pairs with its explicit override; the config dict
    # uses the same key names as the attributes.
    overrides = (
        ("spacing_before", before),
        ("spacing_after", after),
        ("spacing_within", within),
        ("line_position", line_position),
        ("keyword_line_position", keyword_line_position),
    )
    for attr, explicit in overrides:
        setattr(
            self, attr, explicit or cfg.get(attr, None) or getattr(self, attr)
        )
Mutate the config based on additional information.
incorporate
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/config.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/config.py
MIT
def from_dict(cls, config_dict: ConfigDictType, **kwargs: Any) -> "ReflowConfig":
    """Construct a ReflowConfig from a dict."""
    config_types = set(config_dict.keys())
    # Enrich any "align" spacing values with what they're aligning with.
    for seg_type, seg_config in config_dict.items():
        for key in ("spacing_before", "spacing_after"):
            if seg_config.get(key, None) != "align":
                continue
            new_key = "align:" + seg_type
            # Is there a limiter or boundary?
            # NOTE: A `boundary` is only applicable if `within` is present.
            if seg_config.get("align_within", None):
                new_key += ":" + seg_config["align_within"]
                if seg_config.get("align_scope", None):
                    new_key += ":" + seg_config["align_scope"]
            seg_config[key] = new_key
    return cls(_config_dict=config_dict, config_types=config_types, **kwargs)
Construct a ReflowConfig from a dict.
from_dict
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/config.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/config.py
MIT
def from_fluff_config(cls, config: FluffConfig) -> "ReflowConfig":
    """Constructs a ReflowConfig from a FluffConfig.

    Pulls the per-type layout section plus the relevant indentation
    settings out of the wider config object.
    """
    return cls.from_dict(
        # The per segment-type spacing/position settings.
        config.get_section(["layout", "type"]),
        indent_unit=config.get("indent_unit", ["indentation"]),
        tab_space_size=config.get("tab_space_size", ["indentation"]),
        hanging_indents=config.get("hanging_indents", ["indentation"]),
        max_line_length=config.get("max_line_length"),
        # Stored as a comma separated string in config; split to a frozenset.
        skip_indentation_in=frozenset(
            config.get("skip_indentation_in", ["indentation"]).split(",")
        ),
        allow_implicit_indents=config.get(
            "allow_implicit_indents", ["indentation"]
        ),
        trailing_comments=config.get("trailing_comments", ["indentation"]),
        ignore_comment_lines=config.get("ignore_comment_lines", ["indentation"]),
    )
Constructs a ReflowConfig from a FluffConfig.
from_fluff_config
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/config.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/config.py
MIT
def get_block_config( self, block_class_types: AbstractSet[str], depth_info: Optional[DepthInfo] = None, ) -> BlockConfig: """Given the class types of a ReflowBlock return spacing config. When fetching the config for a single class type for a simple block we should just get an appropriate simple config back. >>> cfg = ReflowConfig.from_dict({"comma": {"spacing_before": "touch"}}) >>> cfg.get_block_config({"comma"}) # doctest: +ELLIPSIS BlockConfig(spacing_before='touch', spacing_after='single', ...) """ # set intersection to get the class types which matter configured_types = self.config_types.intersection(block_class_types) # Start with a default config. block_config = BlockConfig() # Update with the config from any specific classes. # First: With the types of any parent segments where # we're at one end (if depth info provided). if depth_info: parent_start, parent_end = True, True for idx, key in enumerate(depth_info.stack_hashes[::-1]): # Work out if we're allowed to claim the parent. if depth_info.stack_positions[key].type not in ("solo", "start"): parent_start = False if depth_info.stack_positions[key].type not in ("solo", "end"): parent_end = False if not (parent_start or parent_end): break # Get corresponding classes. parent_classes = depth_info.stack_class_types[-1 - idx] configured_parent_types = self.config_types.intersection(parent_classes) # Claim the _before_ config if at the start. if parent_start: for seg_type in configured_parent_types: block_config.incorporate( before=self._config_dict[seg_type].get("spacing_before") ) # Claim the _after_ config if at the end. if parent_end: for seg_type in configured_parent_types: block_config.incorporate( after=self._config_dict[seg_type].get("spacing_after") ) # Second: With the types of the raw segment itself. # Unless someone is doing something complicated with their configuration # there should only be one. 
# TODO: Extend (or at least harden) this code to handle multiple # configured (and matched) types much better. for seg_type in configured_types: block_config.incorporate(config=self._config_dict[seg_type]) return block_config
Given the class types of a ReflowBlock return spacing config. When fetching the config for a single class type for a simple block we should just get an appropriate simple config back. >>> cfg = ReflowConfig.from_dict({"comma": {"spacing_before": "touch"}}) >>> cfg.get_block_config({"comma"}) # doctest: +ELLIPSIS BlockConfig(spacing_before='touch', spacing_after='single', ...)
get_block_config
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/config.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/config.py
MIT
def fixes_from_results(results: Iterable[LintResult]) -> List[LintFix]:
    """Return a list of fixes from an iterable of LintResult."""
    # Flatten the per-result fix lists into one list, preserving order.
    return [fix for result in results for fix in result.fixes]
Return a list of fixes from an iterable of LintResult.
fixes_from_results
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/helpers.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/helpers.py
MIT
def pretty_segment_name(segment: BaseSegment) -> str:
    """Get a nicely formatted name of the segment."""
    if segment.is_type("symbol"):
        # Symbols: show the type (underscores as spaces), then the raw value.
        return segment.get_type().replace("_", " ") + f" {segment.raw!r}"
    if segment.is_type("keyword"):
        # Keywords: show the raw value labelled as a keyword.
        return f"{segment.raw!r} keyword"
    # Everything else: just the type, with underscores as spaces.
    return segment.get_type().replace("_", " ")
Get a nicely formatted name of the segment.
pretty_segment_name
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/helpers.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/helpers.py
MIT
def deduce_line_indent(raw_segment: RawSegment, root_segment: BaseSegment) -> str:
    """Given a raw segment, deduce the indent of its line."""
    raws = root_segment.raw_segments
    seg_idx = raws.index(raw_segment)
    indent_seg = None
    # Walk backward toward the previous newline. The last whitespace
    # seen with no code after it is the line's indent.
    # Use range and a lookup rather than slicing: we only need a small
    # subset of a potentially long series.
    for idx in range(seg_idx, -1, -1):
        seg = raws[idx]
        if seg.is_code:
            # Code invalidates any whitespace seen so far on this pass.
            indent_seg = None
        elif seg.is_type("whitespace"):
            indent_seg = seg
        elif seg.is_type("newline"):
            break
    reflow_logger.debug("Deduced indent for %s as %s", raw_segment, indent_seg)
    return indent_seg.raw if indent_seg else ""
Given a raw segment, deduce the indent of its line.
deduce_line_indent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/helpers.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/helpers.py
MIT
def has_untemplated_newline(point: ReflowPoint) -> bool:
    """Determine whether a point contains any literal newlines.

    NOTE: We check for standard literal newlines, but also
    potential placeholder newlines which have been consumed.
    """
    # Fast exit: no newline (or placeholder) segments at all.
    if not point.class_types.intersection({"newline", "placeholder"}):
        return False
    for seg in point.segments:
        # A newline counts if it's literal. An inserted segment has no
        # pos_marker, which also means it can't be templated.
        if seg.is_type("newline") and (
            not seg.pos_marker or seg.pos_marker.is_literal()
        ):
            return True
        if seg.is_type("placeholder"):
            seg = cast(TemplateSegment, seg)
            assert (
                seg.block_type == "literal"
            ), "Expected only literal placeholders in ReflowPoint."
            # A consumed newline inside the placeholder source counts too.
            if "\n" in seg.source_str:
                return True
    return False
Determine whether a point contains any literal newlines. NOTE: We check for standard literal newlines, but also potential placeholder newlines which have been consumed.
has_untemplated_newline
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def __repr__(self) -> str:
    """Compressed repr method to ease logging.

    Abbreviations: iib = initial_indent_balance, ipts = indent points;
    each iPt shows (impulse, trough, initial balance, last line break
    idx, is line break, untaken indents) at its element index.
    """
    return (
        f"IndentLine(iib={self.initial_indent_balance}, ipts=["
        + ", ".join(
            f"iPt@{ip.idx}({ip.indent_impulse}, {ip.indent_trough}, "
            f"{ip.initial_indent_balance}, {ip.last_line_break_idx}, "
            f"{ip.is_line_break}, {ip.untaken_indents})"
            for ip in self.indent_points
        )
        + "])"
    )
Compressed repr method to ease logging.
__repr__
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def is_all_comments(self, elements: ReflowSequenceType) -> bool:
    """Is this line made up of just comments?"""
    found_any = False
    for seg in self.iter_block_segments(elements):
        # A single non-comment segment disqualifies the line.
        if not seg.is_type("comment"):
            return False
        found_any = True
    # An empty line doesn't count as "all comments".
    return found_any
Is this line made up of just comments?
is_all_comments
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def is_all_templates(self, elements: ReflowSequenceType) -> bool:
    """Is this line made up of just template elements?"""
    for block in self.iter_blocks(elements):
        # One rendered block is enough to disqualify the line.
        if not block.is_all_unrendered():
            return False
    # NOTE: Vacuously true for a line with no blocks (matches all()).
    return True
Is this line made up of just template elements?
is_all_templates
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def desired_indent_units(self, forced_indents: List[int]) -> int:
    """Calculate the desired indent units.

    This is the heart of the indentation calculations.

    First we work out how many previous indents are untaken.
    In the easy case, we just use the number of untaken
    indents from previous points. The more complicated example
    is where *this point* has both dedents *and* indents. In
    this case we use the `indent_trough` to prune any
    previous untaken indents which were above the trough at
    this point.

    After that we calculate the indent from the incoming
    balance, minus any relevant untaken events *plus* any
    previously untaken indents which have been forced (i.e.
    inserted by the same operation).
    """
    if self.indent_points[0].indent_trough:
        # This says - purge any untaken indents which happened before
        # the trough (or at least only _keep_ any which would have remained).
        # NOTE: Minus signs are really hard to get wrong here.
        relevant_untaken_indents = [
            i
            for i in self.indent_points[0].untaken_indents
            if i
            <= self.initial_indent_balance
            - (
                self.indent_points[0].indent_impulse
                - self.indent_points[0].indent_trough
            )
        ]
    else:
        # No trough: all previously untaken indents remain relevant.
        relevant_untaken_indents = list(self.indent_points[0].untaken_indents)

    # Incoming balance, minus untaken indents, plus any forced ones.
    desired_indent = (
        self.initial_indent_balance
        - len(relevant_untaken_indents)
        + len(forced_indents)
    )
    reflow_logger.debug(
        "    Desired Indent Calculation: IB: %s, RUI: %s, UIL: %s, "
        "iII: %s, iIT: %s. = %s",
        self.initial_indent_balance,
        relevant_untaken_indents,
        self.indent_points[0].untaken_indents,
        self.indent_points[0].indent_impulse,
        self.indent_points[0].indent_trough,
        desired_indent,
    )
    return desired_indent
Calculate the desired indent units. This is the heart of the indentation calculations. First we work out how many previous indents are untaken. In the easy case, we just use the number of untaken indents from previous points. The more complicated example is where *this point* has both dedents *and* indents. In this case we use the `indent_trough` to prune any previous untaken indents which were above the trough at this point. After that we calculate the indent from the incoming balance, minus any relevant untaken events *plus* any previously untaken indents which have been forced (i.e. inserted by the same operation).
desired_indent_units
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def closing_balance(self) -> int:
    """The closing indent balance of the line."""
    # The line's closing balance is that of its final indent point.
    final_point = self.indent_points[-1]
    return final_point.closing_indent_balance
The closing indent balance of the line.
closing_balance
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def opening_balance(self) -> int:
    """The opening indent balance of the line.

    NOTE: We use the first point for the starting balance rather
    than the line's starting balance: if the line has already been
    corrected, we don't want to flag it as a missing line.
    """
    # Edge case: the first line of a file must start at indent zero.
    if self.indent_points[-1].last_line_break_idx is None:
        return 0
    return self.indent_points[0].closing_indent_balance
The opening indent balance of the line. NOTE: We use the first point for the starting balance rather than the line starting balance because we're using this to detect missing lines and if the line has been corrected then we don't want to do that.
opening_balance
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _revise_templated_lines( lines: List[_IndentLine], elements: ReflowSequenceType ) -> None: """Given an initial set of individual lines. Revise templated ones. NOTE: This mutates the `lines` argument. We do this to ensure that templated lines are _somewhat_ consistent. Total consistency is very hard, given templated elements can be used in a wide range of places. What we do here is to try and take a somewhat rules based approach, but also one which should fit mostly with user expectations. To do this we have three scenarios: 1. Template tags are already on the same indent. 2. Template tags aren't, but can be hoisted without effectively crossing code to be on the same indent. This effectively does the same as "reshuffling" placeholders, whitespace and indent segments but does so without requiring intervention on the parsed file. 3. Template tags which actively cut across the tree (i.e. start and end tags aren't at the same level and can't be hoisted). In this case the tags should be indented at the lowest indent of the matching set. In doing this we have to attempt to match up template tags. This might fail. As we battle-test this feature there may be some interesting bugs which come up! In addition to properly indenting block tags, we also filter out any jinja tags which contain newlines because if we try and fix them, we'll only fix the *initial* part of it. The rest won't be seen because it's within the tag. TODO: This could be an interesting way to extend the indentation algorithm to also cover indentation within jinja tags. """ reflow_logger.debug("# Revise templated lines.") # Because we want to modify the original lines, we're going # to use their list index to keep track of them. depths = defaultdict(list) grouped = defaultdict(list) for idx, line in enumerate(lines): if not line.is_all_templates(elements): continue # We can't assume they're all a single block. # So handle all blocks on the line. 
for block in line.iter_blocks(elements): # We already checked that it's all templates. segment = cast(MetaSegment, block.segments[0]) assert segment.is_type("placeholder", "template_loop") # If it's not got a block uuid, it's not a block, so it # should just be indented as usual. No need to revise. # e.g. comments or variables if segment.block_uuid: grouped[segment.block_uuid].append(idx) depths[segment.block_uuid].append(line.initial_indent_balance) reflow_logger.debug( " UUID: %s @ %s = %r", segment.block_uuid, idx, segment.pos_marker.source_str(), ) # Sort through the lines, so we do to *most* indented first. sorted_group_indices = sorted( grouped.keys(), key=lambda x: max(depths[x]), reverse=True ) reflow_logger.debug(" Sorted Group UUIDs: %s", sorted_group_indices) for group_idx, group_uuid in enumerate(sorted_group_indices): reflow_logger.debug(" Evaluating Group UUID: %s", group_uuid) group_lines = grouped[group_uuid] # Check for case 1. if len(set(lines[idx].initial_indent_balance for idx in group_lines)) == 1: reflow_logger.debug(" Case 1: All the same") continue # Check for case 2. # In this scenario, we only need to check the adjacent points. # If there's any wiggle room, we pick the lowest option. options: List[Set[int]] = [] for idx in group_lines: line = lines[idx] steps: Set[int] = {line.initial_indent_balance} # Run backward through the pre point. indent_balance = line.initial_indent_balance first_point_idx = line.indent_points[0].idx first_block = elements[first_point_idx + 1] assert first_block.segments first_segment = first_block.segments[0] if first_segment.is_type("template_loop"): # For template loops, don't count the line. They behave # strangely. continue for i in range(first_point_idx, 0, -1): _element = elements[i] if isinstance(_element, ReflowPoint): # If it's the one straight away, after a block_end or # block_mid, skip it. We know this because it will have # block_uuid. 
for indent_val in _element.get_indent_segment_vals( exclude_block_indents=True )[::-1]: # Minus because we're going backward. indent_balance -= indent_val reflow_logger.debug( " Backward look. Adding Step: %s", indent_balance, ) steps.add(indent_balance) # if it's anything other than a blank placeholder, break. # NOTE: We still need the forward version of this. elif not _element.segments[0].is_type("placeholder"): break elif cast(TemplateSegment, _element.segments[0]).block_type not in ( "block_start", "block_end", "skipped_source", "block_mid", ): # Recreating this condition is hard, but we shouldn't allow any # rendered content here. break # pragma: no cover # Run forward through the post point. indent_balance = line.initial_indent_balance last_point_idx = line.indent_points[-1].idx last_point = cast(ReflowPoint, elements[last_point_idx]) for indent_val in last_point.get_indent_segment_vals( exclude_block_indents=True ): # Positive because we're going forward. indent_balance += indent_val reflow_logger.debug( " Forward look. Adding Step: %s", indent_balance, ) steps.add(indent_balance) # NOTE: Edge case for consecutive blocks of the same type. # If we're next to another block which is "inner" (i.e.) has # already been handled. We can assume all options up to it's # new indent are open for use. _case_type = None if first_segment.is_type("placeholder"): _case_type = cast(TemplateSegment, first_segment).block_type if _case_type in ("block_start", "block_mid"): # Search forward until we actually find something rendered. # Indents can usually be shuffled a bit around unrendered # elements. # NOTE: We should only be counting non-template indents, i.e. # ones that don't have a block associated with them. # NOTE: We're starting with the current line. _forward_indent_balance = line.initial_indent_balance for elem in elements[line.indent_points[0].idx :]: if isinstance(elem, ReflowBlock): if not elem.is_all_unrendered(): break continue # Otherwise it's a point. 
for indent_val in elem.get_indent_segment_vals( exclude_block_indents=True ): _forward_indent_balance += indent_val reflow_logger.debug( " Precedes block. Adding Step: %s", _forward_indent_balance, ) steps.add(_forward_indent_balance) if _case_type in ("block_end", "block_mid"): # Is preceding _line_ AND element also a block? # i.e. nothing else between. if first_point_idx - 1 == lines[idx - 1].indent_points[0].idx + 1: seg = elements[first_point_idx - 1].segments[0] if seg.is_type("placeholder"): if cast(TemplateSegment, seg).block_type == "block_end": _inter_steps = list( range( line.initial_indent_balance, lines[idx - 1].initial_indent_balance, ) ) reflow_logger.debug( " Follows block. Adding Steps: %s", _inter_steps ) steps.update(_inter_steps) reflow_logger.debug( " Rendered Line %s (Source %s): Initial Balance: %s Options: %s", idx, first_block.segments[0].pos_marker.source_position()[0], lines[idx].initial_indent_balance, steps, ) options.append(steps) # We should also work out what all the indents are _between_ # these options and make sure we don't go above that. # Because there might be _outer_ loops, we look for spans # between blocks in this group which don't contain any blocks # from _outer_ loops. i.e. we can't just take all the lines from # first to last. last_group_line: Optional[int] = group_lines[0] # last = previous. net_balance = 0 balance_trough: Optional[int] = None temp_balance_trough: Optional[int] = None inner_lines = [] reflow_logger.debug(" Intermediate lines:") # NOTE: +1 on the last range to make sure we _do_ process the last one. for idx in range(group_lines[0] + 1, group_lines[-1] + 1): for grp in sorted_group_indices[group_idx + 1 :]: # found an "outer" group line, reset tracker. if idx in grouped[grp]: last_group_line = None net_balance = 0 temp_balance_trough = None # Unset the buffer break # Is it in this group? if idx in group_lines: # Stash the line indices of the inner lines. 
if last_group_line: _inner_lines = list(range(last_group_line + 1, idx)) reflow_logger.debug( " Extending Intermediates with rendered indices %s", _inner_lines, ) inner_lines.extend(_inner_lines) # if we have a temp balance - crystallise it if temp_balance_trough is not None: balance_trough = ( temp_balance_trough if balance_trough is None else min(balance_trough, temp_balance_trough) ) reflow_logger.debug( " + Save Trough: %s (min = %s)", temp_balance_trough, balance_trough, ) temp_balance_trough = None last_group_line = idx net_balance = 0 elif last_group_line: # It's not a group line, but we're still tracking. Update with impulses. is_subgroup_line = any( idx in grouped[grp] for grp in sorted_group_indices[:group_idx] ) for ip in lines[idx].indent_points[:-1]: # Don't count the trough on group lines we've already covered. if "placeholder" in elements[ip.idx + 1].class_types: _block_type = cast( TemplateSegment, elements[ip.idx + 1].segments[0] ).block_type if _block_type in ("block_end", "block_mid"): reflow_logger.debug( " Skipping trough before %r", _block_type ) continue if ip.indent_trough < 0 and not is_subgroup_line: # NOTE: We set it temporarily here, because if we're going # to pass an outer template loop then we should discard it. # i.e. only count intervals within inner loops. # Is there anything rendered between here and the next # group line? next_group_line = min(n for n in group_lines if n > idx) next_group_line_start_point = ( lines[next_group_line].indent_points[0].idx ) for i in range(ip.idx, next_group_line_start_point): if isinstance(elements[i], ReflowBlock): if not elements[i].is_all_unrendered(): break else: # no. 
skip this trough continue _this_through = net_balance + ip.indent_trough temp_balance_trough = ( _this_through if temp_balance_trough is None else min(temp_balance_trough, _this_through) ) reflow_logger.debug( " Stash Trough: %s (min = %s) @ %s", _this_through, temp_balance_trough, idx, ) # NOTE: We update net_balance _after_ the clause above. net_balance += ip.indent_impulse # Evaluate options. reflow_logger.debug(" Options: %s", options) overlap = set.intersection(*options) reflow_logger.debug(" Simple Overlap: %s", overlap) # Remove any options above the limit option. # We minus one from the limit, because if it comes into effect # we'll effectively remove the effects of the indents between the elements. # Is there a mutually agreeable option? reflow_logger.debug(" Balance Trough: %s", balance_trough) if not overlap or (balance_trough is not None and balance_trough <= 0): # Set the indent to the minimum of the existing ones. best_indent = min(lines[idx].initial_indent_balance for idx in group_lines) reflow_logger.debug( " Case 3: Best: %s. Inner Lines: %s", best_indent, inner_lines ) # Remove one indent from all intermediate lines. # This is because we're effectively saying that these # placeholders shouldn't impact the indentation within them. for idx in inner_lines: # MUTATION lines[idx].initial_indent_balance -= 1 else: if len(overlap) > 1: reflow_logger.debug( " Case 2 (precheck): Overlap: %s. Checking lines on the " "immediate inside to check nesting.", overlap, ) # We've got more than one option. To help narrow down, see whether # we we can net outside the lines immediately inside. check_lines = [group_lines[0] + 1, group_lines[-1] - 1] fallback = max(lines[idx].initial_indent_balance for idx in check_lines) for idx in check_lines: # NOTE: It's important here that we've already called # _revise_skipped_source_lines. We don't want to take # them into account here as that will throw us off. 
reflow_logger.debug( " Discarding %s.", lines[idx].initial_indent_balance, ) overlap.discard(lines[idx].initial_indent_balance) if not overlap: best_indent = fallback reflow_logger.debug( " Using fallback since all overlaps were discarded: %s.", fallback, ) else: best_indent = max(overlap) reflow_logger.debug( " Case 2: Best: %s, Overlap: %s", best_indent, overlap ) # Set all the lines to this indent for idx in group_lines: # MUTATION lines[idx].initial_indent_balance = best_indent # Finally, look for any of the lines which contain newlines # inside the placeholders. We use a slice to make sure # we're iterating through a copy so that we can safely # modify the underlying list. for idx, line in enumerate(lines[:]): # Get the first segment. first_seg = elements[line.indent_points[0].idx + 1].segments[0] src_str = first_seg.pos_marker.source_str() if src_str != first_seg.raw and "\n" in src_str: reflow_logger.debug( " Removing line %s from linting as placeholder " "contains newlines.", first_seg.pos_marker.working_line_no, ) lines.remove(line)
Given an initial set of individual lines. Revise templated ones. NOTE: This mutates the `lines` argument. We do this to ensure that templated lines are _somewhat_ consistent. Total consistency is very hard, given templated elements can be used in a wide range of places. What we do here is to try and take a somewhat rules based approach, but also one which should fit mostly with user expectations. To do this we have three scenarios: 1. Template tags are already on the same indent. 2. Template tags aren't, but can be hoisted without effectively crossing code to be on the same indent. This effectively does the same as "reshuffling" placeholders, whitespace and indent segments but does so without requiring intervention on the parsed file. 3. Template tags which actively cut across the tree (i.e. start and end tags aren't at the same level and can't be hoisted). In this case the tags should be indented at the lowest indent of the matching set. In doing this we have to attempt to match up template tags. This might fail. As we battle-test this feature there may be some interesting bugs which come up! In addition to properly indenting block tags, we also filter out any jinja tags which contain newlines because if we try and fix them, we'll only fix the *initial* part of it. The rest won't be seen because it's within the tag. TODO: This could be an interesting way to extend the indentation algorithm to also cover indentation within jinja tags.
_revise_templated_lines
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _revise_skipped_source_lines( lines: List[_IndentLine], elements: ReflowSequenceType, ) -> None: """Given an initial set of individual lines, revise any with skipped source. NOTE: This mutates the `lines` argument. In the cases of {% if ... %} statements, there can be strange effects if we try and lint both rendered and unrendered locations. In particular when there's one at the end of a loop. In all of these cases, if we find an unrendered {% if %} block, which is rendered elsewhere in the template we skip that line. """ reflow_logger.debug("# Revise skipped source lines.") if_locs = defaultdict(list) skipped_source_blocks = [] # Slice to avoid copying for idx, line in enumerate(lines[:]): has_skipped_source = False # Find lines which _start_ with a placeholder for idx, seg in enumerate(line.iter_block_segments(elements)): if not seg.is_type("placeholder"): break template_seg = cast(TemplateSegment, seg) # For now only deal with lines that that start with a block_start. if idx == 0: # If we start with anything else, ignore this line for now. if template_seg.block_type != "block_start": break template_loc = template_seg.pos_marker.templated_position() source_loc = template_seg.pos_marker.source_position() reflow_logger.debug( f" Found block start: {seg} {template_seg.source_str!r} " f"{template_loc} {source_loc}" ) if_locs[source_loc].append(template_loc) # Search forward, and see whether it's all skipped. # NOTE: Just on the same line for now. elif template_seg.block_type == "skipped_source": has_skipped_source = True elif template_seg.block_type == "block_end": # If we get here, we've only had placeholders on this line. # If it's also had skipped source. Make a note of the location # in both the source and template. 
if has_skipped_source: reflow_logger.debug(f" Skipped line found: {template_loc}") skipped_source_blocks.append((source_loc, template_loc)) ignore_locs = [] # Now iterate through each of the potentially skipped blocks, and work out # if they were otherwise rendered in a different location. for source_loc, template_loc in skipped_source_blocks: # Is there at least once location of this source which isn't also # skipped. for other_template_loc in if_locs[source_loc]: if (source_loc, other_template_loc) not in skipped_source_blocks: reflow_logger.debug( " Skipped element rendered elsewhere " f"{(source_loc, template_loc)} at {other_template_loc}" ) ignore_locs.append(template_loc) # Now go back through the lines, and remove any which we can ignore. # Slice to avoid copying for idx, line in enumerate(lines[:]): # Find lines which _start_ with a placeholder try: seg = next(line.iter_block_segments(elements)) except StopIteration: continue if not seg.is_type("placeholder"): continue template_seg = cast(TemplateSegment, seg) if template_seg.block_type != "block_start": continue template_loc = template_seg.pos_marker.templated_position() if template_loc in ignore_locs: reflow_logger.debug(" Removing line from buffer...") lines.remove(line)
Given an initial set of individual lines, revise any with skipped source. NOTE: This mutates the `lines` argument. In the cases of {% if ... %} statements, there can be strange effects if we try and lint both rendered and unrendered locations. In particular when there's one at the end of a loop. In all of these cases, if we find an unrendered {% if %} block, which is rendered elsewhere in the template we skip that line.
_revise_skipped_source_lines
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _revise_comment_lines( lines: List[_IndentLine], elements: ReflowSequenceType, ignore_comment_lines: bool ) -> None: """Given an initial set of individual lines. Revise comment ones. NOTE: This mutates the `lines` argument. We do this to ensure that lines with comments are aligned to the following non-comment element. """ reflow_logger.debug("# Revise comment lines.") comment_line_buffer: List[int] = [] # Slice to avoid copying for idx, line in enumerate(lines[:]): if line.is_all_comments(elements): if ignore_comment_lines: # If we're removing comment lines, purge this line from the buffer. reflow_logger.debug("Ignoring comment line idx: %s", idx) lines.remove(line) else: comment_line_buffer.append(idx) else: # Not a comment only line, if there's a buffer anchor # to this one. for comment_line_idx in comment_line_buffer: reflow_logger.debug( " Comment Only Line: %s. Anchoring to %s", comment_line_idx, idx ) # Mutate reference lines to match this one. comment_line = lines[comment_line_idx] comment_line.initial_indent_balance = line.initial_indent_balance # Reset the buffer comment_line_buffer = [] # Any trailing comments should be anchored to the baseline. for comment_line_idx in comment_line_buffer: # Mutate reference lines to match this one. lines[comment_line_idx].initial_indent_balance = 0 reflow_logger.debug( " Comment Only Line: %s. Anchoring to baseline", comment_line_idx )
Given an initial set of individual lines. Revise comment ones. NOTE: This mutates the `lines` argument. We do this to ensure that lines with comments are aligned to the following non-comment element.
_revise_comment_lines
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def construct_single_indent(indent_unit: str, tab_space_size: int) -> str: """Construct a single indent unit.""" if indent_unit == "tab": return "\t" elif indent_unit == "space": return " " * tab_space_size else: # pragma: no cover raise SQLFluffUserError( f"Expected indent_unit of 'tab' or 'space', instead got {indent_unit}" )
Construct a single indent unit.
construct_single_indent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _prune_untaken_indents( untaken_indents: Tuple[int, ...], incoming_balance: int, indent_stats: IndentStats, has_newline: bool, ) -> Tuple[int, ...]: """Update the tracking of untaken indents. This is an internal helper function for `_crawl_indent_points`. We use the `trough` of the given indent stats to remove any untaken indents which are now no longer relevant after balances are taken into account. """ # Strip any untaken indents above the new balance. # NOTE: We strip back to the trough, not just the end point # if the trough was lower than the impulse. ui = tuple( x for x in untaken_indents if x <= ( incoming_balance + indent_stats.impulse + indent_stats.trough if indent_stats.trough < indent_stats.impulse else incoming_balance + indent_stats.impulse ) ) # After stripping, we may have to add them back in. # NOTE: all the values in the indent_stats are relative to the incoming # indent, so we correct both of them here by using the incoming_balance. if indent_stats.impulse > indent_stats.trough and not has_newline: for i in range(indent_stats.trough, indent_stats.impulse): indent_val = incoming_balance + i + 1 if indent_val - incoming_balance not in indent_stats.implicit_indents: ui += (indent_val,) return ui
Update the tracking of untaken indents. This is an internal helper function for `_crawl_indent_points`. We use the `trough` of the given indent stats to remove any untaken indents which are now no longer relevant after balances are taken into account.
_prune_untaken_indents
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _update_crawl_balances( untaken_indents: Tuple[int, ...], incoming_balance: int, indent_stats: IndentStats, has_newline: bool, ) -> Tuple[int, Tuple[int, ...]]: """Update the tracking of untaken indents and balances. This is an internal helper function for `_crawl_indent_points`. """ new_untaken_indents = _prune_untaken_indents( untaken_indents, incoming_balance, indent_stats, has_newline ) new_balance = incoming_balance + indent_stats.impulse return new_balance, new_untaken_indents
Update the tracking of untaken indents and balances. This is an internal helper function for `_crawl_indent_points`.
_update_crawl_balances
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _crawl_indent_points( elements: ReflowSequenceType, allow_implicit_indents: bool = False ) -> Iterator[_IndentPoint]: """Crawl through a reflow sequence, mapping existing indents. This is where *most* of the logic for smart indentation happens. The values returned here have a large impact on exactly how indentation is treated. NOTE: If a line ends with a comment, indent impulses are pushed to the point _after_ the comment rather than before to aid with indentation. This saves searching for them later. TODO: Once this function *works*, there's definitely headroom for simplification and optimisation. We should do that. """ last_line_break_idx: int | None = None indent_balance = 0 untaken_indents: Tuple[int, ...] = () cached_indent_stats: Optional[IndentStats] = None cached_point: Optional[_IndentPoint] = None for idx, elem in enumerate(elements): if isinstance(elem, ReflowPoint): # NOTE: The following line should never lead to an index error # because files should always have a trailing IndentBlock containing # an "end_of_file" marker, and so the final IndentPoint should always # have _something_ after it. indent_stats = IndentStats.from_combination( cached_indent_stats, elem.get_indent_impulse(), ) # If don't allow implicit indents we should remove them here. # Also, if we do - we should check for brackets. # NOTE: The reason we check following class_types is because # bracketed expressions behave a little differently and are an # exception to the normal implicit indent rules. For implicit # indents which precede bracketed expressions, the implicit indent # is treated as a normal indent. In this case the start_bracket # must be the start of the bracketed section which isn't closed # on the same line - if it _is_ closed then we keep the implicit # indents. if indent_stats.implicit_indents: unclosed_bracket = False if ( allow_implicit_indents and "start_bracket" in elements[idx + 1].class_types ): # Is it closed in the line? Iterate forward to find out. 
# get the stack depth next_elem = cast(ReflowBlock, elements[idx + 1]) depth = next_elem.depth_info.stack_depth for elem_j in elements[idx + 1 :]: if isinstance(elem_j, ReflowPoint): if elem_j.num_newlines() > 0: unclosed_bracket = True break elif ( "end_bracket" in elem_j.class_types and elem_j.depth_info.stack_depth == depth ): break else: # pragma: no cover unclosed_bracket = True if unclosed_bracket or not allow_implicit_indents: # Blank indent stats if not using them indent_stats = IndentStats( indent_stats.impulse, indent_stats.trough, () ) # Was there a cache? if cached_indent_stats: # If there was we can safely assume there is a cached point. assert cached_point # If there was, this is a signal that we need to yield two points. # The content of those points depends on the newlines that surround the # last segments (which will be comment block). # _leading_ comments (i.e. those preceded by a newline): Yield _before_ # _trailing_ comments (or rare "mid" comments): Yield _after_ # TODO: We might want to reconsider the treatment of comments in the # middle of lines eventually, but they're fairly unusual so not well # covered in tests as of writing. # We yield the first of those points here, and then manipulate the # indent_stats object to allow the following code to yield the other. # We can refer back to the cached point as a framework. In both # cases we use the combined impulse and trough, but we use the # current indent balance and untaken indents. if cached_point.is_line_break: # It's a leading comment. Yield all the info in that point. yield _IndentPoint( cached_point.idx, indent_stats.impulse, indent_stats.trough, indent_balance, cached_point.last_line_break_idx, True, untaken_indents, ) # Before zeroing, crystallise any effect on overall balances. indent_balance, untaken_indents = _update_crawl_balances( untaken_indents, indent_balance, indent_stats, True ) # Set indent stats to zero because we've already yielded. 
indent_stats = IndentStats(0, 0, indent_stats.implicit_indents) else: # It's a trailing (or mid) comment. Yield it in the next. yield _IndentPoint( cached_point.idx, 0, 0, indent_balance, cached_point.last_line_break_idx, False, untaken_indents, ) # No need to reset indent stats. It's already good. # Reset caches. cached_indent_stats = None has_newline = False cached_point = None # Do we have a newline? has_newline = has_untemplated_newline(elem) and idx != last_line_break_idx # Construct the point we may yield indent_point = _IndentPoint( idx, indent_stats.impulse, indent_stats.trough, indent_balance, last_line_break_idx, has_newline, untaken_indents, ) # Update the last newline index if this is a newline. # NOTE: We used the previous value in the construction of the # _IndentPoint above and we only reset after that construction. if has_newline: last_line_break_idx = idx # Is the next element a comment? If so - delay the decision until we've # got any indents from after the comment too. # # Also, some templaters might insert custom marker slices that are of zero # source string length as a way of marking locations in the middle of # templated output. These don't correspond to real source code, so we # can't meaningfully indent before them. We can safely handle them similar # to the comment case. if "comment" in elements[idx + 1].class_types or ( "placeholder" in elements[idx + 1].class_types and cast(TemplateSegment, elements[idx + 1].segments[0]).source_str == "" ): cached_indent_stats = indent_stats # Create parts of a point to use later. cached_point = indent_point # We loop around so that we don't do the untaken indent calcs yet. continue # Is it meaningful as an indent point? # i.e. Is it a line break? AND not a templated one. # NOTE: a point at idx zero is meaningful because it's like an indent. # NOTE: Last edge case. If we haven't yielded yet, but the # next element is the end of the file. Yield. 
elif ( has_newline or indent_stats.impulse or indent_stats.trough or idx == 0 or elements[idx + 1].segments[0].is_type("end_of_file") ): yield indent_point # Update balances indent_balance, untaken_indents = _update_crawl_balances( untaken_indents, indent_balance, indent_stats, has_newline )
Crawl through a reflow sequence, mapping existing indents. This is where *most* of the logic for smart indentation happens. The values returned here have a large impact on exactly how indentation is treated. NOTE: If a line ends with a comment, indent impulses are pushed to the point _after_ the comment rather than before to aid with indentation. This saves searching for them later. TODO: Once this function *works*, there's definitely headroom for simplification and optimisation. We should do that.
_crawl_indent_points
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _map_line_buffers(
    elements: ReflowSequenceType, allow_implicit_indents: bool = False
) -> Tuple[List[_IndentLine], List[int]]:
    """Map the existing elements, building up a list of _IndentLine.

    Returns:
        :obj:`tuple` of a :obj:`list` of :obj:`_IndentLine` and a
        :obj:`list` of :obj:`int`. The first is the main output
        and is designed to be used in assessing indents and
        their effect through a SQL file. The latter is a list of
        "imbalanced" indent locations, where the positive indent
        is untaken, but its corresponding negative indent *is*
        taken.
    """
    # First build up the buffer of lines.
    lines: List[_IndentLine] = []
    # Points accumulated for the line currently being assembled.
    point_buffer: List[_IndentPoint] = []
    # Every point seen so far, keyed by element index. Used later to
    # find the end of a reference line when checking for comment-only runs.
    _previous_points: Dict[int, _IndentPoint] = {}
    # Buffers to keep track of indents which are untaken on the way
    # up but taken on the way down. We track them explicitly so we
    # can force them later.

    #: dict of ints: maps indentation balance values to the last
    #: index location where they were seen. This is a working buffer
    #: and not directly returned by the function.
    untaken_indent_locs: Dict[int, int] = {}
    #: list of ints: a list of element indices which contain untaken
    #: positive indents, that should be forced later because their
    #: corresponding negative indent _was_ taken. Several edge cases
    #: are excluded from this list and so not included. See code below.
    imbalanced_locs: List[int] = []

    for indent_point in _crawl_indent_points(
        elements, allow_implicit_indents=allow_implicit_indents
    ):
        # We evaluate all the points in a line at the same time, so
        # we first build up a buffer.
        point_buffer.append(indent_point)
        _previous_points[indent_point.idx] = indent_point

        if not indent_point.is_line_break:
            # If it's not a line break, we should still check whether it's
            # a positive untaken to keep track of them.
            # ...unless it's implicit.
            indent_stats = cast(
                ReflowPoint, elements[indent_point.idx]
            ).get_indent_impulse()
            if indent_point.indent_impulse > indent_point.indent_trough and not (
                allow_implicit_indents and indent_stats.implicit_indents
            ):
                untaken_indent_locs[
                    indent_point.initial_indent_balance + indent_point.indent_impulse
                ] = indent_point.idx
            continue

        # If it *is* a line break, then store it.
        lines.append(_IndentLine.from_points(point_buffer))

        # We should also evaluate whether this point inserts a newline at the close
        # of an indent which was untaken on the way up.
        # https://github.com/sqlfluff/sqlfluff/issues/4234
        # Special case 1:
        # If we're at the end of the file we shouldn't interpret it as a line break
        # for problem indents, they're a bit of a special case.
        # Special case 2:
        # Bracketed expressions are a bit odd here.
        # e.g.
        #   WHERE (
        #       foo = bar
        #   )
        #   LIMIT 1
        #
        # Technically there's an untaken indent before the opening bracket
        # but this layout is common practice so we're not going to force
        # one there even though there _is_ a line break after the closing
        # bracket.
        following_class_types = elements[indent_point.idx + 1].class_types
        if (
            indent_point.indent_trough
            # End of file ends case. (Special case 1)
            and "end_of_file" not in following_class_types
        ):
            # All the balance levels this dedent passes through on the way down.
            passing_indents = list(
                range(
                    indent_point.initial_indent_balance,
                    indent_point.initial_indent_balance + indent_point.indent_trough,
                    -1,
                )
            )
            # There might be many indents at this point, but if any match, then
            # we should still force an indent

            # NOTE: We work _inward_ to check which have been taken.
            for i in reversed(passing_indents):
                # Was this outer one untaken?
                if i not in untaken_indent_locs:
                    # No? Stop the loop. If we've a corresponding indent for
                    # this dedent, we shouldn't use the same location to force
                    # untaken indents at inner levels.
                    break

                loc = untaken_indent_locs[i]

                # First check for bracket special case. It's less about whether
                # the section _ends_ with a lone bracket, and more about whether
                # the _starting point_ is a bracket which closes a line. If it
                # is, then skip this location. (Special case 2).
                # NOTE: We can safely "look ahead" here because we know all files
                # end with an IndentBlock, and we know here that `loc` refers to
                # an IndentPoint.
                if "start_bracket" in elements[loc + 1].class_types:
                    continue

                # If the location was in the line we're just closing. That's
                # not a problem because it's an untaken indent which is closed
                # on the same line.
                if any(ip.idx == loc for ip in point_buffer):
                    continue

                # If the only elements between current point and the end of the
                # reference line are comments, then don't trigger, it's a misplaced
                # indent.
                # First find the end of the reference line.
                for j in range(loc, indent_point.idx):
                    _pt = _previous_points.get(j, None)
                    if not _pt:
                        continue
                    if _pt.is_line_break:
                        break
                assert _pt
                # Then check if all comments.
                # NOTE: the step of 2 skips the interleaved points, visiting
                # only the block elements between the two points.
                if all(
                    "comment" in elements[k].class_types
                    for k in range(_pt.idx + 1, indent_point.idx, 2)
                ):
                    # It is all comments. Ignore it.
                    continue

                imbalanced_locs.append(loc)

        # Remove any which are now no longer relevant from the working buffer.
        for k in list(untaken_indent_locs.keys()):
            if k > indent_point.initial_indent_balance + indent_point.indent_trough:
                del untaken_indent_locs[k]

        # Reset the buffer
        point_buffer = [indent_point]

    # Handle potential final line
    if len(point_buffer) > 1:
        lines.append(_IndentLine.from_points(point_buffer))

    return lines, imbalanced_locs
Map the existing elements, building up a list of _IndentLine. Returns: :obj:`tuple` of a :obj:`list` of :obj:`_IndentLine` and a :obj:`list` of :obj:`int`. The first is the main output and is designed to be used in assessing indents and their effect through a SQL file. The latter is a list of "imbalanced" indent locations, where the positive indent is untaken, but its corresponding negative indent *is* taken.
_map_line_buffers
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _deduce_line_current_indent(
    elements: ReflowSequenceType, last_line_break_idx: Optional[int] = None
) -> str:
    """Deduce the current indent string.

    This method accounts for both literal indents and indents
    consumed from the source as by potential templating tags.

    Returns the indent as a raw string (empty string if the line
    effectively has no indent).
    """
    indent_seg = None
    if not elements[0].segments:
        return ""
    elif last_line_break_idx:
        # Normal case: take the indent segment from the most recent line break.
        indent_seg = cast(
            ReflowPoint, elements[last_line_break_idx]
        )._get_indent_segment()
    elif isinstance(elements[0], ReflowPoint) and elements[0].segments[
        0
    ].pos_marker.working_loc == (1, 1):
        # No last_line_break_idx, but this is a point. It's the first line.

        # First check whether this is a first line with a leading
        # placeholder.
        if elements[0].segments[0].is_type("placeholder"):
            reflow_logger.debug("    Handling as initial leading placeholder")
            seg = cast(TemplateSegment, elements[0].segments[0])
            # Is the placeholder a consumed whitespace?
            if seg.source_str.startswith((" ", "\t")):
                indent_seg = seg
        # Otherwise it's an initial leading literal whitespace.
        else:
            reflow_logger.debug("    Handling as initial leading whitespace")
            # Search backward through the first point's segments for a
            # literal whitespace segment. NOTE: `indent_seg` is the loop
            # variable, so after the loop it holds the last segment checked.
            for indent_seg in elements[0].segments[::-1]:
                if indent_seg.is_type("whitespace") and not indent_seg.is_templated:
                    break
            # Handle edge case of no whitespace, but with newline.
            if not indent_seg.is_type("whitespace"):
                indent_seg = None

    if not indent_seg:
        return ""

    # We have to check pos marker before checking is templated.
    # Insertions don't have pos_markers - so aren't templated,
    # but also don't support calling is_templated.
    if indent_seg.is_type("placeholder"):
        # It's a consumed indent. Take only the portion after the final
        # newline in the consumed source string.
        return cast(TemplateSegment, indent_seg).source_str.split("\n")[-1] or ""
    elif not indent_seg.pos_marker or not indent_seg.is_templated:
        # It's a literal
        assert "\n" not in indent_seg.raw, f"Found newline in indent: {indent_seg}"
        return indent_seg.raw
    else:  # pragma: no cover
        # It's templated. This shouldn't happen. Segments returned by
        # _get_indent_segment, should be valid indents (i.e. whitespace
        # or placeholders for consumed whitespace). This is a bug.
        if indent_seg.pos_marker:
            reflow_logger.warning(
                "Segment position marker: %s: [SRC: %s, TMP:%s]",
                indent_seg.pos_marker,
                indent_seg.pos_marker.source_slice,
                indent_seg.pos_marker.templated_slice,
            )
        raise NotImplementedError(
            "Unexpected templated indent. Report this as a bug on "
            f"GitHub. Segment: {indent_seg}\n"
            "https://github.com/sqlfluff/sqlfluff/issues/new/choose"
        )
Deduce the current indent string. This method accounts for both literal indents and indents consumed from the source as by potential templating tags.
_deduce_line_current_indent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _lint_line_starting_indent(
    elements: ReflowSequenceType,
    indent_line: _IndentLine,
    single_indent: str,
    forced_indents: List[int],
) -> List[LintResult]:
    """Lint the indent at the start of a line.

    NOTE: This mutates `elements` to avoid lots of copying.
    """
    indent_points = indent_line.indent_points
    # Set up the default anchor
    initial_point_idx = indent_points[0].idx
    anchor = {"before": elements[initial_point_idx + 1].segments[0]}
    # Find initial indent, and deduce appropriate string indent.
    current_indent = _deduce_line_current_indent(
        elements, indent_points[-1].last_line_break_idx
    )
    desired_indent_units = indent_line.desired_indent_units(forced_indents)
    desired_starting_indent = desired_indent_units * single_indent
    initial_point = cast(ReflowPoint, elements[initial_point_idx])

    # Nothing to do if the line is already indented as desired.
    if current_indent == desired_starting_indent:
        return []

    if initial_point_idx > 0 and initial_point_idx < len(elements) - 1:
        # Edge case: Lone comments. Normally comments are anchored to the line
        # _after_ where they come. However, if the existing location _matches_
        # the _preceding line_, then we will allow it. It's not the "expected"
        # location but it is allowable.
        if "comment" in elements[initial_point_idx + 1].class_types:
            last_indent = _deduce_line_current_indent(
                elements, indent_points[0].last_line_break_idx
            )
            if len(current_indent) == len(last_indent):
                reflow_logger.debug("    Indent matches previous line. OK.")
                return []

        # Edge case: Multiline comments. If the previous line was a multiline
        # comment and this line starts with a multiline comment, then we should
        # only lint the indent if it's _too small_. Otherwise we risk destroying
        # indentation which the logic here is not smart enough to handle.
        if (
            "block_comment" in elements[initial_point_idx - 1].class_types
            and "block_comment" in elements[initial_point_idx + 1].class_types
        ):
            if len(current_indent) > len(desired_starting_indent):
                reflow_logger.debug("    Indent is bigger than required. OK.")
                return []

    # NOTE: If the reindent code is flagging an indent change here that you
    # don't agree with for a line with templated elements, especially in a
    # loop, it's very likely that the fix shouldn't be here but much earlier
    # in the code as part of `_revise_templated_lines()`.
    reflow_logger.debug(
        "    Correcting indent @ line %s. Expected: %r. Found %r",
        elements[initial_point_idx + 1].segments[0].pos_marker.working_line_no,
        desired_starting_indent,
        current_indent,
    )

    # Initial point gets special handling if it has no newlines.
    if indent_points[0].idx == 0 and not indent_points[0].is_line_break:
        init_seg = elements[indent_points[0].idx].segments[0]
        if init_seg.is_type("placeholder"):
            init_seg = cast(TemplateSegment, init_seg)
            # If it's a placeholder initial indent, then modify the placeholder
            # to remove the indent from it.
            src_fix = SourceFix(
                "",
                source_slice=slice(0, len(current_indent) + 1),
                templated_slice=slice(0, 0),
            )
            fixes = [
                LintFix.replace(
                    init_seg,
                    [init_seg.edit(source_fixes=[src_fix], source_str="")],
                )
            ]
        else:
            # Otherwise it's just initial whitespace. Remove it.
            fixes = [LintFix.delete(seg) for seg in initial_point.segments]

        new_results = [
            LintResult(
                initial_point.segments[0],
                fixes,
                description="First line should not be indented.",
                source="reflow.indent.existing",
            )
        ]
        new_point = ReflowPoint(())
    # Placeholder indents also get special treatment
    else:
        new_results, new_point = initial_point.indent_to(
            desired_starting_indent,
            source="reflow.indent.existing",
            **anchor,  # type: ignore
        )

    # Mutate the sequence in place with the corrected point.
    elements[initial_point_idx] = new_point
    return new_results
Lint the indent at the start of a line. NOTE: This mutates `elements` to avoid lots of copying.
_lint_line_starting_indent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _lint_line_untaken_positive_indents(
    elements: ReflowSequenceType,
    indent_line: _IndentLine,
    single_indent: str,
    imbalanced_indent_locs: List[int],
) -> Tuple[List[LintResult], List[int]]:
    """Check for positive indents which should have been taken.

    Returns a tuple of the lint results and any indent balance values
    which were forced (so the caller can track them as `forced_indents`).
    """
    # First check whether this line contains any of the untaken problem points.
    for ip in indent_line.indent_points:
        if ip.idx in imbalanced_indent_locs:
            # Force it at the relevant position.
            desired_indent = single_indent * (
                ip.closing_indent_balance - len(ip.untaken_indents)
            )
            reflow_logger.debug(
                "    Detected imbalanced +ve break @ line %s. Indenting to %r",
                elements[ip.idx + 1].segments[0].pos_marker.working_line_no,
                desired_indent,
            )
            target_point = cast(ReflowPoint, elements[ip.idx])
            results, new_point = target_point.indent_to(
                desired_indent,
                before=elements[ip.idx + 1].segments[0],
                source="reflow.indent.imbalance",
            )
            elements[ip.idx] = new_point
            # Keep track of the indent we forced, by returning it.
            return results, [ip.closing_indent_balance]

    # If we don't close the line higher there won't be any.
    starting_balance = indent_line.opening_balance()
    last_ip = indent_line.indent_points[-1]
    # Check whether it closes the opening indent.
    if last_ip.initial_indent_balance + last_ip.indent_trough <= starting_balance:
        return [], []
    # It's not, we don't close out an opened indent.
    # NOTE: Because trailing comments should always shift their any
    # surrounding indentation effects to _after_ their position, we
    # should just be able to evaluate them safely from the end of the line.

    indent_points = indent_line.indent_points

    # Account for the closing trough.
    closing_trough = last_ip.initial_indent_balance + (
        last_ip.indent_trough or last_ip.indent_impulse
    )

    # Edge case: Adjust closing trough for trailing indents
    # after comments disrupting closing trough.
    _bal = 0
    for elem in elements[last_ip.idx + 1 :]:
        if not isinstance(elem, ReflowPoint):
            # Stop scanning at the first non-comment block.
            if "comment" not in elem.class_types:
                break
            continue
        # Otherwise it's a point
        stats = elem.get_indent_impulse()
        # If it's positive, stop. We likely won't find enough negative to come.
        if stats.impulse > 0:  # pragma: no cover
            break
        closing_trough = _bal + stats.trough
        _bal += stats.impulse

    # On the way up we're looking for whether the ending balance
    # was an untaken indent or not. If it *was* untaken, there's
    # a good chance that we *should* take it.
    # NOTE: an implicit indent would not force a newline
    # because it wouldn't be in the untaken_indents. It's
    # considered _taken_ even if not.
    if closing_trough not in indent_points[-1].untaken_indents:
        # If the closing point doesn't correspond to an untaken
        # indent within the line (i.e. it _was_ taken), then
        # there won't be an appropriate place to force an indent.
        return [], []

    # The closing indent balance *does* correspond to an
    # untaken indent on this line. We *should* force a newline
    # at that position.
    for ip in indent_points:
        if ip.closing_indent_balance == closing_trough:
            target_point_idx = ip.idx
            desired_indent = single_indent * (
                ip.closing_indent_balance - len(ip.untaken_indents)
            )
            break
    else:  # pragma: no cover
        raise NotImplementedError("We should always find the relevant point.")
    reflow_logger.debug(
        "    Detected missing +ve line break @ line %s. Indenting to %r",
        elements[target_point_idx + 1].segments[0].pos_marker.working_line_no,
        desired_indent,
    )
    target_point = cast(ReflowPoint, elements[target_point_idx])
    results, new_point = target_point.indent_to(
        desired_indent,
        before=elements[target_point_idx + 1].segments[0],
        source="reflow.indent.positive",
    )
    elements[target_point_idx] = new_point
    # Keep track of the indent we forced, by returning it.
    return results, [closing_trough]
Check for positive indents which should have been taken.
_lint_line_untaken_positive_indents
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _lint_line_untaken_negative_indents( elements: ReflowSequenceType, indent_line: _IndentLine, single_indent: str, forced_indents: List[int], ) -> List[LintResult]: """Check for negative indents which should have been taken.""" # If we don't close lower than we start, there won't be any. if indent_line.closing_balance() >= indent_line.opening_balance(): return [] results: List[LintResult] = [] # On the way down we're looking for indents which *were* taken on # the way up, but currently aren't on the way down. We slice so # that the _last_ point isn't evaluated, because that's fine. for ip in indent_line.indent_points[:-1]: # Is line break, or positive indent? if ip.is_line_break or ip.indent_impulse >= 0: continue # When using implicit indents, we may find untaken negatives which # aren't shallower than the line they're on. This is because they # were implicit on the way up and so not included in `untaken_indents`. # To catch them we also check that we're shallower than the start of # of the line. if ( ip.initial_indent_balance + ip.indent_trough >= indent_line.opening_balance() ): continue # It's negative, is it untaken? In the case of a multi-dedent # they must _all_ be untaken to take this route. covered_indents = set( range( ip.initial_indent_balance, ip.initial_indent_balance + ip.indent_trough, -1, ) ) untaken_indents = set(ip.untaken_indents).difference(forced_indents) if covered_indents.issubset(untaken_indents): # Yep, untaken. continue # Edge Case: Comments. Since introducing the code to push indent effects # to the point _after_ comments, we no longer need to detect an edge case # for them here. If we change that logic again in the future, so that # indent values are allowed before comments - that code should be # reintroduced here. # Edge Case: Semicolons. For now, semicolon placement is a little # more complicated than what we do here. For now we don't (by # default) introduce missing -ve indents before semicolons. 
# TODO: Review whether this is a good idea, or whether this should be # more configurable. # NOTE: This could potentially lead to a weird situation if two # statements are already on the same line. That's a bug to solve later. if elements[ip.idx + 1 :] and elements[ip.idx + 1].class_types.intersection( ("statement_terminator", "comma") ): reflow_logger.debug( " Detected missing -ve line break @ line %s, before " "semicolon or comma. Ignoring...", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, ) continue # Edge case: template blocks. These sometimes sit in odd places # in the parse tree so don't force newlines before them if elements[ip.idx + 1 :] and "placeholder" in elements[ip.idx + 1].class_types: # are any of those placeholders blocks? if any( cast(TemplateSegment, seg).block_type.startswith("block") for seg in elements[ip.idx + 1].segments if seg.is_type("placeholder") ): reflow_logger.debug( " Detected missing -ve line break @ line %s, before " "block placeholder. Ignoring...", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, ) continue # It's negative, not a line break and was taken on the way up. # This *should* be an indent! desired_indent = single_indent * ( ip.closing_indent_balance - len(ip.untaken_indents) + len(forced_indents) ) reflow_logger.debug( " Detected missing -ve line break @ line %s. Indenting to %r", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, desired_indent, ) target_point = cast(ReflowPoint, elements[ip.idx]) new_results, new_point = target_point.indent_to( desired_indent, before=elements[ip.idx + 1].segments[0], source="reflow.indent.negative", ) elements[ip.idx] = new_point results += new_results return results
Check for negative indents which should have been taken.
_lint_line_untaken_negative_indents
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _lint_line_buffer_indents(
    elements: ReflowSequenceType,
    indent_line: _IndentLine,
    single_indent: str,
    forced_indents: List[int],
    imbalanced_indent_locs: List[int],
) -> List[LintResult]:
    """Evaluate a single set of indent points on one line.

    NOTE: This mutates the given `elements` and `forced_indents` input to avoid
    lots of copying.

    Order of operations:

    1. Evaluate the starting indent for this line.
    2. For points which aren't line breaks in the line, we evaluate them
       to see whether they *should* be. We separately address missing indents
       on the way *up* and then on the way *down*.

       - *Up* in this sense means where the indent balance goes up, but
         isn't closed again within the same line - e.g. :code:`SELECT a + (2 +`
         where the indent implied by the bracket isn't closed out before the
         end of the line.
       - *Down* in this sense means where we've dropped below the starting
         indent balance of the line - e.g. :code:`1 + 1) FROM foo` where the
         line starts within a bracket and then closes that *and* closes an
         apparent SELECT clause without a newline.

    This method returns fixes, including appropriate descriptions, to
    allow generation of LintResult objects directly from them.
    """
    reflow_logger.info(
        # NOTE: We add a little extra ## here because it's effectively
        # the start of linting a single line and so the point to start
        # interpreting the any debug logging from.
        "## Evaluate Rendered Line #%s [source line #%s]. idx=%s:%s.",
        elements[indent_line.indent_points[0].idx + 1]
        .segments[0]
        .pos_marker.working_line_no,
        elements[indent_line.indent_points[0].idx + 1]
        .segments[0]
        .pos_marker.source_position()[0],
        indent_line.indent_points[0].idx,
        indent_line.indent_points[-1].idx,
    )
    reflow_logger.debug(
        "   Line Content: %s",
        [
            repr(elem.raw)
            for elem in elements[
                indent_line.indent_points[0].idx : indent_line.indent_points[-1].idx
            ]
        ],
    )
    reflow_logger.debug("  Indent Line: %s", indent_line)
    reflow_logger.debug("  Forced Indents: %s", forced_indents)
    reflow_logger.debug("  Imbalanced Indent Locs: %s", imbalanced_indent_locs)
    results = []

    # First, handle starting indent.
    results += _lint_line_starting_indent(
        elements, indent_line, single_indent, forced_indents
    )

    # Second, handle potential missing positive indents.
    new_results, new_indents = _lint_line_untaken_positive_indents(
        elements, indent_line, single_indent, imbalanced_indent_locs
    )
    # If we have any, bank them and return. We don't need to check for
    # negatives because we know we're on the way up.
    if new_results:
        results += new_results
        # Keep track of any indents we forced
        forced_indents.extend(new_indents)
        return results

    # Third, handle potential missing negative indents.
    results += _lint_line_untaken_negative_indents(
        elements, indent_line, single_indent, forced_indents
    )

    # Lastly remove any forced indents above the closing balance.
    # Iterate through a slice so we're not editing the thing
    # that we're iterating through.
    for i in forced_indents[:]:
        if i > indent_line.closing_balance():
            forced_indents.remove(i)
    return results
Evaluate a single set of indent points on one line. NOTE: This mutates the given `elements` and `forced_indents` input to avoid lots of copying. Order of operations: 1. Evaluate the starting indent for this line. 2. For points which aren't line breaks in the line, we evaluate them to see whether they *should* be. We separately address missing indents on the way *up* and then on the way *down*. - *Up* in this sense means where the indent balance goes up, but isn't closed again within the same line - e.g. :code:`SELECT a + (2 +` where the indent implied by the bracket isn't closed out before the end of the line. - *Down* in this sense means where we've dropped below the starting indent balance of the line - e.g. :code:`1 + 1) FROM foo` where the line starts within a bracket and then closes that *and* closes an apparent SELECT clause without a newline. This method returns fixes, including appropriate descriptions, to allow generation of LintResult objects directly from them.
_lint_line_buffer_indents
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def lint_indent_points(
    elements: ReflowSequenceType,
    single_indent: str,
    skip_indentation_in: FrozenSet[str] = frozenset(),
    allow_implicit_indents: bool = False,
    ignore_comment_lines: bool = False,
) -> Tuple[ReflowSequenceType, List[LintResult]]:
    """Lint the indent points to check we have line breaks where we should.

    For linting indentation - we *first* need to make sure there are
    line breaks in all the places there should be. This takes an input
    set of indent points, and inserts additional line breaks in the
    necessary places to make sure indentation can be valid.

    Specifically we're addressing two things:

    1. Any untaken indents. An untaken indent is only valid if it's
    corresponding dedent is on the same line. If that is not the case,
    there should be a line break at the location of the indent and dedent.

    2. The indentation of lines. Given the line breaks are in the right
    place, is the line indented correctly.

    We do these at the same time, because we can't do the second without
    having line breaks in the right place, but if we're inserting a line
    break, we need to also know how much to indent by.
    """
    # First map the line buffers.
    lines: List[_IndentLine]
    imbalanced_indent_locs: List[int]
    lines, imbalanced_indent_locs = _map_line_buffers(
        elements, allow_implicit_indents=allow_implicit_indents
    )

    # Revise templated indents.
    # NOTE: There's a small dependency that we should make sure we remove
    # any "skipped source" lines before revising the templated lines in the
    # second step. That's because those "skipped source" lines can throw
    # off the detection algorithm.
    _revise_skipped_source_lines(lines, elements)
    _revise_templated_lines(lines, elements)
    # Revise comment indents
    _revise_comment_lines(lines, elements, ignore_comment_lines=ignore_comment_lines)

    # Skip elements we're configured to not touch (i.e. scripts)
    # NOTE: We iterate over a copy (`lines[:]`) because we remove
    # entries from `lines` as we go.
    for line in lines[:]:
        for block in line.iter_blocks(elements):
            if any(
                skip_indentation_in.intersection(types)
                for types in block.depth_info.stack_class_types
            ):
                reflow_logger.debug(
                    "Skipping line %s because it is within one of %s",
                    line,
                    skip_indentation_in,
                )
                lines.remove(line)
                break

    reflow_logger.debug("# Evaluate lines for indentation.")
    # Last: handle each of the lines.
    results: List[LintResult] = []
    # NOTE: forced_indents is mutated by _lint_line_buffer_indents
    # It's used to pass from one call to the next.
    forced_indents: List[int] = []
    elem_buffer = elements.copy()  # Make a working copy to mutate.
    for line in lines:
        line_results = _lint_line_buffer_indents(
            elem_buffer, line, single_indent, forced_indents, imbalanced_indent_locs
        )
        if line_results:
            reflow_logger.info("      PROBLEMS:")
            for res in line_results:
                reflow_logger.info("        %s @ %s", res.source, res.anchor)
                reflow_logger.info("          %s", res.description)
        results += line_results

    return elem_buffer, results
Lint the indent points to check we have line breaks where we should. For linting indentation - we *first* need to make sure there are line breaks in all the places there should be. This takes an input set of indent points, and inserts additional line breaks in the necessary places to make sure indentation can be valid. Specifically we're addressing two things: 1. Any untaken indents. An untaken indent is only valid if it's corresponding dedent is on the same line. If that is not the case, there should be a line break at the location of the indent and dedent. 2. The indentation of lines. Given the line breaks are in the right place, is the line indented correctly. We do these at the same time, because we can't do the second without having line breaks in the right place, but if we're inserting a line break, we need to also know how much to indent by.
lint_indent_points
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _source_char_len(elements: ReflowSequenceType) -> int:
    """Calculate length in the source file.

    NOTE: This relies heavily on the sequence already being
    split appropriately. It will raise errors if not.

    TODO: There's a good chance that this might not play well
    with other fixes. If we find segments without positions
    then it will probably error. Those will need ironing
    out.

    TODO: This probably needs more tests. It's already
    the source of quite a few fiddly sections.
    """
    char_len = 0
    # Tracks the last slice counted, so duplicated slices (e.g. from
    # templated sections) aren't double counted.
    last_source_slice: Optional[slice] = None
    for seg in chain.from_iterable(elem.segments for elem in elements):
        # Indent tokens occasionally have strange position markers.
        # They also don't have length so skip them.
        # TODO: This is actually caused by bugs and inconsistencies
        # in how the source_slice is generated for the position markers
        # of indent and dedent tokens. That's a job for another day
        # however.
        if seg.is_type("indent"):
            continue
        # Get the source position. If there is no source position then it's
        # a recent edit or modification. We shouldn't evaluate it until it's
        # been positioned. Without a source marker we don't know how to treat
        # it.
        if not seg.pos_marker:  # pragma: no cover
            break
        source_slice = seg.pos_marker.source_slice
        # Is there a newline in the source string?
        source_str = seg.pos_marker.source_str()
        if "\n" in source_str:
            # There is. Stop here. It's probably a complicated
            # jinja tag, so it's safer to stop here.
            # TODO: In future, we should probably be a little
            # smarter about this, but for now this is ok. Without
            # an algorithm for layout out code _within_ jinja tags
            # we won't be able to suggest appropriate fixes.
            char_len += source_str.index("\n")
            break
        slice_len = slice_length(source_slice)
        # Only update the length if it's a new slice.
        if source_slice != last_source_slice:
            # If it's got size in the template but not in the source, it's
            # probably an insertion.
            if seg.raw and not slice_len:
                char_len += len(seg.raw)
                # NOTE: Don't update the last_source_slice.
            elif not slice_len:
                # If it's not got a raw and no length, it's
                # irrelevant. Ignore it. It's probably a meta.
                continue
            # Otherwise if we're literal, use the raw length
            # because it might be an edit.
            elif seg.pos_marker.is_literal():
                char_len += len(seg.raw)
                last_source_slice = source_slice
            # Otherwise assume it's templated code.
            else:
                char_len += slice_length(source_slice)
                last_source_slice = source_slice

    return char_len
Calculate length in the source file. NOTE: This relies heavily on the sequence already being split appropriately. It will raise errors if not. TODO: There's a good chance that this might not play well with other fixes. If we find segments without positions then it will probably error. Those will need ironing out. TODO: This probably needs more tests. It's already the source of quite a few fiddly sections.
_source_char_len
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _rebreak_priorities(spans: List[_RebreakSpan]) -> Dict[int, int]: """Process rebreak spans into opportunities to split lines. The index to insert a potential indent at depends on the line_position of the span. Infer that here and store the indices in the elements. """ rebreak_priority = {} for span in spans: if span.line_position == "leading": rebreak_indices = [span.start_idx - 1] elif span.line_position == "trailing": rebreak_indices = [span.end_idx + 1] elif span.line_position == "alone": rebreak_indices = [span.start_idx - 1, span.end_idx + 1] else: # pragma: no cover raise NotImplementedError( "Unexpected line position: %s", span.line_position ) # NOTE: Operator precedence here is hard coded. It could be # moved to configuration in the layout section in the future. # Operator precedence is fairly consistent between dialects # so for now it feels ok that it's coded here - it also wouldn't # be a breaking change at that point so no pressure to release # it early. span_raw = span.target.raw_upper priority = 6 # Default to 6 for now i.e. the same as '+' # Override priority for specific precedence. if span_raw == ",": priority = 1 elif span.target.is_type("assignment_operator"): # This one is a little rarer so not covered in tests yet. # Logic is the same as others though. priority = 2 # pragma: no cover elif span_raw == "OR": priority = 3 elif span_raw == "AND": priority = 4 elif span.target.is_type("comparison_operator"): priority = 5 elif span_raw in ("*", "/", "%"): priority = 7 for rebreak_idx in rebreak_indices: rebreak_priority[rebreak_idx] = priority return rebreak_priority
Process rebreak spans into opportunities to split lines. The index to insert a potential indent at depends on the line_position of the span. Infer that here and store the indices in the elements.
_rebreak_priorities
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _increment_balance( input_balance: int, indent_stats: IndentStats, elem_idx: int, ) -> Tuple[int, MatchedIndentsType]: """Logic for stepping through _match_indents. This is the part of that logic which is potentially fragile so is separated here into a more isolated function for better testing. It's very easy to get wrong and necessary so we don't mistake empty elements, but potentially fragile nonetheless. Returns: A tuple where the first element is the resulting balance and the second is a :obj:`defaultdict` of the new elements to add to `matched_indents`. Positive indent example: >>> _increment_balance(0, IndentStats(1, 0), 7) (1, defaultdict(<class 'list'>, {1.0: [7]})) Negative indent example: >>> _increment_balance(3, IndentStats(-1, -1), 11) (2, defaultdict(<class 'list'>, {3.0: [11]})) Double negative indent example: >>> _increment_balance(3, IndentStats(-2, -2), 16) (1, defaultdict(<class 'list'>, {3.0: [16], 2.0: [16]})) Dip indent example: >>> _increment_balance(3, IndentStats(0, -1), 21) (3, defaultdict(<class 'list'>, {3.0: [21]})) """ balance = input_balance matched_indents: MatchedIndentsType = defaultdict(list) if indent_stats.trough < 0: # NOTE: for negative, *trough* counts. # in case of more than one indent we loop and apply to all. for b in range(0, indent_stats.trough, -1): matched_indents[(balance + b) * 1.0].append(elem_idx) # NOTE: We carry forward the impulse, not the trough. # This is important for dedent+indent pairs. balance += indent_stats.impulse elif indent_stats.impulse > 0: # NOTE: for positive, *impulse* counts. # in case of more than one indent we loop and apply to all. for b in range(0, indent_stats.impulse): matched_indents[(balance + b + 1) * 1.0].append(elem_idx) balance += indent_stats.impulse return balance, matched_indents
Logic for stepping through _match_indents. This is the part of that logic which is potentially fragile so is separated here into a more isolated function for better testing. It's very easy to get wrong and necessary so we don't mistake empty elements, but potentially fragile nonetheless. Returns: A tuple where the first element is the resulting balance and the second is a :obj:`defaultdict` of the new elements to add to `matched_indents`. Positive indent example: >>> _increment_balance(0, IndentStats(1, 0), 7) (1, defaultdict(<class 'list'>, {1.0: [7]})) Negative indent example: >>> _increment_balance(3, IndentStats(-1, -1), 11) (2, defaultdict(<class 'list'>, {3.0: [11]})) Double negative indent example: >>> _increment_balance(3, IndentStats(-2, -2), 16) (1, defaultdict(<class 'list'>, {3.0: [16], 2.0: [16]})) Dip indent example: >>> _increment_balance(3, IndentStats(0, -1), 21) (3, defaultdict(<class 'list'>, {3.0: [21]}))
_increment_balance
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _match_indents(
    line_elements: ReflowSequenceType,
    rebreak_priorities: Dict[int, int],
    newline_idx: int,
    allow_implicit_indents: bool = False,
) -> MatchedIndentsType:
    """Identify indent points, taking into account rebreak_priorities.

    Expect fractional keys, because of the half values for
    rebreak points.

    Args:
        line_elements: The elements of the line being evaluated
            (including its closing point).
        rebreak_priorities: Indices (within `line_elements`) of rebreak
            opportunities, mapped to their priority.
        newline_idx: The index of the final element of the line within
            the *whole* element sequence (used to translate local
            indices to global ones).
        allow_implicit_indents: Whether implicit indents should be
            considered as valid break locations.

    Returns:
        A mapping of indent balance (fractional for rebreak points) to
        the global element indices where a break at that balance could
        be inserted.
    """
    balance = 0
    matched_indents: MatchedIndentsType = defaultdict(list)
    implicit_indents: Dict[int, Tuple[int, ...]] = {}
    for idx, e in enumerate(line_elements):
        # We only care about points, because only they contain indents.
        if not isinstance(e, ReflowPoint):
            continue

        # As usual, indents are referred to by their "uphill" side
        # so what number we store the point against depends on whether
        # it's positive or negative.
        # NOTE: Here we don't actually pass in the forward types because
        # we don't need them for the output. It doesn't make a difference.
        indent_stats = e.get_indent_impulse()
        # Translate the local index to a global one.
        e_idx = newline_idx - len(line_elements) + idx + 1

        # Save any implicit indents.
        if indent_stats.implicit_indents:
            implicit_indents[e_idx] = indent_stats.implicit_indents

        balance, nmi = _increment_balance(balance, indent_stats, e_idx)
        # Incorporate nmi into matched_indents
        for b, indices in nmi.items():
            matched_indents[b].extend(indices)

        # Something can be both an indent point AND a rebreak point.
        if idx in rebreak_priorities:
            # For potential rebreak options (i.e. ones without an indent)
            # we add 0.5 so that they sit *between* the varying indent
            # options. that means we split them before any of their
            # content, but don't necessarily split them when their
            # container is split.

            # Also to spread out the breaks within an indent, we further
            # add hints to distinguish between them. This is where operator
            # precedence (as defined above) actually comes into effect.
            priority = rebreak_priorities[idx]
            # Assume `priority` in range 0 - 50. So / 100 to add to 0.5.
            matched_indents[balance + 0.5 + (priority / 100)].append(e_idx)
        else:
            # NOTE: This `continue` is a no-op (it's the last statement
            # of the loop body) but is retained as-is.
            continue

    # Before working out the lowest option, we purge any which contain
    # ONLY the final point. That's because adding indents there won't
    # actually help the line length. There's *already* a newline there.
    for indent_level in list(matched_indents.keys()):
        if matched_indents[indent_level] == [newline_idx]:
            matched_indents.pop(indent_level)
            reflow_logger.debug(
                " purging balance of %s, it references only the final element.",
                indent_level,
            )

    # ADDITIONALLY - if implicit indents are allowed we should
    # only use them if they match another untaken point (which isn't
    # implicit, or the end of the line).
    # NOTE: This logic might be best suited to be sited elsewhere
    # when (and if) we introduce smarter choices on where to add
    # indents.
    if allow_implicit_indents:
        for indent_level in list(matched_indents.keys()):
            major_points = set(matched_indents[indent_level]).difference(
                [newline_idx], implicit_indents.keys()
            )
            if not major_points:
                matched_indents.pop(indent_level)
                reflow_logger.debug(
                    " purging balance of %s, it references implicit indents "
                    "or the final indent.",
                    indent_level,
                )

    return matched_indents
Identify indent points, taking into account rebreak_priorities. Expect fractional keys, because of the half values for rebreak points.
_match_indents
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _fix_long_line_with_comment(
    line_buffer: ReflowSequenceType,
    elements: ReflowSequenceType,
    current_indent: str,
    line_length_limit: int,
    last_indent_idx: Optional[int],
    trailing_comments: str = "before",
) -> Tuple[ReflowSequenceType, List[LintFix]]:
    """Fix long line by moving trailing comments if possible.

    This method (unlike the ones for normal lines), just returns
    a new `elements` argument rather than mutating it.

    Args:
        line_buffer: The elements of the long line (ending with the
            inline comment block).
        elements: The full element sequence the line belongs to.
        current_indent: The indent string for the current line.
        line_length_limit: The configured maximum line length.
        last_indent_idx: Index of the indent point before this line, or
            None if we're at the start of the file.
        trailing_comments: Either "before" (default, move the comment
            to its own line above) or "after" (move it below).

    Returns:
        A tuple of the (new) element sequence and the fixes to apply.
        Returns the unmodified sequence with no fixes when moving the
        comment is unsafe or wouldn't help.
    """
    # If the comment contains a noqa, don't fix it. It's unsafe.
    if "noqa" in line_buffer[-1].segments[-1].raw:
        reflow_logger.debug(" Unfixable because noqa unsafe to move.")
        return elements, []

    # If the comment is longer than the limit _anyway_, don't move
    # it. It will still be too long.
    if len(line_buffer[-1].segments[-1].raw) + len(current_indent) > line_length_limit:
        reflow_logger.debug(" Unfixable because comment too long anyway.")
        return elements, []

    comment_seg = line_buffer[-1].segments[-1]
    first_seg = line_buffer[0].segments[0]
    last_elem_idx = elements.index(line_buffer[-1])
    assert trailing_comments in (
        "after",
        "before",
    ), f"Unexpected value for `trailing_comments`: {trailing_comments!r}"

    # The simpler case is if we're moving the comment to the line
    # _after_. In that case we just coerce the point before it to
    # be an indent.
    if trailing_comments == "after":
        anchor_point = cast(ReflowPoint, line_buffer[-2])
        results, new_point = anchor_point.indent_to(current_indent, before=comment_seg)
        elements = (
            elements[: last_elem_idx - 1] + [new_point] + elements[last_elem_idx:]
        )
        return elements, fixes_from_results(results)

    # Otherwise we're moving it up and _before_ the line, which is
    # a little more involved (but also the default).
    fixes = [
        # Remove the comment from its current position, and any
        # whitespace in the previous point.
        LintFix.delete(comment_seg),
        *[
            LintFix.delete(ws)
            for ws in line_buffer[-2].segments
            if ws.is_type("whitespace")
        ],
    ]
    # Are we at the start of the file? If so, there's no
    # indent, and also no previous segments to deal with.
    if last_indent_idx is None:
        new_point = ReflowPoint((NewlineSegment(),))
        prev_elems = []
        anchor = first_seg
    else:
        new_segments: Tuple[RawSegment, ...] = (NewlineSegment(),)
        if current_indent:
            new_segments += (WhitespaceSegment(current_indent),)
        new_point = ReflowPoint(new_segments)
        prev_elems = elements[: last_indent_idx + 1]
        anchor = elements[last_indent_idx + 1].segments[0]

    fixes.append(
        # NOTE: This looks a little convoluted, but we create
        # *before* a block here rather than *after* a point,
        # because the point may have been modified already by
        # reflow code and may not be a reliable anchor.
        LintFix.create_before(
            anchor,
            [
                comment_seg,
                *new_point.segments,
            ],
        )
    )

    elements = (
        prev_elems
        + [
            line_buffer[-1],
            new_point,
        ]
        + line_buffer[:-2]
        + elements[last_elem_idx + 1 :]
    )

    return elements, fixes
Fix long line by moving trailing comments if possible. This method (unlike the ones for normal lines), just returns a new `elements` argument rather than mutating it.
_fix_long_line_with_comment
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _fix_long_line_with_fractional_targets( elements: ReflowSequenceType, target_breaks: List[int], desired_indent: str ) -> List[LintResult]: """Work out fixes for splitting a long line at locations like operators. NOTE: This mutates `elements` to avoid copying. This is a helper function within .lint_line_length(). """ line_results = [] for e_idx in target_breaks: e = cast(ReflowPoint, elements[e_idx]) new_results, new_point = e.indent_to( desired_indent, after=elements[e_idx - 1].segments[-1], before=elements[e_idx + 1].segments[0], ) # NOTE: Mutation of elements. elements[e_idx] = new_point line_results += new_results return line_results
Work out fixes for splitting a long line at locations like operators. NOTE: This mutates `elements` to avoid copying. This is a helper function within .lint_line_length().
_fix_long_line_with_fractional_targets
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _fix_long_line_with_integer_targets(
    elements: ReflowSequenceType,
    target_breaks: List[int],
    line_length_limit: int,
    inner_indent: str,
    outer_indent: str,
) -> List[LintResult]:
    """Work out fixes for splitting a long line at locations like indents.

    NOTE: This mutates `elements` to avoid copying.

    This is a helper function within .lint_line_length().

    Args:
        elements: The full element sequence (mutated in place).
        target_breaks: Candidate element indices to break at.
        line_length_limit: The configured maximum line length.
        inner_indent: The indent string to use after opening indents.
        outer_indent: The indent string to use after closing dedents.
    """
    line_results: List[LintResult] = []

    # If we can get to the uphill indent of later break, and still be within
    # the line limit, then we can skip everything before it.
    purge_before = 0
    for e_idx in target_breaks:
        # Is the following block already past the limit?
        # NOTE: We use the block because we know it will have segments.
        if not elements[e_idx + 1].segments[0].pos_marker:
            # If it doesn't have position - we should just bow out
            # now. It's too complicated.
            break  # pragma: no cover
        if (
            elements[e_idx + 1].segments[0].pos_marker.working_line_pos
            > line_length_limit
        ):
            # If we're past the line length limit, stop looking.
            break

        e = cast(ReflowPoint, elements[e_idx])
        if e.get_indent_impulse().trough < 0:
            # It's negative. Skip onward.
            continue

        # If we get this far, then it's positive, but still within
        # the line limit. We can purge any pairs before this.
        purge_before = e_idx
        reflow_logger.debug(" ...breaks before %s unnecessary.", purge_before)
    # Only keep indices which are after the critical point.
    target_breaks = [e_idx for e_idx in target_breaks if e_idx >= purge_before]
    reflow_logger.debug(" Remaining breaks: %s.", target_breaks)

    for e_idx in target_breaks:
        e = cast(ReflowPoint, elements[e_idx])
        indent_stats = e.get_indent_impulse()
        # NOTE: We check against the _impulse_ here rather than the
        # _trough_ because if we're about to step back up again then
        # it should still be indented.
        if indent_stats.impulse < 0:
            new_indent = outer_indent
            # NOTE: If we're about to insert a dedent before a
            # comma or semicolon ... don't. They are a bit special
            # in being allowed to trail.
            if elements[e_idx + 1].class_types.intersection(
                ("statement_terminator", "comma")
            ):
                reflow_logger.debug(" Skipping dedent before comma or semicolon.")
                # We break rather than continue because this is
                # necessarily a step back down.
                break
        else:
            new_indent = inner_indent

        new_results, new_point = e.indent_to(
            new_indent,
            after=elements[e_idx - 1].segments[-1],
            before=elements[e_idx + 1].segments[0],
        )
        # NOTE: Mutation of elements.
        elements[e_idx] = new_point
        line_results += new_results

        # If the balance is *also* negative, then we should also stop.
        # We've indented a whole section - that's enough for now.
        # We've already skipped over any unnecessary sections, and they shouldn't
        # be reassessed on the next pass. If there are later sections which *also*
        # need to be reindented, then we'll catch them when we come back around.
        if indent_stats.trough < 0:
            reflow_logger.debug(" Stopping as we're back down.")
            break

    return line_results
Work out fixes for splitting a long line at locations like indents. NOTE: This mutates `elements` to avoid copying. This is a helper function within .lint_line_length().
_fix_long_line_with_integer_targets
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def lint_line_length(
    elements: ReflowSequenceType,
    root_segment: BaseSegment,
    single_indent: str,
    line_length_limit: int,
    allow_implicit_indents: bool = False,
    trailing_comments: str = "before",
) -> Tuple[ReflowSequenceType, List[LintResult]]:
    """Lint the sequence to lines over the configured length.

    NOTE: This assumes that `lint_indent_points` has already been run.
    The method won't necessarily *fail* but it does assume that the
    current indent is correct and that indents have already been inserted
    where they're missing.

    Args:
        elements: The reflow sequence to evaluate (not mutated; a copy
            is taken).
        root_segment: The root segment of the parse tree (used for
            identifying rebreak spans).
        single_indent: The string for one step of indentation.
        line_length_limit: The configured maximum line length. Values
            <= 0 disable the check entirely.
        allow_implicit_indents: Whether implicit indents are valid
            break locations.
        trailing_comments: Where to move over-long trailing comments
            ("before" or "after" the line).

    Returns:
        A tuple of the (possibly modified) element sequence and the
        lint results raised.
    """
    # First check whether we should even be running this check.
    if line_length_limit <= 0:
        reflow_logger.debug("# Line length check disabled.")
        return elements, []

    reflow_logger.debug("# Evaluate lines for length.")
    # Make a working copy to mutate.
    elem_buffer: ReflowSequenceType = elements.copy()
    line_buffer: ReflowSequenceType = []
    results: List[LintResult] = []

    last_indent_idx: Optional[int] = None
    for i, elem in enumerate(elem_buffer):
        # Are there newlines in the element?
        # If not, add it to the buffer and wait to evaluate the line.
        # If yes, it's time to evaluate the line.
        if isinstance(elem, ReflowPoint) and (
            # Is it the end of the file?
            # NOTE: Here, we're actually looking to see whether we're
            # currently on the _point before the end of the file_ rather
            # than actually on the final block. This is important because
            # the following code assumes we're on a point and not a block.
            # We're safe from indexing errors if we're on a point, because
            # we know there's always a trailing block.
            "end_of_file" in elem_buffer[i + 1].class_types
            # Or is there a newline?
            or has_untemplated_newline(elem)
        ):
            # In either case we want to process this, so carry on.
            pass
        else:
            # Otherwise build up the buffer and loop around again.
            line_buffer.append(elem)
            continue

        # If we don't have a buffer yet, also carry on. Nothing to lint.
        if not line_buffer:
            continue

        # Evaluate a line

        # Get the current indent.
        if last_indent_idx is not None:
            current_indent = _deduce_line_current_indent(elem_buffer, last_indent_idx)
        else:
            current_indent = ""

        # Get the length of all the elements on the line (other than the indent).
        # NOTE: This is the length in the _source_, because that's the line
        # length that the reader is actually looking at.
        char_len = _source_char_len(line_buffer)

        # Is the line over the limit length?
        line_len = len(current_indent) + char_len
        # NOTE: We should be able to rely on the first elements of the line having
        # a non-zero number of segments. If this isn't the case we may need to add
        # a clause to handle that scenario here.
        assert line_buffer[0].segments
        first_seg = line_buffer[0].segments[0]
        line_no = first_seg.pos_marker.working_line_no
        if line_len <= line_length_limit:
            reflow_logger.info(
                " Line #%s. Length %s <= %s. OK.",
                line_no,
                line_len,
                line_length_limit,
            )
        else:
            reflow_logger.info(
                " Line #%s. Length %s > %s. PROBLEM.",
                line_no,
                line_len,
                line_length_limit,
            )

            # Potential places to shorten the line are either indent locations
            # or segments with a defined line position (like operators).

            # NOTE: We make a buffer including the closing point, because we're
            # looking for pairs of indents and dedents. The closing dedent for one
            # of those pairs might be in the closing point so if we don't have it
            # then we'll miss any locations which have their closing dedent at
            # the end of the line.
            line_elements = line_buffer + [elem]

            # Type hints
            fixes: List[LintFix]

            # Identify rebreak spans first so we can work out their indentation
            # in the next section.
            # NOTE: In identifying spans, we give the method a little more than
            # the line, so that it can correctly identify the ends of things
            # accurately. It's safe to go to i+1 because there is always an
            # end_of_file marker at the end which we could span into.
            spans = identify_rebreak_spans(
                line_elements + [elements[i + 1]], root_segment
            )
            reflow_logger.debug(" spans: %s", spans)
            rebreak_priorities = _rebreak_priorities(spans)
            reflow_logger.debug(" rebreak_priorities: %s", rebreak_priorities)

            # Identify indent points second, taking into
            # account rebreak_priorities.
            matched_indents = _match_indents(
                line_elements,
                rebreak_priorities,
                i,
                allow_implicit_indents=allow_implicit_indents,
            )
            reflow_logger.debug(" matched_indents: %s", matched_indents)

            # If we don't have any matched_indents, we don't have any options.
            # This could be for things like comment lines.
            desc = f"Line is too long ({line_len} > {line_length_limit})."

            # Easiest option are lines ending with comments, but that aren't *all*
            # comments and the comment itself is shorter than the limit.
            # The reason for that last clause is that if the comment (plus an indent)
            # is already longer than the limit, then there's no point just putting it
            # on a new line - it will still fail - so it doesn't actually fix the issue.
            # Deal with them first.
            if (
                len(line_buffer) > 1
                # We can only fix _inline_ comments in this way. Others should
                # just be flagged as issues.
                and line_buffer[-1].segments[-1].is_type("inline_comment")
            ):
                reflow_logger.debug(" Handling as inline comment line.")
                elem_buffer, fixes = _fix_long_line_with_comment(
                    line_buffer,
                    elem_buffer,
                    current_indent,
                    line_length_limit,
                    last_indent_idx,
                    trailing_comments=trailing_comments,
                )

            # Then check for cases where we have no other options.
            elif not matched_indents:
                # NOTE: In this case we have no options for shortening the line.
                # We'll still report a linting issue - but no fixes are provided.
                reflow_logger.debug(" Handling as unfixable line.")
                fixes = []

            # Lastly deal with the "normal" case.
            else:
                # For now, the algorithm we apply isn't particularly elegant
                # and just finds the "outermost" opportunity to add additional
                # line breaks and adds them.
                # TODO: Make this more elegant later. The two obvious directions
                # would be to potentially add a) line breaks at multiple levels
                # in a single pass and b) to selectively skip levels if they're
                # "trivial", or if there would be a more suitable inner indent
                # to add first (e.g. the case of "(((((((a)))))))").
                reflow_logger.debug(" Handling as normal line.")
                # NOTE: Double indents (or more likely dedents) will be
                # potentially in *multiple* sets - don't double count them
                # if we start doing something more clever.
                target_balance = min(matched_indents.keys())
                desired_indent = current_indent
                if target_balance >= 1:
                    desired_indent += single_indent
                target_breaks = matched_indents[target_balance]
                reflow_logger.debug(
                    " Targeting balance of %s, indent: %r for %s",
                    target_balance,
                    desired_indent,
                    target_breaks,
                )

                # Is one of the locations the final element? If so remove it.
                # There's already a line break there.
                if i in target_breaks:
                    target_breaks.remove(i)

                # Is it an "integer" indent or a fractional indent?
                # Integer indents (i.e. 1.0, 2.0, ...) are based on Indent and
                # Dedent tokens. Fractional indents (i.e. 1.5, 1.52, ...) are
                # based more on rebreak spans (e.g. around commas and operators).
                # The latter is simpler in that it doesn't change the indents,
                # just adds line breaks. The former is more complicated.
                # NOTE: Both of these methods mutate the `elem_buffer`.
                if target_balance % 1 == 0:
                    line_results = _fix_long_line_with_integer_targets(
                        elem_buffer,
                        target_breaks,
                        line_length_limit,
                        desired_indent,
                        current_indent,
                    )
                else:
                    line_results = _fix_long_line_with_fractional_targets(
                        elem_buffer, target_breaks, desired_indent
                    )

                # Consolidate all the results for the line into one.
                fixes = fixes_from_results(line_results)

            results.append(
                LintResult(
                    # First segment on the line is the result anchor.
                    first_seg,
                    fixes=fixes,
                    description=desc,
                    source="reflow.long_line",
                )
            )

        # Regardless of whether the line was good or not, clear
        # the buffers ready for the next line.
        line_buffer = []
        last_indent_idx = i

    return elem_buffer, results
Lint the sequence to lines over the configured length. NOTE: This assumes that `lint_indent_points` has already been run. The method won't necessarily *fail* but it does assume that the current indent is correct and that indents have already been inserted where they're missing.
lint_line_length
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/reindent.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/reindent.py
MIT
def _unpack_constraint(constraint: str, strip_newlines: bool) -> Tuple[str, bool]: """Unpack a spacing constraint. Used as a helper function in `determine_constraints`. """ # Check for deprecated options. if constraint == "inline": # pragma: no cover reflow_logger.warning( "Found 'inline' specified as a 'spacing_within' constraint. " "This setting is deprecated and has been replaced by the more " "explicit 'touch:inline'. Upgrade your configuration to " "remove this warning." ) constraint = "touch:inline" # Unless align, split. if constraint.startswith("align"): modifier = "" else: constraint, _, modifier = constraint.partition(":") if not modifier: pass elif modifier == "inline": strip_newlines = True else: # pragma: no cover raise SQLFluffUserError(f"Unexpected constraint modifier: {constraint!r}") return constraint, strip_newlines
Unpack a spacing constraint. Used as a helper function in `determine_constraints`.
_unpack_constraint
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/respace.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/respace.py
MIT
def determine_constraints( prev_block: Optional["ReflowBlock"], next_block: Optional["ReflowBlock"], strip_newlines: bool = False, ) -> Tuple[str, str, bool]: """Given the surrounding blocks, determine appropriate constraints.""" # Start with the defaults. pre_constraint, strip_newlines = _unpack_constraint( prev_block.spacing_after if prev_block else "single", strip_newlines ) post_constraint, strip_newlines = _unpack_constraint( next_block.spacing_before if next_block else "single", strip_newlines ) # Work out the common parent segment and depth within_spacing = "" if prev_block and next_block: common = prev_block.depth_info.common_with(next_block.depth_info) # Just check the most immediate parent for now for speed. # TODO: Review whether just checking the parent is enough. # NOTE: spacing configs will be available on both sides if they're common # so it doesn't matter whether we get it from prev_block or next_block. idx = prev_block.depth_info.stack_hashes.index(common[-1]) within_constraint = prev_block.stack_spacing_configs.get(common[-1], None) if within_constraint: within_spacing, strip_newlines = _unpack_constraint( within_constraint, strip_newlines ) # Prohibit stripping newlines after comment segments if any(seg.is_type("comment") for seg in prev_block.segments): strip_newlines = False # If segments are expected to be touch within. Then modify # constraints accordingly. if within_spacing == "touch": # NOTE: We don't override if it's already "any" if pre_constraint != "any": pre_constraint = "touch" if post_constraint != "any": post_constraint = "touch" elif within_spacing == "any": pre_constraint = "any" post_constraint = "any" elif within_spacing == "single": pass elif within_spacing: # pragma: no cover assert prev_block raise SQLFluffUserError( f"Unexpected within constraint: {within_constraint!r} for " f"{prev_block.depth_info.stack_class_types[idx]}" ) return pre_constraint, post_constraint, strip_newlines
Given the surrounding blocks, determine appropriate constraints.
determine_constraints
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/respace.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/respace.py
MIT
def process_spacing(
    segment_buffer: List[RawSegment], strip_newlines: bool = False
) -> Tuple[List[RawSegment], Optional[RawSegment], List[LintResult]]:
    """Given the existing spacing, extract information and do basic pruning.

    Args:
        segment_buffer: The raw segments of a point (whitespace,
            newlines etc.).
        strip_newlines: Whether literal newlines should be removed.

    Returns:
        A tuple of (the segment buffer with pruned segments removed,
        the single remaining trailing whitespace segment if any, and
        the lint results generated).
    """
    removal_buffer: List[RawSegment] = []
    result_buffer: List[LintResult] = []
    last_whitespace: List[RawSegment] = []

    # Loop through the existing segments looking for spacing.
    for seg in segment_buffer:
        # If it's whitespace, store it.
        if seg.is_type("whitespace"):
            last_whitespace.append(seg)

        # If it's a newline, react accordingly.
        # NOTE: This should only trigger on literal newlines.
        elif seg.is_type("newline", "end_of_file"):
            if seg.pos_marker and not seg.pos_marker.is_literal():
                last_whitespace = []
                reflow_logger.debug(" Skipping templated newline: %s", seg)
                continue

            # Are we stripping newlines?
            if strip_newlines and seg.is_type("newline"):
                reflow_logger.debug(" Stripping newline: %s", seg)
                removal_buffer.append(seg)
                result_buffer.append(
                    LintResult(
                        seg, [LintFix.delete(seg)], description="Unexpected line break."
                    )
                )
                # Carry on as though it wasn't here.
                continue

            # Check if we've just passed whitespace. If we have, remove it
            # as trailing whitespace, both from the buffer and create a fix.
            if last_whitespace:
                reflow_logger.debug(" Removing trailing whitespace.")
                for ws in last_whitespace:
                    removal_buffer.append(ws)
                    result_buffer.append(
                        LintResult(
                            ws,
                            [LintFix.delete(ws)],
                            description="Unnecessary trailing whitespace.",
                        )
                    )

            # Regardless, unset last_whitespace.
            # We either just deleted it, or it's not relevant for any future
            # segments.
            last_whitespace = []

    if len(last_whitespace) >= 2:
        reflow_logger.debug(" Removing adjoining whitespace.")
        # If we find multiple sequential whitespaces, it's the sign
        # that we've removed something. Only the first one should be
        # a valid indent (or the one we consider for constraints).
        # Remove all the following ones.
        # BUG FIX: anchor and delete each duplicate whitespace segment
        # (`ws`), not `seg`. `seg` here is the loop-leaked *final*
        # segment of the buffer, so the old code anchored the result on
        # the wrong segment and issued the same delete fix repeatedly,
        # while still buffering `ws` for removal - an inconsistency.
        for ws in last_whitespace[1:]:
            removal_buffer.append(ws)
            result_buffer.append(
                LintResult(
                    ws,
                    [LintFix.delete(ws)],
                    description="Removing duplicate whitespace.",
                )
            )

    # Turn the removal buffer updated segment buffer, last whitespace
    # and associated fixes.
    return (
        [s for s in segment_buffer if s not in removal_buffer],
        # We should have removed all other whitespace by now.
        last_whitespace[0] if last_whitespace else None,
        result_buffer,
    )
Given the existing spacing, extract information and do basic pruning.
process_spacing
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/respace.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/respace.py
MIT
def _determine_aligned_inline_spacing(
    root_segment: BaseSegment,
    whitespace_seg: RawSegment,
    next_seg: RawSegment,
    next_pos: PositionMarker,
    segment_type: str,
    align_within: Optional[str],
    align_scope: Optional[str],
) -> str:
    """Work out spacing for instance of an `align` constraint.

    Given a whitespace segment subject to an ``align`` constraint, find
    sibling segments of ``segment_type`` (searched for within an
    ``align_within`` parent, bounded by ``align_scope``) and return the
    whitespace string which would left-align ``next_seg`` with those
    siblings. Falls back to a single space when no parent or no
    comparable siblings can be found.
    """
    # Find the level of segment that we're aligning.
    # NOTE: Reverse slice
    parent_segment = None

    # Edge case: if next_seg has no position, we should use the position
    # of the whitespace for searching.
    if align_within:
        for ps in root_segment.path_to(
            next_seg if next_seg.pos_marker else whitespace_seg
        )[::-1]:
            if ps.segment.is_type(align_within):
                parent_segment = ps.segment
            # Stop searching once we hit a scoping boundary.
            if align_scope and ps.segment.is_type(align_scope):
                break

    if not parent_segment:
        reflow_logger.debug(" No Parent found for alignment case. Treat as single.")
        return " "

    # We've got a parent. Find some siblings.
    reflow_logger.debug(" Determining alignment within: %s", parent_segment)
    siblings = []
    for sibling in parent_segment.recursive_crawl(segment_type):
        # Purge any siblings with a boundary between them
        if not align_scope or not any(
            ps.segment.is_type(align_scope) for ps in parent_segment.path_to(sibling)
        ):
            siblings.append(sibling)
        else:
            reflow_logger.debug(
                " Purging a sibling because they're blocked "
                "by a boundary: %s",
                sibling,
            )

    # If the segment we're aligning, has position. Use that position.
    # If it doesn't, then use the provided one. We can't do sibling
    # analysis without it.
    if next_seg.pos_marker:
        next_pos = next_seg.pos_marker

    # Purge any siblings which are either on the same line or on another line and
    # have another index
    siblings_by_line: Dict[int, List[BaseSegment]] = defaultdict(list)
    for sibling in siblings:
        _pos = sibling.pos_marker
        # Siblings found by crawling the parsed tree should always have
        # a position marker.
        assert _pos
        siblings_by_line[_pos.working_line_no].append(sibling)

    # Sort all segments by position to easily access index information
    for line_siblings in siblings_by_line.values():
        line_siblings.sort(
            key=lambda s: cast(PositionMarker, s.pos_marker).working_line_pos
        )

    # Which "column" (index within its own line) is the element we're
    # aligning? Match it by working line position.
    target_index = next(
        idx
        for idx, segment in enumerate(siblings_by_line[next_pos.working_line_no])
        if (
            cast(PositionMarker, segment.pos_marker).working_line_pos
            == next_pos.working_line_pos
        )
    )

    # Now that we know the target index, we can extract the relevant segment from
    # all lines
    siblings = [
        segment
        for segments in siblings_by_line.values()
        for segment in (
            [segments[target_index]] if target_index < len(segments) else []
        )
    ]

    # If there's only one sibling, we have nothing to compare to. Default to a single
    # space.
    if len(siblings) <= 1:
        desired_space = " "
        reflow_logger.debug(
            " desired_space: %r (based on no other siblings)",
            desired_space,
        )
        return desired_space

    # Work out the current spacing before each.
    last_code: Optional[RawSegment] = None
    max_desired_line_pos = 0
    for seg in parent_segment.raw_segments:
        for sibling in siblings:
            # NOTE: We're asserting that there must have been
            # a last_code. Otherwise this won't work.
            if (
                seg.pos_marker
                and sibling.pos_marker
                and seg.pos_marker.working_loc == sibling.pos_marker.working_loc
                and last_code
            ):
                # The alignment point is just after the last code segment
                # preceding this sibling on its line.
                loc = last_code.pos_marker.working_loc_after(last_code.raw)
                reflow_logger.debug(
                    " loc for %s: %s from %s",
                    sibling,
                    loc,
                    last_code,
                )
                if loc[1] > max_desired_line_pos:
                    max_desired_line_pos = loc[1]
        if seg.is_code:
            last_code = seg

    # Pad out to one space beyond the widest preceding code position
    # found across all the sibling lines.
    desired_space = " " * (
        1 + max_desired_line_pos - whitespace_seg.pos_marker.working_line_pos
    )
    reflow_logger.debug(
        " desired_space: %r (based on max line pos of %s)",
        desired_space,
        max_desired_line_pos,
    )
    return desired_space
Work out spacing for instance of an `align` constraint.
_determine_aligned_inline_spacing
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/respace.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/respace.py
MIT
def _extract_alignment_config( constraint: str, ) -> Tuple[str, Optional[str], Optional[str]]: """Helper function to break apart an alignment config. >>> _extract_alignment_config("align:alias_expression") ('alias_expression', None, None) >>> _extract_alignment_config("align:alias_expression:statement") ('alias_expression', 'statement', None) >>> _extract_alignment_config("align:alias_expression:statement:bracketed") ('alias_expression', 'statement', 'bracketed') """ assert ":" in constraint alignment_config = constraint.split(":") assert alignment_config[0] == "align" seg_type = alignment_config[1] align_within = alignment_config[2] if len(alignment_config) > 2 else None align_scope = alignment_config[3] if len(alignment_config) > 3 else None reflow_logger.debug( " Alignment Config: %s, %s, %s", seg_type, align_within, align_scope, ) return seg_type, align_within, align_scope
Helper function to break apart an alignment config. >>> _extract_alignment_config("align:alias_expression") ('alias_expression', None, None) >>> _extract_alignment_config("align:alias_expression:statement") ('alias_expression', 'statement', None) >>> _extract_alignment_config("align:alias_expression:statement:bracketed") ('alias_expression', 'statement', 'bracketed')
_extract_alignment_config
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/respace.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/respace.py
MIT
def handle_respace__inline_with_space(
    pre_constraint: str,
    post_constraint: str,
    prev_block: Optional["ReflowBlock"],
    next_block: Optional["ReflowBlock"],
    root_segment: BaseSegment,
    segment_buffer: List[RawSegment],
    last_whitespace: RawSegment,
) -> Tuple[List[RawSegment], List[LintResult]]:
    """Check inline spacing is the right size.

    This forms one of the cases handled by .respace_point().

    This code assumes:
    - a ReflowPoint with no newlines.
    - a ReflowPoint which has _some_ whitespace.

    Given this we apply constraints to ensure the whitespace
    is of an appropriate size.

    Returns:
        A two-tuple of the (possibly mutated) segment buffer and any
        new lint results generated.
    """
    # Get some indices so that we can reference around them
    ws_idx = segment_buffer.index(last_whitespace)

    # Do we have either side set to "any"
    if "any" in [pre_constraint, post_constraint]:
        # In this instance - don't change anything.
        # e.g. this could mean there is a comment on one side.
        return segment_buffer, []

    # Do we have either side set to "touch"?
    if "touch" in [pre_constraint, post_constraint]:
        # In this instance - no whitespace is correct, This
        # means we should delete it.
        segment_buffer.pop(ws_idx)
        if next_block:
            description = (
                "Unexpected whitespace before "
                f"{pretty_segment_name(next_block.segments[0])}."
            )
        else:  # pragma: no cover
            # This clause has no test coverage because next_block is
            # normally provided.
            description = "Unexpected whitespace"
        return segment_buffer, [
            LintResult(
                last_whitespace,
                [LintFix.delete(last_whitespace)],
                # Should make description from constraints.
                description=description,
            ),
        ]

    # Handle left alignment & singles
    if (
        post_constraint.startswith("align") and next_block
    ) or pre_constraint == post_constraint == "single":
        # Determine the desired spacing, either as alignment or as a single.
        if post_constraint.startswith("align") and next_block:
            seg_type, align_within, align_scope = _extract_alignment_config(
                post_constraint
            )

            next_pos: Optional[PositionMarker]
            if next_block.segments[0].pos_marker:
                next_pos = next_block.segments[0].pos_marker
            elif last_whitespace.pos_marker:
                # Fall back to the end of the whitespace if the next segment
                # is an insertion which doesn't have a position yet.
                next_pos = last_whitespace.pos_marker.end_point_marker()
            # These second clauses are much less likely and so are excluded from
            # coverage. If we find a way of covering them, that would be great
            # but for now they exist as backups.
            elif prev_block and prev_block.segments[-1].pos_marker:  # pragma: no cover
                next_pos = prev_block.segments[-1].pos_marker.end_point_marker()
            else:  # pragma: no cover
                reflow_logger.info("Unable to find position marker for alignment.")
                next_pos = None

            # Defaults, used when we can't determine an alignment position.
            desired_space = " "
            desc = (
                "Expected only single space. "
                "Found "
                f"{last_whitespace.raw!r}."
            )

            if next_pos:
                desired_space = _determine_aligned_inline_spacing(
                    root_segment,
                    last_whitespace,
                    next_block.segments[0],
                    next_pos,
                    seg_type,
                    align_within,
                    align_scope,
                )
                desc = (
                    f"{seg_type!r} elements are expected to be aligned. Found "
                    "incorrect whitespace before "
                    f"{pretty_segment_name(next_block.segments[0])}: "
                    f"{last_whitespace.raw!r}."
                )
        else:
            if next_block:
                desc = (
                    "Expected only single space before "
                    f"{pretty_segment_name(next_block.segments[0])}. Found "
                    f"{last_whitespace.raw!r}."
                )
            else:  # pragma: no cover
                # This clause has no test coverage because next_block is
                # normally provided.
                desc = "Expected only single space. Found " f"{last_whitespace.raw!r}."
            desired_space = " "

        new_results: List[LintResult] = []

        if last_whitespace.raw != desired_space:
            # Replace the whitespace with one of the desired size.
            new_seg = last_whitespace.edit(desired_space)
            new_results.append(
                LintResult(
                    last_whitespace,
                    [
                        LintFix(
                            "replace",
                            anchor=last_whitespace,
                            edit=[new_seg],
                        )
                    ],
                    description=desc,
                )
            )
            segment_buffer[ws_idx] = new_seg

        return segment_buffer, new_results

    raise NotImplementedError(  # pragma: no cover
        f"Unexpected Constraints: {pre_constraint}, {post_constraint}"
    )
Check inline spacing is the right size. This forms one of the cases handled by .respace_point(). This code assumes: - a ReflowPoint with no newlines. - a ReflowPoint which has _some_ whitespace. Given this we apply constraints to ensure the whitespace is of an appropriate size.
handle_respace__inline_with_space
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/respace.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/respace.py
MIT
def handle_respace__inline_without_space(
    pre_constraint: str,
    post_constraint: str,
    prev_block: Optional["ReflowBlock"],
    next_block: Optional["ReflowBlock"],
    segment_buffer: List[RawSegment],
    existing_results: List[LintResult],
    anchor_on: str = "before",
) -> Tuple[List[RawSegment], List[LintResult], bool]:
    """Ensure spacing is the right size.

    This forms one of the cases handled by .respace_point().

    This code assumes:
    - a ReflowPoint with no newlines.
    - a ReflowPoint which has _no_ whitespace.

    Given this we apply constraints to either confirm no
    spacing is required or create some of the right size.

    Args:
        pre_constraint: Spacing constraint from the preceding block.
        post_constraint: Spacing constraint from the following block.
        prev_block: The preceding block, if there is one.
        next_block: The following block, if there is one.
        segment_buffer: The segments of the point being processed.
            Mutated in place if whitespace is inserted.
        existing_results: Results from earlier processing. One of their
            fixes may be mutated if we can piggyback on an existing
            insertion.
        anchor_on: Hint for which side to anchor a newly created fix on.

    Returns:
        A three-tuple of the (possibly extended) segment buffer, the
        (possibly extended or mutated) results, and a boolean
        indicating whether whitespace was added.
    """
    # Do we have either side set to "touch" or "any"
    if {"touch", "any"}.intersection([pre_constraint, post_constraint]):
        # In this instance - no whitespace is correct.
        # Either because there shouldn't be, or because "any"
        # means we shouldn't check.
        return segment_buffer, existing_results, False

    # Are we supposed to be aligning?
    elif post_constraint.startswith("align"):
        reflow_logger.debug(" Inserting Aligned Whitespace.")
        # TODO: We currently rely on a second pass to align
        # insertions. This is where we could devise alignment
        # in advance, but most of the alignment code relies on
        # having existing position markers for those insertions.
        # https://github.com/sqlfluff/sqlfluff/issues/4492
        desired_space = " "
        added_whitespace = WhitespaceSegment(desired_space)

    # Is it anything other than the default case?
    elif not (pre_constraint == post_constraint == "single"):  # pragma: no cover
        # TODO: This will get test coverage when configuration routines
        # are in properly.
        raise NotImplementedError(
            f"Unexpected Constraints: {pre_constraint}, {post_constraint}"
        )

    else:
        # Default to a single whitespace
        reflow_logger.debug(" Inserting Single Whitespace.")
        added_whitespace = WhitespaceSegment()

    # Add it to the buffer first (the easy bit). The hard bit
    # is to then determine how to generate the appropriate LintFix
    # objects.
    segment_buffer.append(added_whitespace)

    # So special handling here. If segments either side
    # already exist then we don't care which we anchor on
    # but if one is already an insertion (as shown by a lack)
    # of pos_marker, then we should piggy back on that pre-existing
    # fix.
    existing_fix = None
    insertion = None
    if prev_block and not prev_block.segments[-1].pos_marker:
        existing_fix = "after"
        insertion = prev_block.segments[-1]
    elif next_block and not next_block.segments[0].pos_marker:
        existing_fix = "before"
        insertion = next_block.segments[0]

    if existing_fix:
        reflow_logger.debug(" Detected existing fix %s", existing_fix)
        if not existing_results:  # pragma: no cover
            raise ValueError(
                "Fixes detected, but none passed to .respace(). "
                "This will cause conflicts."
            )
        # Find the fix
        assert insertion
        for res in existing_results:
            # Does it contain the insertion?
            # TODO: This feels ugly - eq for BaseSegment is different
            # to uuid matching for RawSegment. Perhaps this should be
            # more aligned. There might be a better way of doing this.
            for fix in res.fixes or []:
                if fix.edit and insertion.uuid in [elem.uuid for elem in fix.edit]:
                    break
            else:  # pragma: no cover
                continue
            break
        else:  # pragma: no cover
            reflow_logger.warning("Results %s", existing_results)
            raise ValueError(f"Couldn't find insertion for {insertion}")
        # Mutate the existing fix
        assert res
        assert fix
        assert fix in res.fixes
        assert fix.edit  # It's going to be an edit if we've picked it up.
        # Mutate the fix, it's still in the same result, and that result
        # is still in the existing_results.
        if existing_fix == "before":
            fix.edit = [cast(BaseSegment, added_whitespace)] + fix.edit
        elif existing_fix == "after":
            fix.edit = fix.edit + [cast(BaseSegment, added_whitespace)]
        # No need to add new results, because we mutated the existing.
        return segment_buffer, existing_results, True

    # Otherwise...
    reflow_logger.debug(" Not Detected existing fix. Creating new")
    if prev_block and next_block:
        desc = (
            "Expected single whitespace between "
            f"{pretty_segment_name(prev_block.segments[-1])} "
            f"and {pretty_segment_name(next_block.segments[0])}."
        )
    else:  # pragma: no cover
        # Something to fall back on if prev_block and next_block not provided.
        desc = "Expected single whitespace."
    # Take into account hint on where to anchor if given.
    if prev_block and anchor_on != "after":
        new_result = LintResult(
            # We do this shuffle, because for the CLI it's clearer if the
            # anchor for the error is at the point that the insertion will
            # happen which is the *start* of the next segment, even if
            # we're anchoring the fix on the previous.
            next_block.segments[0] if next_block else prev_block.segments[-1],
            fixes=[
                LintFix(
                    "create_after",
                    anchor=prev_block.segments[-1],
                    edit=[WhitespaceSegment()],
                )
            ],
            description=desc,
        )
    elif next_block:
        new_result = LintResult(
            next_block.segments[0],
            fixes=[
                LintFix(
                    "create_before",
                    anchor=next_block.segments[0],
                    edit=[WhitespaceSegment()],
                )
            ],
            description=desc,
        )
    else:  # pragma: no cover
        # BUG FIX: this exception was previously constructed but never
        # raised, so execution fell through to the return below and
        # failed with an UnboundLocalError on `new_result` instead of a
        # clear error.
        raise NotImplementedError(
            "Not set up to handle a missing _after_ and _before_."
        )
    return segment_buffer, existing_results + [new_result], True
Ensure spacing is the right size. This forms one of the cases handled by .respace_point(). This code assumes: - a ReflowPoint with no newlines. - a ReflowPoint which _no_ whitespace. Given this we apply constraints to either confirm no spacing is required or create some of the right size.
handle_respace__inline_without_space
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/respace.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/respace.py
MIT
def get_fixes(self) -> List[LintFix]: """Get the current fix buffer. We're hydrating them here directly from the LintResult objects, so for more accurate results, consider using .get_results(). This method is particularly useful when consolidating multiple results into one. """ return fixes_from_results(self.lint_results)
Get the current fix buffer. We're hydrating them here directly from the LintResult objects, so for more accurate results, consider using .get_results(). This method is particularly useful when consolidating multiple results into one.
get_fixes
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def get_results(self) -> List[LintResult]: """Return the current result buffer.""" return self.lint_results
Return the current result buffer.
get_results
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def get_raw(self) -> str: """Get the current raw representation.""" return "".join(elem.raw for elem in self.elements)
Get the current raw representation.
get_raw
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def _elements_from_raw_segments( segments: Sequence[RawSegment], reflow_config: ReflowConfig, depth_map: DepthMap ) -> ReflowSequenceType: """Construct reflow elements from raw segments. NOTE: ReflowBlock elements should only ever have one segment which simplifies iteration here. """ elem_buff: ReflowSequenceType = [] seg_buff: List[RawSegment] = [] for seg in segments: # NOTE: end_of_file is block-like rather than point-like. # This is to facilitate better evaluation of the ends of files. # NOTE: This also allows us to include literal placeholders for # whitespace only strings. if ( seg.is_type("whitespace", "newline", "indent") or (get_consumed_whitespace(seg) or "").isspace() ): # Add to the buffer and move on. seg_buff.append(seg) continue elif elem_buff or seg_buff: # There are elements. The last will have been a block. # Add a point before we add the block. NOTE: It may be empty. elem_buff.append(ReflowPoint(segments=tuple(seg_buff))) # Add the block, with config info. elem_buff.append( ReflowBlock.from_config( segments=(seg,), config=reflow_config, depth_info=depth_map.get_depth_info(seg), ) ) # Empty the buffer seg_buff = [] # If we ended with a buffer, apply it. # TODO: Consider removing this clause? if seg_buff: # pragma: no cover elem_buff.append(ReflowPoint(segments=tuple(seg_buff))) return elem_buff
Construct reflow elements from raw segments. NOTE: ReflowBlock elements should only ever have one segment which simplifies iteration here.
_elements_from_raw_segments
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def from_raw_segments( cls: Type["ReflowSequence"], segments: Sequence[RawSegment], root_segment: BaseSegment, config: FluffConfig, depth_map: Optional[DepthMap] = None, ) -> "ReflowSequence": """Construct a ReflowSequence from a sequence of raw segments. This is intended as a base constructor, which others can use. In particular, if no `depth_map` argument is provided, this method will generate one in a potentially inefficient way. If the calling method has access to a better way of inferring a depth map (for example because it has access to a common root segment for all the content), it should do that instead and pass it in. """ reflow_config = ReflowConfig.from_fluff_config(config) if depth_map is None: depth_map = DepthMap.from_raws_and_root(segments, root_segment) return cls( elements=cls._elements_from_raw_segments( segments, reflow_config=reflow_config, # NOTE: This pathway is inefficient. Ideally the depth # map should be constructed elsewhere and then passed in. depth_map=depth_map, ), root_segment=root_segment, reflow_config=reflow_config, depth_map=depth_map, )
Construct a ReflowSequence from a sequence of raw segments. This is intended as a base constructor, which others can use. In particular, if no `depth_map` argument is provided, this method will generate one in a potentially inefficient way. If the calling method has access to a better way of inferring a depth map (for example because it has access to a common root segment for all the content), it should do that instead and pass it in.
from_raw_segments
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def from_root( cls: Type["ReflowSequence"], root_segment: BaseSegment, config: FluffConfig ) -> "ReflowSequence": """Generate a sequence from a root segment. Args: root_segment (:obj:`BaseSegment`): The relevant root segment (usually the base :obj:`FileSegment`). config (:obj:`FluffConfig`): A config object from which to load the spacing behaviours of different segments. """ return cls.from_raw_segments( root_segment.raw_segments, root_segment, config=config, # This is the efficient route. We use it here because we can. depth_map=DepthMap.from_parent(root_segment), )
Generate a sequence from a root segment. Args: root_segment (:obj:`BaseSegment`): The relevant root segment (usually the base :obj:`FileSegment`). config (:obj:`FluffConfig`): A config object from which to load the spacing behaviours of different segments.
from_root
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def from_around_target(
    cls: Type["ReflowSequence"],
    target_segment: BaseSegment,
    root_segment: BaseSegment,
    config: FluffConfig,
    sides: str = "both",
) -> "ReflowSequence":
    """Generate a sequence around a target.

    Args:
        target_segment (:obj:`RawSegment`): The segment to center
            around when considering the sequence to construct.
        root_segment (:obj:`BaseSegment`): The relevant root
            segment (usually the base :obj:`FileSegment`).
        config (:obj:`FluffConfig`): A config object from which to
            load the spacing behaviours of different segments.
        sides (:obj:`str`): Limit the reflow sequence to just one
            side of the target. Default is two sided ("both"), but
            set to "before" or "after" to limit to either side.

    **NOTE**: We don't just expand to the first block around the
    target but to the first *code* element, which means we may
    swallow several `comment` blocks in the process.

    To evaluate reflow around a specific target, we need need to
    generate a sequence which goes for the preceding raw to the
    following raw.
    i.e. at least: block - point - block - point - block
    (where the central block is the target).
    """
    # There's probably a more efficient way than immediately
    # materialising the raw_segments for the whole root, but
    # it works. Optimise later.
    all_raws = root_segment.raw_segments

    target_raws = target_segment.raw_segments
    # A target with no raw segments has nothing to reflow around.
    assert target_raws
    pre_idx = all_raws.index(target_raws[0])
    post_idx = all_raws.index(target_raws[-1]) + 1
    # Retained only for the debug message below.
    initial_idx = (pre_idx, post_idx)
    if sides in ("both", "before"):
        # Catch at least the previous segment
        pre_idx -= 1
        # Walk backwards until we hit the first *code* element.
        for pre_idx in range(pre_idx, -1, -1):
            if all_raws[pre_idx].is_code:
                break
    if sides in ("both", "after"):
        # Walk forwards until we hit the first *code* element.
        for post_idx in range(post_idx, len(all_raws)):
            if all_raws[post_idx].is_code:
                break
        # Capture one more after the whitespace.
        post_idx += 1
    segments = all_raws[pre_idx:post_idx]
    reflow_logger.debug(
        "Generating ReflowSequence.from_around_target(). idx: %s. "
        "slice: %s:%s. raw: %r",
        initial_idx,
        pre_idx,
        post_idx,
        "".join(seg.raw for seg in segments),
    )
    return cls.from_raw_segments(segments, root_segment, config=config)
Generate a sequence around a target. Args: target_segment (:obj:`RawSegment`): The segment to center around when considering the sequence to construct. root_segment (:obj:`BaseSegment`): The relevant root segment (usually the base :obj:`FileSegment`). config (:obj:`FluffConfig`): A config object from which to load the spacing behaviours of different segments. sides (:obj:`str`): Limit the reflow sequence to just one side of the target. Default is two sided ("both"), but set to "before" or "after" to limit to either side. **NOTE**: We don't just expand to the first block around the target but to the first *code* element, which means we may swallow several `comment` blocks in the process. To evaluate reflow around a specific target, we need need to generate a sequence which goes for the preceding raw to the following raw. i.e. at least: block - point - block - point - block (where the central block is the target).
from_around_target
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def without(self, target: RawSegment) -> "ReflowSequence": """Returns a new :obj:`ReflowSequence` without the specified segment. This generates appropriate deletion :obj:`LintFix` objects to direct the linter to remove those elements. """ removal_idx = self._find_element_idx_with(target) if removal_idx == 0 or removal_idx == len(self.elements) - 1: raise NotImplementedError( # pragma: no cover "Unexpected removal at one end of a ReflowSequence." ) if isinstance(self.elements[removal_idx], ReflowPoint): raise NotImplementedError( # pragma: no cover "Not expected removal of whitespace in ReflowSequence." ) merged_point = ReflowPoint( segments=self.elements[removal_idx - 1].segments + self.elements[removal_idx + 1].segments, ) return ReflowSequence( elements=self.elements[: removal_idx - 1] + [merged_point] + self.elements[removal_idx + 2 :], root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, # Generate the fix to do the removal. lint_results=[LintResult(target, [LintFix.delete(target)])], )
Returns a new :obj:`ReflowSequence` without the specified segment. This generates appropriate deletion :obj:`LintFix` objects to direct the linter to remove those elements.
without
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def insert(
    self, insertion: RawSegment, target: RawSegment, pos: str = "before"
) -> "ReflowSequence":
    """Returns a new :obj:`ReflowSequence` with the new element inserted.

    Insertion is always relative to an existing element. Either before
    or after it as specified by `pos`. This generates appropriate
    creation :obj:`LintFix` objects to direct the linter to insert
    those elements.
    """
    assert pos in ("before", "after")
    target_idx = self._find_element_idx_with(target)
    # Are we trying to insert something whitespace-like?
    if insertion.is_type("whitespace", "indent", "newline"):  # pragma: no cover
        raise ValueError(
            "ReflowSequence.insert() does not support direct insertion of "
            "spacing elements such as whitespace or newlines"
        )

    # We're inserting something blocky. That means a new block AND a new point.
    # It's possible we try to _split_ a point by targeting a whitespace element
    # inside a larger point. For now this isn't supported.
    # NOTE: We use the depth info of the reference anchor, with the assumption
    # (I think reliable) that the insertion will be applied as a sibling of
    # the target.
    self.depth_map.copy_depth_info(target, insertion)
    new_block = ReflowBlock.from_config(
        segments=(insertion,),
        config=self.reflow_config,
        depth_info=self.depth_map.get_depth_info(target),
    )
    if isinstance(self.elements[target_idx], ReflowPoint):
        raise NotImplementedError(  # pragma: no cover
            "Can't insert relative to whitespace for now."
        )
    elif pos == "before":
        return ReflowSequence(
            # An empty point accompanies the new block to preserve the
            # block/point alternation of the sequence.
            elements=self.elements[:target_idx]
            + [new_block, ReflowPoint(())]
            + self.elements[target_idx:],
            root_segment=self.root_segment,
            reflow_config=self.reflow_config,
            depth_map=self.depth_map,
            # Generate the fix to do the removal.
            lint_results=[
                LintResult(target, [LintFix.create_before(target, [insertion])])
            ],
        )
    elif pos == "after":  # pragma: no cover
        # TODO: This doesn't get coverage - should it even exist?
        # Re-evaluate whether this code path is ever taken once more rules use
        # this.
        return ReflowSequence(
            elements=self.elements[: target_idx + 1]
            + [ReflowPoint(()), new_block]
            + self.elements[target_idx + 1 :],
            root_segment=self.root_segment,
            reflow_config=self.reflow_config,
            depth_map=self.depth_map,
            # Generate the fix to do the removal.
            lint_results=[
                LintResult(target, [LintFix.create_after(target, [insertion])])
            ],
        )
    raise ValueError(
        f"Unexpected value for ReflowSequence.insert(pos): {pos}"
    )  # pragma: no cover
Returns a new :obj:`ReflowSequence` with the new element inserted. Insertion is always relative to an existing element. Either before or after it as specified by `pos`. This generates appropriate creation :obj:`LintFix` objects to direct the linter to insert those elements.
insert
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def replace( self, target: BaseSegment, edit: Sequence[BaseSegment] ) -> "ReflowSequence": """Returns a new :obj:`ReflowSequence` with `edit` elements replaced. This generates appropriate replacement :obj:`LintFix` objects to direct the linter to modify those elements. """ target_raws = target.raw_segments assert target_raws edit_raws = list(chain.from_iterable(seg.raw_segments for seg in edit)) # Add the new segments to the depth map at the same level as the target. # First work out how much to trim by. trim_amount = len(target.path_to(target_raws[0])) reflow_logger.debug( "Replacement trim amount: %s.", trim_amount, ) for edit_raw in edit_raws: # NOTE: if target raws has more than one segment we take the depth info # of the first one. We trim to avoid including the implications of removed # "container" segments. self.depth_map.copy_depth_info(target_raws[0], edit_raw, trim=trim_amount) # It's much easier to just totally reconstruct the sequence rather # than do surgery on the elements. # TODO: The surgery is actually a good idea for long sequences now that # we have the depth map. current_raws = list( chain.from_iterable(elem.segments for elem in self.elements) ) start_idx = current_raws.index(target_raws[0]) last_idx = current_raws.index(target_raws[-1]) return ReflowSequence( self._elements_from_raw_segments( current_raws[:start_idx] + edit_raws + current_raws[last_idx + 1 :], reflow_config=self.reflow_config, # NOTE: the depth map has been mutated to include the new segments. depth_map=self.depth_map, ), root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, lint_results=[LintResult(target, [LintFix.replace(target, edit)])], )
Returns a new :obj:`ReflowSequence` with `edit` elements replaced. This generates appropriate replacement :obj:`LintFix` objects to direct the linter to modify those elements.
replace
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def respace(
    self, strip_newlines: bool = False, filter: str = "all"
) -> "ReflowSequence":
    """Returns a new :obj:`ReflowSequence` with points respaced.

    Args:
        strip_newlines (:obj:`bool`): Optionally strip newlines
            before respacing. This is primarily used on focused
            sequences to coerce objects onto a single line. This
            does not apply any prioritisation to which line breaks
            to remove and so is not a substitute for the full
            `reindent` or `reflow` methods.
        filter (:obj:`str`): Optionally filter which reflow points
            to respace. Default configuration is `all`. Other options
            are `newline` which only respaces points containing a
            `newline` or followed by an `end_of_file` marker, or
            `inline` which is the inverse of `newline`. This is most
            useful for filtering between trailing whitespace and
            fixes between content on a line.

    **NOTE** this method relies on the embodied results being
    correct so that we can build on them.
    """
    assert filter in (
        "all",
        "newline",
        "inline",
    ), f"Unexpected value for filter: {filter}"
    # Use the embodied fixes as a starting point.
    lint_results = self.get_results()
    new_elements: ReflowSequenceType = []
    for point, pre, post in self._iter_points_with_constraints():
        # We filter on the elements POST RESPACE. This is to allow
        # strict respacing to reclaim newlines.
        new_lint_results, new_point = point.respace_point(
            prev_block=pre,
            next_block=post,
            root_segment=self.root_segment,
            lint_results=lint_results,
            strip_newlines=strip_newlines,
        )
        # If filter has been set, optionally unset the returned values.
        # A point counts as a "newline" point if the respaced point
        # contains a newline segment or is followed by the end of the
        # file; otherwise it counts as "inline". Points excluded by the
        # filter are reset to their original (unrespaced) form.
        if (
            filter == "inline"
            # NOTE: We test on the NEW point.
            if (
                any(seg.is_type("newline") for seg in new_point.segments)
                # Or if it's followed by the end of file
                or (post and "end_of_file" in post.class_types)
            )
            else filter == "newline"
        ):
            # Reset the values
            reflow_logger.debug("  Filter %r applied. Resetting %s", filter, point)
            new_point = point
        # Otherwise apply the new fixes
        else:
            lint_results = new_lint_results

        if pre and (not new_elements or new_elements[-1] != pre):
            new_elements.append(pre)
        new_elements.append(new_point)
        if post:
            new_elements.append(post)

    return ReflowSequence(
        elements=new_elements,
        root_segment=self.root_segment,
        reflow_config=self.reflow_config,
        depth_map=self.depth_map,
        lint_results=lint_results,
    )
Returns a new :obj:`ReflowSequence` with points respaced. Args: strip_newlines (:obj:`bool`): Optionally strip newlines before respacing. This is primarily used on focused sequences to coerce objects onto a single line. This does not apply any prioritisation to which line breaks to remove and so is not a substitute for the full `reindent` or `reflow` methods. filter (:obj:`str`): Optionally filter which reflow points to respace. Default configuration is `all`. Other options are `line_break` which only respaces points containing a `newline` or followed by an `end_of_file` marker, or `inline` which is the inverse of `line_break`. This is most useful for filtering between trailing whitespace and fixes between content on a line. **NOTE** this method relies on the embodied results being correct so that we can build on them.
respace
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def rebreak(
    self, rebreak_type: Literal["lines", "keywords"] = "lines"
) -> "ReflowSequence":
    """Returns a new :obj:`ReflowSequence` with corrected line breaks.

    This intentionally **does not handle indentation**, as the existing
    indents are assumed to be correct.

    .. note:: Currently this only *moves* existing segments around line
       breaks (e.g. for operators and commas), but eventually this
       method will also handle line length considerations too.
    """
    if self.lint_results:
        raise NotImplementedError(  # pragma: no cover
            "rebreak cannot currently handle pre-existing embodied fixes."
        )

    # Validate the requested rebreak type up front, then pick the
    # appropriate rebreak routine to delegate to.
    if rebreak_type not in ("lines", "keywords"):  # pragma: no cover
        raise NotImplementedError(
            f"Rebreak type of `{rebreak_type}` is not supported."
        )
    rebreak_func = (
        rebreak_sequence if rebreak_type == "lines" else rebreak_keywords_sequence
    )
    elem_buff, lint_results = rebreak_func(self.elements, self.root_segment)

    return ReflowSequence(
        elements=elem_buff,
        root_segment=self.root_segment,
        reflow_config=self.reflow_config,
        depth_map=self.depth_map,
        lint_results=lint_results,
    )
Returns a new :obj:`ReflowSequence` corrected line breaks. This intentionally **does not handle indentation**, as the existing indents are assumed to be correct. .. note:: Currently this only *moves* existing segments around line breaks (e.g. for operators and commas), but eventually this method will also handle line length considerations too.
rebreak
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def reindent(self) -> "ReflowSequence":
    """Reindent lines within a sequence.

    Returns:
        A new :obj:`ReflowSequence` carrying the embodied results of
        evaluating (and where necessary correcting) the indentation
        of each line.

    Raises:
        NotImplementedError: If the sequence already carries embodied
            fixes. Reindentation must start from a clean sequence.
    """
    if self.lint_results:
        raise NotImplementedError(  # pragma: no cover
            # Fixed message: previously said "rebreak", copy-pasted
            # from the `rebreak` method above.
            "reindent cannot currently handle pre-existing embodied fixes."
        )

    # Construct a single indent unit (e.g. four spaces or one tab)
    # from the configured indent settings.
    single_indent = construct_single_indent(
        indent_unit=self.reflow_config.indent_unit,
        tab_space_size=self.reflow_config.tab_space_size,
    )

    reflow_logger.info("# Evaluating indents.")
    elements, indent_results = lint_indent_points(
        self.elements,
        single_indent=single_indent,
        skip_indentation_in=self.reflow_config.skip_indentation_in,
        allow_implicit_indents=self.reflow_config.allow_implicit_indents,
        ignore_comment_lines=self.reflow_config.ignore_comment_lines,
    )

    return ReflowSequence(
        elements=elements,
        root_segment=self.root_segment,
        reflow_config=self.reflow_config,
        depth_map=self.depth_map,
        lint_results=indent_results,
    )
Reindent lines within a sequence.
reindent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def break_long_lines(self) -> "ReflowSequence":
    """Rebreak any remaining long lines in a sequence.

    This assumes that reindent() has already been applied.
    """
    if self.lint_results:
        raise NotImplementedError(  # pragma: no cover
            "break_long_lines cannot currently handle pre-existing "
            "embodied fixes."
        )

    cfg = self.reflow_config
    # A single indent unit (e.g. four spaces), as configured.
    single_indent = construct_single_indent(
        indent_unit=cfg.indent_unit,
        tab_space_size=cfg.tab_space_size,
    )

    reflow_logger.info("# Evaluating line lengths.")
    elements, length_results = lint_line_length(
        self.elements,
        self.root_segment,
        single_indent=single_indent,
        line_length_limit=cfg.max_line_length,
        allow_implicit_indents=cfg.allow_implicit_indents,
        trailing_comments=cfg.trailing_comments,
    )

    return ReflowSequence(
        elements=elements,
        root_segment=self.root_segment,
        reflow_config=cfg,
        depth_map=self.depth_map,
        lint_results=length_results,
    )
Rebreak any remaining long lines in a sequence. This assumes that reindent() has already been applied.
break_long_lines
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/sequence.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/sequence.py
MIT
def get_consumed_whitespace(segment: Optional[RawSegment]) -> Optional[str]:
    """Extract the whitespace consumed by a literal placeholder, if any.

    Args:
        segment (:obj:`RawSegment`, optional): A segment to inspect.
            ``None`` is accepted and simply returns ``None``.

    Returns:
        The ``source_str`` of the segment if it is a ``placeholder``
        with a ``block_type`` of ``literal``. Otherwise ``None``.
    """
    # Guard clauses: only literal placeholders carry consumed whitespace.
    if not segment:
        return None
    if not segment.is_type("placeholder"):
        return None
    # NOTE: the original used `cast(TemplateSegment, ...)` here purely
    # for type checking; at runtime only the attributes matter.
    if segment.block_type != "literal":
        return None
    return segment.source_str
A helper function to extract possible consumed whitespace. Args: segment (:obj:`RawSegment`, optional): A segment to test for suitability and extract the source representation of if appropriate. If passed None, then returns None. Returns: Returns the :code:`source_str` if the segment is of type :code:`placeholder` and has a :code:`block_type` of :code:`literal`. Otherwise None.
get_consumed_whitespace
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def class_types(self) -> Set[str]:
    """The set of class types over all contained segments.

    Parallel to `BaseSegment.class_types`.
    """
    return type(self)._class_types(self.segments)
Get the set of contained class types. Parallel to `BaseSegment.class_types`
class_types
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def raw(self) -> str:
    """The combined raw text of all contained segments."""
    raw_parts = [seg.raw for seg in self.segments]
    return "".join(raw_parts)
Get the current raw representation.
raw
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def pos_marker(self) -> Optional[PositionMarker]:
    """The first position marker found among the contained segments.

    Returns ``None`` if no contained segment has a position marker.
    """
    return next(
        (seg.pos_marker for seg in self.segments if seg.pos_marker),
        None,
    )
Get the first position marker of the element.
pos_marker
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def num_newlines(self) -> int:
    """Count the newlines in this element.

    Newlines appear either as explicit newline segments or within the
    whitespace consumed by a literal placeholder; both are counted.
    """
    count = 0
    for seg in self.segments:
        if "newline" in seg.class_types:
            count += 1
        count += (get_consumed_whitespace(seg) or "").count("\n")
    return count
Return the number of newlines in this element. These newlines are either newline segments or contained within consumed sections of whitespace. This counts both.
num_newlines
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def is_all_unrendered(self) -> bool:
    """Whether every contained segment is unrendered.

    "Unrendered" means whitespace, indents, newlines, template loops
    or placeholders.

    Note:
        * ReflowBlocks will contain the placeholders and loops
        * ReflowPoints will contain whitespace, indents and newlines.
    """
    unrendered_types = (
        "whitespace",
        "placeholder",
        "newline",
        "indent",
        "template_loop",
    )
    return all(seg.is_type(*unrendered_types) for seg in self.segments)
Return whether this element is all unrendered. Returns True if contains only whitespace, indents, template loops or placeholders. Note: * ReflowBlocks will contain the placeholders and loops * ReflowPoints will contain whitespace, indents and newlines.
is_all_unrendered
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def from_config(
    cls: Type["ReflowBlock"],
    segments: Tuple[RawSegment, ...],
    config: ReflowConfig,
    depth_info: DepthInfo,
) -> "ReflowBlock":
    """Construct a ReflowBlock, inferring configuration from its segments.

    This is the primary route to construct a ReflowBlock: it resolves
    the spacing and line position configuration both for the block
    itself and for each level of the stack which surrounds it.
    """
    # Configuration for the block itself.
    block_config = config.get_block_config(cls._class_types(segments), depth_info)
    # Resolve any relevant configuration for each level of the
    # surrounding stack, keyed by the hash of that stack level.
    # NOTE: the loop variable is named `stack_hash` rather than `hash`
    # to avoid shadowing the builtin.
    stack_spacing_configs = {}
    line_position_configs = {}
    keyword_line_position_configs = {}
    for stack_hash, stack_class_types in zip(
        depth_info.stack_hashes, depth_info.stack_class_types
    ):
        stack_config = config.get_block_config(stack_class_types)
        if stack_config.spacing_within:
            stack_spacing_configs[stack_hash] = stack_config.spacing_within
        if stack_config.line_position:
            line_position_configs[stack_hash] = stack_config.line_position
        if stack_config.keyword_line_position:
            keyword_line_position_configs[stack_hash] = (
                stack_config.keyword_line_position
            )
    return cls(
        segments=segments,
        spacing_before=block_config.spacing_before,
        spacing_after=block_config.spacing_after,
        line_position=block_config.line_position,
        depth_info=depth_info,
        stack_spacing_configs=stack_spacing_configs,
        line_position_configs=line_position_configs,
        keyword_line_position=block_config.keyword_line_position,
        keyword_line_position_configs=keyword_line_position_configs,
    )
Construct a ReflowBlock while extracting relevant configuration. This is the primary route to construct a ReflowBlock, as is allows all of the inference of the spacing and position configuration from the segments it contains and the appropriate config objects.
from_config
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def _indent_description(indent: str) -> str:
    """Construct a human readable description of the indent.

    NOTE: We operate assuming that the "correct" indent is never a mix
    of tabs and spaces. That means if the provided indent *does* contain
    both, this is likely a case where we are matching a pre-existing
    indent and we can assume the *description* of that indent is
    non-critical. To handle that situation gracefully we just return
    "mixed indent".

    See: https://github.com/sqlfluff/sqlfluff/issues/4255
    """
    if not indent:
        return "no indent"
    if " " in indent and "\t" in indent:
        return "mixed indent"
    if indent.startswith(" "):
        # A pure-space indent should contain nothing but spaces.
        assert set(indent) == {" "}
        return f"indent of {len(indent)} spaces"
    if indent.startswith("\t"):  # pragma: no cover
        assert set(indent) == {"\t"}
        return f"indent of {len(indent)} tabs"
    raise NotImplementedError(  # pragma: no cover
        f"Invalid indent construction: {indent!r}"
    )
Construct a human readable description of the indent. NOTE: We operate assuming that the "correct" indent is never a mix of tabs and spaces. That means if the provided indent *does* contain both that this description is likely a case where we are matching a pre-existing indent, and can assume that the *description* of that indent is non-critical. To handle that situation gracefully we just return "Mixed Indent". See: https://github.com/sqlfluff/sqlfluff/issues/4255
_indent_description
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def from_combination(
    cls, first: Optional["IndentStats"], second: "IndentStats"
) -> "IndentStats":
    """Combine two consecutive IndentStats into one.

    This is mostly used for combining the effects of indent and dedent
    tokens either side of a comment.

    NOTE: The *first* is considered optional, because if we're calling
    this function, we're assuming that there's always a second.
    """
    # With no first set of stats, the second stands alone.
    if not first:
        return second
    # Otherwise fold the two together. The combined trough is the
    # lowest point reached across both, where the second trough is
    # offset by the net impulse of the first.
    combined_impulse = first.impulse + second.impulse
    combined_trough = min(first.trough, first.impulse + second.trough)
    return cls(combined_impulse, combined_trough, second.implicit_indents)
Create IndentStats from two consecutive IndentStats. This is mostly used for combining the effects of indent and dedent tokens either side of a comment. NOTE: The *first* is considered optional, because if we're calling this function, we're assuming that there's always a second.
from_combination
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def __init__(self, segments: Tuple[RawSegment, ...]):
    """Override the init method to calculate indent stats.

    NOTE(review): the use of ``object.__setattr__`` implies the class
    is a frozen dataclass (normal attribute assignment would raise) —
    confirm against the class definition.
    """
    object.__setattr__(self, "segments", segments)
    # Pre-compute the indent stats once so `get_indent_impulse` is cheap.
    object.__setattr__(self, "_stats", self._generate_indent_stats(segments))
Override the init method to calculate indent stats.
__init__
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def _get_indent_segment(self) -> Optional[RawSegment]:
    """Get the current indent segment (if there).

    Iterates *backward* from the end of the point, returning the
    whitespace which follows the last literal newline (i.e. the indent
    of the final line of the point), or ``None`` if no newline is found.

    NOTE: This only returns _untemplated_ indents. If templated newline
    or whitespace segments are found they are skipped.
    """
    indent: Optional[RawSegment] = None
    for seg in reversed(self.segments):
        if seg.pos_marker and not seg.pos_marker.is_literal():
            # Skip any templated elements.
            # NOTE: It must _have_ a position marker at this
            # point however to take this route. A segment
            # without a position marker at all, is an edit
            # or insertion, and so should still be considered.
            continue
        elif seg.is_type("newline"):
            # Hit a literal newline: whatever whitespace we saw after
            # it (possibly None) is the indent.
            return indent
        elif seg.is_type("whitespace"):
            # Remember the most recent whitespace; it only becomes the
            # indent if we subsequently find a newline.
            indent = seg
        elif "\n" in (get_consumed_whitespace(seg) or ""):
            # Consumed whitespace case.
            # NOTE: In this situation, we're not looking for
            # separate newline and indent segments, we're
            # making the assumption that they'll be together
            # which I think is a safe one for now.
            return seg
    # i.e. if we never find a newline, it's not an indent.
    return None
Get the current indent segment (if there). NOTE: This only returns _untemplated_ indents. If templated newline or whitespace segments are found they are skipped.
_get_indent_segment
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def get_indent(self) -> Optional[str]:
    """Get the current indent (if there).

    Returns ``None`` if the point contains no newlines at all, and
    ``""`` if there are newlines but no indent segment.
    """
    # No newlines means this point can't carry an indent.
    if not self.num_newlines():
        return None
    seg = self._get_indent_segment()
    consumed_whitespace = get_consumed_whitespace(seg)
    if consumed_whitespace:  # pragma: no cover
        # Return last bit after newline.
        # NOTE: Not tested, because usually this would happen
        # directly via _get_indent_segment.
        return consumed_whitespace.split("\n")[-1]
    if seg:
        return seg.raw
    return ""
Get the current indent (if there).
get_indent
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def get_indent_segment_vals(self, exclude_block_indents=False) -> List[int]:
    """Return the `indent_val` of each indent segment in this point.

    Args:
        exclude_block_indents: If True, skip indent segments which are
            tied to a block (i.e. those with a `block_uuid` set).
    """
    values = []
    for seg in self.segments:
        if not seg.is_type("indent"):
            continue
        # NOTE: the original `cast(Indent, seg)` was type-checking
        # only; at runtime we just read the attributes.
        if exclude_block_indents and seg.block_uuid:
            continue
        values.append(seg.indent_val)
    return values
Iterate through any indent segments and extract their values.
get_indent_segment_vals
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def _generate_indent_stats(
    segments: Sequence[RawSegment],
) -> IndentStats:
    """Generate the change in intended indent balance.

    This is the main logic which powers .get_indent_impulse()
    """
    running_sum = 0
    trough = 0
    implicit_indents = []
    for seg in segments:
        # Only indent segments affect the balance.
        if not seg.is_type("indent"):
            continue
        running_sum += seg.indent_val
        # Do we need to add a new implicit indent?
        if seg.is_implicit:
            implicit_indents.append(running_sum)
        # NOTE: We don't check for removal of implicit indents
        # because it's unlikely that one would be opened, and then
        # closed within the same point. That would probably be the
        # sign of a bug in the dialect.
        trough = min(trough, running_sum)
    return IndentStats(running_sum, trough, tuple(implicit_indents))
Generate the change in intended indent balance. This is the main logic which powers .get_indent_impulse()
_generate_indent_stats
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def get_indent_impulse(self) -> IndentStats:
    """Get the change in intended indent balance from this point.

    Returns:
        :obj:`IndentStats`: The stats pre-computed on construction
        (see ``__init__``), covering the net impulse, the trough and
        any implicit indents.
    """
    # Stats are computed once in `__init__` and cached on the instance.
    return self._stats
Get the change in intended indent balance from this point.
get_indent_impulse
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT
def indent_to(
    self,
    desired_indent: str,
    after: Optional[BaseSegment] = None,
    before: Optional[BaseSegment] = None,
    description: Optional[str] = None,
    source: Optional[str] = None,
) -> Tuple[List[LintResult], "ReflowPoint"]:
    """Coerce a point to have a particular indent.

    If the point currently contains no newlines, one will be introduced
    and any trailing whitespace will be effectively removed. More
    specifically, the newline is *inserted before* the existing
    whitespace, with the new indent being a *replacement* for that
    same whitespace.

    For placeholder newlines or indents we generate appropriate
    source fixes.

    Args:
        desired_indent: The indent to coerce to. Must not contain
            newlines.
        after: Optional segment used for fix anchoring and lint
            descriptions when new segments are created.
        before: As `after`, but preferred where both are supplied.
        description: Optional override for the lint description.
        source: Optional source attribution for the lint result.

    Returns:
        A tuple of the lint results describing the change and the new
        (coerced) :obj:`ReflowPoint`.
    """
    assert "\n" not in desired_indent, "Newline found in desired indent."

    # Get the indent (or in the case of no newline, the last whitespace)
    indent_seg = self._get_indent_segment()
    reflow_logger.debug(
        "Coercing indent %s to %r. (newlines: %s)",
        indent_seg,
        desired_indent,
        self.num_newlines(),
    )

    if indent_seg and indent_seg.is_type("placeholder"):
        # Handle the placeholder case.
        indent_seg = cast(TemplateSegment, indent_seg)
        # There should always be a newline, so assert that.
        assert "\n" in indent_seg.source_str
        # We should always replace the section _containing_ the
        # newline, rather than just bluntly inserting. This
        # makes slicing later easier.
        current_indent = indent_seg.source_str.split("\n")[-1]
        source_slice = slice(
            indent_seg.pos_marker.source_slice.stop - len(current_indent),
            indent_seg.pos_marker.source_slice.stop,
        )
        for existing_source_fix in indent_seg.source_fixes:  # pragma: no cover
            if slice_overlaps(existing_source_fix.source_slice, source_slice):
                reflow_logger.warning(
                    "Creating overlapping source fix. Results may be "
                    "unpredictable and this might be a sign of a bug. "
                    "Please report this along with your query.\n"
                    f"({existing_source_fix.source_slice} overlaps "
                    f"{source_slice})"
                )
        new_source_fix = SourceFix(
            desired_indent,
            source_slice,
            # The templated slice is going to be a zero slice _anyway_.
            indent_seg.pos_marker.templated_slice,
        )
        if new_source_fix in indent_seg.source_fixes:  # pragma: no cover
            # NOTE: If we're trying to reapply the same fix, don't.
            # Just return an error without the fixes. This is probably
            # a bug if we're taking this route, but this clause will help
            # catch bugs faster if they occur.
            reflow_logger.warning(
                "Attempted to apply a duplicate source fix to %r. "
                "Returning this time without fix.",
                indent_seg.pos_marker.source_str(),
            )
            fixes = []
            new_segments = self.segments
        else:
            if current_indent:
                new_source_str = (
                    indent_seg.source_str[: -len(current_indent)] + desired_indent
                )
            else:
                new_source_str = indent_seg.source_str + desired_indent
            assert "\n" in new_source_str
            new_placeholder = indent_seg.edit(
                source_fixes=[new_source_fix],
                source_str=new_source_str,
            )
            fixes = [LintFix.replace(indent_seg, [new_placeholder])]
            new_segments = tuple(
                new_placeholder if seg is indent_seg else seg
                for seg in self.segments
            )
        return [
            LintResult(
                indent_seg,
                fixes,
                description=description
                or f"Expected {_indent_description(desired_indent)}.",
                source=source,
            )
        ], ReflowPoint(new_segments)

    elif self.num_newlines():
        # There is already a newline. Is there an indent?
        if indent_seg:
            # Coerce existing indent to desired.
            if indent_seg.raw == desired_indent:
                # Trivial case. Indent already correct
                return [], self
            elif desired_indent == "":
                idx = self.segments.index(indent_seg)
                return [
                    LintResult(
                        indent_seg,
                        # Coerce to no indent. We don't want the indent. Delete it.
                        [LintFix.delete(indent_seg)],
                        description=description or "Line should not be indented.",
                        source=source,
                    )
                ], ReflowPoint(self.segments[:idx] + self.segments[idx + 1 :])

            # Standard case of an indent change.
            new_indent = indent_seg.edit(desired_indent)
            idx = self.segments.index(indent_seg)
            return [
                LintResult(
                    indent_seg,
                    [LintFix.replace(indent_seg, [new_indent])],
                    description=description
                    or f"Expected {_indent_description(desired_indent)}.",
                    source=source,
                )
            ], ReflowPoint(
                self.segments[:idx] + (new_indent,) + self.segments[idx + 1 :]
            )

        else:
            # There is a newline, but no indent. Make one after the newline
            # Find the index of the last newline (there _will_ be one because
            # we checked self.num_newlines() above).

            # Before going further, check we have a non-zero indent.
            if not desired_indent:
                # We're trying to coerce a non-existent indent to zero. This
                # means we're already ok.
                return [], self

            for idx in range(len(self.segments) - 1, -1, -1):
                # NOTE: Must be a _literal_ newline, not a templated one.
                # https://github.com/sqlfluff/sqlfluff/issues/4367
                if self.segments[idx].is_type("newline"):
                    if self.segments[idx].pos_marker.is_literal():
                        break

            new_indent = WhitespaceSegment(desired_indent)
            return [
                LintResult(
                    # The anchor for the *result* should be the segment
                    # *after* the newline, otherwise the location of the fix
                    # is confusing.
                    # For this method, `before` is optional, but normally
                    # passed. If it is there, use that as the anchor
                    # instead. We fall back to the last newline if not.
                    before if before else self.segments[idx],
                    # Rather than doing a `create_after` here, we're
                    # going to do a replace. This is effectively to give a hint
                    # to the linter that this is safe to do before a templated
                    # placeholder. This solves some potential bugs - although
                    # it feels a bit like a workaround.
                    [
                        LintFix.replace(
                            self.segments[idx],
                            [self.segments[idx], new_indent],
                        )
                    ],
                    description=description
                    or f"Expected {_indent_description(desired_indent)}.",
                    source=source,
                )
            ], ReflowPoint(
                self.segments[: idx + 1] + (new_indent,) + self.segments[idx + 1 :]
            )

    else:
        # There isn't currently a newline.
        new_newline = NewlineSegment()
        new_segs: List[RawSegment]
        # Check for whitespace
        # NOTE(review): this loop has no `break`, so `ws_seg` ends up as
        # the *first* whitespace segment in the point (the last one seen
        # when iterating in reverse) — confirm this is intentional.
        ws_seg = None
        for seg in self.segments[::-1]:
            if seg.is_type("whitespace"):
                ws_seg = seg
        if not ws_seg:
            # Work out the new segments. Always a newline, only whitespace if
            # there's a non zero indent.
            new_segs = [new_newline] + (
                [WhitespaceSegment(desired_indent)] if desired_indent else []
            )
            # There isn't a whitespace segment either. We need to insert one.
            # Do we have an anchor?
            if not before and not after:  # pragma: no cover
                raise NotImplementedError(
                    "Not set up to handle empty points in this "
                    "scenario without provided before/after "
                    f"anchor: {self.segments}"
                )
            # Otherwise make a new indent, attached to the relevant anchor.
            # Prefer anchoring before because it makes the labelling better.
            elif before:
                before_raw = (
                    cast(TemplateSegment, before).source_str
                    if before.is_type("placeholder")
                    else before.raw
                )
                fix = LintFix.create_before(before, new_segs)
                description = description or (
                    "Expected line break and "
                    f"{_indent_description(desired_indent)} "
                    f"before {before_raw!r}."
                )
            else:
                assert after  # mypy hint
                after_raw = (
                    cast(TemplateSegment, after).source_str
                    if after.is_type("placeholder")
                    else after.raw
                )
                fix = LintFix.create_after(after, new_segs)
                description = description or (
                    "Expected line break and "
                    f"{_indent_description(desired_indent)} "
                    f"after {after_raw!r}."
                )
            new_point = ReflowPoint(tuple(new_segs))
            anchor = before
        else:
            # There is whitespace. Coerce it to the right indent and add
            # a newline _before_. In the edge case that we're coercing to
            # _no indent_, edit existing indent to be the newline and leave
            # it there.
            if desired_indent == "":
                new_segs = [new_newline]
            else:
                new_segs = [new_newline, ws_seg.edit(desired_indent)]
            idx = self.segments.index(ws_seg)
            if not description:
                # Prefer before, because it makes the anchoring better.
                if before:
                    description = (
                        "Expected line break and "
                        f"{_indent_description(desired_indent)} "
                        f"before {before.raw!r}."
                    )
                elif after:
                    description = (
                        "Expected line break and "
                        f"{_indent_description(desired_indent)} "
                        f"after {after.raw!r}."
                    )
                else:  # pragma: no cover
                    # NOTE: Doesn't have test coverage because there's
                    # normally an `after` or `before` value, so this
                    # clause is unused.
                    description = (
                        "Expected line break and "
                        f"{_indent_description(desired_indent)}."
                    )
            fix = LintFix.replace(ws_seg, new_segs)
            new_point = ReflowPoint(
                self.segments[:idx] + tuple(new_segs) + self.segments[idx + 1 :]
            )
            anchor = ws_seg
        return [
            LintResult(anchor, fixes=[fix], description=description, source=source)
        ], new_point
Coerce a point to have a particular indent. If the point currently contains no newlines, one will be introduced and any trailing whitespace will be effectively removed. More specifically, the newline is *inserted before* the existing whitespace, with the new indent being a *replacement* for that same whitespace. For placeholder newlines or indents we generate appropriate source fixes.
indent_to
python
sqlfluff/sqlfluff
src/sqlfluff/utils/reflow/elements.py
https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/reflow/elements.py
MIT