code
stringlengths 26
870k
| docstring
stringlengths 1
65.6k
| func_name
stringlengths 1
194
| language
stringclasses 1
value | repo
stringlengths 8
68
| path
stringlengths 5
194
| url
stringlengths 46
254
| license
stringclasses 4
values |
---|---|---|---|---|---|---|---|
def is_capitalizable(character: str) -> bool:
    """Does the character have differing lower and upper-case versions?

    Args:
        character: The character (a one-character string) to check.

    Returns:
        ``True`` if ``.lower()`` and ``.upper()`` produce different
        results, i.e. the character can meaningfully be re-capitalised.
    """
    # Digits, punctuation etc. case-fold to themselves, so a direct
    # comparison of the two foldings is sufficient (and avoids the
    # verbose if/return-False/return-True pattern).
    return character.lower() != character.upper()
def _eval(self, context: RuleContext) -> Optional[List[LintResult]]:
    """Inconsistent capitalisation of keywords.

    The rule's ``memory`` accumulates both the cases seen so far that are
    known to be INconsistent and the current top choice for what the
    intended case is.
    """
    # NOTE: Given the dialect structure we can assume the targets have a parent.
    enclosing: BaseSegment = context.parent_stack[-1]
    excluded = context.segment.is_type(*self._exclude_types) or enclosing.is_type(
        *self._exclude_parent_types
    )
    # Used by CP03 (which inherits from this rule): a function_name with
    # more than one part is likely an existing user defined function
    # (UDF), and UDF names are case sensitive, so skip those too.
    is_multipart_udf = (
        enclosing.get_type() == "function_name" and len(enclosing.segments) != 1
    )
    if excluded or is_multipart_udf:
        return [LintResult(memory=context.memory)]
    return [self._handle_segment(context.segment, context)]
def _get_fix(self, segment: BaseSegment, fixed_raw: str) -> LintFix:
    """Build the ``LintFix`` for a segment with a known corrected raw value.

    May be overridden by subclasses, which is useful when the parse tree
    structure varies from this simple base case.
    """
    replacement = segment.edit(fixed_raw)
    return LintFix.replace(segment, [replacement])
def _init_capitalisation_policy(self, context: RuleContext):
    """Fetch & cache the capitalisation policy on the first evaluation."""
    # Locate whichever config keyword holds the policy for this rule.
    policy_key = next(
        key for key in self.config_keywords
        if key.endswith("capitalisation_policy")
    )
    self.cap_policy = getattr(self, policy_key)
    valid_options = get_config_info()[policy_key]["validation"]
    # "consistent" is a meta-option, not a concrete casing choice.
    self.cap_policy_opts = [opt for opt in valid_options if opt != "consistent"]
    # Use str() as CP04 uses bools which might otherwise be read as bool
    raw_ignore_words = str(getattr(self, "ignore_words"))
    if raw_ignore_words and raw_ignore_words != "None":
        self.ignore_words_list = self.split_comma_separated_string(
            raw_ignore_words.lower()
        )
    else:
        self.ignore_words_list = []
    self.ignore_templated_areas = context.config.get("ignore_templated_areas")
    self.logger.debug(
        f"Selected '{policy_key}': '{self.cap_policy}' from options "
        f"{self.cap_policy_opts}"
    )
    return (
        self.cap_policy,
        self.cap_policy_opts,
        self.ignore_words_list,
        self.ignore_templated_areas,
    )
def _eval(self, context: RuleContext) -> List[LintResult]:
    """Inconsistent capitalisation of datatypes.

    The rule ``memory`` tracks cases known to be inconsistent with what
    has been seen so far, plus the current top choice for the policy.
    """
    results: List[LintResult] = []
    _type_containers = ("primitive_type", "datetime_type_identifier", "data_type")
    # For some of these segments we want to run the code on their children.
    if context.segment.is_type(*_type_containers):
        for child in context.segment.segments:
            # We don't want to edit symbols, quoted things or identifiers
            # if they appear, nor anything that isn't a raw segment.
            is_excluded = child.is_type("symbol", "identifier", "quoted_literal")
            if is_excluded or not child.is_type("raw"):
                continue
            outcome = self._handle_segment(child, context)
            if outcome:
                results.append(outcome)
    # NOTE: Given the dialect structure we can assume the targets have a parent.
    enclosing: BaseSegment = context.parent_stack[-1]
    # Don't process it if it's likely to have been processed by the parent.
    if context.segment.is_type("data_type_identifier") and not enclosing.is_type(
        *_type_containers
    ):
        results.append(
            self._handle_segment(context.segment, context)
        )  # pragma: no cover
    return results
def get_configs_info() -> Dict[str, Any]:
    """Get additional rule config validations and descriptions.

    Returns:
        A mapping from config key name to a dict carrying an optional
        ``validation`` list of allowed values and a ``definition``
        description string used in documentation.
    """
    return {
        "capitalisation_policy": {
            "validation": ["consistent", "upper", "lower", "capitalise"],
            "definition": "The capitalisation policy to enforce.",
        },
        "extended_capitalisation_policy": {
            "validation": [
                "consistent",
                "upper",
                "lower",
                "pascal",
                "capitalise",
                "snake",
                "camel",
            ],
            "definition": (
                "The capitalisation policy to enforce, extended with PascalCase, "
                "snake_case, and camelCase. "
                "This is separate from ``capitalisation_policy`` as it should not be "
                # BUGFIX: the previous literal ended "...keywords." with no
                # trailing space, so the rendered docs read "keywords.Camel".
                "applied to keywords. "
                "Camel, Pascal, and Snake will never be inferred when the policy is "
                "set to consistent. This is because snake can cause destructive "
                "changes to the identifier, and unlinted code is too easily mistaken "
                "for camel and pascal. If, when set to consistent, no consistent "
                "case is found, it will default to upper."
            ),
        },
    }
def get_rules() -> List[Type[BaseRule]]:
    """Get plugin rules.

    NOTE: Rules are imported only on fetch to manage import times
    when rules aren't used.
    """
    from sqlfluff.rules.capitalisation.CP01 import Rule_CP01
    from sqlfluff.rules.capitalisation.CP02 import Rule_CP02
    from sqlfluff.rules.capitalisation.CP03 import Rule_CP03
    from sqlfluff.rules.capitalisation.CP04 import Rule_CP04
    from sqlfluff.rules.capitalisation.CP05 import Rule_CP05

    rules: List[Type[BaseRule]] = [
        Rule_CP01,
        Rule_CP02,
        Rule_CP03,
        Rule_CP04,
        Rule_CP05,
    ]
    return rules
def _get_additional_allowed_characters(self, dialect_name: str) -> str:
"""Returns additional allowed characters, with adjustments for dialect."""
result: Set[str] = set()
if self.additional_allowed_characters:
result.update(self.additional_allowed_characters)
if dialect_name == "bigquery":
# In BigQuery, also allow hyphens.
result.update("-")
if dialect_name == "snowflake":
# In Snowflake, external stage metadata uses $.
result.update("$")
return "".join(result) | Returns additional allowed characters, with adjustments for dialect. | _get_additional_allowed_characters | python | sqlfluff/sqlfluff | src/sqlfluff/rules/references/RF05.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/references/RF05.py | MIT |
def _eval(self, context: RuleContext) -> Optional[LintResult]:
    """Do not use special characters in object names."""
    # Config type hints
    self.quoted_identifiers_policy: str
    self.unquoted_identifiers_policy: str
    self.allow_space_in_identifier: bool
    self.additional_allowed_characters: str
    self.ignore_words: str
    self.ignore_words_regex: str

    # Confirm it's a single identifier.
    assert context.segment.is_type("naked_identifier", "quoted_identifier")

    # Get the ignore_words_list configuration.
    try:
        ignore_words_list = self.ignore_words_list
    except AttributeError:
        # First-time only, read the settings from configuration. This is
        # very slow.
        ignore_words_list = self._init_ignore_words_list()

    # Assume unquoted (we'll update if quoted)
    policy = self.unquoted_identifiers_policy

    identifier = context.segment.raw

    # Skip if in ignore list
    if ignore_words_list and identifier.lower() in ignore_words_list:
        return None

    # Skip if matches ignore regex
    if self.ignore_words_regex and regex.search(
        self.ignore_words_regex, identifier
    ):
        return LintResult(memory=context.memory)

    if self._is_aliased_select_clause_element(context):
        # If selects are aliased, ignore unaliased column reference
        return None

    # Do some extra processing for quoted identifiers.
    if context.segment.is_type("quoted_identifier"):
        # Update the default policy to quoted
        policy = self.quoted_identifiers_policy

        # Strip the quotes first
        identifier = context.segment.raw_normalized(casefold=False)

        # Skip if in ignore list - repeat check now we've stripped the quotes
        if ignore_words_list and identifier.lower() in ignore_words_list:
            return None

        # Skip if matches ignore regex - repeat check now we've stripped
        # the quotes
        if self.ignore_words_regex and regex.search(
            self.ignore_words_regex, identifier
        ):
            return LintResult(memory=context.memory)

        # PostgreSQL Extension allows the use of extensions.
        #
        # These extensions are often quoted identifiers.
        # (https://www.postgresql.org/docs/current/contrib.html)
        #
        # Allow quoted identifiers in extension references
        if (
            context.dialect.name in ["postgres"]
            and context.parent_stack
            and context.parent_stack[-1].is_type("extension_reference")
        ):
            return None

        # BigQuery table references are quoted in back ticks so allow dots
        #
        # It also allows a star at the end of table_references for wildcards
        # (https://cloud.google.com/bigquery/docs/querying-wildcard-tables)
        #
        # Strip both out before testing the identifier
        if (
            context.dialect.name in ["bigquery"]
            and context.parent_stack
            and context.parent_stack[-1].is_type("table_reference")
        ):
            if identifier and identifier[-1] == "*":
                identifier = identifier[:-1]
            identifier = identifier.replace(".", "")

        if (
            context.dialect.name in ["databricks", "sparksql"]
            and context.parent_stack
        ):
            # Databricks & SparkSQL file references for direct file query
            # are quoted in back ticks to allow for identifiers common
            # in file paths and regex patterns for path globbing
            # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html
            #
            # Path Glob Filters (done inline for SQL direct file query)
            # https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter
            #
            if context.parent_stack[-1].is_type("file_reference"):
                return None

            # Databricks & SparkSQL properties keys
            # used for setting table and runtime
            # configurations denote namespace using dots, so these are
            # removed before testing L057 to not trigger false positives
            # Runtime configurations:
            # https://spark.apache.org/docs/latest/configuration.html#application-properties
            # Example configurations for table:
            # https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#configuration
            #
            if context.parent_stack[-1].is_type("property_name_identifier"):
                identifier = identifier.replace(".", "")

        # Strip spaces if allowed (note a separate config as only valid for quoted
        # identifiers)
        if self.allow_space_in_identifier:
            identifier = identifier.replace(" ", "")

    # We always allow underscores so strip them out
    identifier = identifier.replace("_", "")

    # redshift allows a # at the beginning of temporary table names
    # NOTE(review): identifier[0] assumes the identifier is non-empty after
    # stripping underscores; an identifier of only underscores would raise
    # IndexError here on redshift — confirm upstream guarantees non-empty.
    if (
        context.dialect.name == "redshift"
        and identifier[0] == "#"
        and context.parent_stack
        and context.parent_stack[-1].is_type("table_reference")
    ):
        identifier = identifier[1:]

    # Strip out any additionally allowed characters (dialect dependent).
    additional_allowed_characters = self._get_additional_allowed_characters(
        context.dialect.name
    )
    if additional_allowed_characters:
        identifier = identifier.translate(
            str.maketrans("", "", additional_allowed_characters)
        )

    # Finally test if the remaining identifier is only made up of alphanumerics
    if identifiers_policy_applicable(policy, context.parent_stack) and not (
        identifier.isalnum()
    ):
        return LintResult(anchor=context.segment)

    return None
def _init_ignore_words_list(self) -> List[str]:
    """Read, cache and return the ignore-words list (first evaluation only)."""
    raw_config: str = str(getattr(self, "ignore_words"))
    if not raw_config or raw_config == "None":
        self.ignore_words_list = []
    else:
        self.ignore_words_list = self.split_comma_separated_string(
            raw_config.lower()
        )
    return self.ignore_words_list
def _eval(self, context: RuleContext) -> Optional[LintResult]:
    """Unnecessary quoted identifier."""
    # Config type hints
    self.prefer_quoted_identifiers: bool
    self.prefer_quoted_keywords: bool
    self.ignore_words: str
    self.ignore_words_regex: str
    self.case_sensitive: bool

    # Ignore some segment types
    if FunctionalContext(context).parent_stack.any(sp.is_type(*self._ignore_types)):
        return None

    # "Quoted" means the raw both starts AND ends with a quote character
    # (double quote, single quote or square bracket) — the regex matches
    # the *unquoted* shape, hence the `not`.
    identifier_is_quoted = not regex.search(
        r'^[^"\'[].+[^"\'\]]$', context.segment.raw
    )

    identifier_contents = context.segment.raw
    if identifier_is_quoted:
        # Strip the surrounding quote characters.
        identifier_contents = identifier_contents[1:-1]

    identifier_is_keyword = identifier_contents.upper() in context.dialect.sets(
        "reserved_keywords"
    ) or identifier_contents.upper() in context.dialect.sets("unreserved_keywords")

    if self.prefer_quoted_identifiers:
        context_policy = "naked_identifier"
    else:
        context_policy = "quoted_identifier"

    # Get the ignore_words_list configuration.
    ignore_words_list = self.ignore_words_list

    # Skip if in ignore list
    if ignore_words_list and identifier_contents.lower() in ignore_words_list:
        return None

    # Skip if matches ignore regex
    if self.ignore_words_regex and regex.search(
        self.ignore_words_regex, identifier_contents
    ):
        return LintResult(memory=context.memory)

    if self.prefer_quoted_keywords and identifier_is_keyword:
        if not identifier_is_quoted:
            return LintResult(
                context.segment,
                description=(
                    f"Missing quoted keyword identifier {identifier_contents}."
                ),
            )
        return None

    # Ignore the segments that are not of the same type as the defined policy above.
    # Also TSQL has a keyword called QUOTED_IDENTIFIER which maps to the name so
    # need to explicitly check for that.
    if not context.segment.is_type(
        context_policy
    ) or context.segment.raw.lower() in (
        "quoted_identifier",
        "naked_identifier",
    ):
        return None

    # Manage cases of identifiers must be quoted first.
    # Naked identifiers are _de facto_ making this rule fail as configuration forces
    # them to be quoted.
    # In this case, it cannot be fixed as which quote to use is dialect dependent
    if self.prefer_quoted_identifiers:
        return LintResult(
            context.segment,
            description=f"Missing quoted identifier {identifier_contents}.",
        )

    # Now we only deal with NOT forced quoted identifiers configuration
    # (meaning prefer_quoted_identifiers=False).

    # Retrieve NakedIdentifierSegment RegexParser for the dialect.
    naked_identifier_parser = cast(
        "RegexParser", context.dialect._library["NakedIdentifierSegment"]
    )
    anti_template = cast(str, naked_identifier_parser.anti_template)
    NakedIdentifierSegment = cast(
        Type[CodeSegment], context.dialect.get_segment("IdentifierSegment")
    )

    # For this to be a candidate for unquoting, it must:
    # - Casefold to it's current exact case. i.e. already be in the default
    #   casing of the dialect *unless case_sensitive mode is False*.
    # - be a valid naked identifier.
    # - not be a reserved keyword.
    # NOTE: If the identifier parser has no casefold defined, we assume that
    # there is no casefolding (i.e. that the dialect is case sensitive, and
    # even when unquoted, and therefore we should never unquote).
    # EXCEPT: if we're in a totally case insensitive dialect like DuckDB.
    is_case_insensitive_dialect = context.dialect.name in ("duckdb", "sparksql")
    if (
        not is_case_insensitive_dialect
        and self.case_sensitive
        and naked_identifier_parser.casefold
        and identifier_contents
        != naked_identifier_parser.casefold(identifier_contents)
    ):
        return None
    # Must match the naked identifier template...
    if not regex.fullmatch(
        naked_identifier_parser.template,
        identifier_contents,
        regex.IGNORECASE,
    ):
        return None
    # ...and must NOT match the anti-template (e.g. reserved keywords).
    if regex.fullmatch(
        anti_template,
        identifier_contents,
        regex.IGNORECASE,
    ):
        return None

    # Safe to unquote: propose replacing the quoted identifier with a
    # naked one carrying the same (unquoted) contents.
    return LintResult(
        context.segment,
        fixes=[
            LintFix.replace(
                context.segment,
                [
                    NakedIdentifierSegment(
                        raw=identifier_contents,
                        **naked_identifier_parser.segment_kwargs(),
                    )
                ],
            )
        ],
        description=f"Unnecessary quoted identifier {context.segment.raw}.",
    )
def ignore_words_list(self) -> List[str]:
    """Words that the rule should ignore.

    Cached so that it's only evaluated on the first pass.
    """
    raw_config: str = str(getattr(self, "ignore_words"))
    if not raw_config or raw_config == "None":
        return []
    return self.split_comma_separated_string(raw_config.lower())
def _eval(self, context: RuleContext) -> EvalResultType:
    """Override base class for dialects that use structs, or SELECT aliases."""
    # Config type hints
    self.force_enable: bool
    # Some dialects use structs (e.g. column.field) which look like
    # table references and so incorrectly trigger this rule.
    uses_structs = context.dialect.name in self._dialects_with_structs
    if uses_structs and not self.force_enable:
        return LintResult()
    if uses_structs:
        self._is_struct_dialect = True

    query: Query = Query.from_segment(context.segment, dialect=context.dialect)
    visited: Set = set()
    # Recursively visit and check each query in the tree.
    return list(self._visit_queries(query, visited))
def _iter_available_targets(
    self, query: Query, subquery: Optional[Query] = None
) -> Iterator[AliasInfo]:
    """Yield each valid alias target within the given query."""
    for selectable in query.selectables:
        select_info = selectable.select_info
        if not select_info:
            continue
        for alias in select_info.table_aliases:
            # Skip the subquery alias itself.
            if subquery and alias.from_expression_element.path_to(
                subquery.selectables[0].selectable
            ):
                continue
            # Yield aliases with an explicit reference string, or (in the
            # subquery case) those without an object reference.
            if alias.ref_str or (subquery and not alias.object_reference):
                yield alias
def _check_references(
    table_aliases: List[AliasInfo],
    standalone_aliases: List[BaseSegment],
    references: List[ObjectReferenceSegment],
    col_aliases: List[ColumnAliasInfo],
    single_table_references: str,
    is_struct_dialect: bool,
    fix_inconsistent_to: Optional[str],
    fixable: bool,
) -> Iterator[LintResult]:
    """Iterate through references and check consistency.

    Yields a ``LintResult`` for each reference that violates the
    configured ``single_table_references`` policy.
    NOTE(review): assumes ``table_aliases`` is non-empty — confirm at the
    call site.
    """
    # A buffer to keep any violations.
    col_alias_names: List[str] = [c.alias_identifier_name for c in col_aliases]
    table_ref_str: str = table_aliases[0].ref_str
    table_ref_str_source = table_aliases[0].segment
    # Check all the references that we have.
    seen_ref_types: Set[str] = set()
    for ref in references:
        this_ref_type: str = ref.qualification()
        if this_ref_type == "qualified" and is_struct_dialect:
            # If this col appears "qualified" check if it is more logically a struct.
            if next(ref.iter_raw_references()).part != table_ref_str:
                this_ref_type = "unqualified"

        lint_res = _validate_one_reference(
            single_table_references,
            ref,
            this_ref_type,
            standalone_aliases,
            table_ref_str,
            table_ref_str_source,
            col_alias_names,
            seen_ref_types,
            fixable,
        )
        # Record the type AFTER validation, so the first reference of a
        # new type is validated against what came before it.
        seen_ref_types.add(this_ref_type)
        if not lint_res:
            continue

        if fix_inconsistent_to and single_table_references == "consistent":
            # If we found a "consistent" error but we have a fix directive,
            # recurse with a different single_table_references value
            yield from _check_references(
                table_aliases,
                standalone_aliases,
                references,
                col_aliases,
                # NB vars are passed in a different order here
                single_table_references=fix_inconsistent_to,
                is_struct_dialect=is_struct_dialect,
                fix_inconsistent_to=None,
                fixable=fixable,
            )

        yield lint_res
def _eval(self, context: RuleContext) -> Optional[LintResult]:
    """Keywords should not be used as identifiers."""
    # Config type hints
    self.ignore_words_regex: str

    # Skip 1 letter identifiers. These can be datepart keywords
    # (e.g. "d" for Snowflake) but most people expect to be able to use them.
    if len(context.segment.raw) == 1:
        return LintResult(memory=context.memory)

    # Get the ignore list configuration and cache it
    try:
        ignore_words_list = self.ignore_words_list
    except AttributeError:
        # First-time only, read the settings from configuration.
        # So we can cache them for next time for speed.
        ignore_words_list = self._init_ignore_string()

    # Skip if in ignore list
    if ignore_words_list and context.segment.raw.lower() in ignore_words_list:
        return LintResult(memory=context.memory)

    # Skip if matches ignore regex
    if self.ignore_words_regex and regex.search(
        self.ignore_words_regex, context.segment.raw
    ):
        return LintResult(memory=context.memory)

    # Flag naked identifiers matching an *unreserved* keyword (per the
    # unquoted policy), and quoted identifiers whose quote-stripped
    # contents match any keyword (per the quoted policy).
    if (
        (
            context.segment.is_type("naked_identifier")
            and identifiers_policy_applicable(
                self.unquoted_identifiers_policy,  # type: ignore
                context.parent_stack,
            )
            and (
                context.segment.raw.upper()
                in context.dialect.sets("unreserved_keywords")
            )
        )
    ) or (
        (
            context.segment.is_type("quoted_identifier")
            and identifiers_policy_applicable(
                self.quoted_identifiers_policy, context.parent_stack  # type: ignore
            )
            and (
                # [1:-1] strips the surrounding quote characters.
                context.segment.raw.upper()[1:-1]
                in context.dialect.sets("unreserved_keywords")
                or context.segment.raw.upper()[1:-1]
                in context.dialect.sets("reserved_keywords")
            )
        )
    ):
        return LintResult(anchor=context.segment)
    else:
        return None
def _init_ignore_string(self) -> List[str]:
    """Called first time rule is evaluated to fetch & cache the ignore_words."""
    # Use str() in case bools are passed which might otherwise be read as bool
    raw_config = str(getattr(self, "ignore_words"))
    if raw_config and raw_config != "None":
        self.ignore_words_list = self.split_comma_separated_string(
            raw_config.lower()
        )
    else:
        self.ignore_words_list = []
    return self.ignore_words_list
def _is_root_from_clause(self, rule_context: RuleContext) -> bool:
    """Determine whether a subquery sits inside the ``FROM`` clause.

    Any subqueries in the ``from_clause`` should be ignored, unless they
    are a nested correlated query — i.e. we reach a ``where_clause``
    first when walking up the parent stack.
    """
    for ancestor in reversed(rule_context.parent_stack):
        if ancestor.is_type("from_clause"):
            return True
        if ancestor.is_type("where_clause"):
            return False
    return False
def _init_ignore_words_list(self) -> List[str]:
    """Read, cache and return the ignore-words list (first evaluation only)."""
    raw_config: str = str(getattr(self, "ignore_words"))
    if not raw_config or raw_config == "None":
        self.ignore_words_list = []
    else:
        self.ignore_words_list = self.split_comma_separated_string(
            raw_config.lower()
        )
    return self.ignore_words_list
def _find_sql_variables(self, rule_context: RuleContext) -> Set[str]:
    """Collect any ``DECLARE``d variable names in the whole linted file.

    This assumes that the declare statement is going to be used before
    any reference to it.
    """
    sql_variables: Set[str] = set()
    # BigQuery declared variables may only exist at the top of the file
    # or at the beginning of a `BEGIN` block. The risk of collision
    # _should_ be low and no `IF` chain searching should be required.
    if rule_context.dialect.name == "bigquery":
        root = rule_context.parent_stack[0]
        for declare in root.recursive_crawl("declare_segment"):
            for identifier in declare.get_children("identifier"):
                sql_variables.add(identifier.raw.lower())
    # TODO: Add any additional dialect specific variable names
    return sql_variables
def _get_table_refs(ref, dialect):
"""Given ObjectReferenceSegment, determine possible table references."""
tbl_refs = []
# First, handle any schema.table references.
for sr, tr in ref.extract_possible_multipart_references(
levels=[
ref.ObjectReferenceLevel.SCHEMA,
ref.ObjectReferenceLevel.TABLE,
]
):
tbl_refs.append((tr, (sr.part, tr.part)))
# Maybe check for simple table references. Two cases:
# - For most dialects, skip this if it's a schema+table reference -- the
# reference was specific, so we shouldn't ignore that by looking
# elsewhere.)
# - Always do this in BigQuery. BigQuery table references are frequently
# ambiguous because BigQuery SQL supports structures, making some
# multi-level "." references impossible to interpret with certainty.
# We may need to genericize this code someday to support other
# dialects. If so, this check should probably align somehow with
# whether the dialect overrides
# ObjectReferenceSegment.extract_possible_references().
if not tbl_refs or dialect.name in ["bigquery"]:
for tr in ref.extract_possible_references(
level=ref.ObjectReferenceLevel.TABLE
):
tbl_refs.append((tr, (tr.part,)))
return tbl_refs | Given ObjectReferenceSegment, determine possible table references. | _get_table_refs | python | sqlfluff/sqlfluff | src/sqlfluff/rules/references/RF01.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/references/RF01.py | MIT |
def get_configs_info() -> Dict[str, Any]:
    """Get additional rule config validations and descriptions."""
    configs: Dict[str, Any] = {}
    configs["single_table_references"] = {
        "validation": ["consistent", "qualified", "unqualified"],
        "definition": "The expectation for references in single-table select.",
    }
    configs["unquoted_identifiers_policy"] = {
        "validation": ["all", "aliases", "column_aliases", "table_aliases"],
        "definition": "Types of unquoted identifiers to flag violations for.",
    }
    configs["quoted_identifiers_policy"] = {
        "validation": ["all", "aliases", "column_aliases", "table_aliases", "none"],
        "definition": "Types of quoted identifiers to flag violations for.",
    }
    configs["allow_space_in_identifier"] = {
        "validation": [True, False],
        "definition": "Should spaces in identifiers be allowed?",
    }
    # NOTE: no "validation" key — free-form string config.
    configs["additional_allowed_characters"] = {
        "definition": (
            "Optional list of extra allowed characters, "
            "in addition to alphanumerics (A-Z, a-z, 0-9) and underscores."
        ),
    }
    configs["prefer_quoted_identifiers"] = {
        "validation": [True, False],
        "definition": (
            "If ``True``, requires every identifier to be quoted. "
            "Defaults to ``False``."
        ),
    }
    configs["prefer_quoted_keywords"] = {
        "validation": [True, False],
        "definition": (
            "If ``True``, requires every keyword used as an identifier to be "
            "quoted. Defaults to ``False``."
        ),
    }
    return configs
def get_rules() -> List[Type[BaseRule]]:
    """Get plugin rules.

    NOTE: Rules are imported only on fetch to manage import times
    when rules aren't used.
    """
    from sqlfluff.rules.references.RF01 import Rule_RF01
    from sqlfluff.rules.references.RF02 import Rule_RF02
    from sqlfluff.rules.references.RF03 import Rule_RF03
    from sqlfluff.rules.references.RF04 import Rule_RF04
    from sqlfluff.rules.references.RF05 import Rule_RF05
    from sqlfluff.rules.references.RF06 import Rule_RF06

    rules: List[Type[BaseRule]] = [
        Rule_RF01,
        Rule_RF02,
        Rule_RF03,
        Rule_RF04,
        Rule_RF05,
        Rule_RF06,
    ]
    return rules
def get_rules() -> List[Type[BaseRule]]:
    """Get plugin rules.

    NOTE: Rules are imported only on fetch to manage import times
    when rules aren't used.
    """
    from sqlfluff.rules.tsql.TQ01 import Rule_TQ01

    rules: List[Type[BaseRule]] = [Rule_TQ01]
    return rules
def _eval(self, context: RuleContext) -> EvalResultType:
"""Find self-aliased columns and fix them.
Checks the alias in the `SELECT` clause and see if the
alias identifier is same as the column identifier (self-alias).
If the column is self-aliased, then the `AS` keyword,
whitespaces and alias identifier is removed as part of the fix.
For example: `col_a as col_a,` is fixed to `col_a,`
"""
assert context.segment.is_type("select_clause")
violations = []
children: Segments = FunctionalContext(context).segment.children()
for clause_element in children.select(sp.is_type("select_clause_element")):
clause_element_raw_segments = (
clause_element.get_raw_segments()
) # col_a as col_a
column = clause_element.get_child("column_reference") # `col_a`
alias_expression = clause_element.get_child(
"alias_expression"
) # `as col_a`
# We're only interested in direct aliasing of columns (i.e. not
# and expression), so if that isn't the case, move on.
if not (alias_expression and column):
continue
# The column needs to be a naked_identifier or quoted_identifier
# (not positional identifier like $n in snowflake).
# Move on if not. Some column references have multiple elements
# (e.g. my_table.my_column), so only fetch the last available.
_column_elements = column.get_children(
"naked_identifier", "quoted_identifier"
)
if not _column_elements: # pragma: no cover
continue
column_identifier = _column_elements[-1]
# Fetch the whitespace between the reference and the alias.
whitespace = clause_element.get_child("whitespace") # ` `
# The alias can be the naked_identifier or the quoted_identifier
alias_identifier = alias_expression.get_child(
"naked_identifier", "quoted_identifier"
)
if not (whitespace and alias_identifier): # pragma: no cover
# We *should* expect all of these to be non-null, but some bug
# reports suggest that that isn't always the case for some
# dialects. In those cases, log a warning here, but don't
# flag it as a linting issue. Hopefully this will help
# better bug reports in future.
self.logger.warning(
"AL09 found an unexpected syntax in an alias expression. "
"Unable to determine if this is a self-alias. Please "
"report this as a bug on GitHub.\n\n"
f"Debug details: dialect: {context.dialect.name}, "
f"whitespace: {whitespace is not None}, "
f"alias_identifier: {alias_identifier is not None}, "
f"alias_expression: {clause_element.raw!r}."
)
continue
case_sensitive_dialects = ["clickhouse"]
# We compare the _exact_ raw value of the column identifier
# and the alias identifier (i.e. including quoting and casing).
# Resolving aliases & references with differing quoting and casing
# should be done in conjunction with RF06 & CP02 (see docstring).
if column_identifier.raw == alias_identifier.raw:
fixes: List[LintFix] = []
fixes.append(LintFix.delete(whitespace))
fixes.append(LintFix.delete(alias_expression))
violations.append(
LintResult(
anchor=clause_element_raw_segments[0],
description="Column should not be self-aliased.",
fixes=fixes,
)
)
# If *both* are unquoted, and we're in a dialect which isn't case
# sensitive for unquoted identifiers, then flag an error but don't
# suggest a fix. It's ambiguous about what the users intent was:
# i.e. did they mean to change the case (and so the correct
# resolution is quoting), or did they mistakenly add an unnecessary
# alias?
elif (
context.dialect.name not in case_sensitive_dialects
and column_identifier.is_type("naked_identifier")
and alias_identifier.is_type("naked_identifier")
and column_identifier.raw_upper == alias_identifier.raw_upper
):
violations.append(
LintResult(
anchor=clause_element_raw_segments[0],
description=(
"Ambiguous self alias. Either remove unnecessary "
"alias, or quote alias/reference to make case "
"change explicit."
),
)
)
return violations or None | Find self-aliased columns and fix them.
Checks the alias in the `SELECT` clause and see if the
alias identifier is same as the column identifier (self-alias).
If the column is self-aliased, then the `AS` keyword,
whitespaces and alias identifier is removed as part of the fix.
For example: `col_a as col_a,` is fixed to `col_a,` | _eval | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL09.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL09.py | MIT |
def _eval(self, context: RuleContext) -> Optional[List[LintResult]]:
"""Identify aliases in from clause and join conditions.
Find base table, table expressions in join, and other expressions in select
clause and decide if it's needed to report them.
"""
self.min_alias_length: Optional[int]
self.max_alias_length: Optional[int]
assert context.segment.is_type("select_statement")
children = FunctionalContext(context).segment.children()
from_expression_elements = children.recursive_crawl("from_expression_element")
return self._lint_aliases(from_expression_elements) or None | Identify aliases in from clause and join conditions.
Find base table, table expressions in join, and other expressions in select
clause and decide if it's needed to report them. | _eval | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL06.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL06.py | MIT |
def _lint_aliases(self, from_expression_elements) -> Optional[List[LintResult]]:
"""Lint all table aliases."""
# A buffer to keep any violations.
violation_buff = []
# For each table, check whether it is aliased, and if so check the
# lengths.
for from_expression_element in from_expression_elements:
table_expression = from_expression_element.get_child("table_expression")
table_ref = (
table_expression.get_child("object_reference")
if table_expression
else None
)
# If the from_expression_element has no object_reference - skip it
# An example case is a lateral flatten, where we have a function segment
# instead of a table_reference segment.
if not table_ref:
continue
# If there's no alias expression - skip it
alias_exp_ref = from_expression_element.get_child("alias_expression")
if alias_exp_ref is None:
continue
alias_identifier_ref = alias_exp_ref.get_child("identifier")
if self.min_alias_length is not None:
if len(alias_identifier_ref.raw) < self.min_alias_length:
violation_buff.append(
LintResult(
anchor=alias_identifier_ref,
description=(
"Aliases should be at least {} character(s) long."
).format(self.min_alias_length),
)
)
if self.max_alias_length is not None:
if len(alias_identifier_ref.raw) > self.max_alias_length:
violation_buff.append(
LintResult(
anchor=alias_identifier_ref,
description=(
"Aliases should be no more than {} character(s) long."
).format(self.max_alias_length),
)
)
return violation_buff or None | Lint all table aliases. | _lint_aliases | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL06.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL06.py | MIT |
def _is_alias_required(
self, from_expression_element: BaseSegment, dialect_name: str
) -> bool:
"""Given an alias, is it REQUIRED to be present?
There are a few circumstances where an alias is either required by the
dialect, or recommended by SQLFluff:
* Aliases are required in SOME, but not all dialects when there's a
VALUES clause.
* In the case of a nested SELECT, all dialect checked (MySQL, Postgres,
T-SQL) require an alias.
"""
# Look for a table_expression (i.e. VALUES clause) as a descendant of
# the FROM expression, potentially nested inside brackets. The reason we
# allow nesting in brackets is that in some dialects (e.g. TSQL), this
# is actually *required* in order for SQL Server to parse it.
for segment in from_expression_element.iter_segments(expanding=("bracketed",)):
if segment.is_type("table_expression"):
# Found a table expression. Does it have a VALUES clause?
if segment.get_child("values_clause"):
# Found a VALUES clause. Is this a dialect that requires
# VALUE clauses to be aliased?
return (
dialect_name in self._dialects_requiring_alias_for_values_clause
)
elif any(
seg.is_type(
"select_statement", "set_expression", "with_compound_statement"
)
for seg in segment.iter_segments(expanding=("bracketed",))
):
# The FROM expression is a derived table, i.e. a nested
# SELECT. In this case, the alias is required in every
# dialect we checked (MySQL, Postgres, T-SQL).
# https://pganalyze.com/docs/log-insights/app-errors/U115
return True
else:
# None of the special cases above applies, so the alias is
# not required.
return False
# This should never happen. Return False just to be safe.
return False # pragma: no cover | Given an alias, is it REQUIRED to be present?
There are a few circumstances where an alias is either required by the
dialect, or recommended by SQLFluff:
* Aliases are required in SOME, but not all dialects when there's a
VALUES clause.
* In the case of a nested SELECT, all dialect checked (MySQL, Postgres,
T-SQL) require an alias. | _is_alias_required | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL05.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL05.py | MIT |
def _eval(self, context: RuleContext) -> Optional[LintResult]:
"""Column expression without alias. Use explicit `AS` clause.
We look for the select_clause_element segment, and then evaluate
whether it has an alias segment or not and whether the expression
is complicated enough. `parent_stack` is to assess how many other
elements there are.
"""
functional_context = FunctionalContext(context)
segment = functional_context.segment
children = segment.children()
# If we have an alias its all good
if children.any(sp.is_type("alias_expression")):
return None
# Ignore if it's a function with EMITS clause as EMITS is equivalent to AS
if (
children.select(sp.is_type("function"))
.children()
.select(sp.is_type("emits_segment"))
):
return None
# Ignore if it's a cast_expression with non-function enclosed children
# For example, we do not want to ignore something like func()::type
# but we can ignore something like a::type
if children.children().select(
sp.is_type("cast_expression")
) and not children.children().select(
sp.is_type("cast_expression")
).children().any(
sp.is_type("function")
):
return None
parent_stack = functional_context.parent_stack
# Ignore if it is part of a CTE with column names
if (
parent_stack.last(sp.is_type("common_table_expression"))
.children()
.any(sp.is_type("cte_column_list"))
):
return None
# Ignore if using a columns expression. A nested function such as
# ``MIN(COLUMNS(*))`` will assign the same alias to all columns.
if len(children.recursive_crawl("columns_expression")) > 0:
return None
select_clause_children = children.select(sp.not_(sp.is_type("star")))
is_complex_clause = _recursively_check_is_complex(select_clause_children)
if not is_complex_clause:
return None
# No fixes, because we don't know what the alias should be,
# the user should document it themselves.
if self.allow_scalar: # type: ignore
# Check *how many* elements/columns there are in the select
# statement. If this is the only one, then we won't
# report an error.
immediate_parent = parent_stack.last()
elements = immediate_parent.children(sp.is_type("select_clause_element"))
num_elements = len(elements)
if num_elements > 1:
return LintResult(anchor=context.segment)
return None
return LintResult(anchor=context.segment) | Column expression without alias. Use explicit `AS` clause.
We look for the select_clause_element segment, and then evaluate
whether it has an alias segment or not and whether the expression
is complicated enough. `parent_stack` is to assess how many other
elements there are. | _eval | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL03.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL03.py | MIT |
def _eval(self, context: RuleContext) -> Optional[List[LintResult]]:
"""Identify aliases in from clause and join conditions.
Find base table, table expressions in join, and other expressions in select
clause and decide if it's needed to report them.
"""
# Config type hints
self.force_enable: bool
# Issue 2810: BigQuery has some tricky expectations (apparently not
# documented, but subject to change, e.g.:
# https://www.reddit.com/r/bigquery/comments/fgk31y/new_in_bigquery_no_more_backticks_around_table/)
# about whether backticks are required (and whether the query is valid
# or not, even with them), depending on whether the GCP project name is
# present, or just the dataset name. Since SQLFluff doesn't have access
# to BigQuery when it is looking at the query, it would be complex for
# this rule to do the right thing. For now, the rule simply disables
# itself.
if not self.force_enable:
return None
assert context.segment.is_type("select_statement")
children = FunctionalContext(context).segment.children()
from_clause_segment = children.select(sp.is_type("from_clause")).first()
base_table = (
from_clause_segment.children(sp.is_type("from_expression"))
.first()
.children(sp.is_type("from_expression_element"))
.first()
.children(sp.is_type("table_expression"))
.first()
.children(sp.is_type("object_reference"))
.first()
)
if not base_table:
return None
# A buffer for all table expressions in join conditions
from_expression_elements = []
column_reference_segments = []
after_from_clause = children.select(start_seg=from_clause_segment[0])
for clause in from_clause_segment + after_from_clause:
for from_expression_element in clause.recursive_crawl(
"from_expression_element"
):
from_expression_elements.append(from_expression_element)
for column_reference in clause.recursive_crawl("column_reference"):
column_reference_segments.append(column_reference)
return (
self._lint_aliases_in_join(
base_table[0] if base_table else None,
from_expression_elements,
column_reference_segments,
context.segment,
)
or None
) | Identify aliases in from clause and join conditions.
Find base table, table expressions in join, and other expressions in select
clause and decide if it's needed to report them. | _eval | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL07.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL07.py | MIT |
def _lint_aliases_in_join(
self, base_table, from_expression_elements, column_reference_segments, segment
) -> Optional[List[LintResult]]:
"""Lint and fix all aliases in joins - except for self-joins."""
# A buffer to keep any violations.
violation_buff = []
to_check = list(
self._filter_table_expressions(base_table, from_expression_elements)
)
# How many times does each table appear in the FROM clause?
table_counts = Counter(ai.table_ref.raw for ai in to_check)
# What is the set of aliases used for each table? (We are mainly
# interested in the NUMBER of different aliases used.)
table_aliases = defaultdict(set)
for ai in to_check:
if ai and ai.table_ref and ai.alias_identifier_ref:
table_aliases[ai.table_ref.raw].add(ai.alias_identifier_ref.raw)
# For each aliased table, check whether to keep or remove it.
for alias_info in to_check:
# If the same table appears more than once in the FROM clause with
# different alias names, do not consider removing its aliases.
# The aliases may have been introduced simply to make each
# occurrence of the table independent within the query.
if (
table_counts[alias_info.table_ref.raw] > 1
and len(table_aliases[alias_info.table_ref.raw]) > 1
):
continue
select_clause = segment.get_child("select_clause")
ids_refs = []
# Find all references to alias in select clause
if alias_info.alias_identifier_ref:
alias_name = alias_info.alias_identifier_ref.raw
for alias_with_column in select_clause.recursive_crawl(
"object_reference"
):
used_alias_ref = alias_with_column.get_child("identifier")
if used_alias_ref and used_alias_ref.raw == alias_name:
ids_refs.append(used_alias_ref)
# Find all references to alias in column references
for exp_ref in column_reference_segments:
used_alias_ref = exp_ref.get_child("identifier")
# exp_ref.get_child('dot') ensures that the column reference includes a
# table reference
if (
used_alias_ref
and used_alias_ref.raw == alias_name
and exp_ref.get_child("dot")
):
ids_refs.append(used_alias_ref)
# Fixes for deleting ` as sth` and for editing references to aliased tables
# Note unparsable errors have cause the delete to fail (see #2484)
# so check there is a d before doing deletes.
fixes: List[LintFix] = []
fixes += [
LintFix.delete(d)
for d in [alias_info.alias_exp_ref, alias_info.whitespace_ref]
if d
]
for alias in [alias_info.alias_identifier_ref, *ids_refs]:
if alias:
identifier_parts = alias_info.table_ref.raw.split(".")
edits: List[BaseSegment] = []
for part in identifier_parts:
if edits:
edits.append(SymbolSegment(".", type="dot"))
edits.append(IdentifierSegment(part, type="naked_identifier"))
fixes.append(
LintFix.replace(
alias,
edits,
source=[alias_info.table_ref],
)
)
violation_buff.append(
LintResult(
anchor=alias_info.alias_identifier_ref,
description="Avoid aliases in from clauses and join conditions.",
fixes=fixes,
)
)
return violation_buff or None | Lint and fix all aliases in joins - except for self-joins. | _lint_aliases_in_join | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL07.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL07.py | MIT |
def _eval(self, context: RuleContext) -> Optional[LintResult]:
"""Implicit aliasing of table/column not allowed. Use explicit `AS` clause.
We look for the alias segment, and then evaluate its parent and whether
it contains an AS keyword. This is the _eval function for both AL01 and AL02.
"""
# Config type hints
self.aliasing: str
# AL01 is disabled for Oracle, still run for AL02.
if context.dialect.name == "oracle" and self.name == "aliasing.table":
return None
assert context.segment.is_type("alias_expression")
if context.parent_stack[-1].is_type(*self._target_parent_types):
# Search for an AS keyword.
as_keyword: Optional[BaseSegment]
for as_keyword in context.segment.segments:
if as_keyword.raw_upper == "AS":
break
else:
as_keyword = None
if as_keyword:
if self.aliasing == "implicit":
self.logger.debug("Removing AS keyword and respacing.")
return LintResult(
anchor=as_keyword,
# Generate the fixes to remove and respace accordingly.
fixes=ReflowSequence.from_around_target(
as_keyword,
context.parent_stack[0],
config=context.config,
)
.without(cast(RawSegment, as_keyword))
.respace()
.get_fixes(),
)
elif self.aliasing != "implicit":
self.logger.debug("Inserting AS keyword and respacing.")
for identifier in context.segment.raw_segments:
if identifier.is_code:
break
else: # pragma: no cover
raise NotImplementedError(
"Failed to find identifier. Raise this as a bug on GitHub."
)
return LintResult(
anchor=context.segment,
# Work out the insertion and reflow fixes.
fixes=ReflowSequence.from_around_target(
identifier,
context.parent_stack[0],
config=context.config,
# Only reflow before, otherwise we catch too much.
sides="before",
)
.insert(
KeywordSegment("AS"),
target=identifier,
pos="before",
)
.respace()
.get_fixes(),
)
return None | Implicit aliasing of table/column not allowed. Use explicit `AS` clause.
We look for the alias segment, and then evaluate its parent and whether
it contains an AS keyword. This is the _eval function for both AL01 and AL02. | _eval | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL01.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL01.py | MIT |
def _eval(self, context: RuleContext) -> EvalResultType:
"""Walk through select clauses, looking for matching identifiers."""
assert context.segment.is_type("select_clause")
used_aliases: Dict[str, BaseSegment] = {}
violations = []
# Work through each of the elements
for clause_element in context.segment.get_children("select_clause_element"):
# Is there an alias expression?
alias_expression = clause_element.get_child("alias_expression")
column_alias: Optional[BaseSegment] = None
if alias_expression:
# Get the alias (it will be the next code element after AS)
seg: Optional[BaseSegment] = None
for seg in alias_expression.segments:
if not seg or not seg.is_code or seg.raw_upper == "AS":
continue
break
assert seg
column_alias = seg
# No alias, the only other thing we'll track are column references.
else:
column_reference = clause_element.get_child("column_reference")
if column_reference:
# We don't want the whole reference, just the last section.
# If it is qualified, take the last bit. Otherwise, we still
# take the last bit but it shouldn't make a difference.
column_alias = column_reference.segments[-1]
# If we don't have an alias to work with, just skip this element
if not column_alias:
continue
# NOTE: Always case insensitive, see docstring for why.
_key = column_alias.raw_upper
# Strip any quote tokens
_key = _key.strip("\"'`")
# Otherwise check whether it's been used before
if _key in used_aliases:
# It has.
previous = used_aliases[_key]
assert previous.pos_marker
violations.append(
LintResult(
anchor=column_alias,
description=(
"Reuse of column alias "
f"{column_alias.raw!r} from line "
f"{previous.pos_marker.line_no}."
),
)
)
else:
# It's not, save it to check against others.
used_aliases[_key] = clause_element
return violations | Walk through select clauses, looking for matching identifiers. | _eval | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL08.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL08.py | MIT |
def get_configs_info() -> Dict[str, Any]:
"""Get additional rule config validations and descriptions."""
return {
"aliasing": {
"validation": ["implicit", "explicit"],
"definition": (
"Should alias have an explicit AS or is implicit aliasing required?"
),
},
"allow_scalar": {
"validation": [True, False],
"definition": (
"Whether or not to allow a single element in the "
" select clause to be without an alias."
),
},
"alias_case_check": {
"validation": [
"dialect",
"case_insensitive",
"quoted_cs_naked_upper",
"quoted_cs_naked_lower",
"case_sensitive",
],
"definition": "How to handle comparison casefolding in an alias.",
},
"min_alias_length": {
"validation": range(1000),
"definition": (
"The minimum length of an alias to allow without raising a violation."
),
},
"max_alias_length": {
"validation": range(1000),
"definition": (
"The maximum length of an alias to allow without raising a violation."
),
},
} | Get additional rule config validations and descriptions. | get_configs_info | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/__init__.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/__init__.py | MIT |
def get_rules() -> List[Type[BaseRule]]:
"""Get plugin rules.
NOTE: Rules are imported only on fetch to manage import times
when rules aren't used.
"""
from sqlfluff.rules.aliasing.AL01 import Rule_AL01
from sqlfluff.rules.aliasing.AL02 import Rule_AL02
from sqlfluff.rules.aliasing.AL03 import Rule_AL03
from sqlfluff.rules.aliasing.AL04 import Rule_AL04
from sqlfluff.rules.aliasing.AL05 import Rule_AL05
from sqlfluff.rules.aliasing.AL06 import Rule_AL06
from sqlfluff.rules.aliasing.AL07 import Rule_AL07
from sqlfluff.rules.aliasing.AL08 import Rule_AL08
from sqlfluff.rules.aliasing.AL09 import Rule_AL09
return [
Rule_AL01,
Rule_AL02,
Rule_AL03,
Rule_AL04,
Rule_AL05,
Rule_AL06,
Rule_AL07,
Rule_AL08,
Rule_AL09,
] | Get plugin rules.
NOTE: Rules are imported only on fetch to manage import times
when rules aren't used. | get_rules | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/__init__.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/__init__.py | MIT |
def _lint_references_and_aliases(
self,
table_aliases: List[AliasInfo],
standalone_aliases: List[BaseSegment],
references: List[ObjectReferenceSegment],
col_aliases: List[ColumnAliasInfo],
using_cols: List[BaseSegment],
parent_select: Optional[BaseSegment],
rule_context: RuleContext,
) -> Optional[List[LintResult]]:
"""Check whether any aliases are duplicates.
NB: Subclasses of this error should override this function.
"""
if parent_select:
parent_select_info = get_select_statement_info(
parent_select, rule_context.dialect
)
if parent_select_info:
# If we are looking at a subquery, include any table references
for table_alias in parent_select_info.table_aliases:
if table_alias.from_expression_element.path_to(
rule_context.segment
):
# Skip the subquery alias itself
continue
table_aliases.append(table_alias)
# Are any of the aliases the same?
duplicate = set()
for a1, a2 in itertools.combinations(table_aliases, 2):
# Compare the strings
if a1.ref_str == a2.ref_str and a1.ref_str:
duplicate.add(a2)
if duplicate:
return [
LintResult(
# Reference the element, not the string.
anchor=aliases.segment,
description=(
"Duplicate table alias {!r}. Table " "aliases should be unique."
).format(aliases.ref_str),
)
for aliases in duplicate
]
else:
return None | Check whether any aliases are duplicates.
NB: Subclasses of this error should override this function. | _lint_references_and_aliases | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL04.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL04.py | MIT |
def _eval(self, context: RuleContext) -> EvalResultType:
"""Get References and Aliases and allow linting.
This rule covers a lot of potential cases of odd usages of
references, see the code for each of the potential cases.
Subclasses of this rule should override the
`_lint_references_and_aliases` method.
"""
assert context.segment.is_type("select_statement")
select_info = get_select_statement_info(context.segment, context.dialect)
if not select_info:
return None
# Work out if we have a parent select function
parent_select = None
for seg in reversed(context.parent_stack):
if seg.is_type("select_statement"):
parent_select = seg
break
# Pass them all to the function that does all the work.
# NB: Subclasses of this rules should override the function below
return self._lint_references_and_aliases(
select_info.table_aliases,
select_info.standalone_aliases,
select_info.reference_buffer,
select_info.col_aliases,
select_info.using_cols,
parent_select,
context,
) | Get References and Aliases and allow linting.
This rule covers a lot of potential cases of odd usages of
references, see the code for each of the potential cases.
Subclasses of this rule should override the
`_lint_references_and_aliases` method. | _eval | python | sqlfluff/sqlfluff | src/sqlfluff/rules/aliasing/AL04.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/rules/aliasing/AL04.py | MIT |
def list_rules() -> List[RuleTuple]:
"""Return a list of available rule tuples."""
linter = Linter()
return linter.rule_tuples() | Return a list of available rule tuples. | list_rules | python | sqlfluff/sqlfluff | src/sqlfluff/api/info.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/api/info.py | MIT |
def list_dialects() -> List[DialectTuple]:
"""Return a list of available dialect info."""
return list(dialect_readout()) | Return a list of available dialect info. | list_dialects | python | sqlfluff/sqlfluff | src/sqlfluff/api/info.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/api/info.py | MIT |
def get_simple_config(
dialect: Optional[str] = None,
rules: Optional[List[str]] = None,
exclude_rules: Optional[List[str]] = None,
config_path: Optional[str] = None,
) -> FluffConfig:
"""Get a config object from simple API arguments."""
# Create overrides for simple API arguments.
overrides: ConfigMappingType = {}
if dialect is not None:
# Check the requested dialect exists and is valid.
try:
dialect_selector(dialect)
except SQLFluffUserError as err: # pragma: no cover
raise SQLFluffUserError(f"Error loading dialect '{dialect}': {str(err)}")
except KeyError:
raise SQLFluffUserError(f"Error: Unknown dialect '{dialect}'")
overrides["dialect"] = dialect
if rules is not None:
overrides["rules"] = ",".join(rules)
if exclude_rules is not None:
overrides["exclude_rules"] = ",".join(exclude_rules)
# Instantiate a config object.
try:
return FluffConfig.from_root(
extra_config_path=config_path,
ignore_local_config=True,
overrides=overrides,
)
except SQLFluffUserError as err: # pragma: no cover
raise SQLFluffUserError(f"Error loading config: {str(err)}") | Get a config object from simple API arguments. | get_simple_config | python | sqlfluff/sqlfluff | src/sqlfluff/api/simple.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/api/simple.py | MIT |
def lint(
sql: str,
dialect: str = "ansi",
rules: Optional[List[str]] = None,
exclude_rules: Optional[List[str]] = None,
config: Optional[FluffConfig] = None,
config_path: Optional[str] = None,
) -> List[Dict[str, Any]]:
"""Lint a SQL string.
Args:
sql (:obj:`str`): The SQL to be linted.
dialect (:obj:`str`, optional): A reference to the dialect of the SQL
to be linted. Defaults to `ansi`.
rules (:obj:`Optional[List[str]`, optional): A list of rule
references to lint for. Defaults to None.
exclude_rules (:obj:`Optional[List[str]`, optional): A list of rule
references to avoid linting for. Defaults to None.
config (:obj:`Optional[FluffConfig]`, optional): A configuration object
to use for the operation. Defaults to None.
config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config,
which is only used if a `config` is not already provided.
Defaults to None.
Returns:
:obj:`List[Dict[str, Any]]` for each violation found.
"""
cfg = config or get_simple_config(
dialect=dialect,
rules=rules,
exclude_rules=exclude_rules,
config_path=config_path,
)
linter = Linter(config=cfg)
result = linter.lint_string_wrapped(sql)
result_records = result.as_records()
# Return just the violations for this file
return [] if not result_records else result_records[0]["violations"] | Lint a SQL string.
Args:
sql (:obj:`str`): The SQL to be linted.
dialect (:obj:`str`, optional): A reference to the dialect of the SQL
to be linted. Defaults to `ansi`.
rules (:obj:`Optional[List[str]`, optional): A list of rule
references to lint for. Defaults to None.
exclude_rules (:obj:`Optional[List[str]`, optional): A list of rule
references to avoid linting for. Defaults to None.
config (:obj:`Optional[FluffConfig]`, optional): A configuration object
to use for the operation. Defaults to None.
config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config,
which is only used if a `config` is not already provided.
Defaults to None.
Returns:
:obj:`List[Dict[str, Any]]` for each violation found. | lint | python | sqlfluff/sqlfluff | src/sqlfluff/api/simple.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/api/simple.py | MIT |
def fix(
sql: str,
dialect: str = "ansi",
rules: Optional[List[str]] = None,
exclude_rules: Optional[List[str]] = None,
config: Optional[FluffConfig] = None,
config_path: Optional[str] = None,
fix_even_unparsable: Optional[bool] = None,
) -> str:
"""Fix a SQL string.
Args:
sql (:obj:`str`): The SQL to be fixed.
dialect (:obj:`str`, optional): A reference to the dialect of the SQL
to be fixed. Defaults to `ansi`.
rules (:obj:`Optional[List[str]`, optional): A subset of rule
references to fix for. Defaults to None.
exclude_rules (:obj:`Optional[List[str]`, optional): A subset of rule
references to avoid fixing for. Defaults to None.
config (:obj:`Optional[FluffConfig]`, optional): A configuration object
to use for the operation. Defaults to None.
config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config,
which is only used if a `config` is not already provided.
Defaults to None.
fix_even_unparsable (:obj:`bool`, optional): Optional override for the
corresponding SQLFluff configuration value.
Returns:
:obj:`str` for the fixed SQL if possible.
"""
cfg = config or get_simple_config(
dialect=dialect,
rules=rules,
exclude_rules=exclude_rules,
config_path=config_path,
)
linter = Linter(config=cfg)
result = linter.lint_string_wrapped(sql, fix=True)
if fix_even_unparsable is None:
fix_even_unparsable = cfg.get("fix_even_unparsable")
should_fix = True
if not fix_even_unparsable:
# If fix_even_unparsable wasn't set, check for templating or parse
# errors and suppress fixing if there were any.
_, num_filtered_errors = result.count_tmp_prs_errors()
if num_filtered_errors > 0:
should_fix = False
if should_fix:
sql = result.paths[0].files[0].fix_string()[0]
return sql | Fix a SQL string.
Args:
sql (:obj:`str`): The SQL to be fixed.
dialect (:obj:`str`, optional): A reference to the dialect of the SQL
to be fixed. Defaults to `ansi`.
rules (:obj:`Optional[List[str]`, optional): A subset of rule
references to fix for. Defaults to None.
exclude_rules (:obj:`Optional[List[str]`, optional): A subset of rule
references to avoid fixing for. Defaults to None.
config (:obj:`Optional[FluffConfig]`, optional): A configuration object
to use for the operation. Defaults to None.
config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config,
which is only used if a `config` is not already provided.
Defaults to None.
fix_even_unparsable (:obj:`bool`, optional): Optional override for the
corresponding SQLFluff configuration value.
Returns:
:obj:`str` for the fixed SQL if possible. | fix | python | sqlfluff/sqlfluff | src/sqlfluff/api/simple.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/api/simple.py | MIT |
def parse(
    sql: str,
    dialect: str = "ansi",
    config: Optional[FluffConfig] = None,
    config_path: Optional[str] = None,
) -> Dict[str, Any]:
    """Parse a SQL string into a JSON-serialisable record.

    Args:
        sql (:obj:`str`): The SQL to be parsed.
        dialect (:obj:`str`, optional): A reference to the dialect of the SQL
            to be parsed. Defaults to `ansi`.
        config (:obj:`Optional[FluffConfig]`, optional): A configuration object
            to use for the operation. Defaults to None.
        config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config,
            which is only used if a `config` is not already provided.
            Defaults to None.

    Returns:
        :obj:`Dict[str, Any]` JSON containing the parsed structure.

    Raises:
        APIParsingError: If any violations are found while parsing.

    Note:
        In the case of multiple potential variants from the raw source file
        only the first variant is returned by the simple API. For access to
        the other variants, use the underlying main API directly.
    """
    effective_config = config or get_simple_config(
        dialect=dialect,
        config_path=config_path,
    )
    parsed = Linter(config=effective_config).parse_string(sql)
    # Surface any parsing errors as a single combined exception.
    if parsed.violations:
        raise APIParsingError(parsed.violations)
    # Only the first variant is exposed through the simple API.
    variant = parsed.root_variant()
    assert variant, "Files parsed without violations must have a valid variant"
    assert variant.tree, "Files parsed without violations must have a valid tree"
    parse_record = variant.tree.as_record(show_raw=True)
    assert parse_record
    return parse_record
Args:
sql (:obj:`str`): The SQL to be parsed.
dialect (:obj:`str`, optional): A reference to the dialect of the SQL
to be parsed. Defaults to `ansi`.
config (:obj:`Optional[FluffConfig]`, optional): A configuration object
to use for the operation. Defaults to None.
config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config,
which is only used if a `config` is not already provided.
Defaults to None.
Returns:
:obj:`Dict[str, Any]` JSON containing the parsed structure.
Note:
In the case of multiple potential variants from the raw source file
only the first variant is returned by the simple API. For access to
the other variants, use the underlying main API directly. | parse | python | sqlfluff/sqlfluff | src/sqlfluff/api/simple.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/api/simple.py | MIT |
def identifiers_policy_applicable(
    policy: str, parent_stack: Tuple[BaseSegment, ...]
) -> bool:
    """Does `(un)quoted_identifiers_policy` apply to this segment?

    This method is used in CP02, RF04 and RF05.
    """
    # The blanket policies short-circuit immediately.
    if policy == "all":
        return True
    if policy == "none":
        return False
    # The immediate parent determines whether this identifier is an alias.
    is_alias = parent_stack and parent_stack[-1].is_type(
        "alias_expression", "column_definition", "with_compound_statement"
    )
    if policy == "aliases":
        return bool(is_alias)
    # Column vs table aliases are distinguished by whether we sit
    # anywhere inside a FROM clause.
    is_inside_from = any(seg.is_type("from_clause") for seg in parent_stack)
    if policy == "column_aliases":
        return bool(is_alias and not is_inside_from)
    if policy == "table_aliases":
        return bool(is_alias and is_inside_from)
    return False
This method is used in CP02, RF04 and RF05. | identifiers_policy_applicable | python | sqlfluff/sqlfluff | src/sqlfluff/utils/identifers.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/identifers.py | MIT |
def is_slice_type(
    *slice_types: str,
) -> Callable[[RawFileSlice], bool]:
    """Returns a predicate matching raw slices of any of the given types."""

    def _checker(raw_slice: RawFileSlice) -> bool:
        return raw_slice.slice_type in slice_types

    return _checker
def __new__(
    cls, *raw_slices: RawFileSlice, templated_file: Optional[TemplatedFile] = None
) -> "RawFileSlices":
    """Build the underlying tuple; `templated_file` is ignored here."""
    # Zero-arg super() is valid here because this is defined in a class body.
    return super().__new__(cls, raw_slices)
def all(self, predicate: Optional[Callable[[RawFileSlice], bool]] = None) -> bool:
    """Return True if every raw slice satisfies ``predicate``.

    With no predicate, every collection (including an empty one) matches.
    """
    if predicate is None:
        return True
    for raw_slice in self:
        if not predicate(raw_slice):
            return False
    return True
def any(self, predicate: Optional[Callable[[RawFileSlice], bool]] = None) -> bool:
    """Return True if at least one raw slice satisfies ``predicate``.

    With no predicate, any non-empty collection matches.
    """
    if predicate is None:
        for _ in self:
            return True
        return False
    for raw_slice in self:
        if predicate(raw_slice):
            return True
    return False
def select(
    self,
    select_if: Optional[Callable[[RawFileSlice], bool]] = None,
    loop_while: Optional[Callable[[RawFileSlice], bool]] = None,
    start_slice: Optional[RawFileSlice] = None,
    stop_slice: Optional[RawFileSlice] = None,
) -> "RawFileSlices":
    """Retrieve range/subset.

    NOTE: Iterates the slices BETWEEN start_slice and stop_slice, i.e. those
    slices are not included in the loop.
    """
    first_idx = self.index(start_slice) + 1 if start_slice else 0
    last_idx = self.index(stop_slice) if stop_slice else len(self)
    selected = []
    for candidate in self[first_idx:last_idx]:
        # Stop as soon as the loop_while guard fails.
        if loop_while is not None and not loop_while(candidate):
            # NOTE: This likely needs more tests.
            break  # pragma: no cover
        if select_if is None or select_if(candidate):
            selected.append(candidate)
    return RawFileSlices(*selected, templated_file=self.templated_file)
NOTE: Iterates the slices BETWEEN start_slice and stop_slice, i.e. those
slices are not included in the loop. | select | python | sqlfluff/sqlfluff | src/sqlfluff/utils/functional/raw_file_slices.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/functional/raw_file_slices.py | MIT |
def raw_is(*raws: str) -> Callable[[BaseSegment], bool]:  # pragma: no cover
    """Returns a predicate matching segments whose raw text is one of ``raws``."""

    def _checker(segment: BaseSegment) -> bool:
        return segment.raw in raws

    return _checker
def raw_upper_is(*raws: str) -> Callable[[BaseSegment], bool]:
    """Returns a predicate matching segments whose upper-cased raw is in ``raws``."""

    def _checker(segment: BaseSegment) -> bool:
        return segment.raw_upper in raws

    return _checker
def is_type(*seg_type: str) -> Callable[[BaseSegment], bool]:
    """Returns a predicate that checks a segment against the given types."""
    return lambda segment: segment.is_type(*seg_type)
def is_keyword(*keyword_name: str) -> Callable[[BaseSegment], bool]:
    """Returns a predicate matching keyword segments with the given names.

    Matching is case-insensitive: names are upper-cased before comparison.
    """
    upper_names = tuple(name.upper() for name in keyword_name)
    return and_(is_type("keyword"), raw_upper_is(*upper_names))
def is_code() -> Callable[[BaseSegment], bool]:
    """Returns a predicate reading the segment's ``is_code`` flag."""
    return lambda segment: segment.is_code
def is_comment() -> Callable[[BaseSegment], bool]:
    """Returns a predicate reading the segment's ``is_comment`` flag."""
    return lambda segment: segment.is_comment
def is_meta() -> Callable[[BaseSegment], bool]:
    """Returns a predicate reading the segment's ``is_meta`` flag."""
    return lambda segment: segment.is_meta
def is_raw() -> Callable[[BaseSegment], bool]:
    """Returns a predicate calling the segment's ``is_raw()`` method."""
    return lambda segment: segment.is_raw()
def is_whitespace() -> Callable[[BaseSegment], bool]:
    """Returns a predicate reading the segment's ``is_whitespace`` flag."""
    return lambda segment: segment.is_whitespace
def is_templated() -> Callable[[BaseSegment], bool]:  # pragma: no cover
    """Returns a predicate reading the segment's ``is_templated`` flag."""
    return lambda segment: segment.is_templated
def get_type() -> Callable[[BaseSegment], str]:
    """Returns a function that retrieves a segment's type string."""
    return lambda segment: segment.get_type()
def and_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
    """Returns a predicate which is the conjunction of ``functions``.

    With no functions supplied, the result always returns True.
    """

    def _combined(segment: BaseSegment) -> bool:
        for function in functions:
            if not function(segment):
                return False
        return True

    return _combined
def or_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
    """Returns a predicate which is the disjunction of ``functions``.

    With no functions supplied, the result always returns False.
    """

    def _combined(segment: BaseSegment) -> bool:
        for function in functions:
            if function(segment):
                return True
        return False

    return _combined
def not_(fn: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
    """Returns a predicate which is the negation of ``fn``."""
    return lambda segment: not fn(segment)
def raw_slices(
    segment: BaseSegment,
    templated_file: Optional[TemplatedFile],
) -> RawFileSlices:  # pragma: no cover
    """Returns raw slices for a segment.

    Raises:
        ValueError: If ``templated_file`` is missing or ``segment`` has no
            position marker.
    """
    if not templated_file:
        raise ValueError(
            'raw_slices: "templated_file" parameter is required.'
        )  # pragma: no cover
    if not segment.pos_marker:
        raise ValueError(
            'raw_slices: "segment" parameter must have pos_marker set.'
        )  # pragma: no cover
    spanning = templated_file.raw_slices_spanning_source_slice(
        segment.pos_marker.source_slice
    )
    return RawFileSlices(*spanning, templated_file=templated_file)
def templated_slices(
    segment: BaseSegment,
    templated_file: Optional[TemplatedFile],
) -> TemplatedFileSlices:
    """Returns raw slices for a segment.

    Raises:
        ValueError: If ``templated_file`` is missing or ``segment`` has no
            position marker.
    """
    if not templated_file:
        raise ValueError(
            'templated_slices: "templated_file" parameter is required.'
        )  # pragma: no cover
    if not segment.pos_marker:
        raise ValueError(
            'templated_slices: "segment" parameter must have pos_marker set.'
        )  # pragma: no cover
    # :TRICKY: We deliberately avoid _find_slice_indices_of_templated_pos()
    # because it treats TemplatedFileSlice.templated_slice.stop as inclusive
    # rather than exclusive. Other parts of SQLFluff rely on that behaviour,
    # but here we want a plain half-open overlap test, so we do it directly.
    seg_slice = segment.pos_marker.templated_slice
    overlapping = [
        file_slice
        for file_slice in templated_file.sliced_file
        if file_slice.templated_slice.start < seg_slice.stop
        and file_slice.templated_slice.stop > seg_slice.start
    ]
    return TemplatedFileSlices(*overlapping, templated_file=templated_file)
def is_slice_type(
    *slice_types: str,
) -> Callable[[TemplatedFileSlice], bool]:
    """Returns a predicate matching templated slices of any of the given types."""
    return lambda raw_slice: raw_slice.slice_type in slice_types
def __new__(
    cls,
    *templated_slices: TemplatedFileSlice,
    templated_file: Optional[TemplatedFile] = None,
) -> "TemplatedFileSlices":
    """Build the underlying tuple; `templated_file` is ignored here."""
    # Zero-arg super() is valid here because this is defined in a class body.
    return super().__new__(cls, templated_slices)
def all(
    self, predicate: Optional[Callable[[TemplatedFileSlice], bool]] = None
) -> bool:
    """Return True if every templated slice satisfies ``predicate``.

    With no predicate, every collection (including an empty one) matches.
    """
    if predicate is None:
        return True
    for file_slice in self:
        if not predicate(file_slice):
            return False
    return True
def any(
    self, predicate: Optional[Callable[[TemplatedFileSlice], bool]] = None
) -> bool:  # pragma: no cover
    """Return True if at least one templated slice satisfies ``predicate``.

    With no predicate, any non-empty collection matches.
    """
    if predicate is None:
        for _ in self:
            return True
        return False
    for file_slice in self:
        if predicate(file_slice):
            return True
    return False
def select(
    self,
    select_if: Optional[Callable[[TemplatedFileSlice], bool]] = None,
    loop_while: Optional[Callable[[TemplatedFileSlice], bool]] = None,
    start_slice: Optional[TemplatedFileSlice] = None,
    stop_slice: Optional[TemplatedFileSlice] = None,
) -> "TemplatedFileSlices":  # pragma: no cover
    """Retrieve range/subset.

    NOTE: Iterates the slices BETWEEN start_slice and stop_slice, i.e. those
    slices are not included in the loop.
    """
    first_idx = self.index(start_slice) + 1 if start_slice else 0
    last_idx = self.index(stop_slice) if stop_slice else len(self)
    selected = []
    for candidate in self[first_idx:last_idx]:
        # Stop as soon as the loop_while guard fails.
        if loop_while is not None and not loop_while(candidate):
            break
        if select_if is None or select_if(candidate):
            selected.append(candidate)
    return TemplatedFileSlices(*selected, templated_file=self.templated_file)
NOTE: Iterates the slices BETWEEN start_slice and stop_slice, i.e. those
slices are not included in the loop. | select | python | sqlfluff/sqlfluff | src/sqlfluff/utils/functional/templated_file_slices.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/functional/templated_file_slices.py | MIT |
def __new__(
    cls, *segments: BaseSegment, templated_file: Optional[TemplatedFile] = None
) -> "Segments":
    """Build the underlying tuple; `templated_file` is ignored here."""
    # Zero-arg super() is valid here because this is defined in a class body.
    return super().__new__(cls, segments)
def find(self, segment: Optional[BaseSegment]) -> int:
    """Returns index if found, -1 if not found."""
    try:
        position = self.index(segment)
    except ValueError:
        # `tuple.index` raises when the element is absent.
        return -1
    return position
def all(self, predicate: Optional[PredicateType] = None) -> bool:
    """Return True if every segment satisfies ``predicate``.

    With no predicate, every collection (including an empty one) matches.
    """
    if predicate is None:
        return True
    for candidate in self:
        if not predicate(candidate):
            return False
    return True
def any(self, predicate: Optional[PredicateType] = None) -> bool:
    """Return True if at least one segment satisfies ``predicate``.

    With no predicate, any non-empty collection matches.
    """
    if predicate is None:
        # Without a predicate, any element at all counts as a match.
        return len(self) > 0
    for candidate in self:
        if predicate(candidate):
            return True
    return False
def reversed(self) -> "Segments":  # pragma: no cover
    """Return the same segments in reverse order."""
    backwards = list(self)
    backwards.reverse()
    return Segments(*backwards, templated_file=self.templated_file)
def raw_slices(self) -> RawFileSlices:
    """Raw slices of the segments, sorted in source file order.

    Raises:
        ValueError: If no templated file is attached, or any segment is
            missing a position marker.
    """
    if not self.templated_file:
        raise ValueError(
            'Segments.raw_slices: "templated_file" property is required.'
        )
    collected = set()
    for candidate in self:
        if candidate.pos_marker is None:
            raise ValueError(
                "Segments include a positionless segment"
            )  # pragma: no cover
        source_slice = candidate.pos_marker.source_slice
        collected.update(
            self.templated_file.raw_slices_spanning_source_slice(source_slice)
        )
    # Sets are unordered, so re-sort by position in the source file.
    ordered = sorted(collected, key=lambda slice_: slice_.source_idx)
    return RawFileSlices(*ordered, templated_file=self.templated_file)
def raw_segments(self) -> "Segments":  # pragma: no cover
    """Get raw segments underlying the segments."""
    flattened = [raw for candidate in self for raw in candidate.raw_segments]
    return Segments(*flattened, templated_file=self.templated_file)
def recursive_crawl_all(self) -> "Segments":  # pragma: no cover
    """Recursively crawl all descendant segments."""
    found = [
        descendant
        for candidate in self
        for descendant in candidate.recursive_crawl_all()
    ]
    return Segments(*found, templated_file=self.templated_file)
def recursive_crawl(self, *seg_type: str, recurse_into: bool = True) -> "Segments":
    """Recursively crawl for segments of a given type."""
    found = [
        descendant
        for candidate in self
        for descendant in candidate.recursive_crawl(
            *seg_type, recurse_into=recurse_into
        )
    ]
    return Segments(*found, templated_file=self.templated_file)
def children(
    self,
    predicate: Optional[PredicateType] = None,
) -> "Segments":
    """Returns an object with children of the segments in this object."""
    matched = [
        child
        for candidate in self
        for child in candidate.segments
        if predicate is None or predicate(child)
    ]
    return Segments(*matched, templated_file=self.templated_file)
def first(
    self,
    predicate: Optional[PredicateType] = None,
) -> "Segments":
    """Returns the first segment (if any) that satisfies the predicate."""
    for candidate in self:
        if predicate is not None and not predicate(candidate):
            continue
        return Segments(candidate, templated_file=self.templated_file)
    # Nothing matched: hand back an empty Segments collection.
    return Segments(templated_file=self.templated_file)
def last(
    self,
    predicate: Optional[PredicateType] = None,
) -> "Segments":
    """Returns the last segment (if any) that satisfies the predicate."""
    for candidate in reversed(self):
        if predicate is not None and not predicate(candidate):
            continue
        return Segments(candidate, templated_file=self.templated_file)
    # Nothing matched: hand back an empty Segments collection.
    return Segments(templated_file=self.templated_file)
def __getitem__(self, item: SupportsIndex) -> BaseSegment:
    """Individual "getting" returns a single segment.

    NOTE: Using `SupportsIndex` rather than `int` is to ensure
    type compatibility with the parent `tuple` implementation.
    """
    # NOTE(review): docstring-only body — presumably a typing @overload stub
    # with the shared implementation defined elsewhere; confirm in full file.
NOTE: Using `SupportsIndex` rather than `int` is to ensure
type compatibility with the parent `tuple` implementation. | __getitem__ | python | sqlfluff/sqlfluff | src/sqlfluff/utils/functional/segments.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/functional/segments.py | MIT |
def __getitem__(self, item: slice) -> "Segments":
    """Getting a slice returns another `Segments` object."""
    # NOTE(review): docstring-only body — presumably a typing @overload stub
    # with the shared implementation defined elsewhere; confirm in full file.
def get(
    self, index: int = 0, *, default: Optional[BaseSegment] = None
) -> Optional[BaseSegment]:
    """Return specified item. Returns default if index out of range."""
    # Valid indices (including negative ones) lie in [-len, len).
    if -len(self) <= index < len(self):
        return self[index]
    return default
def apply(self, fn: Callable[[BaseSegment], Any]) -> List[Any]:
    """Apply function to every item."""
    return list(map(fn, self))
def select(
    self,
    select_if: Optional[PredicateType] = None,
    loop_while: Optional[PredicateType] = None,
    start_seg: Optional[BaseSegment] = None,
    stop_seg: Optional[BaseSegment] = None,
) -> "Segments":
    """Retrieve range/subset.

    NOTE: Iterates the segments BETWEEN start_seg and stop_seg, i.e. those
    segments are not included in the loop.
    """
    first_idx = self.index(start_seg) + 1 if start_seg else 0
    last_idx = self.index(stop_seg) if stop_seg else len(self)
    kept = []
    for candidate in self[first_idx:last_idx]:
        # Stop as soon as the loop_while guard fails.
        if loop_while is not None and not loop_while(candidate):
            break
        if select_if is None or select_if(candidate):
            kept.append(candidate)
    return Segments(*kept, templated_file=self.templated_file)
NOTE: Iterates the segments BETWEEN start_seg and stop_seg, i.e. those
segments are not included in the loop. | select | python | sqlfluff/sqlfluff | src/sqlfluff/utils/functional/segments.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/functional/segments.py | MIT |
def iterate_segments(
    self,
    predicate: Optional[PredicateType] = None,
) -> Iterable["Segments"]:
    """Loop over each element as a fresh Segments.

    Iterating Segments directly yields bare segments; this wrapper yields
    each one re-wrapped as a single-element Segments object instead.
    """
    for element in self:
        if predicate and not predicate(element):  # pragma: no cover
            continue
        yield Segments(element, templated_file=self.templated_file)
def segment(self) -> "Segments":
    """Returns a Segments object for context.segment."""
    ctx = self.context
    return Segments(ctx.segment, templated_file=ctx.templated_file)
def parent_stack(self) -> "Segments":  # pragma: no cover
    """Returns a Segments object for context.parent_stack."""
    ctx = self.context
    return Segments(*ctx.parent_stack, templated_file=ctx.templated_file)
def siblings_pre(self) -> "Segments":  # pragma: no cover
    """Returns a Segments object for context.siblings_pre."""
    ctx = self.context
    return Segments(*ctx.siblings_pre, templated_file=ctx.templated_file)
def siblings_post(self) -> "Segments":  # pragma: no cover
    """Returns a Segments object for context.siblings_post."""
    ctx = self.context
    return Segments(*ctx.siblings_post, templated_file=ctx.templated_file)
def raw_stack(self) -> "Segments":  # pragma: no cover
    """Returns a Segments object for context.raw_stack."""
    ctx = self.context
    return Segments(*ctx.raw_stack, templated_file=ctx.templated_file)
def raw_segments(self) -> Segments:  # pragma: no cover
    """Returns a Segments object for all the raw segments in the file."""
    ctx = self.context
    # The first entry of the parent stack is the file segment itself.
    file_segment = ctx.parent_stack[0]
    return Segments(
        *file_segment.get_raw_segments(), templated_file=ctx.templated_file
    )
def evaluate(self) -> None:
    """Evaluate the test case.

    NOTE: This method is designed to be run in a pytest context and
    will call methods such as `pytest.skip()` as part of its execution.
    It may not be suitable for other testing contexts.
    """
    # All of the linting/fixing work is delegated to the shared helper.
    rules__test_helper(self)
NOTE: This method is designed to be run in a pytest context and
will call methods such as `pytest.skip()` as part of it's execution.
It may not be suitable for other testing contexts. | evaluate | python | sqlfluff/sqlfluff | src/sqlfluff/utils/testing/rules.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/rules.py | MIT |
def load_test_cases(
    test_cases_path: str,
) -> Tuple[List[str], List[RuleTestCase]]:
    """Load rule test cases from YAML files.

    Args:
        test_cases_path (str): A glob string specifying the files containing
            test cases to load.

    Returns:
        Tuple[List[str], List[RuleTestCase]]: A list of test ids (of the
            form ``<rule>_<case name>``) and the corresponding test cases.
    """
    ids = []
    test_cases = []
    for path in sorted(glob(test_cases_path)):
        # Explicit encoding: YAML fixtures are UTF-8 regardless of the
        # platform's default locale encoding.
        with open(path, encoding="utf-8") as f:
            raw = f.read()
        y = yaml.safe_load(raw)
        rule = y.pop("rule")
        # A file-level "configs" key acts as a default for every case in
        # the file which doesn't define its own.
        global_config = y.pop("configs", None)
        if global_config:
            for case_name in y:
                y[case_name].setdefault("configs", global_config)
        ids.extend(rule + "_" + case_name for case_name in y)
        test_cases.extend(RuleTestCase(rule=rule, **case) for case in y.values())
    return ids, test_cases
Args:
test_cases_path (str): A glob string specifying the files containing
test cases to load. | load_test_cases | python | sqlfluff/sqlfluff | src/sqlfluff/utils/testing/rules.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/rules.py | MIT |
def get_rule_from_set(code: str, config: FluffConfig) -> BaseRule:
    """Fetch a rule from the rule set."""
    rulepack = get_ruleset().get_rulepack(config=config)
    match = next((rule for rule in rulepack.rules if rule.code == code), None)
    if match is not None:  # pragma: no cover
        return match
    raise ValueError(f"{code!r} not in {get_ruleset()!r}")
def _setup_config(
    code: str, configs: Optional[ConfigMappingType] = None
) -> FluffConfig:
    """Helper function to set up config consistently for pass & fail functions."""
    # Restrict linting to just the rule under test.
    overrides: ConfigMappingType = {"rules": code}
    # Default the dialect to "ansi" unless one is explicitly configured
    # in the "core" section of the supplied configs.
    core_section = configs.get("core", {}) if configs else {}
    dialect_configured = (
        isinstance(core_section, dict) and "dialect" in core_section
    )
    if not dialect_configured:
        overrides["dialect"] = "ansi"
    return FluffConfig(configs=configs, overrides=overrides)
def assert_rule_fail_in_sql(
    code: str,
    sql: str,
    configs: Optional[ConfigMappingType] = None,
    line_numbers: Optional[List[int]] = None,
) -> Tuple[str, List[SQLBaseError]]:
    """Assert that a given rule does fail on the given sql.

    Args:
        code (str): The code of the rule to test.
        sql (str): The SQL text to check against.
        configs (:obj:`ConfigMappingType`, optional): A config dict
            object containing any overrides.
        line_numbers (list of int, optional): The line numbers which
            we want to test that errors occurred on.

    Returns:
        Tuple: values(fixed_sql (str), violations (list))
            fixed_sql (str): The fixed string after linting. Note that for
                testing purposes, `.lint_string()` is always called with
                `fix` set to `True`.
            violations (list of SQLBaseError): the violations found during
                linting.
    """
    print("# Asserting Rule Fail in SQL")
    # Configure the linter to only run the rule under test...
    cfg = _setup_config(code, configs)
    # ...then lint the string with that config (in fix mode).
    linted = Linter(config=cfg).lint_string(sql, fix=True)
    all_violations = linted.get_violations()
    print("Errors Found:")
    for violation in all_violations:
        print(" " + repr(violation))
        if violation.desc().startswith("Unexpected exception"):
            pytest.fail(f"Linter failed with {violation.desc()}")  # pragma: no cover
    # Any templating or parsing problem invalidates the test case outright.
    parse_errors = [
        violation
        for violation in all_violations
        if isinstance(violation, (SQLParseError, SQLTemplaterError))
    ]
    if parse_errors:
        pytest.fail(f"Found the following parse errors in test case: {parse_errors}")
    lint_errors: List[SQLLintError] = [
        violation for violation in all_violations if isinstance(violation, SQLLintError)
    ]
    # The rule under test must have produced at least one violation.
    if all(err.rule.code != code for err in lint_errors):
        assert linted.tree
        print(f"Parsed File:\n{linted.tree.stringify()}")
        pytest.fail(
            f"No {code} failures found in query which should fail.",
            pytrace=False,
        )
    # Optionally check the violations landed on the expected lines.
    if line_numbers:
        actual_line_numbers = [err.line_no for err in lint_errors]
        if line_numbers != actual_line_numbers:  # pragma: no cover
            pytest.fail(
                "Expected errors on lines {}, but got errors on lines {}".format(
                    line_numbers, actual_line_numbers
                )
            )
    fixed_sql, _ = linted.fix_string()
    # If fixes were applied, the rule must have declared `is_fix_compatible`.
    if fixed_sql != sql:
        rule = get_rule_from_set(code, config=cfg)
        assert rule.is_fix_compatible, (
            f"Rule {code} returned fixes but does not specify "
            "'is_fix_compatible = True'."
        )
    return fixed_sql, linted.violations
Args:
code (str): The code of the rule to test.
sql (str): The SQL text to check against.
configs (:obj:`ConfigMappingType`, optional): A config dict
object containing any overrides.
line_numbers (list of int, optional): The line numbers which
we want to test that errors occurred on.
Returns:
Tuple: values(fixed_sql (str), violations (list))
fixed_sql (str): The fixed string after linting. Note that for
testing purposes, `.lint_string()` is always called with
`fix` set to `True`.
violations (list of SQLBaseError): the violations found during
linting. | assert_rule_fail_in_sql | python | sqlfluff/sqlfluff | src/sqlfluff/utils/testing/rules.py | https://github.com/sqlfluff/sqlfluff/blob/master/src/sqlfluff/utils/testing/rules.py | MIT |
def assert_rule_pass_in_sql(
    code: str,
    sql: str,
    configs: Optional[ConfigMappingType] = None,
    msg: Optional[str] = None,
) -> None:
    """Assert that a given rule doesn't fail on the given sql."""
    print("# Asserting Rule Pass in SQL")
    # Configs allows overrides if we want to use them.
    cfg = _setup_config(code, configs)
    linter = Linter(config=cfg)
    # Render and parse up front. This section is mainly an aid in debugging.
    rendered = linter.render_string(sql, fname="<STR>", config=cfg, encoding="utf-8")
    parsed = linter.parse_rendered(rendered)
    tree = parsed.tree  # Delegate assertions to the `.tree` property
    violations = parsed.violations
    if violations:
        if msg:
            print(msg)  # pragma: no cover
        pytest.fail(violations[0].desc() + "\n" + tree.stringify())
    print(f"Parsed:\n {tree.stringify()}")
    # Note that lint_string() runs the templater and parser again, in order
    # to test the whole linting pipeline in the same way that users do. The
    # "rendered" and "parsed" values above are therefore irrelevant here.
    lint_result = linter.lint_string(sql, config=cfg, fname="<STR>")
    lint_errors = [
        violation
        for violation in lint_result.violations
        if isinstance(violation, SQLLintError)
    ]
    # Fail the test if the rule under test produced any violations.
    if any(err.rule.code == code for err in lint_errors):
        print("Errors Found:")
        for violation in lint_result.violations:
            print(" " + repr(violation))
        if msg:
            print(msg)  # pragma: no cover
        pytest.fail(f"Found {code} failures in query which should pass.", pytrace=False)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.