Dataset columns:

  column                           type        values
  identifier                       string      lengths 1 to 155
  parameters                       string      lengths 2 to 6.09k
  docstring                        string      lengths 11 to 63.4k
  docstring_summary                string      lengths 0 to 63.4k
  function                         string      lengths 29 to 99.8k
  function_tokens                  sequence
  start_point                      sequence
  end_point                        sequence
  language                         string      1 class
  docstring_language               string      lengths 2 to 7
  docstring_language_predictions   string      lengths 18 to 23
  is_langid_reliable               string      2 classes
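For orientation, a minimal sketch of reading rows with this schema through the Hugging Face datasets library. The dataset id below is a placeholder, since this excerpt does not name the dataset:

from datasets import load_dataset

# "org/dataset-id" is a placeholder; substitute the real dataset identifier.
ds = load_dataset("org/dataset-id", split="train")
row = ds[0]
print(row["identifier"], row["language"], row["start_point"], row["end_point"])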
identifier: substitute_value_from_azure_keyvault
parameters: (value)
docstring: This method uses an azure.identity.DefaultAzureCredential to authenticate to the Azure SDK for Python and an azure.keyvault.secrets.SecretClient to try to retrieve the secret value from the elements it is able to parse from the input value.
    - value: string with pattern ``secret|https://${vault_name}.vault.azure.net/secrets/${secret_name}``; optionally, a secret version can be appended as ``/${secret_version}``, and a secret key as ``|${secret_key}``
    - vault_name: `Vault name of the secret manager <https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#objects-identifiers-and-versioning>`_
    - secret_name: Name of the secret
    - secret_version: ID of the version of the secret
    - secret_key: Only if the secret's data is a JSON string, which key of the dict should be retrieved
    :param value: a string that matches the following regex ``^secret|https://[a-zA-Z0-9-]{3,24}.vault.azure.net``
    :return: a string with the value substituted by the secret from the Azure Key Vault store
    :raises: ImportError, ValueError
docstring_summary: This method uses an azure.identity.DefaultAzureCredential to authenticate to the Azure SDK for Python and an azure.keyvault.secrets.SecretClient to try to retrieve the secret value from the elements it is able to parse from the input value.
function:

def substitute_value_from_azure_keyvault(value):
    """
    This method uses an azure.identity.DefaultAzureCredential to authenticate to the Azure SDK for Python
    and an azure.keyvault.secrets.SecretClient to try to retrieve the secret value
    from the elements it is able to parse from the input value.

    - value: string with pattern ``secret|https://${vault_name}.vault.azure.net/secrets/${secret_name}``
        optional : after the value above, a secret version can be added ``/${secret_version}``
        optional : after the value above, a secret key can be added ``|${secret_key}``
    - vault_name: `Vault name of the secret manager <https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#objects-identifiers-and-versioning>`_
    - secret_name: Name of the secret
    - secret_version: ID of the version of the secret
    - secret_key: Only if the secret's data is a JSON string, which key of the dict should be retrieved

    :param value: a string that matches the following regex ``^secret|https://[a-zA-Z0-9-]{3,24}.vault.azure.net``
    :return: a string with the value substituted by the secret from the Azure Key Vault store
    :raises: ImportError, ValueError
    """
    regex = re.compile(
        r"^secret\|(https:\/\/[a-zA-Z0-9\-]{3,24}\.vault\.azure\.net)\/secrets\/([0-9a-zA-Z-]+)"
        r"(?:\/([a-f0-9]{32}))?(?:\|([^\|]+))?$"
    )
    if not SecretClient:
        logger.error(
            "SecretClient is not installed, please install great_expectations with azure_secrets extra > "
            "pip install great_expectations[azure_secrets]"
        )
        raise ImportError("Could not import SecretClient from azure.keyvault.secrets")
    matches = regex.match(value)
    if not matches:
        raise ValueError(f"Could not match the value with regex {regex}")
    keyvault_uri = matches.group(1)
    secret_name = matches.group(2)
    secret_version = matches.group(3)
    secret_key = matches.group(4)
    credential = DefaultAzureCredential()
    client = SecretClient(vault_url=keyvault_uri, credential=credential)
    secret = client.get_secret(name=secret_name, version=secret_version).value
    if secret_key:
        secret = json.loads(secret)[secret_key]
    return secret
[ "def", "substitute_value_from_azure_keyvault", "(", "value", ")", ":", "regex", "=", "re", ".", "compile", "(", "r\"^secret\\|(https:\\/\\/[a-zA-Z0-9\\-]{3,24}\\.vault\\.azure\\.net)\\/secrets\\/([0-9a-zA-Z-]+)\"", "r\"(?:\\/([a-f0-9]{32}))?(?:\\|([^\\|]+))?$\"", ")", "if", "not", "SecretClient", ":", "logger", ".", "error", "(", "\"SecretClient is not installed, please install great_expectations with azure_secrets extra > \"", "\"pip install great_expectations[azure_secrets]\"", ")", "raise", "ImportError", "(", "\"Could not import SecretClient from azure.keyvault.secrets\"", ")", "matches", "=", "regex", ".", "match", "(", "value", ")", "if", "not", "matches", ":", "raise", "ValueError", "(", "f\"Could not match the value with regex {regex}\"", ")", "keyvault_uri", "=", "matches", ".", "group", "(", "1", ")", "secret_name", "=", "matches", ".", "group", "(", "2", ")", "secret_version", "=", "matches", ".", "group", "(", "3", ")", "secret_key", "=", "matches", ".", "group", "(", "4", ")", "credential", "=", "DefaultAzureCredential", "(", ")", "client", "=", "SecretClient", "(", "vault_url", "=", "keyvault_uri", ",", "credential", "=", "credential", ")", "secret", "=", "client", ".", "get_secret", "(", "name", "=", "secret_name", ",", "version", "=", "secret_version", ")", ".", "value", "if", "secret_key", ":", "secret", "=", "json", ".", "loads", "(", "secret", ")", "[", "secret_key", "]", "return", "secret" ]
start_point: [398, 0]
end_point: [444, 17]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
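A quick usage sketch of the helper above. The vault and secret names are hypothetical, and the calls assume the function and its module-level dependencies (re, json, logger, and the azure_secrets extra providing DefaultAzureCredential and SecretClient) are in scope:

# Hypothetical vault "my-vault" and secret "db-creds", per the documented pattern.
plain = substitute_value_from_azure_keyvault(
    "secret|https://my-vault.vault.azure.net/secrets/db-creds"
)

# Optional suffixes: a 32-hex-char secret version, then a JSON key.
password = substitute_value_from_azure_keyvault(
    "secret|https://my-vault.vault.azure.net/secrets/db-creds"
    "/0123456789abcdef0123456789abcdef|password"
)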
identifier: substitute_all_config_variables
parameters: (data, replace_variables_dict, dollar_sign_escape_string: str = r"\$")
docstring: Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like config object for their values. The method traverses the dictionary recursively. :param data: :param replace_variables_dict: :return: a dictionary with all the variables replaced with their values
docstring_summary: Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like config object for their values.
function:

def substitute_all_config_variables(
    data, replace_variables_dict, dollar_sign_escape_string: str = r"\$"
):
    """
    Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like
    config object for their values.

    The method traverses the dictionary recursively.

    :param data:
    :param replace_variables_dict:
    :return: a dictionary with all the variables replaced with their values
    """
    if isinstance(data, DataContextConfig):
        data = DataContextConfigSchema().dump(data)

    if isinstance(data, CheckpointConfig):
        data = CheckpointConfigSchema().dump(data)

    if isinstance(data, dict) or isinstance(data, OrderedDict):
        return {
            k: substitute_all_config_variables(v, replace_variables_dict)
            for k, v in data.items()
        }
    elif isinstance(data, list):
        return [
            substitute_all_config_variables(v, replace_variables_dict) for v in data
        ]
    return substitute_config_variable(
        data, replace_variables_dict, dollar_sign_escape_string
    )
[ "def", "substitute_all_config_variables", "(", "data", ",", "replace_variables_dict", ",", "dollar_sign_escape_string", ":", "str", "=", "r\"\\$\"", ")", ":", "if", "isinstance", "(", "data", ",", "DataContextConfig", ")", ":", "data", "=", "DataContextConfigSchema", "(", ")", ".", "dump", "(", "data", ")", "if", "isinstance", "(", "data", ",", "CheckpointConfig", ")", ":", "data", "=", "CheckpointConfigSchema", "(", ")", ".", "dump", "(", "data", ")", "if", "isinstance", "(", "data", ",", "dict", ")", "or", "isinstance", "(", "data", ",", "OrderedDict", ")", ":", "return", "{", "k", ":", "substitute_all_config_variables", "(", "v", ",", "replace_variables_dict", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "return", "[", "substitute_all_config_variables", "(", "v", ",", "replace_variables_dict", ")", "for", "v", "in", "data", "]", "return", "substitute_config_variable", "(", "data", ",", "replace_variables_dict", ",", "dollar_sign_escape_string", ")" ]
start_point: [447, 0]
end_point: [477, 5]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
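A minimal sketch of the intended behavior, assuming the function above and its schema dependencies are in scope. Note that the leaf-level substitution is delegated to substitute_config_variable, which is not shown in this excerpt:

config = {"datasource": {"credentials": {"password": "${DB_PASSWORD}"}}}
substituted = substitute_all_config_variables(
    config, replace_variables_dict={"DB_PASSWORD": "hunter2"}
)
# Expected, per the docstring: the nested "${DB_PASSWORD}" becomes "hunter2",
# while non-variable values like ints and plain strings pass through unchanged.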
identifier: file_relative_path
parameters: (dunderfile, relative_path)
docstring: This function is useful when one needs to load a file that is relative to the position of the current file. (Such as when you encode a configuration file path in a source file and want it runnable in any current working directory.) It is meant to be used like the following: file_relative_path(__file__, 'path/relative/to/file') H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34
docstring_summary: This function is useful when one needs to load a file that is relative to the position of the current file. (Such as when you encode a configuration file path in a source file and want it runnable in any current working directory.)
function:

def file_relative_path(dunderfile, relative_path):
    """
    This function is useful when one needs to load a file that is
    relative to the position of the current file. (Such as when
    you encode a configuration file path in a source file and want
    it runnable in any current working directory)

    It is meant to be used like the following:
    file_relative_path(__file__, 'path/relative/to/file')

    H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34
    """
    return os.path.join(os.path.dirname(dunderfile), relative_path)
[ "def", "file_relative_path", "(", "dunderfile", ",", "relative_path", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "dunderfile", ")", ",", "relative_path", ")" ]
start_point: [480, 0]
end_point: [492, 67]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
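Usage is exactly what the docstring shows; "config.yml" here is a hypothetical sibling file of the calling module:

# Resolves next to this module, independent of the current working directory.
config_path = file_relative_path(__file__, "config.yml")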
identifier: parse_substitution_variable
parameters: (substitution_variable: str)
docstring: Parse and check whether the string contains a substitution variable of the case insensitive form ${SOME_VAR} or $SOME_VAR Args: substitution_variable: string to be parsed Returns: string of variable name e.g. SOME_VAR or None if not parsable. If there are multiple substitution variables this currently returns the first e.g. $SOME_$TRING -> $SOME_
docstring_summary: Parse and check whether the string contains a substitution variable of the case insensitive form ${SOME_VAR} or $SOME_VAR Args: substitution_variable: string to be parsed
function:

def parse_substitution_variable(substitution_variable: str) -> Optional[str]:
    """
    Parse and check whether the string contains a substitution variable of the case insensitive form ${SOME_VAR} or $SOME_VAR
    Args:
        substitution_variable: string to be parsed

    Returns:
        string of variable name e.g. SOME_VAR or None if not parsable. If there are multiple substitution variables
        this currently returns the first e.g. $SOME_$TRING -> $SOME_
    """
    substitution_variable_name = pp.Word(pp.alphanums + "_").setResultsName(
        "substitution_variable_name"
    )
    curly_brace_parser = "${" + substitution_variable_name + "}"
    non_curly_brace_parser = "$" + substitution_variable_name
    both_parser = curly_brace_parser | non_curly_brace_parser
    try:
        parsed_substitution_variable = both_parser.parseString(substitution_variable)
        return parsed_substitution_variable.substitution_variable_name
    except pp.ParseException:
        return None
[ "def", "parse_substitution_variable", "(", "substitution_variable", ":", "str", ")", "->", "Optional", "[", "str", "]", ":", "substitution_variable_name", "=", "pp", ".", "Word", "(", "pp", ".", "alphanums", "+", "\"_\"", ")", ".", "setResultsName", "(", "\"substitution_variable_name\"", ")", "curly_brace_parser", "=", "\"${\"", "+", "substitution_variable_name", "+", "\"}\"", "non_curly_brace_parser", "=", "\"$\"", "+", "substitution_variable_name", "both_parser", "=", "curly_brace_parser", "|", "non_curly_brace_parser", "try", ":", "parsed_substitution_variable", "=", "both_parser", ".", "parseString", "(", "substitution_variable", ")", "return", "parsed_substitution_variable", ".", "substitution_variable_name", "except", "pp", ".", "ParseException", ":", "return", "None" ]
start_point: [495, 0]
end_point: [514, 19]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
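A few illustrative calls, following the docstring's own examples (assuming the function and its pyparsing import, pp, are in scope):

parse_substitution_variable("${SOME_VAR}")    # -> "SOME_VAR"
parse_substitution_variable("$SOME_VAR")      # -> "SOME_VAR"
parse_substitution_variable("no variables")   # -> None
parse_substitution_variable("$SOME_$TRING")   # -> "SOME_" (only the first variable is returned)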
identifier: PasswordMasker.mask_db_url
parameters: (url: str, use_urlparse: bool = False, **kwargs)
docstring: Mask password in database url. Uses sqlalchemy engine parsing if sqlalchemy is installed, otherwise defaults to using urlparse from the stdlib which does not handle kwargs. Args: url: Database url e.g. "postgresql+psycopg2://username:password@host:65432/database" use_urlparse: Skip trying to parse url with sqlalchemy and use urlparse **kwargs: passed to create_engine() Returns: url with password masked e.g. "postgresql+psycopg2://username:***@host:65432/database"
docstring_summary: Mask password in database url. Uses sqlalchemy engine parsing if sqlalchemy is installed, otherwise defaults to using urlparse from the stdlib which does not handle kwargs.
function:

def mask_db_url(url: str, use_urlparse: bool = False, **kwargs) -> str:
    """
    Mask password in database url.
    Uses sqlalchemy engine parsing if sqlalchemy is installed, otherwise defaults
    to using urlparse from the stdlib which does not handle kwargs.
    Args:
        url: Database url e.g. "postgresql+psycopg2://username:password@host:65432/database"
        use_urlparse: Skip trying to parse url with sqlalchemy and use urlparse
        **kwargs: passed to create_engine()

    Returns:
        url with password masked e.g. "postgresql+psycopg2://username:***@host:65432/database"
    """
    if sa is not None and use_urlparse is False:
        engine = sa.create_engine(url, **kwargs)
        return engine.url.__repr__()
    else:
        warnings.warn(
            "SQLAlchemy is not installed, using urlparse to mask database url password which ignores **kwargs."
        )

        # oracle+cx_oracle does not parse well using urlparse, parse as oracle then swap back
        replace_prefix = None
        if url.startswith("oracle+cx_oracle"):
            replace_prefix = {"original": "oracle+cx_oracle", "temporary": "oracle"}
            url = url.replace(replace_prefix["original"], replace_prefix["temporary"])

        parsed_url = urlparse(url)

        # Do not parse sqlite
        if parsed_url.scheme == "sqlite":
            return url

        colon = ":" if parsed_url.port is not None else ""
        masked_url = (
            f"{parsed_url.scheme}://{parsed_url.username}:{PasswordMasker.MASKED_PASSWORD_STRING}"
            f"@{parsed_url.hostname}{colon}{parsed_url.port or ''}{parsed_url.path or ''}"
        )

        if replace_prefix is not None:
            masked_url = masked_url.replace(
                replace_prefix["temporary"], replace_prefix["original"]
            )

        return masked_url
[ "def", "mask_db_url", "(", "url", ":", "str", ",", "use_urlparse", ":", "bool", "=", "False", ",", "*", "*", "kwargs", ")", "->", "str", ":", "if", "sa", "is", "not", "None", "and", "use_urlparse", "is", "False", ":", "engine", "=", "sa", ".", "create_engine", "(", "url", ",", "*", "*", "kwargs", ")", "return", "engine", ".", "url", ".", "__repr__", "(", ")", "else", ":", "warnings", ".", "warn", "(", "\"SQLAlchemy is not installed, using urlparse to mask database url password which ignores **kwargs.\"", ")", "# oracle+cx_oracle does not parse well using urlparse, parse as oracle then swap back", "replace_prefix", "=", "None", "if", "url", ".", "startswith", "(", "\"oracle+cx_oracle\"", ")", ":", "replace_prefix", "=", "{", "\"original\"", ":", "\"oracle+cx_oracle\"", ",", "\"temporary\"", ":", "\"oracle\"", "}", "url", "=", "url", ".", "replace", "(", "replace_prefix", "[", "\"original\"", "]", ",", "replace_prefix", "[", "\"temporary\"", "]", ")", "parsed_url", "=", "urlparse", "(", "url", ")", "# Do not parse sqlite", "if", "parsed_url", ".", "scheme", "==", "\"sqlite\"", ":", "return", "url", "colon", "=", "\":\"", "if", "parsed_url", ".", "port", "is", "not", "None", "else", "\"\"", "masked_url", "=", "(", "f\"{parsed_url.scheme}://{parsed_url.username}:{PasswordMasker.MASKED_PASSWORD_STRING}\"", "f\"@{parsed_url.hostname}{colon}{parsed_url.port or ''}{parsed_url.path or ''}\"", ")", "if", "replace_prefix", "is", "not", "None", ":", "masked_url", "=", "masked_url", ".", "replace", "(", "replace_prefix", "[", "\"temporary\"", "]", ",", "replace_prefix", "[", "\"original\"", "]", ")", "return", "masked_url" ]
start_point: [539, 4]
end_point: [584, 29]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
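A usage sketch built from the docstring's own example URL, forcing the urlparse fallback path (with SQLAlchemy installed and use_urlparse left False, the masked form comes from engine.url instead):

masked = PasswordMasker.mask_db_url(
    "postgresql+psycopg2://username:password@host:65432/database",
    use_urlparse=True,
)
# -> "postgresql+psycopg2://username:***@host:65432/database"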
identifier: InferredAssetFilePathDataConnector.__init__
parameters: (self, name: str, datasource_name: str, execution_engine: Optional[ExecutionEngine] = None, default_regex: Optional[dict] = None, sorters: Optional[list] = None, batch_spec_passthrough: Optional[dict] = None)
docstring: Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration of default_regex and sorters for filtering and sorting data_references. Args: name (str): name of ConfiguredAssetFilePathDataConnector datasource_name (str): Name of datasource that this DataConnector is connected to execution_engine (ExecutionEngine): ExecutionEngine object to actually read the data default_regex (dict): Optional dict to filter and organize the data_references. sorters (list): Optional list if you want to sort the data_references batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
docstring_summary: Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration of default_regex and sorters for filtering and sorting data_references.
function:

def __init__(
    self,
    name: str,
    datasource_name: str,
    execution_engine: Optional[ExecutionEngine] = None,
    default_regex: Optional[dict] = None,
    sorters: Optional[list] = None,
    batch_spec_passthrough: Optional[dict] = None,
):
    """
    Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration
    of default_regex and sorters for filtering and sorting data_references.

    Args:
        name (str): name of ConfiguredAssetFilePathDataConnector
        datasource_name (str): Name of datasource that this DataConnector is connected to
        execution_engine (ExecutionEngine): ExecutionEngine object to actually read the data
        default_regex (dict): Optional dict to filter and organize the data_references.
        sorters (list): Optional list if you want to sort the data_references
        batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
    """
    logger.debug(f'Constructing InferredAssetFilePathDataConnector "{name}".')

    super().__init__(
        name=name,
        datasource_name=datasource_name,
        execution_engine=execution_engine,
        default_regex=default_regex,
        sorters=sorters,
        batch_spec_passthrough=batch_spec_passthrough,
    )
[ "def", "__init__", "(", "self", ",", "name", ":", "str", ",", "datasource_name", ":", "str", ",", "execution_engine", ":", "Optional", "[", "ExecutionEngine", "]", "=", "None", ",", "default_regex", ":", "Optional", "[", "dict", "]", "=", "None", ",", "sorters", ":", "Optional", "[", "list", "]", "=", "None", ",", "batch_spec_passthrough", ":", "Optional", "[", "dict", "]", "=", "None", ",", ")", ":", "logger", ".", "debug", "(", "f'Constructing InferredAssetFilePathDataConnector \"{name}\".'", ")", "super", "(", ")", ".", "__init__", "(", "name", "=", "name", ",", "datasource_name", "=", "datasource_name", ",", "execution_engine", "=", "execution_engine", ",", "default_regex", "=", "default_regex", ",", "sorters", "=", "sorters", ",", "batch_spec_passthrough", "=", "batch_spec_passthrough", ",", ")" ]
start_point: [26, 4]
end_point: [56, 9]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
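A hedged sketch of constructor arguments for a concrete subclass. The default_regex keys ("pattern" and "group_names") follow Great Expectations' documented convention, and every name here is hypothetical:

connector_kwargs = dict(
    name="my_inferred_connector",
    datasource_name="my_datasource",
    default_regex={
        # one capture group per group name; "data_asset_name" drives asset inference
        "pattern": r"(.*)_(\d{4})-(\d{2})\.csv",
        "group_names": ["data_asset_name", "year", "month"],
    },
    sorters=None,
    batch_spec_passthrough={"reader_options": {"header": True}},
)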
identifier: InferredAssetFilePathDataConnector._refresh_data_references_cache
parameters: (self)
docstring: refreshes data_reference cache
docstring_summary: refreshes data_reference cache
function:

def _refresh_data_references_cache(self):
    """refreshes data_reference cache"""
    # Map data_references to batch_definitions
    self._data_references_cache = {}

    for data_reference in self._get_data_reference_list():
        mapped_batch_definition_list: List[
            BatchDefinition
        ] = self._map_data_reference_to_batch_definition_list(
            data_reference=data_reference, data_asset_name=None
        )
        self._data_references_cache[data_reference] = mapped_batch_definition_list
[ "def", "_refresh_data_references_cache", "(", "self", ")", ":", "# Map data_references to batch_definitions", "self", ".", "_data_references_cache", "=", "{", "}", "for", "data_reference", "in", "self", ".", "_get_data_reference_list", "(", ")", ":", "mapped_batch_definition_list", ":", "List", "[", "BatchDefinition", "]", "=", "self", ".", "_map_data_reference_to_batch_definition_list", "(", "data_reference", "=", "data_reference", ",", "data_asset_name", "=", "None", ")", "self", ".", "_data_references_cache", "[", "data_reference", "]", "=", "mapped_batch_definition_list" ]
start_point: [58, 4]
end_point: [69, 86]
language: python
docstring_language: en
docstring_language_predictions: ['fr', 'en', 'en']
is_langid_reliable: True
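A standalone sketch of the cache contract this method establishes (plain Python, not Great Expectations' actual types): each data_reference maps to its batch definitions, or to None when nothing matched, which is what the two accessors shown next rely on:

cache = {
    "yellow_trip_data_sample_2019-01.csv": ["<BatchDefinition>"],  # matched reference
    "notes.txt": None,  # unmatched: no batch definitions could be mapped
}
reference_count = len(cache)                             # cf. get_data_reference_list_count
unmatched = [k for k, v in cache.items() if v is None]   # cf. get_unmatched_data_references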
identifier: InferredAssetFilePathDataConnector.get_data_reference_list_count
parameters: (self)
docstring: Returns the number of data_references known by this DataConnector by looping over all data_asset_names in _data_references_cache Returns: number of data_references known by this DataConnector
docstring_summary: Returns the number of data_references known by this DataConnector by looping over all data_asset_names in _data_references_cache
function:

def get_data_reference_list_count(self) -> int:
    """
    Returns the number of data_references known by this DataConnector by looping over all
    data_asset_names in _data_references_cache

    Returns:
        number of data_references known by this DataConnector
    """
    return len(self._data_references_cache)
[ "def", "get_data_reference_list_count", "(", "self", ")", "->", "int", ":", "return", "len", "(", "self", ".", "_data_references_cache", ")" ]
start_point: [71, 4]
end_point: [79, 47]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
identifier: InferredAssetFilePathDataConnector.get_unmatched_data_references
parameters: (self)
docstring: Returns the list of data_references unmatched by configuration by looping through items in _data_references_cache and returning data_references that do not have an associated data_asset. Returns: list of data_references that are not matched by configuration.
docstring_summary: Returns the list of data_references unmatched by configuration by looping through items in _data_references_cache and returning data_references that do not have an associated data_asset.
function:

def get_unmatched_data_references(self) -> List[str]:
    """
    Returns the list of data_references unmatched by configuration by looping through items in
    _data_references_cache and returning data_references that do not have an associated data_asset.

    Returns:
        list of data_references that are not matched by configuration.
    """
    return [k for k, v in self._data_references_cache.items() if v is None]
[ "def", "get_unmatched_data_references", "(", "self", ")", "->", "List", "[", "str", "]", ":", "return", "[", "k", "for", "k", ",", "v", "in", "self", ".", "_data_references_cache", ".", "items", "(", ")", "if", "v", "is", "None", "]" ]
start_point: [81, 4]
end_point: [89, 79]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
identifier: InferredAssetFilePathDataConnector.get_available_data_asset_names
parameters: (self)
docstring: Return the list of asset names known by this DataConnector. Returns: A list of available names
docstring_summary: Return the list of asset names known by this DataConnector.
function:

def get_available_data_asset_names(self) -> List[str]:
    """
    Return the list of asset names known by this DataConnector

    Returns:
        A list of available names
    """
    if len(self._data_references_cache) == 0:
        self._refresh_data_references_cache()

    # This will fetch ALL batch_definitions in the cache
    batch_definition_list: List[
        BatchDefinition
    ] = self._get_batch_definition_list_from_batch_request(
        batch_request=BatchRequestBase(
            datasource_name=self.datasource_name, data_connector_name=self.name
        )
    )

    data_asset_names: List[str] = [
        batch_definition.data_asset_name
        for batch_definition in batch_definition_list
    ]

    return list(set(data_asset_names))
[ "def", "get_available_data_asset_names", "(", "self", ")", "->", "List", "[", "str", "]", ":", "if", "len", "(", "self", ".", "_data_references_cache", ")", "==", "0", ":", "self", ".", "_refresh_data_references_cache", "(", ")", "# This will fetch ALL batch_definitions in the cache", "batch_definition_list", ":", "List", "[", "BatchDefinition", "]", "=", "self", ".", "_get_batch_definition_list_from_batch_request", "(", "batch_request", "=", "BatchRequestBase", "(", "datasource_name", "=", "self", ".", "datasource_name", ",", "data_connector_name", "=", "self", ".", "name", ")", ")", "data_asset_names", ":", "List", "[", "str", "]", "=", "[", "batch_definition", ".", "data_asset_name", "for", "batch_definition", "in", "batch_definition_list", "]", "return", "list", "(", "set", "(", "data_asset_names", ")", ")" ]
start_point: [91, 4]
end_point: [115, 42]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
identifier: InferredAssetFilePathDataConnector.build_batch_spec
parameters: (self, batch_definition: BatchDefinition)
docstring: Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function. Args: batch_definition (BatchDefinition): to be used to build batch_spec Returns: BatchSpec built from batch_definition
docstring_summary: Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
function:

def build_batch_spec(self, batch_definition: BatchDefinition) -> PathBatchSpec:
    """
    Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.

    Args:
        batch_definition (BatchDefinition): to be used to build batch_spec

    Returns:
        BatchSpec built from batch_definition
    """
    batch_spec: BatchSpec = super().build_batch_spec(
        batch_definition=batch_definition
    )
    return PathBatchSpec(batch_spec)
[ "def", "build_batch_spec", "(", "self", ",", "batch_definition", ":", "BatchDefinition", ")", "->", "PathBatchSpec", ":", "batch_spec", ":", "BatchSpec", "=", "super", "(", ")", ".", "build_batch_spec", "(", "batch_definition", "=", "batch_definition", ")", "return", "PathBatchSpec", "(", "batch_spec", ")" ]
start_point: [117, 4]
end_point: [131, 40]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
identifier: test_alice_columnar_table_single_batch_batches_are_accessible
parameters: (monkeypatch, alice_columnar_table_single_batch_context, alice_columnar_table_single_batch)
docstring: What does this test and why? Batches created in the multibatch_generic_csv_generator fixture should be available using the multibatch_generic_csv_generator_context. This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
docstring_summary: What does this test and why? Batches created in the multibatch_generic_csv_generator fixture should be available using the multibatch_generic_csv_generator_context. This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
function:

def test_alice_columnar_table_single_batch_batches_are_accessible(
    monkeypatch,
    alice_columnar_table_single_batch_context,
    alice_columnar_table_single_batch,
):
    """
    What does this test and why?
    Batches created in the multibatch_generic_csv_generator fixture should be available using the
    multibatch_generic_csv_generator_context
    This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
    """
    context: DataContext = alice_columnar_table_single_batch_context

    datasource_name: str = "alice_columnar_table_single_batch_datasource"
    data_connector_name: str = "alice_columnar_table_single_batch_data_connector"
    data_asset_name: str = "alice_columnar_table_single_batch_data_asset"

    datasource: Datasource = cast(Datasource, context.datasources[datasource_name])
    data_connector: DataConnector = datasource.data_connectors[data_connector_name]

    file_list: List[str] = [
        alice_columnar_table_single_batch["sample_data_relative_path"]
    ]

    assert (
        data_connector._get_data_reference_list_from_cache_by_data_asset_name(
            data_asset_name=data_asset_name
        )
        == file_list
    )

    batch_request_1: BatchRequest = BatchRequest(
        datasource_name=datasource_name,
        data_connector_name=data_connector_name,
        data_asset_name=data_asset_name,
        data_connector_query={
            "index": -1,
        },
    )
    # Should give most recent batch
    validator_1: Validator = context.get_validator(
        batch_request=batch_request_1,
        create_expectation_suite_with_name="my_expectation_suite_name_1",
    )
    metric_max: int = validator_1.get_metric(
        MetricConfiguration("column.max", metric_domain_kwargs={"column": "event_type"})
    )
    assert metric_max == 73
[ "def", "test_alice_columnar_table_single_batch_batches_are_accessible", "(", "monkeypatch", ",", "alice_columnar_table_single_batch_context", ",", "alice_columnar_table_single_batch", ",", ")", ":", "context", ":", "DataContext", "=", "alice_columnar_table_single_batch_context", "datasource_name", ":", "str", "=", "\"alice_columnar_table_single_batch_datasource\"", "data_connector_name", ":", "str", "=", "\"alice_columnar_table_single_batch_data_connector\"", "data_asset_name", ":", "str", "=", "\"alice_columnar_table_single_batch_data_asset\"", "datasource", ":", "Datasource", "=", "cast", "(", "Datasource", ",", "context", ".", "datasources", "[", "datasource_name", "]", ")", "data_connector", ":", "DataConnector", "=", "datasource", ".", "data_connectors", "[", "data_connector_name", "]", "file_list", ":", "List", "[", "str", "]", "=", "[", "alice_columnar_table_single_batch", "[", "\"sample_data_relative_path\"", "]", "]", "assert", "(", "data_connector", ".", "_get_data_reference_list_from_cache_by_data_asset_name", "(", "data_asset_name", "=", "data_asset_name", ")", "==", "file_list", ")", "batch_request_1", ":", "BatchRequest", "=", "BatchRequest", "(", "datasource_name", "=", "datasource_name", ",", "data_connector_name", "=", "data_connector_name", ",", "data_asset_name", "=", "data_asset_name", ",", "data_connector_query", "=", "{", "\"index\"", ":", "-", "1", ",", "}", ",", ")", "# Should give most recent batch", "validator_1", ":", "Validator", "=", "context", ".", "get_validator", "(", "batch_request", "=", "batch_request_1", ",", "create_expectation_suite_with_name", "=", "\"my_expectation_suite_name_1\"", ",", ")", "metric_max", ":", "int", "=", "validator_1", ".", "get_metric", "(", "MetricConfiguration", "(", "\"column.max\"", ",", "metric_domain_kwargs", "=", "{", "\"column\"", ":", "\"event_type\"", "}", ")", ")", "assert", "metric_max", "==", "73" ]
start_point: [21, 0]
end_point: [69, 27]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
identifier: test_bobby_columnar_table_multi_batch_batches_are_accessible
parameters: (monkeypatch, bobby_columnar_table_multi_batch_deterministic_data_context, bobby_columnar_table_multi_batch)
docstring: What does this test and why? Batches created in the multibatch_generic_csv_generator fixture should be available using the multibatch_generic_csv_generator_context. This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
docstring_summary: What does this test and why? Batches created in the multibatch_generic_csv_generator fixture should be available using the multibatch_generic_csv_generator_context. This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
function:

def test_bobby_columnar_table_multi_batch_batches_are_accessible(
    monkeypatch,
    bobby_columnar_table_multi_batch_deterministic_data_context,
    bobby_columnar_table_multi_batch,
):
    """
    What does this test and why?
    Batches created in the multibatch_generic_csv_generator fixture should be available using the
    multibatch_generic_csv_generator_context
    This test most likely duplicates tests elsewhere, but it is more of a test of the configurable fixture.
    """
    context: DataContext = bobby_columnar_table_multi_batch_deterministic_data_context

    datasource_name: str = "taxi_pandas"
    data_connector_name: str = "monthly"
    data_asset_name: str = "my_reports"

    datasource: Datasource = cast(Datasource, context.datasources[datasource_name])
    data_connector: DataConnector = datasource.data_connectors[data_connector_name]

    file_list: List[str] = [
        "yellow_trip_data_sample_2019-01.csv",
        "yellow_trip_data_sample_2019-02.csv",
        "yellow_trip_data_sample_2019-03.csv",
    ]

    assert (
        data_connector._get_data_reference_list_from_cache_by_data_asset_name(
            data_asset_name=data_asset_name
        )
        == file_list
    )

    batch_request_latest: BatchRequest = BatchRequest(
        datasource_name=datasource_name,
        data_connector_name=data_connector_name,
        data_asset_name=data_asset_name,
        data_connector_query={
            "index": -1,
        },
    )
    validator_latest: Validator = context.get_validator(
        batch_request=batch_request_latest,
        create_expectation_suite_with_name="my_expectation_suite_name_1",
    )

    metric_configuration_arguments: Dict[str, Any] = {
        "metric_name": "table.row_count",
        "metric_domain_kwargs": {
            "batch_id": validator_latest.active_batch_id,
        },
        "metric_value_kwargs": None,
        "metric_dependencies": None,
    }
    metric_value: int = validator_latest.get_metric(
        metric=MetricConfiguration(**metric_configuration_arguments)
    )
    assert metric_value == 9000

    # noinspection PyUnresolvedReferences
    pickup_datetime: datetime.datetime = pd.to_datetime(
        validator_latest.head(n_rows=1)["pickup_datetime"][0]
    ).to_pydatetime()
    month: int = pickup_datetime.month
    assert month == 3
[ "def", "test_bobby_columnar_table_multi_batch_batches_are_accessible", "(", "monkeypatch", ",", "bobby_columnar_table_multi_batch_deterministic_data_context", ",", "bobby_columnar_table_multi_batch", ",", ")", ":", "context", ":", "DataContext", "=", "bobby_columnar_table_multi_batch_deterministic_data_context", "datasource_name", ":", "str", "=", "\"taxi_pandas\"", "data_connector_name", ":", "str", "=", "\"monthly\"", "data_asset_name", ":", "str", "=", "\"my_reports\"", "datasource", ":", "Datasource", "=", "cast", "(", "Datasource", ",", "context", ".", "datasources", "[", "datasource_name", "]", ")", "data_connector", ":", "DataConnector", "=", "datasource", ".", "data_connectors", "[", "data_connector_name", "]", "file_list", ":", "List", "[", "str", "]", "=", "[", "\"yellow_trip_data_sample_2019-01.csv\"", ",", "\"yellow_trip_data_sample_2019-02.csv\"", ",", "\"yellow_trip_data_sample_2019-03.csv\"", ",", "]", "assert", "(", "data_connector", ".", "_get_data_reference_list_from_cache_by_data_asset_name", "(", "data_asset_name", "=", "data_asset_name", ")", "==", "file_list", ")", "batch_request_latest", ":", "BatchRequest", "=", "BatchRequest", "(", "datasource_name", "=", "datasource_name", ",", "data_connector_name", "=", "data_connector_name", ",", "data_asset_name", "=", "data_asset_name", ",", "data_connector_query", "=", "{", "\"index\"", ":", "-", "1", ",", "}", ",", ")", "validator_latest", ":", "Validator", "=", "context", ".", "get_validator", "(", "batch_request", "=", "batch_request_latest", ",", "create_expectation_suite_with_name", "=", "\"my_expectation_suite_name_1\"", ",", ")", "metric_configuration_arguments", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "\"metric_name\"", ":", "\"table.row_count\"", ",", "\"metric_domain_kwargs\"", ":", "{", "\"batch_id\"", ":", "validator_latest", ".", "active_batch_id", ",", "}", ",", "\"metric_value_kwargs\"", ":", "None", ",", "\"metric_dependencies\"", ":", "None", ",", "}", "metric_value", ":", "int", "=", "validator_latest", ".", "get_metric", "(", "metric", "=", "MetricConfiguration", "(", "*", "*", "metric_configuration_arguments", ")", ")", "assert", "metric_value", "==", "9000", "# noinspection PyUnresolvedReferences", "pickup_datetime", ":", "datetime", ".", "datetime", "=", "pd", ".", "to_datetime", "(", "validator_latest", ".", "head", "(", "n_rows", "=", "1", ")", "[", "\"pickup_datetime\"", "]", "[", "0", "]", ")", ".", "to_pydatetime", "(", ")", "month", ":", "int", "=", "pickup_datetime", ".", "month", "assert", "month", "==", "3" ]
start_point: [104, 0]
end_point: [169, 21]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False
identifier: column_function_partial
parameters: (engine: Type[ExecutionEngine], partial_fn_type: str = None, **kwargs)
docstring: Provides engine-specific support for authoring a metric_fn with a simplified signature. A metric function that is decorated as a column_function_partial will be called with the engine-specific column type and any value_kwargs associated with the Metric for which the provider function is being declared. Args: engine: **kwargs: Returns: An annotated metric_function which will be called with a simplified signature.
docstring_summary: Provides engine-specific support for authoring a metric_fn with a simplified signature.
function:

def column_function_partial(
    engine: Type[ExecutionEngine], partial_fn_type: str = None, **kwargs
):
    """Provides engine-specific support for authoring a metric_fn with a simplified signature.

    A metric function that is decorated as a column_function_partial will be called with the engine-specific
    column type and any value_kwargs associated with the Metric for which the provider function is being declared.

    Args:
        engine:
        **kwargs:

    Returns:
        An annotated metric_function which will be called with a simplified signature.
    """
    domain_type = MetricDomainTypes.COLUMN
    if issubclass(engine, PandasExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_SERIES
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type != MetricPartialFunctionTypes.MAP_SERIES:
            raise ValueError(
                "PandasExecutionEngine only supports map_series for column_function_partial partial_fn_type"
            )

        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: PandasExecutionEngine,
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
                )

                (
                    df,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=metric_domain_kwargs, domain_type=domain_type
                )

                column_name = accessor_domain_kwargs["column"]

                if column_name not in metrics["table.columns"]:
                    raise ge_exceptions.ExecutionEngineError(
                        message=f'Error: The column "{column_name}" in BatchData does not exist.'
                    )

                if filter_column_isnull:
                    df = df[df[column_name].notnull()]

                values = metric_fn(
                    cls,
                    df[column_name],
                    **metric_value_kwargs,
                    _metrics=metrics,
                )
                return values, compute_domain_kwargs, accessor_domain_kwargs

            return inner_func

        return wrapper

    elif issubclass(engine, SqlAlchemyExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_FN
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [MetricPartialFunctionTypes.MAP_FN]:
            raise ValueError(
                "SqlAlchemyExecutionEngine only supports map_fn for column_function_partial partial_fn_type"
            )

        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: SqlAlchemyExecutionEngine,
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
                )
                if filter_column_isnull:
                    compute_domain_kwargs = execution_engine.add_column_row_condition(
                        metric_domain_kwargs
                    )
                else:
                    # We do not copy here because if compute domain is different, it will be copied by get_compute_domain
                    compute_domain_kwargs = metric_domain_kwargs
                (
                    selectable,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=compute_domain_kwargs, domain_type=domain_type
                )

                column_name = accessor_domain_kwargs["column"]

                if column_name not in metrics["table.columns"]:
                    raise ge_exceptions.ExecutionEngineError(
                        message=f'Error: The column "{column_name}" in BatchData does not exist.'
                    )

                dialect = execution_engine.dialect_module
                column_function = metric_fn(
                    cls,
                    sa.column(column_name),
                    **metric_value_kwargs,
                    _dialect=dialect,
                    _table=selectable,
                    _metrics=metrics,
                )
                return column_function, compute_domain_kwargs, accessor_domain_kwargs

            return inner_func

        return wrapper

    elif issubclass(engine, SparkDFExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_FN
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [
            MetricPartialFunctionTypes.MAP_FN,
            MetricPartialFunctionTypes.WINDOW_FN,
        ]:
            raise ValueError(
                "SparkDFExecutionEngine only supports map_fn and window_fn for column_function_partial partial_fn_type"
            )

        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: SparkDFExecutionEngine,
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
                )
                if filter_column_isnull:
                    compute_domain_kwargs = execution_engine.add_column_row_condition(
                        metric_domain_kwargs
                    )
                else:
                    # We do not copy here because if compute domain is different, it will be copied by get_compute_domain
                    compute_domain_kwargs = metric_domain_kwargs
                (
                    data,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=compute_domain_kwargs, domain_type=domain_type
                )

                column_name = accessor_domain_kwargs["column"]

                if column_name not in metrics["table.columns"]:
                    raise ge_exceptions.ExecutionEngineError(
                        message=f'Error: The column "{column_name}" in BatchData does not exist.'
                    )

                column = data[column_name]
                column_function = metric_fn(
                    cls,
                    column=column,
                    **metric_value_kwargs,
                    _metrics=metrics,
                    _compute_domain_kwargs=compute_domain_kwargs,
                )
                return column_function, compute_domain_kwargs, accessor_domain_kwargs

            return inner_func

        return wrapper

    else:
        raise ValueError("Unsupported engine for column_function_partial")
[ "def", "column_function_partial", "(", "engine", ":", "Type", "[", "ExecutionEngine", "]", ",", "partial_fn_type", ":", "str", "=", "None", ",", "*", "*", "kwargs", ")", ":", "domain_type", "=", "MetricDomainTypes", ".", "COLUMN", "if", "issubclass", "(", "engine", ",", "PandasExecutionEngine", ")", ":", "if", "partial_fn_type", "is", "None", ":", "partial_fn_type", "=", "MetricPartialFunctionTypes", ".", "MAP_SERIES", "partial_fn_type", "=", "MetricPartialFunctionTypes", "(", "partial_fn_type", ")", "if", "partial_fn_type", "!=", "MetricPartialFunctionTypes", ".", "MAP_SERIES", ":", "raise", "ValueError", "(", "\"PandasExecutionEngine only supports map_series for column_function_partial partial_fn_type\"", ")", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_partial", "(", "engine", "=", "engine", ",", "partial_fn_type", "=", "partial_fn_type", ",", "domain_type", "=", "domain_type", ",", "*", "*", "kwargs", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "PandasExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "(", "df", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "metric_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "if", "filter_column_isnull", ":", "df", "=", "df", "[", "df", "[", "column_name", "]", ".", "notnull", "(", ")", "]", "values", "=", "metric_fn", "(", "cls", ",", "df", "[", "column_name", "]", ",", "*", "*", "metric_value_kwargs", ",", "_metrics", "=", "metrics", ",", ")", "return", "values", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "return", "inner_func", "return", "wrapper", "elif", "issubclass", "(", "engine", ",", "SqlAlchemyExecutionEngine", ")", ":", "if", "partial_fn_type", "is", "None", ":", "partial_fn_type", "=", "MetricPartialFunctionTypes", ".", "MAP_FN", "partial_fn_type", "=", "MetricPartialFunctionTypes", "(", "partial_fn_type", ")", "if", "partial_fn_type", "not", "in", "[", "MetricPartialFunctionTypes", ".", "MAP_FN", "]", ":", "raise", "ValueError", "(", "\"SqlAlchemyExecutionEngine only supports map_fn for column_function_partial partial_fn_type\"", ")", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_partial", "(", "engine", "=", "engine", ",", "partial_fn_type", "=", "partial_fn_type", ",", "domain_type", "=", "domain_type", ",", "*", "*", "kwargs", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", 
"getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "if", "filter_column_isnull", ":", "compute_domain_kwargs", "=", "execution_engine", ".", "add_column_row_condition", "(", "metric_domain_kwargs", ")", "else", ":", "# We do not copy here because if compute domain is different, it will be copied by get_compute_domain", "compute_domain_kwargs", "=", "metric_domain_kwargs", "(", "selectable", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "compute_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "dialect", "=", "execution_engine", ".", "dialect_module", "column_function", "=", "metric_fn", "(", "cls", ",", "sa", ".", "column", "(", "column_name", ")", ",", "*", "*", "metric_value_kwargs", ",", "_dialect", "=", "dialect", ",", "_table", "=", "selectable", ",", "_metrics", "=", "metrics", ",", ")", "return", "column_function", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "return", "inner_func", "return", "wrapper", "elif", "issubclass", "(", "engine", ",", "SparkDFExecutionEngine", ")", ":", "if", "partial_fn_type", "is", "None", ":", "partial_fn_type", "=", "MetricPartialFunctionTypes", ".", "MAP_FN", "partial_fn_type", "=", "MetricPartialFunctionTypes", "(", "partial_fn_type", ")", "if", "partial_fn_type", "not", "in", "[", "MetricPartialFunctionTypes", ".", "MAP_FN", ",", "MetricPartialFunctionTypes", ".", "WINDOW_FN", ",", "]", ":", "raise", "ValueError", "(", "\"SparkDFExecutionEngine only supports map_fn and window_fn for column_function_partial partial_fn_type\"", ")", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_partial", "(", "engine", "=", "engine", ",", "partial_fn_type", "=", "partial_fn_type", ",", "domain_type", "=", "domain_type", ",", "*", "*", "kwargs", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "SparkDFExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "if", "filter_column_isnull", ":", "compute_domain_kwargs", "=", "execution_engine", ".", "add_column_row_condition", "(", "metric_domain_kwargs", ")", "else", ":", "# We do not copy here because if compute domain is different, it will be copied by get_compute_domain", "compute_domain_kwargs", "=", "metric_domain_kwargs", "(", "data", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "compute_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", 
"column", "=", "data", "[", "column_name", "]", "column_function", "=", "metric_fn", "(", "cls", ",", "column", "=", "column", ",", "*", "*", "metric_value_kwargs", ",", "_metrics", "=", "metrics", ",", "_compute_domain_kwargs", "=", "compute_domain_kwargs", ",", ")", "return", "column_function", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "return", "inner_func", "return", "wrapper", "else", ":", "raise", "ValueError", "(", "\"Unsupported engine for column_function_partial\"", ")" ]
start_point: [38, 0]
end_point: [249, 74]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'en', 'en']
is_langid_reliable: True
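A hedged sketch of what a decorated provider function looks like for the pandas branch. In Great Expectations this normally sits inside a metric provider class, which is omitted here; column.str.len() is just an arbitrary pandas transform, not part of the excerpt:

# Assumes column_function_partial and PandasExecutionEngine are in scope.
@column_function_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
    # called with the engine-specific column type (a pandas Series here)
    return column.str.len()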
identifier: column_condition_partial
parameters: (engine: Type[ExecutionEngine], partial_fn_type: Optional[Union[str, MetricPartialFunctionTypes]] = None, **kwargs)
docstring: Provides engine-specific support for authoring a metric_fn with a simplified signature. A column_condition_partial must provide a map function that evaluates to a boolean value; it will be used to provide supplemental metrics, such as the unexpected_value count, unexpected_values, and unexpected_rows. A metric function that is decorated as a column_condition_partial will be called with the engine-specific column type and any value_kwargs associated with the Metric for which the provider function is being declared. Args: engine: **kwargs: Returns: An annotated metric_function which will be called with a simplified signature.
docstring_summary: Provides engine-specific support for authoring a metric_fn with a simplified signature. A column_condition_partial must provide a map function that evaluates to a boolean value; it will be used to provide supplemental metrics, such as the unexpected_value count, unexpected_values, and unexpected_rows.
function:

def column_condition_partial(
    engine: Type[ExecutionEngine],
    partial_fn_type: Optional[Union[str, MetricPartialFunctionTypes]] = None,
    **kwargs,
):
    """Provides engine-specific support for authoring a metric_fn with a simplified signature. A
    column_condition_partial must provide a map function that evaluates to a boolean value; it will be used to
    provide supplemental metrics, such as the unexpected_value count, unexpected_values, and unexpected_rows.

    A metric function that is decorated as a column_condition_partial will be called with the engine-specific
    column type and any value_kwargs associated with the Metric for which the provider function is being declared.

    Args:
        engine:
        **kwargs:

    Returns:
        An annotated metric_function which will be called with a simplified signature.
    """
    domain_type = MetricDomainTypes.COLUMN
    if issubclass(engine, PandasExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_SERIES
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [MetricPartialFunctionTypes.MAP_CONDITION_SERIES]:
            raise ValueError(
                "PandasExecutionEngine only supports map_condition_series for column_condition_partial partial_fn_type"
            )

        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: PandasExecutionEngine,
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", True)
                )

                (
                    df,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=metric_domain_kwargs, domain_type=domain_type
                )

                column_name = accessor_domain_kwargs["column"]

                if column_name not in metrics["table.columns"]:
                    raise ge_exceptions.ExecutionEngineError(
                        message=f'Error: The column "{column_name}" in BatchData does not exist.'
                    )

                if filter_column_isnull:
                    df = df[df[column_name].notnull()]

                meets_expectation_series = metric_fn(
                    cls,
                    df[column_name],
                    **metric_value_kwargs,
                    _metrics=metrics,
                )
                return (
                    ~meets_expectation_series,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                )

            return inner_func

        return wrapper

    elif issubclass(engine, SqlAlchemyExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [
            MetricPartialFunctionTypes.MAP_CONDITION_FN,
            MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
        ]:
            raise ValueError(
                "SqlAlchemyExecutionEngine only supports map_condition_fn for column_condition_partial partial_fn_type"
            )

        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: SqlAlchemyExecutionEngine,
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", True)
                )

                (
                    selectable,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    metric_domain_kwargs, domain_type=domain_type
                )

                column_name = accessor_domain_kwargs["column"]

                if column_name not in metrics["table.columns"]:
                    raise ge_exceptions.ExecutionEngineError(
                        message=f'Error: The column "{column_name}" in BatchData does not exist.'
                    )

                sqlalchemy_engine: sa.engine.Engine = execution_engine.engine

                dialect = execution_engine.dialect_module
                expected_condition = metric_fn(
                    cls,
                    sa.column(column_name),
                    **metric_value_kwargs,
                    _dialect=dialect,
                    _table=selectable,
                    _sqlalchemy_engine=sqlalchemy_engine,
                    _metrics=metrics,
                )
                if filter_column_isnull:
                    # If we "filter" (ignore) nulls then we allow null as part of our new expected condition
                    unexpected_condition = sa.and_(
                        sa.not_(sa.column(column_name).is_(None)),
                        sa.not_(expected_condition),
                    )
                else:
                    unexpected_condition = sa.not_(expected_condition)
                return (
                    unexpected_condition,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                )

            return inner_func

        return wrapper

    elif issubclass(engine, SparkDFExecutionEngine):
        if partial_fn_type is None:
            partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN
        partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)
        if partial_fn_type not in [
            MetricPartialFunctionTypes.MAP_CONDITION_FN,
            MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
        ]:
            raise ValueError(
                "SparkDFExecutionEngine only supports map_condition_fn and window_condition_fn for column_condition_partial partial_fn_type"
            )

        def wrapper(metric_fn: Callable):
            @metric_partial(
                engine=engine,
                partial_fn_type=partial_fn_type,
                domain_type=domain_type,
                **kwargs,
            )
            @wraps(metric_fn)
            def inner_func(
                cls,
                execution_engine: SparkDFExecutionEngine,
                metric_domain_kwargs: Dict,
                metric_value_kwargs: Dict,
                metrics: Dict[str, Any],
                runtime_configuration: Dict,
            ):
                filter_column_isnull = kwargs.get(
                    "filter_column_isnull", getattr(cls, "filter_column_isnull", True)
                )

                (
                    data,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                ) = execution_engine.get_compute_domain(
                    domain_kwargs=metric_domain_kwargs, domain_type=domain_type
                )

                column_name = accessor_domain_kwargs["column"]

                if column_name not in metrics["table.columns"]:
                    raise ge_exceptions.ExecutionEngineError(
                        message=f'Error: The column "{column_name}" in BatchData does not exist.'
                    )

                column = data[column_name]
                expected_condition = metric_fn(
                    cls,
                    column,
                    **metric_value_kwargs,
                    _table=data,
                    _metrics=metrics,
                    _compute_domain_kwargs=compute_domain_kwargs,
                    _accessor_domain_kwargs=accessor_domain_kwargs,
                )
                if partial_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN:
                    if filter_column_isnull:
                        compute_domain_kwargs = (
                            execution_engine.add_column_row_condition(
                                compute_domain_kwargs, column_name=column_name
                            )
                        )
                    unexpected_condition = ~expected_condition
                else:
                    if filter_column_isnull:
                        unexpected_condition = column.isNotNull() & ~expected_condition
                    else:
                        unexpected_condition = ~expected_condition
                return (
                    unexpected_condition,
                    compute_domain_kwargs,
                    accessor_domain_kwargs,
                )

            return inner_func

        return wrapper

    else:
        raise ValueError("Unsupported engine for column_condition_partial")
[ "def", "column_condition_partial", "(", "engine", ":", "Type", "[", "ExecutionEngine", "]", ",", "partial_fn_type", ":", "Optional", "[", "Union", "[", "str", ",", "MetricPartialFunctionTypes", "]", "]", "=", "None", ",", "*", "*", "kwargs", ",", ")", ":", "domain_type", "=", "MetricDomainTypes", ".", "COLUMN", "if", "issubclass", "(", "engine", ",", "PandasExecutionEngine", ")", ":", "if", "partial_fn_type", "is", "None", ":", "partial_fn_type", "=", "MetricPartialFunctionTypes", ".", "MAP_CONDITION_SERIES", "partial_fn_type", "=", "MetricPartialFunctionTypes", "(", "partial_fn_type", ")", "if", "partial_fn_type", "not", "in", "[", "MetricPartialFunctionTypes", ".", "MAP_CONDITION_SERIES", "]", ":", "raise", "ValueError", "(", "\"PandasExecutionEngine only supports map_condition_series for column_condition_partial partial_fn_type\"", ")", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_partial", "(", "engine", "=", "engine", ",", "partial_fn_type", "=", "partial_fn_type", ",", "domain_type", "=", "domain_type", ",", "*", "*", "kwargs", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "PandasExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "True", ")", ")", "(", "df", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "metric_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "if", "filter_column_isnull", ":", "df", "=", "df", "[", "df", "[", "column_name", "]", ".", "notnull", "(", ")", "]", "meets_expectation_series", "=", "metric_fn", "(", "cls", ",", "df", "[", "column_name", "]", ",", "*", "*", "metric_value_kwargs", ",", "_metrics", "=", "metrics", ",", ")", "return", "(", "~", "meets_expectation_series", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "return", "inner_func", "return", "wrapper", "elif", "issubclass", "(", "engine", ",", "SqlAlchemyExecutionEngine", ")", ":", "if", "partial_fn_type", "is", "None", ":", "partial_fn_type", "=", "MetricPartialFunctionTypes", ".", "MAP_CONDITION_FN", "partial_fn_type", "=", "MetricPartialFunctionTypes", "(", "partial_fn_type", ")", "if", "partial_fn_type", "not", "in", "[", "MetricPartialFunctionTypes", ".", "MAP_CONDITION_FN", ",", "MetricPartialFunctionTypes", ".", "WINDOW_CONDITION_FN", ",", "]", ":", "raise", "ValueError", "(", "\"SqlAlchemyExecutionEngine only supports map_condition_fn for column_condition_partial partial_fn_type\"", ")", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_partial", "(", "engine", "=", "engine", ",", "partial_fn_type", "=", "partial_fn_type", ",", "domain_type", "=", "domain_type", ",", "*", "*", "kwargs", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", 
"metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "True", ")", ")", "(", "selectable", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "metric_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "sqlalchemy_engine", ":", "sa", ".", "engine", ".", "Engine", "=", "execution_engine", ".", "engine", "dialect", "=", "execution_engine", ".", "dialect_module", "expected_condition", "=", "metric_fn", "(", "cls", ",", "sa", ".", "column", "(", "column_name", ")", ",", "*", "*", "metric_value_kwargs", ",", "_dialect", "=", "dialect", ",", "_table", "=", "selectable", ",", "_sqlalchemy_engine", "=", "sqlalchemy_engine", ",", "_metrics", "=", "metrics", ",", ")", "if", "filter_column_isnull", ":", "# If we \"filter\" (ignore) nulls then we allow null as part of our new expected condition", "unexpected_condition", "=", "sa", ".", "and_", "(", "sa", ".", "not_", "(", "sa", ".", "column", "(", "column_name", ")", ".", "is_", "(", "None", ")", ")", ",", "sa", ".", "not_", "(", "expected_condition", ")", ",", ")", "else", ":", "unexpected_condition", "=", "sa", ".", "not_", "(", "expected_condition", ")", "return", "(", "unexpected_condition", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "return", "inner_func", "return", "wrapper", "elif", "issubclass", "(", "engine", ",", "SparkDFExecutionEngine", ")", ":", "if", "partial_fn_type", "is", "None", ":", "partial_fn_type", "=", "MetricPartialFunctionTypes", ".", "MAP_CONDITION_FN", "partial_fn_type", "=", "MetricPartialFunctionTypes", "(", "partial_fn_type", ")", "if", "partial_fn_type", "not", "in", "[", "MetricPartialFunctionTypes", ".", "MAP_CONDITION_FN", ",", "MetricPartialFunctionTypes", ".", "WINDOW_CONDITION_FN", ",", "]", ":", "raise", "ValueError", "(", "\"SparkDFExecutionEngine only supports map_condition_fn and window_condition_fn for column_condition_partial partial_fn_type\"", ")", "def", "wrapper", "(", "metric_fn", ":", "Callable", ")", ":", "@", "metric_partial", "(", "engine", "=", "engine", ",", "partial_fn_type", "=", "partial_fn_type", ",", "domain_type", "=", "domain_type", ",", "*", "*", "kwargs", ",", ")", "@", "wraps", "(", "metric_fn", ")", "def", "inner_func", "(", "cls", ",", "execution_engine", ":", "SparkDFExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "True", ")", ")", "(", "data", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "metric_domain_kwargs", ",", "domain_type", "=", "domain_type", ")", "column_name", "=", 
"accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "column", "=", "data", "[", "column_name", "]", "expected_condition", "=", "metric_fn", "(", "cls", ",", "column", ",", "*", "*", "metric_value_kwargs", ",", "_table", "=", "data", ",", "_metrics", "=", "metrics", ",", "_compute_domain_kwargs", "=", "compute_domain_kwargs", ",", "_accessor_domain_kwargs", "=", "accessor_domain_kwargs", ",", ")", "if", "partial_fn_type", "==", "MetricPartialFunctionTypes", ".", "WINDOW_CONDITION_FN", ":", "if", "filter_column_isnull", ":", "compute_domain_kwargs", "=", "(", "execution_engine", ".", "add_column_row_condition", "(", "compute_domain_kwargs", ",", "column_name", "=", "column_name", ")", ")", "unexpected_condition", "=", "~", "expected_condition", "else", ":", "if", "filter_column_isnull", ":", "unexpected_condition", "=", "column", ".", "isNotNull", "(", ")", "&", "~", "expected_condition", "else", ":", "unexpected_condition", "=", "~", "expected_condition", "return", "(", "unexpected_condition", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "return", "inner_func", "return", "wrapper", "else", ":", "raise", "ValueError", "(", "\"Unsupported engine for column_condition_partial\"", ")" ]
[ 252, 0 ]
[ 495, 75 ]
python
en
['en', 'en', 'en']
True
_pandas_map_condition_unexpected_count
( cls, execution_engine: PandasExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Returns unexpected count for MapExpectations
def _pandas_map_condition_unexpected_count(
    cls,
    execution_engine: PandasExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Returns unexpected count for MapExpectations"""
    return np.count_nonzero(metrics["unexpected_condition"][0])
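For intuition, a minimal sketch (with invented data) of the shape this helper consumes: the resolved "unexpected_condition" metric is a tuple whose first element is a boolean Series marking unexpected rows, so the count reduces to np.count_nonzero.

import numpy as np
import pandas as pd

# Hypothetical resolved metric: (boolean condition series, compute kwargs, accessor kwargs)
unexpected_condition = (pd.Series([False, True, True, False]), {}, {"column": "x"})
print(np.count_nonzero(unexpected_condition[0]))  # 2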
[ "def", "_pandas_map_condition_unexpected_count", "(", "cls", ",", "execution_engine", ":", "PandasExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "return", "np", ".", "count_nonzero", "(", "metrics", "[", "\"unexpected_condition\"", "]", "[", "0", "]", ")" ]
[ 498, 0 ]
[ 507, 63 ]
python
en
['en', 'en', 'en']
True
_pandas_column_map_condition_values
( cls, execution_engine: PandasExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Return values from the specified domain that match the map-style metric in the metrics dictionary.
def _pandas_column_map_condition_values(
    cls,
    execution_engine: PandasExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Return values from the specified domain that match the map-style metric in the metrics dictionary."""
    (
        boolean_mapped_unexpected_values,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics["unexpected_condition"]
    df, _, _ = execution_engine.get_compute_domain(
        domain_kwargs=compute_domain_kwargs,
        domain_type=MetricDomainTypes.IDENTITY.value,
    )
    ###
    # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
    # currently handle filter_column_isnull differently than other map_fn / map_condition
    # cases.
    ###
    filter_column_isnull = kwargs.get(
        "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
    )

    if "column" not in accessor_domain_kwargs:
        raise ValueError(
            "_pandas_column_map_condition_values requires a column in accessor_domain_kwargs"
        )

    column_name = accessor_domain_kwargs["column"]

    if column_name not in metrics["table.columns"]:
        raise ge_exceptions.ExecutionEngineError(
            message=f'Error: The column "{column_name}" in BatchData does not exist.'
        )

    if filter_column_isnull:
        df = df[df[column_name].notnull()]

    domain_values = df[column_name]

    domain_values = domain_values[boolean_mapped_unexpected_values == True]

    result_format = metric_value_kwargs["result_format"]

    if result_format["result_format"] == "COMPLETE":
        return list(domain_values)
    else:
        return list(domain_values[: result_format["partial_unexpected_count"]])
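A small self-contained sketch (data made up) of the masking and result_format slicing performed above:

import pandas as pd

domain_values = pd.Series([3, 12, 7, 42])
mask = pd.Series([False, True, False, True])  # True marks unexpected rows
result_format = {"result_format": "BASIC", "partial_unexpected_count": 1}

unexpected = domain_values[mask]
print(list(unexpected))                                               # [12, 42]
print(list(unexpected[: result_format["partial_unexpected_count"]]))  # [12]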
[ "def", "_pandas_column_map_condition_values", "(", "cls", ",", "execution_engine", ":", "PandasExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "(", "boolean_mapped_unexpected_values", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "metrics", "[", "\"unexpected_condition\"", "]", "df", ",", "_", ",", "_", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "compute_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ",", ")", "###", "# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we", "# currently handle filter_column_isnull differently than other map_fn / map_condition", "# cases.", "###", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "if", "\"column\"", "not", "in", "accessor_domain_kwargs", ":", "raise", "ValueError", "(", "\"_pandas_column_map_condition_values requires a column in accessor_domain_kwargs\"", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "if", "filter_column_isnull", ":", "df", "=", "df", "[", "df", "[", "column_name", "]", ".", "notnull", "(", ")", "]", "domain_values", "=", "df", "[", "column_name", "]", "domain_values", "=", "domain_values", "[", "boolean_mapped_unexpected_values", "==", "True", "]", "result_format", "=", "metric_value_kwargs", "[", "\"result_format\"", "]", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "return", "list", "(", "domain_values", ")", "else", ":", "return", "list", "(", "domain_values", "[", ":", "result_format", "[", "\"partial_unexpected_count\"", "]", "]", ")" ]
[ 510, 0 ]
[ 562, 79 ]
python
en
['en', 'en', 'en']
True
_pandas_column_map_series_and_domain_values
( cls, execution_engine: PandasExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Return values from the specified domain that match the map-style metric in the metrics dictionary.
def _pandas_column_map_series_and_domain_values(
    cls,
    execution_engine: PandasExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Return values from the specified domain that match the map-style metric in the metrics dictionary."""
    (
        boolean_mapped_unexpected_values,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics["unexpected_condition"]
    (
        map_series,
        compute_domain_kwargs_2,
        accessor_domain_kwargs_2,
    ) = metrics["metric_partial_fn"]
    assert (
        compute_domain_kwargs == compute_domain_kwargs_2
    ), "map_series and condition must have the same compute domain"
    assert (
        accessor_domain_kwargs == accessor_domain_kwargs_2
    ), "map_series and condition must have the same accessor kwargs"
    df, _, _ = execution_engine.get_compute_domain(
        domain_kwargs=compute_domain_kwargs,
        domain_type=MetricDomainTypes.IDENTITY.value,
    )
    ###
    # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
    # currently handle filter_column_isnull differently than other map_fn / map_condition
    # cases.
    ###
    filter_column_isnull = kwargs.get(
        "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
    )

    if "column" not in accessor_domain_kwargs:
        raise ValueError(
            "_pandas_column_map_series_and_domain_values requires a column in accessor_domain_kwargs"
        )

    column_name = accessor_domain_kwargs["column"]

    if column_name not in metrics["table.columns"]:
        raise ge_exceptions.ExecutionEngineError(
            message=f'Error: The column "{column_name}" in BatchData does not exist.'
        )

    if filter_column_isnull:
        df = df[df[column_name].notnull()]

    domain_values = df[column_name]

    domain_values = domain_values[boolean_mapped_unexpected_values == True]
    map_series = map_series[boolean_mapped_unexpected_values == True]

    result_format = metric_value_kwargs["result_format"]

    if result_format["result_format"] == "COMPLETE":
        return (
            list(domain_values),
            list(map_series),
        )
    else:
        return (
            list(domain_values[: result_format["partial_unexpected_count"]]),
            list(map_series[: result_format["partial_unexpected_count"]]),
        )
[ "def", "_pandas_column_map_series_and_domain_values", "(", "cls", ",", "execution_engine", ":", "PandasExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "(", "boolean_mapped_unexpected_values", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "metrics", "[", "\"unexpected_condition\"", "]", "(", "map_series", ",", "compute_domain_kwargs_2", ",", "accessor_domain_kwargs_2", ",", ")", "=", "metrics", "[", "\"metric_partial_fn\"", "]", "assert", "(", "compute_domain_kwargs", "==", "compute_domain_kwargs_2", ")", ",", "\"map_series and condition must have the same compute domain\"", "assert", "(", "accessor_domain_kwargs", "==", "accessor_domain_kwargs_2", ")", ",", "\"map_series and condition must have the same accessor kwargs\"", "df", ",", "_", ",", "_", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "compute_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ",", ")", "###", "# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we", "# currently handle filter_column_isnull differently than other map_fn / map_condition", "# cases.", "###", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "if", "\"column\"", "not", "in", "accessor_domain_kwargs", ":", "raise", "ValueError", "(", "\"_pandas_column_map_series_and_domain_values requires a column in accessor_domain_kwargs\"", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "if", "filter_column_isnull", ":", "df", "=", "df", "[", "df", "[", "column_name", "]", ".", "notnull", "(", ")", "]", "domain_values", "=", "df", "[", "column_name", "]", "domain_values", "=", "domain_values", "[", "boolean_mapped_unexpected_values", "==", "True", "]", "map_series", "=", "map_series", "[", "boolean_mapped_unexpected_values", "==", "True", "]", "result_format", "=", "metric_value_kwargs", "[", "\"result_format\"", "]", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "return", "(", "list", "(", "domain_values", ")", ",", "list", "(", "map_series", ")", ",", ")", "else", ":", "return", "(", "list", "(", "domain_values", "[", ":", "result_format", "[", "\"partial_unexpected_count\"", "]", "]", ")", ",", "list", "(", "map_series", "[", ":", "result_format", "[", "\"partial_unexpected_count\"", "]", "]", ")", ",", ")" ]
[ 565, 0 ]
[ 635, 9 ]
python
en
['en', 'en', 'en']
True
_pandas_column_map_condition_value_counts
( cls, execution_engine: PandasExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Returns respective value counts for distinct column values
def _pandas_column_map_condition_value_counts(
    cls,
    execution_engine: PandasExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Returns respective value counts for distinct column values"""
    (
        boolean_mapped_unexpected_values,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    df, _, _ = execution_engine.get_compute_domain(
        domain_kwargs=compute_domain_kwargs,
        domain_type=MetricDomainTypes.IDENTITY.value,
    )
    ###
    # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
    # currently handle filter_column_isnull differently than other map_fn / map_condition
    # cases.
    ###
    filter_column_isnull = kwargs.get(
        "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
    )

    # Check for the "column" key before indexing into accessor_domain_kwargs, so a
    # missing column raises the intended ValueError rather than a KeyError.
    if "column" not in accessor_domain_kwargs:
        raise ValueError(
            "_pandas_column_map_condition_value_counts requires a column in accessor_domain_kwargs"
        )

    column_name = accessor_domain_kwargs["column"]

    if column_name not in metrics["table.columns"]:
        raise ge_exceptions.ExecutionEngineError(
            message=f'Error: The column "{column_name}" in BatchData does not exist.'
        )

    if filter_column_isnull:
        df = df[df[column_name].notnull()]

    domain_values = df[column_name]

    result_format = metric_value_kwargs["result_format"]
    value_counts = None
    try:
        value_counts = domain_values[boolean_mapped_unexpected_values].value_counts()
    except ValueError:
        try:
            value_counts = (
                domain_values[boolean_mapped_unexpected_values]
                .apply(tuple)
                .value_counts()
            )
        except ValueError:
            pass

    # `if not value_counts` is ambiguous for a multi-element Series; test for None instead.
    if value_counts is None:
        raise ge_exceptions.MetricError("Unable to compute value counts")

    if result_format["result_format"] == "COMPLETE":
        return value_counts
    else:
        # Slice (rather than integer-index) to return the first partial_unexpected_count entries.
        return value_counts[: result_format["partial_unexpected_count"]]
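Illustrative only (made-up series): the masked value_counts pattern used above.

import pandas as pd

domain_values = pd.Series(["a", "b", "b", "c", "b"])
mask = pd.Series([False, True, True, False, True])  # True marks unexpected rows
print(domain_values[mask].value_counts())
# b    3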
[ "def", "_pandas_column_map_condition_value_counts", "(", "cls", ",", "execution_engine", ":", "PandasExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "(", "boolean_mapped_unexpected_values", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "metrics", ".", "get", "(", "\"unexpected_condition\"", ")", "df", ",", "_", ",", "_", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "compute_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ",", ")", "###", "# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we", "# currently handle filter_column_isnull differently than other map_fn / map_condition", "# cases.", "###", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "\"column\"", "not", "in", "accessor_domain_kwargs", ":", "raise", "ValueError", "(", "\"_pandas_column_map_condition_value_counts requires a column in accessor_domain_kwargs\"", ")", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "if", "filter_column_isnull", ":", "df", "=", "df", "[", "df", "[", "column_name", "]", ".", "notnull", "(", ")", "]", "domain_values", "=", "df", "[", "column_name", "]", "result_format", "=", "metric_value_kwargs", "[", "\"result_format\"", "]", "value_counts", "=", "None", "try", ":", "value_counts", "=", "domain_values", "[", "boolean_mapped_unexpected_values", "]", ".", "value_counts", "(", ")", "except", "ValueError", ":", "try", ":", "value_counts", "=", "(", "domain_values", "[", "boolean_mapped_unexpected_values", "]", ".", "apply", "(", "tuple", ")", ".", "value_counts", "(", ")", ")", "except", "ValueError", ":", "pass", "if", "not", "value_counts", ":", "raise", "ge_exceptions", ".", "MetricError", "(", "\"Unable to compute value counts\"", ")", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "return", "value_counts", "else", ":", "return", "value_counts", "[", "result_format", "[", "\"partial_unexpected_count\"", "]", "]" ]
[ 687, 0 ]
[ 753, 70 ]
python
da
['da', 'fr', 'en']
False
_pandas_map_condition_rows
( cls, execution_engine: PandasExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Return values from the specified domain (ignoring the column constraint) that match the map-style metric in the metrics dictionary.
def _pandas_map_condition_rows(
    cls,
    execution_engine: PandasExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Return values from the specified domain (ignoring the column constraint) that match the map-style metric in the metrics dictionary."""
    (
        boolean_mapped_unexpected_values,
        compute_domain_kwargs,
        accessor_domain_kwargs,
    ) = metrics.get("unexpected_condition")
    df, _, _ = execution_engine.get_compute_domain(
        domain_kwargs=compute_domain_kwargs,
        domain_type=MetricDomainTypes.IDENTITY.value,
    )
    ###
    # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we
    # currently handle filter_column_isnull differently than other map_fn / map_condition
    # cases.
    ###
    filter_column_isnull = kwargs.get(
        "filter_column_isnull", getattr(cls, "filter_column_isnull", False)
    )

    if "column" in accessor_domain_kwargs:
        column_name = accessor_domain_kwargs["column"]

        if column_name not in metrics["table.columns"]:
            raise ge_exceptions.ExecutionEngineError(
                message=f'Error: The column "{column_name}" in BatchData does not exist.'
            )

        if filter_column_isnull:
            df = df[df[column_name].notnull()]

    result_format = metric_value_kwargs["result_format"]

    df = df[boolean_mapped_unexpected_values]

    if result_format["result_format"] == "COMPLETE":
        return df

    return df.iloc[: result_format["partial_unexpected_count"]]
[ "def", "_pandas_map_condition_rows", "(", "cls", ",", "execution_engine", ":", "PandasExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "(", "boolean_mapped_unexpected_values", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")", "=", "metrics", ".", "get", "(", "\"unexpected_condition\"", ")", "df", ",", "_", ",", "_", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "compute_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ",", ")", "###", "# NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we", "# currently handle filter_column_isnull differently than other map_fn / map_condition", "# cases.", "###", "filter_column_isnull", "=", "kwargs", ".", "get", "(", "\"filter_column_isnull\"", ",", "getattr", "(", "cls", ",", "\"filter_column_isnull\"", ",", "False", ")", ")", "if", "\"column\"", "in", "accessor_domain_kwargs", ":", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "if", "filter_column_isnull", ":", "df", "=", "df", "[", "df", "[", "column_name", "]", ".", "notnull", "(", ")", "]", "result_format", "=", "metric_value_kwargs", "[", "\"result_format\"", "]", "df", "=", "df", "[", "boolean_mapped_unexpected_values", "]", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "return", "df", "return", "df", ".", "iloc", "[", ":", "result_format", "[", "\"partial_unexpected_count\"", "]", "]" ]
[ 756, 0 ]
[ 803, 63 ]
python
en
['en', 'en', 'en']
True
_sqlalchemy_map_condition_unexpected_count_aggregate_fn
( cls, execution_engine: SqlAlchemyExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Returns unexpected count for MapExpectations
def _sqlalchemy_map_condition_unexpected_count_aggregate_fn(
    cls,
    execution_engine: SqlAlchemyExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Returns unexpected count for MapExpectations"""
    unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(
        "unexpected_condition"
    )
    return (
        sa.func.sum(
            sa.case(
                [(unexpected_condition, 1)],
                else_=0,
            )
        ),
        compute_domain_kwargs,
        accessor_domain_kwargs,
    )
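The aggregate is a conditional sum. Rendered standalone (hypothetical condition, SQLAlchemy 1.x-style sa.case with a list of whens, matching the code above), it compiles to SUM(CASE WHEN ... THEN 1 ELSE 0 END):

import sqlalchemy as sa

unexpected_condition = sa.column("age") < 0  # hypothetical condition
expr = sa.func.sum(sa.case([(unexpected_condition, 1)], else_=0))
print(expr)  # renders roughly as: sum(CASE WHEN (age < :age_1) THEN :param_1 ELSE :param_2 END)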
[ "def", "_sqlalchemy_map_condition_unexpected_count_aggregate_fn", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "unexpected_condition", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "=", "metrics", ".", "get", "(", "\"unexpected_condition\"", ")", "return", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "unexpected_condition", ",", "1", ")", "]", ",", "else_", "=", "0", ",", ")", ")", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", ")" ]
[ 806, 0 ]
[ 827, 5 ]
python
en
['en', 'en', 'en']
True
_sqlalchemy_map_condition_unexpected_count_value
( cls, execution_engine: SqlAlchemyExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Returns unexpected count for MapExpectations. This is a *value* metric, which is useful when the unexpected_condition is a window function.
def _sqlalchemy_map_condition_unexpected_count_value(
    cls,
    execution_engine: SqlAlchemyExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """Returns unexpected count for MapExpectations. This is a *value* metric, which is useful
    when the unexpected_condition is a window function.
    """
    unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(
        "unexpected_condition"
    )
    (
        selectable,
        _,
        _,
    ) = execution_engine.get_compute_domain(
        compute_domain_kwargs, domain_type=MetricDomainTypes.IDENTITY.value
    )
    count_case_statement: List[sa.sql.elements.Label] = [
        sa.case(
            [
                (
                    unexpected_condition,
                    1,
                )
            ],
            else_=0,
        ).label("condition")
    ]
    try:
        if execution_engine.engine.dialect.name.lower() == "mssql":
            temp_table_name: str = f"#ge_tmp_{str(uuid.uuid4())[:8]}"
            with execution_engine.engine.begin():
                metadata: sa.MetaData = sa.MetaData(execution_engine.engine)
                temp_table_obj: sa.Table = sa.Table(
                    temp_table_name,
                    metadata,
                    sa.Column(
                        "condition", sa.Integer, primary_key=False, nullable=False
                    ),
                )
                temp_table_obj.create(execution_engine.engine, checkfirst=True)

                inner_case_query: sa.sql.dml.Insert = (
                    temp_table_obj.insert().from_select(
                        count_case_statement,
                        sa.select(count_case_statement).select_from(selectable),
                    )
                )
                execution_engine.engine.execute(inner_case_query)

                selectable_count = temp_table_obj
        else:
            selectable_count = sa.select(count_case_statement).select_from(selectable)

        unexpected_count_query: sa.Select = (
            sa.select(
                [
                    sa.func.sum(sa.column("condition")).label("unexpected_count"),
                ]
            )
            .select_from(selectable_count)
            .alias("UnexpectedCountSubquery")
        )

        unexpected_count = execution_engine.engine.execute(
            sa.select(
                [
                    unexpected_count_query.c.unexpected_count,
                ]
            )
        ).scalar()
    except OperationalError as oe:
        exception_message: str = f"An SQL execution Exception occurred: {str(oe)}."
        raise ge_exceptions.ExecutionEngineError(message=exception_message)

    return convert_to_json_serializable(unexpected_count)
[ "def", "_sqlalchemy_map_condition_unexpected_count_value", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "unexpected_condition", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "=", "metrics", ".", "get", "(", "\"unexpected_condition\"", ")", "(", "selectable", ",", "_", ",", "_", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "compute_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ")", "count_case_statement", ":", "List", "[", "sa", ".", "sql", ".", "elements", ".", "Label", "]", "=", "[", "sa", ".", "case", "(", "[", "(", "unexpected_condition", ",", "1", ",", ")", "]", ",", "else_", "=", "0", ",", ")", ".", "label", "(", "\"condition\"", ")", "]", "try", ":", "if", "execution_engine", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"mssql\"", ":", "temp_table_name", ":", "str", "=", "f\"#ge_tmp_{str(uuid.uuid4())[:8]}\"", "with", "execution_engine", ".", "engine", ".", "begin", "(", ")", ":", "metadata", ":", "sa", ".", "MetaData", "=", "sa", ".", "MetaData", "(", "execution_engine", ".", "engine", ")", "temp_table_obj", ":", "sa", ".", "Table", "=", "sa", ".", "Table", "(", "temp_table_name", ",", "metadata", ",", "sa", ".", "Column", "(", "\"condition\"", ",", "sa", ".", "Integer", ",", "primary_key", "=", "False", ",", "nullable", "=", "False", ")", ",", ")", "temp_table_obj", ".", "create", "(", "execution_engine", ".", "engine", ",", "checkfirst", "=", "True", ")", "inner_case_query", ":", "sa", ".", "sql", ".", "dml", ".", "Insert", "=", "(", "temp_table_obj", ".", "insert", "(", ")", ".", "from_select", "(", "count_case_statement", ",", "sa", ".", "select", "(", "count_case_statement", ")", ".", "select_from", "(", "selectable", ")", ",", ")", ")", "execution_engine", ".", "engine", ".", "execute", "(", "inner_case_query", ")", "selectable_count", "=", "temp_table_obj", "else", ":", "selectable_count", "=", "sa", ".", "select", "(", "count_case_statement", ")", ".", "select_from", "(", "selectable", ")", "unexpected_count_query", ":", "sa", ".", "Select", "=", "(", "sa", ".", "select", "(", "[", "sa", ".", "func", ".", "sum", "(", "sa", ".", "column", "(", "\"condition\"", ")", ")", ".", "label", "(", "\"unexpected_count\"", ")", ",", "]", ")", ".", "select_from", "(", "selectable_count", ")", ".", "alias", "(", "\"UnexpectedCountSubquery\"", ")", ")", "unexpected_count", "=", "execution_engine", ".", "engine", ".", "execute", "(", "sa", ".", "select", "(", "[", "unexpected_count_query", ".", "c", ".", "unexpected_count", ",", "]", ")", ")", ".", "scalar", "(", ")", "except", "OperationalError", "as", "oe", ":", "exception_message", ":", "str", "=", "f\"An SQL execution Exception occurred: {str(oe)}.\"", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "exception_message", ")", "return", "convert_to_json_serializable", "(", "unexpected_count", ")" ]
[ 830, 0 ]
[ 908, 57 ]
python
en
['en', 'en', 'en']
True
_sqlalchemy_column_map_condition_values
( cls, execution_engine: SqlAlchemyExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Particularly for the purpose of finding unexpected values, returns all the metric values which do not meet an expected Expectation condition for ColumnMapExpectation Expectations.
def _sqlalchemy_column_map_condition_values(
    cls,
    execution_engine: SqlAlchemyExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """
    Particularly for the purpose of finding unexpected values, returns all the metric values which do not meet an
    expected Expectation condition for ColumnMapExpectation Expectations.
    """
    unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(
        "unexpected_condition"
    )
    (
        selectable,
        _,
        _,
    ) = execution_engine.get_compute_domain(
        compute_domain_kwargs, domain_type=MetricDomainTypes.IDENTITY.value
    )

    if "column" not in accessor_domain_kwargs:
        raise ValueError(
            "_sqlalchemy_column_map_condition_values requires a column in accessor_domain_kwargs"
        )

    column_name = accessor_domain_kwargs["column"]

    if column_name not in metrics["table.columns"]:
        raise ge_exceptions.ExecutionEngineError(
            message=f'Error: The column "{column_name}" in BatchData does not exist.'
        )

    query = (
        sa.select([sa.column(column_name).label("unexpected_values")])
        .select_from(selectable)
        .where(unexpected_condition)
    )
    result_format = metric_value_kwargs["result_format"]
    if result_format["result_format"] != "COMPLETE":
        query = query.limit(result_format["partial_unexpected_count"])
    return [
        val.unexpected_values
        for val in execution_engine.engine.execute(query).fetchall()
    ]
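An end-to-end sketch of the same select/where shape, assuming SQLAlchemy 1.x (matching the engine.execute style used above) against an invented in-memory SQLite table:

import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
engine.execute("CREATE TABLE t (x INTEGER)")
engine.execute("INSERT INTO t (x) VALUES (1), (50), (2), (60)")

selectable = sa.table("t", sa.column("x"))
unexpected_condition = sa.column("x") > 10  # stand-in for a real map condition

query = (
    sa.select([sa.column("x").label("unexpected_values")])
    .select_from(selectable)
    .where(unexpected_condition)
)
print([row.unexpected_values for row in engine.execute(query).fetchall()])  # [50, 60]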
[ "def", "_sqlalchemy_column_map_condition_values", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "unexpected_condition", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "=", "metrics", ".", "get", "(", "\"unexpected_condition\"", ")", "(", "selectable", ",", "_", ",", "_", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "compute_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ")", "if", "\"column\"", "not", "in", "accessor_domain_kwargs", ":", "raise", "ValueError", "(", "\"_sqlalchemy_column_map_condition_values requires a column in accessor_domain_kwargs\"", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "query", "=", "(", "sa", ".", "select", "(", "[", "sa", ".", "column", "(", "column_name", ")", ".", "label", "(", "\"unexpected_values\"", ")", "]", ")", ".", "select_from", "(", "selectable", ")", ".", "where", "(", "unexpected_condition", ")", ")", "result_format", "=", "metric_value_kwargs", "[", "\"result_format\"", "]", "if", "result_format", "[", "\"result_format\"", "]", "!=", "\"COMPLETE\"", ":", "query", "=", "query", ".", "limit", "(", "result_format", "[", "\"partial_unexpected_count\"", "]", ")", "return", "[", "val", ".", "unexpected_values", "for", "val", "in", "execution_engine", ".", "engine", ".", "execute", "(", "query", ")", ".", "fetchall", "(", ")", "]" ]
[ 911, 0 ]
[ 955, 5 ]
python
en
['en', 'error', 'th']
False
_sqlalchemy_column_map_condition_value_counts
( cls, execution_engine: SqlAlchemyExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Returns value counts for all the metric values which do not meet an expected Expectation condition for instances of ColumnMapExpectation.
def _sqlalchemy_column_map_condition_value_counts(
    cls,
    execution_engine: SqlAlchemyExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """
    Returns value counts for all the metric values which do not meet an expected Expectation condition for instances
    of ColumnMapExpectation.
    """
    unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(
        "unexpected_condition"
    )
    (
        selectable,
        _,
        _,
    ) = execution_engine.get_compute_domain(
        compute_domain_kwargs, domain_type=MetricDomainTypes.IDENTITY.value
    )

    if "column" not in accessor_domain_kwargs:
        raise ValueError(
            "_sqlalchemy_column_map_condition_value_counts requires a column in accessor_domain_kwargs"
        )

    column_name = accessor_domain_kwargs["column"]

    if column_name not in metrics["table.columns"]:
        raise ge_exceptions.ExecutionEngineError(
            message=f'Error: The column "{column_name}" in BatchData does not exist.'
        )

    column: sa.Column = sa.column(column_name)
    return execution_engine.engine.execute(
        sa.select([column, sa.func.count(column)])
        .select_from(selectable)
        .where(unexpected_condition)
        .group_by(column)
    ).fetchall()
[ "def", "_sqlalchemy_column_map_condition_value_counts", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "unexpected_condition", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "=", "metrics", ".", "get", "(", "\"unexpected_condition\"", ")", "(", "selectable", ",", "_", ",", "_", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "compute_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ")", "if", "\"column\"", "not", "in", "accessor_domain_kwargs", ":", "raise", "ValueError", "(", "\"_sqlalchemy_column_map_condition_value_counts requires a column in accessor_domain_kwargs\"", ")", "column_name", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "if", "column_name", "not", "in", "metrics", "[", "\"table.columns\"", "]", ":", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "f'Error: The column \"{column_name}\" in BatchData does not exist.'", ")", "column", ":", "sa", ".", "Column", "=", "sa", ".", "column", "(", "column_name", ")", "return", "execution_engine", ".", "engine", ".", "execute", "(", "sa", ".", "select", "(", "[", "column", ",", "sa", ".", "func", ".", "count", "(", "column", ")", "]", ")", ".", "select_from", "(", "selectable", ")", ".", "where", "(", "unexpected_condition", ")", ".", "group_by", "(", "column", ")", ")", ".", "fetchall", "(", ")" ]
[ 958, 0 ]
[ 996, 16 ]
python
en
['en', 'error', 'th']
False
_sqlalchemy_map_condition_rows
( cls, execution_engine: SqlAlchemyExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], **kwargs, )
Returns all rows of the metric values which do not meet an expected Expectation condition for instances of ColumnMapExpectation.
def _sqlalchemy_map_condition_rows(
    cls,
    execution_engine: SqlAlchemyExecutionEngine,
    metric_domain_kwargs: Dict,
    metric_value_kwargs: Dict,
    metrics: Dict[str, Any],
    **kwargs,
):
    """
    Returns all rows of the metric values which do not meet an expected Expectation condition for instances
    of ColumnMapExpectation.
    """
    unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(
        "unexpected_condition"
    )
    (
        selectable,
        _,
        _,
    ) = execution_engine.get_compute_domain(
        compute_domain_kwargs, domain_type=MetricDomainTypes.IDENTITY.value
    )
    query = (
        sa.select([sa.text("*")]).select_from(selectable).where(unexpected_condition)
    )
    result_format = metric_value_kwargs["result_format"]
    if result_format["result_format"] != "COMPLETE":
        query = query.limit(result_format["partial_unexpected_count"])
    try:
        return execution_engine.engine.execute(query).fetchall()
    except OperationalError as oe:
        exception_message: str = f"An SQL execution Exception occurred: {str(oe)}."
        raise ge_exceptions.ExecutionEngineError(message=exception_message)
[ "def", "_sqlalchemy_map_condition_rows", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "str", ",", "Any", "]", ",", "*", "*", "kwargs", ",", ")", ":", "unexpected_condition", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "=", "metrics", ".", "get", "(", "\"unexpected_condition\"", ")", "(", "selectable", ",", "_", ",", "_", ",", ")", "=", "execution_engine", ".", "get_compute_domain", "(", "compute_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ")", "query", "=", "(", "sa", ".", "select", "(", "[", "sa", ".", "text", "(", "\"*\"", ")", "]", ")", ".", "select_from", "(", "selectable", ")", ".", "where", "(", "unexpected_condition", ")", ")", "result_format", "=", "metric_value_kwargs", "[", "\"result_format\"", "]", "if", "result_format", "[", "\"result_format\"", "]", "!=", "\"COMPLETE\"", ":", "query", "=", "query", ".", "limit", "(", "result_format", "[", "\"partial_unexpected_count\"", "]", ")", "try", ":", "return", "execution_engine", ".", "engine", ".", "execute", "(", "query", ")", ".", "fetchall", "(", ")", "except", "OperationalError", "as", "oe", ":", "exception_message", ":", "str", "=", "f\"An SQL execution Exception occurred: {str(oe)}.\"", "raise", "ge_exceptions", ".", "ExecutionEngineError", "(", "message", "=", "exception_message", ")" ]
[ 999, 0 ]
[ 1028, 75 ]
python
en
['en', 'error', 'th']
False
serve_listeners
( handler, listeners, *, handler_nursery=None, task_status=trio.TASK_STATUS_IGNORED )
r"""Listen for incoming connections on ``listeners``, and for each one start a task running ``handler(stream)``. .. warning:: If ``handler`` raises an exception, then this function doesn't do anything special to catch it – so by default the exception will propagate out and crash your server. If you don't want this, then catch exceptions inside your ``handler``, or use a ``handler_nursery`` object that responds to exceptions in some other way. Args: handler: An async callable, that will be invoked like ``handler_nursery.start_soon(handler, stream)`` for each incoming connection. listeners: A list of :class:`~trio.abc.Listener` objects. :func:`serve_listeners` takes responsibility for closing them. handler_nursery: The nursery used to start handlers, or any object with a ``start_soon`` method. If ``None`` (the default), then :func:`serve_listeners` will create a new nursery internally and use that. task_status: This function can be used with ``nursery.start``, which will return ``listeners``. Returns: This function never returns unless cancelled. Resource handling: If ``handler`` neglects to close the ``stream``, then it will be closed using :func:`trio.aclose_forcefully`. Error handling: Most errors coming from :meth:`~trio.abc.Listener.accept` are allowed to propagate out (crashing the server in the process). However, some errors – those which indicate that the server is temporarily overloaded – are handled specially. These are :class:`OSError`\s with one of the following errnos: * ``EMFILE``: process is out of file descriptors * ``ENFILE``: system is out of file descriptors * ``ENOBUFS``, ``ENOMEM``: the kernel hit some sort of memory limitation when trying to create a socket object When :func:`serve_listeners` gets one of these errors, then it: * Logs the error to the standard library logger ``trio.serve_listeners`` (level = ERROR, with exception information included). By default this causes it to be printed to stderr. * Waits 100 ms before calling ``accept`` again, in hopes that the system will recover.
r"""Listen for incoming connections on ``listeners``, and for each one start a task running ``handler(stream)``.
async def serve_listeners(
    handler, listeners, *, handler_nursery=None, task_status=trio.TASK_STATUS_IGNORED
):
    r"""Listen for incoming connections on ``listeners``, and for each one
    start a task running ``handler(stream)``.

    .. warning::

       If ``handler`` raises an exception, then this function doesn't do
       anything special to catch it – so by default the exception will
       propagate out and crash your server. If you don't want this, then
       catch exceptions inside your ``handler``, or use a ``handler_nursery``
       object that responds to exceptions in some other way.

    Args:
      handler: An async callable, that will be invoked like
          ``handler_nursery.start_soon(handler, stream)`` for each incoming
          connection.

      listeners: A list of :class:`~trio.abc.Listener` objects.
          :func:`serve_listeners` takes responsibility for closing them.

      handler_nursery: The nursery used to start handlers, or any object with
          a ``start_soon`` method. If ``None`` (the default), then
          :func:`serve_listeners` will create a new nursery internally and
          use that.

      task_status: This function can be used with ``nursery.start``, which
          will return ``listeners``.

    Returns:
      This function never returns unless cancelled.

    Resource handling:
      If ``handler`` neglects to close the ``stream``, then it will be closed
      using :func:`trio.aclose_forcefully`.

    Error handling:
      Most errors coming from :meth:`~trio.abc.Listener.accept` are allowed
      to propagate out (crashing the server in the process). However, some
      errors – those which indicate that the server is temporarily overloaded
      – are handled specially. These are :class:`OSError`\s with one of the
      following errnos:

      * ``EMFILE``: process is out of file descriptors
      * ``ENFILE``: system is out of file descriptors
      * ``ENOBUFS``, ``ENOMEM``: the kernel hit some sort of memory
        limitation when trying to create a socket object

      When :func:`serve_listeners` gets one of these errors, then it:

      * Logs the error to the standard library logger ``trio.serve_listeners``
        (level = ERROR, with exception information included). By default this
        causes it to be printed to stderr.
      * Waits 100 ms before calling ``accept`` again, in hopes that the
        system will recover.

    """
    async with trio.open_nursery() as nursery:
        if handler_nursery is None:
            handler_nursery = nursery
        for listener in listeners:
            nursery.start_soon(_serve_one_listener, listener, handler_nursery, handler)
        # The listeners are already queueing connections when we're called,
        # but we wait until the end to call started() just in case we get an
        # error or whatever.
        task_status.started(listeners)
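A minimal echo server wired through serve_listeners (handler name and port are invented); trio.open_tcp_listeners is the usual way to produce the listeners list:

import trio

async def echo_handler(stream):
    async with stream:
        # trio receive streams can be iterated chunk-by-chunk until EOF
        async for chunk in stream:
            await stream.send_all(chunk)

async def main():
    listeners = await trio.open_tcp_listeners(12345)  # arbitrary port
    await trio.serve_listeners(echo_handler, listeners)

# trio.run(main)  # runs forever unless cancelled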
[ "async", "def", "serve_listeners", "(", "handler", ",", "listeners", ",", "*", ",", "handler_nursery", "=", "None", ",", "task_status", "=", "trio", ".", "TASK_STATUS_IGNORED", ")", ":", "async", "with", "trio", ".", "open_nursery", "(", ")", "as", "nursery", ":", "if", "handler_nursery", "is", "None", ":", "handler_nursery", "=", "nursery", "for", "listener", "in", "listeners", ":", "nursery", ".", "start_soon", "(", "_serve_one_listener", ",", "listener", ",", "handler_nursery", ",", "handler", ")", "# The listeners are already queueing connections when we're called,", "# but we wait until the end to call started() just in case we get an", "# error or whatever.", "task_status", ".", "started", "(", "listeners", ")" ]
[ 50, 0 ]
[ 120, 38 ]
python
en
['en', 'en', 'en']
True
_get_fields
(attrs, field_class, pop=False, ordered=False)
Get fields from a class. If ordered=True, fields will be sorted by creation index. :param attrs: Mapping of class attributes :param type field_class: Base field class :param bool pop: Remove matching fields
Get fields from a class. If ordered=True, fields will be sorted by creation index.
def _get_fields(attrs, field_class, pop=False, ordered=False):
    """Get fields from a class. If ordered=True, fields will be sorted by creation index.

    :param attrs: Mapping of class attributes
    :param type field_class: Base field class
    :param bool pop: Remove matching fields
    """
    fields = [
        (field_name, field_value)
        for field_name, field_value in attrs.items()
        if is_instance_or_subclass(field_value, field_class)
    ]
    if pop:
        for field_name, _ in fields:
            del attrs[field_name]
    if ordered:
        fields.sort(key=lambda pair: pair[1]._creation_index)
    return fields
[ "def", "_get_fields", "(", "attrs", ",", "field_class", ",", "pop", "=", "False", ",", "ordered", "=", "False", ")", ":", "fields", "=", "[", "(", "field_name", ",", "field_value", ")", "for", "field_name", ",", "field_value", "in", "attrs", ".", "items", "(", ")", "if", "is_instance_or_subclass", "(", "field_value", ",", "field_class", ")", "]", "if", "pop", ":", "for", "field_name", ",", "_", "in", "fields", ":", "del", "attrs", "[", "field_name", "]", "if", "ordered", ":", "fields", ".", "sort", "(", "key", "=", "lambda", "pair", ":", "pair", "[", "1", "]", ".", "_creation_index", ")", "return", "fields" ]
[ 46, 0 ]
[ 63, 17 ]
python
en
['en', 'en', 'en']
True
_get_fields_by_mro
(klass, field_class, ordered=False)
Collect fields from a class, following its method resolution order. The class itself is excluded from the search; only its parents are checked. Get fields from ``_declared_fields`` if available, else use ``__dict__``. :param type klass: Class whose fields to retrieve :param type field_class: Base field class
Collect fields from a class, following its method resolution order. The class itself is excluded from the search; only its parents are checked. Get fields from ``_declared_fields`` if available, else use ``__dict__``.
def _get_fields_by_mro(klass, field_class, ordered=False):
    """Collect fields from a class, following its method resolution order. The
    class itself is excluded from the search; only its parents are checked. Get
    fields from ``_declared_fields`` if available, else use ``__dict__``.

    :param type klass: Class whose fields to retrieve
    :param type field_class: Base field class
    """
    mro = inspect.getmro(klass)
    # Loop over mro in reverse to maintain correct order of fields
    return sum(
        (
            _get_fields(
                getattr(base, "_declared_fields", base.__dict__),
                field_class,
                ordered=ordered,
            )
            for base in mro[:0:-1]
        ),
        [],
    )
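The visible effect of this MRO walk, shown through the public API (using upstream marshmallow, which this shaded copy mirrors): fields declared on parents come before fields declared on the subclass.

from marshmallow import Schema, fields

class BaseSchema(Schema):
    id = fields.Int()

class UserSchema(BaseSchema):
    name = fields.Str()

# Inherited fields are collected first, then the subclass's own fields
print(list(UserSchema().fields))  # ['id', 'name']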
[ "def", "_get_fields_by_mro", "(", "klass", ",", "field_class", ",", "ordered", "=", "False", ")", ":", "mro", "=", "inspect", ".", "getmro", "(", "klass", ")", "# Loop over mro in reverse to maintain correct order of fields", "return", "sum", "(", "(", "_get_fields", "(", "getattr", "(", "base", ",", "\"_declared_fields\"", ",", "base", ".", "__dict__", ")", ",", "field_class", ",", "ordered", "=", "ordered", ",", ")", "for", "base", "in", "mro", "[", ":", "0", ":", "-", "1", "]", ")", ",", "[", "]", ",", ")" ]
[ 68, 0 ]
[ 88, 5 ]
python
en
['en', 'en', 'en']
True
SchemaMeta.get_declared_fields
( mcs, klass: type, cls_fields: typing.List, inherited_fields: typing.List, dict_cls: type, )
Returns a dictionary of field_name => `Field` pairs declared on the class. This is exposed mainly so that plugins can add additional fields, e.g. fields computed from class Meta options. :param klass: The class object. :param cls_fields: The fields declared on the class, including those added by the ``include`` class Meta option. :param inherited_fields: Inherited fields. :param dict_cls: Either `dict` or `OrderedDict`, depending on whether the user specified `ordered=True`.
Returns a dictionary of field_name => `Field` pairs declared on the class. This is exposed mainly so that plugins can add additional fields, e.g. fields computed from class Meta options.
def get_declared_fields(
    mcs,
    klass: type,
    cls_fields: typing.List,
    inherited_fields: typing.List,
    dict_cls: type,
):
    """Returns a dictionary of field_name => `Field` pairs declared on the class.
    This is exposed mainly so that plugins can add additional fields, e.g. fields
    computed from class Meta options.

    :param klass: The class object.
    :param cls_fields: The fields declared on the class, including those added
        by the ``include`` class Meta option.
    :param inherited_fields: Inherited fields.
    :param dict_cls: Either `dict` or `OrderedDict`, depending on whether
        the user specified `ordered=True`.
    """
    return dict_cls(inherited_fields + cls_fields)
[ "def", "get_declared_fields", "(", "mcs", ",", "klass", ":", "type", ",", "cls_fields", ":", "typing", ".", "List", ",", "inherited_fields", ":", "typing", ".", "List", ",", "dict_cls", ":", "type", ",", ")", ":", "return", "dict_cls", "(", "inherited_fields", "+", "cls_fields", ")" ]
[ 134, 4 ]
[ 152, 54 ]
python
en
['en', 'en', 'en']
True
SchemaMeta.resolve_hooks
(cls)
Add in the decorated processors. By doing this after constructing the class, we let standard inheritance do all the hard work.
Add in the decorated processors
def resolve_hooks(cls) -> typing.Dict[types.Tag, typing.List[str]]:
    """Add in the decorated processors

    By doing this after constructing the class, we let standard inheritance
    do all the hard work.
    """
    mro = inspect.getmro(cls)

    hooks = defaultdict(list)  # type: typing.Dict[types.Tag, typing.List[str]]

    for attr_name in dir(cls):
        # Need to look up the actual descriptor, not whatever might be
        # bound to the class. This needs to come from the __dict__ of the
        # declaring class.
        for parent in mro:
            try:
                attr = parent.__dict__[attr_name]
            except KeyError:
                continue
            else:
                break
        else:
            # In case we didn't find the attribute and didn't break above.
            # We should never hit this - it's just here for completeness
            # to exclude the possibility of attr being undefined.
            continue

        try:
            hook_config = attr.__marshmallow_hook__
        except AttributeError:
            pass
        else:
            for key in hook_config.keys():
                # Use name here so we can get the bound method later, in
                # case the processor was a descriptor or something.
                hooks[key].append(attr_name)

    return hooks
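In practice, the ``__marshmallow_hook__`` attribute this scans for is set by the processor decorators. A hedged sketch using upstream marshmallow's @pre_load (schema and data invented):

from marshmallow import Schema, fields, pre_load

class UserSchema(Schema):
    name = fields.Str()

    @pre_load
    def strip_name(self, data, **kwargs):
        # Registered as a hook via __marshmallow_hook__, then found by resolve_hooks
        data["name"] = data["name"].strip()
        return data

print(UserSchema().load({"name": "  Ada  "}))  # {'name': 'Ada'}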
[ "def", "resolve_hooks", "(", "cls", ")", "->", "typing", ".", "Dict", "[", "types", ".", "Tag", ",", "typing", ".", "List", "[", "str", "]", "]", ":", "mro", "=", "inspect", ".", "getmro", "(", "cls", ")", "hooks", "=", "defaultdict", "(", "list", ")", "# type: typing.Dict[types.Tag, typing.List[str]]", "for", "attr_name", "in", "dir", "(", "cls", ")", ":", "# Need to look up the actual descriptor, not whatever might be", "# bound to the class. This needs to come from the __dict__ of the", "# declaring class.", "for", "parent", "in", "mro", ":", "try", ":", "attr", "=", "parent", ".", "__dict__", "[", "attr_name", "]", "except", "KeyError", ":", "continue", "else", ":", "break", "else", ":", "# In case we didn't find the attribute and didn't break above.", "# We should never hit this - it's just here for completeness", "# to exclude the possibility of attr being undefined.", "continue", "try", ":", "hook_config", "=", "attr", ".", "__marshmallow_hook__", "except", "AttributeError", ":", "pass", "else", ":", "for", "key", "in", "hook_config", ".", "keys", "(", ")", ":", "# Use name here so we can get the bound method later, in", "# case the processor was a descriptor or something.", "hooks", "[", "key", "]", ".", "append", "(", "attr_name", ")", "return", "hooks" ]
[ 160, 4 ]
[ 197, 20 ]
python
en
['en', 'it', 'en']
True
Schema.from_dict
( cls, fields: typing.Dict[str, typing.Union[ma_fields.Field, type]], *, name: str = "GeneratedSchema" )
Generate a `Schema` class given a dictionary of fields. .. code-block:: python from great_expectations.marshmallow__shade import Schema, fields PersonSchema = Schema.from_dict({"name": fields.Str()}) print(PersonSchema().load({"name": "David"})) # => {'name': 'David'} Generated schemas are not added to the class registry and therefore cannot be referred to by name in `Nested` fields. :param dict fields: Dictionary mapping field names to field instances. :param str name: Optional name for the class, which will appear in the ``repr`` for the class. .. versionadded:: 3.0.0
Generate a `Schema` class given a dictionary of fields.
def from_dict(
    cls,
    fields: typing.Dict[str, typing.Union[ma_fields.Field, type]],
    *,
    name: str = "GeneratedSchema",
) -> type:
    """Generate a `Schema` class given a dictionary of fields.

    .. code-block:: python

        from great_expectations.marshmallow__shade import Schema, fields

        PersonSchema = Schema.from_dict({"name": fields.Str()})
        print(PersonSchema().load({"name": "David"}))  # => {'name': 'David'}

    Generated schemas are not added to the class registry and therefore cannot
    be referred to by name in `Nested` fields.

    :param dict fields: Dictionary mapping field names to field instances.
    :param str name: Optional name for the class, which will appear in
        the ``repr`` for the class.

    .. versionadded:: 3.0.0
    """
    attrs = fields.copy()
    attrs["Meta"] = type(
        "GeneratedMeta", (getattr(cls, "Meta", object),), {"register": False}
    )
    schema_cls = type(name, (cls,), attrs)
    return schema_cls
[ "def", "from_dict", "(", "cls", ",", "fields", ":", "typing", ".", "Dict", "[", "str", ",", "typing", ".", "Union", "[", "ma_fields", ".", "Field", ",", "type", "]", "]", ",", "*", ",", "name", ":", "str", "=", "\"GeneratedSchema\"", ")", "->", "type", ":", "attrs", "=", "fields", ".", "copy", "(", ")", "attrs", "[", "\"Meta\"", "]", "=", "type", "(", "\"GeneratedMeta\"", ",", "(", "getattr", "(", "cls", ",", "\"Meta\"", ",", "object", ")", ",", ")", ",", "{", "\"register\"", ":", "False", "}", ")", "schema_cls", "=", "type", "(", "name", ",", "(", "cls", ",", ")", ",", "attrs", ")", "return", "schema_cls" ]
[ 424, 4 ]
[ 453, 25 ]
python
en
['en', 'en', 'en']
True
Schema.handle_error
( self, error: ValidationError, data: typing.Any, *, many: bool, **kwargs )
Custom error handler function for the schema. :param error: The `ValidationError` raised during (de)serialization. :param data: The original input data. :param many: Value of ``many`` on dump or load. :param partial: Value of ``partial`` on load. .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0rc9 Receives `many` and `partial` (on deserialization) as keyword arguments.
Custom error handler function for the schema.
def handle_error( self, error: ValidationError, data: typing.Any, *, many: bool, **kwargs ): """Custom error handler function for the schema. :param error: The `ValidationError` raised during (de)serialization. :param data: The original input data. :param many: Value of ``many`` on dump or load. :param partial: Value of ``partial`` on load. .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0rc9 Receives `many` and `partial` (on deserialization) as keyword arguments. """ pass
[ "def", "handle_error", "(", "self", ",", "error", ":", "ValidationError", ",", "data", ":", "typing", ".", "Any", ",", "*", ",", "many", ":", "bool", ",", "*", "*", "kwargs", ")", ":", "pass" ]
[ 457, 4 ]
[ 472, 12 ]
python
en
['en', 'nl', 'en']
True
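Since ``handle_error`` is a no-op by default, a subclass can override it to observe failures before the ``ValidationError`` propagates out of ``load``. A minimal sketch (assuming the shaded package re-exports ``ValidationError`` like upstream marshmallow):

.. code-block:: python

    import logging

    from great_expectations.marshmallow__shade import Schema, ValidationError, fields

    class LoggingSchema(Schema):
        name = fields.Str(required=True)

        def handle_error(self, error: ValidationError, data, *, many: bool, **kwargs):
            # Called by _do_load() just before the error is raised.
            logging.error("Validation failed for %r: %s", data, error.messages)

    try:
        LoggingSchema().load({})
    except ValidationError:
        pass  # already logged by handle_error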
Schema.get_attribute
(self, obj: typing.Any, attr: str, default: typing.Any)
Defines how to pull values from an object to serialize. .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0a1 Changed position of ``obj`` and ``attr``.
Defines how to pull values from an object to serialize.
def get_attribute(self, obj: typing.Any, attr: str, default: typing.Any): """Defines how to pull values from an object to serialize. .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0a1 Changed position of ``obj`` and ``attr``. """ return get_value(obj, attr, default)
[ "def", "get_attribute", "(", "self", ",", "obj", ":", "typing", ".", "Any", ",", "attr", ":", "str", ",", "default", ":", "typing", ".", "Any", ")", ":", "return", "get_value", "(", "obj", ",", "attr", ",", "default", ")" ]
[ 474, 4 ]
[ 482, 44 ]
python
en
['en', 'en', 'en']
True
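Overriding ``get_attribute`` swaps out the default ``get_value`` lookup used during serialization. A sketch using a hypothetical upper-case-key convention (``UpperKeySchema`` is illustrative, not from the source):

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields

    class UpperKeySchema(Schema):
        name = fields.Str()

        def get_attribute(self, obj, attr: str, default):
            # Pull values from upper-cased keys instead of the usual traversal.
            return obj.get(attr.upper(), default)

    print(UpperKeySchema().dump({"NAME": "Ada"}))  # => {'name': 'Ada'}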
Schema._call_and_store
(getter_func, data, *, field_name, error_store, index=None)
Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`. :param callable getter_func: Function for getting the serialized/deserialized value from ``data``. :param data: The data passed to ``getter_func``. :param str field_name: Field name. :param ErrorStore error_store: Structure to store errors. :param int index: Index of the item being validated, if validating a collection, otherwise `None`.
Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.
def _call_and_store(getter_func, data, *, field_name, error_store, index=None): """Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`. :param callable getter_func: Function for getting the serialized/deserialized value from ``data``. :param data: The data passed to ``getter_func``. :param str field_name: Field name. :param int index: Index of the item being validated, if validating a collection, otherwise `None`. """ try: value = getter_func(data) except ValidationError as error: error_store.store_error(error.messages, field_name, index=index) # When a Nested field fails validation, the marshalled data is stored # on the ValidationError's valid_data attribute return error.valid_data or missing return value
[ "def", "_call_and_store", "(", "getter_func", ",", "data", ",", "*", ",", "field_name", ",", "error_store", ",", "index", "=", "None", ")", ":", "try", ":", "value", "=", "getter_func", "(", "data", ")", "except", "ValidationError", "as", "error", ":", "error_store", ".", "store_error", "(", "error", ".", "messages", ",", "field_name", ",", "index", "=", "index", ")", "# When a Nested field fails validation, the marshalled data is stored", "# on the ValidationError's valid_data attribute", "return", "error", ".", "valid_data", "or", "missing", "return", "value" ]
[ 487, 4 ]
[ 504, 20 ]
python
en
['en', 'en', 'en']
True
Schema._serialize
( self, obj: typing.Union[_T, typing.Iterable[_T]], *, many: bool = False )
Serialize ``obj``. :param obj: The object(s) to serialize. :param bool many: `True` if ``obj`` should be serialized as a collection. :return: A dictionary of the serialized data .. versionchanged:: 1.0.0 Renamed from ``marshal``.
Serialize ``obj``.
def _serialize( self, obj: typing.Union[_T, typing.Iterable[_T]], *, many: bool = False ): """Serialize ``obj``. :param obj: The object(s) to serialize. :param bool many: `True` if ``data`` should be serialized as a collection. :return: A dictionary of the serialized data .. versionchanged:: 1.0.0 Renamed from ``marshal``. """ if many and obj is not None: return [ self._serialize(d, many=False) for d in typing.cast(typing.Iterable[_T], obj) ] ret = self.dict_class() for attr_name, field_obj in self.dump_fields.items(): value = field_obj.serialize(attr_name, obj, accessor=self.get_attribute) if value is missing: continue key = field_obj.data_key if field_obj.data_key is not None else attr_name ret[key] = value return ret
[ "def", "_serialize", "(", "self", ",", "obj", ":", "typing", ".", "Union", "[", "_T", ",", "typing", ".", "Iterable", "[", "_T", "]", "]", ",", "*", ",", "many", ":", "bool", "=", "False", ")", ":", "if", "many", "and", "obj", "is", "not", "None", ":", "return", "[", "self", ".", "_serialize", "(", "d", ",", "many", "=", "False", ")", "for", "d", "in", "typing", ".", "cast", "(", "typing", ".", "Iterable", "[", "_T", "]", ",", "obj", ")", "]", "ret", "=", "self", ".", "dict_class", "(", ")", "for", "attr_name", ",", "field_obj", "in", "self", ".", "dump_fields", ".", "items", "(", ")", ":", "value", "=", "field_obj", ".", "serialize", "(", "attr_name", ",", "obj", ",", "accessor", "=", "self", ".", "get_attribute", ")", "if", "value", "is", "missing", ":", "continue", "key", "=", "field_obj", ".", "data_key", "if", "field_obj", ".", "data_key", "is", "not", "None", "else", "attr_name", "ret", "[", "key", "]", "=", "value", "return", "ret" ]
[ 506, 4 ]
[ 530, 18 ]
python
en
['en', 'pl', 'hi']
False
Schema.dump
(self, obj: typing.Any, *, many: bool = None)
Serialize an object to native Python data types according to this Schema's fields. :param obj: The object to serialize. :param many: Whether to serialize `obj` as a collection. If `None`, the value for `self.many` is used. :return: A dict of serialized data :rtype: dict .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0b7 This method returns the serialized data rather than a ``(data, errors)`` duple. A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised if ``obj`` is invalid. .. versionchanged:: 3.0.0rc9 Validation no longer occurs upon serialization.
Serialize an object to native Python data types according to this Schema's fields.
def dump(self, obj: typing.Any, *, many: bool = None): """Serialize an object to native Python data types according to this Schema's fields. :param obj: The object to serialize. :param many: Whether to serialize `obj` as a collection. If `None`, the value for `self.many` is used. :return: A dict of serialized data :rtype: dict .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0b7 This method returns the serialized data rather than a ``(data, errors)`` duple. A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised if ``obj`` is invalid. .. versionchanged:: 3.0.0rc9 Validation no longer occurs upon serialization. """ many = self.many if many is None else bool(many) if many and is_iterable_but_not_string(obj): obj = list(obj) if self._has_processors(PRE_DUMP): processed_obj = self._invoke_dump_processors( PRE_DUMP, obj, many=many, original_data=obj ) else: processed_obj = obj result = self._serialize(processed_obj, many=many) if self._has_processors(POST_DUMP): result = self._invoke_dump_processors( POST_DUMP, result, many=many, original_data=obj ) return result
[ "def", "dump", "(", "self", ",", "obj", ":", "typing", ".", "Any", ",", "*", ",", "many", ":", "bool", "=", "None", ")", ":", "many", "=", "self", ".", "many", "if", "many", "is", "None", "else", "bool", "(", "many", ")", "if", "many", "and", "is_iterable_but_not_string", "(", "obj", ")", ":", "obj", "=", "list", "(", "obj", ")", "if", "self", ".", "_has_processors", "(", "PRE_DUMP", ")", ":", "processed_obj", "=", "self", ".", "_invoke_dump_processors", "(", "PRE_DUMP", ",", "obj", ",", "many", "=", "many", ",", "original_data", "=", "obj", ")", "else", ":", "processed_obj", "=", "obj", "result", "=", "self", ".", "_serialize", "(", "processed_obj", ",", "many", "=", "many", ")", "if", "self", ".", "_has_processors", "(", "POST_DUMP", ")", ":", "result", "=", "self", ".", "_invoke_dump_processors", "(", "POST_DUMP", ",", "result", ",", "many", "=", "many", ",", "original_data", "=", "obj", ")", "return", "result" ]
[ 532, 4 ]
[ 568, 21 ]
python
en
['en', 'en', 'en']
True
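``dump`` runs ``PRE_DUMP`` processors, serializes, then runs ``POST_DUMP`` processors. A sketch of the post-dump hook (assuming ``post_dump`` is re-exported like in upstream marshmallow):

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields, post_dump

    class ArtistSchema(Schema):
        name = fields.Str()

        @post_dump
        def wrap(self, data, **kwargs):
            # Receives the dict produced by _serialize().
            return {"artist": data}

    print(ArtistSchema().dump({"name": "Miles"}))  # => {'artist': {'name': 'Miles'}}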
Schema.dumps
(self, obj: typing.Any, *args, many: bool = None, **kwargs)
Same as :meth:`dump`, except return a JSON-encoded string. :param obj: The object to serialize. :param many: Whether to serialize `obj` as a collection. If `None`, the value for `self.many` is used. :return: A ``json`` string :rtype: str .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0b7 This method returns the serialized data rather than a ``(data, errors)`` duple. A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised if ``obj`` is invalid.
Same as :meth:`dump`, except return a JSON-encoded string.
def dumps(self, obj: typing.Any, *args, many: bool = None, **kwargs): """Same as :meth:`dump`, except return a JSON-encoded string. :param obj: The object to serialize. :param many: Whether to serialize `obj` as a collection. If `None`, the value for `self.many` is used. :return: A ``json`` string :rtype: str .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0b7 This method returns the serialized data rather than a ``(data, errors)`` duple. A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised if ``obj`` is invalid. """ serialized = self.dump(obj, many=many) def datetime_serializer(o): if isinstance(o, dt.datetime): return o.__str__() if "default" not in kwargs: kwargs.update({"default": datetime_serializer}) return self.opts.render_module.dumps(serialized, *args, **kwargs)
[ "def", "dumps", "(", "self", ",", "obj", ":", "typing", ".", "Any", ",", "*", "args", ",", "many", ":", "bool", "=", "None", ",", "*", "*", "kwargs", ")", ":", "serialized", "=", "self", ".", "dump", "(", "obj", ",", "many", "=", "many", ")", "def", "datetime_serializer", "(", "o", ")", ":", "if", "isinstance", "(", "o", ",", "dt", ".", "datetime", ")", ":", "return", "o", ".", "__str__", "(", ")", "if", "\"default\"", "not", "in", "kwargs", ":", "kwargs", ".", "update", "(", "{", "\"default\"", ":", "datetime_serializer", "}", ")", "return", "self", ".", "opts", ".", "render_module", ".", "dumps", "(", "serialized", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
[ 570, 4 ]
[ 593, 73 ]
python
en
['en', 'en', 'en']
True
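Note the shade-specific twist in the body above: unless a ``default`` is supplied, ``dumps`` installs a ``str()`` fallback so stray ``datetime`` values survive JSON encoding. Sketch:

.. code-block:: python

    import datetime as dt

    from great_expectations.marshmallow__shade import Schema, fields

    class EventSchema(Schema):
        name = fields.Str()
        at = fields.Raw()  # passes the datetime through unchanged

    print(EventSchema().dumps({"name": "release", "at": dt.datetime(2021, 1, 1)}))
    # => {"name": "release", "at": "2021-01-01 00:00:00"}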
Schema._deserialize
( self, data: typing.Union[ typing.Mapping[str, typing.Any], typing.Iterable[typing.Mapping[str, typing.Any]], ], *, error_store: ErrorStore, many: bool = False, partial=False, unknown=RAISE, index=None )
Deserialize ``data``. :param dict data: The data to deserialize. :param ErrorStore error_store: Structure to store errors. :param bool many: `True` if ``data`` should be deserialized as a collection. :param bool|tuple partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. :param int index: Index of the item being deserialized (for storing errors) if deserializing a collection, otherwise `None`. :return: A dictionary of the deserialized data.
Deserialize ``data``.
def _deserialize( self, data: typing.Union[ typing.Mapping[str, typing.Any], typing.Iterable[typing.Mapping[str, typing.Any]], ], *, error_store: ErrorStore, many: bool = False, partial=False, unknown=RAISE, index=None ) -> typing.Union[_T, typing.List[_T]]: """Deserialize ``data``. :param dict data: The data to deserialize. :param ErrorStore error_store: Structure to store errors. :param bool many: `True` if ``data`` should be deserialized as a collection. :param bool|tuple partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. :param int index: Index of the item being serialized (for storing errors) if serializing a collection, otherwise `None`. :return: A dictionary of the deserialized data. """ index_errors = self.opts.index_errors index = index if index_errors else None if many: if not is_collection(data): error_store.store_error([self.error_messages["type"]], index=index) ret = [] # type: typing.List[_T] else: ret = [ typing.cast( _T, self._deserialize( typing.cast(typing.Mapping[str, typing.Any], d), error_store=error_store, many=False, partial=partial, unknown=unknown, index=idx, ), ) for idx, d in enumerate(data) ] return ret ret = self.dict_class() # Check data is a dict if not isinstance(data, Mapping): error_store.store_error([self.error_messages["type"]], index=index) else: partial_is_collection = is_collection(partial) for attr_name, field_obj in self.load_fields.items(): field_name = ( field_obj.data_key if field_obj.data_key is not None else attr_name ) raw_value = data.get(field_name, missing) if raw_value is missing: # Ignore missing field if we're allowed to. if partial is True or ( partial_is_collection and attr_name in partial ): continue d_kwargs = {} # Allow partial loading of nested schemas. if partial_is_collection: prefix = field_name + "." len_prefix = len(prefix) sub_partial = [ f[len_prefix:] for f in partial if f.startswith(prefix) ] d_kwargs["partial"] = sub_partial else: d_kwargs["partial"] = partial getter = lambda val: field_obj.deserialize( val, field_name, data, **d_kwargs ) value = self._call_and_store( getter_func=getter, data=raw_value, field_name=field_name, error_store=error_store, index=index, ) if value is not missing: key = field_obj.attribute or attr_name set_value(typing.cast(typing.Dict, ret), key, value) if unknown != EXCLUDE: fields = { field_obj.data_key if field_obj.data_key is not None else field_name for field_name, field_obj in self.load_fields.items() } for key in set(data) - fields: value = data[key] if unknown == INCLUDE: set_value(typing.cast(typing.Dict, ret), key, value) elif unknown == RAISE: error_store.store_error( [self.error_messages["unknown"]], key, (index if index_errors else None), ) return ret
[ "def", "_deserialize", "(", "self", ",", "data", ":", "typing", ".", "Union", "[", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", ",", "typing", ".", "Iterable", "[", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", "]", ",", "]", ",", "*", ",", "error_store", ":", "ErrorStore", ",", "many", ":", "bool", "=", "False", ",", "partial", "=", "False", ",", "unknown", "=", "RAISE", ",", "index", "=", "None", ")", "->", "typing", ".", "Union", "[", "_T", ",", "typing", ".", "List", "[", "_T", "]", "]", ":", "index_errors", "=", "self", ".", "opts", ".", "index_errors", "index", "=", "index", "if", "index_errors", "else", "None", "if", "many", ":", "if", "not", "is_collection", "(", "data", ")", ":", "error_store", ".", "store_error", "(", "[", "self", ".", "error_messages", "[", "\"type\"", "]", "]", ",", "index", "=", "index", ")", "ret", "=", "[", "]", "# type: typing.List[_T]", "else", ":", "ret", "=", "[", "typing", ".", "cast", "(", "_T", ",", "self", ".", "_deserialize", "(", "typing", ".", "cast", "(", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", ",", "d", ")", ",", "error_store", "=", "error_store", ",", "many", "=", "False", ",", "partial", "=", "partial", ",", "unknown", "=", "unknown", ",", "index", "=", "idx", ",", ")", ",", ")", "for", "idx", ",", "d", "in", "enumerate", "(", "data", ")", "]", "return", "ret", "ret", "=", "self", ".", "dict_class", "(", ")", "# Check data is a dict", "if", "not", "isinstance", "(", "data", ",", "Mapping", ")", ":", "error_store", ".", "store_error", "(", "[", "self", ".", "error_messages", "[", "\"type\"", "]", "]", ",", "index", "=", "index", ")", "else", ":", "partial_is_collection", "=", "is_collection", "(", "partial", ")", "for", "attr_name", ",", "field_obj", "in", "self", ".", "load_fields", ".", "items", "(", ")", ":", "field_name", "=", "(", "field_obj", ".", "data_key", "if", "field_obj", ".", "data_key", "is", "not", "None", "else", "attr_name", ")", "raw_value", "=", "data", ".", "get", "(", "field_name", ",", "missing", ")", "if", "raw_value", "is", "missing", ":", "# Ignore missing field if we're allowed to.", "if", "partial", "is", "True", "or", "(", "partial_is_collection", "and", "attr_name", "in", "partial", ")", ":", "continue", "d_kwargs", "=", "{", "}", "# Allow partial loading of nested schemas.", "if", "partial_is_collection", ":", "prefix", "=", "field_name", "+", "\".\"", "len_prefix", "=", "len", "(", "prefix", ")", "sub_partial", "=", "[", "f", "[", "len_prefix", ":", "]", "for", "f", "in", "partial", "if", "f", ".", "startswith", "(", "prefix", ")", "]", "d_kwargs", "[", "\"partial\"", "]", "=", "sub_partial", "else", ":", "d_kwargs", "[", "\"partial\"", "]", "=", "partial", "getter", "=", "lambda", "val", ":", "field_obj", ".", "deserialize", "(", "val", ",", "field_name", ",", "data", ",", "*", "*", "d_kwargs", ")", "value", "=", "self", ".", "_call_and_store", "(", "getter_func", "=", "getter", ",", "data", "=", "raw_value", ",", "field_name", "=", "field_name", ",", "error_store", "=", "error_store", ",", "index", "=", "index", ",", ")", "if", "value", "is", "not", "missing", ":", "key", "=", "field_obj", ".", "attribute", "or", "attr_name", "set_value", "(", "typing", ".", "cast", "(", "typing", ".", "Dict", ",", "ret", ")", ",", "key", ",", "value", ")", "if", "unknown", "!=", "EXCLUDE", ":", "fields", "=", "{", "field_obj", ".", "data_key", "if", "field_obj", ".", "data_key", "is", "not", "None", "else", "field_name", "for", "field_name", ",", 
"field_obj", "in", "self", ".", "load_fields", ".", "items", "(", ")", "}", "for", "key", "in", "set", "(", "data", ")", "-", "fields", ":", "value", "=", "data", "[", "key", "]", "if", "unknown", "==", "INCLUDE", ":", "set_value", "(", "typing", ".", "cast", "(", "typing", ".", "Dict", ",", "ret", ")", ",", "key", ",", "value", ")", "elif", "unknown", "==", "RAISE", ":", "error_store", ".", "store_error", "(", "[", "self", ".", "error_messages", "[", "\"unknown\"", "]", "]", ",", "key", ",", "(", "index", "if", "index_errors", "else", "None", ")", ",", ")", "return", "ret" ]
[ 595, 4 ]
[ 701, 18 ]
python
en
['en', 'no', 'it']
False
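The dot-delimited ``partial`` handling above can be exercised through ``load``: the ``"address."`` prefix is stripped and the remainder propagated into the ``Nested`` field. Sketch (schema names are illustrative):

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields

    class AddressSchema(Schema):
        street = fields.Str(required=True)
        city = fields.Str(required=True)

    class UserSchema(Schema):
        name = fields.Str(required=True)
        address = fields.Nested(AddressSchema, required=True)

    result = UserSchema().load(
        {"address": {"street": "Main"}},
        partial=("name", "address.city"),  # the nested city may be omitted
    )
    print(result)  # => {'address': {'street': 'Main'}}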
Schema.load
( self, data: typing.Union[ typing.Mapping[str, typing.Any], typing.Iterable[typing.Mapping[str, typing.Any]], ], *, many: bool = None, partial: typing.Union[bool, types.StrSequenceOrSet] = None, unknown: str = None )
Deserialize a data structure to an object defined by this Schema's fields. :param data: The data to deserialize. :param many: Whether to deserialize `data` as a collection. If `None`, the value for `self.many` is used. :param partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. If `None`, the value for `self.unknown` is used. :return: Deserialized data .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0b7 This method returns the deserialized data rather than a ``(data, errors)`` duple. A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised if invalid data are passed.
Deserialize a data structure to an object defined by this Schema's fields.
def load( self, data: typing.Union[ typing.Mapping[str, typing.Any], typing.Iterable[typing.Mapping[str, typing.Any]], ], *, many: bool = None, partial: typing.Union[bool, types.StrSequenceOrSet] = None, unknown: str = None ): """Deserialize a data structure to an object defined by this Schema's fields. :param data: The data to deserialize. :param many: Whether to deserialize `data` as a collection. If `None`, the value for `self.many` is used. :param partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. If `None`, the value for `self.unknown` is used. :return: Deserialized data .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0b7 This method returns the deserialized data rather than a ``(data, errors)`` duple. A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised if invalid data are passed. """ return self._do_load( data, many=many, partial=partial, unknown=unknown, postprocess=True )
[ "def", "load", "(", "self", ",", "data", ":", "typing", ".", "Union", "[", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", ",", "typing", ".", "Iterable", "[", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", "]", ",", "]", ",", "*", ",", "many", ":", "bool", "=", "None", ",", "partial", ":", "typing", ".", "Union", "[", "bool", ",", "types", ".", "StrSequenceOrSet", "]", "=", "None", ",", "unknown", ":", "str", "=", "None", ")", ":", "return", "self", ".", "_do_load", "(", "data", ",", "many", "=", "many", ",", "partial", "=", "partial", ",", "unknown", "=", "unknown", ",", "postprocess", "=", "True", ")" ]
[ 703, 4 ]
[ 736, 9 ]
python
en
['en', 'en', 'en']
True
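A sketch of the three ``unknown`` policies (assuming ``EXCLUDE``, ``INCLUDE`` and ``ValidationError`` are re-exported like in upstream marshmallow):

.. code-block:: python

    from great_expectations.marshmallow__shade import (
        EXCLUDE,
        INCLUDE,
        Schema,
        ValidationError,
        fields,
    )

    class PointSchema(Schema):
        x = fields.Int()

    payload = {"x": 1, "y": 2}
    print(PointSchema().load(payload, unknown=EXCLUDE))  # => {'x': 1}
    print(PointSchema().load(payload, unknown=INCLUDE))  # => {'x': 1, 'y': 2}
    try:
        PointSchema().load(payload)  # the default policy is RAISE
    except ValidationError as err:
        print(err.messages)  # => {'y': ['Unknown field.']}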
Schema.loads
( self, json_data: str, *, many: bool = None, partial: typing.Union[bool, types.StrSequenceOrSet] = None, unknown: str = None, **kwargs )
Same as :meth:`load`, except it takes a JSON string as input. :param json_data: A JSON string of the data to deserialize. :param many: Whether to deserialize `obj` as a collection. If `None`, the value for `self.many` is used. :param partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. If `None`, the value for `self.unknown` is used. :return: Deserialized data .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0b7 This method returns the deserialized data rather than a ``(data, errors)`` duple. A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised if invalid data are passed.
Same as :meth:`load`, except it takes a JSON string as input.
def loads( self, json_data: str, *, many: bool = None, partial: typing.Union[bool, types.StrSequenceOrSet] = None, unknown: str = None, **kwargs ): """Same as :meth:`load`, except it takes a JSON string as input. :param json_data: A JSON string of the data to deserialize. :param many: Whether to deserialize `obj` as a collection. If `None`, the value for `self.many` is used. :param partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. If `None`, the value for `self.unknown` is used. :return: Deserialized data .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0b7 This method returns the deserialized data rather than a ``(data, errors)`` duple. A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised if invalid data are passed. """ data = self.opts.render_module.loads(json_data, **kwargs) return self.load(data, many=many, partial=partial, unknown=unknown)
[ "def", "loads", "(", "self", ",", "json_data", ":", "str", ",", "*", ",", "many", ":", "bool", "=", "None", ",", "partial", ":", "typing", ".", "Union", "[", "bool", ",", "types", ".", "StrSequenceOrSet", "]", "=", "None", ",", "unknown", ":", "str", "=", "None", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "opts", ".", "render_module", ".", "loads", "(", "json_data", ",", "*", "*", "kwargs", ")", "return", "self", ".", "load", "(", "data", ",", "many", "=", "many", ",", "partial", "=", "partial", ",", "unknown", "=", "unknown", ")" ]
[ 738, 4 ]
[ 768, 75 ]
python
en
['en', 'en', 'en']
True
Schema.validate
( self, data: typing.Mapping, *, many: bool = None, partial: typing.Union[bool, types.StrSequenceOrSet] = None )
Validate `data` against the schema, returning a dictionary of validation errors. :param data: The data to validate. :param many: Whether to validate `data` as a collection. If `None`, the value for `self.many` is used. :param partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :return: A dictionary of validation errors. .. versionadded:: 1.1.0
Validate `data` against the schema, returning a dictionary of validation errors.
def validate( self, data: typing.Mapping, *, many: bool = None, partial: typing.Union[bool, types.StrSequenceOrSet] = None ) -> typing.Dict[str, typing.List[str]]: """Validate `data` against the schema, returning a dictionary of validation errors. :param data: The data to validate. :param many: Whether to validate `data` as a collection. If `None`, the value for `self.many` is used. :param partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :return: A dictionary of validation errors. .. versionadded:: 1.1.0 """ try: self._do_load(data, many=many, partial=partial, postprocess=False) except ValidationError as exc: return typing.cast(typing.Dict[str, typing.List[str]], exc.messages) return {}
[ "def", "validate", "(", "self", ",", "data", ":", "typing", ".", "Mapping", ",", "*", ",", "many", ":", "bool", "=", "None", ",", "partial", ":", "typing", ".", "Union", "[", "bool", ",", "types", ".", "StrSequenceOrSet", "]", "=", "None", ")", "->", "typing", ".", "Dict", "[", "str", ",", "typing", ".", "List", "[", "str", "]", "]", ":", "try", ":", "self", ".", "_do_load", "(", "data", ",", "many", "=", "many", ",", "partial", "=", "partial", ",", "postprocess", "=", "False", ")", "except", "ValidationError", "as", "exc", ":", "return", "typing", ".", "cast", "(", "typing", ".", "Dict", "[", "str", ",", "typing", ".", "List", "[", "str", "]", "]", ",", "exc", ".", "messages", ")", "return", "{", "}" ]
[ 790, 4 ]
[ 815, 17 ]
python
en
['en', 'en', 'en']
True
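Unlike ``load``, ``validate`` swallows the ``ValidationError`` and returns the message dict. Sketch:

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields

    class SignupSchema(Schema):
        email = fields.Email(required=True)

    print(SignupSchema().validate({"email": "not-an-email"}))
    # => {'email': ['Not a valid email address.']}
    print(SignupSchema().validate({"email": "a@b.io"}))  # => {}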
Schema._do_load
( self, data: typing.Union[ typing.Mapping[str, typing.Any], typing.Iterable[typing.Mapping[str, typing.Any]], ], *, many: bool = None, partial: typing.Union[bool, types.StrSequenceOrSet] = None, unknown: str = None, postprocess: bool = True )
Deserialize `data`, returning the deserialized result. This method is private API. :param data: The data to deserialize. :param many: Whether to deserialize `data` as a collection. If `None`, the value for `self.many` is used. :param partial: Whether to validate required fields. If its value is an iterable, only fields listed in that iterable will be allowed missing. If `True`, all fields will be allowed missing. If `None`, the value for `self.partial` is used. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. If `None`, the value for `self.unknown` is used. :param postprocess: Whether to run post_load methods. :return: Deserialized data
Deserialize `data`, returning the deserialized result. This method is private API.
def _do_load( self, data: typing.Union[ typing.Mapping[str, typing.Any], typing.Iterable[typing.Mapping[str, typing.Any]], ], *, many: bool = None, partial: typing.Union[bool, types.StrSequenceOrSet] = None, unknown: str = None, postprocess: bool = True ): """Deserialize `data`, returning the deserialized result. This method is private API. :param data: The data to deserialize. :param many: Whether to deserialize `data` as a collection. If `None`, the value for `self.many` is used. :param partial: Whether to validate required fields. If its value is an iterable, only fields listed in that iterable will be ignored will be allowed missing. If `True`, all fields will be allowed missing. If `None`, the value for `self.partial` is used. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. If `None`, the value for `self.unknown` is used. :param postprocess: Whether to run post_load methods.. :return: Deserialized data """ error_store = ErrorStore() errors = {} # type: typing.Dict[str, typing.List[str]] many = self.many if many is None else bool(many) unknown = unknown or self.unknown if partial is None: partial = self.partial # Run preprocessors if self._has_processors(PRE_LOAD): try: processed_data = self._invoke_load_processors( PRE_LOAD, data, many=many, original_data=data, partial=partial ) except ValidationError as err: errors = err.normalized_messages() result = ( None ) # type: typing.Optional[typing.Union[typing.List, typing.Dict]] else: processed_data = data if not errors: # Deserialize data result = self._deserialize( processed_data, error_store=error_store, many=many, partial=partial, unknown=unknown, ) # Run field-level validation self._invoke_field_validators( error_store=error_store, data=result, many=many ) # Run schema-level validation if self._has_processors(VALIDATES_SCHEMA): field_errors = bool(error_store.errors) self._invoke_schema_validators( error_store=error_store, pass_many=True, data=result, original_data=data, many=many, partial=partial, field_errors=field_errors, ) self._invoke_schema_validators( error_store=error_store, pass_many=False, data=result, original_data=data, many=many, partial=partial, field_errors=field_errors, ) errors = error_store.errors # Run post processors if not errors and postprocess and self._has_processors(POST_LOAD): try: result = self._invoke_load_processors( POST_LOAD, result, many=many, original_data=data, partial=partial, ) except ValidationError as err: errors = err.normalized_messages() if errors: exc = ValidationError(errors, data=data, valid_data=result) self.handle_error(exc, data, many=many, partial=partial) raise exc return result
[ "def", "_do_load", "(", "self", ",", "data", ":", "typing", ".", "Union", "[", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", ",", "typing", ".", "Iterable", "[", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", "]", ",", "]", ",", "*", ",", "many", ":", "bool", "=", "None", ",", "partial", ":", "typing", ".", "Union", "[", "bool", ",", "types", ".", "StrSequenceOrSet", "]", "=", "None", ",", "unknown", ":", "str", "=", "None", ",", "postprocess", ":", "bool", "=", "True", ")", ":", "error_store", "=", "ErrorStore", "(", ")", "errors", "=", "{", "}", "# type: typing.Dict[str, typing.List[str]]", "many", "=", "self", ".", "many", "if", "many", "is", "None", "else", "bool", "(", "many", ")", "unknown", "=", "unknown", "or", "self", ".", "unknown", "if", "partial", "is", "None", ":", "partial", "=", "self", ".", "partial", "# Run preprocessors", "if", "self", ".", "_has_processors", "(", "PRE_LOAD", ")", ":", "try", ":", "processed_data", "=", "self", ".", "_invoke_load_processors", "(", "PRE_LOAD", ",", "data", ",", "many", "=", "many", ",", "original_data", "=", "data", ",", "partial", "=", "partial", ")", "except", "ValidationError", "as", "err", ":", "errors", "=", "err", ".", "normalized_messages", "(", ")", "result", "=", "(", "None", ")", "# type: typing.Optional[typing.Union[typing.List, typing.Dict]]", "else", ":", "processed_data", "=", "data", "if", "not", "errors", ":", "# Deserialize data", "result", "=", "self", ".", "_deserialize", "(", "processed_data", ",", "error_store", "=", "error_store", ",", "many", "=", "many", ",", "partial", "=", "partial", ",", "unknown", "=", "unknown", ",", ")", "# Run field-level validation", "self", ".", "_invoke_field_validators", "(", "error_store", "=", "error_store", ",", "data", "=", "result", ",", "many", "=", "many", ")", "# Run schema-level validation", "if", "self", ".", "_has_processors", "(", "VALIDATES_SCHEMA", ")", ":", "field_errors", "=", "bool", "(", "error_store", ".", "errors", ")", "self", ".", "_invoke_schema_validators", "(", "error_store", "=", "error_store", ",", "pass_many", "=", "True", ",", "data", "=", "result", ",", "original_data", "=", "data", ",", "many", "=", "many", ",", "partial", "=", "partial", ",", "field_errors", "=", "field_errors", ",", ")", "self", ".", "_invoke_schema_validators", "(", "error_store", "=", "error_store", ",", "pass_many", "=", "False", ",", "data", "=", "result", ",", "original_data", "=", "data", ",", "many", "=", "many", ",", "partial", "=", "partial", ",", "field_errors", "=", "field_errors", ",", ")", "errors", "=", "error_store", ".", "errors", "# Run post processors", "if", "not", "errors", "and", "postprocess", "and", "self", ".", "_has_processors", "(", "POST_LOAD", ")", ":", "try", ":", "result", "=", "self", ".", "_invoke_load_processors", "(", "POST_LOAD", ",", "result", ",", "many", "=", "many", ",", "original_data", "=", "data", ",", "partial", "=", "partial", ",", ")", "except", "ValidationError", "as", "err", ":", "errors", "=", "err", ".", "normalized_messages", "(", ")", "if", "errors", ":", "exc", "=", "ValidationError", "(", "errors", ",", "data", "=", "data", ",", "valid_data", "=", "result", ")", "self", ".", "handle_error", "(", "exc", ",", "data", ",", "many", "=", "many", ",", "partial", "=", "partial", ")", "raise", "exc", "return", "result" ]
[ 819, 4 ]
[ 918, 21 ]
python
en
['en', 'no', 'en']
True
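The ``PRE_LOAD``/``POST_LOAD`` sequencing in ``_do_load`` is easiest to see from the hooks themselves (assuming ``pre_load``/``post_load`` are re-exported like in upstream marshmallow):

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields, post_load, pre_load

    class SlugSchema(Schema):
        slug = fields.Str(required=True)

        @pre_load
        def strip_whitespace(self, data, **kwargs):
            # PRE_LOAD: runs before _deserialize().
            return {key: value.strip() for key, value in data.items()}

        @post_load
        def freeze(self, data, **kwargs):
            # POST_LOAD: runs only when no errors were stored.
            return tuple(sorted(data.items()))

    print(SlugSchema().load({"slug": "  hello  "}))  # => (('slug', 'hello'),)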
Schema._normalize_nested_options
(self)
Apply then flatten nested schema options. This method is private API.
Apply then flatten nested schema options. This method is private API.
def _normalize_nested_options(self) -> None: """Apply then flatten nested schema options. This method is private API. """ if self.only is not None: # Apply the only option to nested fields. self.__apply_nested_option("only", self.only, "intersection") # Remove the child field names from the only option. self.only = self.set_class([field.split(".", 1)[0] for field in self.only]) if self.exclude: # Apply the exclude option to nested fields. self.__apply_nested_option("exclude", self.exclude, "union") # Remove the parent field names from the exclude option. self.exclude = self.set_class( [field for field in self.exclude if "." not in field] )
[ "def", "_normalize_nested_options", "(", "self", ")", "->", "None", ":", "if", "self", ".", "only", "is", "not", "None", ":", "# Apply the only option to nested fields.", "self", ".", "__apply_nested_option", "(", "\"only\"", ",", "self", ".", "only", ",", "\"intersection\"", ")", "# Remove the child field names from the only option.", "self", ".", "only", "=", "self", ".", "set_class", "(", "[", "field", ".", "split", "(", "\".\"", ",", "1", ")", "[", "0", "]", "for", "field", "in", "self", ".", "only", "]", ")", "if", "self", ".", "exclude", ":", "# Apply the exclude option to nested fields.", "self", ".", "__apply_nested_option", "(", "\"exclude\"", ",", "self", ".", "exclude", ",", "\"union\"", ")", "# Remove the parent field names from the exclude option.", "self", ".", "exclude", "=", "self", ".", "set_class", "(", "[", "field", "for", "field", "in", "self", ".", "exclude", "if", "\".\"", "not", "in", "field", "]", ")" ]
[ 920, 4 ]
[ 935, 13 ]
python
en
['en', 'lb', 'en']
True
Schema.__apply_nested_option
(self, option_name, field_names, set_operation)
Apply nested options to nested fields
Apply nested options to nested fields
def __apply_nested_option(self, option_name, field_names, set_operation) -> None: """Apply nested options to nested fields""" # Split nested field names on the first dot. nested_fields = [name.split(".", 1) for name in field_names if "." in name] # Partition the nested field names by parent field. nested_options = defaultdict(list) # type: defaultdict for parent, nested_names in nested_fields: nested_options[parent].append(nested_names) # Apply the nested field options. for key, options in iter(nested_options.items()): new_options = self.set_class(options) original_options = getattr(self.declared_fields[key], option_name, ()) if original_options: if set_operation == "union": new_options |= self.set_class(original_options) if set_operation == "intersection": new_options &= self.set_class(original_options) setattr(self.declared_fields[key], option_name, new_options)
[ "def", "__apply_nested_option", "(", "self", ",", "option_name", ",", "field_names", ",", "set_operation", ")", "->", "None", ":", "# Split nested field names on the first dot.", "nested_fields", "=", "[", "name", ".", "split", "(", "\".\"", ",", "1", ")", "for", "name", "in", "field_names", "if", "\".\"", "in", "name", "]", "# Partition the nested field names by parent field.", "nested_options", "=", "defaultdict", "(", "list", ")", "# type: defaultdict", "for", "parent", ",", "nested_names", "in", "nested_fields", ":", "nested_options", "[", "parent", "]", ".", "append", "(", "nested_names", ")", "# Apply the nested field options.", "for", "key", ",", "options", "in", "iter", "(", "nested_options", ".", "items", "(", ")", ")", ":", "new_options", "=", "self", ".", "set_class", "(", "options", ")", "original_options", "=", "getattr", "(", "self", ".", "declared_fields", "[", "key", "]", ",", "option_name", ",", "(", ")", ")", "if", "original_options", ":", "if", "set_operation", "==", "\"union\"", ":", "new_options", "|=", "self", ".", "set_class", "(", "original_options", ")", "if", "set_operation", "==", "\"intersection\"", ":", "new_options", "&=", "self", ".", "set_class", "(", "original_options", ")", "setattr", "(", "self", ".", "declared_fields", "[", "key", "]", ",", "option_name", ",", "new_options", ")" ]
[ 937, 4 ]
[ 954, 72 ]
python
en
['en', 'en', 'en']
True
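Dot-delimited ``only``/``exclude`` entries are what ``__apply_nested_option`` consumes. Sketch (schema names are illustrative):

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields

    class AddressSchema(Schema):
        street = fields.Str()
        city = fields.Str()

    class UserSchema(Schema):
        name = fields.Str()
        address = fields.Nested(AddressSchema)

    # "address.city" is split on the first dot: the parent keeps "address",
    # and the nested field's `only` is intersected down to {"city"}.
    s = UserSchema(only=("name", "address.city"))
    print(s.dump({"name": "Ada", "address": {"street": "Main", "city": "Oslo"}}))
    # => {'name': 'Ada', 'address': {'city': 'Oslo'}}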
Schema._init_fields
(self)
Update self.fields, self.load_fields, and self.dump_fields based on schema options. This method is private API.
Update self.fields, self.load_fields, and self.dump_fields based on schema options. This method is private API.
def _init_fields(self) -> None: """Update self.fields, self.load_fields, and self.dump_fields based on schema options. This method is private API. """ if self.opts.fields: available_field_names = self.set_class(self.opts.fields) else: available_field_names = self.set_class(self.declared_fields.keys()) if self.opts.additional: available_field_names |= self.set_class(self.opts.additional) invalid_fields = self.set_class() if self.only is not None: # Return only fields specified in only option field_names = self.set_class(self.only) invalid_fields |= field_names - available_field_names else: field_names = available_field_names # If "exclude" option or param is specified, remove those fields. if self.exclude: # Note that this isn't available_field_names, since we want to # apply "only" for the actual calculation. field_names = field_names - self.exclude invalid_fields |= self.exclude - available_field_names if invalid_fields: message = "Invalid fields for {}: {}.".format(self, invalid_fields) raise ValueError(message) fields_dict = self.dict_class() for field_name in field_names: field_obj = self.declared_fields.get(field_name, ma_fields.Inferred()) self._bind_field(field_name, field_obj) fields_dict[field_name] = field_obj load_fields, dump_fields = self.dict_class(), self.dict_class() for field_name, field_obj in fields_dict.items(): if not field_obj.dump_only: load_fields[field_name] = field_obj if not field_obj.load_only: dump_fields[field_name] = field_obj dump_data_keys = [ field_obj.data_key if field_obj.data_key is not None else name for name, field_obj in dump_fields.items() ] if len(dump_data_keys) != len(set(dump_data_keys)): data_keys_duplicates = { x for x in dump_data_keys if dump_data_keys.count(x) > 1 } raise ValueError( "The data_key argument for one or more fields collides " "with another field's name or data_key argument. " "Check the following field names and " "data_key arguments: {}".format(list(data_keys_duplicates)) ) load_attributes = [obj.attribute or name for name, obj in load_fields.items()] if len(load_attributes) != len(set(load_attributes)): attributes_duplicates = { x for x in load_attributes if load_attributes.count(x) > 1 } raise ValueError( "The attribute argument for one or more fields collides " "with another field's name or attribute argument. " "Check the following field names and " "attribute arguments: {}".format(list(attributes_duplicates)) ) self.fields = fields_dict self.dump_fields = dump_fields self.load_fields = load_fields
[ "def", "_init_fields", "(", "self", ")", "->", "None", ":", "if", "self", ".", "opts", ".", "fields", ":", "available_field_names", "=", "self", ".", "set_class", "(", "self", ".", "opts", ".", "fields", ")", "else", ":", "available_field_names", "=", "self", ".", "set_class", "(", "self", ".", "declared_fields", ".", "keys", "(", ")", ")", "if", "self", ".", "opts", ".", "additional", ":", "available_field_names", "|=", "self", ".", "set_class", "(", "self", ".", "opts", ".", "additional", ")", "invalid_fields", "=", "self", ".", "set_class", "(", ")", "if", "self", ".", "only", "is", "not", "None", ":", "# Return only fields specified in only option", "field_names", "=", "self", ".", "set_class", "(", "self", ".", "only", ")", "invalid_fields", "|=", "field_names", "-", "available_field_names", "else", ":", "field_names", "=", "available_field_names", "# If \"exclude\" option or param is specified, remove those fields.", "if", "self", ".", "exclude", ":", "# Note that this isn't available_field_names, since we want to", "# apply \"only\" for the actual calculation.", "field_names", "=", "field_names", "-", "self", ".", "exclude", "invalid_fields", "|=", "self", ".", "exclude", "-", "available_field_names", "if", "invalid_fields", ":", "message", "=", "\"Invalid fields for {}: {}.\"", ".", "format", "(", "self", ",", "invalid_fields", ")", "raise", "ValueError", "(", "message", ")", "fields_dict", "=", "self", ".", "dict_class", "(", ")", "for", "field_name", "in", "field_names", ":", "field_obj", "=", "self", ".", "declared_fields", ".", "get", "(", "field_name", ",", "ma_fields", ".", "Inferred", "(", ")", ")", "self", ".", "_bind_field", "(", "field_name", ",", "field_obj", ")", "fields_dict", "[", "field_name", "]", "=", "field_obj", "load_fields", ",", "dump_fields", "=", "self", ".", "dict_class", "(", ")", ",", "self", ".", "dict_class", "(", ")", "for", "field_name", ",", "field_obj", "in", "fields_dict", ".", "items", "(", ")", ":", "if", "not", "field_obj", ".", "dump_only", ":", "load_fields", "[", "field_name", "]", "=", "field_obj", "if", "not", "field_obj", ".", "load_only", ":", "dump_fields", "[", "field_name", "]", "=", "field_obj", "dump_data_keys", "=", "[", "field_obj", ".", "data_key", "if", "field_obj", ".", "data_key", "is", "not", "None", "else", "name", "for", "name", ",", "field_obj", "in", "dump_fields", ".", "items", "(", ")", "]", "if", "len", "(", "dump_data_keys", ")", "!=", "len", "(", "set", "(", "dump_data_keys", ")", ")", ":", "data_keys_duplicates", "=", "{", "x", "for", "x", "in", "dump_data_keys", "if", "dump_data_keys", ".", "count", "(", "x", ")", ">", "1", "}", "raise", "ValueError", "(", "\"The data_key argument for one or more fields collides \"", "\"with another field's name or data_key argument. \"", "\"Check the following field names and \"", "\"data_key arguments: {}\"", ".", "format", "(", "list", "(", "data_keys_duplicates", ")", ")", ")", "load_attributes", "=", "[", "obj", ".", "attribute", "or", "name", "for", "name", ",", "obj", "in", "load_fields", ".", "items", "(", ")", "]", "if", "len", "(", "load_attributes", ")", "!=", "len", "(", "set", "(", "load_attributes", ")", ")", ":", "attributes_duplicates", "=", "{", "x", "for", "x", "in", "load_attributes", "if", "load_attributes", ".", "count", "(", "x", ")", ">", "1", "}", "raise", "ValueError", "(", "\"The attribute argument for one or more fields collides \"", "\"with another field's name or attribute argument. 
\"", "\"Check the following field names and \"", "\"attribute arguments: {}\"", ".", "format", "(", "list", "(", "attributes_duplicates", ")", ")", ")", "self", ".", "fields", "=", "fields_dict", "self", ".", "dump_fields", "=", "dump_fields", "self", ".", "load_fields", "=", "load_fields" ]
[ 956, 4 ]
[ 1029, 38 ]
python
en
['en', 'fy', 'en']
True
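The ``data_key`` collision check in ``_init_fields`` fires at construction time. Sketch:

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields

    class BadSchema(Schema):
        id = fields.Int()
        uuid = fields.Str(data_key="id")  # collides with the field above

    try:
        BadSchema()  # _init_fields() runs during __init__
    except ValueError as err:
        print(err)  # names the colliding data_key: ['id']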
Schema.on_bind_field
(self, field_name: str, field_obj: ma_fields.Field)
Hook to modify a field when it is bound to the `Schema`. No-op by default.
Hook to modify a field when it is bound to the `Schema`.
def on_bind_field(self, field_name: str, field_obj: ma_fields.Field) -> None: """Hook to modify a field when it is bound to the `Schema`. No-op by default. """ return None
[ "def", "on_bind_field", "(", "self", ",", "field_name", ":", "str", ",", "field_obj", ":", "ma_fields", ".", "Field", ")", "->", "None", ":", "return", "None" ]
[ 1031, 4 ]
[ 1036, 19 ]
python
en
['en', 'en', 'en']
True
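A common use of the ``on_bind_field`` hook is renaming external keys at bind time, e.g. to camelCase (a pattern from the upstream marshmallow docs):

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields

    class CamelCaseSchema(Schema):
        first_name = fields.Str()

        def on_bind_field(self, field_name, field_obj):
            # Rewrite each field's external key when the field is bound.
            head, *tail = field_name.split("_")
            field_obj.data_key = head + "".join(part.title() for part in tail)

    print(CamelCaseSchema().dump({"first_name": "Ada"}))  # => {'firstName': 'Ada'}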
Schema._bind_field
(self, field_name: str, field_obj: ma_fields.Field)
Bind field to the schema, setting any necessary attributes on the field (e.g. parent and name). Also set field load_only and dump_only values if field_name was specified in ``class Meta``.
Bind field to the schema, setting any necessary attributes on the field (e.g. parent and name).
def _bind_field(self, field_name: str, field_obj: ma_fields.Field) -> None: """Bind field to the schema, setting any necessary attributes on the field (e.g. parent and name). Also set field load_only and dump_only values if field_name was specified in ``class Meta``. """ if field_name in self.load_only: field_obj.load_only = True if field_name in self.dump_only: field_obj.dump_only = True try: field_obj._bind_to_schema(field_name, self) except TypeError as error: # Field declared as a class, not an instance. Ignore type checking because # we handle unsupported arg types, i.e. this is dead code from # the type checker's perspective. if isinstance(field_obj, type) and issubclass(field_obj, base.FieldABC): msg = ( 'Field for "{}" must be declared as a ' "Field instance, not a class. " 'Did you mean "fields.{}()"?'.format(field_name, field_obj.__name__) ) raise TypeError(msg) from error raise error self.on_bind_field(field_name, field_obj)
[ "def", "_bind_field", "(", "self", ",", "field_name", ":", "str", ",", "field_obj", ":", "ma_fields", ".", "Field", ")", "->", "None", ":", "if", "field_name", "in", "self", ".", "load_only", ":", "field_obj", ".", "load_only", "=", "True", "if", "field_name", "in", "self", ".", "dump_only", ":", "field_obj", ".", "dump_only", "=", "True", "try", ":", "field_obj", ".", "_bind_to_schema", "(", "field_name", ",", "self", ")", "except", "TypeError", "as", "error", ":", "# Field declared as a class, not an instance. Ignore type checking because", "# we handle unsupported arg types, i.e. this is dead code from", "# the type checker's perspective.", "if", "isinstance", "(", "field_obj", ",", "type", ")", "and", "issubclass", "(", "field_obj", ",", "base", ".", "FieldABC", ")", ":", "msg", "=", "(", "'Field for \"{}\" must be declared as a '", "\"Field instance, not a class. \"", "'Did you mean \"fields.{}()\"?'", ".", "format", "(", "field_name", ",", "field_obj", ".", "__name__", ")", ")", "raise", "TypeError", "(", "msg", ")", "from", "error", "raise", "error", "self", ".", "on_bind_field", "(", "field_name", ",", "field_obj", ")" ]
[ 1038, 4 ]
[ 1063, 49 ]
python
en
['en', 'en', 'en']
True
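The ``TypeError`` branch above catches the classic mistake of declaring a ``Field`` class instead of an instance. Sketch:

.. code-block:: python

    from great_expectations.marshmallow__shade import Schema, fields

    class OopsSchema(Schema):
        name = fields.Str  # a Field class, not an instance

    try:
        OopsSchema()
    except TypeError as err:
        print(err)  # 'Field for "name" must be declared as a Field instance...'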
TestcaseManager.insert_execution_data
(self, execution_query_payload)
Inserts a test execution row into the database. Returns the execution guid. "execution_start_time" is defined by milliseconds since the Epoch. (See https://currentmillis.com to convert that to a real date.)
Inserts a test execution row into the database. Returns the execution guid. "execution_start_time" is defined by milliseconds since the Epoch. (See https://currentmillis.com to convert that to a real date.)
def insert_execution_data(self, execution_query_payload): """ Inserts a test execution row into the database. Returns the execution guid. "execution_start_time" is defined by milliseconds since the Epoch. (See https://currentmillis.com to convert that to a real date.) """ query = """INSERT INTO test_execution (guid, execution_start, total_execution_time, username) VALUES (%(guid)s,%(execution_start_time)s, %(total_execution_time)s,%(username)s)""" DatabaseManager(self.database_env).execute_query( query, execution_query_payload.get_params()) return execution_query_payload.guid
[ "def", "insert_execution_data", "(", "self", ",", "execution_query_payload", ")", ":", "query", "=", "\"\"\"INSERT INTO test_execution\n (guid, execution_start, total_execution_time, username)\n VALUES (%(guid)s,%(execution_start_time)s,\n %(total_execution_time)s,%(username)s)\"\"\"", "DatabaseManager", "(", "self", ".", "database_env", ")", ".", "execute_query", "(", "query", ",", "execution_query_payload", ".", "get_params", "(", ")", ")", "return", "execution_query_payload", ".", "guid" ]
[ 8, 4 ]
[ 21, 43 ]
python
en
['en', 'en', 'en']
True
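The ``%(name)s`` placeholders are DB-API ``pyformat`` parameters; ``execute_query`` is assumed to hand the payload's ``get_params()`` dict straight to the driver. A sketch with hypothetical values:

.. code-block:: python

    query = """INSERT INTO test_execution
        (guid, execution_start, total_execution_time, username)
        VALUES (%(guid)s, %(execution_start_time)s,
                %(total_execution_time)s, %(username)s)"""

    # Hypothetical values; the real payload object supplies them via get_params().
    params = {
        "guid": "c0ffee00-1111-2222-3333-444444444444",
        "execution_start_time": 1612137600000,  # milliseconds since the Epoch
        "total_execution_time": -1,  # patched later by update_execution_data()
        "username": "jenkins",
    }
    # cursor.execute(query, params)  # the driver binds the parameters safely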
TestcaseManager.update_execution_data
(self, execution_guid, execution_time)
Updates an existing test execution row in the database.
Updates an existing test execution row in the database.
def update_execution_data(self, execution_guid, execution_time): """ Updates an existing test execution row in the database. """ query = """UPDATE test_execution SET total_execution_time=%(execution_time)s WHERE guid=%(execution_guid)s """ DatabaseManager(self.database_env).execute_query( query, {"execution_guid": execution_guid, "execution_time": execution_time})
[ "def", "update_execution_data", "(", "self", ",", "execution_guid", ",", "execution_time", ")", ":", "query", "=", "\"\"\"UPDATE test_execution\n SET total_execution_time=%(execution_time)s\n WHERE guid=%(execution_guid)s \"\"\"", "DatabaseManager", "(", "self", ".", "database_env", ")", ".", "execute_query", "(", "query", ",", "{", "\"execution_guid\"", ":", "execution_guid", ",", "\"execution_time\"", ":", "execution_time", "}", ")" ]
[ 23, 4 ]
[ 31, 47 ]
python
en
['en', 'en', 'en']
True
TestcaseManager.insert_testcase_data
(self, testcase_run_payload)
Inserts all data for the test in the DB. Returns new row guid.
Inserts all data for the test in the DB. Returns new row guid.
def insert_testcase_data(self, testcase_run_payload): """ Inserts all data for the test in the DB. Returns new row guid. """ query = """INSERT INTO test_run_data( guid, browser, state, execution_guid, env, start_time, test_address, runtime, retry_count, message, stack_trace) VALUES ( %(guid)s, %(browser)s, %(state)s, %(execution_guid)s, %(env)s, %(start_time)s, %(test_address)s, %(runtime)s, %(retry_count)s, %(message)s, %(stack_trace)s) """ DatabaseManager(self.database_env).execute_query( query, testcase_run_payload.get_params())
[ "def", "insert_testcase_data", "(", "self", ",", "testcase_run_payload", ")", ":", "query", "=", "\"\"\"INSERT INTO test_run_data(\n guid, browser, state, execution_guid, env, start_time,\n test_address, runtime, retry_count, message, stack_trace)\n VALUES (\n %(guid)s,\n %(browser)s,\n %(state)s,\n %(execution_guid)s,\n %(env)s,\n %(start_time)s,\n %(test_address)s,\n %(runtime)s,\n %(retry_count)s,\n %(message)s,\n %(stack_trace)s) \"\"\"", "DatabaseManager", "(", "self", ".", "database_env", ")", ".", "execute_query", "(", "query", ",", "testcase_run_payload", ".", "get_params", "(", ")", ")" ]
[ 33, 4 ]
[ 51, 53 ]
python
en
['en', 'en', 'en']
True
TestcaseManager.update_testcase_data
(self, testcase_payload)
Updates an existing test run in the database.
Updates an existing test run in the database.
def update_testcase_data(self, testcase_payload): """ Updates an existing test run in the database. """ query = """UPDATE test_run_data SET runtime=%(runtime)s, state=%(state)s, retry_count=%(retry_count)s, stack_trace=%(stack_trace)s, message=%(message)s WHERE guid=%(guid)s """ DatabaseManager(self.database_env).execute_query( query, testcase_payload.get_params())
[ "def", "update_testcase_data", "(", "self", ",", "testcase_payload", ")", ":", "query", "=", "\"\"\"UPDATE test_run_data SET\n runtime=%(runtime)s,\n state=%(state)s,\n retry_count=%(retry_count)s,\n stack_trace=%(stack_trace)s,\n message=%(message)s\n WHERE guid=%(guid)s \"\"\"", "DatabaseManager", "(", "self", ".", "database_env", ")", ".", "execute_query", "(", "query", ",", "testcase_payload", ".", "get_params", "(", ")", ")" ]
[ 53, 4 ]
[ 63, 49 ]
python
en
['en', 'en', 'en']
True
get_keywords
()
Get the keywords needed to look up the version information.
Get the keywords needed to look up the version information.
def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "$Format:%d$" git_full = "$Format:%H$" git_date = "$Format:%ci$" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords
[ "def", "get_keywords", "(", ")", ":", "# these strings will be replaced by git during git-archive.", "# setup.py/versioneer.py will grep for the variable names, so they must", "# each be defined on a line of their own. _version.py will just call", "# get_keywords().", "git_refnames", "=", "\"$Format:%d$\"", "git_full", "=", "\"$Format:%H$\"", "git_date", "=", "\"$Format:%ci$\"", "keywords", "=", "{", "\"refnames\"", ":", "git_refnames", ",", "\"full\"", ":", "git_full", ",", "\"date\"", ":", "git_date", "}", "return", "keywords" ]
[ 18, 0 ]
[ 28, 19 ]
python
en
['en', 'en', 'en']
True
get_config
()
Create, populate and return the VersioneerConfig() object.
Create, populate and return the VersioneerConfig() object.
def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.parentdir_prefix = "great_expectations-" cfg.versionfile_source = "great_expectations/_version.py" cfg.verbose = False return cfg
[ "def", "get_config", "(", ")", ":", "# these strings are filled in when 'setup.py versioneer' creates", "# _version.py", "cfg", "=", "VersioneerConfig", "(", ")", "cfg", ".", "VCS", "=", "\"git\"", "cfg", ".", "style", "=", "\"pep440\"", "cfg", ".", "tag_prefix", "=", "\"\"", "cfg", ".", "parentdir_prefix", "=", "\"great_expectations-\"", "cfg", ".", "versionfile_source", "=", "\"great_expectations/_version.py\"", "cfg", ".", "verbose", "=", "False", "return", "cfg" ]
[ 35, 0 ]
[ 46, 14 ]
python
en
['en', 'en', 'en']
True
register_vcs_handler
(vcs, method)
Decorator to mark a method as the handler for a particular VCS.
Decorator to mark a method as the handler for a particular VCS.
def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate
[ "def", "register_vcs_handler", "(", "vcs", ",", "method", ")", ":", "# decorator", "def", "decorate", "(", "f", ")", ":", "\"\"\"Store f in HANDLERS[vcs][method].\"\"\"", "if", "vcs", "not", "in", "HANDLERS", ":", "HANDLERS", "[", "vcs", "]", "=", "{", "}", "HANDLERS", "[", "vcs", "]", "[", "method", "]", "=", "f", "return", "f", "return", "decorate" ]
[ 57, 0 ]
[ 67, 19 ]
python
en
['en', 'en', 'en']
True
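A quick illustration of the decorator in use. HANDLERS is the module-level registry the decorator populates; it and the decorator are restated here so the sketch runs on its own:

HANDLERS = {}  # module-level registry, as in versioneer

def register_vcs_handler(vcs, method):  # decorator (restated from above)
    def decorate(f):
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate

@register_vcs_handler("git", "get_keywords")
def fake_git_get_keywords(versionfile_abs):
    # Stand-in handler; real versioneer registers its git helpers this way.
    return {}

assert HANDLERS["git"]["get_keywords"] is fake_git_get_keywords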
run_command
(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None)
Call the given command(s).
Call the given command(s).
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen( [c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), ) break except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried {}".format(commands)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode
[ "def", "run_command", "(", "commands", ",", "args", ",", "cwd", "=", "None", ",", "verbose", "=", "False", ",", "hide_stderr", "=", "False", ",", "env", "=", "None", ")", ":", "assert", "isinstance", "(", "commands", ",", "list", ")", "for", "c", "in", "commands", ":", "try", ":", "dispcmd", "=", "str", "(", "[", "c", "]", "+", "args", ")", "# remember shell=False, so use git.cmd on windows, not just git", "p", "=", "subprocess", ".", "Popen", "(", "[", "c", "]", "+", "args", ",", "cwd", "=", "cwd", ",", "env", "=", "env", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "(", "subprocess", ".", "PIPE", "if", "hide_stderr", "else", "None", ")", ",", ")", "break", "except", "OSError", ":", "e", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "continue", "if", "verbose", ":", "print", "(", "\"unable to run %s\"", "%", "dispcmd", ")", "print", "(", "e", ")", "return", "None", ",", "None", "else", ":", "if", "verbose", ":", "print", "(", "\"unable to find command, tried {}\"", ".", "format", "(", "commands", ")", ")", "return", "None", ",", "None", "stdout", "=", "p", ".", "communicate", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "sys", ".", "version_info", "[", "0", "]", ">=", "3", ":", "stdout", "=", "stdout", ".", "decode", "(", ")", "if", "p", ".", "returncode", "!=", "0", ":", "if", "verbose", ":", "print", "(", "\"unable to run %s (error)\"", "%", "dispcmd", ")", "print", "(", "\"stdout was %s\"", "%", "stdout", ")", "return", "None", ",", "p", ".", "returncode", "return", "stdout", ",", "p", ".", "returncode" ]
[ 70, 0 ]
[ 106, 31 ]
python
en
['en', 'en', 'en']
True
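run_command returns a (stdout, returncode) pair, or (None, None) when none of the candidate binaries can be launched. A usage sketch, assuming the function above is in scope with its imports (subprocess, sys, errno) and that git is installed:

stdout, rc = run_command(
    ["git"], ["rev-parse", "--short", "HEAD"],
    cwd=".", verbose=True, hide_stderr=True,
)
if stdout is None:
    # Either no git binary was found (rc is None) or git exited non-zero.
    print("could not determine HEAD")
else:
    print("HEAD is at", stdout)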
versions_from_parentdir
(parentdir_prefix, root, verbose)
Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory
Try to determine the version from the parent directory name.
def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None, } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print( "Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix) ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
[ "def", "versions_from_parentdir", "(", "parentdir_prefix", ",", "root", ",", "verbose", ")", ":", "rootdirs", "=", "[", "]", "for", "i", "in", "range", "(", "3", ")", ":", "dirname", "=", "os", ".", "path", ".", "basename", "(", "root", ")", "if", "dirname", ".", "startswith", "(", "parentdir_prefix", ")", ":", "return", "{", "\"version\"", ":", "dirname", "[", "len", "(", "parentdir_prefix", ")", ":", "]", ",", "\"full-revisionid\"", ":", "None", ",", "\"dirty\"", ":", "False", ",", "\"error\"", ":", "None", ",", "\"date\"", ":", "None", ",", "}", "else", ":", "rootdirs", ".", "append", "(", "root", ")", "root", "=", "os", ".", "path", ".", "dirname", "(", "root", ")", "# up a level", "if", "verbose", ":", "print", "(", "\"Tried directories %s but none started with prefix %s\"", "%", "(", "str", "(", "rootdirs", ")", ",", "parentdir_prefix", ")", ")", "raise", "NotThisMethod", "(", "\"rootdir doesn't start with parentdir_prefix\"", ")" ]
[ 109, 0 ]
[ 137, 70 ]
python
en
['en', 'en', 'en']
True
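A small sketch of the parent-directory heuristic, assuming the function above (and its os import) is available; the path is made up. If no directory in the three levels matches the prefix, NotThisMethod is raised instead:

root = "/tmp/build/great_expectations-0.13.0"  # hypothetical unpack location
info = versions_from_parentdir("great_expectations-", root, verbose=False)
print(info["version"])  # -> "0.13.0"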
git_get_keywords
(versionfile_abs)
Extract version information from the given file.
Extract version information from the given file.
def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs) for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except OSError: pass return keywords
[ "def", "git_get_keywords", "(", "versionfile_abs", ")", ":", "# the code embedded in _version.py can just fetch the value of these", "# keywords. When used from setup.py, we don't want to import _version.py,", "# so we do it with a regexp instead. This function is not used from", "# _version.py.", "keywords", "=", "{", "}", "try", ":", "f", "=", "open", "(", "versionfile_abs", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"git_refnames =\"", ")", ":", "mo", "=", "re", ".", "search", "(", "r'=\\s*\"(.*)\"'", ",", "line", ")", "if", "mo", ":", "keywords", "[", "\"refnames\"", "]", "=", "mo", ".", "group", "(", "1", ")", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"git_full =\"", ")", ":", "mo", "=", "re", ".", "search", "(", "r'=\\s*\"(.*)\"'", ",", "line", ")", "if", "mo", ":", "keywords", "[", "\"full\"", "]", "=", "mo", ".", "group", "(", "1", ")", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"git_date =\"", ")", ":", "mo", "=", "re", ".", "search", "(", "r'=\\s*\"(.*)\"'", ",", "line", ")", "if", "mo", ":", "keywords", "[", "\"date\"", "]", "=", "mo", ".", "group", "(", "1", ")", "f", ".", "close", "(", ")", "except", "OSError", ":", "pass", "return", "keywords" ]
[ 141, 0 ]
[ 166, 19 ]
python
en
['en', 'en', 'en']
True
git_versions_from_keywords
(keywords, tag_prefix, verbose)
Get version information from git keywords.
Get version information from git keywords.
def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date, } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None, }
[ "def", "git_versions_from_keywords", "(", "keywords", ",", "tag_prefix", ",", "verbose", ")", ":", "if", "not", "keywords", ":", "raise", "NotThisMethod", "(", "\"no keywords at all, weird\"", ")", "date", "=", "keywords", ".", "get", "(", "\"date\"", ")", "if", "date", "is", "not", "None", ":", "# git-2.2.0 added \"%cI\", which expands to an ISO-8601 -compliant", "# datestamp. However we prefer \"%ci\" (which expands to an \"ISO-8601", "# -like\" string, which we must then edit to make compliant), because", "# it's been around since git-1.5.3, and it's too difficult to", "# discover which version we're using, or to work around using an", "# older one.", "date", "=", "date", ".", "strip", "(", ")", ".", "replace", "(", "\" \"", ",", "\"T\"", ",", "1", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ",", "1", ")", "refnames", "=", "keywords", "[", "\"refnames\"", "]", ".", "strip", "(", ")", "if", "refnames", ".", "startswith", "(", "\"$Format\"", ")", ":", "if", "verbose", ":", "print", "(", "\"keywords are unexpanded, not using\"", ")", "raise", "NotThisMethod", "(", "\"unexpanded keywords, not a git-archive tarball\"", ")", "refs", "=", "{", "r", ".", "strip", "(", ")", "for", "r", "in", "refnames", ".", "strip", "(", "\"()\"", ")", ".", "split", "(", "\",\"", ")", "}", "# starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of", "# just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.", "TAG", "=", "\"tag: \"", "tags", "=", "{", "r", "[", "len", "(", "TAG", ")", ":", "]", "for", "r", "in", "refs", "if", "r", ".", "startswith", "(", "TAG", ")", "}", "if", "not", "tags", ":", "# Either we're using git < 1.8.3, or there really are no tags. We use", "# a heuristic: assume all version tags have a digit. The old git %d", "# expansion behaves like git log --decorate=short and strips out the", "# refs/heads/ and refs/tags/ prefixes that would let us distinguish", "# between branches and tags. By ignoring refnames without digits, we", "# filter out many common branch names like \"release\" and", "# \"stabilization\", as well as \"HEAD\" and \"master\".", "tags", "=", "{", "r", "for", "r", "in", "refs", "if", "re", ".", "search", "(", "r\"\\d\"", ",", "r", ")", "}", "if", "verbose", ":", "print", "(", "\"discarding '%s', no digits\"", "%", "\",\"", ".", "join", "(", "refs", "-", "tags", ")", ")", "if", "verbose", ":", "print", "(", "\"likely tags: %s\"", "%", "\",\"", ".", "join", "(", "sorted", "(", "tags", ")", ")", ")", "for", "ref", "in", "sorted", "(", "tags", ")", ":", "# sorting will prefer e.g. \"2.0\" over \"2.0rc1\"", "if", "ref", ".", "startswith", "(", "tag_prefix", ")", ":", "r", "=", "ref", "[", "len", "(", "tag_prefix", ")", ":", "]", "if", "verbose", ":", "print", "(", "\"picking %s\"", "%", "r", ")", "return", "{", "\"version\"", ":", "r", ",", "\"full-revisionid\"", ":", "keywords", "[", "\"full\"", "]", ".", "strip", "(", ")", ",", "\"dirty\"", ":", "False", ",", "\"error\"", ":", "None", ",", "\"date\"", ":", "date", ",", "}", "# no suitable tags, so version is \"0+unknown\", but full hex is still there", "if", "verbose", ":", "print", "(", "\"no suitable tags, using unknown + full revision id\"", ")", "return", "{", "\"version\"", ":", "\"0+unknown\"", ",", "\"full-revisionid\"", ":", "keywords", "[", "\"full\"", "]", ".", "strip", "(", ")", ",", "\"dirty\"", ":", "False", ",", "\"error\"", ":", "\"no suitable tags\"", ",", "\"date\"", ":", "None", ",", "}" ]
[ 170, 0 ]
[ 228, 5 ]
python
en
['en', 'da', 'en']
True
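When a git-archive tarball has had its $Format$ placeholders expanded, the keywords look roughly like the dict below (all values hypothetical). A sketch of the lookup, assuming the function above is in scope:

keywords = {
    "refnames": " (HEAD -> main, tag: 0.13.0, tag: latest)",
    "full": "f5a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9",
    "date": "2020-11-05 12:34:56 -0500",
}
info = git_versions_from_keywords(keywords, tag_prefix="", verbose=False)
# Both "0.13.0" and "latest" carry the "tag: " prefix; sorting puts
# "0.13.0" first, it matches the (empty) tag_prefix, so it is picked.
print(info["version"])  # -> "0.13.0"
print(info["date"])     # -> "2020-11-05T12:34:56-0500"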
git_pieces_from_vcs
(tag_prefix, root, verbose, run_command=run_command)
Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree.
Get version from 'git describe' in the root of the source tree.
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command( GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix, ], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ 0 ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces
[ "def", "git_pieces_from_vcs", "(", "tag_prefix", ",", "root", ",", "verbose", ",", "run_command", "=", "run_command", ")", ":", "GITS", "=", "[", "\"git\"", "]", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "GITS", "=", "[", "\"git.cmd\"", ",", "\"git.exe\"", "]", "out", ",", "rc", "=", "run_command", "(", "GITS", ",", "[", "\"rev-parse\"", ",", "\"--git-dir\"", "]", ",", "cwd", "=", "root", ",", "hide_stderr", "=", "True", ")", "if", "rc", "!=", "0", ":", "if", "verbose", ":", "print", "(", "\"Directory %s not under git control\"", "%", "root", ")", "raise", "NotThisMethod", "(", "\"'git rev-parse --git-dir' returned error\"", ")", "# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]", "# if there isn't one, this yields HEX[-dirty] (no NUM)", "describe_out", ",", "rc", "=", "run_command", "(", "GITS", ",", "[", "\"describe\"", ",", "\"--tags\"", ",", "\"--dirty\"", ",", "\"--always\"", ",", "\"--long\"", ",", "\"--match\"", ",", "\"%s*\"", "%", "tag_prefix", ",", "]", ",", "cwd", "=", "root", ",", ")", "# --long was added in git-1.5.5", "if", "describe_out", "is", "None", ":", "raise", "NotThisMethod", "(", "\"'git describe' failed\"", ")", "describe_out", "=", "describe_out", ".", "strip", "(", ")", "full_out", ",", "rc", "=", "run_command", "(", "GITS", ",", "[", "\"rev-parse\"", ",", "\"HEAD\"", "]", ",", "cwd", "=", "root", ")", "if", "full_out", "is", "None", ":", "raise", "NotThisMethod", "(", "\"'git rev-parse' failed\"", ")", "full_out", "=", "full_out", ".", "strip", "(", ")", "pieces", "=", "{", "}", "pieces", "[", "\"long\"", "]", "=", "full_out", "pieces", "[", "\"short\"", "]", "=", "full_out", "[", ":", "7", "]", "# maybe improved later", "pieces", "[", "\"error\"", "]", "=", "None", "# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]", "# TAG might have hyphens.", "git_describe", "=", "describe_out", "# look for -dirty suffix", "dirty", "=", "git_describe", ".", "endswith", "(", "\"-dirty\"", ")", "pieces", "[", "\"dirty\"", "]", "=", "dirty", "if", "dirty", ":", "git_describe", "=", "git_describe", "[", ":", "git_describe", ".", "rindex", "(", "\"-dirty\"", ")", "]", "# now we have TAG-NUM-gHEX or HEX", "if", "\"-\"", "in", "git_describe", ":", "# TAG-NUM-gHEX", "mo", "=", "re", ".", "search", "(", "r\"^(.+)-(\\d+)-g([0-9a-f]+)$\"", ",", "git_describe", ")", "if", "not", "mo", ":", "# unparsable. 
Maybe git-describe is misbehaving?", "pieces", "[", "\"error\"", "]", "=", "\"unable to parse git-describe output: '%s'\"", "%", "describe_out", "return", "pieces", "# tag", "full_tag", "=", "mo", ".", "group", "(", "1", ")", "if", "not", "full_tag", ".", "startswith", "(", "tag_prefix", ")", ":", "if", "verbose", ":", "fmt", "=", "\"tag '%s' doesn't start with prefix '%s'\"", "print", "(", "fmt", "%", "(", "full_tag", ",", "tag_prefix", ")", ")", "pieces", "[", "\"error\"", "]", "=", "\"tag '{}' doesn't start with prefix '{}'\"", ".", "format", "(", "full_tag", ",", "tag_prefix", ",", ")", "return", "pieces", "pieces", "[", "\"closest-tag\"", "]", "=", "full_tag", "[", "len", "(", "tag_prefix", ")", ":", "]", "# distance: number of commits since tag", "pieces", "[", "\"distance\"", "]", "=", "int", "(", "mo", ".", "group", "(", "2", ")", ")", "# commit: short hex revision ID", "pieces", "[", "\"short\"", "]", "=", "mo", ".", "group", "(", "3", ")", "else", ":", "# HEX: no tags", "pieces", "[", "\"closest-tag\"", "]", "=", "None", "count_out", ",", "rc", "=", "run_command", "(", "GITS", ",", "[", "\"rev-list\"", ",", "\"HEAD\"", ",", "\"--count\"", "]", ",", "cwd", "=", "root", ")", "pieces", "[", "\"distance\"", "]", "=", "int", "(", "count_out", ")", "# total number of commits", "# commit date: see ISO-8601 comment in git_versions_from_keywords()", "date", "=", "run_command", "(", "GITS", ",", "[", "\"show\"", ",", "\"-s\"", ",", "\"--format=%ci\"", ",", "\"HEAD\"", "]", ",", "cwd", "=", "root", ")", "[", "0", "]", ".", "strip", "(", ")", "pieces", "[", "\"date\"", "]", "=", "date", ".", "strip", "(", ")", ".", "replace", "(", "\" \"", ",", "\"T\"", ",", "1", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ",", "1", ")", "return", "pieces" ]
[ 232, 0 ]
[ 329, 17 ]
python
en
['en', 'en', 'en']
True
plus_or_dot
(pieces)
Return a + if we don't already have one, else return a .
Return a + if we don't already have one, else return a .
def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+"
[ "def", "plus_or_dot", "(", "pieces", ")", ":", "if", "\"+\"", "in", "pieces", ".", "get", "(", "\"closest-tag\"", ",", "\"\"", ")", ":", "return", "\".\"", "return", "\"+\"" ]
[ 332, 0 ]
[ 336, 14 ]
python
en
['en', 'en', 'en']
True
render_pep440
(pieces)
Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
Build up version string, with post-release "local version identifier".
def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered
[ "def", "render_pep440", "(", "pieces", ")", ":", "if", "pieces", "[", "\"closest-tag\"", "]", ":", "rendered", "=", "pieces", "[", "\"closest-tag\"", "]", "if", "pieces", "[", "\"distance\"", "]", "or", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "plus_or_dot", "(", "pieces", ")", "rendered", "+=", "\"%d.g%s\"", "%", "(", "pieces", "[", "\"distance\"", "]", ",", "pieces", "[", "\"short\"", "]", ")", "if", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\".dirty\"", "else", ":", "# exception #1", "rendered", "=", "\"0+untagged.%d.g%s\"", "%", "(", "pieces", "[", "\"distance\"", "]", ",", "pieces", "[", "\"short\"", "]", ")", "if", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\".dirty\"", "return", "rendered" ]
[ 339, 0 ]
[ 360, 19 ]
python
en
['en', 'en', 'en']
True
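The pieces dict consumed by the renderers is produced by git_pieces_from_vcs; hand-building one (hash and values hypothetical) shows the PEP 440 local-version output, assuming render_pep440 and plus_or_dot above are in scope:

pieces = {
    "closest-tag": "0.13.0",  # nearest reachable tag
    "distance": 3,            # commits since that tag
    "short": "abc1234",       # short commit hash (made up)
    "dirty": True,            # uncommitted changes present
    "error": None,
}
print(render_pep440(pieces))  # -> "0.13.0+3.gabc1234.dirty"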
render_pep440_pre
(pieces)
TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE
TAG[.post.devDISTANCE] -- No -dirty.
def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered
[ "def", "render_pep440_pre", "(", "pieces", ")", ":", "if", "pieces", "[", "\"closest-tag\"", "]", ":", "rendered", "=", "pieces", "[", "\"closest-tag\"", "]", "if", "pieces", "[", "\"distance\"", "]", ":", "rendered", "+=", "\".post.dev%d\"", "%", "pieces", "[", "\"distance\"", "]", "else", ":", "# exception #1", "rendered", "=", "\"0.post.dev%d\"", "%", "pieces", "[", "\"distance\"", "]", "return", "rendered" ]
[ 363, 0 ]
[ 376, 19 ]
python
en
['en', 'en', 'pt']
True
render_pep440_post
(pieces)
TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]
TAG[.postDISTANCE[.dev0]+gHEX] .
def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered
[ "def", "render_pep440_post", "(", "pieces", ")", ":", "if", "pieces", "[", "\"closest-tag\"", "]", ":", "rendered", "=", "pieces", "[", "\"closest-tag\"", "]", "if", "pieces", "[", "\"distance\"", "]", "or", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\".post%d\"", "%", "pieces", "[", "\"distance\"", "]", "if", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\".dev0\"", "rendered", "+=", "plus_or_dot", "(", "pieces", ")", "rendered", "+=", "\"g%s\"", "%", "pieces", "[", "\"short\"", "]", "else", ":", "# exception #1", "rendered", "=", "\"0.post%d\"", "%", "pieces", "[", "\"distance\"", "]", "if", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\".dev0\"", "rendered", "+=", "\"+g%s\"", "%", "pieces", "[", "\"short\"", "]", "return", "rendered" ]
[ 379, 0 ]
[ 403, 19 ]
python
cy
['en', 'cy', 'hi']
False
render_pep440_old
(pieces)
TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]
TAG[.postDISTANCE[.dev0]] .
def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered
[ "def", "render_pep440_old", "(", "pieces", ")", ":", "if", "pieces", "[", "\"closest-tag\"", "]", ":", "rendered", "=", "pieces", "[", "\"closest-tag\"", "]", "if", "pieces", "[", "\"distance\"", "]", "or", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\".post%d\"", "%", "pieces", "[", "\"distance\"", "]", "if", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\".dev0\"", "else", ":", "# exception #1", "rendered", "=", "\"0.post%d\"", "%", "pieces", "[", "\"distance\"", "]", "if", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\".dev0\"", "return", "rendered" ]
[ 406, 0 ]
[ 425, 19 ]
python
en
['en', 'mt', 'hi']
False
render_git_describe
(pieces)
TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix)
TAG[-DISTANCE-gHEX][-dirty].
def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered
[ "def", "render_git_describe", "(", "pieces", ")", ":", "if", "pieces", "[", "\"closest-tag\"", "]", ":", "rendered", "=", "pieces", "[", "\"closest-tag\"", "]", "if", "pieces", "[", "\"distance\"", "]", ":", "rendered", "+=", "\"-%d-g%s\"", "%", "(", "pieces", "[", "\"distance\"", "]", ",", "pieces", "[", "\"short\"", "]", ")", "else", ":", "# exception #1", "rendered", "=", "pieces", "[", "\"short\"", "]", "if", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\"-dirty\"", "return", "rendered" ]
[ 428, 0 ]
[ 445, 19 ]
python
en
['en', 'en', 'en']
False
render_git_describe_long
(pieces)
TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always --long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix)
TAG-DISTANCE-gHEX[-dirty].
def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always --long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered
[ "def", "render_git_describe_long", "(", "pieces", ")", ":", "if", "pieces", "[", "\"closest-tag\"", "]", ":", "rendered", "=", "pieces", "[", "\"closest-tag\"", "]", "rendered", "+=", "\"-%d-g%s\"", "%", "(", "pieces", "[", "\"distance\"", "]", ",", "pieces", "[", "\"short\"", "]", ")", "else", ":", "# exception #1", "rendered", "=", "pieces", "[", "\"short\"", "]", "if", "pieces", "[", "\"dirty\"", "]", ":", "rendered", "+=", "\"-dirty\"", "return", "rendered" ]
[ 448, 0 ]
[ 465, 19 ]
python
en
['en', 'en', 'pt']
False
render
(pieces, style)
Render the given version pieces into the requested style.
Render the given version pieces into the requested style.
def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None, } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date"), }
[ "def", "render", "(", "pieces", ",", "style", ")", ":", "if", "pieces", "[", "\"error\"", "]", ":", "return", "{", "\"version\"", ":", "\"unknown\"", ",", "\"full-revisionid\"", ":", "pieces", ".", "get", "(", "\"long\"", ")", ",", "\"dirty\"", ":", "None", ",", "\"error\"", ":", "pieces", "[", "\"error\"", "]", ",", "\"date\"", ":", "None", ",", "}", "if", "not", "style", "or", "style", "==", "\"default\"", ":", "style", "=", "\"pep440\"", "# the default", "if", "style", "==", "\"pep440\"", ":", "rendered", "=", "render_pep440", "(", "pieces", ")", "elif", "style", "==", "\"pep440-pre\"", ":", "rendered", "=", "render_pep440_pre", "(", "pieces", ")", "elif", "style", "==", "\"pep440-post\"", ":", "rendered", "=", "render_pep440_post", "(", "pieces", ")", "elif", "style", "==", "\"pep440-old\"", ":", "rendered", "=", "render_pep440_old", "(", "pieces", ")", "elif", "style", "==", "\"git-describe\"", ":", "rendered", "=", "render_git_describe", "(", "pieces", ")", "elif", "style", "==", "\"git-describe-long\"", ":", "rendered", "=", "render_git_describe_long", "(", "pieces", ")", "else", ":", "raise", "ValueError", "(", "\"unknown style '%s'\"", "%", "style", ")", "return", "{", "\"version\"", ":", "rendered", ",", "\"full-revisionid\"", ":", "pieces", "[", "\"long\"", "]", ",", "\"dirty\"", ":", "pieces", "[", "\"dirty\"", "]", ",", "\"error\"", ":", "None", ",", "\"date\"", ":", "pieces", ".", "get", "(", "\"date\"", ")", ",", "}" ]
[ 468, 0 ]
[ 503, 5 ]
python
en
['en', 'en', 'en']
True
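Using a hand-built pieces dict (values hypothetical), render dispatches on the style string; a few outputs side by side, assuming the renderers above are in scope:

pieces = {"closest-tag": "0.13.0", "distance": 3, "short": "abc1234",
          "dirty": False, "long": "abc1234" + "0" * 33, "error": None,
          "date": "2020-11-05T12:34:56-0500"}
for style in ("pep440", "pep440-post", "git-describe"):
    print(style, "->", render(pieces, style)["version"])
# pep440       -> 0.13.0+3.gabc1234
# pep440-post  -> 0.13.0.post3+gabc1234
# git-describe -> 0.13.0-3-gabc1234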
get_versions
()
Get version information or return default if unable to do so.
Get version information or return default if unable to do so.
def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None, } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None, }
[ "def", "get_versions", "(", ")", ":", "# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have", "# __file__, we can work backwards from there to the root. Some", "# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which", "# case we can only use expanded keywords.", "cfg", "=", "get_config", "(", ")", "verbose", "=", "cfg", ".", "verbose", "try", ":", "return", "git_versions_from_keywords", "(", "get_keywords", "(", ")", ",", "cfg", ".", "tag_prefix", ",", "verbose", ")", "except", "NotThisMethod", ":", "pass", "try", ":", "root", "=", "os", ".", "path", ".", "realpath", "(", "__file__", ")", "# versionfile_source is the relative path from the top of the source", "# tree (where the .git directory might live) to this file. Invert", "# this to find the root from __file__.", "for", "_", "in", "cfg", ".", "versionfile_source", ".", "split", "(", "\"/\"", ")", ":", "root", "=", "os", ".", "path", ".", "dirname", "(", "root", ")", "except", "NameError", ":", "return", "{", "\"version\"", ":", "\"0+unknown\"", ",", "\"full-revisionid\"", ":", "None", ",", "\"dirty\"", ":", "None", ",", "\"error\"", ":", "\"unable to find root of source tree\"", ",", "\"date\"", ":", "None", ",", "}", "try", ":", "pieces", "=", "git_pieces_from_vcs", "(", "cfg", ".", "tag_prefix", ",", "root", ",", "verbose", ")", "return", "render", "(", "pieces", ",", "cfg", ".", "style", ")", "except", "NotThisMethod", ":", "pass", "try", ":", "if", "cfg", ".", "parentdir_prefix", ":", "return", "versions_from_parentdir", "(", "cfg", ".", "parentdir_prefix", ",", "root", ",", "verbose", ")", "except", "NotThisMethod", ":", "pass", "return", "{", "\"version\"", ":", "\"0+unknown\"", ",", "\"full-revisionid\"", ":", "None", ",", "\"dirty\"", ":", "None", ",", "\"error\"", ":", "\"unable to compute version\"", ",", "\"date\"", ":", "None", ",", "}" ]
[ 506, 0 ]
[ 555, 5 ]
python
en
['it', 'en', 'en']
True
clean_text
(start_token, end_token, doc_tokens, doc_bytes, ignore_final_whitespace=True)
Remove HTML tags from a text span and reconstruct proper spacing.
Remove HTML tags from a text span and reconstruct proper spacing.
def clean_text(start_token, end_token, doc_tokens, doc_bytes, ignore_final_whitespace=True): """Remove HTML tags from a text span and reconstruct proper spacing.""" text = "" for index in range(start_token, end_token): token = doc_tokens[index] if token["html_token"]: continue text += token["token"] # Add a single space between two tokens iff there is at least one # whitespace character between them (outside of an HTML tag). For example: # # token1 token2 ==> Add space. # token1</B> <B>token2 ==> Add space. # token1</A>token2 ==> No space. # token1<A href="..." title="...">token2 ==> No space. # token1<SUP>2</SUP>token2 ==> No space. next_token = token last_index = end_token if ignore_final_whitespace else end_token + 1 for next_token in doc_tokens[index + 1:last_index]: if not next_token["html_token"]: break chars = (doc_bytes[token["end_byte"]:next_token["start_byte"]] .decode("utf-8")) # Since some HTML tags are missing from the token list, we count '<' and # '>' to detect if we're inside a tag. unclosed_brackets = 0 for char in chars: if char == "<": unclosed_brackets += 1 elif char == ">": unclosed_brackets -= 1 elif unclosed_brackets == 0 and re.match(r"\s", char): # Add a single space after this token. text += " " break return text
[ "def", "clean_text", "(", "start_token", ",", "end_token", ",", "doc_tokens", ",", "doc_bytes", ",", "ignore_final_whitespace", "=", "True", ")", ":", "text", "=", "\"\"", "for", "index", "in", "range", "(", "start_token", ",", "end_token", ")", ":", "token", "=", "doc_tokens", "[", "index", "]", "if", "token", "[", "\"html_token\"", "]", ":", "continue", "text", "+=", "token", "[", "\"token\"", "]", "# Add a single space between two tokens iff there is at least one", "# whitespace character between them (outside of an HTML tag). For example:", "#", "# token1 token2 ==> Add space.", "# token1</B> <B>token2 ==> Add space.", "# token1</A>token2 ==> No space.", "# token1<A href=\"...\" title=\"...\">token2 ==> No space.", "# token1<SUP>2</SUP>token2 ==> No space.", "next_token", "=", "token", "last_index", "=", "end_token", "if", "ignore_final_whitespace", "else", "end_token", "+", "1", "for", "next_token", "in", "doc_tokens", "[", "index", "+", "1", ":", "last_index", "]", ":", "if", "not", "next_token", "[", "\"html_token\"", "]", ":", "break", "chars", "=", "(", "doc_bytes", "[", "token", "[", "\"end_byte\"", "]", ":", "next_token", "[", "\"start_byte\"", "]", "]", ".", "decode", "(", "\"utf-8\"", ")", ")", "# Since some HTML tags are missing from the token list, we count '<' and", "# '>' to detect if we're inside a tag.", "unclosed_brackets", "=", "0", "for", "char", "in", "chars", ":", "if", "char", "==", "\"<\"", ":", "unclosed_brackets", "+=", "1", "elif", "char", "==", "\">\"", ":", "unclosed_brackets", "-=", "1", "elif", "unclosed_brackets", "==", "0", "and", "re", ".", "match", "(", "r\"\\s\"", ",", "char", ")", ":", "# Add a single space after this token.", "text", "+=", "\" \"", "break", "return", "text" ]
[ 72, 0 ]
[ 108, 13 ]
python
en
['en', 'en', 'en']
True
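A compact illustration of the spacing rules, using a toy token list; the byte offsets are hand-computed to stay consistent with the byte string, and the clean_text above (plus import re) is assumed in scope:

doc_bytes = b"<P>Hello <B>world</B></P>"
doc_tokens = [
    {"token": "<P>",   "html_token": True,  "start_byte": 0,  "end_byte": 3},
    {"token": "Hello", "html_token": False, "start_byte": 3,  "end_byte": 8},
    {"token": "<B>",   "html_token": True,  "start_byte": 9,  "end_byte": 12},
    {"token": "world", "html_token": False, "start_byte": 12, "end_byte": 17},
    {"token": "</B>",  "html_token": True,  "start_byte": 17, "end_byte": 21},
    {"token": "</P>",  "html_token": True,  "start_byte": 21, "end_byte": 25},
]
# The whitespace between "Hello" and "<B>" survives as one space;
# the tags themselves are dropped.
print(clean_text(0, len(doc_tokens), doc_tokens, doc_bytes))  # -> "Hello world"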
reduce_annotations
(anno_types, answers)
In cases where there is annotator disagreement, this function picks either only the short_answers or only the no_answers, depending on which is more numerous, with a bias towards picking short_answers. Note: By this stage, all long_answer annotations and all samples with yes/no answers have been removed. This leaves just no_answer and short_answers.
In cases where there is annotator disagreement, this function picks either only the short_answers or only the no_answers, depending on which is more numerous, with a bias towards picking short_answers.
def reduce_annotations(anno_types, answers): """ In cases where there is annotator disagreement, this function picks either only the short_answers or only the no_answers, depending on which is more numerous, with a bias towards picking short_answers. Note: By this stage, all long_answer annotations and all samples with yes/no answers have been removed. This leaves just no_answer and short_answers.""" for at in set(anno_types): assert at in ("no_answer", "short_answer") if anno_types.count("short_answer") >= anno_types.count("no_answer"): majority = "short_answer" is_impossible = False else: majority = "no_answer" is_impossible = True answers = [a for at, a in zip(anno_types, answers) if at == majority] reduction = len(anno_types) - len(answers) assert reduction < 3 if not is_impossible: global n_no_ans n_no_ans += reduction else: global n_short n_short += reduction answers = [] return answers, is_impossible
[ "def", "reduce_annotations", "(", "anno_types", ",", "answers", ")", ":", "for", "at", "in", "set", "(", "anno_types", ")", ":", "assert", "at", "in", "(", "\"no_answer\"", ",", "\"short_answer\"", ")", "if", "anno_types", ".", "count", "(", "\"short_answer\"", ")", ">=", "anno_types", ".", "count", "(", "\"no_answer\"", ")", ":", "majority", "=", "\"short_answer\"", "is_impossible", "=", "False", "else", ":", "majority", "=", "\"no_answer\"", "is_impossible", "=", "True", "answers", "=", "[", "a", "for", "at", ",", "a", "in", "zip", "(", "anno_types", ",", "answers", ")", "if", "at", "==", "majority", "]", "reduction", "=", "len", "(", "anno_types", ")", "-", "len", "(", "answers", ")", "assert", "reduction", "<", "3", "if", "not", "is_impossible", ":", "global", "n_no_ans", "n_no_ans", "+=", "reduction", "else", ":", "global", "n_short", "n_short", "+=", "reduction", "answers", "=", "[", "]", "return", "answers", ",", "is_impossible" ]
[ 127, 0 ]
[ 152, 31 ]
python
en
['en', 'error', 'th']
False
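The majority vote in action. A standalone run needs the module-level counters the function mutates, so they are initialized here; the annotation values are made up:

n_no_ans = 0  # counters the conversion script keeps at module level
n_short = 0

anno_types = ["short_answer", "short_answer", "no_answer"]
answers = [{"text": "42", "answer_start": 7},
           {"text": "42", "answer_start": 7},
           {"text": "", "answer_start": 0}]
kept, is_impossible = reduce_annotations(anno_types, answers)
# Ties and majorities favor short answers: the lone no_answer vote is dropped.
print(len(kept), is_impossible)  # -> 2 False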
nq_to_squad
(record)
Convert a Natural Questions record to SQuAD format.
Convert a Natural Questions record to SQuAD format.
def nq_to_squad(record): """Convert a Natural Questions record to SQuAD format.""" doc_bytes = record["document_html"].encode("utf-8") doc_tokens = record["document_tokens"] question_text = record["question_text"] question_text = question_text[0].upper() + question_text[1:] + "?" answers = [] anno_types = [] for annotation in record["annotations"]: anno_type = get_anno_type(annotation) long_answer = annotation["long_answer"] short_answers = annotation["short_answers"] if anno_type.lower() in ["yes", "no"]: global n_yn n_yn += 1 return # Skip examples that don't have exactly one short answer. # Note: Consider including multi-span short answers. if anno_type == "multi_short": global n_ms n_ms += 1 return elif anno_type == "short_answer": short_answer = short_answers[0] # Skip examples corresponding to HTML blocks other than <P>. long_answer_html_tag = doc_tokens[long_answer["start_token"]]["token"] if long_answer_html_tag != "<P>": global n_non_p n_non_p += 1 return answer = clean_text( short_answer["start_token"], short_answer["end_token"], doc_tokens, doc_bytes) before_answer = clean_text( 0, short_answer["start_token"], doc_tokens, doc_bytes, ignore_final_whitespace=False) elif anno_type == "no_answer": answer = "" before_answer = "" # Throw out long answer annotations elif anno_type == "long_answer": global n_long_ans n_long_ans += 1 continue anno_types.append(anno_type) answer = {"answer_start": len(before_answer), "text": answer} answers.append(answer) if len(answers) == 0: global n_long_ans_only n_long_ans_only += 1 return answers, is_impossible = reduce_annotations(anno_types, answers) paragraph = clean_text( 0, len(doc_tokens), doc_tokens, doc_bytes) return {"title": record["document_title"], "paragraphs": [{"context": paragraph, "qas": [{"answers": answers, "id": record["example_id"], "question": question_text, "is_impossible": is_impossible}]}]}
[ "def", "nq_to_squad", "(", "record", ")", ":", "doc_bytes", "=", "record", "[", "\"document_html\"", "]", ".", "encode", "(", "\"utf-8\"", ")", "doc_tokens", "=", "record", "[", "\"document_tokens\"", "]", "question_text", "=", "record", "[", "\"question_text\"", "]", "question_text", "=", "question_text", "[", "0", "]", ".", "upper", "(", ")", "+", "question_text", "[", "1", ":", "]", "+", "\"?\"", "answers", "=", "[", "]", "anno_types", "=", "[", "]", "for", "annotation", "in", "record", "[", "\"annotations\"", "]", ":", "anno_type", "=", "get_anno_type", "(", "annotation", ")", "long_answer", "=", "annotation", "[", "\"long_answer\"", "]", "short_answers", "=", "annotation", "[", "\"short_answers\"", "]", "if", "anno_type", ".", "lower", "(", ")", "in", "[", "\"yes\"", ",", "\"no\"", "]", ":", "global", "n_yn", "n_yn", "+=", "1", "return", "# Skip examples that don't have exactly one short answer.", "# Note: Consider including multi-span short answers.", "if", "anno_type", "==", "\"multi_short\"", ":", "global", "n_ms", "n_ms", "+=", "1", "return", "elif", "anno_type", "==", "\"short_answer\"", ":", "short_answer", "=", "short_answers", "[", "0", "]", "# Skip examples corresponding to HTML blocks other than <P>.", "long_answer_html_tag", "=", "doc_tokens", "[", "long_answer", "[", "\"start_token\"", "]", "]", "[", "\"token\"", "]", "if", "long_answer_html_tag", "!=", "\"<P>\"", ":", "global", "n_non_p", "n_non_p", "+=", "1", "return", "answer", "=", "clean_text", "(", "short_answer", "[", "\"start_token\"", "]", ",", "short_answer", "[", "\"end_token\"", "]", ",", "doc_tokens", ",", "doc_bytes", ")", "before_answer", "=", "clean_text", "(", "0", ",", "short_answer", "[", "\"start_token\"", "]", ",", "doc_tokens", ",", "doc_bytes", ",", "ignore_final_whitespace", "=", "False", ")", "elif", "anno_type", "==", "\"no_answer\"", ":", "answer", "=", "\"\"", "before_answer", "=", "\"\"", "# Throw out long answer annotations", "elif", "anno_type", "==", "\"long_answer\"", ":", "global", "n_long_ans", "n_long_ans", "+=", "1", "continue", "anno_types", ".", "append", "(", "anno_type", ")", "answer", "=", "{", "\"answer_start\"", ":", "len", "(", "before_answer", ")", ",", "\"text\"", ":", "answer", "}", "answers", ".", "append", "(", "answer", ")", "if", "len", "(", "answers", ")", "==", "0", ":", "global", "n_long_ans_only", "n_long_ans_only", "+=", "1", "return", "answers", ",", "is_impossible", "=", "reduce_annotations", "(", "anno_types", ",", "answers", ")", "paragraph", "=", "clean_text", "(", "0", ",", "len", "(", "doc_tokens", ")", ",", "doc_tokens", ",", "doc_bytes", ")", "return", "{", "\"title\"", ":", "record", "[", "\"document_title\"", "]", ",", "\"paragraphs\"", ":", "[", "{", "\"context\"", ":", "paragraph", ",", "\"qas\"", ":", "[", "{", "\"answers\"", ":", "answers", ",", "\"id\"", ":", "record", "[", "\"example_id\"", "]", ",", "\"question\"", ":", "question_text", ",", "\"is_impossible\"", ":", "is_impossible", "}", "]", "}", "]", "}" ]
[ 156, 0 ]
[ 231, 60 ]
python
en
['en', 'en', 'en']
True
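For orientation, the SQuAD-style record the converter returns has this shape (all values hypothetical, mirroring the return statement above):

squad_record = {
    "title": "Example page",
    "paragraphs": [{
        "context": "Cleaned paragraph text ...",
        "qas": [{
            "answers": [{"answer_start": 17, "text": "an answer span"}],
            "id": 1234567890,
            "question": "What is the question text?",
            "is_impossible": False,
        }],
    }],
}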
is_main_thread
()
Attempt to reliably check if we are in the main thread.
Attempt to reliably check if we are in the main thread.
def is_main_thread(): """Attempt to reliably check if we are in the main thread.""" try: signal.signal(signal.SIGINT, signal.getsignal(signal.SIGINT)) return True except ValueError: return False
[ "def", "is_main_thread", "(", ")", ":", "try", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "getsignal", "(", "signal", ".", "SIGINT", ")", ")", "return", "True", "except", "ValueError", ":", "return", "False" ]
[ 77, 0 ]
[ 83, 20 ]
python
en
['en', 'en', 'en']
True
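signal.signal raises ValueError when called outside the main thread, which is exactly what the helper exploits; restated here so the sketch runs on its own:

import signal
import threading

def is_main_thread():
    try:
        # Re-installing the current SIGINT handler is a no-op in the main
        # thread and a ValueError anywhere else.
        signal.signal(signal.SIGINT, signal.getsignal(signal.SIGINT))
        return True
    except ValueError:
        return False

print(is_main_thread())  # -> True

t = threading.Thread(target=lambda: print(is_main_thread()))  # prints False
t.start()
t.join()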
async_wraps
(cls, wrapped_cls, attr_name)
Similar to wraps, but for async wrappers of non-async functions.
Similar to wraps, but for async wrappers of non-async functions.
def async_wraps(cls, wrapped_cls, attr_name): """Similar to wraps, but for async wrappers of non-async functions.""" def decorator(func): func.__name__ = attr_name func.__qualname__ = ".".join((cls.__qualname__, attr_name)) func.__doc__ = """Like :meth:`~{}.{}.{}`, but async. """.format( wrapped_cls.__module__, wrapped_cls.__qualname__, attr_name ) return func return decorator
[ "def", "async_wraps", "(", "cls", ",", "wrapped_cls", ",", "attr_name", ")", ":", "def", "decorator", "(", "func", ")", ":", "func", ".", "__name__", "=", "attr_name", "func", ".", "__qualname__", "=", "\".\"", ".", "join", "(", "(", "cls", ".", "__qualname__", ",", "attr_name", ")", ")", "func", ".", "__doc__", "=", "\"\"\"Like :meth:`~{}.{}.{}`, but async.\n\n \"\"\"", ".", "format", "(", "wrapped_cls", ".", "__module__", ",", "wrapped_cls", ".", "__qualname__", ",", "attr_name", ")", "return", "func", "return", "decorator" ]
[ 199, 0 ]
[ 214, 20 ]
python
en
['en', 'en', 'en']
True
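A hedged sketch of how such a helper could be used when hand-building an async facade over a synchronous class. SyncFile and AsyncFile are hypothetical, and a real wrapper would typically defer the call to a worker thread rather than calling directly; this only demonstrates the metadata copying, assuming async_wraps above is in scope:

class SyncFile:
    def read(self):
        return "data"

class AsyncFile:
    def __init__(self, wrapped):
        self._wrapped = wrapped

def make_async_method(cls, wrapped_cls, attr_name):
    @async_wraps(cls, wrapped_cls, attr_name)
    async def method(self):
        # Sketch only: call the sync method directly.
        return getattr(self._wrapped, attr_name)()
    return method

AsyncFile.read = make_async_method(AsyncFile, SyncFile, "read")
print(AsyncFile.read.__qualname__)  # -> "AsyncFile.read"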
name_asyncgen
(agen)
Return the fully-qualified name of the async generator function that produced the async generator iterator *agen*.
Return the fully-qualified name of the async generator function that produced the async generator iterator *agen*.
def name_asyncgen(agen): """Return the fully-qualified name of the async generator function that produced the async generator iterator *agen*. """ if not hasattr(agen, "ag_code"): # pragma: no cover return repr(agen) try: module = agen.ag_frame.f_globals["__name__"] except (AttributeError, KeyError): module = "<{}>".format(agen.ag_code.co_filename) try: qualname = agen.__qualname__ except AttributeError: qualname = agen.ag_code.co_name return f"{module}.{qualname}"
[ "def", "name_asyncgen", "(", "agen", ")", ":", "if", "not", "hasattr", "(", "agen", ",", "\"ag_code\"", ")", ":", "# pragma: no cover", "return", "repr", "(", "agen", ")", "try", ":", "module", "=", "agen", ".", "ag_frame", ".", "f_globals", "[", "\"__name__\"", "]", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "module", "=", "\"<{}>\"", ".", "format", "(", "agen", ".", "ag_code", ".", "co_filename", ")", "try", ":", "qualname", "=", "agen", ".", "__qualname__", "except", "AttributeError", ":", "qualname", "=", "agen", ".", "ag_code", ".", "co_name", "return", "f\"{module}.{qualname}\"" ]
[ 342, 0 ]
[ 356, 33 ]
python
en
['en', 'en', 'en']
True
make_analysator
(f)
Return a static text analyser function that returns float values.
Return a static text analyser function that returns float values.
def make_analysator(f): """Return a static text analyser function that returns float values.""" def text_analyse(text): try: rv = f(text) except Exception: return 0.0 if not rv: return 0.0 try: return min(1.0, max(0.0, float(rv))) except (ValueError, TypeError): return 0.0 text_analyse.__doc__ = f.__doc__ return staticmethod(text_analyse)
[ "def", "make_analysator", "(", "f", ")", ":", "def", "text_analyse", "(", "text", ")", ":", "try", ":", "rv", "=", "f", "(", "text", ")", "except", "Exception", ":", "return", "0.0", "if", "not", "rv", ":", "return", "0.0", "try", ":", "return", "min", "(", "1.0", ",", "max", "(", "0.0", ",", "float", "(", "rv", ")", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "0.0", "text_analyse", ".", "__doc__", "=", "f", ".", "__doc__", "return", "staticmethod", "(", "text_analyse", ")" ]
[ 106, 0 ]
[ 120, 37 ]
python
en
['en', 'en', 'en']
True
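In Pygments this helper wraps lexers' analyse_text heuristics so they always yield a float in [0.0, 1.0]. Below is a toy heuristic (not a real lexer) showing the clamping, assuming make_analysator above is in scope:

def shouty_heuristic(text):
    """Toy heuristic: score by fraction of upper-case characters, doubled."""
    return sum(c.isupper() for c in text) * 2.0 / max(len(text), 1)

class ShoutyAnalyzer:
    analyse_text = make_analysator(shouty_heuristic)

print(ShoutyAnalyzer.analyse_text("ABC"))  # 2.0 clamped to 1.0
print(ShoutyAnalyzer.analyse_text("abc"))  # falsy result -> 0.0
print(ShoutyAnalyzer.analyse_text(""))     # empty/failed -> 0.0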
shebang_matches
(text, regex)
r"""Check if the given regular expression matches the last part of the shebang if one exists. >>> from pygments.util import shebang_matches >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/startsomethingwith python', ... r'python(2\.\d)?') True It also checks for common windows executable file extensions:: >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?') True Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does the same as ``'perl -e'``) Note that this method automatically searches the whole string (eg: the regular expression is wrapped in ``'^$'``)
r"""Check if the given regular expression matches the last part of the shebang if one exists.
def shebang_matches(text, regex): r"""Check if the given regular expression matches the last part of the shebang if one exists. >>> from pygments.util import shebang_matches >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') True >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') False >>> shebang_matches('#!/usr/bin/startsomethingwith python', ... r'python(2\.\d)?') True It also checks for common Windows executable file extensions:: >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?') True Parameters such as ``'-f'`` or ``'--foo'`` are ignored, so ``'perl'`` matches the same as ``'perl -e'``. Note that this method automatically searches the whole string (e.g. the regular expression is wrapped in ``'^$'``) """ index = text.find('\n') if index >= 0: first_line = text[:index].lower() else: first_line = text.lower() if first_line.startswith('#!'): try: found = [x for x in split_path_re.split(first_line[2:].strip()) if x and not x.startswith('-')][-1] except IndexError: return False regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE) if regex.search(found) is not None: return True return False
[ "def", "shebang_matches", "(", "text", ",", "regex", ")", ":", "index", "=", "text", ".", "find", "(", "'\\n'", ")", "if", "index", ">=", "0", ":", "first_line", "=", "text", "[", ":", "index", "]", ".", "lower", "(", ")", "else", ":", "first_line", "=", "text", ".", "lower", "(", ")", "if", "first_line", ".", "startswith", "(", "'#!'", ")", ":", "try", ":", "found", "=", "[", "x", "for", "x", "in", "split_path_re", ".", "split", "(", "first_line", "[", "2", ":", "]", ".", "strip", "(", ")", ")", "if", "x", "and", "not", "x", ".", "startswith", "(", "'-'", ")", "]", "[", "-", "1", "]", "except", "IndexError", ":", "return", "False", "regex", "=", "re", ".", "compile", "(", "r'^%s(\\.(exe|cmd|bat|bin))?$'", "%", "regex", ",", "re", ".", "IGNORECASE", ")", "if", "regex", ".", "search", "(", "found", ")", "is", "not", "None", ":", "return", "True", "return", "False" ]
[ 123, 0 ]
[ 165, 16 ]
python
en
['en', 'en', 'en']
True
doctype_matches
(text, regex)
Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE, e.g. 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
Check if the doctype matches a regular expression (if present).
def doctype_matches(text, regex): """Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE, e.g. 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"' """ m = doctype_lookup_re.match(text) if m is None: return False doctype = m.group(2) return re.compile(regex, re.I).match(doctype.strip()) is not None
[ "def", "doctype_matches", "(", "text", ",", "regex", ")", ":", "m", "=", "doctype_lookup_re", ".", "match", "(", "text", ")", "if", "m", "is", "None", ":", "return", "False", "doctype", "=", "m", ".", "group", "(", "2", ")", "return", "re", ".", "compile", "(", "regex", ",", "re", ".", "I", ")", ".", "match", "(", "doctype", ".", "strip", "(", ")", ")", "is", "not", "None" ]
[ 168, 0 ]
[ 178, 69 ]
python
en
['en', 'en', 'en']
True
html_doctype_matches
(text)
Check if the file looks like it has an HTML doctype.
Check if the file looks like it has an HTML doctype.
def html_doctype_matches(text): """Check if the file looks like it has an HTML doctype.""" return doctype_matches(text, r'html')
[ "def", "html_doctype_matches", "(", "text", ")", ":", "return", "doctype_matches", "(", "text", ",", "r'html'", ")" ]
[ 181, 0 ]
[ 183, 41 ]
python
en
['en', 'en', 'en']
True
looks_like_xml
(text)
Check if a doctype exists or if we have some tags.
Check if a doctype exists or if we have some tags.
def looks_like_xml(text): """Check if a doctype exists or if we have some tags.""" if xml_decl_re.match(text): return True key = hash(text) try: return _looks_like_xml_cache[key] except KeyError: m = doctype_lookup_re.match(text) if m is not None: return True rv = tag_re.search(text[:1000]) is not None _looks_like_xml_cache[key] = rv return rv
[ "def", "looks_like_xml", "(", "text", ")", ":", "if", "xml_decl_re", ".", "match", "(", "text", ")", ":", "return", "True", "key", "=", "hash", "(", "text", ")", "try", ":", "return", "_looks_like_xml_cache", "[", "key", "]", "except", "KeyError", ":", "m", "=", "doctype_lookup_re", ".", "match", "(", "text", ")", "if", "m", "is", "not", "None", ":", "return", "True", "rv", "=", "tag_re", ".", "search", "(", "text", "[", ":", "1000", "]", ")", "is", "not", "None", "_looks_like_xml_cache", "[", "key", "]", "=", "rv", "return", "rv" ]
[ 189, 0 ]
[ 202, 17 ]
python
en
['en', 'en', 'en']
True
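doctype_matches, html_doctype_matches, and looks_like_xml all lean on module-level regexes (doctype_lookup_re, xml_decl_re, tag_re) and a cache dict that these records do not include. The stand-ins below are hypothetical approximations, just enough to exercise the three functions defined above:

import re

# Hypothetical stand-ins for the module-level patterns the functions expect.
doctype_lookup_re = re.compile(r'(<\?.*?\?>)?\s*<!DOCTYPE\s+(.*?)>', re.DOTALL)
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>')
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>', re.DOTALL)
_looks_like_xml_cache = {}

print(html_doctype_matches('<!DOCTYPE html>'))         # True
print(looks_like_xml('<?xml version="1.0"?><root/>'))  # True
print(looks_like_xml('just some plain text'))          # False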
unirange
(a, b)
Returns a regular expression string to match the given non-BMP range.
Returns a regular expression string to match the given non-BMP range.
def unirange(a, b):
    """Returns a regular expression string to match the given non-BMP range."""
    if b < a:
        raise ValueError("Bad character range")
    if a < 0x10000 or b < 0x10000:
        raise ValueError("unirange is only defined for non-BMP ranges")

    if sys.maxunicode > 0xffff:
        # wide build
        return u'[%s-%s]' % (unichr(a), unichr(b))
    else:
        # narrow build stores surrogates, and the 're' module handles them
        # (incorrectly) as characters. Since there is still ordering among
        # these characters, expand the range to one that it understands. Some
        # background in http://bugs.python.org/issue3665 and
        # http://bugs.python.org/issue12749
        #
        # Additionally, the lower constants are using unichr rather than
        # literals because jython [which uses the wide path] can't load this
        # file if they are literals.
        ah, al = _surrogatepair(a)
        bh, bl = _surrogatepair(b)
        if ah == bh:
            return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
        else:
            buf = []
            buf.append(u'%s[%s-%s]' % (unichr(ah), unichr(al),
                                       ah == bh and unichr(bl) or unichr(0xdfff)))
            # bh is always >= ah here, so the gap test must be bh - ah
            if bh - ah > 1:
                # the format arguments must be one tuple; without these parens,
                # append() would be called with four positional arguments
                buf.append(u'[%s-%s][%s-%s]' % (unichr(ah + 1), unichr(bh - 1),
                                                unichr(0xdc00), unichr(0xdfff)))
            if ah != bh:
                buf.append(u'%s[%s-%s]' % (unichr(bh), unichr(0xdc00), unichr(bl)))

            return u'(?:' + u'|'.join(buf) + u')'
[ "def", "unirange", "(", "a", ",", "b", ")", ":", "if", "b", "<", "a", ":", "raise", "ValueError", "(", "\"Bad character range\"", ")", "if", "a", "<", "0x10000", "or", "b", "<", "0x10000", ":", "raise", "ValueError", "(", "\"unirange is only defined for non-BMP ranges\"", ")", "if", "sys", ".", "maxunicode", ">", "0xffff", ":", "# wide build", "return", "u'[%s-%s]'", "%", "(", "unichr", "(", "a", ")", ",", "unichr", "(", "b", ")", ")", "else", ":", "# narrow build stores surrogates, and the 're' module handles them", "# (incorrectly) as characters. Since there is still ordering among", "# these characters, expand the range to one that it understands. Some", "# background in http://bugs.python.org/issue3665 and", "# http://bugs.python.org/issue12749", "#", "# Additionally, the lower constants are using unichr rather than", "# literals because jython [which uses the wide path] can't load this", "# file if they are literals.", "ah", ",", "al", "=", "_surrogatepair", "(", "a", ")", "bh", ",", "bl", "=", "_surrogatepair", "(", "b", ")", "if", "ah", "==", "bh", ":", "return", "u'(?:%s[%s-%s])'", "%", "(", "unichr", "(", "ah", ")", ",", "unichr", "(", "al", ")", ",", "unichr", "(", "bl", ")", ")", "else", ":", "buf", "=", "[", "]", "buf", ".", "append", "(", "u'%s[%s-%s]'", "%", "(", "unichr", "(", "ah", ")", ",", "unichr", "(", "al", ")", ",", "ah", "==", "bh", "and", "unichr", "(", "bl", ")", "or", "unichr", "(", "0xdfff", ")", ")", ")", "if", "ah", "-", "bh", ">", "1", ":", "buf", ".", "append", "(", "u'[%s-%s][%s-%s]'", "%", "unichr", "(", "ah", "+", "1", ")", ",", "unichr", "(", "bh", "-", "1", ")", ",", "unichr", "(", "0xdc00", ")", ",", "unichr", "(", "0xdfff", ")", ")", "if", "ah", "!=", "bh", ":", "buf", ".", "append", "(", "u'%s[%s-%s]'", "%", "(", "unichr", "(", "bh", ")", ",", "unichr", "(", "0xdc00", ")", ",", "unichr", "(", "bl", ")", ")", ")", "return", "u'(?:'", "+", "u'|'", ".", "join", "(", "buf", ")", "+", "u')'" ]
[ 216, 0 ]
[ 252, 49 ]
python
en
['en', 'en', 'en']
True
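The narrow-build branch of unirange rests on UTF-16 surrogate-pair arithmetic. A small self-check of that arithmetic; the _surrogatepair helper is not included in this record, so the definition below is the standard encoding it is assumed to implement:
def _surrogatepair(c):
    # Standard UTF-16 encoding of a supplementary code point:
    # high surrogate in 0xD800-0xDBFF, low surrogate in 0xDC00-0xDFFF.
    return 0xD800 + ((c - 0x10000) >> 10), 0xDC00 + ((c - 0x10000) & 0x3FF)

assert _surrogatepair(0x10000) == (0xD800, 0xDC00)
assert _surrogatepair(0x1F600) == (0xD83D, 0xDE00)  # matches '\U0001F600'.encode('utf-16-be')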
format_lines
(var_name, seq, raw=False, indent_level=0)
Formats a sequence of strings for output.
Formats a sequence of strings for output.
def format_lines(var_name, seq, raw=False, indent_level=0):
    """Formats a sequence of strings for output."""
    lines = []
    base_indent = ' ' * indent_level * 4
    inner_indent = ' ' * (indent_level + 1) * 4
    lines.append(base_indent + var_name + ' = (')
    if raw:
        # These should be preformatted reprs of, say, tuples.
        for i in seq:
            lines.append(inner_indent + i + ',')
    else:
        for i in seq:
            # Force use of single quotes
            r = repr(i + '"')
            lines.append(inner_indent + r[:-2] + r[-1] + ',')
    lines.append(base_indent + ')')
    return '\n'.join(lines)
[ "def", "format_lines", "(", "var_name", ",", "seq", ",", "raw", "=", "False", ",", "indent_level", "=", "0", ")", ":", "lines", "=", "[", "]", "base_indent", "=", "' '", "*", "indent_level", "*", "4", "inner_indent", "=", "' '", "*", "(", "indent_level", "+", "1", ")", "*", "4", "lines", ".", "append", "(", "base_indent", "+", "var_name", "+", "' = ('", ")", "if", "raw", ":", "# These should be preformatted reprs of, say, tuples.", "for", "i", "in", "seq", ":", "lines", ".", "append", "(", "inner_indent", "+", "i", "+", "','", ")", "else", ":", "for", "i", "in", "seq", ":", "# Force use of single quotes", "r", "=", "repr", "(", "i", "+", "'\"'", ")", "lines", ".", "append", "(", "inner_indent", "+", "r", "[", ":", "-", "2", "]", "+", "r", "[", "-", "1", "]", "+", "','", ")", "lines", ".", "append", "(", "base_indent", "+", "')'", ")", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
[ 255, 0 ]
[ 271, 27 ]
python
en
['en', 'en', 'en']
True
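Assuming the same pygments.util provenance, a quick look at the output format_lines produces (note the repr trick above that forces single-quoted strings):
from pygments.util import format_lines

print(format_lines('KEYWORDS', ['if', 'elif', 'else'], indent_level=1))
# Expected output:
#     KEYWORDS = (
#         'if',
#         'elif',
#         'else',
#     )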
duplicates_removed
(it, already_seen=())
Returns a list with duplicates removed from the iterable `it`. Order is preserved.
Returns a list with duplicates removed from the iterable `it`.
def duplicates_removed(it, already_seen=()):
    """
    Returns a list with duplicates removed from the iterable `it`.

    Order is preserved.
    """
    lst = []
    seen = set()
    for i in it:
        if i in seen or i in already_seen:
            continue
        lst.append(i)
        seen.add(i)
    return lst
[ "def", "duplicates_removed", "(", "it", ",", "already_seen", "=", "(", ")", ")", ":", "lst", "=", "[", "]", "seen", "=", "set", "(", ")", "for", "i", "in", "it", ":", "if", "i", "in", "seen", "or", "i", "in", "already_seen", ":", "continue", "lst", ".", "append", "(", "i", ")", "seen", ".", "add", "(", "i", ")", "return", "lst" ]
[ 274, 0 ]
[ 287, 14 ]
python
en
['en', 'error', 'th']
False
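A two-line sketch of duplicates_removed, including the less obvious already_seen parameter, which suppresses items without recording them in the output:
from pygments.util import duplicates_removed

print(duplicates_removed([3, 1, 3, 2, 1]))              # [3, 1, 2]
print(duplicates_removed([3, 1, 2], already_seen={1}))  # [3, 2]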
guess_decode
(text)
Decode *text* with guessed encoding. First try UTF-8; this should fail for non-UTF-8 encodings. Then try the preferred locale encoding. Fall back to latin-1, which always works.
Decode *text* with guessed encoding.
def guess_decode(text):
    """Decode *text* with guessed encoding.

    First try UTF-8; this should fail for non-UTF-8 encodings.
    Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    """
    try:
        text = text.decode('utf-8')
        return text, 'utf-8'
    except UnicodeDecodeError:
        try:
            import locale
            prefencoding = locale.getpreferredencoding()
            # decode with the locale encoding; a bare decode() would just
            # retry UTF-8, which already failed above
            text = text.decode(prefencoding)
            return text, prefencoding
        except (UnicodeDecodeError, LookupError):
            text = text.decode('latin1')
            return text, 'latin1'
[ "def", "guess_decode", "(", "text", ")", ":", "try", ":", "text", "=", "text", ".", "decode", "(", "'utf-8'", ")", "return", "text", ",", "'utf-8'", "except", "UnicodeDecodeError", ":", "try", ":", "import", "locale", "prefencoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "text", "=", "text", ".", "decode", "(", ")", "return", "text", ",", "prefencoding", "except", "(", "UnicodeDecodeError", ",", "LookupError", ")", ":", "text", "=", "text", ".", "decode", "(", "'latin1'", ")", "return", "text", ",", "'latin1'" ]
[ 300, 0 ]
[ 318, 33 ]
python
en
['en', 'en', 'en']
True
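A hedged usage sketch for guess_decode; the second result depends on the machine's locale, as the fallback chain in the function implies:
from pygments.util import guess_decode

print(guess_decode(b'caf\xc3\xa9'))  # ('café', 'utf-8')
# Invalid UTF-8: falls through to the locale encoding, then to latin-1,
# so the reported encoding varies with the local configuration.
print(guess_decode(b'caf\xe9'))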
guess_decode_from_terminal
(text, term)
Decode *text* coming from terminal *term*. First try the terminal encoding, if given. Then try UTF-8. Then try the preferred locale encoding. Fall back to latin-1, which always works.
Decode *text* coming from terminal *term*.
def guess_decode_from_terminal(text, term):
    """Decode *text* coming from terminal *term*.

    First try the terminal encoding, if given.
    Then try UTF-8.  Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    """
    if getattr(term, 'encoding', None):
        try:
            text = text.decode(term.encoding)
        except UnicodeDecodeError:
            pass
        else:
            return text, term.encoding
    return guess_decode(text)
[ "def", "guess_decode_from_terminal", "(", "text", ",", "term", ")", ":", "if", "getattr", "(", "term", ",", "'encoding'", ",", "None", ")", ":", "try", ":", "text", "=", "text", ".", "decode", "(", "term", ".", "encoding", ")", "except", "UnicodeDecodeError", ":", "pass", "else", ":", "return", "text", ",", "term", ".", "encoding", "return", "guess_decode", "(", "text", ")" ]
[ 321, 0 ]
[ 335, 29 ]
python
en
['en', 'en', 'en']
True
terminal_encoding
(term)
Return our best guess of encoding for the given *term*.
Return our best guess of encoding for the given *term*.
def terminal_encoding(term):
    """Return our best guess of encoding for the given *term*."""
    if getattr(term, 'encoding', None):
        return term.encoding
    import locale
    return locale.getpreferredencoding()
[ "def", "terminal_encoding", "(", "term", ")", ":", "if", "getattr", "(", "term", ",", "'encoding'", ",", "None", ")", ":", "return", "term", ".", "encoding", "import", "locale", "return", "locale", ".", "getpreferredencoding", "(", ")" ]
[ 338, 0 ]
[ 343, 40 ]
python
en
['en', 'en', 'en']
True
add_metaclass
(metaclass)
Class decorator for creating a class with a metaclass.
Class decorator for creating a class with a metaclass.
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        for slots_var in orig_vars.get('__slots__', ()):
            orig_vars.pop(slots_var)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper
[ "def", "add_metaclass", "(", "metaclass", ")", ":", "def", "wrapper", "(", "cls", ")", ":", "orig_vars", "=", "cls", ".", "__dict__", ".", "copy", "(", ")", "orig_vars", ".", "pop", "(", "'__dict__'", ",", "None", ")", "orig_vars", ".", "pop", "(", "'__weakref__'", ",", "None", ")", "for", "slots_var", "in", "orig_vars", ".", "get", "(", "'__slots__'", ",", "(", ")", ")", ":", "orig_vars", ".", "pop", "(", "slots_var", ")", "return", "metaclass", "(", "cls", ".", "__name__", ",", "cls", ".", "__bases__", ",", "orig_vars", ")", "return", "wrapper" ]
[ 377, 0 ]
[ 386, 18 ]
python
en
['en', 'en', 'en']
True
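This is the same decorator that six ships as six.add_metaclass, so the sketch below imports it from there to stay runnable; Meta and Widget are made-up names for illustration:
from six import add_metaclass

class Meta(type):
    def __new__(mcls, name, bases, ns):
        ns.setdefault('tag', name.lower())  # inject a default attribute
        return super().__new__(mcls, name, bases, ns)

@add_metaclass(Meta)
class Widget(object):
    pass

assert type(Widget) is Meta
assert Widget.tag == 'widget'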
WaitForSingleObject
(obj)
Async and cancellable variant of WaitForSingleObject. Windows only. Args: handle: A Win32 handle, as a Python integer. Raises: OSError: If the handle is invalid, e.g. when it is already closed.
Async and cancellable variant of WaitForSingleObject. Windows only.
async def WaitForSingleObject(obj):
    """Async and cancellable variant of WaitForSingleObject. Windows only.

    Args:
      handle: A Win32 handle, as a Python integer.

    Raises:
      OSError: If the handle is invalid, e.g. when it is already closed.
    """
    # Allow ints or whatever we can convert to a win handle
    handle = _handle(obj)

    # Quick check; we might not even need to spawn a thread. The zero
    # means a zero timeout; this call never blocks. We also exit here
    # if the handle is already closed for some reason.
    retcode = kernel32.WaitForSingleObject(handle, 0)
    if retcode == ErrorCodes.WAIT_FAILED:
        raise_winerror()
    elif retcode != ErrorCodes.WAIT_TIMEOUT:
        return

    # Wait for a thread that waits for two handles: the handle plus a handle
    # that we can use to cancel the thread.
    cancel_handle = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    try:
        await trio.to_thread.run_sync(
            WaitForMultipleObjects_sync,
            handle,
            cancel_handle,
            cancellable=True,
            limiter=trio.CapacityLimiter(math.inf),
        )
    finally:
        # Clean up our cancel handle. In case we get here because this task was
        # cancelled, we also want to set the cancel_handle to stop the thread.
        kernel32.SetEvent(cancel_handle)
        kernel32.CloseHandle(cancel_handle)
[ "async", "def", "WaitForSingleObject", "(", "obj", ")", ":", "# Allow ints or whatever we can convert to a win handle", "handle", "=", "_handle", "(", "obj", ")", "# Quick check; we might not even need to spawn a thread. The zero", "# means a zero timeout; this call never blocks. We also exit here", "# if the handle is already closed for some reason.", "retcode", "=", "kernel32", ".", "WaitForSingleObject", "(", "handle", ",", "0", ")", "if", "retcode", "==", "ErrorCodes", ".", "WAIT_FAILED", ":", "raise_winerror", "(", ")", "elif", "retcode", "!=", "ErrorCodes", ".", "WAIT_TIMEOUT", ":", "return", "# Wait for a thread that waits for two handles: the handle plus a handle", "# that we can use to cancel the thread.", "cancel_handle", "=", "kernel32", ".", "CreateEventA", "(", "ffi", ".", "NULL", ",", "True", ",", "False", ",", "ffi", ".", "NULL", ")", "try", ":", "await", "trio", ".", "to_thread", ".", "run_sync", "(", "WaitForMultipleObjects_sync", ",", "handle", ",", "cancel_handle", ",", "cancellable", "=", "True", ",", "limiter", "=", "trio", ".", "CapacityLimiter", "(", "math", ".", "inf", ")", ",", ")", "finally", ":", "# Clean up our cancel handle. In case we get here because this task was", "# cancelled, we also want to set the cancel_handle to stop the thread.", "kernel32", ".", "SetEvent", "(", "cancel_handle", ")", "kernel32", ".", "CloseHandle", "(", "cancel_handle", ")" ]
[ 12, 0 ]
[ 49, 43 ]
python
en
['en', 'en', 'en']
True
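This coroutine looks like trio's Windows helper, which released trio exposes as trio.lowlevel.WaitForSingleObject. A hedged, Windows-only sketch of the cancellation behaviour the implementation works hard to provide (the event handle here is created with ctypes purely for illustration):
import ctypes
import trio

kernel32 = ctypes.windll.kernel32            # Windows only
kernel32.CreateEventA.restype = ctypes.c_void_p  # HANDLE is pointer-sized

async def main():
    # Manual-reset event that is never signaled, so the wait can only end
    # by being cancelled.
    event = kernel32.CreateEventA(None, True, False, None)
    try:
        with trio.move_on_after(0.2) as scope:
            await trio.lowlevel.WaitForSingleObject(event)
        assert scope.cancelled_caught  # the wait was cancelled cleanly
    finally:
        kernel32.CloseHandle(event)

trio.run(main)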
WaitForMultipleObjects_sync
(*handles)
Wait for any of the given Windows handles to be signaled.
Wait for any of the given Windows handles to be signaled.
def WaitForMultipleObjects_sync(*handles):
    """Wait for any of the given Windows handles to be signaled."""
    n = len(handles)
    handle_arr = ffi.new("HANDLE[{}]".format(n))
    for i in range(n):
        handle_arr[i] = handles[i]
    timeout = 0xFFFFFFFF  # INFINITE
    retcode = kernel32.WaitForMultipleObjects(n, handle_arr, False, timeout)  # blocking
    if retcode == ErrorCodes.WAIT_FAILED:
        raise_winerror()
[ "def", "WaitForMultipleObjects_sync", "(", "*", "handles", ")", ":", "n", "=", "len", "(", "handles", ")", "handle_arr", "=", "ffi", ".", "new", "(", "\"HANDLE[{}]\"", ".", "format", "(", "n", ")", ")", "for", "i", "in", "range", "(", "n", ")", ":", "handle_arr", "[", "i", "]", "=", "handles", "[", "i", "]", "timeout", "=", "0xFFFFFFFF", "# INFINITE", "retcode", "=", "kernel32", ".", "WaitForMultipleObjects", "(", "n", ",", "handle_arr", ",", "False", ",", "timeout", ")", "# blocking", "if", "retcode", "==", "ErrorCodes", ".", "WAIT_FAILED", ":", "raise_winerror", "(", ")" ]
[ 52, 0 ]
[ 61, 24 ]
python
en
['en', 'en', 'en']
True
wsc_data_query
(stations)
Fetch data from WSC for the given stations by running the database/web queries in parallel.
Fetch data from WSC for the given stations by running the database/web queries in parallel.
def wsc_data_query(stations):
    """
    Fetch data from WSC for the given stations by running
    the database/web queries in parallel.
    """
    t0 = time.time()

    # Collect fetch methods for all dashboard modules
    # fetch_method = {module.id: getattr(
    #     module, 'fetch_data') for module in modules}

    # Create a thread pool: one separate thread for each station to be queried
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(stations)) as executor:
        # Prepare the thread tasks
        tasks = {}
        for station in stations:
            task = executor.submit(
                getattr(wsc_module, 'fetch_wsc_data'), station)
            tasks[task] = station
        # Run the tasks and collect results as they arrive
        results = {}
        for task in concurrent.futures.as_completed(tasks):
            key = tasks[task]
            results[key] = task.result()
    # Return results once all tasks have been completed
    t1 = time.time()
    timer.text = '(Executed queries in %s seconds)' % round(t1 - t0, 2)
    return getattr(wsc_module, 'get_all_data')(results)
[ "def", "wsc_data_query", "(", "stations", ")", ":", "t0", "=", "time", ".", "time", "(", ")", "# Collect fetch methods for all dashboard modules", "# fetch_method = {module.id: getattr(", "# module, 'fetch_data') for module in modules}", "# Create a thread pool: one separate thread for each station to be queried", "with", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "max_workers", "=", "len", "(", "stations", ")", ")", "as", "executor", ":", "# Prepare the thread tasks", "tasks", "=", "{", "}", "for", "station", "in", "stations", ":", "task", "=", "executor", ".", "submit", "(", "getattr", "(", "wsc_module", ",", "'fetch_wsc_data'", ")", ",", "station", ")", "tasks", "[", "task", "]", "=", "station", "# Run the tasks and collect results as they arrive", "results", "=", "{", "}", "for", "task", "in", "concurrent", ".", "futures", ".", "as_completed", "(", "tasks", ")", ":", "key", "=", "tasks", "[", "task", "]", "results", "[", "key", "]", "=", "task", ".", "result", "(", ")", "# Return results once all tasks have been completed", "t1", "=", "time", ".", "time", "(", ")", "timer", ".", "text", "=", "'(Executed queries in %s seconds)'", "%", "round", "(", "t1", "-", "t0", ",", "2", ")", "return", "getattr", "(", "wsc_module", ",", "'get_all_data'", ")", "(", "results", ")" ]
[ 50, 0 ]
[ 77, 55 ]
python
en
['en', 'error', 'th']
False
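The interesting part of wsc_data_query is the thread-pool fan-out/fan-in. A generic, self-contained sketch of that pattern; fetch_one and the station IDs are made up, and the project-specific timer widget and wsc_module are replaced with a print:
import concurrent.futures
import time

def fetch_one(station):
    time.sleep(0.1)  # stand-in for a database/web query
    return {'station': station, 'level': 1.0}

stations = ['05BB001', '05AA008', '05DF001']
t0 = time.time()
with concurrent.futures.ThreadPoolExecutor(max_workers=len(stations)) as executor:
    # One task per station; map each future back to its station ID
    tasks = {executor.submit(fetch_one, s): s for s in stations}
    results = {tasks[t]: t.result()
               for t in concurrent.futures.as_completed(tasks)}
print(results)
print('(Executed queries in %s seconds)' % round(time.time() - t0, 2))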
test_cli_works_from_adjacent_directory_without_config_flag
( monkeypatch, empty_data_context )
We don't care about the NOUN here just combinations of the config flag
We don't care about the NOUN here just combinations of the config flag
def test_cli_works_from_adjacent_directory_without_config_flag(
    monkeypatch, empty_data_context
):
    """We don't care about the NOUN here just combinations of the config flag"""
    runner = CliRunner(mix_stderr=True)
    monkeypatch.chdir(os.path.dirname(empty_data_context.root_directory))
    result = runner.invoke(cli, "--v3-api checkpoint list", catch_exceptions=False)
    assert result.exit_code == 0
    assert "No Checkpoints found" in result.output
[ "def", "test_cli_works_from_adjacent_directory_without_config_flag", "(", "monkeypatch", ",", "empty_data_context", ")", ":", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "True", ")", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "empty_data_context", ".", "root_directory", ")", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "\"--v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found\"", "in", "result", ".", "output" ]
[ 89, 0 ]
[ 97, 50 ]
python
en
['en', 'en', 'en']
True
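The next several records are variations on a single click-testing pattern; a condensed, self-contained sketch of it is below (the toy cli group and list command stand in for the great_expectations CLI and its fixtures):
import click
from click.testing import CliRunner

@click.group()
def cli():
    pass

@cli.command(name='list')
def list_():
    click.echo('No Checkpoints found')

# Invoke the CLI in-process and assert on exit code and captured output,
# exactly as the tests in these records do.
runner = CliRunner()
result = runner.invoke(cli, 'list', catch_exceptions=False)
assert result.exit_code == 0
assert 'No Checkpoints found' in result.output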
test_cli_works_from_great_expectations_directory_without_config_flag
( monkeypatch, empty_data_context )
We don't care about the NOUN here just combinations of the config flag
We don't care about the NOUN here just combinations of the config flag
def test_cli_works_from_great_expectations_directory_without_config_flag(
    monkeypatch, empty_data_context
):
    """We don't care about the NOUN here just combinations of the config flag"""
    runner = CliRunner(mix_stderr=True)
    monkeypatch.chdir(empty_data_context.root_directory)
    result = runner.invoke(cli, "--v3-api checkpoint list", catch_exceptions=False)
    assert result.exit_code == 0
    assert "No Checkpoints found" in result.output
[ "def", "test_cli_works_from_great_expectations_directory_without_config_flag", "(", "monkeypatch", ",", "empty_data_context", ")", ":", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "True", ")", "monkeypatch", ".", "chdir", "(", "empty_data_context", ".", "root_directory", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "\"--v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found\"", "in", "result", ".", "output" ]
[ 100, 0 ]
[ 108, 50 ]
python
en
['en', 'en', 'en']
True
test_cli_works_from_random_directory_with_config_flag_fully_specified_yml
( monkeypatch, empty_data_context, tmp_path_factory )
We don't care about the NOUN here just combinations of the config flag
We don't care about the NOUN here just combinations of the config flag
def test_cli_works_from_random_directory_with_config_flag_fully_specified_yml(
    monkeypatch, empty_data_context, tmp_path_factory
):
    """We don't care about the NOUN here just combinations of the config flag"""
    context = empty_data_context
    runner = CliRunner(mix_stderr=True)
    temp_dir = tmp_path_factory.mktemp("config_flag_check")
    monkeypatch.chdir(temp_dir)
    result = runner.invoke(
        cli,
        f"--config {context.root_directory}/great_expectations.yml --v3-api checkpoint list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    assert "No Checkpoints found" in result.output
[ "def", "test_cli_works_from_random_directory_with_config_flag_fully_specified_yml", "(", "monkeypatch", ",", "empty_data_context", ",", "tmp_path_factory", ")", ":", "context", "=", "empty_data_context", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "True", ")", "temp_dir", "=", "tmp_path_factory", ".", "mktemp", "(", "\"config_flag_check\"", ")", "monkeypatch", ".", "chdir", "(", "temp_dir", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--config {context.root_directory}/great_expectations.yml --v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found\"", "in", "result", ".", "output" ]
[ 111, 0 ]
[ 125, 50 ]
python
en
['en', 'en', 'en']
True
test_cli_works_from_random_directory_with_config_flag_great_expectations_directory
( monkeypatch, empty_data_context, tmp_path_factory )
We don't care about the NOUN here just combinations of the config flag
We don't care about the NOUN here just combinations of the config flag
def test_cli_works_from_random_directory_with_config_flag_great_expectations_directory(
    monkeypatch, empty_data_context, tmp_path_factory
):
    """We don't care about the NOUN here just combinations of the config flag"""
    context = empty_data_context
    runner = CliRunner(mix_stderr=True)
    temp_dir = tmp_path_factory.mktemp("config_flag_check")
    monkeypatch.chdir(temp_dir)
    result = runner.invoke(
        cli,
        f"--config {context.root_directory} --v3-api checkpoint list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    assert "No Checkpoints found" in result.output
[ "def", "test_cli_works_from_random_directory_with_config_flag_great_expectations_directory", "(", "monkeypatch", ",", "empty_data_context", ",", "tmp_path_factory", ")", ":", "context", "=", "empty_data_context", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "True", ")", "temp_dir", "=", "tmp_path_factory", ".", "mktemp", "(", "\"config_flag_check\"", ")", "monkeypatch", ".", "chdir", "(", "temp_dir", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--config {context.root_directory} --v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found\"", "in", "result", ".", "output" ]
[ 128, 0 ]
[ 142, 50 ]
python
en
['en', 'en', 'en']
True
test_cli_works_from_random_directory_with_c_flag_fully_specified_yml
( monkeypatch, empty_data_context, tmp_path_factory )
We don't care about the NOUN here just combinations of the config flag
We don't care about the NOUN here just combinations of the config flag
def test_cli_works_from_random_directory_with_c_flag_fully_specified_yml(
    monkeypatch, empty_data_context, tmp_path_factory
):
    """We don't care about the NOUN here just combinations of the config flag"""
    context = empty_data_context
    runner = CliRunner(mix_stderr=True)
    temp_dir = tmp_path_factory.mktemp("config_flag_check")
    monkeypatch.chdir(temp_dir)
    result = runner.invoke(
        cli,
        f"-c {context.root_directory}/great_expectations.yml --v3-api checkpoint list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    assert "No Checkpoints found" in result.output
[ "def", "test_cli_works_from_random_directory_with_c_flag_fully_specified_yml", "(", "monkeypatch", ",", "empty_data_context", ",", "tmp_path_factory", ")", ":", "context", "=", "empty_data_context", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "True", ")", "temp_dir", "=", "tmp_path_factory", ".", "mktemp", "(", "\"config_flag_check\"", ")", "monkeypatch", ".", "chdir", "(", "temp_dir", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"-c {context.root_directory}/great_expectations.yml --v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found\"", "in", "result", ".", "output" ]
[ 145, 0 ]
[ 159, 50 ]
python
en
['en', 'en', 'en']
True
test_cli_works_from_random_directory_with_c_flag_great_expectations_directory
( monkeypatch, empty_data_context, tmp_path_factory )
We don't care about the NOUN here just combinations of the config flag
We don't care about the NOUN here just combinations of the config flag
def test_cli_works_from_random_directory_with_c_flag_great_expectations_directory(
    monkeypatch, empty_data_context, tmp_path_factory
):
    """We don't care about the NOUN here just combinations of the config flag"""
    context = empty_data_context
    runner = CliRunner(mix_stderr=True)
    temp_dir = tmp_path_factory.mktemp("config_flag_check")
    monkeypatch.chdir(temp_dir)
    result = runner.invoke(
        cli,
        f"-c {context.root_directory} --v3-api checkpoint list",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    assert "No Checkpoints found" in result.output
[ "def", "test_cli_works_from_random_directory_with_c_flag_great_expectations_directory", "(", "monkeypatch", ",", "empty_data_context", ",", "tmp_path_factory", ")", ":", "context", "=", "empty_data_context", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "True", ")", "temp_dir", "=", "tmp_path_factory", ".", "mktemp", "(", "\"config_flag_check\"", ")", "monkeypatch", ".", "chdir", "(", "temp_dir", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"-c {context.root_directory} --v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found\"", "in", "result", ".", "output" ]
[ 162, 0 ]
[ 176, 50 ]
python
en
['en', 'en', 'en']
True
test_assume_yes_using_full_flag_using_checkpoint_delete
( mock_emit, caplog, monkeypatch, empty_context_with_checkpoint_v1_stats_enabled, )
What does this test and why? All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
What does this test and why? All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
def test_assume_yes_using_full_flag_using_checkpoint_delete(
    mock_emit,
    caplog,
    monkeypatch,
    empty_context_with_checkpoint_v1_stats_enabled,
):
    """
    What does this test and why?
    All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
    """
    context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    checkpoint_name: str = "my_v1_checkpoint"
    result: Result = runner.invoke(
        cli,
        f"--v3-api --assume-yes checkpoint delete {checkpoint_name}",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    assert result.exit_code == 0

    assert (
        f'Are you sure you want to delete the Checkpoint "{checkpoint_name}" (this action is irreversible)?'
        not in stdout
    )
    # This assertion is extra assurance since this test is too permissive if we change the confirmation message
    assert "[Y/n]" not in stdout

    assert 'Checkpoint "my_v1_checkpoint" deleted.' in stdout

    expected_call_args_list = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.checkpoint.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.checkpoint.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_count == len(expected_call_args_list)
    assert mock_emit.call_args_list == expected_call_args_list

    assert_no_logging_messages_or_tracebacks(
        caplog,
        result,
    )

    result = runner.invoke(
        cli,
        "--v3-api checkpoint list",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "No Checkpoints found." in stdout
[ "def", "test_assume_yes_using_full_flag_using_checkpoint_delete", "(", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "empty_context_with_checkpoint_v1_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "empty_context_with_checkpoint_v1_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "checkpoint_name", ":", "str", "=", "\"my_v1_checkpoint\"", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api --assume-yes checkpoint delete {checkpoint_name}\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert", "(", "f'Are you sure you want to delete the Checkpoint \"{checkpoint_name}\" (this action is irreversible)?'", "not", "in", "stdout", ")", "# This assertion is extra assurance since this test is too permissive if we change the confirmation message", "assert", "\"[Y/n]\"", "not", "in", "stdout", "assert", "'Checkpoint \"my_v1_checkpoint\" deleted.'", "in", "stdout", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.checkpoint.delete.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.checkpoint.delete.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_count", "==", "len", "(", "expected_call_args_list", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ",", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found.\"", "in", "stdout" ]
[ 480, 0 ]
[ 546, 44 ]
python
en
['en', 'error', 'th']
False
test_assume_yes_using_yes_flag_using_checkpoint_delete
( mock_emit, caplog, monkeypatch, empty_context_with_checkpoint_v1_stats_enabled, )
What does this test and why? All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
What does this test and why? All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
def test_assume_yes_using_yes_flag_using_checkpoint_delete(
    mock_emit,
    caplog,
    monkeypatch,
    empty_context_with_checkpoint_v1_stats_enabled,
):
    """
    What does this test and why?
    All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
    """
    context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    checkpoint_name: str = "my_v1_checkpoint"
    result: Result = runner.invoke(
        cli,
        f"--v3-api --yes checkpoint delete {checkpoint_name}",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    assert result.exit_code == 0

    assert (
        f'Are you sure you want to delete the Checkpoint "{checkpoint_name}" (this action is irreversible)?'
        not in stdout
    )
    # This assertion is extra assurance since this test is too permissive if we change the confirmation message
    assert "[Y/n]" not in stdout

    assert 'Checkpoint "my_v1_checkpoint" deleted.' in stdout

    expected_call_args_list = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.checkpoint.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.checkpoint.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_count == len(expected_call_args_list)
    assert mock_emit.call_args_list == expected_call_args_list

    assert_no_logging_messages_or_tracebacks(
        caplog,
        result,
    )

    result = runner.invoke(
        cli,
        "--v3-api checkpoint list",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "No Checkpoints found." in stdout
[ "def", "test_assume_yes_using_yes_flag_using_checkpoint_delete", "(", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "empty_context_with_checkpoint_v1_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "empty_context_with_checkpoint_v1_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "checkpoint_name", ":", "str", "=", "\"my_v1_checkpoint\"", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api --yes checkpoint delete {checkpoint_name}\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert", "(", "f'Are you sure you want to delete the Checkpoint \"{checkpoint_name}\" (this action is irreversible)?'", "not", "in", "stdout", ")", "# This assertion is extra assurance since this test is too permissive if we change the confirmation message", "assert", "\"[Y/n]\"", "not", "in", "stdout", "assert", "'Checkpoint \"my_v1_checkpoint\" deleted.'", "in", "stdout", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.checkpoint.delete.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.checkpoint.delete.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_count", "==", "len", "(", "expected_call_args_list", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ",", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found.\"", "in", "stdout" ]
[ 552, 0 ]
[ 618, 44 ]
python
en
['en', 'error', 'th']
False
test_assume_yes_using_y_flag_using_checkpoint_delete
( mock_emit, caplog, monkeypatch, empty_context_with_checkpoint_v1_stats_enabled, )
What does this test and why? All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
What does this test and why? All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
def test_assume_yes_using_y_flag_using_checkpoint_delete(
    mock_emit,
    caplog,
    monkeypatch,
    empty_context_with_checkpoint_v1_stats_enabled,
):
    """
    What does this test and why?
    All versions of the --assume-yes flag (--assume-yes/--yes/-y) should behave the same.
    """
    context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    checkpoint_name: str = "my_v1_checkpoint"
    result: Result = runner.invoke(
        cli,
        f"--v3-api -y checkpoint delete {checkpoint_name}",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    assert result.exit_code == 0

    assert (
        f'Are you sure you want to delete the Checkpoint "{checkpoint_name}" (this action is irreversible)?'
        not in stdout
    )
    # This assertion is extra assurance since this test is too permissive if we change the confirmation message
    assert "[Y/n]" not in stdout

    assert 'Checkpoint "my_v1_checkpoint" deleted.' in stdout

    expected_call_args_list = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.checkpoint.delete.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.checkpoint.delete.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_count == len(expected_call_args_list)
    assert mock_emit.call_args_list == expected_call_args_list

    assert_no_logging_messages_or_tracebacks(
        caplog,
        result,
    )

    result = runner.invoke(
        cli,
        "--v3-api checkpoint list",
        catch_exceptions=False,
    )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "No Checkpoints found." in stdout
[ "def", "test_assume_yes_using_y_flag_using_checkpoint_delete", "(", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "empty_context_with_checkpoint_v1_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "empty_context_with_checkpoint_v1_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "checkpoint_name", ":", "str", "=", "\"my_v1_checkpoint\"", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api -y checkpoint delete {checkpoint_name}\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert", "(", "f'Are you sure you want to delete the Checkpoint \"{checkpoint_name}\" (this action is irreversible)?'", "not", "in", "stdout", ")", "# This assertion is extra assurance since this test is too permissive if we change the confirmation message", "assert", "\"[Y/n]\"", "not", "in", "stdout", "assert", "'Checkpoint \"my_v1_checkpoint\" deleted.'", "in", "stdout", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.checkpoint.delete.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.checkpoint.delete.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_count", "==", "len", "(", "expected_call_args_list", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ",", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api checkpoint list\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"No Checkpoints found.\"", "in", "stdout" ]
[ 624, 0 ]
[ 690, 44 ]
python
en
['en', 'error', 'th']
False
test_using_assume_yes_flag_on_command_with_no_assume_yes_implementation
( mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates, )
What does this test and why? The --assume-yes flag should not cause issues when run with commands that do not implement any logic based on it.
What does this test and why? The --assume-yes flag should not cause issues when run with commands that do not implement any logic based on it.
def test_using_assume_yes_flag_on_command_with_no_assume_yes_implementation(
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
):
    """
    What does this test and why?
    The --assume-yes flag should not cause issues when run with commands that do not
    implement any logic based on it.
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        "--v3-api --assume-yes checkpoint list",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    assert result.exit_code == 0

    assert "Found 8 Checkpoints." in stdout
    checkpoint_names_list: List[str] = [
        "my_simple_checkpoint_with_slack_and_notify_with_all",
        "my_nested_checkpoint_template_1",
        "my_nested_checkpoint_template_3",
        "my_nested_checkpoint_template_2",
        "my_simple_checkpoint_with_site_names",
        "my_minimal_simple_checkpoint",
        "my_simple_checkpoint_with_slack",
        "my_simple_template_checkpoint",
    ]
    assert all(checkpoint_name in stdout for checkpoint_name in checkpoint_names_list)

    expected_call_args_list = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.checkpoint.list.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.checkpoint.list.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_count == len(expected_call_args_list)
    assert mock_emit.call_args_list == expected_call_args_list

    assert_no_logging_messages_or_tracebacks(
        caplog,
        result,
    )
[ "def", "test_using_assume_yes_flag_on_command_with_no_assume_yes_implementation", "(", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates", ",", ")", ":", "context", ":", "DataContext", "=", "titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api --assume-yes checkpoint list\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert", "\"Found 8 Checkpoints.\"", "in", "stdout", "checkpoint_names_list", ":", "List", "[", "str", "]", "=", "[", "\"my_simple_checkpoint_with_slack_and_notify_with_all\"", ",", "\"my_nested_checkpoint_template_1\"", ",", "\"my_nested_checkpoint_template_3\"", ",", "\"my_nested_checkpoint_template_2\"", ",", "\"my_simple_checkpoint_with_site_names\"", ",", "\"my_minimal_simple_checkpoint\"", ",", "\"my_simple_checkpoint_with_slack\"", ",", "\"my_simple_template_checkpoint\"", ",", "]", "assert", "all", "(", "[", "checkpoint_name", "in", "stdout", "for", "checkpoint_name", "in", "checkpoint_names_list", "]", ")", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.checkpoint.list.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.checkpoint.list.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_count", "==", "len", "(", "expected_call_args_list", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ",", ")" ]
[ 696, 0 ]
[ 755, 5 ]
python
en
['en', 'error', 'th']
False
variable_t.__init__
( self, name='', decl_type=None, type_qualifiers=None, value=None, bits=None, mangled=None)
creates class that describes C++ global or member variable
creates class that describes C++ global or member variable
def __init__(
        self,
        name='',
        decl_type=None,
        type_qualifiers=None,
        value=None,
        bits=None,
        mangled=None):
    """creates class that describes C++ global or member variable"""
    declaration.declaration_t.__init__(self, name)
    self._decl_type = decl_type
    self._type_qualifiers = type_qualifiers
    self._value = value
    self._bits = bits
    self._byte_offset = 0
    self._mangled = mangled
[ "def", "__init__", "(", "self", ",", "name", "=", "''", ",", "decl_type", "=", "None", ",", "type_qualifiers", "=", "None", ",", "value", "=", "None", ",", "bits", "=", "None", ",", "mangled", "=", "None", ")", ":", "declaration", ".", "declaration_t", ".", "__init__", "(", "self", ",", "name", ")", "self", ".", "_decl_type", "=", "decl_type", "self", ".", "_type_qualifiers", "=", "type_qualifiers", "self", ".", "_value", "=", "value", "self", ".", "_bits", "=", "bits", "self", ".", "_byte_offset", "=", "0", "self", ".", "_mangled", "=", "mangled" ]
[ 17, 4 ]
[ 32, 31 ]
python
en
['en', 'en', 'en']
True
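The signature of this constructor matches pygccxml.declarations.variable_t exactly, so construction presumably looks like the sketch below; treat the attribute names and values as an assumption based on that match:
from pygccxml import declarations

# Describe a C++ variable `int counter = 0;` (decl_type omitted for brevity).
v = declarations.variable_t(name='counter', value='0')
print(v.name, v.value)  # counter 0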