Dataset columns:
identifier: string (lengths 1 to 155)
parameters: string (lengths 2 to 6.09k)
docstring: string (lengths 11 to 63.4k)
docstring_summary: string (lengths 0 to 63.4k)
function: string (lengths 29 to 99.8k)
function_tokens: sequence
start_point: sequence
end_point: sequence
language: string (1 distinct value)
docstring_language: string (lengths 2 to 7)
docstring_language_predictions: string (lengths 18 to 23)
is_langid_reliable: string (2 distinct values)
declaration_t._get__cmp__data
(self)
Implementation detail.
Implementation detail.
def _get__cmp__data(self):
    """
    Implementation detail.
    """
    if self._cache.cmp_data is None:
        cmp_data = [
            declaration_utils.declaration_path(self.parent),
            self.name,
            self.location]
        cmp_data.extend(self._get__cmp__items())
        self._cache.cmp_data = cmp_data
    return self._cache.cmp_data
[ "def", "_get__cmp__data", "(", "self", ")", ":", "if", "self", ".", "_cache", ".", "cmp_data", "is", "None", ":", "cmp_data", "=", "[", "declaration_utils", ".", "declaration_path", "(", "self", ".", "parent", ")", ",", "self", ".", "name", ",", "self", ".", "location", "]", "cmp_data", ".", "extend", "(", "self", ".", "_get__cmp__items", "(", ")", ")", "self", ".", "_cache", ".", "cmp_data", "=", "cmp_data", "return", "self", ".", "_cache", ".", "cmp_data" ]
[ 81, 4 ]
[ 94, 35 ]
python
en
['en', 'error', 'th']
False
declaration_t.__eq__
(self, other)
Returns True if both declarations refer to the same object. This function could be implemented in terms of _get__cmp__data, but that would degrade performance. The self.mangled property is not compared, because it can change from one compilation to another.
Returns True if both declarations refer to the same object. This function could be implemented in terms of _get__cmp__data, but that would degrade performance. The self.mangled property is not compared, because it can change from one compilation to another.
def __eq__(self, other):
    """
    Returns True if both declarations refer to the same object.

    This function could be implemented in terms of _get__cmp__data,
    but that would degrade performance.

    The self.mangled property is not compared, because it can change
    from one compilation to another.
    """
    if not isinstance(other, self.__class__):
        return False
    return self.name == other.name \
        and self.location == other.location \
        and declaration_utils.declaration_path(self.parent) \
        == declaration_utils.declaration_path(other.parent)
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "return", "False", "return", "self", ".", "name", "==", "other", ".", "name", "and", "self", ".", "location", "==", "other", ".", "location", "and", "declaration_utils", ".", "declaration_path", "(", "self", ".", "parent", ")", "==", "declaration_utils", ".", "declaration_path", "(", "other", ".", "parent", ")" ]
[ 96, 4 ]
[ 112, 63 ]
python
en
['en', 'error', 'th']
False
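The equality pattern above (an isinstance guard plus field-by-field comparison) can be illustrated with a self-contained sketch; decl_stub is a hypothetical toy class, not pygccxml's declaration_t, since real declarations only come from a parsed header:

class decl_stub:
    """Hypothetical stand-in with the same __eq__/__ne__ shape."""
    def __init__(self, name, location, parent_path):
        self.name = name
        self.location = location
        # parent_path stands in for declaration_utils.declaration_path(self.parent)
        self.parent_path = parent_path

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.name == other.name
                and self.location == other.location
                and self.parent_path == other.parent_path)

    def __ne__(self, other):
        return not self.__eq__(other)

a = decl_stub("point", "point.h:12", ["::", "geometry"])
b = decl_stub("point", "point.h:12", ["::", "geometry"])
assert a == b and not (a != b)
assert a != decl_stub("point", "point.h:40", ["::", "geometry"])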
declaration_t.__ne__
(self, other)
Return not self.__eq__( other ).
Return not self.__eq__( other ).
def __ne__(self, other):
    """
    Return not self.__eq__(other).
    """
    return not self.__eq__(other)
[ "def", "__ne__", "(", "self", ",", "other", ")", ":", "return", "not", "self", ".", "__eq__", "(", "other", ")" ]
[ 119, 4 ]
[ 125, 37 ]
python
en
['en', 'error', 'th']
False
declaration_t.__lt__
(self, other)
.. code-block:: python

   if not isinstance(other, self.__class__):
       return self.__class__.__name__ < other.__class__.__name__
   return self._get__cmp__data() < other._get__cmp__data()
.. code-block:: python
def __lt__(self, other):
    """
    .. code-block:: python

       if not isinstance(other, self.__class__):
           return self.__class__.__name__ < other.__class__.__name__
       return self._get__cmp__data() < other._get__cmp__data()
    """
    if not isinstance(other, self.__class__):
        return self.__class__.__name__ < other.__class__.__name__
    return self._get__cmp__data() < other._get__cmp__data()
[ "def", "__lt__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "return", "self", ".", "__class__", ".", "__name__", "<", "other", ".", "__class__", ".", "__name__", "return", "self", ".", "_get__cmp__data", "(", ")", "<", "other", ".", "_get__cmp__data", "(", ")" ]
[ 127, 4 ]
[ 139, 63 ]
python
en
['en', 'error', 'th']
False
declaration_t._on_rename
(self)
Placeholder method; it is redefined in child classes.
Placeholder method; it is redefined in child classes.
def _on_rename(self):
    """
    Placeholder method; it is redefined in child classes.
    """
    pass
[ "def", "_on_rename", "(", "self", ")", ":", "pass" ]
[ 144, 4 ]
[ 150, 12 ]
python
en
['en', 'error', 'th']
False
declaration_t.name
(self)
Declaration name @type: str
Declaration name @type: str
def name(self):
    """
    Declaration name
    @type: str
    """
    return self._get_name_impl()
[ "def", "name", "(", "self", ")", ":", "return", "self", ".", "_get_name_impl", "(", ")" ]
[ 153, 4 ]
[ 160, 36 ]
python
en
['en', 'error', 'th']
False
declaration_t.partial_name
(self)
Declaration name, without template default arguments. Right now, std containers are the only classes that support this functionality.
Declaration name, without template default arguments.
def partial_name(self):
    """
    Declaration name, without template default arguments.

    Right now, std containers are the only classes that support this
    functionality.
    """
    if None is self._partial_name:
        self._partial_name = self._get_partial_name_impl()
    return self._partial_name
[ "def", "partial_name", "(", "self", ")", ":", "if", "None", "is", "self", ".", "_partial_name", ":", "self", ".", "_partial_name", "=", "self", ".", "_get_partial_name_impl", "(", ")", "return", "self", ".", "_partial_name" ]
[ 176, 4 ]
[ 188, 33 ]
python
en
['en', 'error', 'th']
False
declaration_t.parent
(self)
Reference to parent declaration. @type: declaration_t
Reference to parent declaration.
def parent(self):
    """
    Reference to parent declaration.
    @type: declaration_t
    """
    return self._parent
[ "def", "parent", "(", "self", ")", ":", "return", "self", ".", "_parent" ]
[ 191, 4 ]
[ 199, 27 ]
python
en
['en', 'error', 'th']
False
declaration_t.top_parent
(self)
Reference to top parent declaration. @type: declaration_t
Reference to top parent declaration.
def top_parent(self):
    """
    Reference to top parent declaration.
    @type: declaration_t
    """
    parent = self.parent
    while parent is not None:
        if parent.parent is None:
            return parent
        else:
            parent = parent.parent
    return self
[ "def", "top_parent", "(", "self", ")", ":", "parent", "=", "self", ".", "parent", "while", "parent", "is", "not", "None", ":", "if", "parent", ".", "parent", "is", "None", ":", "return", "parent", "else", ":", "parent", "=", "parent", ".", "parent", "return", "self" ]
[ 209, 4 ]
[ 222, 19 ]
python
en
['en', 'error', 'th']
False
declaration_t.location
(self)
Location of the declaration within source file @type: :class:`location_t`
Location of the declaration within source file
def location(self):
    """
    Location of the declaration within source file
    @type: :class:`location_t`
    """
    return self._location
[ "def", "location", "(", "self", ")", ":", "return", "self", ".", "_location" ]
[ 225, 4 ]
[ 233, 29 ]
python
en
['en', 'error', 'th']
False
declaration_t.is_artificial
(self)
Describes whether declaration is compiler generated or not @type: bool
Describes whether declaration is compiler generated or not
def is_artificial(self):
    """
    Describes whether declaration is compiler generated or not
    @type: bool
    """
    return self._is_artificial
[ "def", "is_artificial", "(", "self", ")", ":", "return", "self", ".", "_is_artificial" ]
[ 240, 4 ]
[ 248, 34 ]
python
en
['en', 'error', 'th']
False
declaration_t.mangled
(self)
Unique declaration name generated by the compiler. For GCCXML, you can get the mangled name for all the declarations. When using CastXML, calling mangled is only allowed on functions and variables. For other declarations it will raise an exception. :return: the mangled name :rtype: str
Unique declaration name generated by the compiler.
def mangled(self):
    """
    Unique declaration name generated by the compiler.

    For GCCXML, you can get the mangled name for all the declarations.
    When using CastXML, calling mangled is only allowed on functions
    and variables. For other declarations it will raise an exception.

    :return: the mangled name
    :rtype: str
    """
    return self._mangled
[ "def", "mangled", "(", "self", ")", ":", "return", "self", ".", "_mangled" ]
[ 258, 4 ]
[ 271, 28 ]
python
en
['en', 'error', 'th']
False
declaration_t.demangled
(self)
Declaration name, reconstructed from GCCXML generated unique name. @type: str
Declaration name, reconstructed from GCCXML generated unique name.
def demangled(self):
    """
    Declaration name, reconstructed from GCCXML generated unique name.
    @type: str
    """
    return self._demangled
[ "def", "demangled", "(", "self", ")", ":", "return", "self", ".", "_demangled" ]
[ 278, 4 ]
[ 285, 30 ]
python
en
['en', 'error', 'th']
False
declaration_t.decorated_name
(self)
Unique declaration name extracted from a binary file (.map, .dll, .so, etc.). @type: str
Unique declaration name extracted from a binary file (.map, .dll, .so, etc.).
def decorated_name(self):
    """
    Unique declaration name extracted from a binary file
    (.map, .dll, .so, etc.).
    @type: str
    """
    warnings.warn(
        "The decorated_name attribute is deprecated. See the changelog.",
        DeprecationWarning)
    # Deprecated since 1.9.0, will be removed in 2.0.0
    return self._decorated_name
[ "def", "decorated_name", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"The decorated_name attribute is deprecated. See the changelog.\"", ",", "DeprecationWarning", ")", "# Deprecated since 1.9.0, will be removed in 2.0.0", "return", "self", ".", "_decorated_name" ]
[ 292, 4 ]
[ 304, 35 ]
python
en
['en', 'error', 'th']
False
declaration_t.attributes
(self)
GCCXML attributes, set using __attribute__((gccxml("..."))) @type: str
GCCXML attributes, set using __attribute__((gccxml("...")))
def attributes(self):
    """
    GCCXML attributes, set using __attribute__((gccxml("...")))
    @type: str
    """
    return self._attributes
[ "def", "attributes", "(", "self", ")", ":", "return", "self", ".", "_attributes" ]
[ 315, 4 ]
[ 323, 31 ]
python
en
['en', 'error', 'th']
False
declaration_t.decl_string
(self)
Declaration full name.
Declaration full name.
def decl_string(self):
    """
    Declaration full name.
    """
    return self.create_decl_string()
[ "def", "decl_string", "(", "self", ")", ":", "return", "self", ".", "create_decl_string", "(", ")" ]
[ 333, 4 ]
[ 339, 40 ]
python
en
['en', 'error', 'th']
False
declaration_t.partial_decl_string
(self)
Declaration full name.
Declaration full name.
def partial_decl_string(self):
    """
    Declaration full name.
    """
    return self.create_decl_string(with_defaults=False)
[ "def", "partial_decl_string", "(", "self", ")", ":", "return", "self", ".", "create_decl_string", "(", "with_defaults", "=", "False", ")" ]
[ 342, 4 ]
[ 348, 59 ]
python
en
['en', 'error', 'th']
False
declaration_t.cache
(self)
Implementation detail. Reference to instance of :class:`algorithms_cache_t` class.
Implementation detail.
def cache(self):
    """
    Implementation detail.

    Reference to instance of :class:`algorithms_cache_t` class.
    """
    return self._cache
[ "def", "cache", "(", "self", ")", ":", "return", "self", ".", "_cache" ]
[ 351, 4 ]
[ 359, 26 ]
python
en
['en', 'error', 'th']
False
declaration_t.i_depend_on_them
(self, recursive=True)
Return list of all types and declarations the declaration depends on
Return list of all types and declarations the declaration depends on
def i_depend_on_them(self, recursive=True):
    """
    Return list of all types and declarations the declaration depends on
    """
    raise NotImplementedError()
[ "def", "i_depend_on_them", "(", "self", ",", "recursive", "=", "True", ")", ":", "raise", "NotImplementedError", "(", ")" ]
[ 361, 4 ]
[ 366, 35 ]
python
en
['en', 'error', 'th']
False
would_confuse_urlparse
(url: str)
Returns whether a URL-ish string would be interpreted by urlparse() differently than we want, by parsing it as a non-URL URI ("scheme:path") instead of as a URL ("[scheme:]//authority[:port]/path"). We don't want to interpret "myhost:8080" as "ParseResult(scheme='myhost', path='8080')"!
Returns whether a URL-ish string would be interpreted by urlparse() differently than we want, by parsing it as a non-URL URI ("scheme:path") instead of as a URL ("[scheme:]//authority[:port]/path"). We don't want to interpret "myhost:8080" as "ParseResult(scheme='myhost', path='8080')"!
def would_confuse_urlparse(url: str) -> bool:
    """Returns whether a URL-ish string would be interpreted by urlparse()
    differently than we want, by parsing it as a non-URL URI ("scheme:path")
    instead of as a URL ("[scheme:]//authority[:port]/path").

    We don't want to interpret "myhost:8080" as
    "ParseResult(scheme='myhost', path='8080')"!
    """
    if url.find(':') > 0 and url.lstrip(scheme_chars).startswith("://"):
        # has a scheme
        return False
    if url.startswith("//"):
        # does not have a scheme, but has the "//" URL authority marker
        return False
    return True
[ "def", "would_confuse_urlparse", "(", "url", ":", "str", ")", "->", "bool", ":", "if", "url", ".", "find", "(", "':'", ")", ">", "0", "and", "url", ".", "lstrip", "(", "scheme_chars", ")", ".", "startswith", "(", "\"://\"", ")", ":", "# has a scheme", "return", "False", "if", "url", ".", "startswith", "(", "\"//\"", ")", ":", "# does not have a scheme, but has the \"//\" URL authority marker", "return", "False", "return", "True" ]
[ 14, 0 ]
[ 27, 15 ]
python
en
['en', 'en', 'en']
True
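A short, hedged illustration of the urlparse() behavior the docstring describes, using only the standard library (the exact ParseResult fields varied across older 3.x releases):

from urllib.parse import urlparse

p = urlparse("myhost:8080")
print(p.scheme, p.path)                   # on recent CPython: myhost 8080
print(urlparse("//myhost:8080").netloc)   # myhost:8080 (authority marker present)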
IRBaseMapping.status
(self)
Return the new status we should have. Subclasses would typically override this. :return: new status (may be None)
Return the new status we should have. Subclasses would typically override this.
def status(self) -> Optional[Dict[str, Any]]:
    """
    Return the new status we should have. Subclasses would typically override this.

    :return: new status (may be None)
    """
    return None
[ "def", "status", "(", "self", ")", "->", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", ":", "return", "None" ]
[ 204, 4 ]
[ 211, 19 ]
python
en
['en', 'error', 'th']
False
IRBaseMapping._group_id
(self)
Compute the group ID for this Mapping. Must be defined by subclasses.
Compute the group ID for this Mapping. Must be defined by subclasses.
def _group_id(self) -> str:
    """
    Compute the group ID for this Mapping. Must be defined by subclasses.
    """
    raise NotImplementedError("%s._group_id is not implemented?" % self.__class__.__name__)
[ "def", "_group_id", "(", "self", ")", "->", "str", ":", "raise", "NotImplementedError", "(", "\"%s._group_id is not implemented?\"", "%", "self", ".", "__class__", ".", "__name__", ")" ]
[ 227, 4 ]
[ 229, 96 ]
python
en
['en', 'en', 'en']
True
IRBaseMapping._route_weight
(self)
Compute the route weight for this Mapping. Must be defined by subclasses.
Compute the route weight for this Mapping. Must be defined by subclasses.
def _route_weight(self) -> List[Union[str, int]]:
    """
    Compute the route weight for this Mapping. Must be defined by subclasses.
    """
    raise NotImplementedError("%s._route_weight is not implemented?" % self.__class__.__name__)
[ "def", "_route_weight", "(", "self", ")", "->", "List", "[", "Union", "[", "str", ",", "int", "]", "]", ":", "raise", "NotImplementedError", "(", "\"%s._route_weight is not implemented?\"", "%", "self", ".", "__class__", ".", "__name__", ")" ]
[ 231, 4 ]
[ 233, 100 ]
python
en
['en', 'en', 'en']
True
DatasourceAnonymizer.anonymize_simple_sqlalchemy_datasource
(self, name, config)
SimpleSqlalchemyDatasource requires a separate anonymization scheme.
SimpleSqlalchemyDatasource requires a separate anonymization scheme.
def anonymize_simple_sqlalchemy_datasource(self, name, config):
    """
    SimpleSqlalchemyDatasource requires a separate anonymization scheme.
    """
    anonymized_info_dict = dict()
    anonymized_info_dict["anonymized_name"] = self.anonymize(name)
    if config.get("module_name") is None:
        config["module_name"] = "great_expectations.datasource"
    self.anonymize_object_info(
        anonymized_info_dict=anonymized_info_dict,
        ge_classes=self._ge_classes,
        object_config=config,
    )

    # Only and directly provide parent_class of execution engine
    anonymized_info_dict["anonymized_execution_engine"] = {
        "parent_class": "SqlAlchemyExecutionEngine"
    }

    # Use the `introspection` and `tables` keys to find data_connectors in SimpleSqlalchemyDatasources
    introspection_data_connector_configs = config.get("introspection")
    tables_data_connector_configs = config.get("tables")

    introspection_data_connector_anonymized_configs = []
    if introspection_data_connector_configs is not None:
        for (
            data_connector_name,
            data_connector_config,
        ) in introspection_data_connector_configs.items():
            if data_connector_config.get("class_name") is None:
                data_connector_config[
                    "class_name"
                ] = "InferredAssetSqlDataConnector"
            if data_connector_config.get("module_name") is None:
                data_connector_config[
                    "module_name"
                ] = "great_expectations.datasource.data_connector"
            introspection_data_connector_anonymized_configs.append(
                self._data_connector_anonymizer.anonymize_data_connector_info(
                    name=data_connector_name, config=data_connector_config
                )
            )

    tables_data_connector_anonymized_configs = []
    if tables_data_connector_configs is not None:
        for (
            data_connector_name,
            data_connector_config,
        ) in tables_data_connector_configs.items():
            if data_connector_config.get("class_name") is None:
                data_connector_config[
                    "class_name"
                ] = "ConfiguredAssetSqlDataConnector"
            if data_connector_config.get("module_name") is None:
                data_connector_config[
                    "module_name"
                ] = "great_expectations.datasource.data_connector"
            tables_data_connector_anonymized_configs.append(
                self._data_connector_anonymizer.anonymize_data_connector_info(
                    name=data_connector_name, config=data_connector_config
                )
            )

    anonymized_info_dict["anonymized_data_connectors"] = (
        introspection_data_connector_anonymized_configs
        + tables_data_connector_anonymized_configs
    )

    return anonymized_info_dict
[ "def", "anonymize_simple_sqlalchemy_datasource", "(", "self", ",", "name", ",", "config", ")", ":", "anonymized_info_dict", "=", "dict", "(", ")", "anonymized_info_dict", "[", "\"anonymized_name\"", "]", "=", "self", ".", "anonymize", "(", "name", ")", "if", "config", ".", "get", "(", "\"module_name\"", ")", "is", "None", ":", "config", "[", "\"module_name\"", "]", "=", "\"great_expectations.datasource\"", "self", ".", "anonymize_object_info", "(", "anonymized_info_dict", "=", "anonymized_info_dict", ",", "ge_classes", "=", "self", ".", "_ge_classes", ",", "object_config", "=", "config", ",", ")", "# Only and directly provide parent_class of execution engine", "anonymized_info_dict", "[", "\"anonymized_execution_engine\"", "]", "=", "{", "\"parent_class\"", ":", "\"SqlAlchemyExecutionEngine\"", "}", "# Use the `introspection` and `tables` keys to find data_connectors in SimpleSqlalchemyDatasources", "introspection_data_connector_configs", "=", "config", ".", "get", "(", "\"introspection\"", ")", "tables_data_connector_configs", "=", "config", ".", "get", "(", "\"tables\"", ")", "introspection_data_connector_anonymized_configs", "=", "[", "]", "if", "introspection_data_connector_configs", "is", "not", "None", ":", "for", "(", "data_connector_name", ",", "data_connector_config", ",", ")", "in", "introspection_data_connector_configs", ".", "items", "(", ")", ":", "if", "data_connector_config", ".", "get", "(", "\"class_name\"", ")", "is", "None", ":", "data_connector_config", "[", "\"class_name\"", "]", "=", "\"InferredAssetSqlDataConnector\"", "if", "data_connector_config", ".", "get", "(", "\"module_name\"", ")", "is", "None", ":", "data_connector_config", "[", "\"module_name\"", "]", "=", "\"great_expectations.datasource.data_connector\"", "introspection_data_connector_anonymized_configs", ".", "append", "(", "self", ".", "_data_connector_anonymizer", ".", "anonymize_data_connector_info", "(", "name", "=", "data_connector_name", ",", "config", "=", "data_connector_config", ")", ")", "tables_data_connector_anonymized_configs", "=", "[", "]", "if", "tables_data_connector_configs", "is", "not", "None", ":", "for", "(", "data_connector_name", ",", "data_connector_config", ",", ")", "in", "tables_data_connector_configs", ".", "items", "(", ")", ":", "if", "data_connector_config", ".", "get", "(", "\"class_name\"", ")", "is", "None", ":", "data_connector_config", "[", "\"class_name\"", "]", "=", "\"ConfiguredAssetSqlDataConnector\"", "if", "data_connector_config", ".", "get", "(", "\"module_name\"", ")", "is", "None", ":", "data_connector_config", "[", "\"module_name\"", "]", "=", "\"great_expectations.datasource.data_connector\"", "tables_data_connector_anonymized_configs", ".", "append", "(", "self", ".", "_data_connector_anonymizer", ".", "anonymize_data_connector_info", "(", "name", "=", "data_connector_name", ",", "config", "=", "data_connector_config", ")", ")", "anonymized_info_dict", "[", "\"anonymized_data_connectors\"", "]", "=", "(", "introspection_data_connector_anonymized_configs", "+", "tables_data_connector_anonymized_configs", ")", "return", "anonymized_info_dict" ]
[ 77, 4 ]
[ 145, 35 ]
python
en
['en', 'error', 'th']
False
parse
( files, config=None, compilation_mode=COMPILATION_MODE.FILE_BY_FILE, cache=None)
Parse header files. :param files: The header files that should be parsed :type files: list of str :param config: Configuration object or None :type config: :class:`parser.xml_generator_configuration_t` :param compilation_mode: Determines whether the files are parsed individually or as one single chunk :type compilation_mode: :class:`parser.COMPILATION_MODE` :param cache: Declaration cache (None=no cache) :type cache: :class:`parser.cache_base_t` or str :rtype: list of :class:`declarations.declaration_t`
Parse header files.
def parse(
        files,
        config=None,
        compilation_mode=COMPILATION_MODE.FILE_BY_FILE,
        cache=None):
    """
    Parse header files.

    :param files: The header files that should be parsed
    :type files: list of str

    :param config: Configuration object or None
    :type config: :class:`parser.xml_generator_configuration_t`

    :param compilation_mode: Determines whether the files are parsed
        individually or as one single chunk
    :type compilation_mode: :class:`parser.COMPILATION_MODE`

    :param cache: Declaration cache (None=no cache)
    :type cache: :class:`parser.cache_base_t` or str

    :rtype: list of :class:`declarations.declaration_t`
    """
    if not config:
        config = xml_generator_configuration_t()
    parser = project_reader_t(config=config, cache=cache)
    declarations = parser.read_files(files, compilation_mode)
    config.xml_generator_from_xml_file = parser.xml_generator_from_xml_file
    return declarations
[ "def", "parse", "(", "files", ",", "config", "=", "None", ",", "compilation_mode", "=", "COMPILATION_MODE", ".", "FILE_BY_FILE", ",", "cache", "=", "None", ")", ":", "if", "not", "config", ":", "config", "=", "xml_generator_configuration_t", "(", ")", "parser", "=", "project_reader_t", "(", "config", "=", "config", ",", "cache", "=", "cache", ")", "declarations", "=", "parser", ".", "read_files", "(", "files", ",", "compilation_mode", ")", "config", ".", "xml_generator_from_xml_file", "=", "parser", ".", "xml_generator_from_xml_file", "return", "declarations" ]
[ 28, 0 ]
[ 52, 23 ]
python
en
['en', 'error', 'th']
False
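A hedged usage sketch for parse(); it assumes pygccxml and a CastXML binary are installed and on PATH, and widget.h is a hypothetical header:

from pygccxml import parser, declarations

# xml_generator_path may also need to be set explicitly on some systems
config = parser.xml_generator_configuration_t(xml_generator="castxml")
decls = parser.parse(["widget.h"], config)
global_ns = declarations.get_global_namespace(decls)
print([decl.name for decl in global_ns.declarations])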
ConfiguredAssetS3DataConnector.__init__
( self, name: str, datasource_name: str, bucket: str, assets: dict, execution_engine: Optional[ExecutionEngine] = None, default_regex: Optional[dict] = None, sorters: Optional[list] = None, prefix: Optional[str] = "", delimiter: Optional[str] = "/", max_keys: Optional[int] = 1000, boto3_options: Optional[dict] = None, batch_spec_passthrough: Optional[dict] = None, )
ConfiguredAssetDataConnector for connecting to S3. Args: name (str): required name for DataConnector datasource_name (str): required name for datasource bucket (str): bucket for S3 assets (dict): dict of asset configuration (required for ConfiguredAssetDataConnector) execution_engine (ExecutionEngine): optional reference to ExecutionEngine default_regex (dict): optional regex configuration for filtering data_references sorters (list): optional list of sorters for sorting data_references prefix (str): S3 prefix delimiter (str): S3 delimiter max_keys (int): S3 max_keys (default is 1000) boto3_options (dict): optional boto3 options batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
ConfiguredAssetDataConnector for connecting to S3.
def __init__(
    self,
    name: str,
    datasource_name: str,
    bucket: str,
    assets: dict,
    execution_engine: Optional[ExecutionEngine] = None,
    default_regex: Optional[dict] = None,
    sorters: Optional[list] = None,
    prefix: Optional[str] = "",
    delimiter: Optional[str] = "/",
    max_keys: Optional[int] = 1000,
    boto3_options: Optional[dict] = None,
    batch_spec_passthrough: Optional[dict] = None,
):
    """
    ConfiguredAssetDataConnector for connecting to S3.

    Args:
        name (str): required name for DataConnector
        datasource_name (str): required name for datasource
        bucket (str): bucket for S3
        assets (dict): dict of asset configuration (required for ConfiguredAssetDataConnector)
        execution_engine (ExecutionEngine): optional reference to ExecutionEngine
        default_regex (dict): optional regex configuration for filtering data_references
        sorters (list): optional list of sorters for sorting data_references
        prefix (str): S3 prefix
        delimiter (str): S3 delimiter
        max_keys (int): S3 max_keys (default is 1000)
        boto3_options (dict): optional boto3 options
        batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
    """
    logger.debug(f'Constructing ConfiguredAssetS3DataConnector "{name}".')

    super().__init__(
        name=name,
        datasource_name=datasource_name,
        execution_engine=execution_engine,
        assets=assets,
        default_regex=default_regex,
        sorters=sorters,
        batch_spec_passthrough=batch_spec_passthrough,
    )
    self._bucket = bucket
    self._prefix = os.path.join(prefix, "")
    self._delimiter = delimiter
    self._max_keys = max_keys

    if boto3_options is None:
        boto3_options = {}

    try:
        self._s3 = boto3.client("s3", **boto3_options)
    except (TypeError, AttributeError):
        raise ImportError(
            "Unable to load boto3 (it is required for ConfiguredAssetS3DataConnector)."
        )
[ "def", "__init__", "(", "self", ",", "name", ":", "str", ",", "datasource_name", ":", "str", ",", "bucket", ":", "str", ",", "assets", ":", "dict", ",", "execution_engine", ":", "Optional", "[", "ExecutionEngine", "]", "=", "None", ",", "default_regex", ":", "Optional", "[", "dict", "]", "=", "None", ",", "sorters", ":", "Optional", "[", "list", "]", "=", "None", ",", "prefix", ":", "Optional", "[", "str", "]", "=", "\"\"", ",", "delimiter", ":", "Optional", "[", "str", "]", "=", "\"/\"", ",", "max_keys", ":", "Optional", "[", "int", "]", "=", "1000", ",", "boto3_options", ":", "Optional", "[", "dict", "]", "=", "None", ",", "batch_spec_passthrough", ":", "Optional", "[", "dict", "]", "=", "None", ",", ")", ":", "logger", ".", "debug", "(", "f'Constructing ConfiguredAssetS3DataConnector \"{name}\".'", ")", "super", "(", ")", ".", "__init__", "(", "name", "=", "name", ",", "datasource_name", "=", "datasource_name", ",", "execution_engine", "=", "execution_engine", ",", "assets", "=", "assets", ",", "default_regex", "=", "default_regex", ",", "sorters", "=", "sorters", ",", "batch_spec_passthrough", "=", "batch_spec_passthrough", ",", ")", "self", ".", "_bucket", "=", "bucket", "self", ".", "_prefix", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "\"\"", ")", "self", ".", "_delimiter", "=", "delimiter", "self", ".", "_max_keys", "=", "max_keys", "if", "boto3_options", "is", "None", ":", "boto3_options", "=", "{", "}", "try", ":", "self", ".", "_s3", "=", "boto3", ".", "client", "(", "\"s3\"", ",", "*", "*", "boto3_options", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "raise", "ImportError", "(", "\"Unable to load boto3 (it is required for ConfiguredAssetS3DataConnector).\"", ")" ]
[ 37, 4 ]
[ 93, 13 ]
python
en
['en', 'error', 'th']
False
ConfiguredAssetS3DataConnector.build_batch_spec
(self, batch_definition: BatchDefinition)
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function. Args: batch_definition (BatchDefinition): to be used to build batch_spec Returns: BatchSpec built from batch_definition
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
def build_batch_spec(self, batch_definition: BatchDefinition) -> S3BatchSpec:
    """
    Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.

    Args:
        batch_definition (BatchDefinition): to be used to build batch_spec

    Returns:
        BatchSpec built from batch_definition
    """
    batch_spec: PathBatchSpec = super().build_batch_spec(
        batch_definition=batch_definition
    )
    return S3BatchSpec(batch_spec)
[ "def", "build_batch_spec", "(", "self", ",", "batch_definition", ":", "BatchDefinition", ")", "->", "S3BatchSpec", ":", "batch_spec", ":", "PathBatchSpec", "=", "super", "(", ")", ".", "build_batch_spec", "(", "batch_definition", "=", "batch_definition", ")", "return", "S3BatchSpec", "(", "batch_spec", ")" ]
[ 95, 4 ]
[ 108, 38 ]
python
en
['en', 'error', 'th']
False
build_evaluation_parameters
( expectation_args, evaluation_parameters=None, interactive_evaluation=True, data_context=None, )
Build a dictionary of parameters to evaluate, using the provided evaluation_parameters, AND mutate expectation_args by removing any parameter values passed in as temporary values during exploratory work.
Build a dictionary of parameters to evaluate, using the provided evaluation_parameters, AND mutate expectation_args by removing any parameter values passed in as temporary values during exploratory work.
def build_evaluation_parameters(
    expectation_args,
    evaluation_parameters=None,
    interactive_evaluation=True,
    data_context=None,
):
    """Build a dictionary of parameters to evaluate, using the provided evaluation_parameters,
    AND mutate expectation_args by removing any parameter values passed in as temporary values during
    exploratory work.
    """
    evaluation_args = copy.deepcopy(expectation_args)
    substituted_parameters = dict()

    # Iterate over arguments, and replace $PARAMETER-defined args with their
    # specified parameters.
    for key, value in evaluation_args.items():
        if isinstance(value, dict) and "$PARAMETER" in value:
            # We do not even need to search for a value if we are not going to do interactive evaluation
            if not interactive_evaluation:
                continue

            # First, check to see whether an argument was supplied at runtime
            # If it was, use that one, but remove it from the stored config
            if "$PARAMETER." + value["$PARAMETER"] in value:
                evaluation_args[key] = evaluation_args[key][
                    "$PARAMETER." + value["$PARAMETER"]
                ]
                del expectation_args[key]["$PARAMETER." + value["$PARAMETER"]]

            # If not, try to parse the evaluation parameter and substitute, which will raise
            # an exception if we do not have a value
            else:
                raw_value = value["$PARAMETER"]
                parameter_value = parse_evaluation_parameter(
                    raw_value,
                    evaluation_parameters=evaluation_parameters,
                    data_context=data_context,
                )
                evaluation_args[key] = parameter_value
                # Once we've substituted, we also track that we did so
                substituted_parameters[key] = parameter_value

    return evaluation_args, substituted_parameters
[ "def", "build_evaluation_parameters", "(", "expectation_args", ",", "evaluation_parameters", "=", "None", ",", "interactive_evaluation", "=", "True", ",", "data_context", "=", "None", ",", ")", ":", "evaluation_args", "=", "copy", ".", "deepcopy", "(", "expectation_args", ")", "substituted_parameters", "=", "dict", "(", ")", "# Iterate over arguments, and replace $PARAMETER-defined args with their", "# specified parameters.", "for", "key", ",", "value", "in", "evaluation_args", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", "and", "\"$PARAMETER\"", "in", "value", ":", "# We do not even need to search for a value if we are not going to do interactive evaluation", "if", "not", "interactive_evaluation", ":", "continue", "# First, check to see whether an argument was supplied at runtime", "# If it was, use that one, but remove it from the stored config", "if", "\"$PARAMETER.\"", "+", "value", "[", "\"$PARAMETER\"", "]", "in", "value", ":", "evaluation_args", "[", "key", "]", "=", "evaluation_args", "[", "key", "]", "[", "\"$PARAMETER.\"", "+", "value", "[", "\"$PARAMETER\"", "]", "]", "del", "expectation_args", "[", "key", "]", "[", "\"$PARAMETER.\"", "+", "value", "[", "\"$PARAMETER\"", "]", "]", "# If not, try to parse the evaluation parameter and substitute, which will raise", "# an exception if we do not have a value", "else", ":", "raw_value", "=", "value", "[", "\"$PARAMETER\"", "]", "parameter_value", "=", "parse_evaluation_parameter", "(", "raw_value", ",", "evaluation_parameters", "=", "evaluation_parameters", ",", "data_context", "=", "data_context", ",", ")", "evaluation_args", "[", "key", "]", "=", "parameter_value", "# Once we've substituted, we also track that we did so", "substituted_parameters", "[", "key", "]", "=", "parameter_value", "return", "evaluation_args", ",", "substituted_parameters" ]
[ 164, 0 ]
[ 206, 50 ]
python
en
['en', 'en', 'en']
True
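The runtime-override branch above can be shown with a self-contained sketch; this is a toy re-implementation of that one branch for illustration, not the great_expectations API itself:

expectation_args = {
    "min_value": {"$PARAMETER": "my_min", "$PARAMETER.my_min": 5},
}
evaluation_args = {}
for key, value in expectation_args.items():
    if isinstance(value, dict) and "$PARAMETER" in value:
        runtime_key = "$PARAMETER." + value["$PARAMETER"]
        if runtime_key in value:
            # a temporary runtime value wins and is stripped from the stored config
            evaluation_args[key] = value[runtime_key]
            del expectation_args[key][runtime_key]

print(evaluation_args)   # {'min_value': 5}
print(expectation_args)  # {'min_value': {'$PARAMETER': 'my_min'}}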
find_evaluation_parameter_dependencies
(parameter_expression)
Parse a parameter expression to identify dependencies including GE URNs. Args: parameter_expression: the parameter to parse Returns: a dictionary including: - "urns": set of strings that are valid GE URN objects - "other": set of non-GE URN strings that are required to evaluate the parameter expression
Parse a parameter expression to identify dependencies including GE URNs.
def find_evaluation_parameter_dependencies(parameter_expression):
    """Parse a parameter expression to identify dependencies including GE URNs.

    Args:
        parameter_expression: the parameter to parse

    Returns:
        a dictionary including:
          - "urns": set of strings that are valid GE URN objects
          - "other": set of non-GE URN strings that are required to evaluate the parameter expression
    """
    expr = EvaluationParameterParser()

    dependencies = {"urns": set(), "other": set()}
    # Calling get_parser clears the stack
    parser = expr.get_parser()
    try:
        _ = parser.parseString(parameter_expression, parseAll=True)
    except ParseException as err:
        raise EvaluationParameterError(
            f"Unable to parse evaluation parameter: {str(err)} at line {err.line}, column {err.column}"
        )
    except AttributeError as err:
        raise EvaluationParameterError(
            f"Unable to parse evaluation parameter: {str(err)}"
        )

    for word in expr.exprStack:
        if isinstance(word, (int, float)):
            continue

        if not isinstance(word, str):
            # If we have a function that itself is a tuple (e.g. (trunc, 1))
            continue

        if word in expr.opn or word in expr.fn or word == "unary -":
            # operations and functions
            continue

        # if this is parseable as a number, then we do not include it
        try:
            _ = float(word)
            continue
        except ValueError:
            pass

        try:
            _ = ge_urn.parseString(word)
            dependencies["urns"].add(word)
            continue
        except ParseException:
            # This particular evaluation_parameter or operator is not a valid URN
            pass

        # If we got this far, it's a legitimate "other" evaluation parameter
        dependencies["other"].add(word)

    return dependencies
[ "def", "find_evaluation_parameter_dependencies", "(", "parameter_expression", ")", ":", "expr", "=", "EvaluationParameterParser", "(", ")", "dependencies", "=", "{", "\"urns\"", ":", "set", "(", ")", ",", "\"other\"", ":", "set", "(", ")", "}", "# Calling get_parser clears the stack", "parser", "=", "expr", ".", "get_parser", "(", ")", "try", ":", "_", "=", "parser", ".", "parseString", "(", "parameter_expression", ",", "parseAll", "=", "True", ")", "except", "ParseException", "as", "err", ":", "raise", "EvaluationParameterError", "(", "f\"Unable to parse evaluation parameter: {str(err)} at line {err.line}, column {err.column}\"", ")", "except", "AttributeError", "as", "err", ":", "raise", "EvaluationParameterError", "(", "f\"Unable to parse evaluation parameter: {str(err)}\"", ")", "for", "word", "in", "expr", ".", "exprStack", ":", "if", "isinstance", "(", "word", ",", "(", "int", ",", "float", ")", ")", ":", "continue", "if", "not", "isinstance", "(", "word", ",", "str", ")", ":", "# If we have a function that itself is a tuple (e.g. (trunc, 1))", "continue", "if", "word", "in", "expr", ".", "opn", "or", "word", "in", "expr", ".", "fn", "or", "word", "==", "\"unary -\"", ":", "# operations and functions", "continue", "# if this is parseable as a number, then we do not include it", "try", ":", "_", "=", "float", "(", "word", ")", "continue", "except", "ValueError", ":", "pass", "try", ":", "_", "=", "ge_urn", ".", "parseString", "(", "word", ")", "dependencies", "[", "\"urns\"", "]", ".", "add", "(", "word", ")", "continue", "except", "ParseException", ":", "# This particular evaluation_parameter or operator is not a valid URN", "pass", "# If we got this far, it's a legitimate \"other\" evaluation parameter", "dependencies", "[", "\"other\"", "]", ".", "add", "(", "word", ")", "return", "dependencies" ]
[ 212, 0 ]
[ 270, 23 ]
python
en
['en', 'en', 'en']
True
parse_evaluation_parameter
( parameter_expression, evaluation_parameters=None, data_context=None )
Use the provided evaluation_parameters dict to parse a given parameter expression. Args: parameter_expression (str): A string, potentially containing basic arithmetic operations and functions, and variables to be substituted evaluation_parameters (dict): A dictionary of name-value pairs consisting of values to substitute data_context (DataContext): A data context to use to obtain metrics, if necessary The parser will allow arithmetic operations +, -, /, *, as well as basic functions, including trunc() and round() to obtain integer values when needed for certain expectations (e.g. expect_column_value_length_to_be_between). Valid variables must begin with an alphabetic character and may contain alphanumeric characters plus '_' and '$', EXCEPT if they begin with the string "urn:great_expectations" in which case they may also include additional characters to support inclusion of GE URLs (see :ref:`evaluation_parameters` for more information).
Use the provided evaluation_parameters dict to parse a given parameter expression.
def parse_evaluation_parameter(
    parameter_expression, evaluation_parameters=None, data_context=None
):
    """Use the provided evaluation_parameters dict to parse a given parameter expression.

    Args:
        parameter_expression (str): A string, potentially containing basic arithmetic operations and functions,
            and variables to be substituted
        evaluation_parameters (dict): A dictionary of name-value pairs consisting of values to substitute
        data_context (DataContext): A data context to use to obtain metrics, if necessary

    The parser will allow arithmetic operations +, -, /, *, as well as basic functions, including trunc() and round()
    to obtain integer values when needed for certain expectations (e.g. expect_column_value_length_to_be_between).

    Valid variables must begin with an alphabetic character and may contain alphanumeric characters plus '_' and '$',
    EXCEPT if they begin with the string "urn:great_expectations" in which case they may also include additional
    characters to support inclusion of GE URLs (see :ref:`evaluation_parameters` for more information).
    """
    if evaluation_parameters is None:
        evaluation_parameters = {}

    # Calling get_parser clears the stack
    parser = expr.get_parser()
    try:
        L = parser.parseString(parameter_expression, parseAll=True)
    except ParseException as err:
        L = ["Parse Failure", parameter_expression, (str(err), err.line, err.column)]

    if len(L) == 1 and L[0] not in evaluation_parameters:
        # In this special case there were no operations to find, so only one value, but we don't have something to
        # substitute for that value
        try:
            res = ge_urn.parseString(L[0])
            if res["urn_type"] == "stores":
                store = data_context.stores.get(res["store_name"])
                return store.get_query_result(
                    res["metric_name"], res.get("metric_kwargs", {})
                )
            else:
                logger.error(
                    "Unrecognized urn_type in ge_urn: must be 'stores' to use a metric store."
                )
                raise EvaluationParameterError(
                    "No value found for $PARAMETER " + str(L[0])
                )
        except ParseException as e:
            logger.debug(
                f"Parse exception while parsing evaluation parameter: {str(e)}"
            )
            raise EvaluationParameterError("No value found for $PARAMETER " + str(L[0]))
        except AttributeError:
            logger.warning("Unable to get store for store-type valuation parameter.")
            raise EvaluationParameterError("No value found for $PARAMETER " + str(L[0]))

    elif len(L) == 1:
        # In this case, we *do* have a substitution for a single type. We treat this specially because in this
        # case, we allow complex type substitutions (i.e. do not coerce to string as part of parsing)
        # NOTE: 20201023 - JPC - to support MetricDefinition as an evaluation parameter type, we need to handle that
        # case here; is the evaluation parameter provided here in fact a metric definition?
        return evaluation_parameters[L[0]]

    elif len(L) == 0 or L[0] != "Parse Failure":
        for i, ob in enumerate(expr.exprStack):
            if isinstance(ob, str) and ob in evaluation_parameters:
                expr.exprStack[i] = str(evaluation_parameters[ob])

    else:
        err_str, err_line, err_col = L[-1]
        raise EvaluationParameterError(
            f"Parse Failure: {err_str}\nStatement: {err_line}\nColumn: {err_col}"
        )

    try:
        result = expr.evaluate_stack(expr.exprStack)
    except Exception as e:
        exception_traceback = traceback.format_exc()
        exception_message = (
            f'{type(e).__name__}: "{str(e)}". Traceback: "{exception_traceback}".'
        )
        logger.debug(exception_message, e, exc_info=True)
        raise EvaluationParameterError(
            "Error while evaluating evaluation parameter expression: " + str(e)
        )

    return result
[ "def", "parse_evaluation_parameter", "(", "parameter_expression", ",", "evaluation_parameters", "=", "None", ",", "data_context", "=", "None", ")", ":", "if", "evaluation_parameters", "is", "None", ":", "evaluation_parameters", "=", "{", "}", "# Calling get_parser clears the stack", "parser", "=", "expr", ".", "get_parser", "(", ")", "try", ":", "L", "=", "parser", ".", "parseString", "(", "parameter_expression", ",", "parseAll", "=", "True", ")", "except", "ParseException", "as", "err", ":", "L", "=", "[", "\"Parse Failure\"", ",", "parameter_expression", ",", "(", "str", "(", "err", ")", ",", "err", ".", "line", ",", "err", ".", "column", ")", "]", "if", "len", "(", "L", ")", "==", "1", "and", "L", "[", "0", "]", "not", "in", "evaluation_parameters", ":", "# In this special case there were no operations to find, so only one value, but we don't have something to", "# substitute for that value", "try", ":", "res", "=", "ge_urn", ".", "parseString", "(", "L", "[", "0", "]", ")", "if", "res", "[", "\"urn_type\"", "]", "==", "\"stores\"", ":", "store", "=", "data_context", ".", "stores", ".", "get", "(", "res", "[", "\"store_name\"", "]", ")", "return", "store", ".", "get_query_result", "(", "res", "[", "\"metric_name\"", "]", ",", "res", ".", "get", "(", "\"metric_kwargs\"", ",", "{", "}", ")", ")", "else", ":", "logger", ".", "error", "(", "\"Unrecognized urn_type in ge_urn: must be 'stores' to use a metric store.\"", ")", "raise", "EvaluationParameterError", "(", "\"No value found for $PARAMETER \"", "+", "str", "(", "L", "[", "0", "]", ")", ")", "except", "ParseException", "as", "e", ":", "logger", ".", "debug", "(", "f\"Parse exception while parsing evaluation parameter: {str(e)}\"", ")", "raise", "EvaluationParameterError", "(", "\"No value found for $PARAMETER \"", "+", "str", "(", "L", "[", "0", "]", ")", ")", "except", "AttributeError", ":", "logger", ".", "warning", "(", "\"Unable to get store for store-type valuation parameter.\"", ")", "raise", "EvaluationParameterError", "(", "\"No value found for $PARAMETER \"", "+", "str", "(", "L", "[", "0", "]", ")", ")", "elif", "len", "(", "L", ")", "==", "1", ":", "# In this case, we *do* have a substitution for a single type. We treat this specially because in this", "# case, we allow complex type substitutions (i.e. do not coerce to string as part of parsing)", "# NOTE: 20201023 - JPC - to support MetricDefinition as an evaluation parameter type, we need to handle that", "# case here; is the evaluation parameter provided here in fact a metric definition?", "return", "evaluation_parameters", "[", "L", "[", "0", "]", "]", "elif", "len", "(", "L", ")", "==", "0", "or", "L", "[", "0", "]", "!=", "\"Parse Failure\"", ":", "for", "i", ",", "ob", "in", "enumerate", "(", "expr", ".", "exprStack", ")", ":", "if", "isinstance", "(", "ob", ",", "str", ")", "and", "ob", "in", "evaluation_parameters", ":", "expr", ".", "exprStack", "[", "i", "]", "=", "str", "(", "evaluation_parameters", "[", "ob", "]", ")", "else", ":", "err_str", ",", "err_line", ",", "err_col", "=", "L", "[", "-", "1", "]", "raise", "EvaluationParameterError", "(", "f\"Parse Failure: {err_str}\\nStatement: {err_line}\\nColumn: {err_col}\"", ")", "try", ":", "result", "=", "expr", ".", "evaluate_stack", "(", "expr", ".", "exprStack", ")", "except", "Exception", "as", "e", ":", "exception_traceback", "=", "traceback", ".", "format_exc", "(", ")", "exception_message", "=", "(", "f'{type(e).__name__}: \"{str(e)}\". Traceback: \"{exception_traceback}\".'", ")", "logger", ".", "debug", "(", "exception_message", ",", "e", ",", "exc_info", "=", "True", ")", "raise", "EvaluationParameterError", "(", "\"Error while evaluating evaluation parameter expression: \"", "+", "str", "(", "e", ")", ")", "return", "result" ]
[ 273, 0 ]
[ 357, 17 ]
python
en
['en', 'en', 'en']
True
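A hedged usage sketch for parse_evaluation_parameter(); it assumes great_expectations is installed, and the import path may differ between versions:

from great_expectations.core.evaluation_parameters import parse_evaluation_parameter

value = parse_evaluation_parameter(
    "trunc(my_length / 2)", evaluation_parameters={"my_length": 7}
)
print(value)  # expected: 3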
is_generator
(obj)
Return True if ``obj`` is a generator
Return True if ``obj`` is a generator
def is_generator(obj) -> bool:
    """Return True if ``obj`` is a generator"""
    return inspect.isgeneratorfunction(obj) or inspect.isgenerator(obj)
[ "def", "is_generator", "(", "obj", ")", "->", "bool", ":", "return", "inspect", ".", "isgeneratorfunction", "(", "obj", ")", "or", "inspect", ".", "isgenerator", "(", "obj", ")" ]
[ 44, 0 ]
[ 46, 71 ]
python
en
['en', 'mt', 'en']
True
is_iterable_but_not_string
(obj)
Return True if ``obj`` is an iterable object that isn't a string.
Return True if ``obj`` is an iterable object that isn't a string.
def is_iterable_but_not_string(obj) -> bool:
    """Return True if ``obj`` is an iterable object that isn't a string."""
    return (hasattr(obj, "__iter__") and not hasattr(obj, "strip")) or is_generator(obj)
[ "def", "is_iterable_but_not_string", "(", "obj", ")", "->", "bool", ":", "return", "(", "hasattr", "(", "obj", ",", "\"__iter__\"", ")", "and", "not", "hasattr", "(", "obj", ",", "\"strip\"", ")", ")", "or", "is_generator", "(", "obj", ")" ]
[ 49, 0 ]
[ 51, 88 ]
python
en
['en', 'en', 'en']
True
is_collection
(obj)
Return True if ``obj`` is a collection type, e.g. list, tuple, queryset.
Return True if ``obj`` is a collection type, e.g. list, tuple, queryset.
def is_collection(obj) -> bool:
    """Return True if ``obj`` is a collection type, e.g. list, tuple, queryset."""
    return is_iterable_but_not_string(obj) and not isinstance(obj, Mapping)
[ "def", "is_collection", "(", "obj", ")", "->", "bool", ":", "return", "is_iterable_but_not_string", "(", "obj", ")", "and", "not", "isinstance", "(", "obj", ",", "Mapping", ")" ]
[ 54, 0 ]
[ 56, 75 ]
python
en
['en', 'en', 'en']
True
is_instance_or_subclass
(val, class_)
Return True if ``val`` is either a subclass or instance of ``class_``.
Return True if ``val`` is either a subclass or instance of ``class_``.
def is_instance_or_subclass(val, class_) -> bool:
    """Return True if ``val`` is either a subclass or instance of ``class_``."""
    try:
        return issubclass(val, class_)
    except TypeError:
        return isinstance(val, class_)
[ "def", "is_instance_or_subclass", "(", "val", ",", "class_", ")", "->", "bool", ":", "try", ":", "return", "issubclass", "(", "val", ",", "class_", ")", "except", "TypeError", ":", "return", "isinstance", "(", "val", ",", "class_", ")" ]
[ 59, 0 ]
[ 64, 38 ]
python
en
['en', 'en', 'en']
True
is_keyed_tuple
(obj)
Return True if ``obj`` has keyed tuple behavior, such as namedtuples or SQLAlchemy's KeyedTuples.
Return True if ``obj`` has keyed tuple behavior, such as namedtuples or SQLAlchemy's KeyedTuples.
def is_keyed_tuple(obj) -> bool:
    """Return True if ``obj`` has keyed tuple behavior, such as namedtuples
    or SQLAlchemy's KeyedTuples.
    """
    return isinstance(obj, tuple) and hasattr(obj, "_fields")
[ "def", "is_keyed_tuple", "(", "obj", ")", "->", "bool", ":", "return", "isinstance", "(", "obj", ",", "tuple", ")", "and", "hasattr", "(", "obj", ",", "\"_fields\"", ")" ]
[ 67, 0 ]
[ 71, 61 ]
python
en
['en', 'cy', 'en']
True
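A quick runnable check of the keyed-tuple test, assuming marshmallow is installed (is_keyed_tuple is defined in marshmallow.utils as shown above):

from collections import namedtuple
from marshmallow.utils import is_keyed_tuple

Point = namedtuple("Point", ["x", "y"])
print(is_keyed_tuple(Point(1, 2)))  # True: a tuple with _fields
print(is_keyed_tuple((1, 2)))       # False: a plain tuple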
pprint
(obj, *args, **kwargs)
Pretty-printing function that can pretty-print OrderedDicts like regular dictionaries. Useful for printing the output of :meth:`marshmallow.Schema.dump`.
Pretty-printing function that can pretty-print OrderedDicts like regular dictionaries. Useful for printing the output of :meth:`marshmallow.Schema.dump`.
def pprint(obj, *args, **kwargs) -> None:
    """Pretty-printing function that can pretty-print OrderedDicts
    like regular dictionaries. Useful for printing the output of
    :meth:`marshmallow.Schema.dump`.
    """
    warnings.warn(
        "marshmallow's pprint function is deprecated and will be removed in marshmallow 4.",
        RemovedInMarshmallow4Warning,
    )
    if isinstance(obj, collections.OrderedDict):
        print(json.dumps(obj, *args, **kwargs))
    else:
        py_pprint(obj, *args, **kwargs)
[ "def", "pprint", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "None", ":", "warnings", ".", "warn", "(", "\"marshmallow's pprint function is deprecated and will be removed in marshmallow 4.\"", ",", "RemovedInMarshmallow4Warning", ",", ")", "if", "isinstance", "(", "obj", ",", "collections", ".", "OrderedDict", ")", ":", "print", "(", "json", ".", "dumps", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "else", ":", "py_pprint", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
[ 74, 0 ]
[ 86, 39 ]
python
en
['en', 'en', 'en']
True
from_rfc
(datestring: str)
Parse an RFC822-formatted datetime string and return a datetime object. https://stackoverflow.com/questions/885015/how-to-parse-a-rfc-2822-date-time-into-a-python-datetime # noqa: B950
Parse an RFC822-formatted datetime string and return a datetime object.
def from_rfc(datestring: str) -> dt.datetime:
    """Parse an RFC822-formatted datetime string and return a datetime object.

    https://stackoverflow.com/questions/885015/how-to-parse-a-rfc-2822-date-time-into-a-python-datetime  # noqa: B950
    """
    return parsedate_to_datetime(datestring)
[ "def", "from_rfc", "(", "datestring", ":", "str", ")", "->", "dt", ".", "datetime", ":", "return", "parsedate_to_datetime", "(", "datestring", ")" ]
[ 96, 0 ]
[ 101, 44 ]
python
en
['en', 'en', 'en']
True
rfcformat
(datetime: dt.datetime)
Return the RFC822-formatted representation of a datetime object. :param datetime datetime: The datetime.
Return the RFC822-formatted representation of a datetime object.
def rfcformat(datetime: dt.datetime) -> str:
    """Return the RFC822-formatted representation of a datetime object.

    :param datetime datetime: The datetime.
    """
    return format_datetime(datetime)
[ "def", "rfcformat", "(", "datetime", ":", "dt", ".", "datetime", ")", "->", "str", ":", "return", "format_datetime", "(", "datetime", ")" ]
[ 104, 0 ]
[ 109, 36 ]
python
en
['en', 'en', 'en']
True
get_fixed_timezone
(offset: typing.Union[int, float, dt.timedelta])
Return a tzinfo instance with a fixed offset from UTC.
Return a tzinfo instance with a fixed offset from UTC.
def get_fixed_timezone(offset: typing.Union[int, float, dt.timedelta]) -> dt.timezone:
    """Return a tzinfo instance with a fixed offset from UTC."""
    if isinstance(offset, dt.timedelta):
        offset = offset.total_seconds() // 60
    sign = "-" if offset < 0 else "+"
    hhmm = "%02d%02d" % divmod(abs(offset), 60)
    name = sign + hhmm
    return dt.timezone(dt.timedelta(minutes=offset), name)
[ "def", "get_fixed_timezone", "(", "offset", ":", "typing", ".", "Union", "[", "int", ",", "float", ",", "dt", ".", "timedelta", "]", ")", "->", "dt", ".", "timezone", ":", "if", "isinstance", "(", "offset", ",", "dt", ".", "timedelta", ")", ":", "offset", "=", "offset", ".", "total_seconds", "(", ")", "//", "60", "sign", "=", "\"-\"", "if", "offset", "<", "0", "else", "\"+\"", "hhmm", "=", "\"%02d%02d\"", "%", "divmod", "(", "abs", "(", "offset", ")", ",", "60", ")", "name", "=", "sign", "+", "hhmm", "return", "dt", ".", "timezone", "(", "dt", ".", "timedelta", "(", "minutes", "=", "offset", ")", ",", "name", ")" ]
[ 129, 0 ]
[ 136, 58 ]
python
en
['en', 'en', 'en']
True
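A usage sketch for get_fixed_timezone() as defined above (also importable from marshmallow.utils); the offset argument is in minutes, or a timedelta:

import datetime as dt

tz = get_fixed_timezone(330)  # 5 hours 30 minutes east of UTC
print(tz)                     # +0530
print(dt.datetime(2021, 5, 1, 12, 0, tzinfo=tz).isoformat())
# 2021-05-01T12:00:00+05:30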
from_iso_datetime
(value)
Parse a string and return a datetime.datetime. This function supports time zone offsets. When the input contains one, the output uses a timezone with a fixed offset from UTC.
Parse a string and return a datetime.datetime.
def from_iso_datetime(value):
    """Parse a string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.
    """
    match = _iso8601_datetime_re.match(value)
    if not match:
        raise ValueError("Not a valid ISO8601-formatted datetime string")
    kw = match.groupdict()
    kw["microsecond"] = kw["microsecond"] and kw["microsecond"].ljust(6, "0")
    tzinfo = kw.pop("tzinfo")
    if tzinfo == "Z":
        tzinfo = dt.timezone.utc
    elif tzinfo is not None:
        offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
        offset = 60 * int(tzinfo[1:3]) + offset_mins
        if tzinfo[0] == "-":
            offset = -offset
        tzinfo = get_fixed_timezone(offset)
    kw = {k: int(v) for k, v in kw.items() if v is not None}
    kw["tzinfo"] = tzinfo
    return dt.datetime(**kw)
[ "def", "from_iso_datetime", "(", "value", ")", ":", "match", "=", "_iso8601_datetime_re", ".", "match", "(", "value", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Not a valid ISO8601-formatted datetime string\"", ")", "kw", "=", "match", ".", "groupdict", "(", ")", "kw", "[", "\"microsecond\"", "]", "=", "kw", "[", "\"microsecond\"", "]", "and", "kw", "[", "\"microsecond\"", "]", ".", "ljust", "(", "6", ",", "\"0\"", ")", "tzinfo", "=", "kw", ".", "pop", "(", "\"tzinfo\"", ")", "if", "tzinfo", "==", "\"Z\"", ":", "tzinfo", "=", "dt", ".", "timezone", ".", "utc", "elif", "tzinfo", "is", "not", "None", ":", "offset_mins", "=", "int", "(", "tzinfo", "[", "-", "2", ":", "]", ")", "if", "len", "(", "tzinfo", ")", ">", "3", "else", "0", "offset", "=", "60", "*", "int", "(", "tzinfo", "[", "1", ":", "3", "]", ")", "+", "offset_mins", "if", "tzinfo", "[", "0", "]", "==", "\"-\"", ":", "offset", "=", "-", "offset", "tzinfo", "=", "get_fixed_timezone", "(", "offset", ")", "kw", "=", "{", "k", ":", "int", "(", "v", ")", "for", "k", ",", "v", "in", "kw", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "kw", "[", "\"tzinfo\"", "]", "=", "tzinfo", "return", "dt", ".", "datetime", "(", "*", "*", "kw", ")" ]
[ 139, 0 ]
[ 161, 28 ]
python
en
['en', 'en', 'en']
True
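The time zone offset arithmetic above can be traced by hand for the suffix "+0530"; this is pure arithmetic and needs no marshmallow import:

tzinfo = "+0530"
offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0  # 30
offset = 60 * int(tzinfo[1:3]) + offset_mins              # 60 * 5 + 30 = 330
if tzinfo[0] == "-":
    offset = -offset
print(offset)  # 330 (minutes east of UTC)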
from_iso_time
(value)
Parse a string and return a datetime.time. This function doesn't support time zone offsets.
Parse a string and return a datetime.time.
def from_iso_time(value):
    """Parse a string and return a datetime.time.

    This function doesn't support time zone offsets.
    """
    match = _iso8601_time_re.match(value)
    if not match:
        raise ValueError("Not a valid ISO8601-formatted time string")
    kw = match.groupdict()
    kw["microsecond"] = kw["microsecond"] and kw["microsecond"].ljust(6, "0")
    kw = {k: int(v) for k, v in kw.items() if v is not None}
    return dt.time(**kw)
[ "def", "from_iso_time", "(", "value", ")", ":", "match", "=", "_iso8601_time_re", ".", "match", "(", "value", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Not a valid ISO8601-formatted time string\"", ")", "kw", "=", "match", ".", "groupdict", "(", ")", "kw", "[", "\"microsecond\"", "]", "=", "kw", "[", "\"microsecond\"", "]", "and", "kw", "[", "\"microsecond\"", "]", ".", "ljust", "(", "6", ",", "\"0\"", ")", "kw", "=", "{", "k", ":", "int", "(", "v", ")", "for", "k", ",", "v", "in", "kw", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "return", "dt", ".", "time", "(", "*", "*", "kw", ")" ]
[ 164, 0 ]
[ 175, 24 ]
python
en
['en', 'en', 'en']
True
from_iso_date
(value)
Parse a string and return a datetime.date.
Parse a string and return a datetime.date.
def from_iso_date(value):
    """Parse a string and return a datetime.date."""
    match = _iso8601_date_re.match(value)
    if not match:
        raise ValueError("Not a valid ISO8601-formatted date string")
    kw = {k: int(v) for k, v in match.groupdict().items()}
    return dt.date(**kw)
[ "def", "from_iso_date", "(", "value", ")", ":", "match", "=", "_iso8601_date_re", ".", "match", "(", "value", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Not a valid ISO8601-formatted date string\"", ")", "kw", "=", "{", "k", ":", "int", "(", "v", ")", "for", "k", ",", "v", "in", "match", ".", "groupdict", "(", ")", ".", "items", "(", ")", "}", "return", "dt", ".", "date", "(", "*", "*", "kw", ")" ]
[ 178, 0 ]
[ 184, 24 ]
python
en
['en', 'en', 'en']
True
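A companion sketch for the two sibling parsers above, under the same assumed marshmallow.utils import path:
import datetime as dt
from marshmallow.utils import from_iso_date, from_iso_time  # assumed import path
print(from_iso_time("15:09:26.5"))  # 15:09:26.500000 (fraction padded; no tz support)
print(from_iso_date("2021-03-14"))  # 2021-03-14
assert from_iso_date("2021-03-14") == dt.date(2021, 3, 14)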
isoformat
(datetime: dt.datetime)
Return the ISO8601-formatted representation of a datetime object. :param datetime datetime: The datetime.
Return the ISO8601-formatted representation of a datetime object.
def isoformat(datetime: dt.datetime) -> str: """Return the ISO8601-formatted representation of a datetime object. :param datetime datetime: The datetime. """ return datetime.isoformat()
[ "def", "isoformat", "(", "datetime", ":", "dt", ".", "datetime", ")", "->", "str", ":", "return", "datetime", ".", "isoformat", "(", ")" ]
[ 187, 0 ]
[ 192, 31 ]
python
en
['en', 'en', 'en']
True
pluck
(dictlist: typing.List[typing.Dict[str, typing.Any]], key: str)
Extracts a list of dictionary values from a list of dictionaries. :: >>> dlist = [{'id': 1, 'name': 'foo'}, {'id': 2, 'name': 'bar'}] >>> pluck(dlist, 'id') [1, 2]
Extracts a list of dictionary values from a list of dictionaries. ::
def pluck(dictlist: typing.List[typing.Dict[str, typing.Any]], key: str): """Extracts a list of dictionary values from a list of dictionaries. :: >>> dlist = [{'id': 1, 'name': 'foo'}, {'id': 2, 'name': 'bar'}] >>> pluck(dlist, 'id') [1, 2] """ return [d[key] for d in dictlist]
[ "def", "pluck", "(", "dictlist", ":", "typing", ".", "List", "[", "typing", ".", "Dict", "[", "str", ",", "typing", ".", "Any", "]", "]", ",", "key", ":", "str", ")", ":", "return", "[", "d", "[", "key", "]", "for", "d", "in", "dictlist", "]" ]
[ 205, 0 ]
[ 213, 37 ]
python
en
['en', 'en', 'en']
True
get_value
(obj, key: typing.Union[int, str], default=missing)
Helper for pulling a keyed value off various types of objects. Fields use this method by default to access attributes of the source object. For object `x` and attribute `i`, this method first tries to access `x[i]`, and then falls back to `x.i` if an exception is raised. .. warning:: If an object `x` does not raise an exception when `x[i]` does not exist, `get_value` will never check the value `x.i`. Consider overriding `marshmallow.fields.Field.get_value` in this case.
Helper for pulling a keyed value off various types of objects. Fields use this method by default to access attributes of the source object. For object `x` and attribute `i`, this method first tries to access `x[i]`, and then falls back to `x.i` if an exception is raised.
def get_value(obj, key: typing.Union[int, str], default=missing): """Helper for pulling a keyed value off various types of objects. Fields use this method by default to access attributes of the source object. For object `x` and attribute `i`, this method first tries to access `x[i]`, and then falls back to `x.i` if an exception is raised. .. warning:: If an object `x` does not raise an exception when `x[i]` does not exist, `get_value` will never check the value `x.i`. Consider overriding `marshmallow.fields.Field.get_value` in this case. """ if not isinstance(key, int) and "." in key: return _get_value_for_keys(obj, key.split("."), default) else: return _get_value_for_key(obj, key, default)
[ "def", "get_value", "(", "obj", ",", "key", ":", "typing", ".", "Union", "[", "int", ",", "str", "]", ",", "default", "=", "missing", ")", ":", "if", "not", "isinstance", "(", "key", ",", "int", ")", "and", "\".\"", "in", "key", ":", "return", "_get_value_for_keys", "(", "obj", ",", "key", ".", "split", "(", "\".\"", ")", ",", "default", ")", "else", ":", "return", "_get_value_for_key", "(", "obj", ",", "key", ",", "default", ")" ]
[ 219, 0 ]
[ 233, 52 ]
python
en
['en', 'en', 'en']
True
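A sketch of the lookup order get_value documents: dot-delimited keys are treated as paths, subscription is tried first, and attribute access is the fallback (import path assumed):
from marshmallow.utils import get_value  # assumed import path
user = {"name": "Monty", "address": {"city": "Camelot"}}
print(get_value(user, "address.city"))    # Camelot (dot-delimited path)
class Point:
    x = 1
print(get_value(Point(), "x"))            # 1 (obj["x"] fails, falls back to obj.x)
print(get_value(user, "nickname", None))  # None (explicit default)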
set_value
(dct: typing.Dict[str, typing.Any], key: str, value: typing.Any)
Set a value in a dict. If `key` contains a '.', it is assumed to be a path (i.e. dot-delimited string) to the value's location. :: >>> d = {} >>> set_value(d, 'foo.bar', 42) >>> d {'foo': {'bar': 42}}
Set a value in a dict. If `key` contains a '.', it is assumed to be a path (i.e. dot-delimited string) to the value's location.
def set_value(dct: typing.Dict[str, typing.Any], key: str, value: typing.Any): """Set a value in a dict. If `key` contains a '.', it is assumed to be a path (i.e. dot-delimited string) to the value's location. :: >>> d = {} >>> set_value(d, 'foo.bar', 42) >>> d {'foo': {'bar': 42}} """ if "." in key: head, rest = key.split(".", 1) target = dct.setdefault(head, {}) if not isinstance(target, dict): raise ValueError( "Cannot set {key} in {head} " "due to existing value: {target}".format( key=key, head=head, target=target ) ) set_value(target, rest, value) else: dct[key] = value
[ "def", "set_value", "(", "dct", ":", "typing", ".", "Dict", "[", "str", ",", "typing", ".", "Any", "]", ",", "key", ":", "str", ",", "value", ":", "typing", ".", "Any", ")", ":", "if", "\".\"", "in", "key", ":", "head", ",", "rest", "=", "key", ".", "split", "(", "\".\"", ",", "1", ")", "target", "=", "dct", ".", "setdefault", "(", "head", ",", "{", "}", ")", "if", "not", "isinstance", "(", "target", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"Cannot set {key} in {head} \"", "\"due to existing value: {target}\"", ".", "format", "(", "key", "=", "key", ",", "head", "=", "head", ",", "target", "=", "target", ")", ")", "set_value", "(", "target", ",", "rest", ",", "value", ")", "else", ":", "dct", "[", "key", "]", "=", "value" ]
[ 255, 0 ]
[ 278, 24 ]
python
en
['en', 'en', 'en']
True
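A sketch of set_value's nested-write behavior, including the ValueError branch hit when a path segment collides with an existing non-dict value (import path assumed):
from marshmallow.utils import set_value  # assumed import path
d = {}
set_value(d, "user.address.city", "Camelot")
print(d)  # {'user': {'address': {'city': 'Camelot'}}}
# A segment that collides with an existing non-dict value raises.
try:
    set_value({"user": 42}, "user.name", "Monty")
except ValueError as err:
    print(err)  # Cannot set user.name in user due to existing value: 42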
callable_or_raise
(obj)
Check that an object is callable, else raise a :exc:`ValueError`.
Check that an object is callable, else raise a :exc:`ValueError`.
def callable_or_raise(obj): """Check that an object is callable, else raise a :exc:`ValueError`.""" if not callable(obj): raise ValueError("Object {!r} is not callable.".format(obj)) return obj
[ "def", "callable_or_raise", "(", "obj", ")", ":", "if", "not", "callable", "(", "obj", ")", ":", "raise", "ValueError", "(", "\"Object {!r} is not callable.\"", ".", "format", "(", "obj", ")", ")", "return", "obj" ]
[ 281, 0 ]
[ 285, 14 ]
python
en
['en', 'en', 'en']
True
get_func_args
(func: typing.Callable)
Given a callable, return a list of argument names. Handles `functools.partial` objects and class-based callables. .. versionchanged:: 3.0.0a1 Do not return bound arguments, e.g. ``self``.
Given a callable, return a list of argument names. Handles `functools.partial` objects and class-based callables.
def get_func_args(func: typing.Callable) -> typing.List[str]: """Given a callable, return a list of argument names. Handles `functools.partial` objects and class-based callables. .. versionchanged:: 3.0.0a1 Do not return bound arguments, e.g. ``self``. """ if inspect.isfunction(func) or inspect.ismethod(func): return _signature(func) if isinstance(func, functools.partial): return _signature(func.func) # Callable class return _signature(func)
[ "def", "get_func_args", "(", "func", ":", "typing", ".", "Callable", ")", "->", "typing", ".", "List", "[", "str", "]", ":", "if", "inspect", ".", "isfunction", "(", "func", ")", "or", "inspect", ".", "ismethod", "(", "func", ")", ":", "return", "_signature", "(", "func", ")", "if", "isinstance", "(", "func", ",", "functools", ".", "partial", ")", ":", "return", "_signature", "(", "func", ".", "func", ")", "# Callable class", "return", "_signature", "(", "func", ")" ]
[ 292, 0 ]
[ 304, 27 ]
python
en
['en', 'en', 'en']
True
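A sketch of the three branches in get_func_args: a plain function, a functools.partial inspected through its .func, and a callable class whose bound self is dropped (import path assumed; the comments show the expected results):
import functools
from marshmallow.utils import get_func_args  # assumed import path
def validate(value, other=None):
    return value
class Validator:
    def __call__(self, value):
        return value
print(get_func_args(validate))                     # ['value', 'other']
print(get_func_args(functools.partial(validate)))  # ['value', 'other'] (unwraps .func)
print(get_func_args(Validator()))                  # ['value'] (no bound ``self``)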
resolve_field_instance
(cls_or_instance)
Return a Field instance from a Field class or instance. :param type|Field cls_or_instance: Marshmallow Field class or instance.
Return a Field instance from a Field class or instance.
def resolve_field_instance(cls_or_instance): """Return a Field instance from a Field class or instance. :param type|Field cls_or_instance: Marshmallow Field class or instance. """ if isinstance(cls_or_instance, type): if not issubclass(cls_or_instance, FieldABC): raise FieldInstanceResolutionError return cls_or_instance() else: if not isinstance(cls_or_instance, FieldABC): raise FieldInstanceResolutionError return cls_or_instance
[ "def", "resolve_field_instance", "(", "cls_or_instance", ")", ":", "if", "isinstance", "(", "cls_or_instance", ",", "type", ")", ":", "if", "not", "issubclass", "(", "cls_or_instance", ",", "FieldABC", ")", ":", "raise", "FieldInstanceResolutionError", "return", "cls_or_instance", "(", ")", "else", ":", "if", "not", "isinstance", "(", "cls_or_instance", ",", "FieldABC", ")", ":", "raise", "FieldInstanceResolutionError", "return", "cls_or_instance" ]
[ 307, 0 ]
[ 319, 30 ]
python
en
['en', 'lb', 'en']
True
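Usage sketch for resolve_field_instance: a Field subclass is instantiated, a Field instance passes through, and anything else raises FieldInstanceResolutionError (imports assumed):
from marshmallow import fields
from marshmallow.utils import resolve_field_instance  # assumed import path
print(resolve_field_instance(fields.String))    # a freshly constructed String()
print(resolve_field_instance(fields.String()))  # the given instance, unchanged
# resolve_field_instance(str) would raise FieldInstanceResolutionError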
_merge_meta
(base, child)
Merge the base and the child meta attributes. List entries, such as ``indexes``, are concatenated. The ``abstract`` value is set to ``True`` only if defined as such in the child class. Args: base (dict): ``meta`` attribute from the base class. child (dict): ``meta`` attribute from the child class. Returns: dict: Merged metadata.
Merge the base and the child meta attributes.
def _merge_meta(base, child): """Merge the base and the child meta attributes. List entries, such as ``indexes``, are concatenated. The ``abstract`` value is set to ``True`` only if defined as such in the child class. Args: base (dict): ``meta`` attribute from the base class. child (dict): ``meta`` attribute from the child class. Returns: dict: Merged metadata. """ base = copy.deepcopy(base) child.setdefault('abstract', False) for key, value in child.items(): if isinstance(value, list): base.setdefault(key, []).extend(value) else: base[key] = value return base
[ "def", "_merge_meta", "(", "base", ",", "child", ")", ":", "base", "=", "copy", ".", "deepcopy", "(", "base", ")", "child", ".", "setdefault", "(", "'abstract'", ",", "False", ")", "for", "key", ",", "value", "in", "child", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "base", ".", "setdefault", "(", "key", ",", "[", "]", ")", ".", "extend", "(", "value", ")", "else", ":", "base", "[", "key", "]", "=", "value", "return", "base" ]
[ 40, 0 ]
[ 65, 15 ]
python
en
['en', 'en', 'en']
True
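A self-contained sketch of the merge rules above, with the helper repeated inline so the snippet runs on its own: list values concatenate, scalars override, and ``abstract`` defaults to False unless the child sets it:
import copy
def _merge_meta(base, child):
    # Repeated from the record above for a runnable demonstration.
    base = copy.deepcopy(base)
    child.setdefault('abstract', False)
    for key, value in child.items():
        if isinstance(value, list):
            base.setdefault(key, []).extend(value)
        else:
            base[key] = value
    return base
base = {'indexes': ['name'], 'abstract': True}
child = {'indexes': ['email']}
print(_merge_meta(base, child))
# {'indexes': ['name', 'email'], 'abstract': False} -- abstract is not inherited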
key_has_dollar
(d)
Recursively check if any key in a dict contains a dollar sign.
Recursively check if any key in a dict contains a dollar sign.
def key_has_dollar(d): """Recursively check if any key in a dict contains a dollar sign.""" for k, v in d.items(): if k.startswith('$') or (isinstance(v, dict) and key_has_dollar(v)): return True
[ "def", "key_has_dollar", "(", "d", ")", ":", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "k", ".", "startswith", "(", "'$'", ")", "or", "(", "isinstance", "(", "v", ",", "dict", ")", "and", "key_has_dollar", "(", "v", ")", ")", ":", "return", "True" ]
[ 214, 0 ]
[ 218, 23 ]
python
en
['en', 'en', 'en']
True
PipelineField.validate
(self, value)
Make sure that a list of valid fields is being used.
Make sure that a list of valid fields is being used.
def validate(self, value): """Make sure that a list of valid fields is being used.""" if not isinstance(value, dict): self.error('Only dictionaries may be used in a PipelineField') if fields.key_not_string(value): msg = ('Invalid dictionary key - documents must ' 'have only string keys') self.error(msg) if key_has_dollar(value): self.error('Invalid dictionary key name - keys may not start with ' '"$" character') super(fields.DictField, self).validate(value)
[ "def", "validate", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "dict", ")", ":", "self", ".", "error", "(", "'Only dictionaries may be used in a PipelineField'", ")", "if", "fields", ".", "key_not_string", "(", "value", ")", ":", "msg", "=", "(", "'Invalid dictionary key - documents must '", "'have only string keys'", ")", "self", ".", "error", "(", "msg", ")", "if", "key_has_dollar", "(", "value", ")", ":", "self", ".", "error", "(", "'Invalid dictionary key name - keys may not start with '", "'\"$\" character'", ")", "super", "(", "fields", ".", "DictField", ",", "self", ")", ".", "validate", "(", "value", ")" ]
[ 231, 4 ]
[ 245, 53 ]
python
en
['en', 'en', 'en']
True
clean_xgboost_objective
(objective)
Translate an objective to be compatible with the loaded xgboost version Args ---- objective : string The objective to translate. Returns ------- The translated objective, or the original if no translation was required.
Translate an objective to be compatible with the loaded xgboost version
def clean_xgboost_objective(objective): """ Translate an objective to be compatible with the loaded xgboost version Args ---- objective : string The objective to translate. Returns ------- The translated objective, or the original if no translation was required. """ compat_before_v83 = {'reg:squarederror': 'reg:linear'} compat_v83_or_later = {'reg:linear': 'reg:squarederror'} if version.parse(xgboost_version) < version.parse('0.83'): if objective in compat_before_v83: objective = compat_before_v83[objective] else: if objective in compat_v83_or_later: objective = compat_v83_or_later[objective] return objective
[ "def", "clean_xgboost_objective", "(", "objective", ")", ":", "compat_before_v83", "=", "{", "'reg:squarederror'", ":", "'reg:linear'", "}", "compat_v83_or_later", "=", "{", "'reg:linear'", ":", "'reg:squarederror'", "}", "if", "version", ".", "parse", "(", "xgboost_version", ")", "<", "version", ".", "parse", "(", "'0.83'", ")", ":", "if", "objective", "in", "compat_before_v83", ":", "objective", "=", "compat_before_v83", "[", "objective", "]", "else", ":", "if", "objective", "in", "compat_v83_or_later", ":", "objective", "=", "compat_v83_or_later", "[", "objective", "]", "return", "objective" ]
[ 50, 0 ]
[ 72, 20 ]
python
en
['en', 'error', 'th']
False
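A self-contained sketch of the translation logic; the xgboost_version stand-in below is an assumption (in the real module it comes from the installed xgboost package), and the packaging library must be installed:
from packaging import version
xgboost_version = '1.6.1'  # stand-in for the module-level global (assumed value)
def clean_xgboost_objective(objective):
    # Same mapping as the record above, condensed with dict.get fallbacks.
    compat_before_v83 = {'reg:squarederror': 'reg:linear'}
    compat_v83_or_later = {'reg:linear': 'reg:squarederror'}
    if version.parse(xgboost_version) < version.parse('0.83'):
        return compat_before_v83.get(objective, objective)
    return compat_v83_or_later.get(objective, objective)
print(clean_xgboost_objective('reg:linear'))  # 'reg:squarederror' on xgboost >= 0.83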
get_xgboost_objective_metric
(objective)
Get the xgboost version-compatible objective and evaluation metric from a potentially version-incompatible input. Args ---- objective : string An xgboost objective that may be incompatible with the installed version. Returns ------- A tuple with the translated objective and evaluation metric.
Get the xgboost version-compatible objective and evaluation metric from a potentially version-incompatible input.
def get_xgboost_objective_metric(objective): """ Get the xgboost version-compatible objective and evaluation metric from a potentially version-incompatible input. Args ---- objective : string An xgboost objective that may be incompatible with the installed version. Returns ------- A tuple with the translated objective and evaluation metric. """ def clean_dict_keys(orig): return {clean_xgboost_objective(k): v for (k, v) in orig.items()} metric_mapping = clean_dict_keys({ 'rank:pairwise': 'auc', 'reg:squarederror': 'rmse', }) objective = clean_xgboost_objective(objective) assert (objective in metric_mapping), \ 'Effect learner objective must be one of: ' + ", ".join(metric_mapping) return objective, metric_mapping[objective]
[ "def", "get_xgboost_objective_metric", "(", "objective", ")", ":", "def", "clean_dict_keys", "(", "orig", ")", ":", "return", "{", "clean_xgboost_objective", "(", "k", ")", ":", "v", "for", "(", "k", ",", "v", ")", "in", "orig", ".", "items", "(", ")", "}", "metric_mapping", "=", "clean_dict_keys", "(", "{", "'rank:pairwise'", ":", "'auc'", ",", "'reg:squarederror'", ":", "'rmse'", ",", "}", ")", "objective", "=", "clean_xgboost_objective", "(", "objective", ")", "assert", "(", "objective", "in", "metric_mapping", ")", ",", "'Effect learner objective must be one of: '", "+", "\", \"", ".", "join", "(", "metric_mapping", ")", "return", "objective", ",", "metric_mapping", "[", "objective", "]" ]
[ 75, 0 ]
[ 101, 47 ]
python
en
['en', 'error', 'th']
False
matcher_base_t.__invert__
(self)
not-operator (~)
not-operator (~)
def __invert__(self): """not-operator (~)""" return not_matcher_t(self)
[ "def", "__invert__", "(", "self", ")", ":", "return", "not_matcher_t", "(", "self", ")" ]
[ 27, 4 ]
[ 29, 34 ]
python
en
['en', 'zh', 'en']
False
matcher_base_t.__and__
(self, other)
and-operator (&)
and-operator (&)
def __and__(self, other): """and-operator (&)""" return and_matcher_t([self, other])
[ "def", "__and__", "(", "self", ",", "other", ")", ":", "return", "and_matcher_t", "(", "[", "self", ",", "other", "]", ")" ]
[ 31, 4 ]
[ 33, 43 ]
python
en
['en', 'en', 'en']
False
matcher_base_t.__or__
(self, other)
or-operator (|)
or-operator (|)
def __or__(self, other): """or-operator (|)""" return or_matcher_t([self, other])
[ "def", "__or__", "(", "self", ",", "other", ")", ":", "return", "or_matcher_t", "(", "[", "self", ",", "other", "]", ")" ]
[ 35, 4 ]
[ 37, 42 ]
python
en
['en', 'yo', 'en']
False
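A self-contained sketch of the combinator pattern these three operators implement; the composite classes mirror the names used above, but their minimal bodies here are assumptions for illustration:
class matcher_base_t:
    def __invert__(self):
        return not_matcher_t(self)
    def __and__(self, other):
        return and_matcher_t([self, other])
    def __or__(self, other):
        return or_matcher_t([self, other])
class not_matcher_t(matcher_base_t):
    def __init__(self, matcher):
        self.matcher = matcher
    def __call__(self, decl):
        return not self.matcher(decl)
class and_matcher_t(matcher_base_t):
    def __init__(self, matchers):
        self.matchers = matchers
    def __call__(self, decl):
        return all(m(decl) for m in self.matchers)
class or_matcher_t(matcher_base_t):
    def __init__(self, matchers):
        self.matchers = matchers
    def __call__(self, decl):
        return any(m(decl) for m in self.matchers)
class name_is(matcher_base_t):
    # Toy leaf matcher: here a "declaration" is just a string name.
    def __init__(self, name):
        self.name = name
    def __call__(self, decl):
        return decl == self.name
query = (name_is('foo') | name_is('bar')) & ~name_is('baz')
print(query('foo'), query('baz'))  # True False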
regex_matcher_t.__init__
(self, regex, function=None)
:param regex: regular expression :type regex: string, an instance of this class will compile it for you :param function: function that will be called to extract information from a declaration as a string. It takes a single argument (a reference to a declaration) and should return a string. If function is None, then the matcher will use the declaration name.
:param regex: regular expression :type regex: string, an instance of this class will compile it for you
def __init__(self, regex, function=None): """ :param regex: regular expression :type regex: string, an instance of this class will compile it for you :param function: function that will be called to extract information from a declaration as a string. It takes a single argument (a reference to a declaration) and should return a string. If function is None, then the matcher will use the declaration name. """ matcher_base_t.__init__(self) self.regex = re.compile(regex) self.function = function if self.function is None: self.function = lambda decl: decl.name
[ "def", "__init__", "(", "self", ",", "regex", ",", "function", "=", "None", ")", ":", "matcher_base_t", ".", "__init__", "(", "self", ")", "self", ".", "regex", "=", "re", ".", "compile", "(", "regex", ")", "self", ".", "function", "=", "function", "if", "self", ".", "function", "is", "None", ":", "self", ".", "function", "=", "lambda", "decl", ":", "decl", ".", "name" ]
[ 129, 4 ]
[ 145, 50 ]
python
en
['en', 'error', 'th']
False
custom_matcher_t.__init__
(self, function)
:param function: callable that takes a single argument (a declaration instance) and returns True or False
:param function: callable that takes a single argument (a declaration instance) and returns True or False
def __init__(self, function): """ :param function: callable that takes a single argument (a declaration instance) and returns True or False """ matcher_base_t.__init__(self) self.function = function
[ "def", "__init__", "(", "self", ",", "function", ")", ":", "matcher_base_t", ".", "__init__", "(", "self", ")", "self", ".", "function", "=", "function" ]
[ 161, 4 ]
[ 167, 32 ]
python
en
['en', 'error', 'th']
False
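A hedged usage sketch for the two matcher constructors above; the pygccxml import path and the free_function_t/arguments attributes are assumptions based on the library's usual layout:
from pygccxml import declarations  # assumed import path
# Match declarations whose name starts with "impl_", via the default
# name-extracting function.
starts_with_impl = declarations.regex_matcher_t(r"impl_.*")
# Match free functions with exactly two arguments via a custom predicate.
two_args = declarations.custom_matcher_t(
    lambda decl: isinstance(decl, declarations.free_function_t)
    and len(decl.arguments) == 2
)
# The matcher_base_t operators compose them into a single query.
query = starts_with_impl & ~two_args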
access_type_matcher_t.__init__
(self, access_type)
:param access_type: declaration access type, could be "public", "private", "protected" :type access_type: :class:`str`
:param access_type: declaration access type, could be "public", "private", "protected" :type access_type: :class:`str`
def __init__(self, access_type): """ :param access_type: declaration access type, could be "public", "private", "protected" :type access_type: :class:`str` """ matcher_base_t.__init__(self) self.access_type = access_type
[ "def", "__init__", "(", "self", ",", "access_type", ")", ":", "matcher_base_t", ".", "__init__", "(", "self", ")", "self", ".", "access_type", "=", "access_type" ]
[ 184, 4 ]
[ 191, 38 ]
python
en
['en', 'error', 'th']
False
virtuality_type_matcher_t.__init__
(self, virtuality_type)
:param virtuality_type: declaration virtuality type :type virtuality_type: :class:`VIRTUALITY_TYPES` defines a few constants for your convenience.
:param virtuality_type: declaration virtuality type :type virtuality_type: :class:`VIRTUALITY_TYPES` defines a few constants for your convenience.
def __init__(self, virtuality_type): """ :param virtuality_type: declaration virtuality type :type virtuality_type: :class:`VIRTUALITY_TYPES` defines a few constants for your convenience. """ matcher_base_t.__init__(self) self.virtuality_type = virtuality_type
[ "def", "__init__", "(", "self", ",", "virtuality_type", ")", ":", "matcher_base_t", ".", "__init__", "(", "self", ")", "self", ".", "virtuality_type", "=", "virtuality_type" ]
[ 212, 4 ]
[ 219, 46 ]
python
en
['en', 'error', 'th']
False
make_query_packet
()
Construct a UDP packet suitable for querying an NTP server to ask for the current time.
Construct a UDP packet suitable for querying an NTP server to ask for the current time.
def make_query_packet(): """Construct a UDP packet suitable for querying an NTP server to ask for the current time.""" # The structure of an NTP packet is described here: # https://tools.ietf.org/html/rfc5905#page-19 # They're always 48 bytes long, unless you're using extensions, which we # aren't. packet = bytearray(48) # The first byte contains 3 subfields: # first 2 bits: 11, leap second status unknown # next 3 bits: 100, NTP version indicator, 0b100 == 4 == version 4 # last 3 bits: 011, NTP mode indicator, 0b011 == 3 == "client" packet[0] = 0b11100011 # For an outgoing request, all other fields can be left as zeros. return packet
[ "def", "make_query_packet", "(", ")", ":", "# The structure of an NTP packet is described here:", "# https://tools.ietf.org/html/rfc5905#page-19", "# They're always 48 bytes long, unless you're using extensions, which we", "# aren't.", "packet", "=", "bytearray", "(", "48", ")", "# The first byte contains 3 subfields:", "# first 2 bits: 11, leap second status unknown", "# next 3 bits: 100, NTP version indicator, 0b100 == 4 == version 4", "# last 3 bits: 011, NTP mode indicator, 0b011 == 3 == \"client\"", "packet", "[", "0", "]", "=", "0b11100011", "# For an outgoing request, all other fields can be left as zeros.", "return", "packet" ]
[ 9, 0 ]
[ 27, 17 ]
python
en
['en', 'en', 'en']
True
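A quick re-derivation of the first-byte layout described in the comments above, just masking the subfields back out of the literal:
first_byte = 0b11100011
leap = first_byte >> 6                   # 0b11 -> leap second status unknown
ntp_version = (first_byte >> 3) & 0b111  # 0b100 -> NTP version 4
mode = first_byte & 0b111                # 0b011 -> client mode
print(leap, ntp_version, mode)           # 3 4 3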
extract_transmit_timestamp
(ntp_packet)
Given an NTP packet, extract the "transmit timestamp" field, as a Python datetime.
Given an NTP packet, extract the "transmit timestamp" field, as a Python datetime.
def extract_transmit_timestamp(ntp_packet): """Given an NTP packet, extract the "transmit timestamp" field, as a Python datetime.""" # The transmit timestamp is the time that the server sent its response. # It's stored in bytes 40-47 of the NTP packet. See: # https://tools.ietf.org/html/rfc5905#page-19 encoded_transmit_timestamp = ntp_packet[40:48] # The timestamp is stored in the "NTP timestamp format", which is a 32-bit # count of whole seconds, followed by a 32-bit count of fractions of # a second. See: # https://tools.ietf.org/html/rfc5905#page-13 seconds, fraction = struct.unpack("!II", encoded_transmit_timestamp) # The timestamp is the number of seconds since January 1, 1900 (ignoring # leap seconds). To convert it to a datetime object, we do some simple # datetime arithmetic: base_time = datetime.datetime(1900, 1, 1) offset = datetime.timedelta(seconds=seconds + fraction / 2**32) return base_time + offset
[ "def", "extract_transmit_timestamp", "(", "ntp_packet", ")", ":", "# The transmit timestamp is the time that the server sent its response.", "# It's stored in bytes 40-47 of the NTP packet. See:", "# https://tools.ietf.org/html/rfc5905#page-19", "encoded_transmit_timestamp", "=", "ntp_packet", "[", "40", ":", "48", "]", "# The timestamp is stored in the \"NTP timestamp format\", which is a 32-bit", "# count of whole seconds, followed by a 32-bit count of fractions of", "# a second. See:", "# https://tools.ietf.org/html/rfc5905#page-13", "seconds", ",", "fraction", "=", "struct", ".", "unpack", "(", "\"!II\"", ",", "encoded_transmit_timestamp", ")", "# The timestamp is the number of seconds since January 1, 1900 (ignoring", "# leap seconds). To convert it to a datetime object, we do some simple", "# datetime arithmetic:", "base_time", "=", "datetime", ".", "datetime", "(", "1900", ",", "1", ",", "1", ")", "offset", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "seconds", "+", "fraction", "/", "2", "**", "32", ")", "return", "base_time", "+", "offset" ]
[ 29, 0 ]
[ 49, 29 ]
python
en
['en', 'en', 'en']
True
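A worked example of the timestamp arithmetic above: pack a known seconds/fraction pair into bytes 40-47 of a fake packet and decode it by hand (the seconds value is an arbitrary assumed example):
import datetime
import struct
packet = bytearray(48)
seconds = 3_786_825_600   # assumed example count of seconds since 1900-01-01
fraction = 2 ** 31        # exactly half a second (2**31 / 2**32 == 0.5)
packet[40:48] = struct.pack("!II", seconds, fraction)
decoded_seconds, decoded_fraction = struct.unpack("!II", packet[40:48])
base_time = datetime.datetime(1900, 1, 1)
timestamp = base_time + datetime.timedelta(
    seconds=decoded_seconds + decoded_fraction / 2 ** 32
)
print(timestamp)  # a datetime ending in .500000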
ExpectColumnWassersteinDistanceToBeLessThan.validate_configuration
(self, configuration: Optional[ExpectationConfiguration])
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that necessary configuration arguments have been provided for the validation of the expectation. Args: configuration (OPTIONAL[ExpectationConfiguration]): \ An optional Expectation Configuration entry that will be used to configure the expectation Returns: True if the configuration has been validated successfully. Otherwise, raises an exception
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that necessary configuration arguments have been provided for the validation of the expectation.
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]): """ Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that necessary configuration arguments have been provided for the validation of the expectation. Args: configuration (OPTIONAL[ExpectationConfiguration]): \ An optional Expectation Configuration entry that will be used to configure the expectation Returns: True if the configuration has been validated successfully. Otherwise, raises an exception """ super().validate_configuration(configuration) self.validate_metric_value_between_configuration(configuration=configuration)
[ "def", "validate_configuration", "(", "self", ",", "configuration", ":", "Optional", "[", "ExpectationConfiguration", "]", ")", ":", "super", "(", ")", ".", "validate_configuration", "(", "configuration", ")", "self", ".", "validate_metric_value_between_configuration", "(", "configuration", "=", "configuration", ")" ]
[ 241, 4 ]
[ 253, 85 ]
python
en
['en', 'error', 'th']
False
_calc_validation_statistics
(validation_results)
Calculate summary statistics for the validation results and return ``ExpectationStatistics``.
Calculate summary statistics for the validation results and return ``ExpectationStatistics``.
def _calc_validation_statistics(validation_results): """ Calculate summary statistics for the validation results and return ``ExpectationStatistics``. """ # calc stats successful_expectations = sum(exp.success for exp in validation_results) evaluated_expectations = len(validation_results) unsuccessful_expectations = evaluated_expectations - successful_expectations success = successful_expectations == evaluated_expectations try: success_percent = successful_expectations / evaluated_expectations * 100 except ZeroDivisionError: # success_percent = float("nan") success_percent = None return ValidationStatistics( successful_expectations=successful_expectations, evaluated_expectations=evaluated_expectations, unsuccessful_expectations=unsuccessful_expectations, success=success, success_percent=success_percent, )
[ "def", "_calc_validation_statistics", "(", "validation_results", ")", ":", "# calc stats", "successful_expectations", "=", "sum", "(", "exp", ".", "success", "for", "exp", "in", "validation_results", ")", "evaluated_expectations", "=", "len", "(", "validation_results", ")", "unsuccessful_expectations", "=", "evaluated_expectations", "-", "successful_expectations", "success", "=", "successful_expectations", "==", "evaluated_expectations", "try", ":", "success_percent", "=", "successful_expectations", "/", "evaluated_expectations", "*", "100", "except", "ZeroDivisionError", ":", "# success_percent = float(\"nan\")", "success_percent", "=", "None", "return", "ValidationStatistics", "(", "successful_expectations", "=", "successful_expectations", ",", "evaluated_expectations", "=", "evaluated_expectations", ",", "unsuccessful_expectations", "=", "unsuccessful_expectations", ",", "success", "=", "success", ",", "success_percent", "=", "success_percent", ",", ")" ]
[ 1279, 0 ]
[ 1301, 5 ]
python
en
['en', 'error', 'th']
False
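A self-contained sketch of the same statistics calculation with stand-in results, expressing the ZeroDivisionError guard as a conditional:
from collections import namedtuple
Result = namedtuple("Result", "success")
validation_results = [Result(True), Result(True), Result(False)]
successful = sum(r.success for r in validation_results)
evaluated = len(validation_results)
success_percent = successful / evaluated * 100 if evaluated else None
print(successful, evaluated - successful, success_percent)  # 2 passed, 1 failed, ~66.7%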
DataAsset.__init__
(self, *args, **kwargs)
Initialize the DataAsset. :param profiler (profiler class) = None: The profiler that should be run on the data_asset to build a baseline expectation suite. Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the support for the profiler parameter not obvious from the signature.
Initialize the DataAsset.
def __init__(self, *args, **kwargs): """ Initialize the DataAsset. :param profiler (profiler class) = None: The profiler that should be run on the data_asset to build a baseline expectation suite. Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the support for the profiler parameter not obvious from the signature. """ interactive_evaluation = kwargs.pop("interactive_evaluation", True) profiler = kwargs.pop("profiler", None) expectation_suite = kwargs.pop("expectation_suite", None) expectation_suite_name = kwargs.pop("expectation_suite_name", None) data_context = kwargs.pop("data_context", None) batch_kwargs = kwargs.pop( "batch_kwargs", BatchKwargs(ge_batch_id=str(uuid.uuid1())) ) batch_parameters = kwargs.pop("batch_parameters", {}) batch_markers = kwargs.pop("batch_markers", {}) if "autoinspect_func" in kwargs: warnings.warn( "Autoinspect_func is no longer supported; use a profiler instead (migration is easy!).", category=DeprecationWarning, ) super().__init__(*args, **kwargs) self._config = {"interactive_evaluation": interactive_evaluation} self._initialize_expectations( expectation_suite=expectation_suite, expectation_suite_name=expectation_suite_name, ) self._data_context = data_context self._batch_kwargs = BatchKwargs(batch_kwargs) self._batch_markers = batch_markers self._batch_parameters = batch_parameters # This special state variable tracks whether a validation run is going on, which will disable # saving expectation config objects self._active_validation = False if profiler is not None: profiler.profile(self) if data_context and hasattr(data_context, "_expectation_explorer_manager"): self.set_default_expectation_argument("include_config", True)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "interactive_evaluation", "=", "kwargs", ".", "pop", "(", "\"interactive_evaluation\"", ",", "True", ")", "profiler", "=", "kwargs", ".", "pop", "(", "\"profiler\"", ",", "None", ")", "expectation_suite", "=", "kwargs", ".", "pop", "(", "\"expectation_suite\"", ",", "None", ")", "expectation_suite_name", "=", "kwargs", ".", "pop", "(", "\"expectation_suite_name\"", ",", "None", ")", "data_context", "=", "kwargs", ".", "pop", "(", "\"data_context\"", ",", "None", ")", "batch_kwargs", "=", "kwargs", ".", "pop", "(", "\"batch_kwargs\"", ",", "BatchKwargs", "(", "ge_batch_id", "=", "str", "(", "uuid", ".", "uuid1", "(", ")", ")", ")", ")", "batch_parameters", "=", "kwargs", ".", "pop", "(", "\"batch_parameters\"", ",", "{", "}", ")", "batch_markers", "=", "kwargs", ".", "pop", "(", "\"batch_markers\"", ",", "{", "}", ")", "if", "\"autoinspect_func\"", "in", "kwargs", ":", "warnings", ".", "warn", "(", "\"Autoinspect_func is no longer supported; use a profiler instead (migration is easy!).\"", ",", "category", "=", "DeprecationWarning", ",", ")", "super", "(", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_config", "=", "{", "\"interactive_evaluation\"", ":", "interactive_evaluation", "}", "self", ".", "_initialize_expectations", "(", "expectation_suite", "=", "expectation_suite", ",", "expectation_suite_name", "=", "expectation_suite_name", ",", ")", "self", ".", "_data_context", "=", "data_context", "self", ".", "_batch_kwargs", "=", "BatchKwargs", "(", "batch_kwargs", ")", "self", ".", "_batch_markers", "=", "batch_markers", "self", ".", "_batch_parameters", "=", "batch_parameters", "# This special state variable tracks whether a validation run is going on, which will disable", "# saving expectation config objects", "self", ".", "_active_validation", "=", "False", "if", "profiler", "is", "not", "None", ":", "profiler", ".", "profile", "(", "self", ")", "if", "data_context", "and", "hasattr", "(", "data_context", ",", "\"_expectation_explorer_manager\"", ")", ":", "self", ".", "set_default_expectation_argument", "(", "\"include_config\"", ",", "True", ")" ]
[ 46, 4 ]
[ 93, 73 ]
python
en
['en', 'error', 'th']
False
DataAsset.autoinspect
(self, profiler)
Deprecated: use profile instead. Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own. Args: profiler: The profiler to use Returns: tuple(expectation_suite, validation_results)
Deprecated: use profile instead.
def autoinspect(self, profiler): """Deprecated: use profile instead. Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own. Args: profiler: The profiler to use Returns: tuple(expectation_suite, validation_results) """ warnings.warn( "The term autoinspect is deprecated and will be removed in a future release. Please use 'profile'\ instead." ) expectation_suite, validation_results = profiler.profile(self) return expectation_suite, validation_results
[ "def", "autoinspect", "(", "self", ",", "profiler", ")", ":", "warnings", ".", "warn", "(", "\"The term autoinspect is deprecated and will be removed in a future release. Please use 'profile'\\\n instead.\"", ")", "expectation_suite", ",", "validation_results", "=", "profiler", ".", "profile", "(", "self", ")", "return", "expectation_suite", ",", "validation_results" ]
[ 101, 4 ]
[ 117, 52 ]
python
en
['en', 'ro', 'en']
True
DataAsset.profile
(self, profiler, profiler_configuration=None)
Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own. Args: profiler: The profiler to use profiler_configuration: Optional profiler configuration dict Returns: tuple(expectation_suite, validation_results)
Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own.
def profile(self, profiler, profiler_configuration=None): """Use the provided profiler to evaluate this data_asset and assign the resulting expectation suite as its own. Args: profiler: The profiler to use profiler_configuration: Optional profiler configuration dict Returns: tuple(expectation_suite, validation_results) """ expectation_suite, validation_results = profiler.profile( self, profiler_configuration ) return expectation_suite, validation_results
[ "def", "profile", "(", "self", ",", "profiler", ",", "profiler_configuration", "=", "None", ")", ":", "expectation_suite", ",", "validation_results", "=", "profiler", ".", "profile", "(", "self", ",", "profiler_configuration", ")", "return", "expectation_suite", ",", "validation_results" ]
[ 119, 4 ]
[ 133, 52 ]
python
en
['en', 'en', 'en']
True
DataAsset.expectation
(cls, method_arg_names)
Manages configuration and running of expectation objects. Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator \ used by great expectations to manage expectation configurations. Args: method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation \ (typically the result of inspection). Positional arguments are explicitly mapped to \ keyword arguments when the expectation is run. Notes: Intermediate decorators that call the core @expectation decorator will most likely need to pass their \ decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset \ column_map_expectation decorator relies on the DataAsset expectation decorator, but will pass through the \ signature from the implementing method. @expectation intercepts and takes action based on the following parameters: * include_config (boolean or None) : \ If True, then include the generated expectation config as part of the result object. \ For more detail, see :ref:`include_config`. * catch_exceptions (boolean or None) : \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. * result_format (str or None) : \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. * meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see :ref:`meta`.
Manages configuration and running of expectation objects.
def expectation(cls, method_arg_names): """Manages configuration and running of expectation objects. Expectation builds and saves a new expectation configuration to the DataAsset object. It is the core decorator \ used by great expectations to manage expectation configurations. Args: method_arg_names (List) : An ordered list of the arguments used by the method implementing the expectation \ (typically the result of inspection). Positional arguments are explicitly mapped to \ keyword arguments when the expectation is run. Notes: Intermediate decorators that call the core @expectation decorator will most likely need to pass their \ decorated methods' signature up to the expectation decorator. For example, the MetaPandasDataset \ column_map_expectation decorator relies on the DataAsset expectation decorator, but will pass through the \ signature from the implementing method. @expectation intercepts and takes action based on the following parameters: * include_config (boolean or None) : \ If True, then include the generated expectation config as part of the result object. \ For more detail, see :ref:`include_config`. * catch_exceptions (boolean or None) : \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. * result_format (str or None) : \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. * meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see :ref:`meta`. """ def outer_wrapper(func): @wraps(func) def wrapper(self, *args, **kwargs): # Get the name of the method method_name = func.__name__ # Combine all arguments into a single new "all_args" dictionary to name positional parameters all_args = dict(zip(method_arg_names, args)) all_args.update(kwargs) # Unpack display parameters; remove them from all_args if appropriate if "include_config" in kwargs: include_config = kwargs["include_config"] del all_args["include_config"] else: include_config = self.default_expectation_args["include_config"] if "catch_exceptions" in kwargs: catch_exceptions = kwargs["catch_exceptions"] del all_args["catch_exceptions"] else: catch_exceptions = self.default_expectation_args["catch_exceptions"] if "result_format" in kwargs: result_format = kwargs["result_format"] else: result_format = self.default_expectation_args["result_format"] # Extract the meta object for use as a top-level expectation_config holder if "meta" in kwargs: meta = kwargs["meta"] del all_args["meta"] else: meta = None # Get the signature of the inner wrapper: argspec = inspect.getfullargspec(func)[0][1:] if "result_format" in argspec: all_args["result_format"] = result_format else: if "result_format" in all_args: del all_args["result_format"] all_args = recursively_convert_to_json_serializable(all_args) # Patch in PARAMETER args, and remove locally-supplied arguments # This will become the stored config expectation_args = copy.deepcopy(all_args) if self._expectation_suite.evaluation_parameters: ( evaluation_args, substituted_parameters, ) = build_evaluation_parameters( expectation_args, self._expectation_suite.evaluation_parameters, self._config.get("interactive_evaluation", True), self._data_context, ) else: ( evaluation_args, substituted_parameters, ) = build_evaluation_parameters( expectation_args, None, self._config.get("interactive_evaluation", True), self._data_context, ) # update evaluation_args with defaults from expectation signature if method_name not in ExpectationConfiguration.kwarg_lookup_dict: default_kwarg_values = { k: v.default for k, v in inspect.signature(func).parameters.items() if v.default is not inspect.Parameter.empty } default_kwarg_values.update(evaluation_args) evaluation_args = default_kwarg_values # Construct the expectation_config object expectation_config = ExpectationConfiguration( expectation_type=method_name, kwargs=expectation_args, meta=meta ) raised_exception = False exception_traceback = None exception_message = None # Finally, execute the expectation method itself if ( self._config.get("interactive_evaluation", True) or self._active_validation ): try: return_obj = func(self, **evaluation_args) if isinstance(return_obj, dict): return_obj = ExpectationValidationResult(**return_obj) except Exception as err: if catch_exceptions: raised_exception = True exception_traceback = traceback.format_exc() exception_message = "{}: {}".format( type(err).__name__, str(err) ) return_obj = ExpectationValidationResult(success=False) else: raise err else: return_obj = ExpectationValidationResult( expectation_config=copy.deepcopy(expectation_config) ) # If validate has set active_validation to true, then we do not save the config to avoid # saving updating expectation configs to the same suite during validation runs if self._active_validation is True: stored_config = expectation_config else: # Append the expectation to the config. stored_config = self._expectation_suite.add_expectation( expectation_config ) if include_config: return_obj.expectation_config = copy.deepcopy(stored_config) # If there was no interactive evaluation, success will not have been computed. if return_obj.success is not None: # Add a "success" object to the config stored_config.success_on_last_run = return_obj.success if catch_exceptions: return_obj.exception_info = { "raised_exception": raised_exception, "exception_message": exception_message, "exception_traceback": exception_traceback, } if len(substituted_parameters) > 0: if meta is None: meta = dict() meta["substituted_parameters"] = substituted_parameters # Add meta to return object if meta is not None: return_obj.meta = meta return_obj = recursively_convert_to_json_serializable(return_obj) if self._data_context is not None: return_obj = self._data_context.update_return_obj(self, return_obj) return return_obj return wrapper return outer_wrapper
[ "def", "expectation", "(", "cls", ",", "method_arg_names", ")", ":", "def", "outer_wrapper", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Get the name of the method", "method_name", "=", "func", ".", "__name__", "# Combine all arguments into a single new \"all_args\" dictionary to name positional parameters", "all_args", "=", "dict", "(", "zip", "(", "method_arg_names", ",", "args", ")", ")", "all_args", ".", "update", "(", "kwargs", ")", "# Unpack display parameters; remove them from all_args if appropriate", "if", "\"include_config\"", "in", "kwargs", ":", "include_config", "=", "kwargs", "[", "\"include_config\"", "]", "del", "all_args", "[", "\"include_config\"", "]", "else", ":", "include_config", "=", "self", ".", "default_expectation_args", "[", "\"include_config\"", "]", "if", "\"catch_exceptions\"", "in", "kwargs", ":", "catch_exceptions", "=", "kwargs", "[", "\"catch_exceptions\"", "]", "del", "all_args", "[", "\"catch_exceptions\"", "]", "else", ":", "catch_exceptions", "=", "self", ".", "default_expectation_args", "[", "\"catch_exceptions\"", "]", "if", "\"result_format\"", "in", "kwargs", ":", "result_format", "=", "kwargs", "[", "\"result_format\"", "]", "else", ":", "result_format", "=", "self", ".", "default_expectation_args", "[", "\"result_format\"", "]", "# Extract the meta object for use as a top-level expectation_config holder", "if", "\"meta\"", "in", "kwargs", ":", "meta", "=", "kwargs", "[", "\"meta\"", "]", "del", "all_args", "[", "\"meta\"", "]", "else", ":", "meta", "=", "None", "# Get the signature of the inner wrapper:", "argspec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "[", "0", "]", "[", "1", ":", "]", "if", "\"result_format\"", "in", "argspec", ":", "all_args", "[", "\"result_format\"", "]", "=", "result_format", "else", ":", "if", "\"result_format\"", "in", "all_args", ":", "del", "all_args", "[", "\"result_format\"", "]", "all_args", "=", "recursively_convert_to_json_serializable", "(", "all_args", ")", "# Patch in PARAMETER args, and remove locally-supplied arguments", "# This will become the stored config", "expectation_args", "=", "copy", ".", "deepcopy", "(", "all_args", ")", "if", "self", ".", "_expectation_suite", ".", "evaluation_parameters", ":", "(", "evaluation_args", ",", "substituted_parameters", ",", ")", "=", "build_evaluation_parameters", "(", "expectation_args", ",", "self", ".", "_expectation_suite", ".", "evaluation_parameters", ",", "self", ".", "_config", ".", "get", "(", "\"interactive_evaluation\"", ",", "True", ")", ",", "self", ".", "_data_context", ",", ")", "else", ":", "(", "evaluation_args", ",", "substituted_parameters", ",", ")", "=", "build_evaluation_parameters", "(", "expectation_args", ",", "None", ",", "self", ".", "_config", ".", "get", "(", "\"interactive_evaluation\"", ",", "True", ")", ",", "self", ".", "_data_context", ",", ")", "# update evaluation_args with defaults from expectation signature", "if", "method_name", "not", "in", "ExpectationConfiguration", ".", "kwarg_lookup_dict", ":", "default_kwarg_values", "=", "{", "k", ":", "v", ".", "default", "for", "k", ",", "v", "in", "inspect", ".", "signature", "(", "func", ")", ".", "parameters", ".", "items", "(", ")", "if", "v", ".", "default", "is", "not", "inspect", ".", "Parameter", ".", "empty", "}", "default_kwarg_values", ".", "update", "(", "evaluation_args", ")", "evaluation_args", "=", "default_kwarg_values", "# Construct the expectation_config 
object", "expectation_config", "=", "ExpectationConfiguration", "(", "expectation_type", "=", "method_name", ",", "kwargs", "=", "expectation_args", ",", "meta", "=", "meta", ")", "raised_exception", "=", "False", "exception_traceback", "=", "None", "exception_message", "=", "None", "# Finally, execute the expectation method itself", "if", "(", "self", ".", "_config", ".", "get", "(", "\"interactive_evaluation\"", ",", "True", ")", "or", "self", ".", "_active_validation", ")", ":", "try", ":", "return_obj", "=", "func", "(", "self", ",", "*", "*", "evaluation_args", ")", "if", "isinstance", "(", "return_obj", ",", "dict", ")", ":", "return_obj", "=", "ExpectationValidationResult", "(", "*", "*", "return_obj", ")", "except", "Exception", "as", "err", ":", "if", "catch_exceptions", ":", "raised_exception", "=", "True", "exception_traceback", "=", "traceback", ".", "format_exc", "(", ")", "exception_message", "=", "\"{}: {}\"", ".", "format", "(", "type", "(", "err", ")", ".", "__name__", ",", "str", "(", "err", ")", ")", "return_obj", "=", "ExpectationValidationResult", "(", "success", "=", "False", ")", "else", ":", "raise", "err", "else", ":", "return_obj", "=", "ExpectationValidationResult", "(", "expectation_config", "=", "copy", ".", "deepcopy", "(", "expectation_config", ")", ")", "# If validate has set active_validation to true, then we do not save the config to avoid", "# saving updating expectation configs to the same suite during validation runs", "if", "self", ".", "_active_validation", "is", "True", ":", "stored_config", "=", "expectation_config", "else", ":", "# Append the expectation to the config.", "stored_config", "=", "self", ".", "_expectation_suite", ".", "add_expectation", "(", "expectation_config", ")", "if", "include_config", ":", "return_obj", ".", "expectation_config", "=", "copy", ".", "deepcopy", "(", "stored_config", ")", "# If there was no interactive evaluation, success will not have been computed.", "if", "return_obj", ".", "success", "is", "not", "None", ":", "# Add a \"success\" object to the config", "stored_config", ".", "success_on_last_run", "=", "return_obj", ".", "success", "if", "catch_exceptions", ":", "return_obj", ".", "exception_info", "=", "{", "\"raised_exception\"", ":", "raised_exception", ",", "\"exception_message\"", ":", "exception_message", ",", "\"exception_traceback\"", ":", "exception_traceback", ",", "}", "if", "len", "(", "substituted_parameters", ")", ">", "0", ":", "if", "meta", "is", "None", ":", "meta", "=", "dict", "(", ")", "meta", "[", "\"substituted_parameters\"", "]", "=", "substituted_parameters", "# Add meta to return object", "if", "meta", "is", "not", "None", ":", "return_obj", ".", "meta", "=", "meta", "return_obj", "=", "recursively_convert_to_json_serializable", "(", "return_obj", ")", "if", "self", ".", "_data_context", "is", "not", "None", ":", "return_obj", "=", "self", ".", "_data_context", ".", "update_return_obj", "(", "self", ",", "return_obj", ")", "return", "return_obj", "return", "wrapper", "return", "outer_wrapper" ]
[ 142, 4 ]
[ 336, 28 ]
python
en
['en', 'en', 'en']
True
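A stripped-down sketch of the decorator pattern the record above implements: map positional args onto names, pop display options, then run the wrapped check and optionally trap exceptions. Everything here is a simplified assumption for illustration, not Great Expectations' actual API:
import functools
def expectation(method_arg_names):
    def outer_wrapper(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # Name the positional parameters, as the real decorator does.
            all_args = dict(zip(method_arg_names, args))
            all_args.update(kwargs)
            catch_exceptions = all_args.pop("catch_exceptions", False)
            try:
                return func(self, **all_args)
            except Exception as err:
                if catch_exceptions:
                    return {"success": False, "exception": repr(err)}
                raise
        return wrapper
    return outer_wrapper
class Asset:
    @expectation(["column", "min_value"])
    def expect_min(self, column, min_value):
        return {"success": min_value <= min(column)}
print(Asset().expect_min([3, 5], 2))                     # {'success': True}
print(Asset().expect_min([], 2, catch_exceptions=True))  # trapped ValueError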
DataAsset._initialize_expectations
( self, expectation_suite=None, expectation_suite_name=None )
Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`. In addition, this always sets the `default_expectation_args` to: `include_config`: False, `catch_exceptions`: False, `output_format`: 'BASIC' By default, initializes data_asset_type to the name of the implementing class, but subclasses that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their interoperability. Args: expectation_suite (json): \ A json-serializable expectation config. \ If None, creates default `_expectation_suite` with an empty list of expectations and \ key value `data_asset_name` as `data_asset_name`. expectation_suite_name (string): \ The name to assign to the `expectation_suite.expectation_suite_name` Returns: None
Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`. In addition, this always sets the `default_expectation_args` to: `include_config`: False, `catch_exceptions`: False, `output_format`: 'BASIC'
def _initialize_expectations( self, expectation_suite=None, expectation_suite_name=None ): """Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`. In addition, this always sets the `default_expectation_args` to: `include_config`: False, `catch_exceptions`: False, `output_format`: 'BASIC' By default, initializes data_asset_type to the name of the implementing class, but subclasses that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their interoperability. Args: expectation_suite (json): \ A json-serializable expectation config. \ If None, creates default `_expectation_suite` with an empty list of expectations and \ key value `data_asset_name` as `data_asset_name`. expectation_suite_name (string): \ The name to assign to the `expectation_suite.expectation_suite_name` Returns: None """ if expectation_suite is not None: if isinstance(expectation_suite, dict): expectation_suite = expectationSuiteSchema.load(expectation_suite) else: expectation_suite = copy.deepcopy(expectation_suite) self._expectation_suite = expectation_suite if expectation_suite_name is not None: if ( self._expectation_suite.expectation_suite_name != expectation_suite_name ): logger.warning( "Overriding existing expectation_suite_name {n1} with new name {n2}".format( n1=self._expectation_suite.expectation_suite_name, n2=expectation_suite_name, ) ) self._expectation_suite.expectation_suite_name = expectation_suite_name else: if expectation_suite_name is None: expectation_suite_name = "default" self._expectation_suite = ExpectationSuite( expectation_suite_name=expectation_suite_name ) self._expectation_suite.data_asset_type = self._data_asset_type self.default_expectation_args = { "include_config": True, "catch_exceptions": False, "result_format": "BASIC", }
[ "def", "_initialize_expectations", "(", "self", ",", "expectation_suite", "=", "None", ",", "expectation_suite_name", "=", "None", ")", ":", "if", "expectation_suite", "is", "not", "None", ":", "if", "isinstance", "(", "expectation_suite", ",", "dict", ")", ":", "expectation_suite", "=", "expectationSuiteSchema", ".", "load", "(", "expectation_suite", ")", "else", ":", "expectation_suite", "=", "copy", ".", "deepcopy", "(", "expectation_suite", ")", "self", ".", "_expectation_suite", "=", "expectation_suite", "if", "expectation_suite_name", "is", "not", "None", ":", "if", "(", "self", ".", "_expectation_suite", ".", "expectation_suite_name", "!=", "expectation_suite_name", ")", ":", "logger", ".", "warning", "(", "\"Overriding existing expectation_suite_name {n1} with new name {n2}\"", ".", "format", "(", "n1", "=", "self", ".", "_expectation_suite", ".", "expectation_suite_name", ",", "n2", "=", "expectation_suite_name", ",", ")", ")", "self", ".", "_expectation_suite", ".", "expectation_suite_name", "=", "expectation_suite_name", "else", ":", "if", "expectation_suite_name", "is", "None", ":", "expectation_suite_name", "=", "\"default\"", "self", ".", "_expectation_suite", "=", "ExpectationSuite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "self", ".", "_expectation_suite", ".", "data_asset_type", "=", "self", ".", "_data_asset_type", "self", ".", "default_expectation_args", "=", "{", "\"include_config\"", ":", "True", ",", "\"catch_exceptions\"", ":", "False", ",", "\"result_format\"", ":", "\"BASIC\"", ",", "}" ]
[ 338, 4 ]
[ 395, 9 ]
python
en
['en', 'en', 'en']
True
DataAsset.append_expectation
(self, expectation_config)
This method is a thin wrapper for ExpectationSuite.append_expectation
This method is a thin wrapper for ExpectationSuite.append_expectation
def append_expectation(self, expectation_config): """This method is a thin wrapper for ExpectationSuite.append_expectation""" warnings.warn( "append_expectation is deprecated, and will be removed in a future release. " + "Please use ExpectationSuite.add_expectation instead.", DeprecationWarning, ) self._expectation_suite.append_expectation(expectation_config)
[ "def", "append_expectation", "(", "self", ",", "expectation_config", ")", ":", "warnings", ".", "warn", "(", "\"append_expectation is deprecated, and will be removed in a future release. \"", "+", "\"Please use ExpectationSuite.add_expectation instead.\"", ",", "DeprecationWarning", ",", ")", "self", ".", "_expectation_suite", ".", "append_expectation", "(", "expectation_config", ")" ]
[ 397, 4 ]
[ 404, 70 ]
python
en
['en', 'en', 'en']
True
DataAsset.find_expectation_indexes
( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", )
This method is a thin wrapper for ExpectationSuite.find_expectation_indexes
This method is a thin wrapper for ExpectationSuite.find_expectation_indexes
def find_expectation_indexes( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", ) -> List[int]: """This method is a thin wrapper for ExpectationSuite.find_expectation_indexes""" warnings.warn( "find_expectation_indexes is deprecated, and will be removed in a future release. " + "Please use ExpectationSuite.find_expectation_indexes instead.", DeprecationWarning, ) return self._expectation_suite.find_expectation_indexes( expectation_configuration=expectation_configuration, match_type=match_type )
[ "def", "find_expectation_indexes", "(", "self", ",", "expectation_configuration", ":", "ExpectationConfiguration", ",", "match_type", ":", "str", "=", "\"domain\"", ",", ")", "->", "List", "[", "int", "]", ":", "warnings", ".", "warn", "(", "\"find_expectation_indexes is deprecated, and will be removed in a future release. \"", "+", "\"Please use ExpectationSuite.find_expectation_indexes instead.\"", ",", "DeprecationWarning", ",", ")", "return", "self", ".", "_expectation_suite", ".", "find_expectation_indexes", "(", "expectation_configuration", "=", "expectation_configuration", ",", "match_type", "=", "match_type", ")" ]
[ 406, 4 ]
[ 419, 9 ]
python
en
['en', 'en', 'en']
True
DataAsset.find_expectations
( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", )
This method is a thin wrapper for ExpectationSuite.find_expectations()
This method is a thin wrapper for ExpectationSuite.find_expectations()
def find_expectations( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", ) -> List[ExpectationConfiguration]: """This method is a thin wrapper for ExpectationSuite.find_expectations()""" warnings.warn( "find_expectations is deprecated, and will be removed in a future release. " + "Please use ExpectationSuite.find_expectation_indexes instead.", DeprecationWarning, ) return self._expectation_suite.find_expectations( expectation_configuration=expectation_configuration, match_type=match_type )
[ "def", "find_expectations", "(", "self", ",", "expectation_configuration", ":", "ExpectationConfiguration", ",", "match_type", ":", "str", "=", "\"domain\"", ",", ")", "->", "List", "[", "ExpectationConfiguration", "]", ":", "warnings", ".", "warn", "(", "\"find_expectations is deprecated, and will be removed in a future release. \"", "+", "\"Please use ExpectationSuite.find_expectation_indexes instead.\"", ",", "DeprecationWarning", ",", ")", "return", "self", ".", "_expectation_suite", ".", "find_expectations", "(", "expectation_configuration", "=", "expectation_configuration", ",", "match_type", "=", "match_type", ")" ]
[ 421, 4 ]
[ 434, 9 ]
python
en
['en', 'en', 'en']
True
DataAsset.remove_expectation
( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", remove_multiple_matches: bool = False, )
This method is a thin wrapper for ExpectationSuite.remove()
This method is a thin wrapper for ExpectationSuite.remove()
def remove_expectation( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", remove_multiple_matches: bool = False, ) -> List[ExpectationConfiguration]: """This method is a thin wrapper for ExpectationSuite.remove()""" warnings.warn( "DataAsset.remove_expectations is deprecated, and will be removed in a future release. " + "Please use ExpectationSuite.remove_expectation instead.", DeprecationWarning, ) return self._expectation_suite.remove_expectation( expectation_configuration=expectation_configuration, match_type=match_type, remove_multiple_matches=remove_multiple_matches, )
[ "def", "remove_expectation", "(", "self", ",", "expectation_configuration", ":", "ExpectationConfiguration", ",", "match_type", ":", "str", "=", "\"domain\"", ",", "remove_multiple_matches", ":", "bool", "=", "False", ",", ")", "->", "List", "[", "ExpectationConfiguration", "]", ":", "warnings", ".", "warn", "(", "\"DataAsset.remove_expectations is deprecated, and will be removed in a future release. \"", "+", "\"Please use ExpectationSuite.remove_expectation instead.\"", ",", "DeprecationWarning", ",", ")", "return", "self", ".", "_expectation_suite", ".", "remove_expectation", "(", "expectation_configuration", "=", "expectation_configuration", ",", "match_type", "=", "match_type", ",", "remove_multiple_matches", "=", "remove_multiple_matches", ",", ")" ]
[ 436, 4 ]
[ 452, 9 ]
python
en
['en', 'en', 'en']
True
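A hedged sketch of removal via the deprecated wrapper above, again assuming the legacy PandasDataset API. Per the signature, the call returns the removed configuration(s), and with remove_multiple_matches=False more than one match is expected to raise rather than remove.

    from great_expectations.core import ExpectationConfiguration
    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 3]})
    df.expect_column_values_to_be_between("x", min_value=0, max_value=10)
    df.expect_column_values_to_not_be_null("x")

    # Remove exactly one expectation by type + domain (the column).
    removed = df.remove_expectation(
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_not_be_null",
            kwargs={"column": "x"},
        ),
        match_type="domain",
        remove_multiple_matches=False,
    )
    assert len(removed) == 1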
DataAsset.get_default_expectation_arguments
(self)
Fetch default expectation arguments for this data_asset Returns: A dictionary containing all the current default expectation arguments for a data_asset Ex:: { "include_config" : True, "catch_exceptions" : False, "result_format" : 'BASIC' } See also: set_default_expectation_arguments
Fetch default expectation arguments for this data_asset
def get_default_expectation_arguments(self): """Fetch default expectation arguments for this data_asset Returns: A dictionary containing all the current default expectation arguments for a data_asset Ex:: { "include_config" : True, "catch_exceptions" : False, "result_format" : 'BASIC' } See also: set_default_expectation_arguments """ return self.default_expectation_args
[ "def", "get_default_expectation_arguments", "(", "self", ")", ":", "return", "self", ".", "default_expectation_args" ]
[ 486, 4 ]
[ 503, 44 ]
python
en
['en', 'fr', 'en']
True
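The defaults returned here are the dictionary seeded by _initialize_expectations above; a minimal sketch, assuming the legacy PandasDataset constructor.

    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 3]})
    defaults = df.get_default_expectation_arguments()
    # Matches the dictionary seeded by _initialize_expectations:
    # {"include_config": True, "catch_exceptions": False, "result_format": "BASIC"}
    print(defaults)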
DataAsset.set_default_expectation_argument
(self, argument, value)
Set a default expectation argument for this data_asset Args: argument (string): The argument to be replaced value : The new value to use for replacement Returns: None See also: get_default_expectation_arguments
Set a default expectation argument for this data_asset
def set_default_expectation_argument(self, argument, value): """Set a default expectation argument for this data_asset Args: argument (string): The argument to be replaced value : The new value to use for replacement Returns: None See also: get_default_expectation_arguments """ # !!! Maybe add a validation check here? self.default_expectation_args[argument] = value
[ "def", "set_default_expectation_argument", "(", "self", ",", "argument", ",", "value", ")", ":", "# !!! Maybe add a validation check here?", "self", ".", "default_expectation_args", "[", "argument", "]", "=", "value" ]
[ 505, 4 ]
[ 520, 55 ]
python
en
['en', 'fr', 'en']
True
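A short companion sketch for the setter above, under the same PandasDataset assumption.

    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 3]})
    df.set_default_expectation_argument("result_format", "COMPLETE")
    assert df.get_default_expectation_arguments()["result_format"] == "COMPLETE"
    # Expectation calls that omit result_format now default to COMPLETE.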
DataAsset.get_expectation_suite
( self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False, )
Returns _expectation_config as a JSON object, and performs some cleaning along the way. Args: discard_failed_expectations (boolean): \ Only include expectations with success_on_last_run=True in the exported config.  Defaults to `True`. discard_result_format_kwargs (boolean): \ In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`. discard_include_config_kwargs (boolean): \ In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`. discard_catch_exceptions_kwargs (boolean): \ In returned expectation objects, suppress the `catch_exceptions` parameter.  Defaults to `True`. suppress_warnings (boolean): \ If true, do not include warnings in logging information about the operation. suppress_logging (boolean): \ If true, do not create a log entry (useful when using get_expectation_suite programmatically) Returns: An expectation suite. Note: get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \ copy of _expectation_suite, not the original object.
Returns _expectation_config as a JSON object, and performs some cleaning along the way.
def get_expectation_suite( self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False, ): """Returns _expectation_config as a JSON object, and performs some cleaning along the way. Args: discard_failed_expectations (boolean): \ Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`. discard_result_format_kwargs (boolean): \ In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`. discard_include_config_kwargs (boolean): \ In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`. discard_catch_exceptions_kwargs (boolean): \ In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`. suppress_warnings (boolean): \ If true, do not include warnings in logging information about the operation. suppress_logging (boolean): \ If true, do not create a log entry (useful when using get_expectation_suite programmatically) Returns: An expectation suite. Note: get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \ copy of _expectation_suite, not the original object. """ expectation_suite = copy.deepcopy(self._expectation_suite) expectations = expectation_suite.expectations discards = defaultdict(int) if discard_failed_expectations: new_expectations = [] for expectation in expectations: # Note: This is conservative logic. # Instead of retaining expectations IFF success==True, it discard expectations IFF success==False. # In cases where expectation.success is missing or None, expectations are *retained*. # Such a case could occur if expectations were loaded from a config file and never run. if expectation.success_on_last_run is False: discards["failed_expectations"] += 1 else: new_expectations.append(expectation) expectations = new_expectations message = "\t%d expectation(s) included in expectation_suite." % len( expectations ) if discards["failed_expectations"] > 0 and not suppress_warnings: message += ( " Omitting %d expectation(s) that failed when last run; set " "discard_failed_expectations=False to include them." % discards["failed_expectations"] ) for expectation in expectations: # FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation, # which calls _copy_and_clean_up_expectation expectation.success_on_last_run = None if discard_result_format_kwargs: if "result_format" in expectation.kwargs: del expectation.kwargs["result_format"] discards["result_format"] += 1 if discard_include_config_kwargs: if "include_config" in expectation.kwargs: del expectation.kwargs["include_config"] discards["include_config"] += 1 if discard_catch_exceptions_kwargs: if "catch_exceptions" in expectation.kwargs: del expectation.kwargs["catch_exceptions"] discards["catch_exceptions"] += 1 settings_message = "" if discards["result_format"] > 0 and not suppress_warnings: settings_message += " result_format" if discards["include_config"] > 0 and not suppress_warnings: settings_message += " include_config" if discards["catch_exceptions"] > 0 and not suppress_warnings: settings_message += " catch_exceptions" if ( len(settings_message) > 1 ): # Only add this if we added one of the settings above. settings_message += " settings filtered." expectation_suite.expectations = expectations if not suppress_logging: logger.info(message + settings_message) return expectation_suite
[ "def", "get_expectation_suite", "(", "self", ",", "discard_failed_expectations", "=", "True", ",", "discard_result_format_kwargs", "=", "True", ",", "discard_include_config_kwargs", "=", "True", ",", "discard_catch_exceptions_kwargs", "=", "True", ",", "suppress_warnings", "=", "False", ",", "suppress_logging", "=", "False", ",", ")", ":", "expectation_suite", "=", "copy", ".", "deepcopy", "(", "self", ".", "_expectation_suite", ")", "expectations", "=", "expectation_suite", ".", "expectations", "discards", "=", "defaultdict", "(", "int", ")", "if", "discard_failed_expectations", ":", "new_expectations", "=", "[", "]", "for", "expectation", "in", "expectations", ":", "# Note: This is conservative logic.", "# Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.", "# In cases where expectation.success is missing or None, expectations are *retained*.", "# Such a case could occur if expectations were loaded from a config file and never run.", "if", "expectation", ".", "success_on_last_run", "is", "False", ":", "discards", "[", "\"failed_expectations\"", "]", "+=", "1", "else", ":", "new_expectations", ".", "append", "(", "expectation", ")", "expectations", "=", "new_expectations", "message", "=", "\"\\t%d expectation(s) included in expectation_suite.\"", "%", "len", "(", "expectations", ")", "if", "discards", "[", "\"failed_expectations\"", "]", ">", "0", "and", "not", "suppress_warnings", ":", "message", "+=", "(", "\" Omitting %d expectation(s) that failed when last run; set \"", "\"discard_failed_expectations=False to include them.\"", "%", "discards", "[", "\"failed_expectations\"", "]", ")", "for", "expectation", "in", "expectations", ":", "# FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,", "# which calls _copy_and_clean_up_expectation", "expectation", ".", "success_on_last_run", "=", "None", "if", "discard_result_format_kwargs", ":", "if", "\"result_format\"", "in", "expectation", ".", "kwargs", ":", "del", "expectation", ".", "kwargs", "[", "\"result_format\"", "]", "discards", "[", "\"result_format\"", "]", "+=", "1", "if", "discard_include_config_kwargs", ":", "if", "\"include_config\"", "in", "expectation", ".", "kwargs", ":", "del", "expectation", ".", "kwargs", "[", "\"include_config\"", "]", "discards", "[", "\"include_config\"", "]", "+=", "1", "if", "discard_catch_exceptions_kwargs", ":", "if", "\"catch_exceptions\"", "in", "expectation", ".", "kwargs", ":", "del", "expectation", ".", "kwargs", "[", "\"catch_exceptions\"", "]", "discards", "[", "\"catch_exceptions\"", "]", "+=", "1", "settings_message", "=", "\"\"", "if", "discards", "[", "\"result_format\"", "]", ">", "0", "and", "not", "suppress_warnings", ":", "settings_message", "+=", "\" result_format\"", "if", "discards", "[", "\"include_config\"", "]", ">", "0", "and", "not", "suppress_warnings", ":", "settings_message", "+=", "\" include_config\"", "if", "discards", "[", "\"catch_exceptions\"", "]", ">", "0", "and", "not", "suppress_warnings", ":", "settings_message", "+=", "\" catch_exceptions\"", "if", "(", "len", "(", "settings_message", ")", ">", "1", ")", ":", "# Only add this if we added one of the settings above.", "settings_message", "+=", "\" settings filtered.\"", "expectation_suite", ".", "expectations", "=", "expectations", "if", "not", "suppress_logging", ":", "logger", ".", "info", "(", "message", "+", "settings_message", ")", "return", "expectation_suite" ]
[ 543, 4 ]
[ 646, 32 ]
python
en
['en', 'en', 'en']
True
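A sketch of the discard behavior above, assuming the legacy PandasDataset and that interactive evaluation (the default) records success_on_last_run as each expectation is declared.

    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 3]})
    df.expect_column_values_to_be_between("x", min_value=0, max_value=10)  # passes
    df.expect_column_values_to_be_in_set("x", [99])                        # fails

    suite = df.get_expectation_suite()   # discard_failed_expectations=True
    assert len(suite.expectations) == 1  # the failing expectation is omitted

    full = df.get_expectation_suite(discard_failed_expectations=False)
    assert len(full.expectations) == 2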
DataAsset.save_expectation_suite
( self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, )
Writes ``_expectation_config`` to a JSON file. Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \ can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \ pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \ the JSON expectations config. Args: filepath (string): \ The location and name to write the JSON config file to. discard_failed_expectations (boolean): \ If True, excludes expectations that do not return ``success = True``. \ If False, all expectations are written to the JSON config file. discard_result_format_kwargs (boolean): \ If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \ file. discard_include_config_kwargs (boolean): \ If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \ file. discard_catch_exceptions_kwargs (boolean): \ If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \ config file. suppress_warnings (boolean): \ If True, all warnings raised by Great Expectations, as a result of dropped expectations, are \ suppressed.
Writes ``_expectation_config`` to a JSON file.
def save_expectation_suite( self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, ): """Writes ``_expectation_config`` to a JSON file. Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \ can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \ pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \ the JSON expectations config. Args: filepath (string): \ The location and name to write the JSON config file to. discard_failed_expectations (boolean): \ If True, excludes expectations that do not return ``success = True``. \ If False, all expectations are written to the JSON config file. discard_result_format_kwargs (boolean): \ If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \ file. discard_include_config_kwargs (boolean): \ If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \ file. discard_catch_exceptions_kwargs (boolean): \ If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \ config file. suppress_warnings (boolean): \ If True, all warnings raised by Great Expectations, as a result of dropped expectations, are \ suppressed. """ expectation_suite = self.get_expectation_suite( discard_failed_expectations, discard_result_format_kwargs, discard_include_config_kwargs, discard_catch_exceptions_kwargs, suppress_warnings, ) if filepath is None and self._data_context is not None: self._data_context.save_expectation_suite(expectation_suite) elif filepath is not None: with open(filepath, "w") as outfile: json.dump( expectationSuiteSchema.dump(expectation_suite), outfile, indent=2, sort_keys=True, ) else: raise ValueError( "Unable to save config: filepath or data_context must be available." )
[ "def", "save_expectation_suite", "(", "self", ",", "filepath", "=", "None", ",", "discard_failed_expectations", "=", "True", ",", "discard_result_format_kwargs", "=", "True", ",", "discard_include_config_kwargs", "=", "True", ",", "discard_catch_exceptions_kwargs", "=", "True", ",", "suppress_warnings", "=", "False", ",", ")", ":", "expectation_suite", "=", "self", ".", "get_expectation_suite", "(", "discard_failed_expectations", ",", "discard_result_format_kwargs", ",", "discard_include_config_kwargs", ",", "discard_catch_exceptions_kwargs", ",", "suppress_warnings", ",", ")", "if", "filepath", "is", "None", "and", "self", ".", "_data_context", "is", "not", "None", ":", "self", ".", "_data_context", ".", "save_expectation_suite", "(", "expectation_suite", ")", "elif", "filepath", "is", "not", "None", ":", "with", "open", "(", "filepath", ",", "\"w\"", ")", "as", "outfile", ":", "json", ".", "dump", "(", "expectationSuiteSchema", ".", "dump", "(", "expectation_suite", ")", ",", "outfile", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ",", ")", "else", ":", "raise", "ValueError", "(", "\"Unable to save config: filepath or data_context must be available.\"", ")" ]
[ 648, 4 ]
[ 704, 13 ]
python
en
['en', 'en', 'en']
True
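A round-trip sketch for the writer above, assuming the legacy PandasDataset with no attached data_context (so the filepath branch applies); the filename is illustrative.

    import json
    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 3]})
    df.expect_column_values_to_not_be_null("x")

    # With no data_context attached, a filepath is required (see the ValueError above).
    df.save_expectation_suite("my_suite.json")
    with open("my_suite.json") as f:
        print(json.load(f)["expectation_suite_name"])  # "default" unless renamed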
DataAsset.validate
( self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None, )
Generates a JSON-formatted report describing the outcome of all expectations. Use the default expectation_suite=None to validate the expectations config associated with the DataAsset. Args: expectation_suite (json or None): \ If None, uses the expectations config generated with the DataAsset during the current session. \ If a JSON file, validates those expectations. run_name (str): \ Used to identify this validation result as part of a collection of validations. \ See DataContext for more information. data_context (DataContext): \ A DataContext object to use as part of validation for binding evaluation parameters and \ registering validation results. evaluation_parameters (dict or None): \ If None, uses the evaluation_parameters from the expectation_suite provided or as part of the \ data_asset. If a dict, uses the evaluation parameters in the dictionary. catch_exceptions (boolean): \ If True, exceptions raised by tests will not end validation and will be described in the returned \ report. result_format (string or None): \ If None, uses the default value ('BASIC' or as specified). \ If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY', 'BASIC', \ etc.). only_return_failures (boolean): \ If True, expectation results are only returned when ``success = False`` \ Returns: A JSON-formatted dictionary containing a list of the validation results. \ An example of the returned format:: { "results": [ { "unexpected_list": [unexpected_value_1, unexpected_value_2], "expectation_type": "expect_*", "kwargs": { "column": "Column_Name", "output_format": "SUMMARY" }, "success": true, "raised_exception": false, "exception_traceback": null }, { ... (Second expectation results) }, ... (More expectations results) ], "success": true, "statistics": { "evaluated_expectations": n, "successful_expectations": m, "unsuccessful_expectations": n - m, "success_percent": m / n } } Notes: A warning is logged if the configuration object was built with a different version of great expectations than the current environment, or if no version was found in the configuration file. Raises: AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
Generates a JSON-formatted report describing the outcome of all expectations.
def validate( self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None, ): """Generates a JSON-formatted report describing the outcome of all expectations. Use the default expectation_suite=None to validate the expectations config associated with the DataAsset. Args: expectation_suite (json or None): \ If None, uses the expectations config generated with the DataAsset during the current session. \ If a JSON file, validates those expectations. run_name (str): \ Used to identify this validation result as part of a collection of validations. \ See DataContext for more information. data_context (DataContext): \ A DataContext object to use as part of validation for binding evaluation parameters and \ registering validation results. evaluation_parameters (dict or None): \ If None, uses the evaluation_parameters from the expectation_suite provided or as part of the \ data_asset. If a dict, uses the evaluation parameters in the dictionary. catch_exceptions (boolean): \ If True, exceptions raised by tests will not end validation and will be described in the returned \ report. result_format (string or None): \ If None, uses the default value ('BASIC' or as specified). \ If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY', 'BASIC', \ etc.). only_return_failures (boolean): \ If True, expectation results are only returned when ``success = False`` \ Returns: A JSON-formatted dictionary containing a list of the validation results. \ An example of the returned format:: { "results": [ { "unexpected_list": [unexpected_value_1, unexpected_value_2], "expectation_type": "expect_*", "kwargs": { "column": "Column_Name", "output_format": "SUMMARY" }, "success": true, "raised_exception": false, "exception_traceback": null }, { ... (Second expectation results) }, ... (More expectations results) ], "success": true, "statistics": { "evaluated_expectations": n, "successful_expectations": m, "unsuccessful_expectations": n - m, "success_percent": m / n } } Notes: A warning is logged if the configuration object was built with a different version of great expectations than the current environment, or if no version was found in the configuration file. Raises: AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError """ try: validation_time = datetime.datetime.now(datetime.timezone.utc).strftime( "%Y%m%dT%H%M%S.%fZ" ) assert not (run_id and run_name) and not ( run_id and run_time ), "Please provide either a run_id or run_name and/or run_time." if isinstance(run_id, str) and not run_name: warnings.warn( "String run_ids will be deprecated in the future. Please provide a run_id of type " "RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name " "and run_time (both optional). Instead of providing a run_id, you may also provide" "run_name and run_time separately.", DeprecationWarning, ) try: run_time = parse(run_id) except (ValueError, TypeError): pass run_id = RunIdentifier(run_name=run_id, run_time=run_time) elif isinstance(run_id, dict): run_id = RunIdentifier(**run_id) elif not isinstance(run_id, RunIdentifier): run_id = RunIdentifier(run_name=run_name, run_time=run_time) self._active_validation = True # If a different validation data context was provided, override validate__data_context = self._data_context if data_context is None and self._data_context is not None: data_context = self._data_context elif data_context is not None: # temporarily set self._data_context so it is used inside the expectation decorator self._data_context = data_context results = [] if expectation_suite is None: expectation_suite = self.get_expectation_suite( discard_failed_expectations=False, discard_result_format_kwargs=False, discard_include_config_kwargs=False, discard_catch_exceptions_kwargs=False, ) elif isinstance(expectation_suite, str): try: with open(expectation_suite) as infile: expectation_suite = expectationSuiteSchema.loads(infile.read()) except ValidationError: raise except OSError: raise GreatExpectationsError( "Unable to load expectation suite: IO error while reading %s" % expectation_suite ) elif not isinstance(expectation_suite, ExpectationSuite): logger.error( "Unable to validate using the provided value for expectation suite; does it need to be " "loaded from a dictionary?" ) if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info( self ), success=False, ) return ExpectationValidationResult(success=False) # Evaluation parameter priority is # 1. from provided parameters # 2. from expectation configuration # 3. from data context # So, we load them in reverse order if data_context is not None: runtime_evaluation_parameters = ( data_context.evaluation_parameter_store.get_bind_params(run_id) ) else: runtime_evaluation_parameters = {} if expectation_suite.evaluation_parameters: runtime_evaluation_parameters.update( expectation_suite.evaluation_parameters ) if evaluation_parameters is not None: runtime_evaluation_parameters.update(evaluation_parameters) # Convert evaluation parameters to be json-serializable runtime_evaluation_parameters = recursively_convert_to_json_serializable( runtime_evaluation_parameters ) # Warn if our version is different from the version in the configuration # TODO: Deprecate "great_expectations.__version__" suite_ge_version = expectation_suite.meta.get( "great_expectations_version" ) or expectation_suite.meta.get("great_expectations.__version__") ### # This is an early example of what will become part of the ValidationOperator # This operator would be dataset-semantic aware # Adding now to simply ensure we can be slightly better at ordering our expectation evaluation ### # Group expectations by column columns = {} for expectation in expectation_suite.expectations: if "column" in expectation.kwargs and isinstance( expectation.kwargs["column"], Hashable ): column = expectation.kwargs["column"] else: column = "_nocolumn" if column not in columns: columns[column] = [] columns[column].append(expectation) expectations_to_evaluate = [] for col in columns: expectations_to_evaluate.extend(columns[col]) for expectation in expectations_to_evaluate: try: # copy the config so we can modify it below if needed expectation = copy.deepcopy(expectation) expectation_method = getattr(self, expectation.expectation_type) if result_format is not None: expectation.kwargs.update({"result_format": result_format}) # A missing parameter will raise an EvaluationParameterError ( evaluation_args, substituted_parameters, ) = build_evaluation_parameters( expectation.kwargs, runtime_evaluation_parameters, self._config.get("interactive_evaluation", True), self._data_context, ) result = expectation_method( catch_exceptions=catch_exceptions, include_config=True, **evaluation_args ) except Exception as err: if catch_exceptions: raised_exception = True exception_traceback = traceback.format_exc() result = ExpectationValidationResult( success=False, exception_info={ "raised_exception": raised_exception, "exception_traceback": exception_traceback, "exception_message": str(err), }, ) else: raise err # if include_config: result.expectation_config = expectation # Add an empty exception_info object if no exception was caught if catch_exceptions and result.exception_info is None: result.exception_info = { "raised_exception": False, "exception_traceback": None, "exception_message": None, } results.append(result) statistics = _calc_validation_statistics(results) if only_return_failures: abbrev_results = [] for exp in results: if not exp.success: abbrev_results.append(exp) results = abbrev_results expectation_suite_name = expectation_suite.expectation_suite_name result = ExpectationSuiteValidationResult( results=results, success=statistics.success, statistics={ "evaluated_expectations": statistics.evaluated_expectations, "successful_expectations": statistics.successful_expectations, "unsuccessful_expectations": statistics.unsuccessful_expectations, "success_percent": statistics.success_percent, }, evaluation_parameters=runtime_evaluation_parameters, meta={ "great_expectations_version": ge_version, "expectation_suite_name": expectation_suite_name, "run_id": run_id, "batch_kwargs": self.batch_kwargs, "batch_markers": self.batch_markers, "batch_parameters": self.batch_parameters, "validation_time": validation_time, }, ) self._data_context = validate__data_context except Exception: if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=False, ) raise finally: self._active_validation = False if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=True, ) return result
[ "def", "validate", "(", "self", ",", "expectation_suite", "=", "None", ",", "run_id", "=", "None", ",", "data_context", "=", "None", ",", "evaluation_parameters", "=", "None", ",", "catch_exceptions", "=", "True", ",", "result_format", "=", "None", ",", "only_return_failures", "=", "False", ",", "run_name", "=", "None", ",", "run_time", "=", "None", ",", ")", ":", "try", ":", "validation_time", "=", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", ".", "strftime", "(", "\"%Y%m%dT%H%M%S.%fZ\"", ")", "assert", "not", "(", "run_id", "and", "run_name", ")", "and", "not", "(", "run_id", "and", "run_time", ")", ",", "\"Please provide either a run_id or run_name and/or run_time.\"", "if", "isinstance", "(", "run_id", ",", "str", ")", "and", "not", "run_name", ":", "warnings", ".", "warn", "(", "\"String run_ids will be deprecated in the future. Please provide a run_id of type \"", "\"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name \"", "\"and run_time (both optional). Instead of providing a run_id, you may also provide\"", "\"run_name and run_time separately.\"", ",", "DeprecationWarning", ",", ")", "try", ":", "run_time", "=", "parse", "(", "run_id", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "run_id", "=", "RunIdentifier", "(", "run_name", "=", "run_id", ",", "run_time", "=", "run_time", ")", "elif", "isinstance", "(", "run_id", ",", "dict", ")", ":", "run_id", "=", "RunIdentifier", "(", "*", "*", "run_id", ")", "elif", "not", "isinstance", "(", "run_id", ",", "RunIdentifier", ")", ":", "run_id", "=", "RunIdentifier", "(", "run_name", "=", "run_name", ",", "run_time", "=", "run_time", ")", "self", ".", "_active_validation", "=", "True", "# If a different validation data context was provided, override", "validate__data_context", "=", "self", ".", "_data_context", "if", "data_context", "is", "None", "and", "self", ".", "_data_context", "is", "not", "None", ":", "data_context", "=", "self", ".", "_data_context", "elif", "data_context", "is", "not", "None", ":", "# temporarily set self._data_context so it is used inside the expectation decorator", "self", ".", "_data_context", "=", "data_context", "results", "=", "[", "]", "if", "expectation_suite", "is", "None", ":", "expectation_suite", "=", "self", ".", "get_expectation_suite", "(", "discard_failed_expectations", "=", "False", ",", "discard_result_format_kwargs", "=", "False", ",", "discard_include_config_kwargs", "=", "False", ",", "discard_catch_exceptions_kwargs", "=", "False", ",", ")", "elif", "isinstance", "(", "expectation_suite", ",", "str", ")", ":", "try", ":", "with", "open", "(", "expectation_suite", ")", "as", "infile", ":", "expectation_suite", "=", "expectationSuiteSchema", ".", "loads", "(", "infile", ".", "read", "(", ")", ")", "except", "ValidationError", ":", "raise", "except", "OSError", ":", "raise", "GreatExpectationsError", "(", "\"Unable to load expectation suite: IO error while reading %s\"", "%", "expectation_suite", ")", "elif", "not", "isinstance", "(", "expectation_suite", ",", "ExpectationSuite", ")", ":", "logger", ".", "error", "(", "\"Unable to validate using the provided value for expectation suite; does it need to be \"", "\"loaded from a dictionary?\"", ")", "if", "getattr", "(", "data_context", ",", "\"_usage_statistics_handler\"", ",", "None", ")", ":", "handler", "=", "data_context", ".", "_usage_statistics_handler", "handler", ".", "send_usage_message", "(", "event", "=", "\"data_asset.validate\"", ",", "event_payload", "=", "handler", ".", "_batch_anonymizer", ".", "anonymize_batch_info", "(", "self", ")", ",", "success", "=", "False", ",", ")", "return", "ExpectationValidationResult", "(", "success", "=", "False", ")", "# Evaluation parameter priority is", "# 1. from provided parameters", "# 2. from expectation configuration", "# 3. from data context", "# So, we load them in reverse order", "if", "data_context", "is", "not", "None", ":", "runtime_evaluation_parameters", "=", "(", "data_context", ".", "evaluation_parameter_store", ".", "get_bind_params", "(", "run_id", ")", ")", "else", ":", "runtime_evaluation_parameters", "=", "{", "}", "if", "expectation_suite", ".", "evaluation_parameters", ":", "runtime_evaluation_parameters", ".", "update", "(", "expectation_suite", ".", "evaluation_parameters", ")", "if", "evaluation_parameters", "is", "not", "None", ":", "runtime_evaluation_parameters", ".", "update", "(", "evaluation_parameters", ")", "# Convert evaluation parameters to be json-serializable", "runtime_evaluation_parameters", "=", "recursively_convert_to_json_serializable", "(", "runtime_evaluation_parameters", ")", "# Warn if our version is different from the version in the configuration", "# TODO: Deprecate \"great_expectations.__version__\"", "suite_ge_version", "=", "expectation_suite", ".", "meta", ".", "get", "(", "\"great_expectations_version\"", ")", "or", "expectation_suite", ".", "meta", ".", "get", "(", "\"great_expectations.__version__\"", ")", "###", "# This is an early example of what will become part of the ValidationOperator", "# This operator would be dataset-semantic aware", "# Adding now to simply ensure we can be slightly better at ordering our expectation evaluation", "###", "# Group expectations by column", "columns", "=", "{", "}", "for", "expectation", "in", "expectation_suite", ".", "expectations", ":", "if", "\"column\"", "in", "expectation", ".", "kwargs", "and", "isinstance", "(", "expectation", ".", "kwargs", "[", "\"column\"", "]", ",", "Hashable", ")", ":", "column", "=", "expectation", ".", "kwargs", "[", "\"column\"", "]", "else", ":", "column", "=", "\"_nocolumn\"", "if", "column", "not", "in", "columns", ":", "columns", "[", "column", "]", "=", "[", "]", "columns", "[", "column", "]", ".", "append", "(", "expectation", ")", "expectations_to_evaluate", "=", "[", "]", "for", "col", "in", "columns", ":", "expectations_to_evaluate", ".", "extend", "(", "columns", "[", "col", "]", ")", "for", "expectation", "in", "expectations_to_evaluate", ":", "try", ":", "# copy the config so we can modify it below if needed", "expectation", "=", "copy", ".", "deepcopy", "(", "expectation", ")", "expectation_method", "=", "getattr", "(", "self", ",", "expectation", ".", "expectation_type", ")", "if", "result_format", "is", "not", "None", ":", "expectation", ".", "kwargs", ".", "update", "(", "{", "\"result_format\"", ":", "result_format", "}", ")", "# A missing parameter will raise an EvaluationParameterError", "(", "evaluation_args", ",", "substituted_parameters", ",", ")", "=", "build_evaluation_parameters", "(", "expectation", ".", "kwargs", ",", "runtime_evaluation_parameters", ",", "self", ".", "_config", ".", "get", "(", "\"interactive_evaluation\"", ",", "True", ")", ",", "self", ".", "_data_context", ",", ")", "result", "=", "expectation_method", "(", "catch_exceptions", "=", "catch_exceptions", ",", "include_config", "=", "True", ",", "*", "*", "evaluation_args", ")", "except", "Exception", "as", "err", ":", "if", "catch_exceptions", ":", "raised_exception", "=", "True", "exception_traceback", "=", "traceback", ".", "format_exc", "(", ")", "result", "=", "ExpectationValidationResult", "(", "success", "=", "False", ",", "exception_info", "=", "{", "\"raised_exception\"", ":", "raised_exception", ",", "\"exception_traceback\"", ":", "exception_traceback", ",", "\"exception_message\"", ":", "str", "(", "err", ")", ",", "}", ",", ")", "else", ":", "raise", "err", "# if include_config:", "result", ".", "expectation_config", "=", "expectation", "# Add an empty exception_info object if no exception was caught", "if", "catch_exceptions", "and", "result", ".", "exception_info", "is", "None", ":", "result", ".", "exception_info", "=", "{", "\"raised_exception\"", ":", "False", ",", "\"exception_traceback\"", ":", "None", ",", "\"exception_message\"", ":", "None", ",", "}", "results", ".", "append", "(", "result", ")", "statistics", "=", "_calc_validation_statistics", "(", "results", ")", "if", "only_return_failures", ":", "abbrev_results", "=", "[", "]", "for", "exp", "in", "results", ":", "if", "not", "exp", ".", "success", ":", "abbrev_results", ".", "append", "(", "exp", ")", "results", "=", "abbrev_results", "expectation_suite_name", "=", "expectation_suite", ".", "expectation_suite_name", "result", "=", "ExpectationSuiteValidationResult", "(", "results", "=", "results", ",", "success", "=", "statistics", ".", "success", ",", "statistics", "=", "{", "\"evaluated_expectations\"", ":", "statistics", ".", "evaluated_expectations", ",", "\"successful_expectations\"", ":", "statistics", ".", "successful_expectations", ",", "\"unsuccessful_expectations\"", ":", "statistics", ".", "unsuccessful_expectations", ",", "\"success_percent\"", ":", "statistics", ".", "success_percent", ",", "}", ",", "evaluation_parameters", "=", "runtime_evaluation_parameters", ",", "meta", "=", "{", "\"great_expectations_version\"", ":", "ge_version", ",", "\"expectation_suite_name\"", ":", "expectation_suite_name", ",", "\"run_id\"", ":", "run_id", ",", "\"batch_kwargs\"", ":", "self", ".", "batch_kwargs", ",", "\"batch_markers\"", ":", "self", ".", "batch_markers", ",", "\"batch_parameters\"", ":", "self", ".", "batch_parameters", ",", "\"validation_time\"", ":", "validation_time", ",", "}", ",", ")", "self", ".", "_data_context", "=", "validate__data_context", "except", "Exception", ":", "if", "getattr", "(", "data_context", ",", "\"_usage_statistics_handler\"", ",", "None", ")", ":", "handler", "=", "data_context", ".", "_usage_statistics_handler", "handler", ".", "send_usage_message", "(", "event", "=", "\"data_asset.validate\"", ",", "event_payload", "=", "handler", ".", "_batch_anonymizer", ".", "anonymize_batch_info", "(", "self", ")", ",", "success", "=", "False", ",", ")", "raise", "finally", ":", "self", ".", "_active_validation", "=", "False", "if", "getattr", "(", "data_context", ",", "\"_usage_statistics_handler\"", ",", "None", ")", ":", "handler", "=", "data_context", ".", "_usage_statistics_handler", "handler", ".", "send_usage_message", "(", "event", "=", "\"data_asset.validate\"", ",", "event_payload", "=", "handler", ".", "_batch_anonymizer", ".", "anonymize_batch_info", "(", "self", ")", ",", "success", "=", "True", ",", ")", "return", "result" ]
[ 706, 4 ]
[ 1020, 21 ]
python
en
['en', 'en', 'en']
True
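A sketch of a validate call, assuming the legacy PandasDataset; run_name is used instead of a string run_id (which the code above deprecates), and the sample data is illustrative.

    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, None]})  # the None becomes a null value
    df.expect_column_values_to_not_be_null("x")

    results = df.validate(
        run_name="nightly",          # preferred over a string run_id (see warning above)
        result_format="SUMMARY",
        only_return_failures=True,
    )
    print(results.success)                               # False: one null value
    print(results.statistics["evaluated_expectations"])  # 1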
DataAsset.get_evaluation_parameter
(self, parameter_name, default_value=None)
Get an evaluation parameter value that has been stored in meta. Args: parameter_name (string): The name of the parameter to store. default_value (any): The default value to be returned if the parameter is not found. Returns: The current value of the evaluation parameter.
Get an evaluation parameter value that has been stored in meta.
def get_evaluation_parameter(self, parameter_name, default_value=None): """Get an evaluation parameter value that has been stored in meta. Args: parameter_name (string): The name of the parameter to store. default_value (any): The default value to be returned if the parameter is not found. Returns: The current value of the evaluation parameter. """ if parameter_name in self._expectation_suite.evaluation_parameters: return self._expectation_suite.evaluation_parameters[parameter_name] else: return default_value
[ "def", "get_evaluation_parameter", "(", "self", ",", "parameter_name", ",", "default_value", "=", "None", ")", ":", "if", "parameter_name", "in", "self", ".", "_expectation_suite", ".", "evaluation_parameters", ":", "return", "self", ".", "_expectation_suite", ".", "evaluation_parameters", "[", "parameter_name", "]", "else", ":", "return", "default_value" ]
[ 1022, 4 ]
[ 1035, 32 ]
python
en
['en', 'en', 'en']
True
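A short sketch of the lookup and default_value fallback above, under the same PandasDataset assumption.

    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 3]})
    df.set_evaluation_parameter("min_allowed", 0)
    assert df.get_evaluation_parameter("min_allowed") == 0
    # Unknown names fall back to default_value rather than raising.
    assert df.get_evaluation_parameter("missing", default_value=42) == 42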
DataAsset.set_evaluation_parameter
(self, parameter_name, parameter_value)
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations. Args: parameter_name (string): The name of the kwarg to be replaced at evaluation time parameter_value (any): The value to be used
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations.
def set_evaluation_parameter(self, parameter_name, parameter_value): """Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations. Args: parameter_name (string): The name of the kwarg to be replaced at evaluation time parameter_value (any): The value to be used """ self._expectation_suite.evaluation_parameters.update( {parameter_name: parameter_value} )
[ "def", "set_evaluation_parameter", "(", "self", ",", "parameter_name", ",", "parameter_value", ")", ":", "self", ".", "_expectation_suite", ".", "evaluation_parameters", ".", "update", "(", "{", "parameter_name", ":", "parameter_value", "}", ")" ]
[ 1037, 4 ]
[ 1047, 9 ]
python
en
['en', 'en', 'en']
True
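A sketch of the parameterized-expectation use mentioned above. It assumes the legacy convention that kwargs of the form {"$PARAMETER": name} are resolved at evaluation time by build_evaluation_parameters (referenced in validate above); treat that convention, and the PandasDataset setup, as assumptions.

    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 3]})
    df.set_evaluation_parameter("max_allowed", 10)

    # max_value is supplied as a named parameter rather than a literal.
    result = df.expect_column_values_to_be_between(
        "x", min_value=0, max_value={"$PARAMETER": "max_allowed"}
    )
    assert result.success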
DataAsset.expectation_suite_name
(self)
Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.
Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.
def expectation_suite_name(self): """Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.""" return self._expectation_suite.expectation_suite_name
[ "def", "expectation_suite_name", "(", "self", ")", ":", "return", "self", ".", "_expectation_suite", ".", "expectation_suite_name" ]
[ 1072, 4 ]
[ 1074, 61 ]
python
en
['en', 'en', 'en']
True
DataAsset.expectation_suite_name
(self, expectation_suite_name)
Sets the expectation_suite name of this data_asset as stored in the expectations configuration.
Sets the expectation_suite name of this data_asset as stored in the expectations configuration.
def expectation_suite_name(self, expectation_suite_name): """Sets the expectation_suite name of this data_asset as stored in the expectations configuration.""" self._expectation_suite.expectation_suite_name = expectation_suite_name
[ "def", "expectation_suite_name", "(", "self", ",", "expectation_suite_name", ")", ":", "self", ".", "_expectation_suite", ".", "expectation_suite_name", "=", "expectation_suite_name" ]
[ 1077, 4 ]
[ 1079, 79 ]
python
en
['en', 'en', 'en']
True
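A combined sketch of the getter and setter above, assuming the legacy PandasDataset; the suite name "my_asset.warning" is illustrative.

    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 3]})
    assert df.expectation_suite_name == "default"  # seeded by _initialize_expectations
    df.expectation_suite_name = "my_asset.warning"
    assert df.expectation_suite_name == "my_asset.warning"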
DataAsset._format_map_output
( self, result_format, success, element_count, nonnull_count, unexpected_count, unexpected_list, unexpected_index_list, )
Helper function to construct expectation result objects for map_expectations (such as column_map_expectation and file_lines_map_expectation). Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE. In each case, the object returned has a different set of populated fields. See :ref:`result_format` for more information. This function handles the logic for mapping those fields for column_map_expectations.
Helper function to construct expectation result objects for map_expectations (such as column_map_expectation and file_lines_map_expectation).
def _format_map_output( self, result_format, success, element_count, nonnull_count, unexpected_count, unexpected_list, unexpected_index_list, ): """Helper function to construct expectation result objects for map_expectations (such as column_map_expectation and file_lines_map_expectation). Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE. In each case, the object returned has a different set of populated fields. See :ref:`result_format` for more information. This function handles the logic for mapping those fields for column_map_expectations. """ # NB: unexpected_count parameter is explicit some implementing classes may limit the length of unexpected_list # Retain support for string-only output formats: result_format = parse_result_format(result_format) # Incrementally add to result and return when all values for the specified level are present return_obj = {"success": success} if result_format["result_format"] == "BOOLEAN_ONLY": return return_obj missing_count = element_count - nonnull_count if element_count > 0: missing_percent = missing_count / element_count * 100 if nonnull_count > 0: unexpected_percent_total = unexpected_count / element_count * 100 unexpected_percent_nonmissing = unexpected_count / nonnull_count * 100 else: unexpected_percent_total = None unexpected_percent_nonmissing = None else: missing_percent = None unexpected_percent_total = None unexpected_percent_nonmissing = None return_obj["result"] = { "element_count": element_count, "missing_count": missing_count, "missing_percent": missing_percent, "unexpected_count": unexpected_count, "unexpected_percent": unexpected_percent_nonmissing, "unexpected_percent_total": unexpected_percent_total, "unexpected_percent_nonmissing": unexpected_percent_nonmissing, "partial_unexpected_list": unexpected_list[ : result_format["partial_unexpected_count"] ], } if result_format["result_format"] == "BASIC": return return_obj # Try to return the most common values, if possible. if 0 < result_format.get("partial_unexpected_count"): try: partial_unexpected_counts = [ {"value": key, "count": value} for key, value in sorted( Counter(unexpected_list).most_common( result_format["partial_unexpected_count"] ), key=lambda x: (-x[1], str(x[0])), ) ] except TypeError: partial_unexpected_counts = [] if "details" not in return_obj["result"]: return_obj["result"]["details"] = {} return_obj["result"]["details"][ "partial_unexpected_counts_error" ] = "partial_unexpected_counts requested, but requires a hashable type" finally: return_obj["result"].update( { "partial_unexpected_index_list": unexpected_index_list[ : result_format["partial_unexpected_count"] ] if unexpected_index_list is not None else None, "partial_unexpected_counts": partial_unexpected_counts, } ) if result_format["result_format"] == "SUMMARY": return return_obj return_obj["result"].update( { "unexpected_list": unexpected_list, "unexpected_index_list": unexpected_index_list, } ) if result_format["result_format"] == "COMPLETE": return return_obj raise ValueError( "Unknown result_format {}.".format(result_format["result_format"]) )
[ "def", "_format_map_output", "(", "self", ",", "result_format", ",", "success", ",", "element_count", ",", "nonnull_count", ",", "unexpected_count", ",", "unexpected_list", ",", "unexpected_index_list", ",", ")", ":", "# NB: unexpected_count parameter is explicit some implementing classes may limit the length of unexpected_list", "# Retain support for string-only output formats:", "result_format", "=", "parse_result_format", "(", "result_format", ")", "# Incrementally add to result and return when all values for the specified level are present", "return_obj", "=", "{", "\"success\"", ":", "success", "}", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"BOOLEAN_ONLY\"", ":", "return", "return_obj", "missing_count", "=", "element_count", "-", "nonnull_count", "if", "element_count", ">", "0", ":", "missing_percent", "=", "missing_count", "/", "element_count", "*", "100", "if", "nonnull_count", ">", "0", ":", "unexpected_percent_total", "=", "unexpected_count", "/", "element_count", "*", "100", "unexpected_percent_nonmissing", "=", "unexpected_count", "/", "nonnull_count", "*", "100", "else", ":", "unexpected_percent_total", "=", "None", "unexpected_percent_nonmissing", "=", "None", "else", ":", "missing_percent", "=", "None", "unexpected_percent_total", "=", "None", "unexpected_percent_nonmissing", "=", "None", "return_obj", "[", "\"result\"", "]", "=", "{", "\"element_count\"", ":", "element_count", ",", "\"missing_count\"", ":", "missing_count", ",", "\"missing_percent\"", ":", "missing_percent", ",", "\"unexpected_count\"", ":", "unexpected_count", ",", "\"unexpected_percent\"", ":", "unexpected_percent_nonmissing", ",", "\"unexpected_percent_total\"", ":", "unexpected_percent_total", ",", "\"unexpected_percent_nonmissing\"", ":", "unexpected_percent_nonmissing", ",", "\"partial_unexpected_list\"", ":", "unexpected_list", "[", ":", "result_format", "[", "\"partial_unexpected_count\"", "]", "]", ",", "}", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"BASIC\"", ":", "return", "return_obj", "# Try to return the most common values, if possible.", "if", "0", "<", "result_format", ".", "get", "(", "\"partial_unexpected_count\"", ")", ":", "try", ":", "partial_unexpected_counts", "=", "[", "{", "\"value\"", ":", "key", ",", "\"count\"", ":", "value", "}", "for", "key", ",", "value", "in", "sorted", "(", "Counter", "(", "unexpected_list", ")", ".", "most_common", "(", "result_format", "[", "\"partial_unexpected_count\"", "]", ")", ",", "key", "=", "lambda", "x", ":", "(", "-", "x", "[", "1", "]", ",", "str", "(", "x", "[", "0", "]", ")", ")", ",", ")", "]", "except", "TypeError", ":", "partial_unexpected_counts", "=", "[", "]", "if", "\"details\"", "not", "in", "return_obj", "[", "\"result\"", "]", ":", "return_obj", "[", "\"result\"", "]", "[", "\"details\"", "]", "=", "{", "}", "return_obj", "[", "\"result\"", "]", "[", "\"details\"", "]", "[", "\"partial_unexpected_counts_error\"", "]", "=", "\"partial_unexpected_counts requested, but requires a hashable type\"", "finally", ":", "return_obj", "[", "\"result\"", "]", ".", "update", "(", "{", "\"partial_unexpected_index_list\"", ":", "unexpected_index_list", "[", ":", "result_format", "[", "\"partial_unexpected_count\"", "]", "]", "if", "unexpected_index_list", "is", "not", "None", "else", "None", ",", "\"partial_unexpected_counts\"", ":", "partial_unexpected_counts", ",", "}", ")", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"SUMMARY\"", ":", "return", "return_obj", "return_obj", "[", "\"result\"", "]", ".", "update", "(", "{", "\"unexpected_list\"", ":", "unexpected_list", ",", "\"unexpected_index_list\"", ":", "unexpected_index_list", ",", "}", ")", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "return", "return_obj", "raise", "ValueError", "(", "\"Unknown result_format {}.\"", ".", "format", "(", "result_format", "[", "\"result_format\"", "]", ")", ")" ]
[ 1087, 4 ]
[ 1195, 9 ]
python
en
['en', 'en', 'en']
True
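A sketch of the tiered result formats assembled by the helper above, observed through an ordinary expectation call; it assumes the legacy PandasDataset, and the printed values are what the code above implies for this sample data.

    from great_expectations.dataset import PandasDataset

    df = PandasDataset({"x": [1, 2, 99]})

    r = df.expect_column_values_to_be_between("x", 0, 10, result_format="BOOLEAN_ONLY")
    print(r.success)  # False; at this level only "success" is populated

    r = df.expect_column_values_to_be_between("x", 0, 10, result_format="BASIC")
    print(r.result["unexpected_count"], r.result["partial_unexpected_list"])  # 1 [99]

    r = df.expect_column_values_to_be_between("x", 0, 10, result_format="COMPLETE")
    print(r.result["unexpected_index_list"])  # [2]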
DataAsset._calc_map_expectation_success
(self, success_count, nonnull_count, mostly)
Calculate success and percent_success for column_map_expectations Args: success_count (int): \ The number of successful values in the column nonnull_count (int): \ The number of nonnull values in the column mostly (float or None): \ A value between 0 and 1 (or None), indicating the fraction of successes required to pass the \ expectation as a whole. If mostly=None, then all values must succeed in order for the expectation as \ a whole to succeed. Returns: success (boolean), percent_success (float)
Calculate success and percent_success for column_map_expectations
def _calc_map_expectation_success(self, success_count, nonnull_count, mostly): """Calculate success and percent_success for column_map_expectations Args: success_count (int): \ The number of successful values in the column nonnull_count (int): \ The number of nonnull values in the column mostly (float or None): \ A value between 0 and 1 (or None), indicating the fraction of successes required to pass the \ expectation as a whole. If mostly=None, then all values must succeed in order for the expectation as \ a whole to succeed. Returns: success (boolean), percent_success (float) """ if isinstance(success_count, decimal.Decimal): raise ValueError( "success_count must not be a decimal; check your db configuration" ) if isinstance(nonnull_count, decimal.Decimal): raise ValueError( "nonnull_count must not be a decimal; check your db configuration" ) if nonnull_count > 0: percent_success = success_count / nonnull_count if mostly is not None: success = bool(percent_success >= mostly) else: success = bool(nonnull_count - success_count == 0) else: success = True percent_success = None return success, percent_success
[ "def", "_calc_map_expectation_success", "(", "self", ",", "success_count", ",", "nonnull_count", ",", "mostly", ")", ":", "if", "isinstance", "(", "success_count", ",", "decimal", ".", "Decimal", ")", ":", "raise", "ValueError", "(", "\"success_count must not be a decimal; check your db configuration\"", ")", "if", "isinstance", "(", "nonnull_count", ",", "decimal", ".", "Decimal", ")", ":", "raise", "ValueError", "(", "\"nonnull_count must not be a decimal; check your db configuration\"", ")", "if", "nonnull_count", ">", "0", ":", "percent_success", "=", "success_count", "/", "nonnull_count", "if", "mostly", "is", "not", "None", ":", "success", "=", "bool", "(", "percent_success", ">=", "mostly", ")", "else", ":", "success", "=", "bool", "(", "nonnull_count", "-", "success_count", "==", "0", ")", "else", ":", "success", "=", "True", "percent_success", "=", "None", "return", "success", ",", "percent_success" ]
[ 1197, 4 ]
[ 1234, 39 ]
python
en
['en', 'en', 'en']
True
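A standalone arithmetic sketch mirroring the "mostly" logic above; this is not a GE API call, just the same computation extracted for illustration.

    # Mirrors _calc_map_expectation_success: fraction-based pass/fail.
    def calc_success(success_count, nonnull_count, mostly=None):
        if nonnull_count > 0:
            percent_success = success_count / nonnull_count
            if mostly is not None:
                return percent_success >= mostly, percent_success
            return success_count == nonnull_count, percent_success
        return True, None  # vacuously successful on an all-null column

    print(calc_success(9, 10, mostly=0.8))  # (True, 0.9)
    print(calc_success(9, 10))              # (False, 0.9): every value must pass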
DataAsset.test_expectation_function
(self, function, *args, **kwargs)
Test a generic expectation function Args: function (func): The function to be tested. (Must be a valid expectation function.) *args : Positional arguments to be passed to the function **kwargs : Keyword arguments to be passed to the function Returns: A JSON-serializable expectation result object. Notes: This function is a thin layer to allow quick testing of new expectation functions, without having to \ define custom classes, etc. To use developed expectations from the command-line tool, you will still need \ to define custom classes, etc. Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
Test a generic expectation function
def test_expectation_function(self, function, *args, **kwargs): """Test a generic expectation function Args: function (func): The function to be tested. (Must be a valid expectation function.) *args : Positional arguments to be passed to the function **kwargs : Keyword arguments to be passed to the function Returns: A JSON-serializable expectation result object. Notes: This function is a thin layer to allow quick testing of new expectation functions, without having to \ define custom classes, etc. To use developed expectations from the command-line tool, you will still need \ to define custom classes, etc. Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information. """ argspec = inspect.getfullargspec(function)[0][1:] new_function = self.expectation(argspec)(function) return new_function(self, *args, **kwargs)
[ "def", "test_expectation_function", "(", "self", ",", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "function", ")", "[", "0", "]", "[", "1", ":", "]", "new_function", "=", "self", ".", "expectation", "(", "argspec", ")", "(", "function", ")", "return", "new_function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
[ 1242, 4 ]
[ 1264, 50 ]
python
en
['ro', 'en', 'en']
True
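A quick usage sketch for the method above. `expect_column_mean_to_be_positive` is a hypothetical expectation function (a valid expectation function takes `self` as its first argument and returns a dict containing at least a "success" key); the legacy PandasDataset API is assumed.

import pandas as pd
from great_expectations.dataset import PandasDataset

def expect_column_mean_to_be_positive(self, column):
    # Hypothetical expectation function: the returned dict must carry "success".
    return {"success": bool(self[column].mean() > 0)}

df = PandasDataset(pd.DataFrame({"a": [1, 2, 3]}))
result = df.test_expectation_function(expect_column_mean_to_be_positive, "a")
assert result.success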
test_get_available_data_asset_names_with_multiple_datasources_with_and_without_generators
( empty_data_context, sa )
Test datasources with and without generators.
Test datasources with and without generators.
def test_get_available_data_asset_names_with_multiple_datasources_with_and_without_generators( empty_data_context, sa ): """Test datasources with and without generators.""" # requires sqlalchemy because it instantiates sqlalchemydatasource context = empty_data_context connection_kwargs = {"credentials": {"drivername": "sqlite"}} context.add_datasource( "first", class_name="SqlAlchemyDatasource", batch_kwargs_generators={ "foo": { "class_name": "TableBatchKwargsGenerator", } }, **connection_kwargs, ) context.add_datasource( "second", class_name="SqlAlchemyDatasource", **connection_kwargs ) context.add_datasource( "third", class_name="SqlAlchemyDatasource", batch_kwargs_generators={ "bar": { "class_name": "TableBatchKwargsGenerator", } }, **connection_kwargs, ) obs = context.get_available_data_asset_names() assert isinstance(obs, dict) assert set(obs.keys()) == {"first", "second", "third"} assert obs == { "first": {"foo": {"is_complete_list": True, "names": []}}, "second": {}, "third": {"bar": {"is_complete_list": True, "names": []}}, }
[ "def", "test_get_available_data_asset_names_with_multiple_datasources_with_and_without_generators", "(", "empty_data_context", ",", "sa", ")", ":", "# requires sqlalchemy because it instantiates sqlalchemydatasource", "context", "=", "empty_data_context", "connection_kwargs", "=", "{", "\"credentials\"", ":", "{", "\"drivername\"", ":", "\"sqlite\"", "}", "}", "context", ".", "add_datasource", "(", "\"first\"", ",", "class_name", "=", "\"SqlAlchemyDatasource\"", ",", "batch_kwargs_generators", "=", "{", "\"foo\"", ":", "{", "\"class_name\"", ":", "\"TableBatchKwargsGenerator\"", ",", "}", "}", ",", "*", "*", "connection_kwargs", ",", ")", "context", ".", "add_datasource", "(", "\"second\"", ",", "class_name", "=", "\"SqlAlchemyDatasource\"", ",", "*", "*", "connection_kwargs", ")", "context", ".", "add_datasource", "(", "\"third\"", ",", "class_name", "=", "\"SqlAlchemyDatasource\"", ",", "batch_kwargs_generators", "=", "{", "\"bar\"", ":", "{", "\"class_name\"", ":", "\"TableBatchKwargsGenerator\"", ",", "}", "}", ",", "*", "*", "connection_kwargs", ",", ")", "obs", "=", "context", ".", "get_available_data_asset_names", "(", ")", "assert", "isinstance", "(", "obs", ",", "dict", ")", "assert", "set", "(", "obs", ".", "keys", "(", ")", ")", "==", "{", "\"first\"", ",", "\"second\"", ",", "\"third\"", "}", "assert", "obs", "==", "{", "\"first\"", ":", "{", "\"foo\"", ":", "{", "\"is_complete_list\"", ":", "True", ",", "\"names\"", ":", "[", "]", "}", "}", ",", "\"second\"", ":", "{", "}", ",", "\"third\"", ":", "{", "\"bar\"", ":", "{", "\"is_complete_list\"", ":", "True", ",", "\"names\"", ":", "[", "]", "}", "}", ",", "}" ]
[ 116, 0 ]
[ 156, 5 ]
python
en
['en', 'en', 'en']
True
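For reference, a standalone sketch of walking the structure this test asserts; the dict literal is copied from the assertion above.

obs = {
    "first": {"foo": {"is_complete_list": True, "names": []}},
    "second": {},
    "third": {"bar": {"is_complete_list": True, "names": []}},
}
for datasource, generators in obs.items():
    if not generators:
        # Datasources configured without batch kwargs generators map to {}.
        print(f"{datasource}: no batch kwargs generators configured")
    for generator, assets in generators.items():
        print(f"{datasource}/{generator}: {len(assets['names'])} assets, "
              f"complete={assets['is_complete_list']}")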
test_data_context_get_validation_result
(titanic_data_context)
Test that validation results can be correctly fetched from the configured results store
Test that validation results can be correctly fetched from the configured results store
def test_data_context_get_validation_result(titanic_data_context): """ Test that validation results can be correctly fetched from the configured results store """ run_id = RunIdentifier(run_name="profiling") profiling_results = titanic_data_context.profile_datasource( "mydatasource", run_id=run_id ) all_validation_result = titanic_data_context.get_validation_result( "mydatasource.mygenerator.Titanic.BasicDatasetProfiler", run_id=run_id ) assert len(all_validation_result.results) == 51 failed_validation_result = titanic_data_context.get_validation_result( "mydatasource.mygenerator.Titanic.BasicDatasetProfiler", run_id=run_id, failed_only=True, ) assert len(failed_validation_result.results) == 8
[ "def", "test_data_context_get_validation_result", "(", "titanic_data_context", ")", ":", "run_id", "=", "RunIdentifier", "(", "run_name", "=", "\"profiling\"", ")", "profiling_results", "=", "titanic_data_context", ".", "profile_datasource", "(", "\"mydatasource\"", ",", "run_id", "=", "run_id", ")", "all_validation_result", "=", "titanic_data_context", ".", "get_validation_result", "(", "\"mydatasource.mygenerator.Titanic.BasicDatasetProfiler\"", ",", "run_id", "=", "run_id", ")", "assert", "len", "(", "all_validation_result", ".", "results", ")", "==", "51", "failed_validation_result", "=", "titanic_data_context", ".", "get_validation_result", "(", "\"mydatasource.mygenerator.Titanic.BasicDatasetProfiler\"", ",", "run_id", "=", "run_id", ",", "failed_only", "=", "True", ",", ")", "assert", "len", "(", "failed_validation_result", ".", "results", ")", "==", "8" ]
[ 370, 0 ]
[ 389, 53 ]
python
en
['en', 'error', 'th']
False
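A condensed sketch of the fetch pattern exercised above. It assumes `context` is a DataContext whose datasource has already been profiled (setup elided), and the RunIdentifier import path matches the one used elsewhere in this test suite.

from great_expectations.core.run_identifier import RunIdentifier

run_id = RunIdentifier(run_name="profiling")
# `context` is assumed to be a DataContext with stored profiling results.
all_results = context.get_validation_result(
    "mydatasource.mygenerator.Titanic.BasicDatasetProfiler", run_id=run_id
)
failed_only = context.get_validation_result(
    "mydatasource.mygenerator.Titanic.BasicDatasetProfiler",
    run_id=run_id,
    failed_only=True,  # keep only expectation results that did not succeed
)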
test_get_batch_multiple_datasources_do_not_scan_all
( data_context_with_bad_datasource, )
What does this test and why? A DataContext can have "stale" datasources in its configuration (i.e., connections to DBs that are now offline). If we configure a new datasource and are only using it (like the PandasDatasource below), then we don't want to be dependent on all the "stale" datasources working too. data_context_with_bad_datasource is a fixture that contains a configuration for an invalid datasource (with "fake_port" and "fake_host"). In the test we configure a new expectation_suite, a local pandas_datasource, and retrieve a single batch. This tests a fix for the following issue: https://github.com/great-expectations/great_expectations/issues/2241
What does this test and why?
def test_get_batch_multiple_datasources_do_not_scan_all( data_context_with_bad_datasource, ): """ What does this test and why? A DataContext can have "stale" datasources in its configuration (i.e., connections to DBs that are now offline). If we configure a new datasource and are only using it (like the PandasDatasource below), then we don't want to be dependent on all the "stale" datasources working too. data_context_with_bad_datasource is a fixture that contains a configuration for an invalid datasource (with "fake_port" and "fake_host"). In the test we configure a new expectation_suite, a local pandas_datasource, and retrieve a single batch. This tests a fix for the following issue: https://github.com/great-expectations/great_expectations/issues/2241 """ context = data_context_with_bad_datasource context.create_expectation_suite(expectation_suite_name="local_test.default") expectation_suite = context.get_expectation_suite("local_test.default") context.add_datasource("pandas_datasource", class_name="PandasDatasource") df = pd.DataFrame({"a": [1, 2, 3]}) batch = context.get_batch( batch_kwargs={"datasource": "pandas_datasource", "dataset": df}, expectation_suite_name=expectation_suite, ) assert len(batch) == 3
[ "def", "test_get_batch_multiple_datasources_do_not_scan_all", "(", "data_context_with_bad_datasource", ",", ")", ":", "context", "=", "data_context_with_bad_datasource", "context", ".", "create_expectation_suite", "(", "expectation_suite_name", "=", "\"local_test.default\"", ")", "expectation_suite", "=", "context", ".", "get_expectation_suite", "(", "\"local_test.default\"", ")", "context", ".", "add_datasource", "(", "\"pandas_datasource\"", ",", "class_name", "=", "\"PandasDatasource\"", ")", "df", "=", "pd", ".", "DataFrame", "(", "{", "\"a\"", ":", "[", "1", ",", "2", ",", "3", "]", "}", ")", "batch", "=", "context", ".", "get_batch", "(", "batch_kwargs", "=", "{", "\"datasource\"", ":", "\"pandas_datasource\"", ",", "\"dataset\"", ":", "df", "}", ",", "expectation_suite_name", "=", "expectation_suite", ",", ")", "assert", "len", "(", "batch", ")", "==", "3" ]
[ 1715, 0 ]
[ 1743, 26 ]
python
en
['en', 'error', 'th']
False
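The fix being regression-tested above reduces to this usage pattern. A sketch, assuming `context` is an existing DataContext (possibly containing unreachable datasources); passing the suite name rather than the suite object is assumed to be accepted by get_batch.

import pandas as pd

# Only the newly added pandas datasource is touched; other (possibly
# offline) datasources in the project config are not connected to.
context.add_datasource("pandas_datasource", class_name="PandasDatasource")
context.create_expectation_suite(expectation_suite_name="local_test.default")
batch = context.get_batch(
    batch_kwargs={"datasource": "pandas_datasource",
                  "dataset": pd.DataFrame({"a": [1, 2, 3]})},
    expectation_suite_name="local_test.default",
)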
test_add_checkpoint_from_yaml
(mock_emit, empty_data_context_stats_enabled)
What does this test and why? We should be able to add a checkpoint directly from a valid yaml configuration. test_yaml_config() should not automatically save a checkpoint if valid. checkpoint yaml in a store should match the configuration, even if created from SimpleCheckpoints Note: This tests multiple items and could stand to be broken up.
What does this test and why? We should be able to add a checkpoint directly from a valid yaml configuration. test_yaml_config() should not automatically save a checkpoint if valid. checkpoint yaml in a store should match the configuration, even if created from SimpleCheckpoints Note: This tests multiple items and could stand to be broken up.
def test_add_checkpoint_from_yaml(mock_emit, empty_data_context_stats_enabled): """ What does this test and why? We should be able to add a checkpoint directly from a valid yaml configuration. test_yaml_config() should not automatically save a checkpoint if valid. checkpoint yaml in a store should match the configuration, even if created from SimpleCheckpoints Note: This tests multiple items and could stand to be broken up. """ context: DataContext = empty_data_context_stats_enabled checkpoint_name: str = "my_new_checkpoint" assert checkpoint_name not in context.list_checkpoints() assert len(context.list_checkpoints()) == 0 checkpoint_yaml_config = f""" name: {checkpoint_name} config_version: 1.0 class_name: SimpleCheckpoint run_name_template: "%Y%m%d-%H%M%S-my-run-name-template" validations: - batch_request: datasource_name: data_dir data_connector_name: data_dir_example_data_connector data_asset_name: DEFAULT_ASSET_NAME partition_request: index: -1 expectation_suite_name: newsuite """ checkpoint_from_test_yaml_config = context.test_yaml_config( checkpoint_yaml_config, name=checkpoint_name ) assert mock_emit.call_count == 1 # Substitute anonymized name since it changes for each run anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][ "anonymized_name" ] expected_call_args_list = [ mock.call( { "event": "data_context.test_yaml_config", "event_payload": { "anonymized_name": anonymized_name, "parent_class": "SimpleCheckpoint", }, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list # test_yaml_config() no longer stores checkpoints automatically assert checkpoint_name not in context.list_checkpoints() assert len(context.list_checkpoints()) == 0 checkpoint_from_yaml = context.add_checkpoint( **yaml.load(checkpoint_yaml_config), ) expected_checkpoint_yaml: str = """name: my_new_checkpoint config_version: 1.0 template_name: module_name: great_expectations.checkpoint class_name: Checkpoint run_name_template: '%Y%m%d-%H%M%S-my-run-name-template' expectation_suite_name: batch_request: action_list: - name: store_validation_result action: class_name: StoreValidationResultAction - name: store_evaluation_params action: class_name: StoreEvaluationParametersAction - name: update_data_docs action: class_name: UpdateDataDocsAction site_names: [] evaluation_parameters: {} runtime_configuration: {} validations: - batch_request: datasource_name: data_dir data_connector_name: data_dir_example_data_connector data_asset_name: DEFAULT_ASSET_NAME partition_request: index: -1 expectation_suite_name: newsuite profilers: [] ge_cloud_id: """ checkpoint_dir = os.path.join( context.root_directory, context.checkpoint_store.config["store_backend"]["base_directory"], ) checkpoint_file = os.path.join(checkpoint_dir, f"{checkpoint_name}.yml") with open(checkpoint_file) as cf: checkpoint_from_disk = cf.read() assert checkpoint_from_disk == expected_checkpoint_yaml assert checkpoint_from_yaml.config.to_yaml_str() == expected_checkpoint_yaml checkpoint_from_store = context.get_checkpoint(checkpoint_name) assert checkpoint_from_store.config.to_yaml_str() == expected_checkpoint_yaml expected_action_list = [ { "name": "store_validation_result", "action": {"class_name": "StoreValidationResultAction"}, }, { "name": "store_evaluation_params", "action": {"class_name": "StoreEvaluationParametersAction"}, }, { "name": "update_data_docs", "action": {"class_name": "UpdateDataDocsAction", "site_names": []}, }, ] assert checkpoint_from_yaml.action_list == expected_action_list assert 
checkpoint_from_store.action_list == expected_action_list assert checkpoint_from_test_yaml_config.action_list == expected_action_list assert checkpoint_from_store.action_list == expected_action_list assert checkpoint_from_test_yaml_config.name == checkpoint_from_yaml.name assert ( checkpoint_from_test_yaml_config.action_list == checkpoint_from_yaml.action_list ) assert checkpoint_from_yaml.name == checkpoint_name assert checkpoint_from_yaml.config.to_json_dict() == { "name": "my_new_checkpoint", "config_version": 1.0, "template_name": None, "module_name": "great_expectations.checkpoint", "class_name": "Checkpoint", "run_name_template": "%Y%m%d-%H%M%S-my-run-name-template", "expectation_suite_name": None, "batch_request": None, "action_list": [ { "name": "store_validation_result", "action": {"class_name": "StoreValidationResultAction"}, }, { "name": "store_evaluation_params", "action": {"class_name": "StoreEvaluationParametersAction"}, }, { "name": "update_data_docs", "action": {"class_name": "UpdateDataDocsAction", "site_names": []}, }, ], "evaluation_parameters": {}, "runtime_configuration": {}, "validations": [ { "batch_request": { "datasource_name": "data_dir", "data_connector_name": "data_dir_example_data_connector", "data_asset_name": "DEFAULT_ASSET_NAME", "partition_request": {"index": -1}, }, "expectation_suite_name": "newsuite", } ], "profilers": [], "ge_cloud_id": None, } assert isinstance(checkpoint_from_yaml, Checkpoint) assert checkpoint_name in context.list_checkpoints() assert len(context.list_checkpoints()) == 1 # No other usage stats calls detected assert mock_emit.call_count == 1
[ "def", "test_add_checkpoint_from_yaml", "(", "mock_emit", ",", "empty_data_context_stats_enabled", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "checkpoint_name", ":", "str", "=", "\"my_new_checkpoint\"", "assert", "checkpoint_name", "not", "in", "context", ".", "list_checkpoints", "(", ")", "assert", "len", "(", "context", ".", "list_checkpoints", "(", ")", ")", "==", "0", "checkpoint_yaml_config", "=", "f\"\"\"\nname: {checkpoint_name}\nconfig_version: 1.0\nclass_name: SimpleCheckpoint\nrun_name_template: \"%Y%m%d-%H%M%S-my-run-name-template\"\nvalidations:\n - batch_request:\n datasource_name: data_dir\n data_connector_name: data_dir_example_data_connector\n data_asset_name: DEFAULT_ASSET_NAME\n partition_request:\n index: -1\n expectation_suite_name: newsuite\n \"\"\"", "checkpoint_from_test_yaml_config", "=", "context", ".", "test_yaml_config", "(", "checkpoint_yaml_config", ",", "name", "=", "checkpoint_name", ")", "assert", "mock_emit", ".", "call_count", "==", "1", "# Substitute anonymized name since it changes for each run", "anonymized_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_name\"", "]", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.test_yaml_config\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_name\"", ":", "anonymized_name", ",", "\"parent_class\"", ":", "\"SimpleCheckpoint\"", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "# test_yaml_config() no longer stores checkpoints automatically", "assert", "checkpoint_name", "not", "in", "context", ".", "list_checkpoints", "(", ")", "assert", "len", "(", "context", ".", "list_checkpoints", "(", ")", ")", "==", "0", "checkpoint_from_yaml", "=", "context", ".", "add_checkpoint", "(", "*", "*", "yaml", ".", "load", "(", "checkpoint_yaml_config", ")", ",", ")", "expected_checkpoint_yaml", ":", "str", "=", "\"\"\"name: my_new_checkpoint\nconfig_version: 1.0\ntemplate_name:\nmodule_name: great_expectations.checkpoint\nclass_name: Checkpoint\nrun_name_template: '%Y%m%d-%H%M%S-my-run-name-template'\nexpectation_suite_name:\nbatch_request:\naction_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n site_names: []\nevaluation_parameters: {}\nruntime_configuration: {}\nvalidations:\n - batch_request:\n datasource_name: data_dir\n data_connector_name: data_dir_example_data_connector\n data_asset_name: DEFAULT_ASSET_NAME\n partition_request:\n index: -1\n expectation_suite_name: newsuite\nprofilers: []\nge_cloud_id:\n\"\"\"", "checkpoint_dir", "=", "os", ".", "path", ".", "join", "(", "context", ".", "root_directory", ",", "context", ".", "checkpoint_store", ".", "config", "[", "\"store_backend\"", "]", "[", "\"base_directory\"", "]", ",", ")", "checkpoint_file", "=", "os", ".", "path", ".", "join", "(", "checkpoint_dir", ",", "f\"{checkpoint_name}.yml\"", ")", "with", "open", "(", "checkpoint_file", ")", "as", "cf", ":", "checkpoint_from_disk", "=", "cf", ".", "read", "(", ")", "assert", "checkpoint_from_disk", "==", "expected_checkpoint_yaml", "assert", "checkpoint_from_yaml", ".", "config", ".", "to_yaml_str", "(", ")", "==", 
"expected_checkpoint_yaml", "checkpoint_from_store", "=", "context", ".", "get_checkpoint", "(", "checkpoint_name", ")", "assert", "checkpoint_from_store", ".", "config", ".", "to_yaml_str", "(", ")", "==", "expected_checkpoint_yaml", "expected_action_list", "=", "[", "{", "\"name\"", ":", "\"store_validation_result\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"StoreValidationResultAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"store_evaluation_params\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"StoreEvaluationParametersAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"update_data_docs\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"UpdateDataDocsAction\"", ",", "\"site_names\"", ":", "[", "]", "}", ",", "}", ",", "]", "assert", "checkpoint_from_yaml", ".", "action_list", "==", "expected_action_list", "assert", "checkpoint_from_store", ".", "action_list", "==", "expected_action_list", "assert", "checkpoint_from_test_yaml_config", ".", "action_list", "==", "expected_action_list", "assert", "checkpoint_from_store", ".", "action_list", "==", "expected_action_list", "assert", "checkpoint_from_test_yaml_config", ".", "name", "==", "checkpoint_from_yaml", ".", "name", "assert", "(", "checkpoint_from_test_yaml_config", ".", "action_list", "==", "checkpoint_from_yaml", ".", "action_list", ")", "assert", "checkpoint_from_yaml", ".", "name", "==", "checkpoint_name", "assert", "checkpoint_from_yaml", ".", "config", ".", "to_json_dict", "(", ")", "==", "{", "\"name\"", ":", "\"my_new_checkpoint\"", ",", "\"config_version\"", ":", "1.0", ",", "\"template_name\"", ":", "None", ",", "\"module_name\"", ":", "\"great_expectations.checkpoint\"", ",", "\"class_name\"", ":", "\"Checkpoint\"", ",", "\"run_name_template\"", ":", "\"%Y%m%d-%H%M%S-my-run-name-template\"", ",", "\"expectation_suite_name\"", ":", "None", ",", "\"batch_request\"", ":", "None", ",", "\"action_list\"", ":", "[", "{", "\"name\"", ":", "\"store_validation_result\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"StoreValidationResultAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"store_evaluation_params\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"StoreEvaluationParametersAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"update_data_docs\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"UpdateDataDocsAction\"", ",", "\"site_names\"", ":", "[", "]", "}", ",", "}", ",", "]", ",", "\"evaluation_parameters\"", ":", "{", "}", ",", "\"runtime_configuration\"", ":", "{", "}", ",", "\"validations\"", ":", "[", "{", "\"batch_request\"", ":", "{", "\"datasource_name\"", ":", "\"data_dir\"", ",", "\"data_connector_name\"", ":", "\"data_dir_example_data_connector\"", ",", "\"data_asset_name\"", ":", "\"DEFAULT_ASSET_NAME\"", ",", "\"partition_request\"", ":", "{", "\"index\"", ":", "-", "1", "}", ",", "}", ",", "\"expectation_suite_name\"", ":", "\"newsuite\"", ",", "}", "]", ",", "\"profilers\"", ":", "[", "]", ",", "\"ge_cloud_id\"", ":", "None", ",", "}", "assert", "isinstance", "(", "checkpoint_from_yaml", ",", "Checkpoint", ")", "assert", "checkpoint_name", "in", "context", ".", "list_checkpoints", "(", ")", "assert", "len", "(", "context", ".", "list_checkpoints", "(", ")", ")", "==", "1", "# No other usage stats calls detected", "assert", "mock_emit", ".", "call_count", "==", "1" ]
[ 1749, 0 ]
[ 1929, 36 ]
python
en
['en', 'error', 'th']
False
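A condensed sketch of the two-step flow the test above verifies: validate a checkpoint config with test_yaml_config() (which, as asserted, no longer persists it), then persist it with add_checkpoint(). It assumes `context` is a DataContext and `yaml` is the loader used in this test module; the config is trimmed to a minimum, whereas real configs usually carry validations as in the record above.

checkpoint_yaml = """
name: my_new_checkpoint
config_version: 1.0
class_name: SimpleCheckpoint
run_name_template: "%Y%m%d-%H%M%S-my-run-name-template"
"""
# Dry run: validates the config but does not store it.
context.test_yaml_config(checkpoint_yaml, name="my_new_checkpoint")
assert "my_new_checkpoint" not in context.list_checkpoints()
# Persist it explicitly.
context.add_checkpoint(**yaml.load(checkpoint_yaml))
assert "my_new_checkpoint" in context.list_checkpoints()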
test_add_checkpoint_from_yaml_fails_for_unrecognized_class_name
( mock_emit, empty_data_context_stats_enabled )
What does this test and why? Checkpoint yaml should have a valid class_name
What does this test and why? Checkpoint yaml should have a valid class_name
def test_add_checkpoint_from_yaml_fails_for_unrecognized_class_name( mock_emit, empty_data_context_stats_enabled ): """ What does this test and why? Checkpoint yaml should have a valid class_name """ context: DataContext = empty_data_context_stats_enabled checkpoint_name: str = "my_new_checkpoint" assert checkpoint_name not in context.list_checkpoints() assert len(context.list_checkpoints()) == 0 checkpoint_yaml_config = f""" name: {checkpoint_name} config_version: 1.0 class_name: NotAValidCheckpointClassName run_name_template: "%Y%m%d-%H%M%S-my-run-name-template" validations: - batch_request: datasource_name: data_dir data_connector_name: data_dir_example_data_connector data_asset_name: DEFAULT_ASSET_NAME partition_request: index: -1 expectation_suite_name: newsuite """ with pytest.raises(KeyError): context.test_yaml_config(checkpoint_yaml_config, name=checkpoint_name) with pytest.raises(AttributeError): context.add_checkpoint( **yaml.load(checkpoint_yaml_config), ) assert checkpoint_name not in context.list_checkpoints() assert len(context.list_checkpoints()) == 0 assert mock_emit.call_count == 1 expected_call_args_list = [ mock.call( { "event": "data_context.test_yaml_config", "event_payload": {}, "success": False, } ), ] assert mock_emit.call_args_list == expected_call_args_list
[ "def", "test_add_checkpoint_from_yaml_fails_for_unrecognized_class_name", "(", "mock_emit", ",", "empty_data_context_stats_enabled", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "checkpoint_name", ":", "str", "=", "\"my_new_checkpoint\"", "assert", "checkpoint_name", "not", "in", "context", ".", "list_checkpoints", "(", ")", "assert", "len", "(", "context", ".", "list_checkpoints", "(", ")", ")", "==", "0", "checkpoint_yaml_config", "=", "f\"\"\"\nname: {checkpoint_name}\nconfig_version: 1.0\nclass_name: NotAValidCheckpointClassName\nrun_name_template: \"%Y%m%d-%H%M%S-my-run-name-template\"\nvalidations:\n - batch_request:\n datasource_name: data_dir\n data_connector_name: data_dir_example_data_connector\n data_asset_name: DEFAULT_ASSET_NAME\n partition_request:\n index: -1\n expectation_suite_name: newsuite\n \"\"\"", "with", "pytest", ".", "raises", "(", "KeyError", ")", ":", "context", ".", "test_yaml_config", "(", "checkpoint_yaml_config", ",", "name", "=", "checkpoint_name", ")", "with", "pytest", ".", "raises", "(", "AttributeError", ")", ":", "context", ".", "add_checkpoint", "(", "*", "*", "yaml", ".", "load", "(", "checkpoint_yaml_config", ")", ",", ")", "assert", "checkpoint_name", "not", "in", "context", ".", "list_checkpoints", "(", ")", "assert", "len", "(", "context", ".", "list_checkpoints", "(", ")", ")", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "1", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.test_yaml_config\"", ",", "\"event_payload\"", ":", "{", "}", ",", "\"success\"", ":", "False", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list" ]
[ 1935, 0 ]
[ 1984, 62 ]
python
en
['en', 'error', 'th']
False
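The same failure contract in condensed form. A sketch, with `context` and `yaml` assumed as in this module: an unrecognized class_name makes test_yaml_config() raise (KeyError here), add_checkpoint() fail, and nothing gets persisted on either path.

import pytest

bad_yaml = """
name: bad_checkpoint
config_version: 1.0
class_name: NotAValidCheckpointClassName
"""
with pytest.raises(KeyError):
    context.test_yaml_config(bad_yaml, name="bad_checkpoint")
with pytest.raises(AttributeError):
    context.add_checkpoint(**yaml.load(bad_yaml))
# Nothing was persisted on either failure path.
assert "bad_checkpoint" not in context.list_checkpoints()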
test_add_datasource_from_yaml
(mock_emit, empty_data_context_stats_enabled)
What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables should work as expected.
What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables should work as expected.
def test_add_datasource_from_yaml(mock_emit, empty_data_context_stats_enabled): """ What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables should work as expected. """ context: DataContext = empty_data_context_stats_enabled assert "my_new_datasource" not in context.datasources.keys() assert "my_new_datasource" not in context.list_datasources() assert "my_new_datasource" not in context.get_config()["datasources"] datasource_name: str = "my_datasource" example_yaml = f""" class_name: Datasource execution_engine: class_name: PandasExecutionEngine data_connectors: data_dir_example_data_connector: class_name: InferredAssetFilesystemDataConnector datasource_name: {datasource_name} base_directory: ../data default_regex: group_names: data_asset_name pattern: (.*) """ datasource_from_test_yaml_config = context.test_yaml_config( example_yaml, name=datasource_name ) assert mock_emit.call_count == 1 # Substitute anonymized names since it changes for each run anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][ "anonymized_name" ] anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][ "event_payload" ]["anonymized_execution_engine"]["anonymized_name"] anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][ "anonymized_data_connectors" ][0]["anonymized_name"] expected_call_args_list = [ mock.call( { "event": "data_context.test_yaml_config", "event_payload": { "anonymized_name": anonymized_datasource_name, "parent_class": "Datasource", "anonymized_execution_engine": { "anonymized_name": anonymized_execution_engine_name, "parent_class": "PandasExecutionEngine", }, "anonymized_data_connectors": [ { "anonymized_name": anonymized_data_connector_name, "parent_class": "InferredAssetFilesystemDataConnector", } ], }, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list datasource_from_yaml = context.add_datasource( name=datasource_name, **yaml.load(example_yaml) ) assert mock_emit.call_count == 2 expected_call_args_list.extend( [ mock.call( { "event": "data_context.add_datasource", "event_payload": {}, "success": True, } ), ] ) assert mock_emit.call_args_list == expected_call_args_list assert datasource_from_test_yaml_config.config == datasource_from_yaml.config assert datasource_from_yaml.name == datasource_name assert datasource_from_yaml.config == { "execution_engine": { "class_name": "PandasExecutionEngine", "module_name": "great_expectations.execution_engine", }, "data_connectors": { "data_dir_example_data_connector": { "class_name": "InferredAssetFilesystemDataConnector", "module_name": "great_expectations.datasource.data_connector", "default_regex": {"group_names": "data_asset_name", "pattern": "(.*)"}, "base_directory": "../data", } }, } assert isinstance(datasource_from_yaml, Datasource) assert datasource_from_yaml.__class__.__name__ == "Datasource" assert datasource_name in [d["name"] for d in context.list_datasources()] assert datasource_name in context.datasources assert datasource_name in context.get_config()["datasources"] # Check that the datasource was written to disk as expected root_directory = context.root_directory del context context = DataContext(root_directory) assert datasource_name in [d["name"] for d in context.list_datasources()] assert datasource_name in context.datasources assert datasource_name in context.get_config()["datasources"] assert mock_emit.call_count == 3 
expected_call_args_list.extend( [ mock.call( { "event": "data_context.__init__", "event_payload": {}, "success": True, } ), ] ) assert mock_emit.call_args_list == expected_call_args_list
[ "def", "test_add_datasource_from_yaml", "(", "mock_emit", ",", "empty_data_context_stats_enabled", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "datasources", ".", "keys", "(", ")", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "list_datasources", "(", ")", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "datasource_name", ":", "str", "=", "\"my_datasource\"", "example_yaml", "=", "f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: PandasExecutionEngine\n data_connectors:\n data_dir_example_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n datasource_name: {datasource_name}\n base_directory: ../data\n default_regex:\n group_names: data_asset_name\n pattern: (.*)\n \"\"\"", "datasource_from_test_yaml_config", "=", "context", ".", "test_yaml_config", "(", "example_yaml", ",", "name", "=", "datasource_name", ")", "assert", "mock_emit", ".", "call_count", "==", "1", "# Substitute anonymized names since it changes for each run", "anonymized_datasource_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_name\"", "]", "anonymized_execution_engine_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_execution_engine\"", "]", "[", "\"anonymized_name\"", "]", "anonymized_data_connector_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_data_connectors\"", "]", "[", "0", "]", "[", "\"anonymized_name\"", "]", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.test_yaml_config\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_name\"", ":", "anonymized_datasource_name", ",", "\"parent_class\"", ":", "\"Datasource\"", ",", "\"anonymized_execution_engine\"", ":", "{", "\"anonymized_name\"", ":", "anonymized_execution_engine_name", ",", "\"parent_class\"", ":", "\"PandasExecutionEngine\"", ",", "}", ",", "\"anonymized_data_connectors\"", ":", "[", "{", "\"anonymized_name\"", ":", "anonymized_data_connector_name", ",", "\"parent_class\"", ":", "\"InferredAssetFilesystemDataConnector\"", ",", "}", "]", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "datasource_from_yaml", "=", "context", ".", "add_datasource", "(", "name", "=", "datasource_name", ",", "*", "*", "yaml", ".", "load", "(", "example_yaml", ")", ")", "assert", "mock_emit", ".", "call_count", "==", "2", "expected_call_args_list", ".", "extend", "(", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.add_datasource\"", ",", "\"event_payload\"", ":", "{", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "assert", "datasource_from_test_yaml_config", ".", "config", "==", "datasource_from_yaml", ".", "config", "assert", "datasource_from_yaml", ".", "name", "==", "datasource_name", "assert", "datasource_from_yaml", ".", "config", "==", "{", "\"execution_engine\"", ":", "{", "\"class_name\"", ":", "\"PandasExecutionEngine\"", ",", "\"module_name\"", ":", "\"great_expectations.execution_engine\"", 
",", "}", ",", "\"data_connectors\"", ":", "{", "\"data_dir_example_data_connector\"", ":", "{", "\"class_name\"", ":", "\"InferredAssetFilesystemDataConnector\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"default_regex\"", ":", "{", "\"group_names\"", ":", "\"data_asset_name\"", ",", "\"pattern\"", ":", "\"(.*)\"", "}", ",", "\"base_directory\"", ":", "\"../data\"", ",", "}", "}", ",", "}", "assert", "isinstance", "(", "datasource_from_yaml", ",", "Datasource", ")", "assert", "datasource_from_yaml", ".", "__class__", ".", "__name__", "==", "\"Datasource\"", "assert", "datasource_name", "in", "[", "d", "[", "\"name\"", "]", "for", "d", "in", "context", ".", "list_datasources", "(", ")", "]", "assert", "datasource_name", "in", "context", ".", "datasources", "assert", "datasource_name", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "# Check that the datasource was written to disk as expected", "root_directory", "=", "context", ".", "root_directory", "del", "context", "context", "=", "DataContext", "(", "root_directory", ")", "assert", "datasource_name", "in", "[", "d", "[", "\"name\"", "]", "for", "d", "in", "context", ".", "list_datasources", "(", ")", "]", "assert", "datasource_name", "in", "context", ".", "datasources", "assert", "datasource_name", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "assert", "mock_emit", ".", "call_count", "==", "3", "expected_call_args_list", ".", "extend", "(", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"event_payload\"", ":", "{", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list" ]
[ 1990, 0 ]
[ 2115, 62 ]
python
en
['en', 'error', 'th']
False
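The add-from-YAML flow exercised above, condensed into a sketch. It assumes `context` is a DataContext and `yaml` is the loader used in this module; the config is trimmed from the record's version (the connector's datasource_name field is omitted here).

datasource_yaml = """
class_name: Datasource
execution_engine:
  class_name: PandasExecutionEngine
data_connectors:
  data_dir_example_data_connector:
    class_name: InferredAssetFilesystemDataConnector
    base_directory: ../data
    default_regex:
      group_names: data_asset_name
      pattern: (.*)
"""
# Validate first (emits a usage-stats event but stores nothing) ...
context.test_yaml_config(datasource_yaml, name="my_datasource")
# ... then persist to great_expectations.yml.
context.add_datasource(name="my_datasource", **yaml.load(datasource_yaml))
assert "my_datasource" in [d["name"] for d in context.list_datasources()]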
test_add_datasource_from_yaml_sql_datasource
( mock_emit, sa, test_backends, empty_data_context_stats_enabled, )
What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables should work as expected.
What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables should work as expected.
def test_add_datasource_from_yaml_sql_datasource( mock_emit, sa, test_backends, empty_data_context_stats_enabled, ): """ What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables should work as expected. """ if "postgresql" not in test_backends: pytest.skip("test_add_datasource_from_yaml_sql_datasource requires postgresql") context: DataContext = empty_data_context_stats_enabled assert "my_new_datasource" not in context.datasources.keys() assert "my_new_datasource" not in context.list_datasources() assert "my_new_datasource" not in context.get_config()["datasources"] datasource_name: str = "my_datasource" example_yaml = f""" class_name: SimpleSqlalchemyDatasource introspection: whole_table: data_asset_name_suffix: __whole_table credentials: drivername: postgresql host: localhost port: '5432' username: postgres password: '' database: postgres """ datasource_from_test_yaml_config = context.test_yaml_config( example_yaml, name=datasource_name ) assert mock_emit.call_count == 1 # Substitute anonymized name since it changes for each run anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][ "anonymized_name" ] anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][ "anonymized_data_connectors" ][0]["anonymized_name"] expected_call_args_list = [ mock.call( { "event": "data_context.test_yaml_config", "event_payload": { "anonymized_name": anonymized_name, "parent_class": "SimpleSqlalchemyDatasource", "anonymized_execution_engine": { "parent_class": "SqlAlchemyExecutionEngine" }, "anonymized_data_connectors": [ { "anonymized_name": anonymized_data_connector_name, "parent_class": "InferredAssetSqlDataConnector", } ], }, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list datasource_from_yaml = context.add_datasource( name=datasource_name, **yaml.load(example_yaml) ) assert mock_emit.call_count == 2 expected_call_args_list.extend( [ mock.call( { "event": "data_context.add_datasource", "event_payload": {}, "success": True, } ), ] ) assert mock_emit.call_args_list == expected_call_args_list # .config not implemented for SimpleSqlalchemyDatasource assert datasource_from_test_yaml_config.config == {} assert datasource_from_yaml.config == {} assert datasource_from_yaml.name == datasource_name assert isinstance(datasource_from_yaml, SimpleSqlalchemyDatasource) assert datasource_from_yaml.__class__.__name__ == "SimpleSqlalchemyDatasource" assert datasource_name in [d["name"] for d in context.list_datasources()] assert datasource_name in context.datasources assert datasource_name in context.get_config()["datasources"] assert isinstance( context.get_datasource(datasource_name=datasource_name), SimpleSqlalchemyDatasource, ) assert isinstance( context.get_config()["datasources"][datasource_name], DatasourceConfig ) # As of 20210312 SimpleSqlalchemyDatasource returns an empty {} .config # so here we check for each part of the config individually datasource_config = context.get_config()["datasources"][datasource_name] assert datasource_config.class_name == "SimpleSqlalchemyDatasource" assert datasource_config.credentials == { "drivername": "postgresql", "host": "localhost", "port": "5432", "username": "postgres", "password": "", "database": "postgres", } assert datasource_config.credentials == OrderedDict( [ ("drivername", "postgresql"), ("host", "localhost"), ("port", "5432"), ("username", "postgres"), ("password", ""), ("database", "postgres"), ] 
) assert datasource_config.introspection == OrderedDict( [("whole_table", OrderedDict([("data_asset_name_suffix", "__whole_table")]))] ) assert datasource_config.module_name == "great_expectations.datasource" # Check that the datasource was written to disk as expected root_directory = context.root_directory del context context = DataContext(root_directory) assert datasource_name in [d["name"] for d in context.list_datasources()] assert datasource_name in context.datasources assert datasource_name in context.get_config()["datasources"] assert isinstance( context.get_datasource(datasource_name=datasource_name), SimpleSqlalchemyDatasource, ) assert isinstance( context.get_config()["datasources"][datasource_name], DatasourceConfig ) # As of 20210312 SimpleSqlalchemyDatasource returns an empty {} .config # so here we check for each part of the config individually datasource_config = context.get_config()["datasources"][datasource_name] assert datasource_config.class_name == "SimpleSqlalchemyDatasource" assert datasource_config.credentials == { "drivername": "postgresql", "host": "localhost", "port": "5432", "username": "postgres", "password": "", "database": "postgres", } assert datasource_config.credentials == OrderedDict( [ ("drivername", "postgresql"), ("host", "localhost"), ("port", "5432"), ("username", "postgres"), ("password", ""), ("database", "postgres"), ] ) assert datasource_config.introspection == OrderedDict( [("whole_table", OrderedDict([("data_asset_name_suffix", "__whole_table")]))] ) assert datasource_config.module_name == "great_expectations.datasource" assert mock_emit.call_count == 3 expected_call_args_list.extend( [ mock.call( { "event": "data_context.__init__", "event_payload": {}, "success": True, } ), ] ) assert mock_emit.call_args_list == expected_call_args_list
[ "def", "test_add_datasource_from_yaml_sql_datasource", "(", "mock_emit", ",", "sa", ",", "test_backends", ",", "empty_data_context_stats_enabled", ",", ")", ":", "if", "\"postgresql\"", "not", "in", "test_backends", ":", "pytest", ".", "skip", "(", "\"test_add_datasource_from_yaml_sql_datasource requires postgresql\"", ")", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "datasources", ".", "keys", "(", ")", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "list_datasources", "(", ")", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "datasource_name", ":", "str", "=", "\"my_datasource\"", "example_yaml", "=", "f\"\"\"\n class_name: SimpleSqlalchemyDatasource\n introspection:\n whole_table:\n data_asset_name_suffix: __whole_table\n credentials:\n drivername: postgresql\n host: localhost\n port: '5432'\n username: postgres\n password: ''\n database: postgres\n \"\"\"", "datasource_from_test_yaml_config", "=", "context", ".", "test_yaml_config", "(", "example_yaml", ",", "name", "=", "datasource_name", ")", "assert", "mock_emit", ".", "call_count", "==", "1", "# Substitute anonymized name since it changes for each run", "anonymized_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_name\"", "]", "anonymized_data_connector_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_data_connectors\"", "]", "[", "0", "]", "[", "\"anonymized_name\"", "]", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.test_yaml_config\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_name\"", ":", "anonymized_name", ",", "\"parent_class\"", ":", "\"SimpleSqlalchemyDatasource\"", ",", "\"anonymized_execution_engine\"", ":", "{", "\"parent_class\"", ":", "\"SqlAlchemyExecutionEngine\"", "}", ",", "\"anonymized_data_connectors\"", ":", "[", "{", "\"anonymized_name\"", ":", "anonymized_data_connector_name", ",", "\"parent_class\"", ":", "\"InferredAssetSqlDataConnector\"", ",", "}", "]", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "datasource_from_yaml", "=", "context", ".", "add_datasource", "(", "name", "=", "datasource_name", ",", "*", "*", "yaml", ".", "load", "(", "example_yaml", ")", ")", "assert", "mock_emit", ".", "call_count", "==", "2", "expected_call_args_list", ".", "extend", "(", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.add_datasource\"", ",", "\"event_payload\"", ":", "{", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "# .config not implemented for SimpleSqlalchemyDatasource", "assert", "datasource_from_test_yaml_config", ".", "config", "==", "{", "}", "assert", "datasource_from_yaml", ".", "config", "==", "{", "}", "assert", "datasource_from_yaml", ".", "name", "==", "datasource_name", "assert", "isinstance", "(", "datasource_from_yaml", ",", "SimpleSqlalchemyDatasource", ")", "assert", "datasource_from_yaml", ".", "__class__", ".", "__name__", "==", "\"SimpleSqlalchemyDatasource\"", "assert", "datasource_name", "in", "[", "d", "[", "\"name\"", "]", "for", "d", "in", 
"context", ".", "list_datasources", "(", ")", "]", "assert", "datasource_name", "in", "context", ".", "datasources", "assert", "datasource_name", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "assert", "isinstance", "(", "context", ".", "get_datasource", "(", "datasource_name", "=", "datasource_name", ")", ",", "SimpleSqlalchemyDatasource", ",", ")", "assert", "isinstance", "(", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "[", "datasource_name", "]", ",", "DatasourceConfig", ")", "# As of 20210312 SimpleSqlalchemyDatasource returns an empty {} .config", "# so here we check for each part of the config individually", "datasource_config", "=", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "[", "datasource_name", "]", "assert", "datasource_config", ".", "class_name", "==", "\"SimpleSqlalchemyDatasource\"", "assert", "datasource_config", ".", "credentials", "==", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "\"localhost\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "\"\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", "assert", "datasource_config", ".", "credentials", "==", "OrderedDict", "(", "[", "(", "\"drivername\"", ",", "\"postgresql\"", ")", ",", "(", "\"host\"", ",", "\"localhost\"", ")", ",", "(", "\"port\"", ",", "\"5432\"", ")", ",", "(", "\"username\"", ",", "\"postgres\"", ")", ",", "(", "\"password\"", ",", "\"\"", ")", ",", "(", "\"database\"", ",", "\"postgres\"", ")", ",", "]", ")", "assert", "datasource_config", ".", "introspection", "==", "OrderedDict", "(", "[", "(", "\"whole_table\"", ",", "OrderedDict", "(", "[", "(", "\"data_asset_name_suffix\"", ",", "\"__whole_table\"", ")", "]", ")", ")", "]", ")", "assert", "datasource_config", ".", "module_name", "==", "\"great_expectations.datasource\"", "# Check that the datasource was written to disk as expected", "root_directory", "=", "context", ".", "root_directory", "del", "context", "context", "=", "DataContext", "(", "root_directory", ")", "assert", "datasource_name", "in", "[", "d", "[", "\"name\"", "]", "for", "d", "in", "context", ".", "list_datasources", "(", ")", "]", "assert", "datasource_name", "in", "context", ".", "datasources", "assert", "datasource_name", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "assert", "isinstance", "(", "context", ".", "get_datasource", "(", "datasource_name", "=", "datasource_name", ")", ",", "SimpleSqlalchemyDatasource", ",", ")", "assert", "isinstance", "(", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "[", "datasource_name", "]", ",", "DatasourceConfig", ")", "# As of 20210312 SimpleSqlalchemyDatasource returns an empty {} .config", "# so here we check for each part of the config individually", "datasource_config", "=", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "[", "datasource_name", "]", "assert", "datasource_config", ".", "class_name", "==", "\"SimpleSqlalchemyDatasource\"", "assert", "datasource_config", ".", "credentials", "==", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "\"localhost\"", ",", "\"port\"", ":", "\"5432\"", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "\"\"", ",", "\"database\"", ":", "\"postgres\"", ",", "}", "assert", "datasource_config", ".", "credentials", "==", "OrderedDict", "(", "[", "(", "\"drivername\"", ",", "\"postgresql\"", ")", ",", "(", "\"host\"", ",", 
"\"localhost\"", ")", ",", "(", "\"port\"", ",", "\"5432\"", ")", ",", "(", "\"username\"", ",", "\"postgres\"", ")", ",", "(", "\"password\"", ",", "\"\"", ")", ",", "(", "\"database\"", ",", "\"postgres\"", ")", ",", "]", ")", "assert", "datasource_config", ".", "introspection", "==", "OrderedDict", "(", "[", "(", "\"whole_table\"", ",", "OrderedDict", "(", "[", "(", "\"data_asset_name_suffix\"", ",", "\"__whole_table\"", ")", "]", ")", ")", "]", ")", "assert", "datasource_config", ".", "module_name", "==", "\"great_expectations.datasource\"", "assert", "mock_emit", ".", "call_count", "==", "3", "expected_call_args_list", ".", "extend", "(", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"event_payload\"", ":", "{", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list" ]
[ 2121, 0 ]
[ 2309, 62 ]
python
en
['en', 'error', 'th']
False
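One detail worth noting in the paired credential assertions above: comparing against a plain dict and against an OrderedDict are equivalent checks, because OrderedDict equality with a regular dict ignores insertion order. A self-contained illustration:

from collections import OrderedDict

creds = {"drivername": "postgresql", "host": "localhost", "port": "5432"}
# Order differs, yet the comparison is True: dict vs OrderedDict equality
# is item-based, not order-based.
assert creds == OrderedDict(
    [("port", "5432"), ("host", "localhost"), ("drivername", "postgresql")]
)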
test_add_datasource_from_yaml_sql_datasource_with_credentials
( mock_emit, sa, test_backends, empty_data_context_stats_enabled )
What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables. In addition, this tests whether the same can be accomplished using credentials with a Datasource and SqlAlchemyExecutionEngine, rather than a SimpleSqlalchemyDatasource
What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables. In addition, this tests whether the same can be accomplished using credentials with a Datasource and SqlAlchemyExecutionEngine, rather than a SimpleSqlalchemyDatasource
def test_add_datasource_from_yaml_sql_datasource_with_credentials( mock_emit, sa, test_backends, empty_data_context_stats_enabled ): """ What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string without substitution variables. In addition, this tests whether the same can be accomplished using credentials with a Datasource and SqlAlchemyExecutionEngine, rather than a SimpleSqlalchemyDatasource """ if "postgresql" not in test_backends: pytest.skip( "test_add_datasource_from_yaml_sql_datasource_with_credentials requires postgresql" ) context: DataContext = empty_data_context_stats_enabled assert "my_new_datasource" not in context.datasources.keys() assert "my_new_datasource" not in context.list_datasources() assert "my_new_datasource" not in context.get_config()["datasources"] datasource_name: str = "my_datasource" example_yaml = f""" class_name: Datasource execution_engine: class_name: SqlAlchemyExecutionEngine credentials: host: localhost port: 5432 username: postgres password: database: test_ci drivername: postgresql data_connectors: default_inferred_data_connector_name: class_name: InferredAssetSqlDataConnector name: whole_table default_runtime_data_connector_name: class_name: RuntimeDataConnector batch_identifiers: - default_identifier_name """ datasource_from_test_yaml_config = context.test_yaml_config( example_yaml, name=datasource_name ) assert mock_emit.call_count == 1 # Substitute anonymized name since it changes for each run anonymized_name = mock_emit.call_args_list[0][0][0]["event_payload"][ "anonymized_name" ] anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][ "event_payload" ]["anonymized_execution_engine"]["anonymized_name"] anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][ "anonymized_data_connectors" ][0]["anonymized_name"] anonymized_data_connector_name_1 = mock_emit.call_args_list[0][0][0][ "event_payload" ]["anonymized_data_connectors"][1]["anonymized_name"] expected_call_args_list = [ mock.call( { "event": "data_context.test_yaml_config", "event_payload": { "anonymized_name": anonymized_name, "parent_class": "Datasource", "anonymized_execution_engine": { "anonymized_name": anonymized_execution_engine_name, "parent_class": "SqlAlchemyExecutionEngine", }, "anonymized_data_connectors": [ { "anonymized_name": anonymized_data_connector_name, "parent_class": "InferredAssetSqlDataConnector", }, { "anonymized_name": anonymized_data_connector_name_1, "parent_class": "RuntimeDataConnector", }, ], }, "success": True, } ), ] assert mock_emit.call_args_list == expected_call_args_list datasource_from_yaml = context.add_datasource( name=datasource_name, **yaml.load(example_yaml) ) assert mock_emit.call_count == 2 expected_call_args_list.extend( [ mock.call( { "event": "data_context.add_datasource", "event_payload": {}, "success": True, } ), ] ) assert mock_emit.call_args_list == expected_call_args_list assert datasource_from_test_yaml_config.config == { "execution_engine": { "class_name": "SqlAlchemyExecutionEngine", "credentials": { "host": "localhost", "port": 5432, "username": "postgres", "password": None, "database": "test_ci", "drivername": "postgresql", }, "module_name": "great_expectations.execution_engine", }, "data_connectors": { "default_inferred_data_connector_name": { "class_name": "InferredAssetSqlDataConnector", "module_name": "great_expectations.datasource.data_connector", }, "default_runtime_data_connector_name": { "class_name": "RuntimeDataConnector", 
"batch_identifiers": ["default_identifier_name"], "module_name": "great_expectations.datasource.data_connector", }, }, } assert datasource_from_yaml.config == { "execution_engine": { "class_name": "SqlAlchemyExecutionEngine", "credentials": { "host": "localhost", "port": 5432, "username": "postgres", "password": None, "database": "test_ci", "drivername": "postgresql", }, "module_name": "great_expectations.execution_engine", }, "data_connectors": { "default_inferred_data_connector_name": { "class_name": "InferredAssetSqlDataConnector", "module_name": "great_expectations.datasource.data_connector", }, "default_runtime_data_connector_name": { "class_name": "RuntimeDataConnector", "batch_identifiers": ["default_identifier_name"], "module_name": "great_expectations.datasource.data_connector", }, }, } assert datasource_from_yaml.name == datasource_name assert isinstance(datasource_from_yaml, Datasource) assert datasource_from_yaml.__class__.__name__ == "Datasource" assert datasource_name == context.list_datasources()[0]["name"] assert isinstance(context.datasources[datasource_name], Datasource) assert isinstance( context.get_datasource(datasource_name=datasource_name), Datasource, ) assert isinstance( context.get_config()["datasources"][datasource_name], DatasourceConfig ) # making sure the config is right datasource_config = context.get_config()["datasources"][datasource_name] assert datasource_config.class_name == "Datasource" assert datasource_config.execution_engine.credentials == { "host": "localhost", "port": 5432, "username": "postgres", "password": None, "database": "test_ci", "drivername": "postgresql", } assert datasource_config.execution_engine.credentials == OrderedDict( [ ("host", "localhost"), ("port", 5432), ("username", "postgres"), ("password", None), ("database", "test_ci"), ("drivername", "postgresql"), ] ) # No other usage stats calls detected assert mock_emit.call_count == 2
[ "def", "test_add_datasource_from_yaml_sql_datasource_with_credentials", "(", "mock_emit", ",", "sa", ",", "test_backends", ",", "empty_data_context_stats_enabled", ")", ":", "if", "\"postgresql\"", "not", "in", "test_backends", ":", "pytest", ".", "skip", "(", "\"test_add_datasource_from_yaml_sql_datasource_with_credentials requires postgresql\"", ")", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "datasources", ".", "keys", "(", ")", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "list_datasources", "(", ")", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "datasource_name", ":", "str", "=", "\"my_datasource\"", "example_yaml", "=", "f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n credentials:\n host: localhost\n port: 5432\n username: postgres\n password:\n database: test_ci\n drivername: postgresql\n data_connectors:\n default_inferred_data_connector_name:\n class_name: InferredAssetSqlDataConnector\n name: whole_table\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\"", "datasource_from_test_yaml_config", "=", "context", ".", "test_yaml_config", "(", "example_yaml", ",", "name", "=", "datasource_name", ")", "assert", "mock_emit", ".", "call_count", "==", "1", "# Substitute anonymized name since it changes for each run", "anonymized_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_name\"", "]", "anonymized_execution_engine_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_execution_engine\"", "]", "[", "\"anonymized_name\"", "]", "anonymized_data_connector_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_data_connectors\"", "]", "[", "0", "]", "[", "\"anonymized_name\"", "]", "anonymized_data_connector_name_1", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_data_connectors\"", "]", "[", "1", "]", "[", "\"anonymized_name\"", "]", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.test_yaml_config\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_name\"", ":", "anonymized_name", ",", "\"parent_class\"", ":", "\"Datasource\"", ",", "\"anonymized_execution_engine\"", ":", "{", "\"anonymized_name\"", ":", "anonymized_execution_engine_name", ",", "\"parent_class\"", ":", "\"SqlAlchemyExecutionEngine\"", ",", "}", ",", "\"anonymized_data_connectors\"", ":", "[", "{", "\"anonymized_name\"", ":", "anonymized_data_connector_name", ",", "\"parent_class\"", ":", "\"InferredAssetSqlDataConnector\"", ",", "}", ",", "{", "\"anonymized_name\"", ":", "anonymized_data_connector_name_1", ",", "\"parent_class\"", ":", "\"RuntimeDataConnector\"", ",", "}", ",", "]", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "datasource_from_yaml", "=", "context", ".", "add_datasource", "(", "name", "=", "datasource_name", ",", "*", "*", "yaml", ".", "load", "(", "example_yaml", ")", ")", "assert", 
"mock_emit", ".", "call_count", "==", "2", "expected_call_args_list", ".", "extend", "(", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.add_datasource\"", ",", "\"event_payload\"", ":", "{", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "assert", "datasource_from_test_yaml_config", ".", "config", "==", "{", "\"execution_engine\"", ":", "{", "\"class_name\"", ":", "\"SqlAlchemyExecutionEngine\"", ",", "\"credentials\"", ":", "{", "\"host\"", ":", "\"localhost\"", ",", "\"port\"", ":", "5432", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "None", ",", "\"database\"", ":", "\"test_ci\"", ",", "\"drivername\"", ":", "\"postgresql\"", ",", "}", ",", "\"module_name\"", ":", "\"great_expectations.execution_engine\"", ",", "}", ",", "\"data_connectors\"", ":", "{", "\"default_inferred_data_connector_name\"", ":", "{", "\"class_name\"", ":", "\"InferredAssetSqlDataConnector\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "}", ",", "\"default_runtime_data_connector_name\"", ":", "{", "\"class_name\"", ":", "\"RuntimeDataConnector\"", ",", "\"batch_identifiers\"", ":", "[", "\"default_identifier_name\"", "]", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "}", ",", "}", ",", "}", "assert", "datasource_from_yaml", ".", "config", "==", "{", "\"execution_engine\"", ":", "{", "\"class_name\"", ":", "\"SqlAlchemyExecutionEngine\"", ",", "\"credentials\"", ":", "{", "\"host\"", ":", "\"localhost\"", ",", "\"port\"", ":", "5432", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "None", ",", "\"database\"", ":", "\"test_ci\"", ",", "\"drivername\"", ":", "\"postgresql\"", ",", "}", ",", "\"module_name\"", ":", "\"great_expectations.execution_engine\"", ",", "}", ",", "\"data_connectors\"", ":", "{", "\"default_inferred_data_connector_name\"", ":", "{", "\"class_name\"", ":", "\"InferredAssetSqlDataConnector\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "}", ",", "\"default_runtime_data_connector_name\"", ":", "{", "\"class_name\"", ":", "\"RuntimeDataConnector\"", ",", "\"batch_identifiers\"", ":", "[", "\"default_identifier_name\"", "]", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "}", ",", "}", ",", "}", "assert", "datasource_from_yaml", ".", "name", "==", "datasource_name", "assert", "isinstance", "(", "datasource_from_yaml", ",", "Datasource", ")", "assert", "datasource_from_yaml", ".", "__class__", ".", "__name__", "==", "\"Datasource\"", "assert", "datasource_name", "==", "context", ".", "list_datasources", "(", ")", "[", "0", "]", "[", "\"name\"", "]", "assert", "isinstance", "(", "context", ".", "datasources", "[", "datasource_name", "]", ",", "Datasource", ")", "assert", "isinstance", "(", "context", ".", "get_datasource", "(", "datasource_name", "=", "datasource_name", ")", ",", "Datasource", ",", ")", "assert", "isinstance", "(", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "[", "datasource_name", "]", ",", "DatasourceConfig", ")", "# making sure the config is right", "datasource_config", "=", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "[", "datasource_name", "]", "assert", "datasource_config", ".", "class_name", "==", "\"Datasource\"", "assert", "datasource_config", ".", "execution_engine", ".", "credentials", "==", "{", 
"\"host\"", ":", "\"localhost\"", ",", "\"port\"", ":", "5432", ",", "\"username\"", ":", "\"postgres\"", ",", "\"password\"", ":", "None", ",", "\"database\"", ":", "\"test_ci\"", ",", "\"drivername\"", ":", "\"postgresql\"", ",", "}", "assert", "datasource_config", ".", "execution_engine", ".", "credentials", "==", "OrderedDict", "(", "[", "(", "\"host\"", ",", "\"localhost\"", ")", ",", "(", "\"port\"", ",", "5432", ")", ",", "(", "\"username\"", ",", "\"postgres\"", ")", ",", "(", "\"password\"", ",", "None", ")", ",", "(", "\"database\"", ",", "\"test_ci\"", ")", ",", "(", "\"drivername\"", ",", "\"postgresql\"", ")", ",", "]", ")", "# No other usage stats calls detected", "assert", "mock_emit", ".", "call_count", "==", "2" ]
[ 2315, 0 ]
[ 2508, 36 ]
python
en
['en', 'error', 'th']
False
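A minimal sketch of the flow exercised by the record above: adding a SQL-backed Datasource from YAML with inline credentials. This is illustrative, not the test itself; it assumes an initialized great_expectations project in the working directory, a reachable local postgres matching these credentials, and PyYAML's safe_load standing in for the suite's own YAML handler (the YAML is trimmed to the essentials).

import yaml  # PyYAML stand-in for the test suite's YAML handler
from great_expectations.data_context import DataContext

# Assumes an initialized great_expectations project in the working directory
context = DataContext()

example_yaml = """
class_name: Datasource
execution_engine:
    class_name: SqlAlchemyExecutionEngine
    credentials:
        host: localhost
        port: 5432
        username: postgres
        password:
        database: test_ci
        drivername: postgresql
data_connectors:
    default_inferred_data_connector_name:
        class_name: InferredAssetSqlDataConnector
    default_runtime_data_connector_name:
        class_name: RuntimeDataConnector
        batch_identifiers:
            - default_identifier_name
"""

# test_yaml_config() validates the config (and emits a usage-stats event);
# add_datasource() actually registers and persists the datasource.
context.test_yaml_config(example_yaml, name="my_datasource")
datasource = context.add_datasource(
    name="my_datasource", **yaml.safe_load(example_yaml)
)
assert datasource.name == "my_datasource"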
test_add_datasource_from_yaml_with_substitution_variables
(mock_emit, empty_data_context_stats_enabled, monkeypatch)
What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string containing substitution variables should work as expected.
What does this test and why? Adding a datasource using context.add_datasource() via a config from a parsed yaml string containing substitution variables should work as expected.
def test_add_datasource_from_yaml_with_substitution_variables(
    mock_emit, empty_data_context_stats_enabled, monkeypatch
):
    """
    What does this test and why?
    Adding a datasource using context.add_datasource() via a config from a parsed yaml string containing substitution variables should work as expected.
    """

    context: DataContext = empty_data_context_stats_enabled

    assert "my_new_datasource" not in context.datasources.keys()
    assert "my_new_datasource" not in context.list_datasources()
    assert "my_new_datasource" not in context.get_config()["datasources"]

    datasource_name: str = "my_datasource"

    monkeypatch.setenv("SUBSTITUTED_BASE_DIRECTORY", "../data")

    example_yaml = f"""
    class_name: Datasource
    execution_engine:
        class_name: PandasExecutionEngine
    data_connectors:
        data_dir_example_data_connector:
            class_name: InferredAssetFilesystemDataConnector
            datasource_name: {datasource_name}
            base_directory: ${{SUBSTITUTED_BASE_DIRECTORY}}
            default_regex:
                group_names: data_asset_name
                pattern: (.*)
    """
    datasource_from_test_yaml_config = context.test_yaml_config(
        example_yaml, name=datasource_name
    )
    assert mock_emit.call_count == 1
    # Substitute anonymized names since it changes for each run
    anonymized_datasource_name = mock_emit.call_args_list[0][0][0]["event_payload"][
        "anonymized_name"
    ]
    anonymized_execution_engine_name = mock_emit.call_args_list[0][0][0][
        "event_payload"
    ]["anonymized_execution_engine"]["anonymized_name"]
    anonymized_data_connector_name = mock_emit.call_args_list[0][0][0]["event_payload"][
        "anonymized_data_connectors"
    ][0]["anonymized_name"]
    expected_call_args_list = [
        mock.call(
            {
                "event": "data_context.test_yaml_config",
                "event_payload": {
                    "anonymized_name": anonymized_datasource_name,
                    "parent_class": "Datasource",
                    "anonymized_execution_engine": {
                        "anonymized_name": anonymized_execution_engine_name,
                        "parent_class": "PandasExecutionEngine",
                    },
                    "anonymized_data_connectors": [
                        {
                            "anonymized_name": anonymized_data_connector_name,
                            "parent_class": "InferredAssetFilesystemDataConnector",
                        }
                    ],
                },
                "success": True,
            }
        ),
    ]
    assert mock_emit.call_args_list == expected_call_args_list

    datasource_from_yaml = context.add_datasource(
        name=datasource_name, **yaml.load(example_yaml)
    )
    assert mock_emit.call_count == 2
    expected_call_args_list.extend(
        [
            mock.call(
                {
                    "event": "data_context.add_datasource",
                    "event_payload": {},
                    "success": True,
                }
            ),
        ]
    )
    assert mock_emit.call_args_list == expected_call_args_list

    assert datasource_from_test_yaml_config.config == datasource_from_yaml.config

    assert datasource_from_yaml.name == datasource_name
    assert datasource_from_yaml.config == {
        "execution_engine": {
            "class_name": "PandasExecutionEngine",
            "module_name": "great_expectations.execution_engine",
        },
        "data_connectors": {
            "data_dir_example_data_connector": {
                "class_name": "InferredAssetFilesystemDataConnector",
                "module_name": "great_expectations.datasource.data_connector",
                "default_regex": {"group_names": "data_asset_name", "pattern": "(.*)"},
                "base_directory": "../data",
            }
        },
    }
    assert isinstance(datasource_from_yaml, Datasource)
    assert datasource_from_yaml.__class__.__name__ == "Datasource"

    assert datasource_name in [d["name"] for d in context.list_datasources()]
    assert datasource_name in context.datasources
    assert datasource_name in context.get_config()["datasources"]

    # Check that the datasource was written to disk as expected
    root_directory = context.root_directory
    del context
    context = DataContext(root_directory)

    assert datasource_name in [d["name"] for d in context.list_datasources()]
    assert datasource_name in context.datasources
    assert datasource_name in context.get_config()["datasources"]
    assert mock_emit.call_count == 3
    expected_call_args_list.extend(
        [
            mock.call(
                {
                    "event": "data_context.__init__",
                    "event_payload": {},
                    "success": True,
                }
            ),
        ]
    )
    assert mock_emit.call_args_list == expected_call_args_list
[ "def", "test_add_datasource_from_yaml_with_substitution_variables", "(", "mock_emit", ",", "empty_data_context_stats_enabled", ",", "monkeypatch", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "datasources", ".", "keys", "(", ")", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "list_datasources", "(", ")", "assert", "\"my_new_datasource\"", "not", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "datasource_name", ":", "str", "=", "\"my_datasource\"", "monkeypatch", ".", "setenv", "(", "\"SUBSTITUTED_BASE_DIRECTORY\"", ",", "\"../data\"", ")", "example_yaml", "=", "f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: PandasExecutionEngine\n data_connectors:\n data_dir_example_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n datasource_name: {datasource_name}\n base_directory: ${{SUBSTITUTED_BASE_DIRECTORY}}\n default_regex:\n group_names: data_asset_name\n pattern: (.*)\n \"\"\"", "datasource_from_test_yaml_config", "=", "context", ".", "test_yaml_config", "(", "example_yaml", ",", "name", "=", "datasource_name", ")", "assert", "mock_emit", ".", "call_count", "==", "1", "# Substitute anonymized names since it changes for each run", "anonymized_datasource_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_name\"", "]", "anonymized_execution_engine_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_execution_engine\"", "]", "[", "\"anonymized_name\"", "]", "anonymized_data_connector_name", "=", "mock_emit", ".", "call_args_list", "[", "0", "]", "[", "0", "]", "[", "0", "]", "[", "\"event_payload\"", "]", "[", "\"anonymized_data_connectors\"", "]", "[", "0", "]", "[", "\"anonymized_name\"", "]", "expected_call_args_list", "=", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.test_yaml_config\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_name\"", ":", "anonymized_datasource_name", ",", "\"parent_class\"", ":", "\"Datasource\"", ",", "\"anonymized_execution_engine\"", ":", "{", "\"anonymized_name\"", ":", "anonymized_execution_engine_name", ",", "\"parent_class\"", ":", "\"PandasExecutionEngine\"", ",", "}", ",", "\"anonymized_data_connectors\"", ":", "[", "{", "\"anonymized_name\"", ":", "anonymized_data_connector_name", ",", "\"parent_class\"", ":", "\"InferredAssetFilesystemDataConnector\"", ",", "}", "]", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "datasource_from_yaml", "=", "context", ".", "add_datasource", "(", "name", "=", "datasource_name", ",", "*", "*", "yaml", ".", "load", "(", "example_yaml", ")", ")", "assert", "mock_emit", ".", "call_count", "==", "2", "expected_call_args_list", ".", "extend", "(", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.add_datasource\"", ",", "\"event_payload\"", ":", "{", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list", "assert", "datasource_from_test_yaml_config", ".", "config", "==", "datasource_from_yaml", ".", "config", "assert", "datasource_from_yaml", ".", "name", "==", "datasource_name", "assert", "datasource_from_yaml", ".", "config", "==", 
"{", "\"execution_engine\"", ":", "{", "\"class_name\"", ":", "\"PandasExecutionEngine\"", ",", "\"module_name\"", ":", "\"great_expectations.execution_engine\"", ",", "}", ",", "\"data_connectors\"", ":", "{", "\"data_dir_example_data_connector\"", ":", "{", "\"class_name\"", ":", "\"InferredAssetFilesystemDataConnector\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"default_regex\"", ":", "{", "\"group_names\"", ":", "\"data_asset_name\"", ",", "\"pattern\"", ":", "\"(.*)\"", "}", ",", "\"base_directory\"", ":", "\"../data\"", ",", "}", "}", ",", "}", "assert", "isinstance", "(", "datasource_from_yaml", ",", "Datasource", ")", "assert", "datasource_from_yaml", ".", "__class__", ".", "__name__", "==", "\"Datasource\"", "assert", "datasource_name", "in", "[", "d", "[", "\"name\"", "]", "for", "d", "in", "context", ".", "list_datasources", "(", ")", "]", "assert", "datasource_name", "in", "context", ".", "datasources", "assert", "datasource_name", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "# Check that the datasource was written to disk as expected", "root_directory", "=", "context", ".", "root_directory", "del", "context", "context", "=", "DataContext", "(", "root_directory", ")", "assert", "datasource_name", "in", "[", "d", "[", "\"name\"", "]", "for", "d", "in", "context", ".", "list_datasources", "(", ")", "]", "assert", "datasource_name", "in", "context", ".", "datasources", "assert", "datasource_name", "in", "context", ".", "get_config", "(", ")", "[", "\"datasources\"", "]", "assert", "mock_emit", ".", "call_count", "==", "3", "expected_call_args_list", ".", "extend", "(", "[", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"event_payload\"", ":", "{", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", ")", "assert", "mock_emit", ".", "call_args_list", "==", "expected_call_args_list" ]
[ 2514, 0 ]
[ 2644, 62 ]
python
en
['en', 'error', 'th']
False
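A hedged sketch of the substitution-variable flow in the record above: an environment variable referenced as ${SUBSTITUTED_BASE_DIRECTORY} in the YAML is resolved when the datasource is added. It assumes an initialized great_expectations project and a ../data directory; os.environ plays the role of the test's monkeypatch.setenv, and PyYAML again stands in for the suite's YAML handler.

import os
import yaml  # PyYAML stand-in for the test suite's YAML handler
from great_expectations.data_context import DataContext

os.environ["SUBSTITUTED_BASE_DIRECTORY"] = "../data"  # monkeypatch.setenv in the test
context = DataContext()  # assumes an initialized project

example_yaml = """
class_name: Datasource
execution_engine:
    class_name: PandasExecutionEngine
data_connectors:
    data_dir_example_data_connector:
        class_name: InferredAssetFilesystemDataConnector
        base_directory: ${SUBSTITUTED_BASE_DIRECTORY}
        default_regex:
            group_names: data_asset_name
            pattern: (.*)
"""

# The context resolves ${SUBSTITUTED_BASE_DIRECTORY} from the environment,
# so the stored config ends up with base_directory == "../data".
datasource = context.add_datasource(
    name="my_datasource", **yaml.safe_load(example_yaml)
)
connector = datasource.config["data_connectors"]["data_dir_example_data_connector"]
assert connector["base_directory"] == "../data"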
BaseCase.open
(self, url)
Navigates the current browser window to the specified page.
Navigates the current browser window to the specified page.
def open(self, url):
    """ Navigates the current browser window to the specified page. """
    self.__last_page_load_url = None
    if url.startswith("://"):
        # Convert URLs such as "://google.com" into "https://google.com"
        url = "https" + url
    self.driver.get(url)
    if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
        self.wait_for_ready_state_complete()
    self.__demo_mode_pause_if_active()
[ "def", "open", "(", "self", ",", "url", ")", ":", "self", ".", "__last_page_load_url", "=", "None", "if", "url", ".", "startswith", "(", "\"://\"", ")", ":", "# Convert URLs such as \"://google.com\" into \"https://google.com\"", "url", "=", "\"https\"", "+", "url", "self", ".", "driver", ".", "get", "(", "url", ")", "if", "settings", ".", "WAIT_FOR_RSC_ON_PAGE_LOADS", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "self", ".", "__demo_mode_pause_if_active", "(", ")" ]
[ 91, 4 ]
[ 100, 42 ]
python
en
['en', 'en', 'en']
True
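A minimal usage sketch for open() in a SeleniumBase test (the target URL is illustrative): scheme-less "://" URLs are upgraded to "https://" before the navigation runs.

from seleniumbase import BaseCase

class OpenExample(BaseCase):
    def test_open(self):
        # "://seleniumbase.io" is rewritten to "https://seleniumbase.io"
        self.open("://seleniumbase.io")
        # By the time open() returns, it has waited for page readiness
        # (when settings.WAIT_FOR_RSC_ON_PAGE_LOADS is enabled)
        assert "seleniumbase.io" in self.driver.current_url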
BaseCase.get
(self, url)
If url looks like a page URL, opens the URL in the web browser. Otherwise, returns self.get_element(URL_AS_A_SELECTOR) Examples: self.get("https://seleniumbase.io") # Navigates to the URL self.get("input.class") # Finds and returns the WebElement
If url looks like a page URL, opens the URL in the web browser. Otherwise, returns self.get_element(URL_AS_A_SELECTOR) Examples: self.get("https://seleniumbase.io") # Navigates to the URL self.get("input.class") # Finds and returns the WebElement
def get(self, url):
    """ If url looks like a page URL, opens the URL in the web browser.
        Otherwise, returns self.get_element(URL_AS_A_SELECTOR)
        Examples:
            self.get("https://seleniumbase.io")  # Navigates to the URL
            self.get("input.class")  # Finds and returns the WebElement
    """
    if self.__looks_like_a_page_url(url):
        self.open(url)
    else:
        return self.get_element(url)
[ "def", "get", "(", "self", ",", "url", ")", ":", "if", "self", ".", "__looks_like_a_page_url", "(", "url", ")", ":", "self", ".", "open", "(", "url", ")", "else", ":", "return", "self", ".", "get_element", "(", "url", ")" ]
[ 102, 4 ]
[ 112, 40 ]
python
en
['en', 'en', 'en']
True
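A sketch of get()'s URL-vs-selector dispatch, mirroring the docstring's own examples (the CSS selector is illustrative and may not exist on the page):

from seleniumbase import BaseCase

class GetExample(BaseCase):
    def test_get_dispatch(self):
        self.get("https://seleniumbase.io")  # looks like a URL -> open()
        element = self.get("input.class")    # otherwise -> get_element(selector)
        assert element is not None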
BaseCase.slow_click
(self, selector, by=By.CSS_SELECTOR, timeout=None)
Similar to click(), but pauses for a brief moment before clicking. When used in combination with setting the user-agent, you can often bypass bot-detection by tricking websites into thinking that you're not a bot. (Useful on websites that block web automation tools.) To set the user-agent, use: ``--agent=AGENT``. Here's an example message from GitHub's bot-blocker: ``You have triggered an abuse detection mechanism...``
Similar to click(), but pauses for a brief moment before clicking. When used in combination with setting the user-agent, you can often bypass bot-detection by tricking websites into thinking that you're not a bot. (Useful on websites that block web automation tools.) To set the user-agent, use: ``--agent=AGENT``. Here's an example message from GitHub's bot-blocker: ``You have triggered an abuse detection mechanism...``
def slow_click(self, selector, by=By.CSS_SELECTOR, timeout=None):
    """ Similar to click(), but pauses for a brief moment before clicking.
        When used in combination with setting the user-agent, you can often
        bypass bot-detection by tricking websites into thinking that you're
        not a bot. (Useful on websites that block web automation tools.)
        To set the user-agent, use: ``--agent=AGENT``.
        Here's an example message from GitHub's bot-blocker:
        ``You have triggered an abuse detection mechanism...``
    """
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    if not self.demo_mode:
        self.click(selector, by=by, timeout=timeout, delay=1.05)
    else:
        # Demo Mode already includes a small delay
        self.click(selector, by=by, timeout=timeout, delay=0.25)
[ "def", "slow_click", "(", "self", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "None", ")", ":", "if", "not", "timeout", ":", "timeout", "=", "settings", ".", "SMALL_TIMEOUT", "if", "self", ".", "timeout_multiplier", "and", "timeout", "==", "settings", ".", "SMALL_TIMEOUT", ":", "timeout", "=", "self", ".", "__get_new_timeout", "(", "timeout", ")", "if", "not", "self", ".", "demo_mode", ":", "self", ".", "click", "(", "selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ",", "delay", "=", "1.05", ")", "else", ":", "# Demo Mode already includes a small delay", "self", ".", "click", "(", "selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ",", "delay", "=", "0.25", ")" ]
[ 185, 4 ]
[ 201, 68 ]
python
en
['en', 'en', 'en']
True
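A sketch pairing slow_click() with a custom user-agent, as the docstring suggests; the URL, selector, and agent string below are illustrative, not from the source.

# Run the test with a spoofed user-agent, e.g.:
#   pytest test_example.py --agent="Mozilla/5.0 (X11; Linux x86_64)"
from seleniumbase import BaseCase

class SlowClickExample(BaseCase):
    def test_slow_click(self):
        self.open("https://example.com/login")  # illustrative target page
        # Outside of Demo Mode this pauses ~1.05s before clicking,
        # which reads as more human to simple bot-detection heuristics
        self.slow_click("button#submit")  # illustrative selector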
BaseCase.click_chain
(self, selectors_list, by=By.CSS_SELECTOR, timeout=None, spacing=0)
This method clicks on a list of elements in succession. 'spacing' is the amount of time to wait between clicks. (sec)
This method clicks on a list of elements in succession. 'spacing' is the amount of time to wait between clicks. (sec)
def click_chain(self, selectors_list, by=By.CSS_SELECTOR,
                timeout=None, spacing=0):
    """ This method clicks on a list of elements in succession.
        'spacing' is the amount of time to wait between clicks. (sec) """
    if not timeout:
        timeout = settings.SMALL_TIMEOUT
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    for selector in selectors_list:
        self.click(selector, by=by, timeout=timeout)
        if spacing > 0:
            time.sleep(spacing)
[ "def", "click_chain", "(", "self", ",", "selectors_list", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "None", ",", "spacing", "=", "0", ")", ":", "if", "not", "timeout", ":", "timeout", "=", "settings", ".", "SMALL_TIMEOUT", "if", "self", ".", "timeout_multiplier", "and", "timeout", "==", "settings", ".", "SMALL_TIMEOUT", ":", "timeout", "=", "self", ".", "__get_new_timeout", "(", "timeout", ")", "for", "selector", "in", "selectors_list", ":", "self", ".", "click", "(", "selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ")", "if", "spacing", ">", "0", ":", "time", ".", "sleep", "(", "spacing", ")" ]
[ 236, 4 ]
[ 247, 35 ]
python
en
['en', 'en', 'en']
True
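A sketch of click_chain() (page and selectors are illustrative). Note that the sleep happens inside the loop, so the spacing delay follows every click, including the last one.

from seleniumbase import BaseCase

class ClickChainExample(BaseCase):
    def test_click_chain(self):
        self.open("https://example.com")  # illustrative page
        # Each selector is clicked in order, with 0.5s after every click
        self.click_chain(["#step-1", "#step-2", "#step-3"], spacing=0.5)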
BaseCase.update_text
(self, selector, text, by=By.CSS_SELECTOR, timeout=None, retry=False)
This method updates an element's text field with new text. Has multiple parts: * Waits for the element to be visible. * Waits for the element to be interactive. * Clears the text field. * Types in the new text. * Hits Enter/Submit (if the text ends in "\n"). @Params selector - the selector of the text field text - the new text to type into the text field by - the type of selector to search by (Default: CSS Selector) timeout - how long to wait for the selector to be visible retry - if True, use JS if the Selenium text update fails
This method updates an element's text field with new text. Has multiple parts: * Waits for the element to be visible. * Waits for the element to be interactive. * Clears the text field. * Types in the new text. * Hits Enter/Submit (if the text ends in "\n").
def update_text(self, selector, text, by=By.CSS_SELECTOR,
                timeout=None, retry=False):
    """ This method updates an element's text field with new text.
        Has multiple parts:
        * Waits for the element to be visible.
        * Waits for the element to be interactive.
        * Clears the text field.
        * Types in the new text.
        * Hits Enter/Submit (if the text ends in "\n").
        @Params
        selector - the selector of the text field
        text - the new text to type into the text field
        by - the type of selector to search by (Default: CSS Selector)
        timeout - how long to wait for the selector to be visible
        retry - if True, use JS if the Selenium text update fails
    """
    if not timeout:
        timeout = settings.LARGE_TIMEOUT
    if self.timeout_multiplier and timeout == settings.LARGE_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    selector, by = self.__recalculate_selector(selector, by)
    element = self.wait_for_element_visible(
        selector, by=by, timeout=timeout)
    self.__demo_mode_highlight_if_active(selector, by)
    if not self.demo_mode:
        self.__scroll_to_element(element, selector, by)
    try:
        element.clear()
    except (StaleElementReferenceException, ENI_Exception):
        self.wait_for_ready_state_complete()
        time.sleep(0.06)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=timeout)
        try:
            element.clear()
        except Exception:
            pass  # Clearing the text field first isn't critical
    except Exception:
        pass  # Clearing the text field first isn't critical
    self.__demo_mode_pause_if_active(tiny=True)
    pre_action_url = self.driver.current_url
    if type(text) is int or type(text) is float:
        text = str(text)
    try:
        if not text.endswith('\n'):
            element.send_keys(text)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
        else:
            element.send_keys(text[:-1])
            element.send_keys(Keys.RETURN)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
    except (StaleElementReferenceException, ENI_Exception):
        self.wait_for_ready_state_complete()
        time.sleep(0.06)
        element = self.wait_for_element_visible(
            selector, by=by, timeout=timeout)
        element.clear()
        if not text.endswith('\n'):
            element.send_keys(text)
        else:
            element.send_keys(text[:-1])
            element.send_keys(Keys.RETURN)
            if settings.WAIT_FOR_RSC_ON_PAGE_LOADS:
                self.wait_for_ready_state_complete()
    except Exception:
        exc_message = self.__get_improved_exception_message()
        raise Exception(exc_message)
    if (retry and element.get_attribute('value') != text and (
            not text.endswith('\n'))):
        logging.debug('update_text() is falling back to JavaScript!')
        self.set_value(selector, text, by=by)
    if self.demo_mode:
        if self.driver.current_url != pre_action_url:
            self.__demo_mode_pause_if_active()
        else:
            self.__demo_mode_pause_if_active(tiny=True)
    elif self.slow_mode:
        self.__slow_mode_pause_if_active()
[ "def", "update_text", "(", "self", ",", "selector", ",", "text", ",", "by", "=", "By", ".", "CSS_SELECTOR", ",", "timeout", "=", "None", ",", "retry", "=", "False", ")", ":", "if", "not", "timeout", ":", "timeout", "=", "settings", ".", "LARGE_TIMEOUT", "if", "self", ".", "timeout_multiplier", "and", "timeout", "==", "settings", ".", "LARGE_TIMEOUT", ":", "timeout", "=", "self", ".", "__get_new_timeout", "(", "timeout", ")", "selector", ",", "by", "=", "self", ".", "__recalculate_selector", "(", "selector", ",", "by", ")", "element", "=", "self", ".", "wait_for_element_visible", "(", "selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ")", "self", ".", "__demo_mode_highlight_if_active", "(", "selector", ",", "by", ")", "if", "not", "self", ".", "demo_mode", ":", "self", ".", "__scroll_to_element", "(", "element", ",", "selector", ",", "by", ")", "try", ":", "element", ".", "clear", "(", ")", "except", "(", "StaleElementReferenceException", ",", "ENI_Exception", ")", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "time", ".", "sleep", "(", "0.06", ")", "element", "=", "self", ".", "wait_for_element_visible", "(", "selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ")", "try", ":", "element", ".", "clear", "(", ")", "except", "Exception", ":", "pass", "# Clearing the text field first isn't critical", "except", "Exception", ":", "pass", "# Clearing the text field first isn't critical", "self", ".", "__demo_mode_pause_if_active", "(", "tiny", "=", "True", ")", "pre_action_url", "=", "self", ".", "driver", ".", "current_url", "if", "type", "(", "text", ")", "is", "int", "or", "type", "(", "text", ")", "is", "float", ":", "text", "=", "str", "(", "text", ")", "try", ":", "if", "not", "text", ".", "endswith", "(", "'\\n'", ")", ":", "element", ".", "send_keys", "(", "text", ")", "if", "settings", ".", "WAIT_FOR_RSC_ON_PAGE_LOADS", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "else", ":", "element", ".", "send_keys", "(", "text", "[", ":", "-", "1", "]", ")", "element", ".", "send_keys", "(", "Keys", ".", "RETURN", ")", "if", "settings", ".", "WAIT_FOR_RSC_ON_PAGE_LOADS", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "except", "(", "StaleElementReferenceException", ",", "ENI_Exception", ")", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "time", ".", "sleep", "(", "0.06", ")", "element", "=", "self", ".", "wait_for_element_visible", "(", "selector", ",", "by", "=", "by", ",", "timeout", "=", "timeout", ")", "element", ".", "clear", "(", ")", "if", "not", "text", ".", "endswith", "(", "'\\n'", ")", ":", "element", ".", "send_keys", "(", "text", ")", "else", ":", "element", ".", "send_keys", "(", "text", "[", ":", "-", "1", "]", ")", "element", ".", "send_keys", "(", "Keys", ".", "RETURN", ")", "if", "settings", ".", "WAIT_FOR_RSC_ON_PAGE_LOADS", ":", "self", ".", "wait_for_ready_state_complete", "(", ")", "except", "Exception", ":", "exc_message", "=", "self", ".", "__get_improved_exception_message", "(", ")", "raise", "Exception", "(", "exc_message", ")", "if", "(", "retry", "and", "element", ".", "get_attribute", "(", "'value'", ")", "!=", "text", "and", "(", "not", "text", ".", "endswith", "(", "'\\n'", ")", ")", ")", ":", "logging", ".", "debug", "(", "'update_text() is falling back to JavaScript!'", ")", "self", ".", "set_value", "(", "selector", ",", "text", ",", "by", "=", "by", ")", "if", "self", ".", "demo_mode", ":", "if", "self", ".", "driver", ".", "current_url", "!=", "pre_action_url", ":", "self", 
".", "__demo_mode_pause_if_active", "(", ")", "else", ":", "self", ".", "__demo_mode_pause_if_active", "(", "tiny", "=", "True", ")", "elif", "self", ".", "slow_mode", ":", "self", ".", "__slow_mode_pause_if_active", "(", ")" ]
[ 249, 4 ]
[ 328, 46 ]
python
en
['en', 'en', 'en']
True
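A sketch of update_text() (page and selector are illustrative): a trailing "\n" makes the method press RETURN after typing, and retry=True falls back to the JavaScript-based set_value() if the typed value doesn't stick.

from seleniumbase import BaseCase

class UpdateTextExample(BaseCase):
    def test_update_text(self):
        self.open("https://google.com")  # illustrative page
        # Trailing "\n" => send_keys(Keys.RETURN) after typing the text;
        # retry=True retries via set_value() if the value doesn't match
        self.update_text('input[name="q"]', "SeleniumBase\n", retry=True)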