Search is not available for this dataset
identifier
stringlengths
1
155
parameters
stringlengths
2
6.09k
docstring
stringlengths
11
63.4k
docstring_summary
stringlengths
0
63.4k
function
stringlengths
29
99.8k
function_tokens
sequence
start_point
sequence
end_point
sequence
language
stringclasses
1 value
docstring_language
stringlengths
2
7
docstring_language_predictions
stringlengths
18
23
is_langid_reliable
stringclasses
2 values
_instantiate_datasource_from_config
( self, name: str, config: dict )
Instantiate a new datasource to the data context, with configuration provided as kwargs. Args: name(str): name of datasource config(dict): dictionary of configuration Returns: datasource (Datasource)
Instantiate a new datasource to the data context, with configuration provided as kwargs. Args: name(str): name of datasource config(dict): dictionary of configuration
def _instantiate_datasource_from_config(
    self, name: str, config: dict
) -> Union[LegacyDatasource, BaseDatasource]:
    """Instantiate a new datasource from a configuration dictionary.

    Variable substitution has already been performed by the data context
    before this point, so the datasource never sees unsubstituted
    variables in its config.

    Args:
        name(str): name of datasource
        config(dict): dictionary of configuration

    Returns:
        datasource (Datasource)

    Raises:
        DatasourceInitializationError: if construction of the datasource fails.
    """
    try:
        return self._build_datasource_from_config(name=name, config=config)
    except Exception as build_error:
        # Surface any construction failure as a single well-typed error.
        raise ge_exceptions.DatasourceInitializationError(
            datasource_name=name, message=str(build_error)
        )
[ "def", "_instantiate_datasource_from_config", "(", "self", ",", "name", ":", "str", ",", "config", ":", "dict", ")", "->", "Union", "[", "LegacyDatasource", ",", "BaseDatasource", "]", ":", "# We perform variable substitution in the datasource's config here before using the config", "# to instantiate the datasource object. Variable substitution is a service that the data", "# context provides. Datasources should not see unsubstituted variables in their config.", "try", ":", "datasource", ":", "Union", "[", "LegacyDatasource", ",", "BaseDatasource", "]", "=", "self", ".", "_build_datasource_from_config", "(", "name", "=", "name", ",", "config", "=", "config", ")", "except", "Exception", "as", "e", ":", "raise", "ge_exceptions", ".", "DatasourceInitializationError", "(", "datasource_name", "=", "name", ",", "message", "=", "str", "(", "e", ")", ")", "return", "datasource" ]
[ 1863, 4 ]
[ 1886, 25 ]
python
en
['en', 'en', 'en']
True
add_batch_kwargs_generator
( self, datasource_name, batch_kwargs_generator_name, class_name, **kwargs )
Add a batch kwargs generator to the named datasource, using the provided configuration. Args: datasource_name: name of datasource to which to add the new batch kwargs generator batch_kwargs_generator_name: name of the generator to add class_name: class of the batch kwargs generator to add **kwargs: batch kwargs generator configuration, provided as kwargs Returns:
Add a batch kwargs generator to the named datasource, using the provided configuration.
def add_batch_kwargs_generator(
    self, datasource_name, batch_kwargs_generator_name, class_name, **kwargs
):
    """
    Add a batch kwargs generator to the named datasource, using the provided
    configuration.

    Args:
        datasource_name: name of datasource to which to add the new batch kwargs generator
        batch_kwargs_generator_name: name of the generator to add
        class_name: class of the batch kwargs generator to add
        **kwargs: batch kwargs generator configuration, provided as kwargs

    Returns:
        The newly created batch kwargs generator.
    """
    # The datasource owns its generator registry, so resolve it first.
    target_datasource = self.get_datasource(datasource_name)
    return target_datasource.add_batch_kwargs_generator(
        name=batch_kwargs_generator_name, class_name=class_name, **kwargs
    )
[ "def", "add_batch_kwargs_generator", "(", "self", ",", "datasource_name", ",", "batch_kwargs_generator_name", ",", "class_name", ",", "*", "*", "kwargs", ")", ":", "datasource_obj", "=", "self", ".", "get_datasource", "(", "datasource_name", ")", "generator", "=", "datasource_obj", ".", "add_batch_kwargs_generator", "(", "name", "=", "batch_kwargs_generator_name", ",", "class_name", "=", "class_name", ",", "*", "*", "kwargs", ")", "return", "generator" ]
[ 1888, 4 ]
[ 1908, 24 ]
python
en
['en', 'error', 'th']
False
get_datasource
( self, datasource_name: str = "default" )
Get the named datasource Args: datasource_name (str): the name of the datasource from the configuration Returns: datasource (Datasource)
Get the named datasource
def get_datasource(
    self, datasource_name: str = "default"
) -> Optional[Union[LegacyDatasource, BaseDatasource]]:
    """Get the named datasource

    Args:
        datasource_name (str): the name of the datasource from the configuration

    Returns:
        datasource (Datasource)

    Raises:
        ValueError: if no configuration exists for the requested name.
    """
    # Serve from the cache when this datasource was already instantiated.
    try:
        return self._cached_datasources[datasource_name]
    except KeyError:
        pass

    configured = self.project_config_with_variables_substituted.datasources
    if datasource_name not in configured:
        raise ValueError(
            f"Unable to load datasource `{datasource_name}` -- no configuration found or invalid configuration."
        )

    # Deep-copy so that schema round-tripping cannot mutate the project config.
    datasource_config: DatasourceConfig = copy.deepcopy(configured[datasource_name])
    config: dict = dict(datasourceConfigSchema.dump(datasource_config))
    datasource = self._instantiate_datasource_from_config(
        name=datasource_name, config=config
    )
    self._cached_datasources[datasource_name] = datasource
    return datasource
[ "def", "get_datasource", "(", "self", ",", "datasource_name", ":", "str", "=", "\"default\"", ")", "->", "Optional", "[", "Union", "[", "LegacyDatasource", ",", "BaseDatasource", "]", "]", ":", "if", "datasource_name", "in", "self", ".", "_cached_datasources", ":", "return", "self", ".", "_cached_datasources", "[", "datasource_name", "]", "if", "(", "datasource_name", "in", "self", ".", "project_config_with_variables_substituted", ".", "datasources", ")", ":", "datasource_config", ":", "DatasourceConfig", "=", "copy", ".", "deepcopy", "(", "self", ".", "project_config_with_variables_substituted", ".", "datasources", "[", "datasource_name", "]", ")", "else", ":", "raise", "ValueError", "(", "f\"Unable to load datasource `{datasource_name}` -- no configuration found or invalid configuration.\"", ")", "config", ":", "dict", "=", "dict", "(", "datasourceConfigSchema", ".", "dump", "(", "datasource_config", ")", ")", "datasource", ":", "Optional", "[", "Union", "[", "LegacyDatasource", ",", "BaseDatasource", "]", "]", "=", "self", ".", "_instantiate_datasource_from_config", "(", "name", "=", "datasource_name", ",", "config", "=", "config", ")", "self", ".", "_cached_datasources", "[", "datasource_name", "]", "=", "datasource", "return", "datasource" ]
[ 1960, 4 ]
[ 1993, 25 ]
python
en
['en', 'en', 'en']
True
list_expectation_suites
(self)
Return a list of available expectation suite names.
Return a list of available expectation suite names.
def list_expectation_suites(self):
    """Return a list of available expectation suite names."""
    try:
        return self.expectations_store.list_keys()
    except KeyError as key_err:
        # A missing store means the project configuration is broken,
        # not that a particular key is absent.
        raise ge_exceptions.InvalidConfigError(
            "Unable to find configured store: %s" % str(key_err)
        )
[ "def", "list_expectation_suites", "(", "self", ")", ":", "try", ":", "keys", "=", "self", ".", "expectations_store", ".", "list_keys", "(", ")", "except", "KeyError", "as", "e", ":", "raise", "ge_exceptions", ".", "InvalidConfigError", "(", "\"Unable to find configured store: %s\"", "%", "str", "(", "e", ")", ")", "return", "keys" ]
[ 1995, 4 ]
[ 2003, 19 ]
python
en
['en', 'en', 'en']
True
list_datasources
(self)
List currently-configured datasources on this context. Masks passwords. Returns: List(dict): each dictionary includes "name", "class_name", and "module_name" keys
List currently-configured datasources on this context. Masks passwords.
def list_datasources(self):
    """List currently-configured datasources on this context. Masks passwords.

    Returns:
        List(dict): each dictionary includes "name", "class_name", and "module_name" keys
    """
    listing = []
    configured = self.project_config_with_variables_substituted.datasources
    for datasource_name, datasource_value in configured.items():
        datasource_value["name"] = datasource_name
        # Never expose raw secrets in a listing.
        if "credentials" in datasource_value:
            creds = datasource_value["credentials"]
            if "password" in creds:
                creds["password"] = PasswordMasker.MASKED_PASSWORD_STRING
            if "url" in creds:
                creds["url"] = PasswordMasker.mask_db_url(creds["url"])
        listing.append(datasource_value)
    return listing
[ "def", "list_datasources", "(", "self", ")", ":", "datasources", "=", "[", "]", "for", "(", "key", ",", "value", ",", ")", "in", "self", ".", "project_config_with_variables_substituted", ".", "datasources", ".", "items", "(", ")", ":", "value", "[", "\"name\"", "]", "=", "key", "if", "\"credentials\"", "in", "value", ":", "if", "\"password\"", "in", "value", "[", "\"credentials\"", "]", ":", "value", "[", "\"credentials\"", "]", "[", "\"password\"", "]", "=", "PasswordMasker", ".", "MASKED_PASSWORD_STRING", "if", "\"url\"", "in", "value", "[", "\"credentials\"", "]", ":", "value", "[", "\"credentials\"", "]", "[", "\"url\"", "]", "=", "PasswordMasker", ".", "mask_db_url", "(", "value", "[", "\"credentials\"", "]", "[", "\"url\"", "]", ")", "datasources", ".", "append", "(", "value", ")", "return", "datasources" ]
[ 2005, 4 ]
[ 2029, 26 ]
python
en
['en', 'en', 'en']
True
list_stores
(self)
List currently-configured Stores on this context
List currently-configured Stores on this context
def list_stores(self):
    """List currently-configured Stores on this context"""
    configured = self.project_config_with_variables_substituted.stores
    store_listing = []
    for store_name, store_config in configured.items():
        # Inject the configured key as a "name" field on each entry.
        store_config["name"] = store_name
        store_listing.append(store_config)
    return store_listing
[ "def", "list_stores", "(", "self", ")", ":", "stores", "=", "[", "]", "for", "(", "name", ",", "value", ",", ")", "in", "self", ".", "project_config_with_variables_substituted", ".", "stores", ".", "items", "(", ")", ":", "value", "[", "\"name\"", "]", "=", "name", "stores", ".", "append", "(", "value", ")", "return", "stores" ]
[ 2031, 4 ]
[ 2041, 21 ]
python
en
['en', 'en', 'en']
True
list_active_stores
(self)
List active Stores on this context. Active stores are identified by setting the following parameters: expectations_store_name, validations_store_name, evaluation_parameter_store_name, checkpoint_store_name
List active Stores on this context. Active stores are identified by setting the following parameters: expectations_store_name, validations_store_name, evaluation_parameter_store_name, checkpoint_store_name
def list_active_stores(self):
    """
    List active Stores on this context. Active stores are identified by setting the following parameters:
        expectations_store_name,
        validations_store_name,
        evaluation_parameter_store_name,
        checkpoint_store_name
    """
    # These three store names are always defined on a context.
    candidate_names = [
        self.expectations_store_name,
        self.validations_store_name,
        self.evaluation_parameter_store_name,
    ]
    # The checkpoint store is optional on older project configurations,
    # so tolerate its absence.
    try:
        candidate_names.append(self.checkpoint_store_name)
    except (AttributeError, ge_exceptions.InvalidTopLevelConfigKeyError):
        pass
    return [
        entry for entry in self.list_stores() if entry["name"] in candidate_names
    ]
[ "def", "list_active_stores", "(", "self", ")", ":", "active_store_names", ":", "List", "[", "str", "]", "=", "[", "self", ".", "expectations_store_name", ",", "self", ".", "validations_store_name", ",", "self", ".", "evaluation_parameter_store_name", ",", "]", "try", ":", "active_store_names", ".", "append", "(", "self", ".", "checkpoint_store_name", ")", "except", "(", "AttributeError", ",", "ge_exceptions", ".", "InvalidTopLevelConfigKeyError", ")", ":", "pass", "return", "[", "store", "for", "store", "in", "self", ".", "list_stores", "(", ")", "if", "store", "[", "\"name\"", "]", "in", "active_store_names", "]" ]
[ 2043, 4 ]
[ 2063, 9 ]
python
en
['en', 'error', 'th']
False
list_validation_operators
(self)
List currently-configured Validation Operators on this context
List currently-configured Validation Operators on this context
def list_validation_operators(self):
    """List currently-configured Validation Operators on this context"""
    configured = (
        self.project_config_with_variables_substituted.validation_operators
    )
    operator_listing = []
    for operator_name, operator_config in configured.items():
        # Inject the configured key as a "name" field on each entry.
        operator_config["name"] = operator_name
        operator_listing.append(operator_config)
    return operator_listing
[ "def", "list_validation_operators", "(", "self", ")", ":", "validation_operators", "=", "[", "]", "for", "(", "name", ",", "value", ",", ")", "in", "(", "self", ".", "project_config_with_variables_substituted", ".", "validation_operators", ".", "items", "(", ")", ")", ":", "value", "[", "\"name\"", "]", "=", "name", "validation_operators", ".", "append", "(", "value", ")", "return", "validation_operators" ]
[ 2065, 4 ]
[ 2077, 35 ]
python
en
['en', 'en', 'en']
True
create_expectation_suite
( self, expectation_suite_name: str, overwrite_existing: Optional[bool] = False )
Build a new expectation suite and save it into the data_context expectation store. Args: expectation_suite_name: The name of the expectation_suite to create overwrite_existing (boolean): Whether to overwrite expectation suite if expectation suite with given name already exists. Returns: A new (empty) expectation suite.
Build a new expectation suite and save it into the data_context expectation store.
def create_expectation_suite(
    self, expectation_suite_name: str, overwrite_existing: Optional[bool] = False
) -> ExpectationSuite:
    """Build a new expectation suite and save it into the data_context expectation store.

    Args:
        expectation_suite_name: The name of the expectation_suite to create
        overwrite_existing (boolean): Whether to overwrite expectation suite
            if expectation suite with given name already exists.

    Returns:
        A new (empty) expectation suite.

    Raises:
        ValueError: if ``overwrite_existing`` is not a bool.
        DataContextError: if the suite exists and overwriting was not requested.
    """
    if not isinstance(overwrite_existing, bool):
        raise ValueError("Parameter overwrite_existing must be of type BOOL")

    new_suite = ExpectationSuite(expectation_suite_name=expectation_suite_name)
    suite_key = ExpectationSuiteIdentifier(
        expectation_suite_name=expectation_suite_name
    )
    # Refuse to clobber an existing suite unless explicitly asked to.
    if self.expectations_store.has_key(suite_key) and not overwrite_existing:
        raise ge_exceptions.DataContextError(
            "expectation_suite with name {} already exists. If you would like to overwrite this "
            "expectation_suite, set overwrite_existing=True.".format(
                expectation_suite_name
            )
        )
    self.expectations_store.set(suite_key, new_suite)
    return new_suite
[ "def", "create_expectation_suite", "(", "self", ",", "expectation_suite_name", ":", "str", ",", "overwrite_existing", ":", "Optional", "[", "bool", "]", "=", "False", ")", "->", "ExpectationSuite", ":", "if", "not", "isinstance", "(", "overwrite_existing", ",", "bool", ")", ":", "raise", "ValueError", "(", "\"Parameter overwrite_existing must be of type BOOL\"", ")", "expectation_suite", ":", "ExpectationSuite", "=", "ExpectationSuite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "key", ":", "ExpectationSuiteIdentifier", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "if", "self", ".", "expectations_store", ".", "has_key", "(", "key", ")", "and", "not", "overwrite_existing", ":", "raise", "ge_exceptions", ".", "DataContextError", "(", "\"expectation_suite with name {} already exists. If you would like to overwrite this \"", "\"expectation_suite, set overwrite_existing=True.\"", ".", "format", "(", "expectation_suite_name", ")", ")", "else", ":", "self", ".", "expectations_store", ".", "set", "(", "key", ",", "expectation_suite", ")", "return", "expectation_suite" ]
[ 2079, 4 ]
[ 2112, 32 ]
python
en
['en', 'en', 'en']
True
delete_expectation_suite
(self, expectation_suite_name)
Delete specified expectation suite from data_context expectation store. Args: expectation_suite_name: The name of the expectation_suite to create Returns: True for Success and False for Failure.
Delete specified expectation suite from data_context expectation store.
def delete_expectation_suite(self, expectation_suite_name):
    """Delete specified expectation suite from data_context expectation store.

    Args:
        expectation_suite_name: The name of the expectation_suite to delete

    Returns:
        True for Success and False for Failure.

    Raises:
        DataContextError: if no suite with the given name exists.
    """
    key = ExpectationSuiteIdentifier(expectation_suite_name)
    if not self.expectations_store.has_key(key):
        # Bug fix: the original message contained a bare "{}" placeholder
        # because .format() was never called; interpolate the suite name.
        raise ge_exceptions.DataContextError(
            "expectation_suite with name {} does not exist.".format(
                expectation_suite_name
            )
        )
    self.expectations_store.remove_key(key)
    return True
[ "def", "delete_expectation_suite", "(", "self", ",", "expectation_suite_name", ")", ":", "key", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", ")", "if", "not", "self", ".", "expectations_store", ".", "has_key", "(", "key", ")", ":", "raise", "ge_exceptions", ".", "DataContextError", "(", "\"expectation_suite with name {} does not exist.\"", ")", "else", ":", "self", ".", "expectations_store", ".", "remove_key", "(", "key", ")", "return", "True" ]
[ 2114, 4 ]
[ 2130, 23 ]
python
en
['en', 'en', 'en']
True
get_expectation_suite
(self, expectation_suite_name: str)
Get a named expectation suite for the provided data_asset_name. Args: expectation_suite_name (str): the name for the expectation suite Returns: expectation_suite
Get a named expectation suite for the provided data_asset_name.
def get_expectation_suite(self, expectation_suite_name: str) -> ExpectationSuite:
    """Get a named expectation suite for the provided data_asset_name.

    Args:
        expectation_suite_name (str): the name for the expectation suite

    Returns:
        expectation_suite

    Raises:
        DataContextError: if no suite with the given name exists.
    """
    suite_key: ExpectationSuiteIdentifier = ExpectationSuiteIdentifier(
        expectation_suite_name=expectation_suite_name
    )
    # Guard first so the happy path is the final statement.
    if not self.expectations_store.has_key(suite_key):
        raise ge_exceptions.DataContextError(
            "expectation_suite %s not found" % expectation_suite_name
        )
    return self.expectations_store.get(suite_key)
[ "def", "get_expectation_suite", "(", "self", ",", "expectation_suite_name", ":", "str", ")", "->", "ExpectationSuite", ":", "key", ":", "ExpectationSuiteIdentifier", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "if", "self", ".", "expectations_store", ".", "has_key", "(", "key", ")", ":", "return", "self", ".", "expectations_store", ".", "get", "(", "key", ")", "else", ":", "raise", "ge_exceptions", ".", "DataContextError", "(", "\"expectation_suite %s not found\"", "%", "expectation_suite_name", ")" ]
[ 2132, 4 ]
[ 2150, 13 ]
python
en
['en', 'en', 'en']
True
list_expectation_suite_names
(self)
Lists the available expectation suite names
Lists the available expectation suite names
def list_expectation_suite_names(self):
    """Lists the available expectation suite names"""
    # sorted() over a generator replaces the build-then-sort dance.
    return sorted(
        suite_id.expectation_suite_name
        for suite_id in self.list_expectation_suites()
    )
[ "def", "list_expectation_suite_names", "(", "self", ")", ":", "sorted_expectation_suite_names", "=", "[", "i", ".", "expectation_suite_name", "for", "i", "in", "self", ".", "list_expectation_suites", "(", ")", "]", "sorted_expectation_suite_names", ".", "sort", "(", ")", "return", "sorted_expectation_suite_names" ]
[ 2152, 4 ]
[ 2158, 45 ]
python
en
['en', 'en', 'en']
True
save_expectation_suite
(self, expectation_suite, expectation_suite_name=None)
Save the provided expectation suite into the DataContext. Args: expectation_suite: the suite to save expectation_suite_name: the name of this expectation suite. If no name is provided the name will \ be read from the suite Returns: None
Save the provided expectation suite into the DataContext.
def save_expectation_suite(self, expectation_suite, expectation_suite_name=None):
    """Save the provided expectation suite into the DataContext.

    Args:
        expectation_suite: the suite to save
        expectation_suite_name: the name of this expectation suite. If no name is provided the name will \
            be read from the suite

    Returns:
        None
    """
    if expectation_suite_name is not None:
        # An explicit name wins: rename the suite before persisting it.
        expectation_suite.expectation_suite_name = expectation_suite_name
        resolved_name = expectation_suite_name
    else:
        resolved_name = expectation_suite.expectation_suite_name
    key = ExpectationSuiteIdentifier(expectation_suite_name=resolved_name)
    self.expectations_store.set(key, expectation_suite)
    # A saved suite may introduce new evaluation parameters, so the
    # dependency graph must be recompiled before next use.
    self._evaluation_parameter_dependencies_compiled = False
[ "def", "save_expectation_suite", "(", "self", ",", "expectation_suite", ",", "expectation_suite_name", "=", "None", ")", ":", "if", "expectation_suite_name", "is", "None", ":", "key", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "expectation_suite", ".", "expectation_suite_name", ")", "else", ":", "expectation_suite", ".", "expectation_suite_name", "=", "expectation_suite_name", "key", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "self", ".", "expectations_store", ".", "set", "(", "key", ",", "expectation_suite", ")", "self", ".", "_evaluation_parameter_dependencies_compiled", "=", "False" ]
[ 2164, 4 ]
[ 2186, 64 ]
python
en
['en', 'en', 'en']
True
_store_metrics
(self, requested_metrics, validation_results, target_store_name)
requested_metrics is a dictionary like this: requested_metrics: *: # The asterisk here matches *any* expectation suite name # use the 'kwargs' key to request metrics that are defined by kwargs, # for example because they are defined only for a particular column # - column: # Age: # - expect_column_min_to_be_between.result.observed_value - statistics.evaluated_expectations - statistics.successful_expectations Args: requested_metrics: validation_results: target_store_name: Returns:
requested_metrics is a dictionary like this:
def _store_metrics(self, requested_metrics, validation_results, target_store_name):
    """Extract requested metrics from a validation result and persist them.

    requested_metrics is a dictionary like this:

        requested_metrics:
          *: # The asterisk here matches *any* expectation suite name
            # use the 'kwargs' key to request metrics that are defined by kwargs,
            # for example because they are defined only for a particular column
            # - column:
            #     Age:
            #        - expect_column_min_to_be_between.result.observed_value
            - statistics.evaluated_expectations
            - statistics.successful_expectations

    Args:
        requested_metrics: mapping of expectation-suite name (or "*") to a
            list of metric configurations to persist.
        validation_results: a validation result whose ``meta`` carries the
            expectation suite name, run_id and (optionally) batch_kwargs.
        target_store_name: name of the store (in ``self.stores``) that
            receives the metric values.

    Returns:
        None
    """
    expectation_suite_name = validation_results.meta["expectation_suite_name"]
    run_id = validation_results.meta["run_id"]
    # data_asset_name is optional: absent batch_kwargs yields None.
    data_asset_name = validation_results.meta.get("batch_kwargs", {}).get(
        "data_asset_name"
    )

    for expectation_suite_dependency, metrics_list in requested_metrics.items():
        # "*" matches any suite; otherwise only process the suite that
        # produced this validation result.
        if (expectation_suite_dependency != "*") and (
            expectation_suite_dependency != expectation_suite_name
        ):
            continue

        if not isinstance(metrics_list, list):
            raise ge_exceptions.DataContextError(
                "Invalid requested_metrics configuration: metrics requested for "
                "each expectation suite must be a list."
            )

        for metric_configuration in metrics_list:
            # A single configuration entry may expand to several
            # (metric_name, metric_kwargs) pairs.
            metric_configurations = _get_metric_configuration_tuples(
                metric_configuration
            )
            for metric_name, metric_kwargs in metric_configurations:
                try:
                    metric_value = validation_results.get_metric(
                        metric_name, **metric_kwargs
                    )
                    self.stores[target_store_name].set(
                        ValidationMetricIdentifier(
                            run_id=run_id,
                            data_asset_name=data_asset_name,
                            expectation_suite_identifier=ExpectationSuiteIdentifier(
                                expectation_suite_name
                            ),
                            metric_name=metric_name,
                            metric_kwargs_id=get_metric_kwargs_id(
                                metric_name, metric_kwargs
                            ),
                        ),
                        metric_value,
                    )
                except ge_exceptions.UnavailableMetricError:
                    # This will happen frequently in larger pipelines: a
                    # metric requested by one suite may simply not exist in
                    # this particular validation result, so only log it.
                    logger.debug(
                        "metric {} was requested by another expectation suite but is not available in "
                        "this validation result.".format(metric_name)
                    )
[ "def", "_store_metrics", "(", "self", ",", "requested_metrics", ",", "validation_results", ",", "target_store_name", ")", ":", "expectation_suite_name", "=", "validation_results", ".", "meta", "[", "\"expectation_suite_name\"", "]", "run_id", "=", "validation_results", ".", "meta", "[", "\"run_id\"", "]", "data_asset_name", "=", "validation_results", ".", "meta", ".", "get", "(", "\"batch_kwargs\"", ",", "{", "}", ")", ".", "get", "(", "\"data_asset_name\"", ")", "for", "expectation_suite_dependency", ",", "metrics_list", "in", "requested_metrics", ".", "items", "(", ")", ":", "if", "(", "expectation_suite_dependency", "!=", "\"*\"", ")", "and", "(", "expectation_suite_dependency", "!=", "expectation_suite_name", ")", ":", "continue", "if", "not", "isinstance", "(", "metrics_list", ",", "list", ")", ":", "raise", "ge_exceptions", ".", "DataContextError", "(", "\"Invalid requested_metrics configuration: metrics requested for \"", "\"each expectation suite must be a list.\"", ")", "for", "metric_configuration", "in", "metrics_list", ":", "metric_configurations", "=", "_get_metric_configuration_tuples", "(", "metric_configuration", ")", "for", "metric_name", ",", "metric_kwargs", "in", "metric_configurations", ":", "try", ":", "metric_value", "=", "validation_results", ".", "get_metric", "(", "metric_name", ",", "*", "*", "metric_kwargs", ")", "self", ".", "stores", "[", "target_store_name", "]", ".", "set", "(", "ValidationMetricIdentifier", "(", "run_id", "=", "run_id", ",", "data_asset_name", "=", "data_asset_name", ",", "expectation_suite_identifier", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", ")", ",", "metric_name", "=", "metric_name", ",", "metric_kwargs_id", "=", "get_metric_kwargs_id", "(", "metric_name", ",", "metric_kwargs", ")", ",", ")", ",", "metric_value", ",", ")", "except", "ge_exceptions", ".", "UnavailableMetricError", ":", "# This will happen frequently in larger pipelines", "logger", ".", "debug", "(", "\"metric {} was 
requested by another expectation suite but is not available in \"", "\"this validation result.\"", ".", "format", "(", "metric_name", ")", ")" ]
[ 2188, 4 ]
[ 2256, 25 ]
python
en
['en', 'error', 'th']
False
get_validation_result
( self, expectation_suite_name, run_id=None, batch_identifier=None, validations_store_name=None, failed_only=False, )
Get validation results from a configured store. Args: expectation_suite_name: expectation_suite name for which to get validation result (default: "default") run_id: run_id for which to get validation result (if None, fetch the latest result by alphanumeric sort) validations_store_name: the name of the store from which to get validation results failed_only: if True, filter the result to return only failed expectations Returns: validation_result
Get validation results from a configured store.
def get_validation_result(
    self,
    expectation_suite_name,
    run_id=None,
    batch_identifier=None,
    validations_store_name=None,
    failed_only=False,
):
    """Get validation results from a configured store.

    Args:
        expectation_suite_name: expectation_suite name for which to get validation result (default: "default")
        run_id: run_id for which to get validation result (if None, fetch the latest result by alphanumeric sort)
        batch_identifier: batch for which to get validation result (if None,
            taken from the latest matching key)
        validations_store_name: the name of the store from which to get validation results
        failed_only: if True, filter the result to return only failed expectations

    Returns:
        validation_result (or an empty dict when no matching keys exist)
    """
    if validations_store_name is None:
        # Fall back to the context's default validations store.
        validations_store_name = self.validations_store_name
    selected_store = self.stores[validations_store_name]

    if run_id is None or batch_identifier is None:
        # Get most recent run id
        # NOTE : This method requires a (potentially very inefficient) list_keys call.
        # It should probably move to live in an appropriate Store class,
        # but when we do so, that Store will need to function as more than just a key-value Store.
        key_list = selected_store.list_keys()
        filtered_key_list = []
        for key in key_list:
            # Keep only keys compatible with whichever of run_id /
            # batch_identifier the caller pinned down.
            if run_id is not None and key.run_id != run_id:
                continue
            if (
                batch_identifier is not None
                and key.batch_identifier != batch_identifier
            ):
                continue
            filtered_key_list.append(key)

        if len(filtered_key_list) == 0:
            logger.warning("No valid run_id values found.")
            return {}

        # Alphanumeric sort on run_id; the last element is the latest run.
        filtered_key_list = sorted(filtered_key_list, key=lambda x: x.run_id)

        if run_id is None:
            run_id = filtered_key_list[-1].run_id
        if batch_identifier is None:
            batch_identifier = filtered_key_list[-1].batch_identifier

    key = ValidationResultIdentifier(
        expectation_suite_identifier=ExpectationSuiteIdentifier(
            expectation_suite_name=expectation_suite_name
        ),
        run_id=run_id,
        batch_identifier=batch_identifier,
    )
    results_dict = selected_store.get(key)

    # TODO: This should be a convenience method of ValidationResultSuite
    if failed_only:
        # NOTE: this mutates the fetched result object in place before
        # returning it.
        failed_results_list = [
            result for result in results_dict.results if not result.success
        ]
        results_dict.results = failed_results_list
        return results_dict
    else:
        return results_dict
[ "def", "get_validation_result", "(", "self", ",", "expectation_suite_name", ",", "run_id", "=", "None", ",", "batch_identifier", "=", "None", ",", "validations_store_name", "=", "None", ",", "failed_only", "=", "False", ",", ")", ":", "if", "validations_store_name", "is", "None", ":", "validations_store_name", "=", "self", ".", "validations_store_name", "selected_store", "=", "self", ".", "stores", "[", "validations_store_name", "]", "if", "run_id", "is", "None", "or", "batch_identifier", "is", "None", ":", "# Get most recent run id", "# NOTE : This method requires a (potentially very inefficient) list_keys call.", "# It should probably move to live in an appropriate Store class,", "# but when we do so, that Store will need to function as more than just a key-value Store.", "key_list", "=", "selected_store", ".", "list_keys", "(", ")", "filtered_key_list", "=", "[", "]", "for", "key", "in", "key_list", ":", "if", "run_id", "is", "not", "None", "and", "key", ".", "run_id", "!=", "run_id", ":", "continue", "if", "(", "batch_identifier", "is", "not", "None", "and", "key", ".", "batch_identifier", "!=", "batch_identifier", ")", ":", "continue", "filtered_key_list", ".", "append", "(", "key", ")", "# run_id_set = set([key.run_id for key in filtered_key_list])", "if", "len", "(", "filtered_key_list", ")", "==", "0", ":", "logger", ".", "warning", "(", "\"No valid run_id values found.\"", ")", "return", "{", "}", "filtered_key_list", "=", "sorted", "(", "filtered_key_list", ",", "key", "=", "lambda", "x", ":", "x", ".", "run_id", ")", "if", "run_id", "is", "None", ":", "run_id", "=", "filtered_key_list", "[", "-", "1", "]", ".", "run_id", "if", "batch_identifier", "is", "None", ":", "batch_identifier", "=", "filtered_key_list", "[", "-", "1", "]", ".", "batch_identifier", "key", "=", "ValidationResultIdentifier", "(", "expectation_suite_identifier", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", ",", "run_id", "=", 
"run_id", ",", "batch_identifier", "=", "batch_identifier", ",", ")", "results_dict", "=", "selected_store", ".", "get", "(", "key", ")", "# TODO: This should be a convenience method of ValidationResultSuite", "if", "failed_only", ":", "failed_results_list", "=", "[", "result", "for", "result", "in", "results_dict", ".", "results", "if", "not", "result", ".", "success", "]", "results_dict", ".", "results", "=", "failed_results_list", "return", "results_dict", "else", ":", "return", "results_dict" ]
[ 2307, 4 ]
[ 2377, 31 ]
python
en
['en', 'en', 'en']
True
update_return_obj
(self, data_asset, return_obj)
Helper called by data_asset. Args: data_asset: The data_asset whose validation produced the current return object return_obj: the return object to update Returns: return_obj: the return object, potentially changed into a widget by the configured expectation explorer
Helper called by data_asset.
def update_return_obj(self, data_asset, return_obj): """Helper called by data_asset. Args: data_asset: The data_asset whose validation produced the current return object return_obj: the return object to update Returns: return_obj: the return object, potentially changed into a widget by the configured expectation explorer """ return return_obj
[ "def", "update_return_obj", "(", "self", ",", "data_asset", ",", "return_obj", ")", ":", "return", "return_obj" ]
[ 2379, 4 ]
[ 2389, 25 ]
python
en
['en', 'lb', 'en']
True
With_all_joints.__init__
(self, *args, **kwargs)
Usage: Inputs are all joints information, which can be used to generate three different learnable values: 1.Global modulation; 2.Group-wise modulation. 3.Channel-wise modulation. For each of the above format, it can be combined with group convolution by [Addition] and [Multiply].
Usage: Inputs are all joints information, which can be used to generate three different learnable values: 1.Global modulation; 2.Group-wise modulation. 3.Channel-wise modulation. For each of the above format, it can be combined with group convolution by [Addition] and [Multiply].
def __init__(self, *args, **kwargs): super(With_all_joints, self).__init__(*args, **kwargs) """ Usage: Inputs are all joints information, which can be used to generate three different learnable values: 1.Global modulation; 2.Group-wise modulation. 3.Channel-wise modulation. For each of the above format, it can be combined with group convolution by [Addition] and [Multiply]. """ if self.modulation: print('Use overall global-joint modulation for the all groups') self.m_conv = nn.Conv1d(self.in_channel, out_channels=self.kernel_size, kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride) nn.init.constant_(self.m_conv.weight, 0) self.m_conv.register_backward_hook(self._set_lr) if self.group_modulation: print('Use [group-wise] modulation for each group') group_mo = [] for index, i in enumerate(self.out_seq): in_ch = sum(map(lambda x: self.in_channel_group[x], i)) group_mo.append(nn.Conv1d(in_ch, self.kernel_size, kernel_size=self.kernel_size, stride=self.stride, dilation=self.dilation)) nn.init.constant_(group_mo[index].weight, 0) group_mo[index].register_backward_hook(self._set_lr) self.group_mo = nn.ModuleList(group_mo) if self.split_modulation: m_conv = [] print('Use Split [global-joint] modulation for each group') for i in range(len(self.out_seq)): if self.recombine == 'concat': if self.repeat_concat: print('Use [Repeated values for concat]') m_conv.append(nn.Conv1d(self.in_channel, out_channels=self.kernel_size, kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride)) else: print('Use [different values for concat]') m_conv.append(nn.Conv1d(self.in_channel, out_channels=self.cat_num[i]*self.kernel_size, kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride)) else: m_conv.append(nn.Conv1d(self.in_channel, out_channels=self.kernel_size, kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride)) nn.init.constant_(m_conv[i].weight, 0) m_conv[i].register_backward_hook(self._set_lr) self.m_conv = 
nn.ModuleList(m_conv) in_ch_sum = 0 for index, i in enumerate(self.out_seq): in_ch_sum += sum(map(lambda x:self.in_channel_group[x],i)) if self.channelwise: print('Use overall global-joint channelwise modulation for the all groups') self.m_conv = nn.Conv1d(self.in_channel, out_channels=in_ch_sum*self.kernel_size, kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride) nn.init.constant_(self.m_conv.weight, 0) self.m_conv.register_backward_hook(self._set_lr)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "With_all_joints", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "self", ".", "modulation", ":", "print", "(", "'Use overall global-joint modulation for the all groups'", ")", "self", ".", "m_conv", "=", "nn", ".", "Conv1d", "(", "self", ".", "in_channel", ",", "out_channels", "=", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", "nn", ".", "init", ".", "constant_", "(", "self", ".", "m_conv", ".", "weight", ",", "0", ")", "self", ".", "m_conv", ".", "register_backward_hook", "(", "self", ".", "_set_lr", ")", "if", "self", ".", "group_modulation", ":", "print", "(", "'Use [group-wise] modulation for each group'", ")", "group_mo", "=", "[", "]", "for", "index", ",", "i", "in", "enumerate", "(", "self", ".", "out_seq", ")", ":", "in_ch", "=", "sum", "(", "map", "(", "lambda", "x", ":", "self", ".", "in_channel_group", "[", "x", "]", ",", "i", ")", ")", "group_mo", ".", "append", "(", "nn", ".", "Conv1d", "(", "in_ch", ",", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "stride", "=", "self", ".", "stride", ",", "dilation", "=", "self", ".", "dilation", ")", ")", "nn", ".", "init", ".", "constant_", "(", "group_mo", "[", "index", "]", ".", "weight", ",", "0", ")", "group_mo", "[", "index", "]", ".", "register_backward_hook", "(", "self", ".", "_set_lr", ")", "self", ".", "group_mo", "=", "nn", ".", "ModuleList", "(", "group_mo", ")", "if", "self", ".", "split_modulation", ":", "m_conv", "=", "[", "]", "print", "(", "'Use Split [global-joint] modulation for each group'", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "out_seq", ")", ")", ":", "if", "self", ".", "recombine", "==", "'concat'", ":", "if", "self", ".", "repeat_concat", ":", "print", "(", 
"'Use [Repeated values for concat]'", ")", "m_conv", ".", "append", "(", "nn", ".", "Conv1d", "(", "self", ".", "in_channel", ",", "out_channels", "=", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", ")", "else", ":", "print", "(", "'Use [different values for concat]'", ")", "m_conv", ".", "append", "(", "nn", ".", "Conv1d", "(", "self", ".", "in_channel", ",", "out_channels", "=", "self", ".", "cat_num", "[", "i", "]", "*", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", ")", "else", ":", "m_conv", ".", "append", "(", "nn", ".", "Conv1d", "(", "self", ".", "in_channel", ",", "out_channels", "=", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", ")", "nn", ".", "init", ".", "constant_", "(", "m_conv", "[", "i", "]", ".", "weight", ",", "0", ")", "m_conv", "[", "i", "]", ".", "register_backward_hook", "(", "self", ".", "_set_lr", ")", "self", ".", "m_conv", "=", "nn", ".", "ModuleList", "(", "m_conv", ")", "in_ch_sum", "=", "0", "for", "index", ",", "i", "in", "enumerate", "(", "self", ".", "out_seq", ")", ":", "in_ch_sum", "+=", "sum", "(", "map", "(", "lambda", "x", ":", "self", ".", "in_channel_group", "[", "x", "]", ",", "i", ")", ")", "if", "self", ".", "channelwise", ":", "print", "(", "'Use overall global-joint channelwise modulation for the all groups'", ")", "self", ".", "m_conv", "=", "nn", ".", "Conv1d", "(", "self", ".", "in_channel", ",", "out_channels", "=", "in_ch_sum", "*", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", "nn", ".", "init", ".", 
"constant_", "(", "self", ".", "m_conv", ".", "weight", ",", "0", ")", "self", ".", "m_conv", ".", "register_backward_hook", "(", "self", ".", "_set_lr", ")" ]
[ 54, 4 ]
[ 101, 60 ]
python
en
['en', 'error', 'th']
False
With_other_joints.__init__
(self, inc, outc, out_seq, kernel_size, padding, dilation, stride, split_modulation, recombine, repeat_concat, in_c, mean_func, mean_dim, ups_mean)
Usage: Inputs from [Out of the group] Joints to get their information that can divided into two formats: 1.Learnable local modulation. 2. Manual function,e.g. Mean opertation For each of the above format, it can be combined with group convolution by [Addition], [Multiply] and [Concat]. :param args: from parents args :param kwargs: from parents kwargs :param in_channel: different group has different in channel number.[Sum up: Other joints channel number] :param mean_dim: The output number of the mean_function. e.g. 1, group number or satisfing the ratio for addition/multipy/concat
Usage: Inputs from [Out of the group] Joints to get their information that can divided into two formats: 1.Learnable local modulation. 2. Manual function,e.g. Mean opertation For each of the above format, it can be combined with group convolution by [Addition], [Multiply] and [Concat]. :param args: from parents args :param kwargs: from parents kwargs :param in_channel: different group has different in channel number.[Sum up: Other joints channel number] :param mean_dim: The output number of the mean_function. e.g. 1, group number or satisfing the ratio for addition/multipy/concat
def __init__(self, inc, outc, out_seq, kernel_size, padding, dilation, stride, split_modulation, recombine, repeat_concat, in_c, mean_func, mean_dim, ups_mean): """ Usage: Inputs from [Out of the group] Joints to get their information that can divided into two formats: 1.Learnable local modulation. 2. Manual function,e.g. Mean opertation For each of the above format, it can be combined with group convolution by [Addition], [Multiply] and [Concat]. :param args: from parents args :param kwargs: from parents kwargs :param in_channel: different group has different in channel number.[Sum up: Other joints channel number] :param mean_dim: The output number of the mean_function. e.g. 1, group number or satisfing the ratio for addition/multipy/concat """ super(With_other_joints, self).__init__(inc, outc, out_seq, kernel_size, padding, dilation, stride, split_modulation, recombine, repeat_concat) self.split_modulation = split_modulation self.mean_func = mean_func self.cat_num = mean_dim self.in_channel = in_c self.ups_mean =ups_mean # Experiment operators self.recombine = recombine self.repeat_concat = repeat_concat if self.recombine == 'multiply': layers_bn = [] for i in range(len(self.out_seq)): layers_bn.append(nn.BatchNorm1d(self.kernel_size, momentum=0.1)) self.layers_bn = nn.ModuleList(layers_bn) if self.split_modulation: m_conv = [] print('Use Split [Other-joint] modulation for each group') for i in range(len(self.out_seq)): if self.recombine == 'concat': if self.repeat_concat: print('Use [Repeated values for concat]') m_conv.append(nn.Conv1d(self.in_channel[i], out_channels=self.kernel_size, kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride)) else: print('Use [Different values for concat]') m_conv.append(nn.Conv1d(self.in_channel[i], out_channels=self.cat_num[i]*self.kernel_size, kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride)) else: m_conv.append(nn.Conv1d(self.in_channel[i], out_channels=self.kernel_size, 
kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride)) nn.init.constant_(m_conv[i].weight, 0) m_conv[i].register_backward_hook(self._set_lr) self.m_conv = nn.ModuleList(m_conv) if self.mean_func: m_conv = [] print('Use Split [Other-joint] manual mean values for each group') for i in range(len(self.out_seq)): if self.recombine == 'concat': if self.ups_mean: print('Use [upsampling mean value] for concat') m_conv.append(nn.Conv1d(1, out_channels=self.cat_num[i]*self.kernel_size, kernel_size=self.kernel_size, dilation=self.dilation, stride=self.stride)) nn.init.constant_(m_conv[i].weight, 0) m_conv[i].register_backward_hook(self._set_lr) self.m_conv = nn.ModuleList(m_conv)
[ "def", "__init__", "(", "self", ",", "inc", ",", "outc", ",", "out_seq", ",", "kernel_size", ",", "padding", ",", "dilation", ",", "stride", ",", "split_modulation", ",", "recombine", ",", "repeat_concat", ",", "in_c", ",", "mean_func", ",", "mean_dim", ",", "ups_mean", ")", ":", "super", "(", "With_other_joints", ",", "self", ")", ".", "__init__", "(", "inc", ",", "outc", ",", "out_seq", ",", "kernel_size", ",", "padding", ",", "dilation", ",", "stride", ",", "split_modulation", ",", "recombine", ",", "repeat_concat", ")", "self", ".", "split_modulation", "=", "split_modulation", "self", ".", "mean_func", "=", "mean_func", "self", ".", "cat_num", "=", "mean_dim", "self", ".", "in_channel", "=", "in_c", "self", ".", "ups_mean", "=", "ups_mean", "# Experiment operators", "self", ".", "recombine", "=", "recombine", "self", ".", "repeat_concat", "=", "repeat_concat", "if", "self", ".", "recombine", "==", "'multiply'", ":", "layers_bn", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "out_seq", ")", ")", ":", "layers_bn", ".", "append", "(", "nn", ".", "BatchNorm1d", "(", "self", ".", "kernel_size", ",", "momentum", "=", "0.1", ")", ")", "self", ".", "layers_bn", "=", "nn", ".", "ModuleList", "(", "layers_bn", ")", "if", "self", ".", "split_modulation", ":", "m_conv", "=", "[", "]", "print", "(", "'Use Split [Other-joint] modulation for each group'", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "out_seq", ")", ")", ":", "if", "self", ".", "recombine", "==", "'concat'", ":", "if", "self", ".", "repeat_concat", ":", "print", "(", "'Use [Repeated values for concat]'", ")", "m_conv", ".", "append", "(", "nn", ".", "Conv1d", "(", "self", ".", "in_channel", "[", "i", "]", ",", "out_channels", "=", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", ")", "else", ":", "print", "(", "'Use [Different values for 
concat]'", ")", "m_conv", ".", "append", "(", "nn", ".", "Conv1d", "(", "self", ".", "in_channel", "[", "i", "]", ",", "out_channels", "=", "self", ".", "cat_num", "[", "i", "]", "*", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", ")", "else", ":", "m_conv", ".", "append", "(", "nn", ".", "Conv1d", "(", "self", ".", "in_channel", "[", "i", "]", ",", "out_channels", "=", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", ")", "nn", ".", "init", ".", "constant_", "(", "m_conv", "[", "i", "]", ".", "weight", ",", "0", ")", "m_conv", "[", "i", "]", ".", "register_backward_hook", "(", "self", ".", "_set_lr", ")", "self", ".", "m_conv", "=", "nn", ".", "ModuleList", "(", "m_conv", ")", "if", "self", ".", "mean_func", ":", "m_conv", "=", "[", "]", "print", "(", "'Use Split [Other-joint] manual mean values for each group'", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "out_seq", ")", ")", ":", "if", "self", ".", "recombine", "==", "'concat'", ":", "if", "self", ".", "ups_mean", ":", "print", "(", "'Use [upsampling mean value] for concat'", ")", "m_conv", ".", "append", "(", "nn", ".", "Conv1d", "(", "1", ",", "out_channels", "=", "self", ".", "cat_num", "[", "i", "]", "*", "self", ".", "kernel_size", ",", "kernel_size", "=", "self", ".", "kernel_size", ",", "dilation", "=", "self", ".", "dilation", ",", "stride", "=", "self", ".", "stride", ")", ")", "nn", ".", "init", ".", "constant_", "(", "m_conv", "[", "i", "]", ".", "weight", ",", "0", ")", "m_conv", "[", "i", "]", ".", "register_backward_hook", "(", "self", ".", "_set_lr", ")", "self", ".", "m_conv", "=", "nn", ".", "ModuleList", "(", "m_conv", ")" ]
[ 182, 4 ]
[ 236, 47 ]
python
en
['en', 'error', 'th']
False
With_other_joints._mean_func
(self, x, cat_num, x_self)
Get mean value of each group from all other joints :param x: a list with [other joints] of the group :param cat_num: the repeat channel size :param x_self: a list with [itself joints] of the group :return: the processed mean value
Get mean value of each group from all other joints :param x: a list with [other joints] of the group :param cat_num: the repeat channel size :param x_self: a list with [itself joints] of the group :return: the processed mean value
def _mean_func(self, x, cat_num, x_self): """ Get mean value of each group from all other joints :param x: a list with [other joints] of the group :param cat_num: the repeat channel size :param x_self: a list with [itself joints] of the group :return: the processed mean value """ out_mean = [] for i, x_g in enumerate(x): m_mean = torch.mean(x_g, dim=1, keepdim=True) if self.ups_mean: # Upsample to get more variable values from mean value m1 = self.m_conv[i](m_mean) m1 = reshape_with_kernel(m1, self.kernel_size) elif self.repeat_concat: m1 = torch.cat([m_mean for _ in range(cat_num[i])], dim=1) elif self.recombine == 'add' or 'multiply': m1 = torch.cat([m_mean for _ in range(x_self[i].size(1))], dim=1) out_mean.append(m1) return out_mean
[ "def", "_mean_func", "(", "self", ",", "x", ",", "cat_num", ",", "x_self", ")", ":", "out_mean", "=", "[", "]", "for", "i", ",", "x_g", "in", "enumerate", "(", "x", ")", ":", "m_mean", "=", "torch", ".", "mean", "(", "x_g", ",", "dim", "=", "1", ",", "keepdim", "=", "True", ")", "if", "self", ".", "ups_mean", ":", "# Upsample to get more variable values from mean value", "m1", "=", "self", ".", "m_conv", "[", "i", "]", "(", "m_mean", ")", "m1", "=", "reshape_with_kernel", "(", "m1", ",", "self", ".", "kernel_size", ")", "elif", "self", ".", "repeat_concat", ":", "m1", "=", "torch", ".", "cat", "(", "[", "m_mean", "for", "_", "in", "range", "(", "cat_num", "[", "i", "]", ")", "]", ",", "dim", "=", "1", ")", "elif", "self", ".", "recombine", "==", "'add'", "or", "'multiply'", ":", "m1", "=", "torch", ".", "cat", "(", "[", "m_mean", "for", "_", "in", "range", "(", "x_self", "[", "i", "]", ".", "size", "(", "1", ")", ")", "]", ",", "dim", "=", "1", ")", "out_mean", ".", "append", "(", "m1", ")", "return", "out_mean" ]
[ 291, 4 ]
[ 310, 23 ]
python
en
['en', 'error', 'th']
False
open_file
( file, mode="r", buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None, )
Asynchronous version of :func:`io.open`. Returns: An :term:`asynchronous file object` Example:: async with await trio.open_file(filename) as f: async for line in f: pass assert f.closed See also: :func:`trio.Path.open`
Asynchronous version of :func:`io.open`.
async def open_file( file, mode="r", buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None, ): """Asynchronous version of :func:`io.open`. Returns: An :term:`asynchronous file object` Example:: async with await trio.open_file(filename) as f: async for line in f: pass assert f.closed See also: :func:`trio.Path.open` """ _file = wrap_file( await trio.to_thread.run_sync( io.open, file, mode, buffering, encoding, errors, newline, closefd, opener ) ) return _file
[ "async", "def", "open_file", "(", "file", ",", "mode", "=", "\"r\"", ",", "buffering", "=", "-", "1", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", "newline", "=", "None", ",", "closefd", "=", "True", ",", "opener", "=", "None", ",", ")", ":", "_file", "=", "wrap_file", "(", "await", "trio", ".", "to_thread", ".", "run_sync", "(", "io", ".", "open", ",", "file", ",", "mode", ",", "buffering", ",", "encoding", ",", "errors", ",", "newline", ",", "closefd", ",", "opener", ")", ")", "return", "_file" ]
[ 128, 0 ]
[ 160, 16 ]
python
en
['en', 'el-Latn', 'en']
True
wrap_file
(file)
This wraps any file object in a wrapper that provides an asynchronous file object interface. Args: file: a :term:`file object` Returns: An :term:`asynchronous file object` that wraps ``file`` Example:: async_file = trio.wrap_file(StringIO('asdf')) assert await async_file.read() == 'asdf'
This wraps any file object in a wrapper that provides an asynchronous file object interface.
def wrap_file(file): """This wraps any file object in a wrapper that provides an asynchronous file object interface. Args: file: a :term:`file object` Returns: An :term:`asynchronous file object` that wraps ``file`` Example:: async_file = trio.wrap_file(StringIO('asdf')) assert await async_file.read() == 'asdf' """ def has(attr): return hasattr(file, attr) and callable(getattr(file, attr)) if not (has("close") and (has("read") or has("write"))): raise TypeError( "{} does not implement required duck-file methods: " "close and (read or write)".format(file) ) return AsyncIOWrapper(file)
[ "def", "wrap_file", "(", "file", ")", ":", "def", "has", "(", "attr", ")", ":", "return", "hasattr", "(", "file", ",", "attr", ")", "and", "callable", "(", "getattr", "(", "file", ",", "attr", ")", ")", "if", "not", "(", "has", "(", "\"close\"", ")", "and", "(", "has", "(", "\"read\"", ")", "or", "has", "(", "\"write\"", ")", ")", ")", ":", "raise", "TypeError", "(", "\"{} does not implement required duck-file methods: \"", "\"close and (read or write)\"", ".", "format", "(", "file", ")", ")", "return", "AsyncIOWrapper", "(", "file", ")" ]
[ 163, 0 ]
[ 190, 31 ]
python
en
['en', 'en', 'en']
True
AsyncIOWrapper.wrapped
(self)
object: A reference to the wrapped file object
object: A reference to the wrapped file object
def wrapped(self): """object: A reference to the wrapped file object""" return self._wrapped
[ "def", "wrapped", "(", "self", ")", ":", "return", "self", ".", "_wrapped" ]
[ 64, 4 ]
[ 67, 28 ]
python
en
['en', 'en', 'en']
True
AsyncIOWrapper.detach
(self)
Like :meth:`io.BufferedIOBase.detach`, but async. This also re-wraps the result in a new :term:`asynchronous file object` wrapper.
Like :meth:`io.BufferedIOBase.detach`, but async.
async def detach(self): """Like :meth:`io.BufferedIOBase.detach`, but async. This also re-wraps the result in a new :term:`asynchronous file object` wrapper. """ raw = await trio.to_thread.run_sync(self._wrapped.detach) return wrap_file(raw)
[ "async", "def", "detach", "(", "self", ")", ":", "raw", "=", "await", "trio", ".", "to_thread", ".", "run_sync", "(", "self", ".", "_wrapped", ".", "detach", ")", "return", "wrap_file", "(", "raw", ")" ]
[ 102, 4 ]
[ 111, 29 ]
python
en
['en', 'cy', 'en']
True
AsyncIOWrapper.aclose
(self)
Like :meth:`io.IOBase.close`, but async. This is also shielded from cancellation; if a cancellation scope is cancelled, the wrapped file object will still be safely closed.
Like :meth:`io.IOBase.close`, but async.
async def aclose(self): """Like :meth:`io.IOBase.close`, but async. This is also shielded from cancellation; if a cancellation scope is cancelled, the wrapped file object will still be safely closed. """ # ensure the underling file is closed during cancellation with trio.CancelScope(shield=True): await trio.to_thread.run_sync(self._wrapped.close) await trio.lowlevel.checkpoint_if_cancelled()
[ "async", "def", "aclose", "(", "self", ")", ":", "# ensure the underling file is closed during cancellation", "with", "trio", ".", "CancelScope", "(", "shield", "=", "True", ")", ":", "await", "trio", ".", "to_thread", ".", "run_sync", "(", "self", ".", "_wrapped", ".", "close", ")", "await", "trio", ".", "lowlevel", ".", "checkpoint_if_cancelled", "(", ")" ]
[ 113, 4 ]
[ 125, 53 ]
python
en
['en', 'hmn', 'en']
True
LoadModule
(name, namespace=None)
This function causes a SWIG module to be loaded into memory after its dependencies are satisfied. Information about the templates defined therein is looked up from a config file, and PyTemplate instances for each are created. These template instances are placed in a module with the given name that is either looked up from sys.modules or created and placed there if it does not already exist. Optionally, a 'namespace' parameter can be provided. If it is provided, this namespace will be updated with the new template instantiations. The raw classes loaded from the named module's SWIG interface are placed in a 'swig' sub-module. If the namespace parameter is provided, this information will be placed in a sub-module named 'swig' therein as well. This later submodule will be created if it does not already exist.
This function causes a SWIG module to be loaded into memory after its dependencies are satisfied. Information about the templates defined therein is looked up from a config file, and PyTemplate instances for each are created. These template instances are placed in a module with the given name that is either looked up from sys.modules or created and placed there if it does not already exist. Optionally, a 'namespace' parameter can be provided. If it is provided, this namespace will be updated with the new template instantiations. The raw classes loaded from the named module's SWIG interface are placed in a 'swig' sub-module. If the namespace parameter is provided, this information will be placed in a sub-module named 'swig' therein as well. This later submodule will be created if it does not already exist.
def LoadModule(name, namespace=None): """This function causes a SWIG module to be loaded into memory after its dependencies are satisfied. Information about the templates defined therein is looked up from a config file, and PyTemplate instances for each are created. These template instances are placed in a module with the given name that is either looked up from sys.modules or created and placed there if it does not already exist. Optionally, a 'namespace' parameter can be provided. If it is provided, this namespace will be updated with the new template instantiations. The raw classes loaded from the named module's SWIG interface are placed in a 'swig' sub-module. If the namespace parameter is provided, this information will be placed in a sub-module named 'swig' therein as well. This later submodule will be created if it does not already exist.""" # find the module's name in sys.modules, or create a new module so named if sys.version_info >= (3, 4): this_module = sys.modules.setdefault(name, types.ModuleType(name)) else: this_module = sys.modules.setdefault(name, imp.new_module(name)) # if this library and it's template instantiations have already been loaded # into sys.modules, bail out after loading the defined symbols into # 'namespace' if hasattr(this_module, '__templates_loaded'): if namespace is not None: if sys.version_info >= (3, 4): swig = namespace.setdefault('swig', types.ModuleType('swig')) else: swig = namespace.setdefault('swig', imp.new_module('swig')) swig.__dict__.update(this_module.swig.__dict__) # don't worry about overwriting the symbols in namespace -- any # common symbols should be of type itkTemplate, which is a # singleton type. That is, they are all identical, so replacing one # with the other isn't a problem. for k, v in this_module.__dict__.items(): if not (k.startswith('_') or k == 'swig'): namespace[k] = v return # We're definitely going to load the templates. 
We set templates_loaded # here instead of at the end of the file to protect against cyclical # dependencies that could kill the recursive lookup below. this_module.__templates_loaded = True # For external projects : # If this_module name (variable name) is in the module_data dictionnary, # then this_module is an installed module (or a previously loaded module). # Otherwise, it may come from an external project. In this case, we must # search the Configuration/<name>Config.py file of this project. try: module_data[name] except: file = inspect.getfile(this_module) path = os.path.dirname(file) data = {} conf = name + 'Config.py' try: # for a linux tree execfile(os.path.join(path, 'Configuration', conf), data) except: try: # for a windows tree execfile(os.path.join(path, '..', 'Configuration', conf), data) except: data = None if(data): module_data[name] = data # Now, we definitely need to load the template instantiations from the # named module, and possibly also load the underlying SWIG module. Before # we can load the template instantiations of this module, we need to load # those of the modules on which this one depends. Ditto for the SWIG # modules. # So, we recursively satisfy the dependencies of named module and create # the template instantiations. # Dependencies are looked up from the auto-generated configuration files, # via the module_data instance defined at the bottom of this file, which # knows how to find those configuration files. data = module_data[name] if data: deps = sorted(data['depends']) for dep in deps: LoadModule(dep, namespace) if itkConfig.ImportCallback: itkConfig.ImportCallback(name, 0) # SWIG-generated modules have 'Python' appended. Only load the SWIG module # if we haven't already. 
swigModuleName = name + "Python" loader = LibraryLoader() if not swigModuleName in sys.modules: module = loader.load(swigModuleName) # OK, now the modules on which this one depends are loaded and # template-instantiated, and the SWIG module for this one is also loaded. # We're going to put the things we load and create in two places: the # optional 'namespace' parameter, and the this_module variable's namespace. # make a new 'swig' sub-module for this_module. Also look up or create a # different 'swig' module for 'namespace'. Since 'namespace' may be used to # collect symbols from multiple different ITK modules, we don't want to # stomp on an existing 'swig' module, nor do we want to share 'swig' # modules between this_module and namespace. if sys.version_info >= (3, 4): this_module.swig = types.ModuleType('swig') else: this_module.swig = imp.new_module('swig') if namespace is not None: if sys.version_info >= (3, 4): swig = namespace.setdefault('swig', types.ModuleType('swig')) else: swig = namespace.setdefault('swig', imp.new_module('swig')) for k, v in module.__dict__.items(): if not k.startswith('__'): setattr(this_module.swig, k, v) if namespace is not None and not k.startswith('__'): setattr(swig, k, v) data = module_data[name] if data: for template in data['templates']: if len(template) == 5: # This is a template description pyClassName, cppClassName, swigClassName, class_in_module, \ templateParams = template # It doesn't matter if an itkTemplate for this class name # already exists since every instance of itkTemplate with the # same name shares the same state. So we just make a new # instance and add the new templates. 
templateContainer = itkTemplate.itkTemplate(cppClassName) try: templateContainer.__add__( templateParams, getattr(module, swigClassName)) setattr(this_module, pyClassName, templateContainer) if namespace is not None: curval = namespace.get(pyClassName) if curval is not None and curval != templateContainer: DebugPrintError("Namespace already has a value for" " %s, which is not an itkTemplate" "instance for class %s. " "Overwriting old value." % (pyClassName, cppClassName)) namespace[pyClassName] = templateContainer except Exception as e: DebugPrintError("%s not loaded from module %s because of " "exception:\n %s" % (swigClassName, name, e)) else: # this is a description of a non-templated class # It may have 3 or 4 arguments, the last one can be a boolean value if len(template) == 4: pyClassName, cppClassName, swigClassName, class_in_module = \ template else: pyClassName, cppClassName, swigClassName = template try: swigClass = getattr(module, swigClassName) itkTemplate.registerNoTpl(cppClassName, swigClass) setattr(this_module, pyClassName, swigClass) if namespace is not None: curval = namespace.get(pyClassName) if curval is not None and curval != swigClass: DebugPrintError("Namespace already has a value for" " %s, which is not class %s. " "Overwriting old value." % (pyClassName, cppClassName)) namespace[pyClassName] = swigClass except Exception as e: DebugPrintError("%s not found in module %s because of " "exception:\n %s" % (swigClassName, name, e)) if itkConfig.ImportCallback: itkConfig.ImportCallback(name, 1)
[ "def", "LoadModule", "(", "name", ",", "namespace", "=", "None", ")", ":", "# find the module's name in sys.modules, or create a new module so named", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "4", ")", ":", "this_module", "=", "sys", ".", "modules", ".", "setdefault", "(", "name", ",", "types", ".", "ModuleType", "(", "name", ")", ")", "else", ":", "this_module", "=", "sys", ".", "modules", ".", "setdefault", "(", "name", ",", "imp", ".", "new_module", "(", "name", ")", ")", "# if this library and it's template instantiations have already been loaded", "# into sys.modules, bail out after loading the defined symbols into", "# 'namespace'", "if", "hasattr", "(", "this_module", ",", "'__templates_loaded'", ")", ":", "if", "namespace", "is", "not", "None", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "4", ")", ":", "swig", "=", "namespace", ".", "setdefault", "(", "'swig'", ",", "types", ".", "ModuleType", "(", "'swig'", ")", ")", "else", ":", "swig", "=", "namespace", ".", "setdefault", "(", "'swig'", ",", "imp", ".", "new_module", "(", "'swig'", ")", ")", "swig", ".", "__dict__", ".", "update", "(", "this_module", ".", "swig", ".", "__dict__", ")", "# don't worry about overwriting the symbols in namespace -- any", "# common symbols should be of type itkTemplate, which is a", "# singleton type. That is, they are all identical, so replacing one", "# with the other isn't a problem.", "for", "k", ",", "v", "in", "this_module", ".", "__dict__", ".", "items", "(", ")", ":", "if", "not", "(", "k", ".", "startswith", "(", "'_'", ")", "or", "k", "==", "'swig'", ")", ":", "namespace", "[", "k", "]", "=", "v", "return", "# We're definitely going to load the templates. 
We set templates_loaded", "# here instead of at the end of the file to protect against cyclical", "# dependencies that could kill the recursive lookup below.", "this_module", ".", "__templates_loaded", "=", "True", "# For external projects :", "# If this_module name (variable name) is in the module_data dictionnary,", "# then this_module is an installed module (or a previously loaded module).", "# Otherwise, it may come from an external project. In this case, we must", "# search the Configuration/<name>Config.py file of this project.", "try", ":", "module_data", "[", "name", "]", "except", ":", "file", "=", "inspect", ".", "getfile", "(", "this_module", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "file", ")", "data", "=", "{", "}", "conf", "=", "name", "+", "'Config.py'", "try", ":", "# for a linux tree", "execfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'Configuration'", ",", "conf", ")", ",", "data", ")", "except", ":", "try", ":", "# for a windows tree", "execfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'..'", ",", "'Configuration'", ",", "conf", ")", ",", "data", ")", "except", ":", "data", "=", "None", "if", "(", "data", ")", ":", "module_data", "[", "name", "]", "=", "data", "# Now, we definitely need to load the template instantiations from the", "# named module, and possibly also load the underlying SWIG module. Before", "# we can load the template instantiations of this module, we need to load", "# those of the modules on which this one depends. 
Ditto for the SWIG", "# modules.", "# So, we recursively satisfy the dependencies of named module and create", "# the template instantiations.", "# Dependencies are looked up from the auto-generated configuration files,", "# via the module_data instance defined at the bottom of this file, which", "# knows how to find those configuration files.", "data", "=", "module_data", "[", "name", "]", "if", "data", ":", "deps", "=", "sorted", "(", "data", "[", "'depends'", "]", ")", "for", "dep", "in", "deps", ":", "LoadModule", "(", "dep", ",", "namespace", ")", "if", "itkConfig", ".", "ImportCallback", ":", "itkConfig", ".", "ImportCallback", "(", "name", ",", "0", ")", "# SWIG-generated modules have 'Python' appended. Only load the SWIG module", "# if we haven't already.", "swigModuleName", "=", "name", "+", "\"Python\"", "loader", "=", "LibraryLoader", "(", ")", "if", "not", "swigModuleName", "in", "sys", ".", "modules", ":", "module", "=", "loader", ".", "load", "(", "swigModuleName", ")", "# OK, now the modules on which this one depends are loaded and", "# template-instantiated, and the SWIG module for this one is also loaded.", "# We're going to put the things we load and create in two places: the", "# optional 'namespace' parameter, and the this_module variable's namespace.", "# make a new 'swig' sub-module for this_module. Also look up or create a", "# different 'swig' module for 'namespace'. 
Since 'namespace' may be used to", "# collect symbols from multiple different ITK modules, we don't want to", "# stomp on an existing 'swig' module, nor do we want to share 'swig'", "# modules between this_module and namespace.", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "4", ")", ":", "this_module", ".", "swig", "=", "types", ".", "ModuleType", "(", "'swig'", ")", "else", ":", "this_module", ".", "swig", "=", "imp", ".", "new_module", "(", "'swig'", ")", "if", "namespace", "is", "not", "None", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "4", ")", ":", "swig", "=", "namespace", ".", "setdefault", "(", "'swig'", ",", "types", ".", "ModuleType", "(", "'swig'", ")", ")", "else", ":", "swig", "=", "namespace", ".", "setdefault", "(", "'swig'", ",", "imp", ".", "new_module", "(", "'swig'", ")", ")", "for", "k", ",", "v", "in", "module", ".", "__dict__", ".", "items", "(", ")", ":", "if", "not", "k", ".", "startswith", "(", "'__'", ")", ":", "setattr", "(", "this_module", ".", "swig", ",", "k", ",", "v", ")", "if", "namespace", "is", "not", "None", "and", "not", "k", ".", "startswith", "(", "'__'", ")", ":", "setattr", "(", "swig", ",", "k", ",", "v", ")", "data", "=", "module_data", "[", "name", "]", "if", "data", ":", "for", "template", "in", "data", "[", "'templates'", "]", ":", "if", "len", "(", "template", ")", "==", "5", ":", "# This is a template description", "pyClassName", ",", "cppClassName", ",", "swigClassName", ",", "class_in_module", ",", "templateParams", "=", "template", "# It doesn't matter if an itkTemplate for this class name", "# already exists since every instance of itkTemplate with the", "# same name shares the same state. 
So we just make a new", "# instance and add the new templates.", "templateContainer", "=", "itkTemplate", ".", "itkTemplate", "(", "cppClassName", ")", "try", ":", "templateContainer", ".", "__add__", "(", "templateParams", ",", "getattr", "(", "module", ",", "swigClassName", ")", ")", "setattr", "(", "this_module", ",", "pyClassName", ",", "templateContainer", ")", "if", "namespace", "is", "not", "None", ":", "curval", "=", "namespace", ".", "get", "(", "pyClassName", ")", "if", "curval", "is", "not", "None", "and", "curval", "!=", "templateContainer", ":", "DebugPrintError", "(", "\"Namespace already has a value for\"", "\" %s, which is not an itkTemplate\"", "\"instance for class %s. \"", "\"Overwriting old value.\"", "%", "(", "pyClassName", ",", "cppClassName", ")", ")", "namespace", "[", "pyClassName", "]", "=", "templateContainer", "except", "Exception", "as", "e", ":", "DebugPrintError", "(", "\"%s not loaded from module %s because of \"", "\"exception:\\n %s\"", "%", "(", "swigClassName", ",", "name", ",", "e", ")", ")", "else", ":", "# this is a description of a non-templated class", "# It may have 3 or 4 arguments, the last one can be a boolean value", "if", "len", "(", "template", ")", "==", "4", ":", "pyClassName", ",", "cppClassName", ",", "swigClassName", ",", "class_in_module", "=", "template", "else", ":", "pyClassName", ",", "cppClassName", ",", "swigClassName", "=", "template", "try", ":", "swigClass", "=", "getattr", "(", "module", ",", "swigClassName", ")", "itkTemplate", ".", "registerNoTpl", "(", "cppClassName", ",", "swigClass", ")", "setattr", "(", "this_module", ",", "pyClassName", ",", "swigClass", ")", "if", "namespace", "is", "not", "None", ":", "curval", "=", "namespace", ".", "get", "(", "pyClassName", ")", "if", "curval", "is", "not", "None", "and", "curval", "!=", "swigClass", ":", "DebugPrintError", "(", "\"Namespace already has a value for\"", "\" %s, which is not class %s. 
\"", "\"Overwriting old value.\"", "%", "(", "pyClassName", ",", "cppClassName", ")", ")", "namespace", "[", "pyClassName", "]", "=", "swigClass", "except", "Exception", "as", "e", ":", "DebugPrintError", "(", "\"%s not found in module %s because of \"", "\"exception:\\n %s\"", "%", "(", "swigClassName", ",", "name", ",", "e", ")", ")", "if", "itkConfig", ".", "ImportCallback", ":", "itkConfig", ".", "ImportCallback", "(", "name", ",", "1", ")" ]
[ 33, 0 ]
[ 213, 41 ]
python
en
['en', 'en', 'en']
True
TextConverter.__init__
(self, remove_numeric_tables: bool = False, valid_languages: Optional[List[str]] = None)
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it might likely be encoding error resulting in garbled text.
:param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it might likely be encoding error resulting in garbled text.
def __init__(self, remove_numeric_tables: bool = False, valid_languages: Optional[List[str]] = None): """ :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it might likely be encoding error resulting in garbled text. """ super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages)
[ "def", "__init__", "(", "self", ",", "remove_numeric_tables", ":", "bool", "=", "False", ",", "valid_languages", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ")", ":", "super", "(", ")", ".", "__init__", "(", "remove_numeric_tables", "=", "remove_numeric_tables", ",", "valid_languages", "=", "valid_languages", ")" ]
[ 10, 4 ]
[ 24, 102 ]
python
en
['en', 'error', 'th']
False
TextConverter.convert
( self, file_path: Path, meta: Optional[Dict[str, str]] = None, remove_numeric_tables: Optional[bool] = None, valid_languages: Optional[List[str]] = None, encoding: str = "utf-8", )
Reads text from a txt file and executes optional preprocessing steps. :param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it might likely be encoding error resulting in garbled text. :return: Dict of format {"text": "The text from file", "meta": meta}}
Reads text from a txt file and executes optional preprocessing steps.
def convert( self, file_path: Path, meta: Optional[Dict[str, str]] = None, remove_numeric_tables: Optional[bool] = None, valid_languages: Optional[List[str]] = None, encoding: str = "utf-8", ) -> Dict[str, Any]: """ Reads text from a txt file and executes optional preprocessing steps. :param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it might likely be encoding error resulting in garbled text. :return: Dict of format {"text": "The text from file", "meta": meta}} """ if remove_numeric_tables is None: remove_numeric_tables = self.remove_numeric_tables if valid_languages is None: valid_languages = self.valid_languages with open(file_path, encoding=encoding, errors="ignore") as f: text = f.read() pages = text.split("\f") cleaned_pages = [] for page in pages: lines = page.splitlines() cleaned_lines = [] for line in lines: words = line.split() digits = [word for word in words if any(i.isdigit() for i in word)] # remove lines having > 40% of words as digits AND not ending with a period(.) 
if remove_numeric_tables: if words and len(digits) / len(words) > 0.4 and not line.strip().endswith("."): logger.debug(f"Removing line '{line}' from {file_path}") continue cleaned_lines.append(line) page = "\n".join(cleaned_lines) cleaned_pages.append(page) if valid_languages: document_text = "".join(cleaned_pages) if not self.validate_language(document_text): logger.warning( f"The language for {file_path} is not one of {self.valid_languages}. The file may not have " f"been decoded in the correct text format." ) text = "".join(pages) document = {"text": text, "meta": meta} return document
[ "def", "convert", "(", "self", ",", "file_path", ":", "Path", ",", "meta", ":", "Optional", "[", "Dict", "[", "str", ",", "str", "]", "]", "=", "None", ",", "remove_numeric_tables", ":", "Optional", "[", "bool", "]", "=", "None", ",", "valid_languages", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ",", "encoding", ":", "str", "=", "\"utf-8\"", ",", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "if", "remove_numeric_tables", "is", "None", ":", "remove_numeric_tables", "=", "self", ".", "remove_numeric_tables", "if", "valid_languages", "is", "None", ":", "valid_languages", "=", "self", ".", "valid_languages", "with", "open", "(", "file_path", ",", "encoding", "=", "encoding", ",", "errors", "=", "\"ignore\"", ")", "as", "f", ":", "text", "=", "f", ".", "read", "(", ")", "pages", "=", "text", ".", "split", "(", "\"\\f\"", ")", "cleaned_pages", "=", "[", "]", "for", "page", "in", "pages", ":", "lines", "=", "page", ".", "splitlines", "(", ")", "cleaned_lines", "=", "[", "]", "for", "line", "in", "lines", ":", "words", "=", "line", ".", "split", "(", ")", "digits", "=", "[", "word", "for", "word", "in", "words", "if", "any", "(", "i", ".", "isdigit", "(", ")", "for", "i", "in", "word", ")", "]", "# remove lines having > 40% of words as digits AND not ending with a period(.)", "if", "remove_numeric_tables", ":", "if", "words", "and", "len", "(", "digits", ")", "/", "len", "(", "words", ")", ">", "0.4", "and", "not", "line", ".", "strip", "(", ")", ".", "endswith", "(", "\".\"", ")", ":", "logger", ".", "debug", "(", "f\"Removing line '{line}' from {file_path}\"", ")", "continue", "cleaned_lines", ".", "append", "(", "line", ")", "page", "=", "\"\\n\"", ".", "join", "(", "cleaned_lines", ")", "cleaned_pages", ".", "append", "(", "page", ")", "if", "valid_languages", ":", "document_text", "=", "\"\"", ".", "join", "(", "cleaned_pages", ")", "if", "not", "self", ".", "validate_language", "(", "document_text", ")", ":", "logger", ".", "warning", 
"(", "f\"The language for {file_path} is not one of {self.valid_languages}. The file may not have \"", "f\"been decoded in the correct text format.\"", ")", "text", "=", "\"\"", ".", "join", "(", "pages", ")", "document", "=", "{", "\"text\"", ":", "text", ",", "\"meta\"", ":", "meta", "}", "return", "document" ]
[ 26, 4 ]
[ 91, 23 ]
python
en
['en', 'error', 'th']
False
ColumnHistogram._sqlalchemy
( cls, execution_engine: SqlAlchemyExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[Tuple, Any], runtime_configuration: Dict, )
return a list of counts corresponding to bins Args: column: the name of the column for which to get the histogram bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
return a list of counts corresponding to bins
def _sqlalchemy( cls, execution_engine: SqlAlchemyExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[Tuple, Any], runtime_configuration: Dict, ): """return a list of counts corresponding to bins Args: column: the name of the column for which to get the histogram bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching """ selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain( domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN ) column = accessor_domain_kwargs["column"] bins = metric_value_kwargs["bins"] case_conditions = [] idx = 0 if isinstance(bins, np.ndarray): bins = bins.tolist() else: bins = list(bins) # If we have an infinite lower bound, don't express that in sql if ( bins[0] == get_sql_dialect_floating_point_infinity_value( schema="api_np", negative=True ) ) or ( bins[0] == get_sql_dialect_floating_point_infinity_value( schema="api_cast", negative=True ) ): case_conditions.append( sa.func.sum( sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0) ).label("bin_" + str(idx)) ) idx += 1 for idx in range(idx, len(bins) - 2): case_conditions.append( sa.func.sum( sa.case( [ ( sa.and_( bins[idx] <= sa.column(column), sa.column(column) < bins[idx + 1], ), 1, ) ], else_=0, ) ).label("bin_" + str(idx)) ) if ( bins[-1] == get_sql_dialect_floating_point_infinity_value( schema="api_np", negative=False ) ) or ( bins[-1] == get_sql_dialect_floating_point_infinity_value( schema="api_cast", negative=False ) ): case_conditions.append( sa.func.sum( sa.case([(bins[-2] <= sa.column(column), 1)], else_=0) ).label("bin_" + str(len(bins) - 1)) ) else: case_conditions.append( sa.func.sum( sa.case( [ ( sa.and_( bins[-2] <= sa.column(column), sa.column(column) <= bins[-1], ), 1, ) ], else_=0, ) ).label("bin_" + str(len(bins) - 1)) ) query = ( sa.select(case_conditions) .where( sa.column(column) != None, ) .select_from(selectable) ) # Run the data through 
convert_to_json_serializable to ensure we do not have Decimal types hist = convert_to_json_serializable( list(execution_engine.engine.execute(query).fetchone()) ) return hist
[ "def", "_sqlalchemy", "(", "cls", ",", "execution_engine", ":", "SqlAlchemyExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "Tuple", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "selectable", ",", "_", ",", "accessor_domain_kwargs", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "metric_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "COLUMN", ")", "column", "=", "accessor_domain_kwargs", "[", "\"column\"", "]", "bins", "=", "metric_value_kwargs", "[", "\"bins\"", "]", "case_conditions", "=", "[", "]", "idx", "=", "0", "if", "isinstance", "(", "bins", ",", "np", ".", "ndarray", ")", ":", "bins", "=", "bins", ".", "tolist", "(", ")", "else", ":", "bins", "=", "list", "(", "bins", ")", "# If we have an infinite lower bound, don't express that in sql", "if", "(", "bins", "[", "0", "]", "==", "get_sql_dialect_floating_point_infinity_value", "(", "schema", "=", "\"api_np\"", ",", "negative", "=", "True", ")", ")", "or", "(", "bins", "[", "0", "]", "==", "get_sql_dialect_floating_point_infinity_value", "(", "schema", "=", "\"api_cast\"", ",", "negative", "=", "True", ")", ")", ":", "case_conditions", ".", "append", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "column", "(", "column", ")", "<", "bins", "[", "idx", "+", "1", "]", ",", "1", ")", "]", ",", "else_", "=", "0", ")", ")", ".", "label", "(", "\"bin_\"", "+", "str", "(", "idx", ")", ")", ")", "idx", "+=", "1", "for", "idx", "in", "range", "(", "idx", ",", "len", "(", "bins", ")", "-", "2", ")", ":", "case_conditions", ".", "append", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "and_", "(", "bins", "[", "idx", "]", "<=", "sa", ".", "column", "(", "column", ")", ",", "sa", ".", "column", "(", "column", ")", "<", "bins", "[", "idx", "+", "1", "]", ",", ")", ",", "1", 
",", ")", "]", ",", "else_", "=", "0", ",", ")", ")", ".", "label", "(", "\"bin_\"", "+", "str", "(", "idx", ")", ")", ")", "if", "(", "bins", "[", "-", "1", "]", "==", "get_sql_dialect_floating_point_infinity_value", "(", "schema", "=", "\"api_np\"", ",", "negative", "=", "False", ")", ")", "or", "(", "bins", "[", "-", "1", "]", "==", "get_sql_dialect_floating_point_infinity_value", "(", "schema", "=", "\"api_cast\"", ",", "negative", "=", "False", ")", ")", ":", "case_conditions", ".", "append", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "bins", "[", "-", "2", "]", "<=", "sa", ".", "column", "(", "column", ")", ",", "1", ")", "]", ",", "else_", "=", "0", ")", ")", ".", "label", "(", "\"bin_\"", "+", "str", "(", "len", "(", "bins", ")", "-", "1", ")", ")", ")", "else", ":", "case_conditions", ".", "append", "(", "sa", ".", "func", ".", "sum", "(", "sa", ".", "case", "(", "[", "(", "sa", ".", "and_", "(", "bins", "[", "-", "2", "]", "<=", "sa", ".", "column", "(", "column", ")", ",", "sa", ".", "column", "(", "column", ")", "<=", "bins", "[", "-", "1", "]", ",", ")", ",", "1", ",", ")", "]", ",", "else_", "=", "0", ",", ")", ")", ".", "label", "(", "\"bin_\"", "+", "str", "(", "len", "(", "bins", ")", "-", "1", ")", ")", ")", "query", "=", "(", "sa", ".", "select", "(", "case_conditions", ")", ".", "where", "(", "sa", ".", "column", "(", "column", ")", "!=", "None", ",", ")", ".", "select_from", "(", "selectable", ")", ")", "# Run the data through convert_to_json_serializable to ensure we do not have Decimal types", "hist", "=", "convert_to_json_serializable", "(", "list", "(", "execution_engine", ".", "engine", ".", "execute", "(", "query", ")", ".", "fetchone", "(", ")", ")", ")", "return", "hist" ]
[ 47, 4 ]
[ 157, 19 ]
python
en
['en', 'en', 'en']
True
ColumnHistogram._spark
( cls, execution_engine: SparkDFExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[Tuple, Any], runtime_configuration: Dict, )
return a list of counts corresponding to bins
return a list of counts corresponding to bins
def _spark( cls, execution_engine: SparkDFExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[Tuple, Any], runtime_configuration: Dict, ): df, _, accessor_domain_kwargs = execution_engine.get_compute_domain( domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN ) bins = metric_value_kwargs["bins"] column = metric_domain_kwargs["column"] """return a list of counts corresponding to bins""" bins = list( copy.deepcopy(bins) ) # take a copy since we are inserting and popping if bins[0] == -np.inf or bins[0] == -float("inf"): added_min = False bins[0] = -float("inf") else: added_min = True bins.insert(0, -float("inf")) if bins[-1] == np.inf or bins[-1] == float("inf"): added_max = False bins[-1] = float("inf") else: added_max = True bins.append(float("inf")) temp_column = df.select(column).where(F.col(column).isNotNull()) bucketizer = Bucketizer(splits=bins, inputCol=column, outputCol="buckets") bucketed = bucketizer.setHandleInvalid("skip").transform(temp_column) # This is painful to do, but: bucketizer cannot handle values outside of a range # (hence adding -/+ infinity above) # Further, it *always* follows the numpy convention of lower_bound <= bin < upper_bound # for all but the last bin # But, since the last bin in our case will often be +infinity, we need to # find the number of values exactly equal to the upper bound to add those # We'll try for an optimization by asking for it at the same time if added_max: upper_bound_count = ( temp_column.select(column).filter(F.col(column) == bins[-2]).count() ) else: upper_bound_count = 0 hist_rows = bucketed.groupBy("buckets").count().collect() # Spark only returns buckets that have nonzero counts. 
hist = [0] * (len(bins) - 1) for row in hist_rows: hist[int(row["buckets"])] = row["count"] hist[-2] += upper_bound_count if added_min: below_bins = hist.pop(0) bins.pop(0) if below_bins > 0: logger.warning("Discarding histogram values below lowest bin.") if added_max: above_bins = hist.pop(-1) bins.pop(-1) if above_bins > 0: logger.warning("Discarding histogram values above highest bin.") return hist
[ "def", "_spark", "(", "cls", ",", "execution_engine", ":", "SparkDFExecutionEngine", ",", "metric_domain_kwargs", ":", "Dict", ",", "metric_value_kwargs", ":", "Dict", ",", "metrics", ":", "Dict", "[", "Tuple", ",", "Any", "]", ",", "runtime_configuration", ":", "Dict", ",", ")", ":", "df", ",", "_", ",", "accessor_domain_kwargs", "=", "execution_engine", ".", "get_compute_domain", "(", "domain_kwargs", "=", "metric_domain_kwargs", ",", "domain_type", "=", "MetricDomainTypes", ".", "COLUMN", ")", "bins", "=", "metric_value_kwargs", "[", "\"bins\"", "]", "column", "=", "metric_domain_kwargs", "[", "\"column\"", "]", "bins", "=", "list", "(", "copy", ".", "deepcopy", "(", "bins", ")", ")", "# take a copy since we are inserting and popping", "if", "bins", "[", "0", "]", "==", "-", "np", ".", "inf", "or", "bins", "[", "0", "]", "==", "-", "float", "(", "\"inf\"", ")", ":", "added_min", "=", "False", "bins", "[", "0", "]", "=", "-", "float", "(", "\"inf\"", ")", "else", ":", "added_min", "=", "True", "bins", ".", "insert", "(", "0", ",", "-", "float", "(", "\"inf\"", ")", ")", "if", "bins", "[", "-", "1", "]", "==", "np", ".", "inf", "or", "bins", "[", "-", "1", "]", "==", "float", "(", "\"inf\"", ")", ":", "added_max", "=", "False", "bins", "[", "-", "1", "]", "=", "float", "(", "\"inf\"", ")", "else", ":", "added_max", "=", "True", "bins", ".", "append", "(", "float", "(", "\"inf\"", ")", ")", "temp_column", "=", "df", ".", "select", "(", "column", ")", ".", "where", "(", "F", ".", "col", "(", "column", ")", ".", "isNotNull", "(", ")", ")", "bucketizer", "=", "Bucketizer", "(", "splits", "=", "bins", ",", "inputCol", "=", "column", ",", "outputCol", "=", "\"buckets\"", ")", "bucketed", "=", "bucketizer", ".", "setHandleInvalid", "(", "\"skip\"", ")", ".", "transform", "(", "temp_column", ")", "# This is painful to do, but: bucketizer cannot handle values outside of a range", "# (hence adding -/+ infinity above)", "# Further, it *always* follows the numpy convention 
of lower_bound <= bin < upper_bound", "# for all but the last bin", "# But, since the last bin in our case will often be +infinity, we need to", "# find the number of values exactly equal to the upper bound to add those", "# We'll try for an optimization by asking for it at the same time", "if", "added_max", ":", "upper_bound_count", "=", "(", "temp_column", ".", "select", "(", "column", ")", ".", "filter", "(", "F", ".", "col", "(", "column", ")", "==", "bins", "[", "-", "2", "]", ")", ".", "count", "(", ")", ")", "else", ":", "upper_bound_count", "=", "0", "hist_rows", "=", "bucketed", ".", "groupBy", "(", "\"buckets\"", ")", ".", "count", "(", ")", ".", "collect", "(", ")", "# Spark only returns buckets that have nonzero counts.", "hist", "=", "[", "0", "]", "*", "(", "len", "(", "bins", ")", "-", "1", ")", "for", "row", "in", "hist_rows", ":", "hist", "[", "int", "(", "row", "[", "\"buckets\"", "]", ")", "]", "=", "row", "[", "\"count\"", "]", "hist", "[", "-", "2", "]", "+=", "upper_bound_count", "if", "added_min", ":", "below_bins", "=", "hist", ".", "pop", "(", "0", ")", "bins", ".", "pop", "(", "0", ")", "if", "below_bins", ">", "0", ":", "logger", ".", "warning", "(", "\"Discarding histogram values below lowest bin.\"", ")", "if", "added_max", ":", "above_bins", "=", "hist", ".", "pop", "(", "-", "1", ")", "bins", ".", "pop", "(", "-", "1", ")", "if", "above_bins", ">", "0", ":", "logger", ".", "warning", "(", "\"Discarding histogram values above highest bin.\"", ")", "return", "hist" ]
[ 160, 4 ]
[ 233, 19 ]
python
en
['en', 'en', 'en']
True
MetaSparkDFDataset.column_map_expectation
(cls, func)
Constructs an expectation using column-map semantics. The MetaSparkDFDataset implementation replaces the "column" parameter supplied by the user with a Spark Dataframe with the actual column data. The current approach for functions implementing expectation logic is to append a column named "__success" to this dataframe and return to this decorator. See :func:`column_map_expectation <great_expectations.Dataset.base.Dataset.column_map_expectation>` \ for full documentation of this function.
Constructs an expectation using column-map semantics.
def column_map_expectation(cls, func): """Constructs an expectation using column-map semantics. The MetaSparkDFDataset implementation replaces the "column" parameter supplied by the user with a Spark Dataframe with the actual column data. The current approach for functions implementing expectation logic is to append a column named "__success" to this dataframe and return to this decorator. See :func:`column_map_expectation <great_expectations.Dataset.base.Dataset.column_map_expectation>` \ for full documentation of this function. """ argspec = inspect.getfullargspec(func)[0][1:] @cls.expectation(argspec) @wraps(func) def inner_wrapper( self, column, mostly=None, result_format=None, *args, **kwargs, ): """ This whole decorator is pending a re-write. Currently there is are huge performance issues when the # of unexpected elements gets large (10s of millions). Additionally, there is likely easy optimization opportunities by coupling result_format with how many different transformations are done on the dataset, as is done in sqlalchemy_dataset. """ # Rename column so we only have to handle dot notation here eval_col = "__eval_col_" + column.replace(".", "__").replace("`", "_") self.spark_df = self.spark_df.withColumn(eval_col, col(column)) if result_format is None: result_format = self.default_expectation_args["result_format"] result_format = parse_result_format(result_format) # this is a little dangerous: expectations that specify "COMPLETE" result format and have a very # large number of unexpected results could hang for a long time. 
we should either call this out in docs # or put a limit on it if result_format["result_format"] == "COMPLETE": unexpected_count_limit = None else: unexpected_count_limit = result_format["partial_unexpected_count"] col_df = self.spark_df.select(col(eval_col)) # pyspark.sql.DataFrame # a couple of tests indicate that caching here helps performance col_df.persist() element_count = self.get_row_count() # FIXME temporary fix for missing/ignored value if func.__name__ not in [ "expect_column_values_to_not_be_null", "expect_column_values_to_be_null", ]: col_df = col_df.filter(col_df[0].isNotNull()) # these nonnull_counts are cached by SparkDFDataset nonnull_count = self.get_column_nonnull_count(eval_col) else: nonnull_count = element_count # success_df will have columns [column, '__success'] # this feels a little hacky, so might want to change success_df = func(self, col_df, *args, **kwargs) success_count = success_df.filter("__success = True").count() unexpected_count = nonnull_count - success_count if unexpected_count == 0: # save some computation time if no unexpected items maybe_limited_unexpected_list = [] else: # here's an example of a place where we could do optimizations if we knew result format: see # comment block below unexpected_df = success_df.filter("__success = False") if unexpected_count_limit: unexpected_df = unexpected_df.limit(unexpected_count_limit) maybe_limited_unexpected_list = [ row[eval_col] for row in unexpected_df.collect() ] if "output_strftime_format" in kwargs: output_strftime_format = kwargs["output_strftime_format"] parsed_maybe_limited_unexpected_list = [] for val in maybe_limited_unexpected_list: if val is None: parsed_maybe_limited_unexpected_list.append(val) else: if isinstance(val, str): val = parse(val) parsed_maybe_limited_unexpected_list.append( datetime.strftime(val, output_strftime_format) ) maybe_limited_unexpected_list = parsed_maybe_limited_unexpected_list success, percent_success = self._calc_map_expectation_success( 
success_count, nonnull_count, mostly ) # Currently the abstraction of "result_format" that _format_column_map_output provides # limits some possible optimizations within the column-map decorator. It seems that either # this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator # should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method. # In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement # properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently). # Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and # with the optimizations based on result_format. A side benefit would be implementing an interface for the user # to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for # tablular datasets maybe). However, unclear if this is worth it or if it would conflict with optimizations being done # in other dataset implementations. return_obj = self._format_map_output( result_format, success, element_count, nonnull_count, unexpected_count, maybe_limited_unexpected_list, unexpected_index_list=None, ) # FIXME Temp fix for result format if func.__name__ in [ "expect_column_values_to_not_be_null", "expect_column_values_to_be_null", ]: del return_obj["result"]["unexpected_percent_nonmissing"] del return_obj["result"]["missing_count"] del return_obj["result"]["missing_percent"] try: del return_obj["result"]["partial_unexpected_counts"] except KeyError: pass col_df.unpersist() return return_obj inner_wrapper.__name__ = func.__name__ inner_wrapper.__doc__ = func.__doc__ return inner_wrapper
[ "def", "column_map_expectation", "(", "cls", ",", "func", ")", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "[", "0", "]", "[", "1", ":", "]", "@", "cls", ".", "expectation", "(", "argspec", ")", "@", "wraps", "(", "func", ")", "def", "inner_wrapper", "(", "self", ",", "column", ",", "mostly", "=", "None", ",", "result_format", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ",", ")", ":", "\"\"\"\n This whole decorator is pending a re-write. Currently there is are huge performance issues\n when the # of unexpected elements gets large (10s of millions). Additionally, there is likely\n easy optimization opportunities by coupling result_format with how many different transformations\n are done on the dataset, as is done in sqlalchemy_dataset.\n \"\"\"", "# Rename column so we only have to handle dot notation here", "eval_col", "=", "\"__eval_col_\"", "+", "column", ".", "replace", "(", "\".\"", ",", "\"__\"", ")", ".", "replace", "(", "\"`\"", ",", "\"_\"", ")", "self", ".", "spark_df", "=", "self", ".", "spark_df", ".", "withColumn", "(", "eval_col", ",", "col", "(", "column", ")", ")", "if", "result_format", "is", "None", ":", "result_format", "=", "self", ".", "default_expectation_args", "[", "\"result_format\"", "]", "result_format", "=", "parse_result_format", "(", "result_format", ")", "# this is a little dangerous: expectations that specify \"COMPLETE\" result format and have a very", "# large number of unexpected results could hang for a long time. 
we should either call this out in docs", "# or put a limit on it", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "unexpected_count_limit", "=", "None", "else", ":", "unexpected_count_limit", "=", "result_format", "[", "\"partial_unexpected_count\"", "]", "col_df", "=", "self", ".", "spark_df", ".", "select", "(", "col", "(", "eval_col", ")", ")", "# pyspark.sql.DataFrame", "# a couple of tests indicate that caching here helps performance", "col_df", ".", "persist", "(", ")", "element_count", "=", "self", ".", "get_row_count", "(", ")", "# FIXME temporary fix for missing/ignored value", "if", "func", ".", "__name__", "not", "in", "[", "\"expect_column_values_to_not_be_null\"", ",", "\"expect_column_values_to_be_null\"", ",", "]", ":", "col_df", "=", "col_df", ".", "filter", "(", "col_df", "[", "0", "]", ".", "isNotNull", "(", ")", ")", "# these nonnull_counts are cached by SparkDFDataset", "nonnull_count", "=", "self", ".", "get_column_nonnull_count", "(", "eval_col", ")", "else", ":", "nonnull_count", "=", "element_count", "# success_df will have columns [column, '__success']", "# this feels a little hacky, so might want to change", "success_df", "=", "func", "(", "self", ",", "col_df", ",", "*", "args", ",", "*", "*", "kwargs", ")", "success_count", "=", "success_df", ".", "filter", "(", "\"__success = True\"", ")", ".", "count", "(", ")", "unexpected_count", "=", "nonnull_count", "-", "success_count", "if", "unexpected_count", "==", "0", ":", "# save some computation time if no unexpected items", "maybe_limited_unexpected_list", "=", "[", "]", "else", ":", "# here's an example of a place where we could do optimizations if we knew result format: see", "# comment block below", "unexpected_df", "=", "success_df", ".", "filter", "(", "\"__success = False\"", ")", "if", "unexpected_count_limit", ":", "unexpected_df", "=", "unexpected_df", ".", "limit", "(", "unexpected_count_limit", ")", "maybe_limited_unexpected_list", "=", "[", 
"row", "[", "eval_col", "]", "for", "row", "in", "unexpected_df", ".", "collect", "(", ")", "]", "if", "\"output_strftime_format\"", "in", "kwargs", ":", "output_strftime_format", "=", "kwargs", "[", "\"output_strftime_format\"", "]", "parsed_maybe_limited_unexpected_list", "=", "[", "]", "for", "val", "in", "maybe_limited_unexpected_list", ":", "if", "val", "is", "None", ":", "parsed_maybe_limited_unexpected_list", ".", "append", "(", "val", ")", "else", ":", "if", "isinstance", "(", "val", ",", "str", ")", ":", "val", "=", "parse", "(", "val", ")", "parsed_maybe_limited_unexpected_list", ".", "append", "(", "datetime", ".", "strftime", "(", "val", ",", "output_strftime_format", ")", ")", "maybe_limited_unexpected_list", "=", "parsed_maybe_limited_unexpected_list", "success", ",", "percent_success", "=", "self", ".", "_calc_map_expectation_success", "(", "success_count", ",", "nonnull_count", ",", "mostly", ")", "# Currently the abstraction of \"result_format\" that _format_column_map_output provides", "# limits some possible optimizations within the column-map decorator. It seems that either", "# this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator", "# should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method.", "# In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement", "# properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently).", "# Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and", "# with the optimizations based on result_format. A side benefit would be implementing an interface for the user", "# to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for", "# tablular datasets maybe). 
However, unclear if this is worth it or if it would conflict with optimizations being done", "# in other dataset implementations.", "return_obj", "=", "self", ".", "_format_map_output", "(", "result_format", ",", "success", ",", "element_count", ",", "nonnull_count", ",", "unexpected_count", ",", "maybe_limited_unexpected_list", ",", "unexpected_index_list", "=", "None", ",", ")", "# FIXME Temp fix for result format", "if", "func", ".", "__name__", "in", "[", "\"expect_column_values_to_not_be_null\"", ",", "\"expect_column_values_to_be_null\"", ",", "]", ":", "del", "return_obj", "[", "\"result\"", "]", "[", "\"unexpected_percent_nonmissing\"", "]", "del", "return_obj", "[", "\"result\"", "]", "[", "\"missing_count\"", "]", "del", "return_obj", "[", "\"result\"", "]", "[", "\"missing_percent\"", "]", "try", ":", "del", "return_obj", "[", "\"result\"", "]", "[", "\"partial_unexpected_counts\"", "]", "except", "KeyError", ":", "pass", "col_df", ".", "unpersist", "(", ")", "return", "return_obj", "inner_wrapper", ".", "__name__", "=", "func", ".", "__name__", "inner_wrapper", ".", "__doc__", "=", "func", ".", "__doc__", "return", "inner_wrapper" ]
[ 67, 4 ]
[ 210, 28 ]
python
en
['en', 'lb', 'en']
True
MetaSparkDFDataset.column_pair_map_expectation
(cls, func)
The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating truthiness of some condition on a per row basis across a pair of columns.
The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating truthiness of some condition on a per row basis across a pair of columns.
def column_pair_map_expectation(cls, func): """ The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating truthiness of some condition on a per row basis across a pair of columns. """ argspec = inspect.getfullargspec(func)[0][1:] @cls.expectation(argspec) @wraps(func) def inner_wrapper( self, column_A, column_B, mostly=None, ignore_row_if="both_values_are_missing", result_format=None, *args, **kwargs, ): # Rename column so we only have to handle dot notation here eval_col_A = "__eval_col_A_" + column_A.replace(".", "__").replace("`", "_") eval_col_B = "__eval_col_B_" + column_B.replace(".", "__").replace("`", "_") self.spark_df = self.spark_df.withColumn( eval_col_A, col(column_A) ).withColumn(eval_col_B, col(column_B)) if result_format is None: result_format = self.default_expectation_args["result_format"] result_format = parse_result_format(result_format) # this is a little dangerous: expectations that specify "COMPLETE" result format and have a very # large number of unexpected results could hang for a long time. 
we should either call this out in docs # or put a limit on it if result_format["result_format"] == "COMPLETE": unexpected_count_limit = None else: unexpected_count_limit = result_format["partial_unexpected_count"] cols_df = self.spark_df.select(eval_col_A, eval_col_B).withColumn( "__row", monotonically_increasing_id() ) # pyspark.sql.DataFrame # a couple of tests indicate that caching here helps performance cols_df.cache() element_count = self.get_row_count() if ignore_row_if == "both_values_are_missing": boolean_mapped_null_values = cols_df.selectExpr( "`__row`", "`{0}` AS `A_{0}`".format(eval_col_A), "`{0}` AS `B_{0}`".format(eval_col_B), "ISNULL(`{}`) AND ISNULL(`{}`) AS `__null_val`".format( eval_col_A, eval_col_B ), ) elif ignore_row_if == "either_value_is_missing": boolean_mapped_null_values = cols_df.selectExpr( "`__row`", "`{0}` AS `A_{0}`".format(eval_col_A), "`{0}` AS `B_{0}`".format(eval_col_B), "ISNULL(`{}`) OR ISNULL(`{}`) AS `__null_val`".format( eval_col_A, eval_col_B ), ) elif ignore_row_if == "never": boolean_mapped_null_values = cols_df.selectExpr( "`__row`", "`{0}` AS `A_{0}`".format(eval_col_A), "`{0}` AS `B_{0}`".format(eval_col_B), lit(False).alias("__null_val"), ) else: raise ValueError("Unknown value of ignore_row_if: %s", (ignore_row_if,)) # since pyspark guaranteed each columns selected has the same number of rows, no need to do assert as in pandas # assert series_A.count() == ( # series_B.count()), "Series A and B must be the same length" nonnull_df = boolean_mapped_null_values.filter("__null_val = False") nonnull_count = nonnull_df.count() col_A_df = nonnull_df.select("__row", "`A_{}`".format(eval_col_A)) col_B_df = nonnull_df.select("__row", "`B_{}`".format(eval_col_B)) success_df = func(self, col_A_df, col_B_df, *args, **kwargs) success_count = success_df.filter("__success = True").count() unexpected_count = nonnull_count - success_count if unexpected_count == 0: # save some computation time if no unexpected items 
maybe_limited_unexpected_list = [] else: # here's an example of a place where we could do optimizations if we knew result format: see # comment block below unexpected_df = success_df.filter("__success = False") if unexpected_count_limit: unexpected_df = unexpected_df.limit(unexpected_count_limit) maybe_limited_unexpected_list = [ ( row["A_{}".format(eval_col_A)], row["B_{}".format(eval_col_B)], ) for row in unexpected_df.collect() ] if "output_strftime_format" in kwargs: output_strftime_format = kwargs["output_strftime_format"] parsed_maybe_limited_unexpected_list = [] for val in maybe_limited_unexpected_list: if val is None or (val[0] is None or val[1] is None): parsed_maybe_limited_unexpected_list.append(val) else: if isinstance(val[0], str) and isinstance(val[1], str): val = (parse(val[0]), parse(val[1])) parsed_maybe_limited_unexpected_list.append( ( datetime.strftime(val[0], output_strftime_format), datetime.strftime(val[1], output_strftime_format), ) ) maybe_limited_unexpected_list = parsed_maybe_limited_unexpected_list success, percent_success = self._calc_map_expectation_success( success_count, nonnull_count, mostly ) # Currently the abstraction of "result_format" that _format_column_map_output provides # limits some possible optimizations within the column-map decorator. It seems that either # this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator # should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method. # In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement # properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently). # Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and # with the optimizations based on result_format. 
A side benefit would be implementing an interface for the user # to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for # tablular datasets maybe). However, unclear if this is worth it or if it would conflict with optimizations being done # in other dataset implementations. return_obj = self._format_map_output( result_format, success, element_count, nonnull_count, unexpected_count, maybe_limited_unexpected_list, unexpected_index_list=None, ) # # FIXME Temp fix for result format # if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']: # del return_obj['result']['unexpected_percent_nonmissing'] # del return_obj['result']['missing_count'] # del return_obj['result']['missing_percent'] # try: # del return_obj['result']['partial_unexpected_counts'] # except KeyError: # pass cols_df.unpersist() return return_obj inner_wrapper.__name__ = func.__name__ inner_wrapper.__doc__ = func.__doc__ return inner_wrapper
[ "def", "column_pair_map_expectation", "(", "cls", ",", "func", ")", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "[", "0", "]", "[", "1", ":", "]", "@", "cls", ".", "expectation", "(", "argspec", ")", "@", "wraps", "(", "func", ")", "def", "inner_wrapper", "(", "self", ",", "column_A", ",", "column_B", ",", "mostly", "=", "None", ",", "ignore_row_if", "=", "\"both_values_are_missing\"", ",", "result_format", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ",", ")", ":", "# Rename column so we only have to handle dot notation here", "eval_col_A", "=", "\"__eval_col_A_\"", "+", "column_A", ".", "replace", "(", "\".\"", ",", "\"__\"", ")", ".", "replace", "(", "\"`\"", ",", "\"_\"", ")", "eval_col_B", "=", "\"__eval_col_B_\"", "+", "column_B", ".", "replace", "(", "\".\"", ",", "\"__\"", ")", ".", "replace", "(", "\"`\"", ",", "\"_\"", ")", "self", ".", "spark_df", "=", "self", ".", "spark_df", ".", "withColumn", "(", "eval_col_A", ",", "col", "(", "column_A", ")", ")", ".", "withColumn", "(", "eval_col_B", ",", "col", "(", "column_B", ")", ")", "if", "result_format", "is", "None", ":", "result_format", "=", "self", ".", "default_expectation_args", "[", "\"result_format\"", "]", "result_format", "=", "parse_result_format", "(", "result_format", ")", "# this is a little dangerous: expectations that specify \"COMPLETE\" result format and have a very", "# large number of unexpected results could hang for a long time. 
we should either call this out in docs", "# or put a limit on it", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "unexpected_count_limit", "=", "None", "else", ":", "unexpected_count_limit", "=", "result_format", "[", "\"partial_unexpected_count\"", "]", "cols_df", "=", "self", ".", "spark_df", ".", "select", "(", "eval_col_A", ",", "eval_col_B", ")", ".", "withColumn", "(", "\"__row\"", ",", "monotonically_increasing_id", "(", ")", ")", "# pyspark.sql.DataFrame", "# a couple of tests indicate that caching here helps performance", "cols_df", ".", "cache", "(", ")", "element_count", "=", "self", ".", "get_row_count", "(", ")", "if", "ignore_row_if", "==", "\"both_values_are_missing\"", ":", "boolean_mapped_null_values", "=", "cols_df", ".", "selectExpr", "(", "\"`__row`\"", ",", "\"`{0}` AS `A_{0}`\"", ".", "format", "(", "eval_col_A", ")", ",", "\"`{0}` AS `B_{0}`\"", ".", "format", "(", "eval_col_B", ")", ",", "\"ISNULL(`{}`) AND ISNULL(`{}`) AS `__null_val`\"", ".", "format", "(", "eval_col_A", ",", "eval_col_B", ")", ",", ")", "elif", "ignore_row_if", "==", "\"either_value_is_missing\"", ":", "boolean_mapped_null_values", "=", "cols_df", ".", "selectExpr", "(", "\"`__row`\"", ",", "\"`{0}` AS `A_{0}`\"", ".", "format", "(", "eval_col_A", ")", ",", "\"`{0}` AS `B_{0}`\"", ".", "format", "(", "eval_col_B", ")", ",", "\"ISNULL(`{}`) OR ISNULL(`{}`) AS `__null_val`\"", ".", "format", "(", "eval_col_A", ",", "eval_col_B", ")", ",", ")", "elif", "ignore_row_if", "==", "\"never\"", ":", "boolean_mapped_null_values", "=", "cols_df", ".", "selectExpr", "(", "\"`__row`\"", ",", "\"`{0}` AS `A_{0}`\"", ".", "format", "(", "eval_col_A", ")", ",", "\"`{0}` AS `B_{0}`\"", ".", "format", "(", "eval_col_B", ")", ",", "lit", "(", "False", ")", ".", "alias", "(", "\"__null_val\"", ")", ",", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown value of ignore_row_if: %s\"", ",", "(", "ignore_row_if", ",", ")", ")", "# since pyspark guaranteed 
each columns selected has the same number of rows, no need to do assert as in pandas", "# assert series_A.count() == (", "# series_B.count()), \"Series A and B must be the same length\"", "nonnull_df", "=", "boolean_mapped_null_values", ".", "filter", "(", "\"__null_val = False\"", ")", "nonnull_count", "=", "nonnull_df", ".", "count", "(", ")", "col_A_df", "=", "nonnull_df", ".", "select", "(", "\"__row\"", ",", "\"`A_{}`\"", ".", "format", "(", "eval_col_A", ")", ")", "col_B_df", "=", "nonnull_df", ".", "select", "(", "\"__row\"", ",", "\"`B_{}`\"", ".", "format", "(", "eval_col_B", ")", ")", "success_df", "=", "func", "(", "self", ",", "col_A_df", ",", "col_B_df", ",", "*", "args", ",", "*", "*", "kwargs", ")", "success_count", "=", "success_df", ".", "filter", "(", "\"__success = True\"", ")", ".", "count", "(", ")", "unexpected_count", "=", "nonnull_count", "-", "success_count", "if", "unexpected_count", "==", "0", ":", "# save some computation time if no unexpected items", "maybe_limited_unexpected_list", "=", "[", "]", "else", ":", "# here's an example of a place where we could do optimizations if we knew result format: see", "# comment block below", "unexpected_df", "=", "success_df", ".", "filter", "(", "\"__success = False\"", ")", "if", "unexpected_count_limit", ":", "unexpected_df", "=", "unexpected_df", ".", "limit", "(", "unexpected_count_limit", ")", "maybe_limited_unexpected_list", "=", "[", "(", "row", "[", "\"A_{}\"", ".", "format", "(", "eval_col_A", ")", "]", ",", "row", "[", "\"B_{}\"", ".", "format", "(", "eval_col_B", ")", "]", ",", ")", "for", "row", "in", "unexpected_df", ".", "collect", "(", ")", "]", "if", "\"output_strftime_format\"", "in", "kwargs", ":", "output_strftime_format", "=", "kwargs", "[", "\"output_strftime_format\"", "]", "parsed_maybe_limited_unexpected_list", "=", "[", "]", "for", "val", "in", "maybe_limited_unexpected_list", ":", "if", "val", "is", "None", "or", "(", "val", "[", "0", "]", "is", "None", "or", "val", "[", 
"1", "]", "is", "None", ")", ":", "parsed_maybe_limited_unexpected_list", ".", "append", "(", "val", ")", "else", ":", "if", "isinstance", "(", "val", "[", "0", "]", ",", "str", ")", "and", "isinstance", "(", "val", "[", "1", "]", ",", "str", ")", ":", "val", "=", "(", "parse", "(", "val", "[", "0", "]", ")", ",", "parse", "(", "val", "[", "1", "]", ")", ")", "parsed_maybe_limited_unexpected_list", ".", "append", "(", "(", "datetime", ".", "strftime", "(", "val", "[", "0", "]", ",", "output_strftime_format", ")", ",", "datetime", ".", "strftime", "(", "val", "[", "1", "]", ",", "output_strftime_format", ")", ",", ")", ")", "maybe_limited_unexpected_list", "=", "parsed_maybe_limited_unexpected_list", "success", ",", "percent_success", "=", "self", ".", "_calc_map_expectation_success", "(", "success_count", ",", "nonnull_count", ",", "mostly", ")", "# Currently the abstraction of \"result_format\" that _format_column_map_output provides", "# limits some possible optimizations within the column-map decorator. It seems that either", "# this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator", "# should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method.", "# In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement", "# properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently).", "# Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and", "# with the optimizations based on result_format. A side benefit would be implementing an interface for the user", "# to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for", "# tablular datasets maybe). 
However, unclear if this is worth it or if it would conflict with optimizations being done", "# in other dataset implementations.", "return_obj", "=", "self", ".", "_format_map_output", "(", "result_format", ",", "success", ",", "element_count", ",", "nonnull_count", ",", "unexpected_count", ",", "maybe_limited_unexpected_list", ",", "unexpected_index_list", "=", "None", ",", ")", "# # FIXME Temp fix for result format", "# if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']:", "# del return_obj['result']['unexpected_percent_nonmissing']", "# del return_obj['result']['missing_count']", "# del return_obj['result']['missing_percent']", "# try:", "# del return_obj['result']['partial_unexpected_counts']", "# except KeyError:", "# pass", "cols_df", ".", "unpersist", "(", ")", "return", "return_obj", "inner_wrapper", ".", "__name__", "=", "func", ".", "__name__", "inner_wrapper", ".", "__doc__", "=", "func", ".", "__doc__", "return", "inner_wrapper" ]
[ 213, 4 ]
[ 379, 28 ]
python
en
['en', 'error', 'th']
False
MetaSparkDFDataset.multicolumn_map_expectation
(cls, func)
The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating truthiness of some condition on a per row basis across a set of columns.
The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating truthiness of some condition on a per row basis across a set of columns.
def multicolumn_map_expectation(cls, func): """ The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating truthiness of some condition on a per row basis across a set of columns. """ argspec = inspect.getfullargspec(func)[0][1:] @cls.expectation(argspec) @wraps(func) def inner_wrapper( self, column_list, mostly=None, ignore_row_if="all_values_are_missing", result_format=None, *args, **kwargs, ): # Rename column so we only have to handle dot notation here eval_cols = [] for col_name in column_list: eval_col = "__eval_col_" + col_name.replace(".", "__").replace("`", "_") eval_cols.append(eval_col) self.spark_df = self.spark_df.withColumn(eval_col, col(col_name)) if result_format is None: result_format = self.default_expectation_args["result_format"] result_format = parse_result_format(result_format) # this is a little dangerous: expectations that specify "COMPLETE" result format and have a very # large number of unexpected results could hang for a long time. 
we should either call this out in docs # or put a limit on it if result_format["result_format"] == "COMPLETE": unexpected_count_limit = None else: unexpected_count_limit = result_format["partial_unexpected_count"] temp_df = self.spark_df.select(*eval_cols) # pyspark.sql.DataFrame # a couple of tests indicate that caching here helps performance temp_df.cache() element_count = self.get_row_count() if ignore_row_if == "all_values_are_missing": boolean_mapped_skip_values = temp_df.select( [ *eval_cols, reduce( lambda a, b: a & b, [col(c).isNull() for c in eval_cols] ).alias("__null_val"), ] ) elif ignore_row_if == "any_value_is_missing": boolean_mapped_skip_values = temp_df.select( [ *eval_cols, reduce( lambda a, b: a | b, [col(c).isNull() for c in eval_cols] ).alias("__null_val"), ] ) elif ignore_row_if == "never": boolean_mapped_skip_values = temp_df.select( [*eval_cols, lit(False).alias("__null_val")] ) else: raise ValueError("Unknown value of ignore_row_if: %s", (ignore_row_if,)) nonnull_df = boolean_mapped_skip_values.filter("__null_val = False") nonnull_count = nonnull_df.count() cols_df = nonnull_df.select(*eval_cols) success_df = func(self, cols_df, *args, **kwargs) success_count = success_df.filter("__success = True").count() unexpected_count = nonnull_count - success_count if unexpected_count == 0: maybe_limited_unexpected_list = [] else: # here's an example of a place where we could do optimizations if we knew result format: see # comment block below unexpected_df = success_df.filter("__success = False") if unexpected_count_limit: unexpected_df = unexpected_df.limit(unexpected_count_limit) maybe_limited_unexpected_list = [ OrderedDict( (col_name, row[eval_col_name]) for (col_name, eval_col_name) in zip(column_list, eval_cols) ) for row in unexpected_df.collect() ] if "output_strftime_format" in kwargs: output_strftime_format = kwargs["output_strftime_format"] parsed_maybe_limited_unexpected_list = [] for val in maybe_limited_unexpected_list: if val is None 
or not all(v for k, v in val): parsed_maybe_limited_unexpected_list.append(val) else: if all(isinstance(v, str) for k, v in val): val = OrderedDict((k, parse(v)) for k, v in val) parsed_maybe_limited_unexpected_list.append( OrderedDict( (k, datetime.strftime(v, output_strftime_format)) for k, v in val ) ) maybe_limited_unexpected_list = parsed_maybe_limited_unexpected_list success, percent_success = self._calc_map_expectation_success( success_count, nonnull_count, mostly ) # Currently the abstraction of "result_format" that _format_column_map_output provides # limits some possible optimizations within the column-map decorator. It seems that either # this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator # should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method. # In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement # properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently). # Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and # with the optimizations based on result_format. A side benefit would be implementing an interface for the user # to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for # tablular datasets maybe). However, unclear if this is worth it or if it would conflict with optimizations being done # in other dataset implementations. return_obj = self._format_map_output( result_format, success, element_count, nonnull_count, unexpected_count, maybe_limited_unexpected_list, unexpected_index_list=None, ) temp_df.unpersist() return return_obj inner_wrapper.__name__ = func.__name__ inner_wrapper.__doc__ = func.__doc__ return inner_wrapper
[ "def", "multicolumn_map_expectation", "(", "cls", ",", "func", ")", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "[", "0", "]", "[", "1", ":", "]", "@", "cls", ".", "expectation", "(", "argspec", ")", "@", "wraps", "(", "func", ")", "def", "inner_wrapper", "(", "self", ",", "column_list", ",", "mostly", "=", "None", ",", "ignore_row_if", "=", "\"all_values_are_missing\"", ",", "result_format", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ",", ")", ":", "# Rename column so we only have to handle dot notation here", "eval_cols", "=", "[", "]", "for", "col_name", "in", "column_list", ":", "eval_col", "=", "\"__eval_col_\"", "+", "col_name", ".", "replace", "(", "\".\"", ",", "\"__\"", ")", ".", "replace", "(", "\"`\"", ",", "\"_\"", ")", "eval_cols", ".", "append", "(", "eval_col", ")", "self", ".", "spark_df", "=", "self", ".", "spark_df", ".", "withColumn", "(", "eval_col", ",", "col", "(", "col_name", ")", ")", "if", "result_format", "is", "None", ":", "result_format", "=", "self", ".", "default_expectation_args", "[", "\"result_format\"", "]", "result_format", "=", "parse_result_format", "(", "result_format", ")", "# this is a little dangerous: expectations that specify \"COMPLETE\" result format and have a very", "# large number of unexpected results could hang for a long time. 
we should either call this out in docs", "# or put a limit on it", "if", "result_format", "[", "\"result_format\"", "]", "==", "\"COMPLETE\"", ":", "unexpected_count_limit", "=", "None", "else", ":", "unexpected_count_limit", "=", "result_format", "[", "\"partial_unexpected_count\"", "]", "temp_df", "=", "self", ".", "spark_df", ".", "select", "(", "*", "eval_cols", ")", "# pyspark.sql.DataFrame", "# a couple of tests indicate that caching here helps performance", "temp_df", ".", "cache", "(", ")", "element_count", "=", "self", ".", "get_row_count", "(", ")", "if", "ignore_row_if", "==", "\"all_values_are_missing\"", ":", "boolean_mapped_skip_values", "=", "temp_df", ".", "select", "(", "[", "*", "eval_cols", ",", "reduce", "(", "lambda", "a", ",", "b", ":", "a", "&", "b", ",", "[", "col", "(", "c", ")", ".", "isNull", "(", ")", "for", "c", "in", "eval_cols", "]", ")", ".", "alias", "(", "\"__null_val\"", ")", ",", "]", ")", "elif", "ignore_row_if", "==", "\"any_value_is_missing\"", ":", "boolean_mapped_skip_values", "=", "temp_df", ".", "select", "(", "[", "*", "eval_cols", ",", "reduce", "(", "lambda", "a", ",", "b", ":", "a", "|", "b", ",", "[", "col", "(", "c", ")", ".", "isNull", "(", ")", "for", "c", "in", "eval_cols", "]", ")", ".", "alias", "(", "\"__null_val\"", ")", ",", "]", ")", "elif", "ignore_row_if", "==", "\"never\"", ":", "boolean_mapped_skip_values", "=", "temp_df", ".", "select", "(", "[", "*", "eval_cols", ",", "lit", "(", "False", ")", ".", "alias", "(", "\"__null_val\"", ")", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown value of ignore_row_if: %s\"", ",", "(", "ignore_row_if", ",", ")", ")", "nonnull_df", "=", "boolean_mapped_skip_values", ".", "filter", "(", "\"__null_val = False\"", ")", "nonnull_count", "=", "nonnull_df", ".", "count", "(", ")", "cols_df", "=", "nonnull_df", ".", "select", "(", "*", "eval_cols", ")", "success_df", "=", "func", "(", "self", ",", "cols_df", ",", "*", "args", ",", "*", "*", "kwargs", ")", 
"success_count", "=", "success_df", ".", "filter", "(", "\"__success = True\"", ")", ".", "count", "(", ")", "unexpected_count", "=", "nonnull_count", "-", "success_count", "if", "unexpected_count", "==", "0", ":", "maybe_limited_unexpected_list", "=", "[", "]", "else", ":", "# here's an example of a place where we could do optimizations if we knew result format: see", "# comment block below", "unexpected_df", "=", "success_df", ".", "filter", "(", "\"__success = False\"", ")", "if", "unexpected_count_limit", ":", "unexpected_df", "=", "unexpected_df", ".", "limit", "(", "unexpected_count_limit", ")", "maybe_limited_unexpected_list", "=", "[", "OrderedDict", "(", "(", "col_name", ",", "row", "[", "eval_col_name", "]", ")", "for", "(", "col_name", ",", "eval_col_name", ")", "in", "zip", "(", "column_list", ",", "eval_cols", ")", ")", "for", "row", "in", "unexpected_df", ".", "collect", "(", ")", "]", "if", "\"output_strftime_format\"", "in", "kwargs", ":", "output_strftime_format", "=", "kwargs", "[", "\"output_strftime_format\"", "]", "parsed_maybe_limited_unexpected_list", "=", "[", "]", "for", "val", "in", "maybe_limited_unexpected_list", ":", "if", "val", "is", "None", "or", "not", "all", "(", "v", "for", "k", ",", "v", "in", "val", ")", ":", "parsed_maybe_limited_unexpected_list", ".", "append", "(", "val", ")", "else", ":", "if", "all", "(", "isinstance", "(", "v", ",", "str", ")", "for", "k", ",", "v", "in", "val", ")", ":", "val", "=", "OrderedDict", "(", "(", "k", ",", "parse", "(", "v", ")", ")", "for", "k", ",", "v", "in", "val", ")", "parsed_maybe_limited_unexpected_list", ".", "append", "(", "OrderedDict", "(", "(", "k", ",", "datetime", ".", "strftime", "(", "v", ",", "output_strftime_format", ")", ")", "for", "k", ",", "v", "in", "val", ")", ")", "maybe_limited_unexpected_list", "=", "parsed_maybe_limited_unexpected_list", "success", ",", "percent_success", "=", "self", ".", "_calc_map_expectation_success", "(", "success_count", ",", "nonnull_count", 
",", "mostly", ")", "# Currently the abstraction of \"result_format\" that _format_column_map_output provides", "# limits some possible optimizations within the column-map decorator. It seems that either", "# this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator", "# should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method.", "# In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement", "# properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently).", "# Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and", "# with the optimizations based on result_format. A side benefit would be implementing an interface for the user", "# to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for", "# tablular datasets maybe). However, unclear if this is worth it or if it would conflict with optimizations being done", "# in other dataset implementations.", "return_obj", "=", "self", ".", "_format_map_output", "(", "result_format", ",", "success", ",", "element_count", ",", "nonnull_count", ",", "unexpected_count", ",", "maybe_limited_unexpected_list", ",", "unexpected_index_list", "=", "None", ",", ")", "temp_df", ".", "unpersist", "(", ")", "return", "return_obj", "inner_wrapper", ".", "__name__", "=", "func", ".", "__name__", "inner_wrapper", ".", "__doc__", "=", "func", ".", "__doc__", "return", "inner_wrapper" ]
[ 382, 4 ]
[ 524, 28 ]
python
en
['en', 'error', 'th']
False
SparkDFDataset.head
(self, n=5)
Returns a *PandasDataset* with the first *n* rows of the given Dataset
Returns a *PandasDataset* with the first *n* rows of the given Dataset
def head(self, n=5): """Returns a *PandasDataset* with the first *n* rows of the given Dataset""" return PandasDataset( self.spark_df.limit(n).toPandas(), expectation_suite=self.get_expectation_suite( discard_failed_expectations=False, discard_result_format_kwargs=False, discard_catch_exceptions_kwargs=False, discard_include_config_kwargs=False, ), )
[ "def", "head", "(", "self", ",", "n", "=", "5", ")", ":", "return", "PandasDataset", "(", "self", ".", "spark_df", ".", "limit", "(", "n", ")", ".", "toPandas", "(", ")", ",", "expectation_suite", "=", "self", ".", "get_expectation_suite", "(", "discard_failed_expectations", "=", "False", ",", "discard_result_format_kwargs", "=", "False", ",", "discard_catch_exceptions_kwargs", "=", "False", ",", "discard_include_config_kwargs", "=", "False", ",", ")", ",", ")" ]
[ 615, 4 ]
[ 625, 9 ]
python
en
['en', 'en', 'en']
True
SparkDFDataset.get_column_modes
(self, column)
leverages computation done in _get_column_value_counts
leverages computation done in _get_column_value_counts
def get_column_modes(self, column): """leverages computation done in _get_column_value_counts""" s = self.get_column_value_counts(column) return list(s[s == s.max()].index)
[ "def", "get_column_modes", "(", "self", ",", "column", ")", ":", "s", "=", "self", ".", "get_column_value_counts", "(", "column", ")", "return", "list", "(", "s", "[", "s", "==", "s", ".", "max", "(", ")", "]", ".", "index", ")" ]
[ 708, 4 ]
[ 711, 42 ]
python
en
['en', 'en', 'en']
True
SparkDFDataset.get_column_hist
(self, column, bins)
return a list of counts corresponding to bins
return a list of counts corresponding to bins
def get_column_hist(self, column, bins): """return a list of counts corresponding to bins""" bins = list( copy.deepcopy(bins) ) # take a copy since we are inserting and popping if bins[0] == -np.inf or bins[0] == -float("inf"): added_min = False bins[0] = -float("inf") else: added_min = True bins.insert(0, -float("inf")) if bins[-1] == np.inf or bins[-1] == float("inf"): added_max = False bins[-1] = float("inf") else: added_max = True bins.append(float("inf")) temp_column = self.spark_df.select(column).where(col(column).isNotNull()) bucketizer = Bucketizer(splits=bins, inputCol=column, outputCol="buckets") bucketed = bucketizer.setHandleInvalid("skip").transform(temp_column) # This is painful to do, but: bucketizer cannot handle values outside of a range # (hence adding -/+ infinity above) # Further, it *always* follows the numpy convention of lower_bound <= bin < upper_bound # for all but the last bin # But, since the last bin in our case will often be +infinity, we need to # find the number of values exactly equal to the upper bound to add those # We'll try for an optimization by asking for it at the same time if added_max: upper_bound_count = ( temp_column.select(column).filter(col(column) == bins[-2]).count() ) else: upper_bound_count = 0 hist_rows = bucketed.groupBy("buckets").count().collect() # Spark only returns buckets that have nonzero counts. hist = [0] * (len(bins) - 1) for row in hist_rows: hist[int(row["buckets"])] = row["count"] hist[-2] += upper_bound_count if added_min: below_bins = hist.pop(0) bins.pop(0) if below_bins > 0: logger.warning("Discarding histogram values below lowest bin.") if added_max: above_bins = hist.pop(-1) bins.pop(-1) if above_bins > 0: logger.warning("Discarding histogram values above highest bin.") return hist
[ "def", "get_column_hist", "(", "self", ",", "column", ",", "bins", ")", ":", "bins", "=", "list", "(", "copy", ".", "deepcopy", "(", "bins", ")", ")", "# take a copy since we are inserting and popping", "if", "bins", "[", "0", "]", "==", "-", "np", ".", "inf", "or", "bins", "[", "0", "]", "==", "-", "float", "(", "\"inf\"", ")", ":", "added_min", "=", "False", "bins", "[", "0", "]", "=", "-", "float", "(", "\"inf\"", ")", "else", ":", "added_min", "=", "True", "bins", ".", "insert", "(", "0", ",", "-", "float", "(", "\"inf\"", ")", ")", "if", "bins", "[", "-", "1", "]", "==", "np", ".", "inf", "or", "bins", "[", "-", "1", "]", "==", "float", "(", "\"inf\"", ")", ":", "added_max", "=", "False", "bins", "[", "-", "1", "]", "=", "float", "(", "\"inf\"", ")", "else", ":", "added_max", "=", "True", "bins", ".", "append", "(", "float", "(", "\"inf\"", ")", ")", "temp_column", "=", "self", ".", "spark_df", ".", "select", "(", "column", ")", ".", "where", "(", "col", "(", "column", ")", ".", "isNotNull", "(", ")", ")", "bucketizer", "=", "Bucketizer", "(", "splits", "=", "bins", ",", "inputCol", "=", "column", ",", "outputCol", "=", "\"buckets\"", ")", "bucketed", "=", "bucketizer", ".", "setHandleInvalid", "(", "\"skip\"", ")", ".", "transform", "(", "temp_column", ")", "# This is painful to do, but: bucketizer cannot handle values outside of a range", "# (hence adding -/+ infinity above)", "# Further, it *always* follows the numpy convention of lower_bound <= bin < upper_bound", "# for all but the last bin", "# But, since the last bin in our case will often be +infinity, we need to", "# find the number of values exactly equal to the upper bound to add those", "# We'll try for an optimization by asking for it at the same time", "if", "added_max", ":", "upper_bound_count", "=", "(", "temp_column", ".", "select", "(", "column", ")", ".", "filter", "(", "col", "(", "column", ")", "==", "bins", "[", "-", "2", "]", ")", ".", "count", "(", ")", ")", "else", ":", 
"upper_bound_count", "=", "0", "hist_rows", "=", "bucketed", ".", "groupBy", "(", "\"buckets\"", ")", ".", "count", "(", ")", ".", "collect", "(", ")", "# Spark only returns buckets that have nonzero counts.", "hist", "=", "[", "0", "]", "*", "(", "len", "(", "bins", ")", "-", "1", ")", "for", "row", "in", "hist_rows", ":", "hist", "[", "int", "(", "row", "[", "\"buckets\"", "]", ")", "]", "=", "row", "[", "\"count\"", "]", "hist", "[", "-", "2", "]", "+=", "upper_bound_count", "if", "added_min", ":", "below_bins", "=", "hist", ".", "pop", "(", "0", ")", "bins", ".", "pop", "(", "0", ")", "if", "below_bins", ">", "0", ":", "logger", ".", "warning", "(", "\"Discarding histogram values below lowest bin.\"", ")", "if", "added_max", ":", "above_bins", "=", "hist", ".", "pop", "(", "-", "1", ")", "bins", ".", "pop", "(", "-", "1", ")", "if", "above_bins", ">", "0", ":", "logger", ".", "warning", "(", "\"Discarding histogram values above highest bin.\"", ")", "return", "hist" ]
[ 745, 4 ]
[ 805, 19 ]
python
en
['en', 'en', 'en']
True
SparkDFDataset.expect_multicolumn_sum_to_equal
( self, column_list, sum_total, result_format=None, include_config=True, catch_exceptions=None, meta=None, )
Multi-Column Map Expectation Expects that sum of all rows for a set of columns is equal to a specific value Args: column_list (List[str]): \ Set of columns to be checked sum_total (int): \ expected sum of columns
Multi-Column Map Expectation
def expect_multicolumn_sum_to_equal( self, column_list, sum_total, result_format=None, include_config=True, catch_exceptions=None, meta=None, ): """ Multi-Column Map Expectation Expects that sum of all rows for a set of columns is equal to a specific value Args: column_list (List[str]): \ Set of columns to be checked sum_total (int): \ expected sum of columns """ expression = "+".join( ["COALESCE({}, 0)".format(col) for col in column_list.columns] ) column_list = column_list.withColumn("actual_total", expr(expression)) return column_list.withColumn( "__success", when(col("actual_total") == sum_total, lit(True)).otherwise(lit(False)), )
[ "def", "expect_multicolumn_sum_to_equal", "(", "self", ",", "column_list", ",", "sum_total", ",", "result_format", "=", "None", ",", "include_config", "=", "True", ",", "catch_exceptions", "=", "None", ",", "meta", "=", "None", ",", ")", ":", "expression", "=", "\"+\"", ".", "join", "(", "[", "\"COALESCE({}, 0)\"", ".", "format", "(", "col", ")", "for", "col", "in", "column_list", ".", "columns", "]", ")", "column_list", "=", "column_list", ".", "withColumn", "(", "\"actual_total\"", ",", "expr", "(", "expression", ")", ")", "return", "column_list", ".", "withColumn", "(", "\"__success\"", ",", "when", "(", "col", "(", "\"actual_total\"", ")", "==", "sum_total", ",", "lit", "(", "True", ")", ")", ".", "otherwise", "(", "lit", "(", "False", ")", ")", ",", ")" ]
[ 1621, 4 ]
[ 1647, 9 ]
python
en
['es', 'en', 'en']
True
validates
(field_name: str)
Register a field validator. :param str field_name: Name of the field that the method validates.
Register a field validator.
def validates(field_name: str): """Register a field validator. :param str field_name: Name of the field that the method validates. """ return set_hook(None, VALIDATES, field_name=field_name)
[ "def", "validates", "(", "field_name", ":", "str", ")", ":", "return", "set_hook", "(", "None", ",", "VALIDATES", ",", "field_name", "=", "field_name", ")" ]
[ 70, 0 ]
[ 75, 59 ]
python
en
['en', 'sv', 'en']
True
validates_schema
( fn=None, pass_many=False, pass_original=False, skip_on_field_errors=True )
Register a schema-level validator. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.validate` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. If ``pass_original=True``, the original data (before unmarshalling) will be passed as an additional argument to the method. If ``skip_on_field_errors=True``, this validation method will be skipped whenever validation errors have been detected when validating fields. .. versionchanged:: 3.0.0b1 ``skip_on_field_errors`` defaults to `True`. .. versionchanged:: 3.0.0 ``partial`` and ``many`` are always passed as keyword arguments to the decorated method.
Register a schema-level validator.
def validates_schema( fn=None, pass_many=False, pass_original=False, skip_on_field_errors=True ): """Register a schema-level validator. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.validate` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. If ``pass_original=True``, the original data (before unmarshalling) will be passed as an additional argument to the method. If ``skip_on_field_errors=True``, this validation method will be skipped whenever validation errors have been detected when validating fields. .. versionchanged:: 3.0.0b1 ``skip_on_field_errors`` defaults to `True`. .. versionchanged:: 3.0.0 ``partial`` and ``many`` are always passed as keyword arguments to the decorated method. """ return set_hook( fn, (VALIDATES_SCHEMA, pass_many), pass_original=pass_original, skip_on_field_errors=skip_on_field_errors, )
[ "def", "validates_schema", "(", "fn", "=", "None", ",", "pass_many", "=", "False", ",", "pass_original", "=", "False", ",", "skip_on_field_errors", "=", "True", ")", ":", "return", "set_hook", "(", "fn", ",", "(", "VALIDATES_SCHEMA", ",", "pass_many", ")", ",", "pass_original", "=", "pass_original", ",", "skip_on_field_errors", "=", "skip_on_field_errors", ",", ")" ]
[ 78, 0 ]
[ 105, 5 ]
python
de
['nl', 'de', 'en']
False
pre_dump
(fn=None, pass_many=False)
Register a method to invoke before serializing an object. The method receives the object to be serialized and returns the processed object. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.dump` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. .. versionchanged:: 3.0.0 ``many`` is always passed as a keyword arguments to the decorated method.
Register a method to invoke before serializing an object. The method receives the object to be serialized and returns the processed object.
def pre_dump(fn=None, pass_many=False): """Register a method to invoke before serializing an object. The method receives the object to be serialized and returns the processed object. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.dump` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. .. versionchanged:: 3.0.0 ``many`` is always passed as a keyword arguments to the decorated method. """ return set_hook(fn, (PRE_DUMP, pass_many))
[ "def", "pre_dump", "(", "fn", "=", "None", ",", "pass_many", "=", "False", ")", ":", "return", "set_hook", "(", "fn", ",", "(", "PRE_DUMP", ",", "pass_many", ")", ")" ]
[ 108, 0 ]
[ 119, 46 ]
python
en
['en', 'en', 'en']
True
post_dump
(fn=None, pass_many=False, pass_original=False)
Register a method to invoke after serializing an object. The method receives the serialized object and returns the processed object. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.dump` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. If ``pass_original=True``, the original data (before serializing) will be passed as an additional argument to the method. .. versionchanged:: 3.0.0 ``many`` is always passed as a keyword arguments to the decorated method.
Register a method to invoke after serializing an object. The method receives the serialized object and returns the processed object.
def post_dump(fn=None, pass_many=False, pass_original=False): """Register a method to invoke after serializing an object. The method receives the serialized object and returns the processed object. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.dump` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. If ``pass_original=True``, the original data (before serializing) will be passed as an additional argument to the method. .. versionchanged:: 3.0.0 ``many`` is always passed as a keyword arguments to the decorated method. """ return set_hook(fn, (POST_DUMP, pass_many), pass_original=pass_original)
[ "def", "post_dump", "(", "fn", "=", "None", ",", "pass_many", "=", "False", ",", "pass_original", "=", "False", ")", ":", "return", "set_hook", "(", "fn", ",", "(", "POST_DUMP", ",", "pass_many", ")", ",", "pass_original", "=", "pass_original", ")" ]
[ 122, 0 ]
[ 136, 76 ]
python
en
['en', 'en', 'en']
True
pre_load
(fn=None, pass_many=False)
Register a method to invoke before deserializing an object. The method receives the data to be deserialized and returns the processed data. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.load` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. .. versionchanged:: 3.0.0 ``partial`` and ``many`` are always passed as keyword arguments to the decorated method.
Register a method to invoke before deserializing an object. The method receives the data to be deserialized and returns the processed data.
def pre_load(fn=None, pass_many=False): """Register a method to invoke before deserializing an object. The method receives the data to be deserialized and returns the processed data. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.load` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. .. versionchanged:: 3.0.0 ``partial`` and ``many`` are always passed as keyword arguments to the decorated method. """ return set_hook(fn, (PRE_LOAD, pass_many))
[ "def", "pre_load", "(", "fn", "=", "None", ",", "pass_many", "=", "False", ")", ":", "return", "set_hook", "(", "fn", ",", "(", "PRE_LOAD", ",", "pass_many", ")", ")" ]
[ 139, 0 ]
[ 151, 46 ]
python
en
['en', 'en', 'en']
True
post_load
(fn=None, pass_many=False, pass_original=False)
Register a method to invoke after deserializing an object. The method receives the deserialized data and returns the processed data. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.load` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. If ``pass_original=True``, the original data (before deserializing) will be passed as an additional argument to the method. .. versionchanged:: 3.0.0 ``partial`` and ``many`` are always passed as keyword arguments to the decorated method.
Register a method to invoke after deserializing an object. The method receives the deserialized data and returns the processed data.
def post_load(fn=None, pass_many=False, pass_original=False): """Register a method to invoke after deserializing an object. The method receives the deserialized data and returns the processed data. By default it receives a single object at a time, transparently handling the ``many`` argument passed to the `Schema`'s :func:`~marshmallow.Schema.load` call. If ``pass_many=True``, the raw data (which may be a collection) is passed. If ``pass_original=True``, the original data (before deserializing) will be passed as an additional argument to the method. .. versionchanged:: 3.0.0 ``partial`` and ``many`` are always passed as keyword arguments to the decorated method. """ return set_hook(fn, (POST_LOAD, pass_many), pass_original=pass_original)
[ "def", "post_load", "(", "fn", "=", "None", ",", "pass_many", "=", "False", ",", "pass_original", "=", "False", ")", ":", "return", "set_hook", "(", "fn", ",", "(", "POST_LOAD", ",", "pass_many", ")", ",", "pass_original", "=", "pass_original", ")" ]
[ 154, 0 ]
[ 169, 76 ]
python
en
['en', 'en', 'en']
True
set_hook
(fn, key, **kwargs)
Mark decorated function as a hook to be picked up later. You should not need to use this method directly. .. note:: Currently only works with functions and instance methods. Class and static methods are not supported. :return: Decorated function if supplied, else this decorator with its args bound.
Mark decorated function as a hook to be picked up later. You should not need to use this method directly.
def set_hook(fn, key, **kwargs): """Mark decorated function as a hook to be picked up later. You should not need to use this method directly. .. note:: Currently only works with functions and instance methods. Class and static methods are not supported. :return: Decorated function if supplied, else this decorator with its args bound. """ # Allow using this as either a decorator or a decorator factory. if fn is None: return functools.partial(set_hook, key=key, **kwargs) # Set a __marshmallow_hook__ attribute instead of wrapping in some class, # because I still want this to end up as a normal (unbound) method. try: hook_config = fn.__marshmallow_hook__ except AttributeError: fn.__marshmallow_hook__ = hook_config = {} # Also save the kwargs for the tagged function on # __marshmallow_hook__, keyed by (<tag>, <pass_many>) hook_config[key] = kwargs return fn
[ "def", "set_hook", "(", "fn", ",", "key", ",", "*", "*", "kwargs", ")", ":", "# Allow using this as either a decorator or a decorator factory.", "if", "fn", "is", "None", ":", "return", "functools", ".", "partial", "(", "set_hook", ",", "key", "=", "key", ",", "*", "*", "kwargs", ")", "# Set a __marshmallow_hook__ attribute instead of wrapping in some class,", "# because I still want this to end up as a normal (unbound) method.", "try", ":", "hook_config", "=", "fn", ".", "__marshmallow_hook__", "except", "AttributeError", ":", "fn", ".", "__marshmallow_hook__", "=", "hook_config", "=", "{", "}", "# Also save the kwargs for the tagged function on", "# __marshmallow_hook__, keyed by (<tag>, <pass_many>)", "hook_config", "[", "key", "]", "=", "kwargs", "return", "fn" ]
[ 172, 0 ]
[ 197, 13 ]
python
en
['en', 'en', 'en']
True
_fn_matches
(fn, glob)
Return whether the supplied file name fn matches pattern filename.
Return whether the supplied file name fn matches pattern filename.
def _fn_matches(fn, glob): """Return whether the supplied file name fn matches pattern filename.""" if glob not in _pattern_cache: pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob)) return pattern.match(fn) return _pattern_cache[glob].match(fn)
[ "def", "_fn_matches", "(", "fn", ",", "glob", ")", ":", "if", "glob", "not", "in", "_pattern_cache", ":", "pattern", "=", "_pattern_cache", "[", "glob", "]", "=", "re", ".", "compile", "(", "fnmatch", ".", "translate", "(", "glob", ")", ")", "return", "pattern", ".", "match", "(", "fn", ")", "return", "_pattern_cache", "[", "glob", "]", ".", "match", "(", "fn", ")" ]
[ 28, 0 ]
[ 33, 41 ]
python
en
['en', 'en', 'en']
True
_load_formatters
(module_name)
Load a formatter (and all others in the module too).
Load a formatter (and all others in the module too).
def _load_formatters(module_name): """Load a formatter (and all others in the module too).""" mod = __import__(module_name, None, None, ['__all__']) for formatter_name in mod.__all__: cls = getattr(mod, formatter_name) _formatter_cache[cls.name] = cls
[ "def", "_load_formatters", "(", "module_name", ")", ":", "mod", "=", "__import__", "(", "module_name", ",", "None", ",", "None", ",", "[", "'__all__'", "]", ")", "for", "formatter_name", "in", "mod", ".", "__all__", ":", "cls", "=", "getattr", "(", "mod", ",", "formatter_name", ")", "_formatter_cache", "[", "cls", ".", "name", "]", "=", "cls" ]
[ 36, 0 ]
[ 41, 40 ]
python
en
['en', 'en', 'en']
True
get_all_formatters
()
Return a generator for all formatter classes.
Return a generator for all formatter classes.
def get_all_formatters(): """Return a generator for all formatter classes.""" # NB: this returns formatter classes, not info like get_all_lexers(). for info in itervalues(FORMATTERS): if info[1] not in _formatter_cache: _load_formatters(info[0]) yield _formatter_cache[info[1]] for _, formatter in find_plugin_formatters(): yield formatter
[ "def", "get_all_formatters", "(", ")", ":", "# NB: this returns formatter classes, not info like get_all_lexers().", "for", "info", "in", "itervalues", "(", "FORMATTERS", ")", ":", "if", "info", "[", "1", "]", "not", "in", "_formatter_cache", ":", "_load_formatters", "(", "info", "[", "0", "]", ")", "yield", "_formatter_cache", "[", "info", "[", "1", "]", "]", "for", "_", ",", "formatter", "in", "find_plugin_formatters", "(", ")", ":", "yield", "formatter" ]
[ 44, 0 ]
[ 52, 23 ]
python
en
['en', 'en', 'en']
True
find_formatter_class
(alias)
Lookup a formatter by alias. Returns None if not found.
Lookup a formatter by alias.
def find_formatter_class(alias): """Lookup a formatter by alias. Returns None if not found. """ for module_name, name, aliases, _, _ in itervalues(FORMATTERS): if alias in aliases: if name not in _formatter_cache: _load_formatters(module_name) return _formatter_cache[name] for _, cls in find_plugin_formatters(): if alias in cls.aliases: return cls
[ "def", "find_formatter_class", "(", "alias", ")", ":", "for", "module_name", ",", "name", ",", "aliases", ",", "_", ",", "_", "in", "itervalues", "(", "FORMATTERS", ")", ":", "if", "alias", "in", "aliases", ":", "if", "name", "not", "in", "_formatter_cache", ":", "_load_formatters", "(", "module_name", ")", "return", "_formatter_cache", "[", "name", "]", "for", "_", ",", "cls", "in", "find_plugin_formatters", "(", ")", ":", "if", "alias", "in", "cls", ".", "aliases", ":", "return", "cls" ]
[ 55, 0 ]
[ 67, 22 ]
python
en
['en', 'en', 'en']
True
get_formatter_by_name
(_alias, **options)
Lookup and instantiate a formatter by alias. Raises ClassNotFound if not found.
Lookup and instantiate a formatter by alias.
def get_formatter_by_name(_alias, **options): """Lookup and instantiate a formatter by alias. Raises ClassNotFound if not found. """ cls = find_formatter_class(_alias) if cls is None: raise ClassNotFound("no formatter found for name %r" % _alias) return cls(**options)
[ "def", "get_formatter_by_name", "(", "_alias", ",", "*", "*", "options", ")", ":", "cls", "=", "find_formatter_class", "(", "_alias", ")", "if", "cls", "is", "None", ":", "raise", "ClassNotFound", "(", "\"no formatter found for name %r\"", "%", "_alias", ")", "return", "cls", "(", "*", "*", "options", ")" ]
[ 70, 0 ]
[ 78, 25 ]
python
en
['en', 'en', 'en']
True
get_formatter_for_filename
(fn, **options)
Lookup and instantiate a formatter by filename pattern. Raises ClassNotFound if not found.
Lookup and instantiate a formatter by filename pattern.
def get_formatter_for_filename(fn, **options): """Lookup and instantiate a formatter by filename pattern. Raises ClassNotFound if not found. """ fn = basename(fn) for modname, name, _, filenames, _ in itervalues(FORMATTERS): for filename in filenames: if _fn_matches(fn, filename): if name not in _formatter_cache: _load_formatters(modname) return _formatter_cache[name](**options) for cls in find_plugin_formatters(): for filename in cls.filenames: if _fn_matches(fn, filename): return cls(**options) raise ClassNotFound("no formatter found for file name %r" % fn)
[ "def", "get_formatter_for_filename", "(", "fn", ",", "*", "*", "options", ")", ":", "fn", "=", "basename", "(", "fn", ")", "for", "modname", ",", "name", ",", "_", ",", "filenames", ",", "_", "in", "itervalues", "(", "FORMATTERS", ")", ":", "for", "filename", "in", "filenames", ":", "if", "_fn_matches", "(", "fn", ",", "filename", ")", ":", "if", "name", "not", "in", "_formatter_cache", ":", "_load_formatters", "(", "modname", ")", "return", "_formatter_cache", "[", "name", "]", "(", "*", "*", "options", ")", "for", "cls", "in", "find_plugin_formatters", "(", ")", ":", "for", "filename", "in", "cls", ".", "filenames", ":", "if", "_fn_matches", "(", "fn", ",", "filename", ")", ":", "return", "cls", "(", "*", "*", "options", ")", "raise", "ClassNotFound", "(", "\"no formatter found for file name %r\"", "%", "fn", ")" ]
[ 81, 0 ]
[ 97, 67 ]
python
en
['en', 'en', 'en']
True
logloss
(y, p)
Bounded log loss error. Args: y (numpy.array): target p (numpy.array): prediction Returns: bounded log loss error
Bounded log loss error. Args: y (numpy.array): target p (numpy.array): prediction Returns: bounded log loss error
def logloss(y, p): """Bounded log loss error. Args: y (numpy.array): target p (numpy.array): prediction Returns: bounded log loss error """ p[p < EPS] = EPS p[p > 1 - EPS] = 1 - EPS return log_loss(y, p)
[ "def", "logloss", "(", "y", ",", "p", ")", ":", "p", "[", "p", "<", "EPS", "]", "=", "EPS", "p", "[", "p", ">", "1", "-", "EPS", "]", "=", "1", "-", "EPS", "return", "log_loss", "(", "y", ",", "p", ")" ]
[ 10, 0 ]
[ 21, 25 ]
python
en
['en', 'no', 'en']
True
classification_metrics
(y, p, w=None, metrics={'AUC': roc_auc_score, 'Log Loss': logloss})
Log metrics for classifiers. Args: y (numpy.array): target p (numpy.array): prediction w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log metrics for the treatment and control group separately metrics (dict, optional): a dictionary of the metric names and functions
Log metrics for classifiers.
def classification_metrics(y, p, w=None, metrics={'AUC': roc_auc_score, 'Log Loss': logloss}): """Log metrics for classifiers. Args: y (numpy.array): target p (numpy.array): prediction w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log metrics for the treatment and control group separately metrics (dict, optional): a dictionary of the metric names and functions """ regression_metrics(y=y, p=p, w=w, metrics=metrics)
[ "def", "classification_metrics", "(", "y", ",", "p", ",", "w", "=", "None", ",", "metrics", "=", "{", "'AUC'", ":", "roc_auc_score", ",", "'Log Loss'", ":", "logloss", "}", ")", ":", "regression_metrics", "(", "y", "=", "y", ",", "p", "=", "p", ",", "w", "=", "w", ",", "metrics", "=", "metrics", ")" ]
[ 24, 0 ]
[ 34, 54 ]
python
bg
['no', 'bg', 'en']
False
XvncDisplay.__init__
(self, size=(1024, 768), color_depth=24, bgcolor='black', rfbport=5900)
:param bgcolor: 'black' or 'white' :param rfbport: Specifies the TCP port on which Xvnc listens for connections from viewers (the protocol used in VNC is called RFB - "remote framebuffer"). The default is 5900 plus the display number.
:param bgcolor: 'black' or 'white' :param rfbport: Specifies the TCP port on which Xvnc listens for connections from viewers (the protocol used in VNC is called RFB - "remote framebuffer"). The default is 5900 plus the display number.
def __init__(self, size=(1024, 768), color_depth=24, bgcolor='black', rfbport=5900): ''' :param bgcolor: 'black' or 'white' :param rfbport: Specifies the TCP port on which Xvnc listens for connections from viewers (the protocol used in VNC is called RFB - "remote framebuffer"). The default is 5900 plus the display number. ''' self.screen = 0 self.size = size self.color_depth = color_depth self.process = None self.bgcolor = bgcolor self.display = None self.rfbport = rfbport AbstractDisplay.__init__(self)
[ "def", "__init__", "(", "self", ",", "size", "=", "(", "1024", ",", "768", ")", ",", "color_depth", "=", "24", ",", "bgcolor", "=", "'black'", ",", "rfbport", "=", "5900", ")", ":", "self", ".", "screen", "=", "0", "self", ".", "size", "=", "size", "self", ".", "color_depth", "=", "color_depth", "self", ".", "process", "=", "None", "self", ".", "bgcolor", "=", "bgcolor", "self", ".", "display", "=", "None", "self", ".", "rfbport", "=", "rfbport", "AbstractDisplay", ".", "__init__", "(", "self", ")" ]
[ 10, 4 ]
[ 26, 38 ]
python
en
['en', 'error', 'th']
False
get_treatment_costs
(treatment, control_name, cc_dict, ic_dict)
Set the conversion and impression costs based on a dict of parameters. Calculate the actual cost of targeting a user with the actual treatment group using the above parameters. Params ------ treatment : array, shape = (num_samples, ) Treatment array. control_name, str Control group name as string. cc_dict : dict Dict containing the conversion cost for each treatment. ic_dict Dict containing the impression cost for each treatment. Returns ------- conversion_cost : ndarray, shape = (num_samples, num_treatments) An array of conversion costs for each treatment. impression_cost : ndarray, shape = (num_samples, num_treatments) An array of impression costs for each treatment. conditions : list, len = len(set(treatment)) A list of experimental conditions.
Set the conversion and impression costs based on a dict of parameters.
def get_treatment_costs(treatment, control_name, cc_dict, ic_dict): ''' Set the conversion and impression costs based on a dict of parameters. Calculate the actual cost of targeting a user with the actual treatment group using the above parameters. Params ------ treatment : array, shape = (num_samples, ) Treatment array. control_name, str Control group name as string. cc_dict : dict Dict containing the conversion cost for each treatment. ic_dict Dict containing the impression cost for each treatment. Returns ------- conversion_cost : ndarray, shape = (num_samples, num_treatments) An array of conversion costs for each treatment. impression_cost : ndarray, shape = (num_samples, num_treatments) An array of impression costs for each treatment. conditions : list, len = len(set(treatment)) A list of experimental conditions. ''' # Set the conversion costs of the treatments conversion_cost = np.zeros((len(treatment), len(cc_dict.keys()))) for idx, dict_key in enumerate(cc_dict.keys()): conversion_cost[:, idx] = cc_dict.get(dict_key) # Set the impression costs of the treatments impression_cost = np.zeros((len(treatment), len(ic_dict.keys()))) for idx, dict_key in enumerate(ic_dict.keys()): impression_cost[:, idx] = ic_dict.get(dict_key) # Get a sorted list of conditions conditions = list(set(treatment)) conditions.remove(control_name) conditions_sorted = sorted(conditions) conditions_sorted.insert(0, control_name) return conversion_cost, impression_cost, conditions_sorted
[ "def", "get_treatment_costs", "(", "treatment", ",", "control_name", ",", "cc_dict", ",", "ic_dict", ")", ":", "# Set the conversion costs of the treatments", "conversion_cost", "=", "np", ".", "zeros", "(", "(", "len", "(", "treatment", ")", ",", "len", "(", "cc_dict", ".", "keys", "(", ")", ")", ")", ")", "for", "idx", ",", "dict_key", "in", "enumerate", "(", "cc_dict", ".", "keys", "(", ")", ")", ":", "conversion_cost", "[", ":", ",", "idx", "]", "=", "cc_dict", ".", "get", "(", "dict_key", ")", "# Set the impression costs of the treatments", "impression_cost", "=", "np", ".", "zeros", "(", "(", "len", "(", "treatment", ")", ",", "len", "(", "ic_dict", ".", "keys", "(", ")", ")", ")", ")", "for", "idx", ",", "dict_key", "in", "enumerate", "(", "ic_dict", ".", "keys", "(", ")", ")", ":", "impression_cost", "[", ":", ",", "idx", "]", "=", "ic_dict", ".", "get", "(", "dict_key", ")", "# Get a sorted list of conditions", "conditions", "=", "list", "(", "set", "(", "treatment", ")", ")", "conditions", ".", "remove", "(", "control_name", ")", "conditions_sorted", "=", "sorted", "(", "conditions", ")", "conditions_sorted", ".", "insert", "(", "0", ",", "control_name", ")", "return", "conversion_cost", ",", "impression_cost", ",", "conditions_sorted" ]
[ 3, 0 ]
[ 52, 62 ]
python
en
['en', 'error', 'th']
False
get_actual_value
(treatment, observed_outcome, conversion_value, conditions, conversion_cost, impression_cost)
Set the conversion and impression costs based on a dict of parameters. Calculate the actual value of targeting a user with the actual treatment group using the above parameters. Params ------ treatment : array, shape = (num_samples, ) Treatment array. observed_outcome : array, shape = (num_samples, ) Observed outcome array, aka y. conversion_value : array, shape = (num_samples, ) The value of converting a given user. conditions : list, len = len(set(treatment)) List of treatment conditions. conversion_cost : array, shape = (num_samples, num_treatment) Array of conversion costs for each unit in each treatment. impression_cost : array, shape = (num_samples, num_treatment) Array of impression costs for each unit in each treatment. Returns ------- actual_value : array, shape = (num_samples, ) Array of actual values of havng a user in their actual treatment group. conversion_value : array, shape = (num_samples, ) Array of payoffs from converting a user.
Set the conversion and impression costs based on a dict of parameters.
def get_actual_value(treatment, observed_outcome, conversion_value, conditions, conversion_cost, impression_cost): ''' Set the conversion and impression costs based on a dict of parameters. Calculate the actual value of targeting a user with the actual treatment group using the above parameters. Params ------ treatment : array, shape = (num_samples, ) Treatment array. observed_outcome : array, shape = (num_samples, ) Observed outcome array, aka y. conversion_value : array, shape = (num_samples, ) The value of converting a given user. conditions : list, len = len(set(treatment)) List of treatment conditions. conversion_cost : array, shape = (num_samples, num_treatment) Array of conversion costs for each unit in each treatment. impression_cost : array, shape = (num_samples, num_treatment) Array of impression costs for each unit in each treatment. Returns ------- actual_value : array, shape = (num_samples, ) Array of actual values of havng a user in their actual treatment group. conversion_value : array, shape = (num_samples, ) Array of payoffs from converting a user. ''' cost_filter = [actual_group == possible_group for actual_group in treatment for possible_group in conditions] conversion_cost_flat = conversion_cost.flatten() actual_cc = conversion_cost_flat[cost_filter] impression_cost_flat = impression_cost.flatten() actual_ic = impression_cost_flat[cost_filter] # Calculate the actual value of having a user in their actual treatment actual_value = (conversion_value - actual_cc) * \ observed_outcome - actual_ic return actual_value
[ "def", "get_actual_value", "(", "treatment", ",", "observed_outcome", ",", "conversion_value", ",", "conditions", ",", "conversion_cost", ",", "impression_cost", ")", ":", "cost_filter", "=", "[", "actual_group", "==", "possible_group", "for", "actual_group", "in", "treatment", "for", "possible_group", "in", "conditions", "]", "conversion_cost_flat", "=", "conversion_cost", ".", "flatten", "(", ")", "actual_cc", "=", "conversion_cost_flat", "[", "cost_filter", "]", "impression_cost_flat", "=", "impression_cost", ".", "flatten", "(", ")", "actual_ic", "=", "impression_cost_flat", "[", "cost_filter", "]", "# Calculate the actual value of having a user in their actual treatment", "actual_value", "=", "(", "conversion_value", "-", "actual_cc", ")", "*", "observed_outcome", "-", "actual_ic", "return", "actual_value" ]
[ 55, 0 ]
[ 105, 23 ]
python
en
['en', 'error', 'th']
False
get_uplift_best
(cate, conditions)
Takes the CATE prediction from a learner, adds the control outcome array and finds the name of the argmax conditon. Params ------ cate : array, shape = (num_samples, ) The conditional average treatment effect prediction. conditions : list, len = len(set(treatment)) Returns ------- uplift_recomm_name : array, shape = (num_samples, ) The experimental group recommended by the learner.
Takes the CATE prediction from a learner, adds the control outcome array and finds the name of the argmax conditon.
def get_uplift_best(cate, conditions): ''' Takes the CATE prediction from a learner, adds the control outcome array and finds the name of the argmax conditon. Params ------ cate : array, shape = (num_samples, ) The conditional average treatment effect prediction. conditions : list, len = len(set(treatment)) Returns ------- uplift_recomm_name : array, shape = (num_samples, ) The experimental group recommended by the learner. ''' cate_with_control = np.c_[np.zeros(cate.shape[0]), cate] uplift_best_idx = np.argmax(cate_with_control, axis=1) uplift_best_name = [conditions[idx] for idx in uplift_best_idx] return uplift_best_name
[ "def", "get_uplift_best", "(", "cate", ",", "conditions", ")", ":", "cate_with_control", "=", "np", ".", "c_", "[", "np", ".", "zeros", "(", "cate", ".", "shape", "[", "0", "]", ")", ",", "cate", "]", "uplift_best_idx", "=", "np", ".", "argmax", "(", "cate_with_control", ",", "axis", "=", "1", ")", "uplift_best_name", "=", "[", "conditions", "[", "idx", "]", "for", "idx", "in", "uplift_best_idx", "]", "return", "uplift_best_name" ]
[ 108, 0 ]
[ 129, 27 ]
python
en
['en', 'error', 'th']
False
assert_dict_key_and_val_in_stdout
(dict_, stdout)
Use when stdout contains color info and command chars
Use when stdout contains color info and command chars
def assert_dict_key_and_val_in_stdout(dict_, stdout): """Use when stdout contains color info and command chars""" for key, val in dict_.items(): if isinstance(val, dict): assert key in stdout assert_dict_key_and_val_in_stdout(val, stdout) else: assert key in stdout assert str(val) in stdout
[ "def", "assert_dict_key_and_val_in_stdout", "(", "dict_", ",", "stdout", ")", ":", "for", "key", ",", "val", "in", "dict_", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "assert", "key", "in", "stdout", "assert_dict_key_and_val_in_stdout", "(", "val", ",", "stdout", ")", "else", ":", "assert", "key", "in", "stdout", "assert", "str", "(", "val", ")", "in", "stdout" ]
[ 12, 0 ]
[ 20, 37 ]
python
en
['en', 'en', 'en']
True
assert_no_logging_messages_or_tracebacks
( my_caplog, click_result, allowed_deprecation_message=None )
Use this assertion in all CLI tests unless you have a very good reason. Without this assertion, it is easy to let errors and tracebacks bubble up to users without being detected, unless you are manually inspecting the console output (stderr and stdout), as well as logging output from every test. Usage: ``` def test_my_stuff(caplog): ... result = runner.invoke(...) ... assert_no_logging_messages_or_tracebacks(caplog, result) ``` :param my_caplog: the caplog pytest fixutre :param click_result: the Result object returned from click runner.invoke() :param allowed_deprecation_message: Deprecation message that may be allowed
Use this assertion in all CLI tests unless you have a very good reason.
def assert_no_logging_messages_or_tracebacks( my_caplog, click_result, allowed_deprecation_message=None ): """ Use this assertion in all CLI tests unless you have a very good reason. Without this assertion, it is easy to let errors and tracebacks bubble up to users without being detected, unless you are manually inspecting the console output (stderr and stdout), as well as logging output from every test. Usage: ``` def test_my_stuff(caplog): ... result = runner.invoke(...) ... assert_no_logging_messages_or_tracebacks(caplog, result) ``` :param my_caplog: the caplog pytest fixutre :param click_result: the Result object returned from click runner.invoke() :param allowed_deprecation_message: Deprecation message that may be allowed """ if allowed_deprecation_message: assert_logging_message_present( my_caplog=my_caplog, message=allowed_deprecation_message ) else: assert_no_logging_messages(my_caplog) assert_no_tracebacks(click_result)
[ "def", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", ",", "click_result", ",", "allowed_deprecation_message", "=", "None", ")", ":", "if", "allowed_deprecation_message", ":", "assert_logging_message_present", "(", "my_caplog", "=", "my_caplog", ",", "message", "=", "allowed_deprecation_message", ")", "else", ":", "assert_no_logging_messages", "(", "my_caplog", ")", "assert_no_tracebacks", "(", "click_result", ")" ]
[ 23, 0 ]
[ 54, 38 ]
python
en
['en', 'error', 'th']
False
assert_logging_message_present
(my_caplog, message)
Assert presence of message in logging output messages. :param my_caplog: the caplog pytest fixutre :param message: message to be searched in caplog
Assert presence of message in logging output messages.
def assert_logging_message_present(my_caplog, message): """ Assert presence of message in logging output messages. :param my_caplog: the caplog pytest fixutre :param message: message to be searched in caplog """ assert isinstance( my_caplog, LogCaptureFixture ), "Please pass in the caplog object from your test." messages = my_caplog.messages assert isinstance(messages, list) if messages: print("Found logging messages:\n") print("\n".join([m for m in messages])) assert any([message in element for element in messages])
[ "def", "assert_logging_message_present", "(", "my_caplog", ",", "message", ")", ":", "assert", "isinstance", "(", "my_caplog", ",", "LogCaptureFixture", ")", ",", "\"Please pass in the caplog object from your test.\"", "messages", "=", "my_caplog", ".", "messages", "assert", "isinstance", "(", "messages", ",", "list", ")", "if", "messages", ":", "print", "(", "\"Found logging messages:\\n\"", ")", "print", "(", "\"\\n\"", ".", "join", "(", "[", "m", "for", "m", "in", "messages", "]", ")", ")", "assert", "any", "(", "[", "message", "in", "element", "for", "element", "in", "messages", "]", ")" ]
[ 57, 0 ]
[ 72, 60 ]
python
en
['en', 'error', 'th']
False
assert_no_logging_messages
(my_caplog)
Assert no logging output messages. :param my_caplog: the caplog pytest fixutre
Assert no logging output messages.
def assert_no_logging_messages(my_caplog): """ Assert no logging output messages. :param my_caplog: the caplog pytest fixutre """ assert isinstance( my_caplog, LogCaptureFixture ), "Please pass in the caplog object from your test." messages = my_caplog.messages assert isinstance(messages, list) if messages: print("Found logging messages:\n") print("\n".join([m for m in messages])) assert not messages
[ "def", "assert_no_logging_messages", "(", "my_caplog", ")", ":", "assert", "isinstance", "(", "my_caplog", ",", "LogCaptureFixture", ")", ",", "\"Please pass in the caplog object from your test.\"", "messages", "=", "my_caplog", ".", "messages", "assert", "isinstance", "(", "messages", ",", "list", ")", "if", "messages", ":", "print", "(", "\"Found logging messages:\\n\"", ")", "print", "(", "\"\\n\"", ".", "join", "(", "[", "m", "for", "m", "in", "messages", "]", ")", ")", "assert", "not", "messages" ]
[ 75, 0 ]
[ 89, 23 ]
python
en
['en', 'error', 'th']
False
assert_no_tracebacks
(click_result)
Assert no tracebacks. :param click_result: the Result object returned from click runner.invoke()
Assert no tracebacks.
def assert_no_tracebacks(click_result): """ Assert no tracebacks. :param click_result: the Result object returned from click runner.invoke() """ assert isinstance( click_result, Result ), "Please pass in the click runner invoke result object from your test." if click_result.exc_info: # introspect the call stack to make sure no exceptions found there way through # https://docs.python.org/2/library/sys.html#sys.exc_info _type, value, _traceback = click_result.exc_info if not isinstance(value, SystemExit): # SystemExit is a known "good" exit type print("".join(traceback.format_tb(_traceback))) assert False, "Found exception of type {} with message {}".format( _type, value ) if not isinstance(click_result.exception, SystemExit): # Ignore a SystemeExit, because some commands intentionally exit in an error state assert not click_result.exception, "Found exception {}".format( click_result.exception ) assert ( "traceback" not in click_result.output.lower() ), "Found a traceback in the console output: {}".format(click_result.output) assert ( "traceback" not in click_result.stdout.lower() ), "Found a traceback in the console output: {}".format(click_result.stdout) try: assert ( "traceback" not in click_result.stderr.lower() ), "Found a traceback in the console output: {}".format(click_result.stderr) except ValueError as ve: # sometimes stderr is not captured separately pass
[ "def", "assert_no_tracebacks", "(", "click_result", ")", ":", "assert", "isinstance", "(", "click_result", ",", "Result", ")", ",", "\"Please pass in the click runner invoke result object from your test.\"", "if", "click_result", ".", "exc_info", ":", "# introspect the call stack to make sure no exceptions found there way through", "# https://docs.python.org/2/library/sys.html#sys.exc_info", "_type", ",", "value", ",", "_traceback", "=", "click_result", ".", "exc_info", "if", "not", "isinstance", "(", "value", ",", "SystemExit", ")", ":", "# SystemExit is a known \"good\" exit type", "print", "(", "\"\"", ".", "join", "(", "traceback", ".", "format_tb", "(", "_traceback", ")", ")", ")", "assert", "False", ",", "\"Found exception of type {} with message {}\"", ".", "format", "(", "_type", ",", "value", ")", "if", "not", "isinstance", "(", "click_result", ".", "exception", ",", "SystemExit", ")", ":", "# Ignore a SystemeExit, because some commands intentionally exit in an error state", "assert", "not", "click_result", ".", "exception", ",", "\"Found exception {}\"", ".", "format", "(", "click_result", ".", "exception", ")", "assert", "(", "\"traceback\"", "not", "in", "click_result", ".", "output", ".", "lower", "(", ")", ")", ",", "\"Found a traceback in the console output: {}\"", ".", "format", "(", "click_result", ".", "output", ")", "assert", "(", "\"traceback\"", "not", "in", "click_result", ".", "stdout", ".", "lower", "(", ")", ")", ",", "\"Found a traceback in the console output: {}\"", ".", "format", "(", "click_result", ".", "stdout", ")", "try", ":", "assert", "(", "\"traceback\"", "not", "in", "click_result", ".", "stderr", ".", "lower", "(", ")", ")", ",", "\"Found a traceback in the console output: {}\"", ".", "format", "(", "click_result", ".", "stderr", ")", "except", "ValueError", "as", "ve", ":", "# sometimes stderr is not captured separately", "pass" ]
[ 92, 0 ]
[ 129, 12 ]
python
en
['en', 'error', 'th']
False
BatchDefinition.__hash__
(self)
Overrides the default implementation
Overrides the default implementation
def __hash__(self) -> int: """Overrides the default implementation""" _result_hash: int = ( hash(self.datasource_name) ^ hash(self.data_connector_name) ^ hash(self.data_asset_name) ) if self.batch_identifiers is not None: for key, value in self.batch_identifiers.items(): _result_hash = _result_hash ^ hash(key) ^ hash(str(value)) return _result_hash
[ "def", "__hash__", "(", "self", ")", "->", "int", ":", "_result_hash", ":", "int", "=", "(", "hash", "(", "self", ".", "datasource_name", ")", "^", "hash", "(", "self", ".", "data_connector_name", ")", "^", "hash", "(", "self", ".", "data_asset_name", ")", ")", "if", "self", ".", "batch_identifiers", "is", "not", "None", ":", "for", "key", ",", "value", "in", "self", ".", "batch_identifiers", ".", "items", "(", ")", ":", "_result_hash", "=", "_result_hash", "^", "hash", "(", "key", ")", "^", "hash", "(", "str", "(", "value", ")", ")", "return", "_result_hash" ]
[ 146, 4 ]
[ 156, 27 ]
python
en
['en', 'fr', 'en']
True
MilvusDocumentStore.__init__
( self, sql_url: str = "sqlite:///", milvus_url: str = "tcp://localhost:19530", connection_pool: str = "SingletonThread", index: str = "document", vector_dim: int = 768, index_file_size: int = 1024, similarity: str = "dot_product", index_type: IndexType = IndexType.FLAT, index_param: Optional[Dict[str, Any]] = None, search_param: Optional[Dict[str, Any]] = None, update_existing_documents: bool = False, return_embedding: bool = False, embedding_field: str = "embedding", progress_bar: bool = True, **kwargs, )
:param sql_url: SQL connection URL for storing document texts and metadata. It defaults to a local, file based SQLite DB. For large scale deployment, Postgres is recommended. If using MySQL then same server can also be used for Milvus metadata. For more details see https://milvus.io/docs/v0.10.5/data_manage.md. :param milvus_url: Milvus server connection URL for storing and processing vectors. Protocol, host and port will automatically be inferred from the URL. See https://milvus.io/docs/v0.10.5/install_milvus.md for instructions to start a Milvus instance. :param connection_pool: Connection pool type to connect with Milvus server. Default: "SingletonThread". :param index: Index name for text, embedding and metadata (in Milvus terms, this is the "collection name"). :param vector_dim: The embedding vector size. Default: 768. :param index_file_size: Specifies the size of each segment file that is stored by Milvus and its default value is 1024 MB. When the size of newly inserted vectors reaches the specified volume, Milvus packs these vectors into a new segment. Milvus creates one index file for each segment. When conducting a vector search, Milvus searches all index files one by one. As a rule of thumb, we would see a 30% ~ 50% increase in the search performance after changing the value of index_file_size from 1024 to 2048. Note that an overly large index_file_size value may cause failure to load a segment into the memory or graphics memory. (From https://milvus.io/docs/v0.10.5/performance_faq.md#How-can-I-get-the-best-performance-from-Milvus-through-setting-index_file_size) :param similarity: The similarity function used to compare document vectors. 'dot_product' is the default and recommended for DPR embeddings. 'cosine' is recommended for Sentence Transformers, but is not directly supported by Milvus. However, you can normalize your embeddings and use `dot_product` to get the same results. See https://milvus.io/docs/v0.10.5/metric.md?Inner-product-(IP)#floating. 
:param index_type: Type of approximate nearest neighbour (ANN) index used. The choice here determines your tradeoff between speed and accuracy. Some popular options: - FLAT (default): Exact method, slow - IVF_FLAT, inverted file based heuristic, fast - HSNW: Graph based, fast - ANNOY: Tree based, fast See: https://milvus.io/docs/v0.10.5/index.md :param index_param: Configuration parameters for the chose index_type needed at indexing time. For example: {"nlist": 16384} as the number of cluster units to create for index_type IVF_FLAT. See https://milvus.io/docs/v0.10.5/index.md :param search_param: Configuration parameters for the chose index_type needed at query time For example: {"nprobe": 10} as the number of cluster units to query for index_type IVF_FLAT. See https://milvus.io/docs/v0.10.5/index.md :param update_existing_documents: Whether to update any existing documents with the same ID when adding documents. When set as True, any document with an existing ID gets updated. If set to False, an error is raised if the document ID of the document being added already exists. :param return_embedding: To return document embedding. :param embedding_field: Name of field containing an embedding vector. :param progress_bar: Whether to show a tqdm progress bar or not. Can be helpful to disable in production deployments to keep the logs clean.
:param sql_url: SQL connection URL for storing document texts and metadata. It defaults to a local, file based SQLite DB. For large scale deployment, Postgres is recommended. If using MySQL then same server can also be used for Milvus metadata. For more details see https://milvus.io/docs/v0.10.5/data_manage.md. :param milvus_url: Milvus server connection URL for storing and processing vectors. Protocol, host and port will automatically be inferred from the URL. See https://milvus.io/docs/v0.10.5/install_milvus.md for instructions to start a Milvus instance. :param connection_pool: Connection pool type to connect with Milvus server. Default: "SingletonThread". :param index: Index name for text, embedding and metadata (in Milvus terms, this is the "collection name"). :param vector_dim: The embedding vector size. Default: 768. :param index_file_size: Specifies the size of each segment file that is stored by Milvus and its default value is 1024 MB. When the size of newly inserted vectors reaches the specified volume, Milvus packs these vectors into a new segment. Milvus creates one index file for each segment. When conducting a vector search, Milvus searches all index files one by one. As a rule of thumb, we would see a 30% ~ 50% increase in the search performance after changing the value of index_file_size from 1024 to 2048. Note that an overly large index_file_size value may cause failure to load a segment into the memory or graphics memory. (From https://milvus.io/docs/v0.10.5/performance_faq.md#How-can-I-get-the-best-performance-from-Milvus-through-setting-index_file_size) :param similarity: The similarity function used to compare document vectors. 'dot_product' is the default and recommended for DPR embeddings. 'cosine' is recommended for Sentence Transformers, but is not directly supported by Milvus. However, you can normalize your embeddings and use `dot_product` to get the same results. See https://milvus.io/docs/v0.10.5/metric.md?Inner-product-(IP)#floating. 
:param index_type: Type of approximate nearest neighbour (ANN) index used. The choice here determines your tradeoff between speed and accuracy. Some popular options: - FLAT (default): Exact method, slow - IVF_FLAT, inverted file based heuristic, fast - HSNW: Graph based, fast - ANNOY: Tree based, fast See: https://milvus.io/docs/v0.10.5/index.md :param index_param: Configuration parameters for the chose index_type needed at indexing time. For example: {"nlist": 16384} as the number of cluster units to create for index_type IVF_FLAT. See https://milvus.io/docs/v0.10.5/index.md :param search_param: Configuration parameters for the chose index_type needed at query time For example: {"nprobe": 10} as the number of cluster units to query for index_type IVF_FLAT. See https://milvus.io/docs/v0.10.5/index.md :param update_existing_documents: Whether to update any existing documents with the same ID when adding documents. When set as True, any document with an existing ID gets updated. If set to False, an error is raised if the document ID of the document being added already exists. :param return_embedding: To return document embedding. :param embedding_field: Name of field containing an embedding vector. :param progress_bar: Whether to show a tqdm progress bar or not. Can be helpful to disable in production deployments to keep the logs clean.
def __init__( self, sql_url: str = "sqlite:///", milvus_url: str = "tcp://localhost:19530", connection_pool: str = "SingletonThread", index: str = "document", vector_dim: int = 768, index_file_size: int = 1024, similarity: str = "dot_product", index_type: IndexType = IndexType.FLAT, index_param: Optional[Dict[str, Any]] = None, search_param: Optional[Dict[str, Any]] = None, update_existing_documents: bool = False, return_embedding: bool = False, embedding_field: str = "embedding", progress_bar: bool = True, **kwargs, ): """ :param sql_url: SQL connection URL for storing document texts and metadata. It defaults to a local, file based SQLite DB. For large scale deployment, Postgres is recommended. If using MySQL then same server can also be used for Milvus metadata. For more details see https://milvus.io/docs/v0.10.5/data_manage.md. :param milvus_url: Milvus server connection URL for storing and processing vectors. Protocol, host and port will automatically be inferred from the URL. See https://milvus.io/docs/v0.10.5/install_milvus.md for instructions to start a Milvus instance. :param connection_pool: Connection pool type to connect with Milvus server. Default: "SingletonThread". :param index: Index name for text, embedding and metadata (in Milvus terms, this is the "collection name"). :param vector_dim: The embedding vector size. Default: 768. :param index_file_size: Specifies the size of each segment file that is stored by Milvus and its default value is 1024 MB. When the size of newly inserted vectors reaches the specified volume, Milvus packs these vectors into a new segment. Milvus creates one index file for each segment. When conducting a vector search, Milvus searches all index files one by one. As a rule of thumb, we would see a 30% ~ 50% increase in the search performance after changing the value of index_file_size from 1024 to 2048. Note that an overly large index_file_size value may cause failure to load a segment into the memory or graphics memory. 
(From https://milvus.io/docs/v0.10.5/performance_faq.md#How-can-I-get-the-best-performance-from-Milvus-through-setting-index_file_size) :param similarity: The similarity function used to compare document vectors. 'dot_product' is the default and recommended for DPR embeddings. 'cosine' is recommended for Sentence Transformers, but is not directly supported by Milvus. However, you can normalize your embeddings and use `dot_product` to get the same results. See https://milvus.io/docs/v0.10.5/metric.md?Inner-product-(IP)#floating. :param index_type: Type of approximate nearest neighbour (ANN) index used. The choice here determines your tradeoff between speed and accuracy. Some popular options: - FLAT (default): Exact method, slow - IVF_FLAT, inverted file based heuristic, fast - HSNW: Graph based, fast - ANNOY: Tree based, fast See: https://milvus.io/docs/v0.10.5/index.md :param index_param: Configuration parameters for the chose index_type needed at indexing time. For example: {"nlist": 16384} as the number of cluster units to create for index_type IVF_FLAT. See https://milvus.io/docs/v0.10.5/index.md :param search_param: Configuration parameters for the chose index_type needed at query time For example: {"nprobe": 10} as the number of cluster units to query for index_type IVF_FLAT. See https://milvus.io/docs/v0.10.5/index.md :param update_existing_documents: Whether to update any existing documents with the same ID when adding documents. When set as True, any document with an existing ID gets updated. If set to False, an error is raised if the document ID of the document being added already exists. :param return_embedding: To return document embedding. :param embedding_field: Name of field containing an embedding vector. :param progress_bar: Whether to show a tqdm progress bar or not. Can be helpful to disable in production deployments to keep the logs clean. 
""" self.milvus_server = Milvus(uri=milvus_url, pool=connection_pool) self.vector_dim = vector_dim self.index_file_size = index_file_size if similarity == "dot_product": self.metric_type = MetricType.L2 else: raise ValueError("The Milvus document store can currently only support dot_product similarity. " "Please set similarity=\"dot_product\"") self.index_type = index_type self.index_param = index_param or {"nlist": 16384} self.search_param = search_param or {"nprobe": 10} self.index = index self._create_collection_and_index_if_not_exist(self.index) self.return_embedding = return_embedding self.embedding_field = embedding_field self.progress_bar = progress_bar super().__init__( url=sql_url, update_existing_documents=update_existing_documents, index=index )
[ "def", "__init__", "(", "self", ",", "sql_url", ":", "str", "=", "\"sqlite:///\"", ",", "milvus_url", ":", "str", "=", "\"tcp://localhost:19530\"", ",", "connection_pool", ":", "str", "=", "\"SingletonThread\"", ",", "index", ":", "str", "=", "\"document\"", ",", "vector_dim", ":", "int", "=", "768", ",", "index_file_size", ":", "int", "=", "1024", ",", "similarity", ":", "str", "=", "\"dot_product\"", ",", "index_type", ":", "IndexType", "=", "IndexType", ".", "FLAT", ",", "index_param", ":", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", "=", "None", ",", "search_param", ":", "Optional", "[", "Dict", "[", "str", ",", "Any", "]", "]", "=", "None", ",", "update_existing_documents", ":", "bool", "=", "False", ",", "return_embedding", ":", "bool", "=", "False", ",", "embedding_field", ":", "str", "=", "\"embedding\"", ",", "progress_bar", ":", "bool", "=", "True", ",", "*", "*", "kwargs", ",", ")", ":", "self", ".", "milvus_server", "=", "Milvus", "(", "uri", "=", "milvus_url", ",", "pool", "=", "connection_pool", ")", "self", ".", "vector_dim", "=", "vector_dim", "self", ".", "index_file_size", "=", "index_file_size", "if", "similarity", "==", "\"dot_product\"", ":", "self", ".", "metric_type", "=", "MetricType", ".", "L2", "else", ":", "raise", "ValueError", "(", "\"The Milvus document store can currently only support dot_product similarity. 
\"", "\"Please set similarity=\\\"dot_product\\\"\"", ")", "self", ".", "index_type", "=", "index_type", "self", ".", "index_param", "=", "index_param", "or", "{", "\"nlist\"", ":", "16384", "}", "self", ".", "search_param", "=", "search_param", "or", "{", "\"nprobe\"", ":", "10", "}", "self", ".", "index", "=", "index", "self", ".", "_create_collection_and_index_if_not_exist", "(", "self", ".", "index", ")", "self", ".", "return_embedding", "=", "return_embedding", "self", ".", "embedding_field", "=", "embedding_field", "self", ".", "progress_bar", "=", "progress_bar", "super", "(", ")", ".", "__init__", "(", "url", "=", "sql_url", ",", "update_existing_documents", "=", "update_existing_documents", ",", "index", "=", "index", ")" ]
[ 36, 4 ]
[ 119, 9 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.write_documents
( self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000 )
Add new documents to the DocumentStore. :param documents: List of `Dicts` or List of `Documents`. If they already contain the embeddings, we'll index them right away in Milvus. If not, you can later call update_embeddings() to create & index them. :param index: (SQL) index name for storing the docs and metadata :param batch_size: When working with large number of documents, batching can help reduce memory footprint. :return:
Add new documents to the DocumentStore.
def write_documents( self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000 ): """ Add new documents to the DocumentStore. :param documents: List of `Dicts` or List of `Documents`. If they already contain the embeddings, we'll index them right away in Milvus. If not, you can later call update_embeddings() to create & index them. :param index: (SQL) index name for storing the docs and metadata :param batch_size: When working with large number of documents, batching can help reduce memory footprint. :return: """ index = index or self.index self._create_collection_and_index_if_not_exist(index) field_map = self._create_document_field_map() if len(documents) == 0: logger.warning("Calling DocumentStore.write_documents() with empty list") return document_objects = [Document.from_dict(d, field_map=field_map) if isinstance(d, dict) else d for d in documents] add_vectors = False if document_objects[0].embedding is None else True batched_documents = get_batches_from_generator(document_objects, batch_size) with tqdm(total=len(document_objects), disable=not self.progress_bar) as progress_bar: for document_batch in batched_documents: vector_ids = [] if add_vectors: doc_ids = [] embeddings = [] for doc in document_batch: doc_ids.append(doc.id) if isinstance(doc.embedding, np.ndarray): embeddings.append(doc.embedding.tolist()) elif isinstance(doc.embedding, list): embeddings.append(doc.embedding) else: raise AttributeError(f'Format of supplied document embedding {type(doc.embedding)} is not ' f'supported. 
Please use list or numpy.ndarray') if self.update_existing_documents: existing_docs = super().get_documents_by_id(ids=doc_ids, index=index) self._delete_vector_ids_from_milvus(documents=existing_docs, index=index) status, vector_ids = self.milvus_server.insert(collection_name=index, records=embeddings) if status.code != Status.SUCCESS: raise RuntimeError(f'Vector embedding insertion failed: {status}') docs_to_write_in_sql = [] for idx, doc in enumerate(document_batch): meta = doc.meta if add_vectors: meta["vector_id"] = vector_ids[idx] docs_to_write_in_sql.append(doc) super().write_documents(docs_to_write_in_sql, index=index) progress_bar.update(batch_size) progress_bar.close() self.milvus_server.flush([index]) if self.update_existing_documents: self.milvus_server.compact(collection_name=index)
[ "def", "write_documents", "(", "self", ",", "documents", ":", "Union", "[", "List", "[", "dict", "]", ",", "List", "[", "Document", "]", "]", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ")", ":", "index", "=", "index", "or", "self", ".", "index", "self", ".", "_create_collection_and_index_if_not_exist", "(", "index", ")", "field_map", "=", "self", ".", "_create_document_field_map", "(", ")", "if", "len", "(", "documents", ")", "==", "0", ":", "logger", ".", "warning", "(", "\"Calling DocumentStore.write_documents() with empty list\"", ")", "return", "document_objects", "=", "[", "Document", ".", "from_dict", "(", "d", ",", "field_map", "=", "field_map", ")", "if", "isinstance", "(", "d", ",", "dict", ")", "else", "d", "for", "d", "in", "documents", "]", "add_vectors", "=", "False", "if", "document_objects", "[", "0", "]", ".", "embedding", "is", "None", "else", "True", "batched_documents", "=", "get_batches_from_generator", "(", "document_objects", ",", "batch_size", ")", "with", "tqdm", "(", "total", "=", "len", "(", "document_objects", ")", ",", "disable", "=", "not", "self", ".", "progress_bar", ")", "as", "progress_bar", ":", "for", "document_batch", "in", "batched_documents", ":", "vector_ids", "=", "[", "]", "if", "add_vectors", ":", "doc_ids", "=", "[", "]", "embeddings", "=", "[", "]", "for", "doc", "in", "document_batch", ":", "doc_ids", ".", "append", "(", "doc", ".", "id", ")", "if", "isinstance", "(", "doc", ".", "embedding", ",", "np", ".", "ndarray", ")", ":", "embeddings", ".", "append", "(", "doc", ".", "embedding", ".", "tolist", "(", ")", ")", "elif", "isinstance", "(", "doc", ".", "embedding", ",", "list", ")", ":", "embeddings", ".", "append", "(", "doc", ".", "embedding", ")", "else", ":", "raise", "AttributeError", "(", "f'Format of supplied document embedding {type(doc.embedding)} is not '", "f'supported. 
Please use list or numpy.ndarray'", ")", "if", "self", ".", "update_existing_documents", ":", "existing_docs", "=", "super", "(", ")", ".", "get_documents_by_id", "(", "ids", "=", "doc_ids", ",", "index", "=", "index", ")", "self", ".", "_delete_vector_ids_from_milvus", "(", "documents", "=", "existing_docs", ",", "index", "=", "index", ")", "status", ",", "vector_ids", "=", "self", ".", "milvus_server", ".", "insert", "(", "collection_name", "=", "index", ",", "records", "=", "embeddings", ")", "if", "status", ".", "code", "!=", "Status", ".", "SUCCESS", ":", "raise", "RuntimeError", "(", "f'Vector embedding insertion failed: {status}'", ")", "docs_to_write_in_sql", "=", "[", "]", "for", "idx", ",", "doc", "in", "enumerate", "(", "document_batch", ")", ":", "meta", "=", "doc", ".", "meta", "if", "add_vectors", ":", "meta", "[", "\"vector_id\"", "]", "=", "vector_ids", "[", "idx", "]", "docs_to_write_in_sql", ".", "append", "(", "doc", ")", "super", "(", ")", ".", "write_documents", "(", "docs_to_write_in_sql", ",", "index", "=", "index", ")", "progress_bar", ".", "update", "(", "batch_size", ")", "progress_bar", ".", "close", "(", ")", "self", ".", "milvus_server", ".", "flush", "(", "[", "index", "]", ")", "if", "self", ".", "update_existing_documents", ":", "self", ".", "milvus_server", ".", "compact", "(", "collection_name", "=", "index", ")" ]
[ 154, 4 ]
[ 216, 61 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.update_embeddings
( self, retriever: BaseRetriever, index: Optional[str] = None, batch_size: int = 10_000, update_existing_embeddings: bool = True, filters: Optional[Dict[str, List[str]]] = None, )
Updates the embeddings in the the document store using the encoding model specified in the retriever. This can be useful if want to add or change the embeddings for your documents (e.g. after changing the retriever config). :param retriever: Retriever to use to get embeddings for text :param index: (SQL) index name for storing the docs and metadata :param batch_size: When working with large number of documents, batching can help reduce memory footprint. :param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False, only documents without embeddings are processed. This mode can be used for incremental updating of embeddings, wherein, only newly indexed documents get processed. :param filters: Optional filters to narrow down the documents for which embeddings are to be updated. Example: {"name": ["some", "more"], "category": ["only_one"]} :return: None
Updates the embeddings in the the document store using the encoding model specified in the retriever. This can be useful if want to add or change the embeddings for your documents (e.g. after changing the retriever config).
def update_embeddings( self, retriever: BaseRetriever, index: Optional[str] = None, batch_size: int = 10_000, update_existing_embeddings: bool = True, filters: Optional[Dict[str, List[str]]] = None, ): """ Updates the embeddings in the the document store using the encoding model specified in the retriever. This can be useful if want to add or change the embeddings for your documents (e.g. after changing the retriever config). :param retriever: Retriever to use to get embeddings for text :param index: (SQL) index name for storing the docs and metadata :param batch_size: When working with large number of documents, batching can help reduce memory footprint. :param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False, only documents without embeddings are processed. This mode can be used for incremental updating of embeddings, wherein, only newly indexed documents get processed. :param filters: Optional filters to narrow down the documents for which embeddings are to be updated. 
Example: {"name": ["some", "more"], "category": ["only_one"]} :return: None """ index = index or self.index self._create_collection_and_index_if_not_exist(index) document_count = self.get_document_count(index=index) if document_count == 0: logger.warning("Calling DocumentStore.update_embeddings() on an empty index") return logger.info(f"Updating embeddings for {document_count} docs...") result = self._query( index=index, vector_ids=None, batch_size=batch_size, filters=filters, only_documents_without_embedding=not update_existing_embeddings ) batched_documents = get_batches_from_generator(result, batch_size) with tqdm(total=document_count, disable=not self.progress_bar) as progress_bar: for document_batch in batched_documents: self._delete_vector_ids_from_milvus(documents=document_batch, index=index) embeddings = retriever.embed_passages(document_batch) # type: ignore embeddings_list = [embedding.tolist() for embedding in embeddings] assert len(document_batch) == len(embeddings_list) status, vector_ids = self.milvus_server.insert(collection_name=index, records=embeddings_list) if status.code != Status.SUCCESS: raise RuntimeError(f'Vector embedding insertion failed: {status}') vector_id_map = {} for vector_id, doc in zip(vector_ids, document_batch): vector_id_map[doc.id] = vector_id self.update_vector_ids(vector_id_map, index=index) progress_bar.update(batch_size) progress_bar.close() self.milvus_server.flush([index]) self.milvus_server.compact(collection_name=index)
[ "def", "update_embeddings", "(", "self", ",", "retriever", ":", "BaseRetriever", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ",", "update_existing_embeddings", ":", "bool", "=", "True", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", ")", ":", "index", "=", "index", "or", "self", ".", "index", "self", ".", "_create_collection_and_index_if_not_exist", "(", "index", ")", "document_count", "=", "self", ".", "get_document_count", "(", "index", "=", "index", ")", "if", "document_count", "==", "0", ":", "logger", ".", "warning", "(", "\"Calling DocumentStore.update_embeddings() on an empty index\"", ")", "return", "logger", ".", "info", "(", "f\"Updating embeddings for {document_count} docs...\"", ")", "result", "=", "self", ".", "_query", "(", "index", "=", "index", ",", "vector_ids", "=", "None", ",", "batch_size", "=", "batch_size", ",", "filters", "=", "filters", ",", "only_documents_without_embedding", "=", "not", "update_existing_embeddings", ")", "batched_documents", "=", "get_batches_from_generator", "(", "result", ",", "batch_size", ")", "with", "tqdm", "(", "total", "=", "document_count", ",", "disable", "=", "not", "self", ".", "progress_bar", ")", "as", "progress_bar", ":", "for", "document_batch", "in", "batched_documents", ":", "self", ".", "_delete_vector_ids_from_milvus", "(", "documents", "=", "document_batch", ",", "index", "=", "index", ")", "embeddings", "=", "retriever", ".", "embed_passages", "(", "document_batch", ")", "# type: ignore", "embeddings_list", "=", "[", "embedding", ".", "tolist", "(", ")", "for", "embedding", "in", "embeddings", "]", "assert", "len", "(", "document_batch", ")", "==", "len", "(", "embeddings_list", ")", "status", ",", "vector_ids", "=", "self", ".", "milvus_server", ".", "insert", "(", "collection_name", "=", "index", ",", "records", "=", "embeddings_list", ")", "if", "status", ".", 
"code", "!=", "Status", ".", "SUCCESS", ":", "raise", "RuntimeError", "(", "f'Vector embedding insertion failed: {status}'", ")", "vector_id_map", "=", "{", "}", "for", "vector_id", ",", "doc", "in", "zip", "(", "vector_ids", ",", "document_batch", ")", ":", "vector_id_map", "[", "doc", ".", "id", "]", "=", "vector_id", "self", ".", "update_vector_ids", "(", "vector_id_map", ",", "index", "=", "index", ")", "progress_bar", ".", "update", "(", "batch_size", ")", "progress_bar", ".", "close", "(", ")", "self", ".", "milvus_server", ".", "flush", "(", "[", "index", "]", ")", "self", ".", "milvus_server", ".", "compact", "(", "collection_name", "=", "index", ")" ]
[ 218, 4 ]
[ 280, 57 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.query_by_embedding
(self, query_emb: np.ndarray, filters: Optional[dict] = None, top_k: int = 10, index: Optional[str] = None, return_embedding: Optional[bool] = None)
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric. :param query_emb: Embedding of the query (e.g. gathered from DPR) :param filters: Optional filters to narrow down the search space. Example: {"name": ["some", "more"], "category": ["only_one"]} :param top_k: How many documents to return :param index: (SQL) index name for storing the docs and metadata :param return_embedding: To return document embedding :return:
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.
def query_by_embedding(self, query_emb: np.ndarray, filters: Optional[dict] = None, top_k: int = 10, index: Optional[str] = None, return_embedding: Optional[bool] = None) -> List[Document]: """ Find the document that is most similar to the provided `query_emb` by using a vector similarity metric. :param query_emb: Embedding of the query (e.g. gathered from DPR) :param filters: Optional filters to narrow down the search space. Example: {"name": ["some", "more"], "category": ["only_one"]} :param top_k: How many documents to return :param index: (SQL) index name for storing the docs and metadata :param return_embedding: To return document embedding :return: """ if filters: raise Exception("Query filters are not implemented for the MilvusDocumentStore.") index = index or self.index status, ok = self.milvus_server.has_collection(collection_name=index) if status.code != Status.SUCCESS: raise RuntimeError(f'Milvus has collection check failed: {status}') if not ok: raise Exception("No index exists. 
Use 'update_embeddings()` to create an index.") if return_embedding is None: return_embedding = self.return_embedding index = index or self.index query_emb = query_emb.reshape(1, -1).astype(np.float32) status, search_result = self.milvus_server.search( collection_name=index, query_records=query_emb, top_k=top_k, params=self.search_param ) if status.code != Status.SUCCESS: raise RuntimeError(f'Vector embedding search failed: {status}') vector_ids_for_query = [] scores_for_vector_ids: Dict[str, float] = {} for vector_id_list, distance_list in zip(search_result.id_array, search_result.distance_array): for vector_id, distance in zip(vector_id_list, distance_list): vector_ids_for_query.append(str(vector_id)) scores_for_vector_ids[str(vector_id)] = distance documents = self.get_documents_by_vector_ids(vector_ids_for_query, index=index) if return_embedding: self._populate_embeddings_to_docs(index=index, docs=documents) for doc in documents: doc.score = scores_for_vector_ids[doc.meta["vector_id"]] doc.probability = float(expit(np.asarray(doc.score / 100))) return documents
[ "def", "query_by_embedding", "(", "self", ",", "query_emb", ":", "np", ".", "ndarray", ",", "filters", ":", "Optional", "[", "dict", "]", "=", "None", ",", "top_k", ":", "int", "=", "10", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "return_embedding", ":", "Optional", "[", "bool", "]", "=", "None", ")", "->", "List", "[", "Document", "]", ":", "if", "filters", ":", "raise", "Exception", "(", "\"Query filters are not implemented for the MilvusDocumentStore.\"", ")", "index", "=", "index", "or", "self", ".", "index", "status", ",", "ok", "=", "self", ".", "milvus_server", ".", "has_collection", "(", "collection_name", "=", "index", ")", "if", "status", ".", "code", "!=", "Status", ".", "SUCCESS", ":", "raise", "RuntimeError", "(", "f'Milvus has collection check failed: {status}'", ")", "if", "not", "ok", ":", "raise", "Exception", "(", "\"No index exists. Use 'update_embeddings()` to create an index.\"", ")", "if", "return_embedding", "is", "None", ":", "return_embedding", "=", "self", ".", "return_embedding", "index", "=", "index", "or", "self", ".", "index", "query_emb", "=", "query_emb", ".", "reshape", "(", "1", ",", "-", "1", ")", ".", "astype", "(", "np", ".", "float32", ")", "status", ",", "search_result", "=", "self", ".", "milvus_server", ".", "search", "(", "collection_name", "=", "index", ",", "query_records", "=", "query_emb", ",", "top_k", "=", "top_k", ",", "params", "=", "self", ".", "search_param", ")", "if", "status", ".", "code", "!=", "Status", ".", "SUCCESS", ":", "raise", "RuntimeError", "(", "f'Vector embedding search failed: {status}'", ")", "vector_ids_for_query", "=", "[", "]", "scores_for_vector_ids", ":", "Dict", "[", "str", ",", "float", "]", "=", "{", "}", "for", "vector_id_list", ",", "distance_list", "in", "zip", "(", "search_result", ".", "id_array", ",", "search_result", ".", "distance_array", ")", ":", "for", "vector_id", ",", "distance", "in", "zip", "(", "vector_id_list", ",", "distance_list", ")", ":", 
"vector_ids_for_query", ".", "append", "(", "str", "(", "vector_id", ")", ")", "scores_for_vector_ids", "[", "str", "(", "vector_id", ")", "]", "=", "distance", "documents", "=", "self", ".", "get_documents_by_vector_ids", "(", "vector_ids_for_query", ",", "index", "=", "index", ")", "if", "return_embedding", ":", "self", ".", "_populate_embeddings_to_docs", "(", "index", "=", "index", ",", "docs", "=", "documents", ")", "for", "doc", "in", "documents", ":", "doc", ".", "score", "=", "scores_for_vector_ids", "[", "doc", ".", "meta", "[", "\"vector_id\"", "]", "]", "doc", ".", "probability", "=", "float", "(", "expit", "(", "np", ".", "asarray", "(", "doc", ".", "score", "/", "100", ")", ")", ")", "return", "documents" ]
[ 282, 4 ]
[ 339, 24 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.delete_all_documents
(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None)
Delete all documents (from SQL AND Milvus). :param index: (SQL) index name for storing the docs and metadata :param filters: Optional filters to narrow down the search space. Example: {"name": ["some", "more"], "category": ["only_one"]} :return: None
Delete all documents (from SQL AND Milvus). :param index: (SQL) index name for storing the docs and metadata :param filters: Optional filters to narrow down the search space. Example: {"name": ["some", "more"], "category": ["only_one"]} :return: None
def delete_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None): """ Delete all documents (from SQL AND Milvus). :param index: (SQL) index name for storing the docs and metadata :param filters: Optional filters to narrow down the search space. Example: {"name": ["some", "more"], "category": ["only_one"]} :return: None """ index = index or self.index super().delete_all_documents(index=index, filters=filters) status, ok = self.milvus_server.has_collection(collection_name=index) if status.code != Status.SUCCESS: raise RuntimeError(f'Milvus has collection check failed: {status}') if ok: status = self.milvus_server.drop_collection(collection_name=index) if status.code != Status.SUCCESS: raise RuntimeError(f'Milvus drop collection failed: {status}') self.milvus_server.flush([index]) self.milvus_server.compact(collection_name=index)
[ "def", "delete_all_documents", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ")", ":", "index", "=", "index", "or", "self", ".", "index", "super", "(", ")", ".", "delete_all_documents", "(", "index", "=", "index", ",", "filters", "=", "filters", ")", "status", ",", "ok", "=", "self", ".", "milvus_server", ".", "has_collection", "(", "collection_name", "=", "index", ")", "if", "status", ".", "code", "!=", "Status", ".", "SUCCESS", ":", "raise", "RuntimeError", "(", "f'Milvus has collection check failed: {status}'", ")", "if", "ok", ":", "status", "=", "self", ".", "milvus_server", ".", "drop_collection", "(", "collection_name", "=", "index", ")", "if", "status", ".", "code", "!=", "Status", ".", "SUCCESS", ":", "raise", "RuntimeError", "(", "f'Milvus drop collection failed: {status}'", ")", "self", ".", "milvus_server", ".", "flush", "(", "[", "index", "]", ")", "self", ".", "milvus_server", ".", "compact", "(", "collection_name", "=", "index", ")" ]
[ 341, 4 ]
[ 360, 61 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.get_all_documents_generator
( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None, batch_size: int = 10_000, )
Get all documents from the document store. Under-the-hood, documents are fetched in batches from the document store and yielded as individual documents. This method can be used to iteratively process a large number of documents without having to load all documents in memory. :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param filters: Optional filters to narrow down the documents to return. Example: {"name": ["some", "more"], "category": ["only_one"]} :param return_embedding: Whether to return the document embeddings. :param batch_size: When working with large number of documents, batching can help reduce memory footprint.
Get all documents from the document store. Under-the-hood, documents are fetched in batches from the document store and yielded as individual documents. This method can be used to iteratively process a large number of documents without having to load all documents in memory.
def get_all_documents_generator( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None, batch_size: int = 10_000, ) -> Generator[Document, None, None]: """ Get all documents from the document store. Under-the-hood, documents are fetched in batches from the document store and yielded as individual documents. This method can be used to iteratively process a large number of documents without having to load all documents in memory. :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param filters: Optional filters to narrow down the documents to return. Example: {"name": ["some", "more"], "category": ["only_one"]} :param return_embedding: Whether to return the document embeddings. :param batch_size: When working with large number of documents, batching can help reduce memory footprint. """ index = index or self.index documents = super().get_all_documents_generator( index=index, filters=filters, batch_size=batch_size ) if return_embedding is None: return_embedding = self.return_embedding for doc in documents: if return_embedding: self._populate_embeddings_to_docs(index=index, docs=[doc]) yield doc
[ "def", "get_all_documents_generator", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "return_embedding", ":", "Optional", "[", "bool", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ",", ")", "->", "Generator", "[", "Document", ",", "None", ",", "None", "]", ":", "index", "=", "index", "or", "self", ".", "index", "documents", "=", "super", "(", ")", ".", "get_all_documents_generator", "(", "index", "=", "index", ",", "filters", "=", "filters", ",", "batch_size", "=", "batch_size", ")", "if", "return_embedding", "is", "None", ":", "return_embedding", "=", "self", ".", "return_embedding", "for", "doc", "in", "documents", ":", "if", "return_embedding", ":", "self", ".", "_populate_embeddings_to_docs", "(", "index", "=", "index", ",", "docs", "=", "[", "doc", "]", ")", "yield", "doc" ]
[ 362, 4 ]
[ 391, 21 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.get_all_documents
( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None, batch_size: int = 10_000, )
Get documents from the document store (optionally using filter criteria). :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param filters: Optional filters to narrow down the documents to return. Example: {"name": ["some", "more"], "category": ["only_one"]} :param return_embedding: Whether to return the document embeddings. :param batch_size: When working with large number of documents, batching can help reduce memory footprint.
Get documents from the document store (optionally using filter criteria).
def get_all_documents( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None, batch_size: int = 10_000, ) -> List[Document]: """ Get documents from the document store (optionally using filter criteria). :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param filters: Optional filters to narrow down the documents to return. Example: {"name": ["some", "more"], "category": ["only_one"]} :param return_embedding: Whether to return the document embeddings. :param batch_size: When working with large number of documents, batching can help reduce memory footprint. """ index = index or self.index result = self.get_all_documents_generator( index=index, filters=filters, return_embedding=return_embedding, batch_size=batch_size ) documents = list(result) return documents
[ "def", "get_all_documents", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "return_embedding", ":", "Optional", "[", "bool", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ",", ")", "->", "List", "[", "Document", "]", ":", "index", "=", "index", "or", "self", ".", "index", "result", "=", "self", ".", "get_all_documents_generator", "(", "index", "=", "index", ",", "filters", "=", "filters", ",", "return_embedding", "=", "return_embedding", ",", "batch_size", "=", "batch_size", ")", "documents", "=", "list", "(", "result", ")", "return", "documents" ]
[ 393, 4 ]
[ 416, 24 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.get_document_by_id
(self, id: str, index: Optional[str] = None)
Fetch a document by specifying its text id string :param id: ID of the document :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used.
Fetch a document by specifying its text id string
def get_document_by_id(self, id: str, index: Optional[str] = None) -> Optional[Document]: """ Fetch a document by specifying its text id string :param id: ID of the document :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. """ documents = self.get_documents_by_id([id], index) document = documents[0] if documents else None return document
[ "def", "get_document_by_id", "(", "self", ",", "id", ":", "str", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Optional", "[", "Document", "]", ":", "documents", "=", "self", ".", "get_documents_by_id", "(", "[", "id", "]", ",", "index", ")", "document", "=", "documents", "[", "0", "]", "if", "documents", "else", "None", "return", "document" ]
[ 418, 4 ]
[ 428, 23 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.get_documents_by_id
( self, ids: List[str], index: Optional[str] = None, batch_size: int = 10_000 )
Fetch multiple documents by specifying their IDs (strings) :param ids: List of IDs of the documents :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param batch_size: When working with large number of documents, batching can help reduce memory footprint.
Fetch multiple documents by specifying their IDs (strings)
def get_documents_by_id( self, ids: List[str], index: Optional[str] = None, batch_size: int = 10_000 ) -> List[Document]: """ Fetch multiple documents by specifying their IDs (strings) :param ids: List of IDs of the documents :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param batch_size: When working with large number of documents, batching can help reduce memory footprint. """ index = index or self.index documents = super().get_documents_by_id(ids=ids, index=index) if self.return_embedding: self._populate_embeddings_to_docs(index=index, docs=documents) return documents
[ "def", "get_documents_by_id", "(", "self", ",", "ids", ":", "List", "[", "str", "]", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ")", "->", "List", "[", "Document", "]", ":", "index", "=", "index", "or", "self", ".", "index", "documents", "=", "super", "(", ")", ".", "get_documents_by_id", "(", "ids", "=", "ids", ",", "index", "=", "index", ")", "if", "self", ".", "return_embedding", ":", "self", ".", "_populate_embeddings_to_docs", "(", "index", "=", "index", ",", "docs", "=", "documents", ")", "return", "documents" ]
[ 430, 4 ]
[ 446, 24 ]
python
en
['en', 'error', 'th']
False
MilvusDocumentStore.get_all_vectors
(self, index: Optional[str] = None)
Helper function to dump all vectors stored in Milvus server. :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :return: List[np.array]: List of vectors.
Helper function to dump all vectors stored in Milvus server.
def get_all_vectors(self, index: Optional[str] = None) -> List[np.ndarray]: """ Helper function to dump all vectors stored in Milvus server. :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :return: List[np.array]: List of vectors. """ index = index or self.index status, collection_info = self.milvus_server.get_collection_stats(collection_name=index) if not status.OK(): logger.info(f"Failed fetch stats from store ...") return list() logger.debug(f"collection_info = {collection_info}") ids = list() partition_list = collection_info["partitions"] for partition in partition_list: segment_list = partition["segments"] for segment in segment_list: segment_name = segment["name"] status, id_list = self.milvus_server.list_id_in_segment( collection_name=index, segment_name=segment_name) logger.debug(f"{status}: segment {segment_name} has {len(id_list)} vectors ...") ids.extend(id_list) if len(ids) == 0: logger.info(f"No documents in the store ...") return list() status, vectors = self.milvus_server.get_entity_by_id(collection_name=index, ids=ids) if not status.OK(): logger.info(f"Failed fetch document for ids {ids} from store ...") return list() return vectors
[ "def", "get_all_vectors", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "List", "[", "np", ".", "ndarray", "]", ":", "index", "=", "index", "or", "self", ".", "index", "status", ",", "collection_info", "=", "self", ".", "milvus_server", ".", "get_collection_stats", "(", "collection_name", "=", "index", ")", "if", "not", "status", ".", "OK", "(", ")", ":", "logger", ".", "info", "(", "f\"Failed fetch stats from store ...\"", ")", "return", "list", "(", ")", "logger", ".", "debug", "(", "f\"collection_info = {collection_info}\"", ")", "ids", "=", "list", "(", ")", "partition_list", "=", "collection_info", "[", "\"partitions\"", "]", "for", "partition", "in", "partition_list", ":", "segment_list", "=", "partition", "[", "\"segments\"", "]", "for", "segment", "in", "segment_list", ":", "segment_name", "=", "segment", "[", "\"name\"", "]", "status", ",", "id_list", "=", "self", ".", "milvus_server", ".", "list_id_in_segment", "(", "collection_name", "=", "index", ",", "segment_name", "=", "segment_name", ")", "logger", ".", "debug", "(", "f\"{status}: segment {segment_name} has {len(id_list)} vectors ...\"", ")", "ids", ".", "extend", "(", "id_list", ")", "if", "len", "(", "ids", ")", "==", "0", ":", "logger", ".", "info", "(", "f\"No documents in the store ...\"", ")", "return", "list", "(", ")", "status", ",", "vectors", "=", "self", ".", "milvus_server", ".", "get_entity_by_id", "(", "collection_name", "=", "index", ",", "ids", "=", "ids", ")", "if", "not", "status", ".", "OK", "(", ")", ":", "logger", ".", "info", "(", "f\"Failed fetch document for ids {ids} from store ...\"", ")", "return", "list", "(", ")", "return", "vectors" ]
[ 483, 4 ]
[ 520, 22 ]
python
en
['en', 'error', 'th']
False
ExpectationValidationResult.__eq__
(self, other)
ExpectationValidationResult equality ignores instance identity, relying only on properties.
ExpectationValidationResult equality ignores instance identity, relying only on properties.
def __eq__(self, other): """ExpectationValidationResult equality ignores instance identity, relying only on properties.""" # NOTE: JPC - 20200213 - need to spend some time thinking about whether we want to # consistently allow dict as a comparison alternative in situations like these... # if isinstance(other, dict): # try: # other = ExpectationValidationResult(**other) # except ValueError: # return NotImplemented if not isinstance(other, self.__class__): # Delegate comparison to the other instance's __eq__. return NotImplemented try: return all( ( self.success == other.success, ( self.expectation_config is None and other.expectation_config is None ) or ( self.expectation_config is not None and self.expectation_config.isEquivalentTo( other.expectation_config ) ), # Result is a dictionary allowed to have nested dictionaries that are still of complex types (e.g. # numpy) consequently, series' comparison can persist. Wrapping in all() ensures comparison is # handled appropriately. (self.result is None and other.result is None) or (all(self.result) == all(other.result)), self.meta == other.meta, self.exception_info == other.exception_info, ) ) except (ValueError, TypeError): # if invalid comparisons are attempted, the objects are not equal. return False
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "# NOTE: JPC - 20200213 - need to spend some time thinking about whether we want to", "# consistently allow dict as a comparison alternative in situations like these...", "# if isinstance(other, dict):", "# try:", "# other = ExpectationValidationResult(**other)", "# except ValueError:", "# return NotImplemented", "if", "not", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "# Delegate comparison to the other instance's __eq__.", "return", "NotImplemented", "try", ":", "return", "all", "(", "(", "self", ".", "success", "==", "other", ".", "success", ",", "(", "self", ".", "expectation_config", "is", "None", "and", "other", ".", "expectation_config", "is", "None", ")", "or", "(", "self", ".", "expectation_config", "is", "not", "None", "and", "self", ".", "expectation_config", ".", "isEquivalentTo", "(", "other", ".", "expectation_config", ")", ")", ",", "# Result is a dictionary allowed to have nested dictionaries that are still of complex types (e.g.", "# numpy) consequently, series' comparison can persist. Wrapping in all() ensures comparison is", "# handled appropriately.", "(", "self", ".", "result", "is", "None", "and", "other", ".", "result", "is", "None", ")", "or", "(", "all", "(", "self", ".", "result", ")", "==", "all", "(", "other", ".", "result", ")", ")", ",", "self", ".", "meta", "==", "other", ".", "meta", ",", "self", ".", "exception_info", "==", "other", ".", "exception_info", ",", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "# if invalid comparisons are attempted, the objects are not equal.", "return", "False" ]
[ 66, 4 ]
[ 103, 24 ]
python
en
['en', 'en', 'en']
True
ExpectationSuiteValidationResult.__eq__
(self, other)
ExpectationSuiteValidationResult equality ignores instance identity, relying only on properties.
ExpectationSuiteValidationResult equality ignores instance identity, relying only on properties.
def __eq__(self, other): """ExpectationSuiteValidationResult equality ignores instance identity, relying only on properties.""" if not isinstance(other, self.__class__): # Delegate comparison to the other instance's __eq__. return NotImplemented return all( ( self.success == other.success, self.results == other.results, self.evaluation_parameters == other.evaluation_parameters, self.statistics == other.statistics, self.meta == other.meta, ) )
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "# Delegate comparison to the other instance's __eq__.", "return", "NotImplemented", "return", "all", "(", "(", "self", ".", "success", "==", "other", ".", "success", ",", "self", ".", "results", "==", "other", ".", "results", ",", "self", ".", "evaluation_parameters", "==", "other", ".", "evaluation_parameters", ",", "self", ".", "statistics", "==", "other", ".", "statistics", ",", "self", ".", "meta", "==", "other", ".", "meta", ",", ")", ")" ]
[ 285, 4 ]
[ 298, 9 ]
python
en
['en', 'en', 'en']
True
image_upload_url
(recipe_id)
Return url for recipe image upload
Return url for recipe image upload
def image_upload_url(recipe_id): """Return url for recipe image upload""" return reverse('recipe:recipe-upload-image', args=[recipe_id])
[ "def", "image_upload_url", "(", "recipe_id", ")", ":", "return", "reverse", "(", "'recipe:recipe-upload-image'", ",", "args", "=", "[", "recipe_id", "]", ")" ]
[ 20, 0 ]
[ 22, 66 ]
python
en
['en', 'da', 'en']
True
detail_url
(recipe_id)
Return recipe detail url
Return recipe detail url
def detail_url(recipe_id): """Return recipe detail url""" return reverse('recipe:recipe-detail', args=[recipe_id])
[ "def", "detail_url", "(", "recipe_id", ")", ":", "return", "reverse", "(", "'recipe:recipe-detail'", ",", "args", "=", "[", "recipe_id", "]", ")" ]
[ 25, 0 ]
[ 27, 60 ]
python
co
['it', 'co', 'en']
False
sample_tag
(user, name='Main course')
Create and return a sample tag
Create and return a sample tag
def sample_tag(user, name='Main course'): """Create and return a sample tag""" return Tag.objects.create(user=user, name=name)
[ "def", "sample_tag", "(", "user", ",", "name", "=", "'Main course'", ")", ":", "return", "Tag", ".", "objects", ".", "create", "(", "user", "=", "user", ",", "name", "=", "name", ")" ]
[ 30, 0 ]
[ 32, 51 ]
python
en
['en', 'sm', 'en']
True
sample_ingredient
(user, name='Cinnamon')
Create and return a sample ingredient
Create and return a sample ingredient
def sample_ingredient(user, name='Cinnamon'): """Create and return a sample ingredient""" return Ingredient.objects.create(user=user, name=name)
[ "def", "sample_ingredient", "(", "user", ",", "name", "=", "'Cinnamon'", ")", ":", "return", "Ingredient", ".", "objects", ".", "create", "(", "user", "=", "user", ",", "name", "=", "name", ")" ]
[ 35, 0 ]
[ 37, 58 ]
python
en
['en', 'en', 'en']
True
sample_recipe
(user, **params)
Create and return a sample recipe
Create and return a sample recipe
def sample_recipe(user, **params): """Create and return a sample recipe""" defaults = { 'title': 'Sample recipe', 'time_minutes': 10, 'price': 5.00 } defaults.update(params) return Recipe.objects.create(user=user, **defaults)
[ "def", "sample_recipe", "(", "user", ",", "*", "*", "params", ")", ":", "defaults", "=", "{", "'title'", ":", "'Sample recipe'", ",", "'time_minutes'", ":", "10", ",", "'price'", ":", "5.00", "}", "defaults", ".", "update", "(", "params", ")", "return", "Recipe", ".", "objects", ".", "create", "(", "user", "=", "user", ",", "*", "*", "defaults", ")" ]
[ 40, 0 ]
[ 49, 55 ]
python
en
['en', 'co', 'en']
True
PublicRecipeApiTests.test_auth_required
(self)
Test that authentication is required
Test that authentication is required
def test_auth_required(self): """Test that authentication is required""" res = self.client.get(RECIPES_URL) self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
[ "def", "test_auth_required", "(", "self", ")", ":", "res", "=", "self", ".", "client", ".", "get", "(", "RECIPES_URL", ")", "self", ".", "assertEqual", "(", "res", ".", "status_code", ",", "status", ".", "HTTP_401_UNAUTHORIZED", ")" ]
[ 58, 4 ]
[ 62, 71 ]
python
en
['en', 'en', 'en']
True
PrivateRecipeApiTests.test_retrieve_recipes
(self)
Test retrieving a list of recipes
Test retrieving a list of recipes
def test_retrieve_recipes(self): """Test retrieving a list of recipes""" sample_recipe(user=self.user) sample_recipe(user=self.user) res = self.client.get(RECIPES_URL) recipes = Recipe.objects.all().order_by('-id') serializer = RecipeSerializer(recipes, many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, serializer.data)
[ "def", "test_retrieve_recipes", "(", "self", ")", ":", "sample_recipe", "(", "user", "=", "self", ".", "user", ")", "sample_recipe", "(", "user", "=", "self", ".", "user", ")", "res", "=", "self", ".", "client", ".", "get", "(", "RECIPES_URL", ")", "recipes", "=", "Recipe", ".", "objects", ".", "all", "(", ")", ".", "order_by", "(", "'-id'", ")", "serializer", "=", "RecipeSerializer", "(", "recipes", ",", "many", "=", "True", ")", "self", ".", "assertEqual", "(", "res", ".", "status_code", ",", "status", ".", "HTTP_200_OK", ")", "self", ".", "assertEqual", "(", "res", ".", "data", ",", "serializer", ".", "data", ")" ]
[ 76, 4 ]
[ 86, 51 ]
python
en
['en', 'pt', 'en']
True
PrivateRecipeApiTests.test_recipes_limited_to_user
(self)
Test retrieving recipes for user
Test retrieving recipes for user
def test_recipes_limited_to_user(self): """Test retrieving recipes for user""" user2 = get_user_model().objects.create_user( '[email protected]', 'testpass' ) sample_recipe(user=user2) sample_recipe(user=self.user) res = self.client.get(RECIPES_URL) recipes = Recipe.objects.filter(user=self.user) serializer = RecipeSerializer(recipes, many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(len(res.data), 1) self.assertEqual(res.data, serializer.data)
[ "def", "test_recipes_limited_to_user", "(", "self", ")", ":", "user2", "=", "get_user_model", "(", ")", ".", "objects", ".", "create_user", "(", "'[email protected]'", ",", "'testpass'", ")", "sample_recipe", "(", "user", "=", "user2", ")", "sample_recipe", "(", "user", "=", "self", ".", "user", ")", "res", "=", "self", ".", "client", ".", "get", "(", "RECIPES_URL", ")", "recipes", "=", "Recipe", ".", "objects", ".", "filter", "(", "user", "=", "self", ".", "user", ")", "serializer", "=", "RecipeSerializer", "(", "recipes", ",", "many", "=", "True", ")", "self", ".", "assertEqual", "(", "res", ".", "status_code", ",", "status", ".", "HTTP_200_OK", ")", "self", ".", "assertEqual", "(", "len", "(", "res", ".", "data", ")", ",", "1", ")", "self", ".", "assertEqual", "(", "res", ".", "data", ",", "serializer", ".", "data", ")" ]
[ 88, 4 ]
[ 103, 51 ]
python
en
['en', 'pt', 'en']
True
PrivateRecipeApiTests.test_view_recipe_detail
(self)
Test viewing a recipe detail
Test viewing a recipe detail
def test_view_recipe_detail(self): """Test viewing a recipe detail""" recipe = sample_recipe(user=self.user) recipe.tags.add(sample_tag(user=self.user)) recipe.ingredients.add(sample_ingredient(user=self.user)) url = detail_url(recipe.id) res = self.client.get(url) serializer = RecipeDetailSerializer(recipe) self.assertEqual(res.data, serializer.data)
[ "def", "test_view_recipe_detail", "(", "self", ")", ":", "recipe", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ")", "recipe", ".", "tags", ".", "add", "(", "sample_tag", "(", "user", "=", "self", ".", "user", ")", ")", "recipe", ".", "ingredients", ".", "add", "(", "sample_ingredient", "(", "user", "=", "self", ".", "user", ")", ")", "url", "=", "detail_url", "(", "recipe", ".", "id", ")", "res", "=", "self", ".", "client", ".", "get", "(", "url", ")", "serializer", "=", "RecipeDetailSerializer", "(", "recipe", ")", "self", ".", "assertEqual", "(", "res", ".", "data", ",", "serializer", ".", "data", ")" ]
[ 105, 4 ]
[ 115, 51 ]
python
en
['en', 'en', 'en']
True
PrivateRecipeApiTests.test_create_basic_recipe
(self)
Test creating recipe
Test creating recipe
def test_create_basic_recipe(self): """Test creating recipe""" payload = { 'title': 'Chocolate cheesecake', 'time_minutes': 30, 'price': 5.00 } res = self.client.post(RECIPES_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) recipe = Recipe.objects.get(id=res.data['id']) for key in payload.keys(): self.assertEqual(payload[key], getattr(recipe, key))
[ "def", "test_create_basic_recipe", "(", "self", ")", ":", "payload", "=", "{", "'title'", ":", "'Chocolate cheesecake'", ",", "'time_minutes'", ":", "30", ",", "'price'", ":", "5.00", "}", "res", "=", "self", ".", "client", ".", "post", "(", "RECIPES_URL", ",", "payload", ")", "self", ".", "assertEqual", "(", "res", ".", "status_code", ",", "status", ".", "HTTP_201_CREATED", ")", "recipe", "=", "Recipe", ".", "objects", ".", "get", "(", "id", "=", "res", ".", "data", "[", "'id'", "]", ")", "for", "key", "in", "payload", ".", "keys", "(", ")", ":", "self", ".", "assertEqual", "(", "payload", "[", "key", "]", ",", "getattr", "(", "recipe", ",", "key", ")", ")" ]
[ 117, 4 ]
[ 129, 64 ]
python
en
['en', 'la', 'en']
True
PrivateRecipeApiTests.test_create_recipe_with_tags
(self)
Test creating a recipe with tags
Test creating a recipe with tags
def test_create_recipe_with_tags(self): """Test creating a recipe with tags""" tag1 = sample_tag(user=self.user, name='Vegan') tag2 = sample_tag(user=self.user, name='Dessert') payload = { 'title': 'Lime cheesecake', 'tags': [tag1.id, tag2.id], 'time_minutes': 45, 'price': 15.00 } res = self.client.post(RECIPES_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) recipe = Recipe.objects.get(id=res.data['id']) tags = recipe.tags.all() self.assertEqual(tags.count(), 2) self.assertIn(tag1, tags) self.assertIn(tag2, tags)
[ "def", "test_create_recipe_with_tags", "(", "self", ")", ":", "tag1", "=", "sample_tag", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Vegan'", ")", "tag2", "=", "sample_tag", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Dessert'", ")", "payload", "=", "{", "'title'", ":", "'Lime cheesecake'", ",", "'tags'", ":", "[", "tag1", ".", "id", ",", "tag2", ".", "id", "]", ",", "'time_minutes'", ":", "45", ",", "'price'", ":", "15.00", "}", "res", "=", "self", ".", "client", ".", "post", "(", "RECIPES_URL", ",", "payload", ")", "self", ".", "assertEqual", "(", "res", ".", "status_code", ",", "status", ".", "HTTP_201_CREATED", ")", "recipe", "=", "Recipe", ".", "objects", ".", "get", "(", "id", "=", "res", ".", "data", "[", "'id'", "]", ")", "tags", "=", "recipe", ".", "tags", ".", "all", "(", ")", "self", ".", "assertEqual", "(", "tags", ".", "count", "(", ")", ",", "2", ")", "self", ".", "assertIn", "(", "tag1", ",", "tags", ")", "self", ".", "assertIn", "(", "tag2", ",", "tags", ")" ]
[ 131, 4 ]
[ 148, 33 ]
python
en
['en', 'en', 'en']
True
PrivateRecipeApiTests.test_create_recipe_with_ingredients
(self)
Test creating a recipe with ingredients
Test creating a recipe with ingredients
def test_create_recipe_with_ingredients(self): """Test creating a recipe with ingredients""" ingredients1 = sample_ingredient(user=self.user, name='Ginger') ingredients2 = sample_ingredient(user=self.user, name='Prawns') payload = { 'title': 'Thai prawn red curry', 'ingredients': [ingredients1.id, ingredients2.id], 'time_minutes': 40, 'price': 9.00 } res = self.client.post(RECIPES_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) recipe = Recipe.objects.get(id=res.data['id']) ingredients = recipe.ingredients.all() self.assertEqual(ingredients.count(), 2) self.assertIn(ingredients1, ingredients) self.assertIn(ingredients2, ingredients)
[ "def", "test_create_recipe_with_ingredients", "(", "self", ")", ":", "ingredients1", "=", "sample_ingredient", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Ginger'", ")", "ingredients2", "=", "sample_ingredient", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Prawns'", ")", "payload", "=", "{", "'title'", ":", "'Thai prawn red curry'", ",", "'ingredients'", ":", "[", "ingredients1", ".", "id", ",", "ingredients2", ".", "id", "]", ",", "'time_minutes'", ":", "40", ",", "'price'", ":", "9.00", "}", "res", "=", "self", ".", "client", ".", "post", "(", "RECIPES_URL", ",", "payload", ")", "self", ".", "assertEqual", "(", "res", ".", "status_code", ",", "status", ".", "HTTP_201_CREATED", ")", "recipe", "=", "Recipe", ".", "objects", ".", "get", "(", "id", "=", "res", ".", "data", "[", "'id'", "]", ")", "ingredients", "=", "recipe", ".", "ingredients", ".", "all", "(", ")", "self", ".", "assertEqual", "(", "ingredients", ".", "count", "(", ")", ",", "2", ")", "self", ".", "assertIn", "(", "ingredients1", ",", "ingredients", ")", "self", ".", "assertIn", "(", "ingredients2", ",", "ingredients", ")" ]
[ 150, 4 ]
[ 167, 48 ]
python
en
['en', 'en', 'en']
True
PrivateRecipeApiTests.test_partial_update_recipe
(self)
Test updating a recipe with PATCH
Test updating a recipe with PATCH
def test_partial_update_recipe(self): """Test updating a recipe with PATCH""" recipe = sample_recipe(user=self.user) recipe.tags.add(sample_tag(user=self.user)) new_tag = sample_tag(user=self.user, name='Curry') payload = { 'title': 'Chicken tikka', 'tags': [new_tag.id] } url = detail_url(recipe.id) self.client.patch(url, payload) recipe.refresh_from_db() tags = recipe.tags.all() self.assertEqual(recipe.title, payload['title']) self.assertEqual(len(tags), 1) self.assertIn(new_tag, tags)
[ "def", "test_partial_update_recipe", "(", "self", ")", ":", "recipe", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ")", "recipe", ".", "tags", ".", "add", "(", "sample_tag", "(", "user", "=", "self", ".", "user", ")", ")", "new_tag", "=", "sample_tag", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Curry'", ")", "payload", "=", "{", "'title'", ":", "'Chicken tikka'", ",", "'tags'", ":", "[", "new_tag", ".", "id", "]", "}", "url", "=", "detail_url", "(", "recipe", ".", "id", ")", "self", ".", "client", ".", "patch", "(", "url", ",", "payload", ")", "recipe", ".", "refresh_from_db", "(", ")", "tags", "=", "recipe", ".", "tags", ".", "all", "(", ")", "self", ".", "assertEqual", "(", "recipe", ".", "title", ",", "payload", "[", "'title'", "]", ")", "self", ".", "assertEqual", "(", "len", "(", "tags", ")", ",", "1", ")", "self", ".", "assertIn", "(", "new_tag", ",", "tags", ")" ]
[ 169, 4 ]
[ 186, 36 ]
python
en
['en', 'en', 'en']
True
PrivateRecipeApiTests.test_full_update_recipe
(self)
Test updating a recipe with PUT
Test updating a recipe with PUT
def test_full_update_recipe(self): """Test updating a recipe with PUT""" recipe = sample_recipe(user=self.user) recipe.tags.add(sample_tag(user=self.user)) payload = { 'title': 'Spaghetti carbonara', 'time_minutes': 25, 'price': 10.00 } url = detail_url(recipe.id) self.client.put(url, payload) recipe.refresh_from_db() self.assertEqual(recipe.title, payload['title']) self.assertEqual(recipe.time_minutes, payload['time_minutes']) self.assertEqual(recipe.price, payload['price']) tags = recipe.tags.all() self.assertEqual(len(tags), 0)
[ "def", "test_full_update_recipe", "(", "self", ")", ":", "recipe", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ")", "recipe", ".", "tags", ".", "add", "(", "sample_tag", "(", "user", "=", "self", ".", "user", ")", ")", "payload", "=", "{", "'title'", ":", "'Spaghetti carbonara'", ",", "'time_minutes'", ":", "25", ",", "'price'", ":", "10.00", "}", "url", "=", "detail_url", "(", "recipe", ".", "id", ")", "self", ".", "client", ".", "put", "(", "url", ",", "payload", ")", "recipe", ".", "refresh_from_db", "(", ")", "self", ".", "assertEqual", "(", "recipe", ".", "title", ",", "payload", "[", "'title'", "]", ")", "self", ".", "assertEqual", "(", "recipe", ".", "time_minutes", ",", "payload", "[", "'time_minutes'", "]", ")", "self", ".", "assertEqual", "(", "recipe", ".", "price", ",", "payload", "[", "'price'", "]", ")", "tags", "=", "recipe", ".", "tags", ".", "all", "(", ")", "self", ".", "assertEqual", "(", "len", "(", "tags", ")", ",", "0", ")" ]
[ 188, 4 ]
[ 205, 38 ]
python
en
['en', 'en', 'en']
True
RecipeImageUploadTests.test_upload_image_to_recipe
(self)
Test uploading an image to recipe
Test uploading an image to recipe
def test_upload_image_to_recipe(self): """Test uploading an image to recipe""" url = image_upload_url(self.recipe.id) with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf: img = Image.new('RGB', (10, 10)) img.save(ntf, format='JPEG') ntf.seek(0) res = self.client.post(url, {'image': ntf}, format='multipart') self.recipe.refresh_from_db() self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertIn('image', res.data) self.assertTrue(os.path.exists(self.recipe.image.path))
[ "def", "test_upload_image_to_recipe", "(", "self", ")", ":", "url", "=", "image_upload_url", "(", "self", ".", "recipe", ".", "id", ")", "with", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.jpg'", ")", "as", "ntf", ":", "img", "=", "Image", ".", "new", "(", "'RGB'", ",", "(", "10", ",", "10", ")", ")", "img", ".", "save", "(", "ntf", ",", "format", "=", "'JPEG'", ")", "ntf", ".", "seek", "(", "0", ")", "res", "=", "self", ".", "client", ".", "post", "(", "url", ",", "{", "'image'", ":", "ntf", "}", ",", "format", "=", "'multipart'", ")", "self", ".", "recipe", ".", "refresh_from_db", "(", ")", "self", ".", "assertEqual", "(", "res", ".", "status_code", ",", "status", ".", "HTTP_200_OK", ")", "self", ".", "assertIn", "(", "'image'", ",", "res", ".", "data", ")", "self", ".", "assertTrue", "(", "os", ".", "path", ".", "exists", "(", "self", ".", "recipe", ".", "image", ".", "path", ")", ")" ]
[ 222, 4 ]
[ 234, 63 ]
python
en
['en', 'en', 'en']
True
RecipeImageUploadTests.test_upload_image_bad_request
(self)
Test uploading an invalid image
Test uploading an invalid image
def test_upload_image_bad_request(self): """Test uploading an invalid image""" url = image_upload_url(self.recipe.id) res = self.client.post(url, {'image': 'noimage'}, format='multipart') self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
[ "def", "test_upload_image_bad_request", "(", "self", ")", ":", "url", "=", "image_upload_url", "(", "self", ".", "recipe", ".", "id", ")", "res", "=", "self", ".", "client", ".", "post", "(", "url", ",", "{", "'image'", ":", "'noimage'", "}", ",", "format", "=", "'multipart'", ")", "self", ".", "assertEqual", "(", "res", ".", "status_code", ",", "status", ".", "HTTP_400_BAD_REQUEST", ")" ]
[ 236, 4 ]
[ 241, 70 ]
python
en
['en', 'zu', 'en']
True
RecipeImageUploadTests.test_filter_recipe_by_tags
(self)
Test returning recipes with specific tags
Test returning recipes with specific tags
def test_filter_recipe_by_tags(self): """Test returning recipes with specific tags""" recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry') recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini') recipe3 = sample_recipe(user=self.user, title='Fish and chips') tag1 = sample_tag(user=self.user, name='Vegan') tag2 = sample_tag(user=self.user, name='Vegetarian') recipe1.tags.add(tag1) recipe2.tags.add(tag2) res = self.client.get( RECIPES_URL, {'tags': f'{tag1.id},{tag2.id}'} ) serializer1 = RecipeSerializer(recipe1) serializer2 = RecipeSerializer(recipe2) serializer3 = RecipeSerializer(recipe3) self.assertIn(serializer1.data, res.data) self.assertIn(serializer2.data, res.data) self.assertNotIn(serializer3.data, res.data)
[ "def", "test_filter_recipe_by_tags", "(", "self", ")", ":", "recipe1", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ",", "title", "=", "'Thai vegetable curry'", ")", "recipe2", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ",", "title", "=", "'Aubergine with tahini'", ")", "recipe3", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ",", "title", "=", "'Fish and chips'", ")", "tag1", "=", "sample_tag", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Vegan'", ")", "tag2", "=", "sample_tag", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Vegetarian'", ")", "recipe1", ".", "tags", ".", "add", "(", "tag1", ")", "recipe2", ".", "tags", ".", "add", "(", "tag2", ")", "res", "=", "self", ".", "client", ".", "get", "(", "RECIPES_URL", ",", "{", "'tags'", ":", "f'{tag1.id},{tag2.id}'", "}", ")", "serializer1", "=", "RecipeSerializer", "(", "recipe1", ")", "serializer2", "=", "RecipeSerializer", "(", "recipe2", ")", "serializer3", "=", "RecipeSerializer", "(", "recipe3", ")", "self", ".", "assertIn", "(", "serializer1", ".", "data", ",", "res", ".", "data", ")", "self", ".", "assertIn", "(", "serializer2", ".", "data", ",", "res", ".", "data", ")", "self", ".", "assertNotIn", "(", "serializer3", ".", "data", ",", "res", ".", "data", ")" ]
[ 243, 4 ]
[ 263, 52 ]
python
en
['en', 'en', 'en']
True
RecipeImageUploadTests.test_filter_recipes_by_ingredients
(self)
Test returning recipes with specific ingredients
Test returning recipes with specific ingredients
def test_filter_recipes_by_ingredients(self): """Test returning recipes with specific ingredients""" recipe1 = sample_recipe(user=self.user, title='Posh beans on toast') recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore') recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms') ingredient1 = sample_ingredient(user=self.user, name='Feta cheese') ingredient2 = sample_ingredient(user=self.user, name='Chicken') recipe1.ingredients.add(ingredient1) recipe2.ingredients.add(ingredient2) res = self.client.get( RECIPES_URL, {'ingredients': f'{ingredient1.id},{ingredient2.id}'} ) serializer1 = RecipeSerializer(recipe1) serializer2 = RecipeSerializer(recipe2) serializer3 = RecipeSerializer(recipe3) self.assertIn(serializer1.data, res.data) self.assertIn(serializer2.data, res.data) self.assertNotIn(serializer3.data, res.data)
[ "def", "test_filter_recipes_by_ingredients", "(", "self", ")", ":", "recipe1", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ",", "title", "=", "'Posh beans on toast'", ")", "recipe2", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ",", "title", "=", "'Chicken cacciatore'", ")", "recipe3", "=", "sample_recipe", "(", "user", "=", "self", ".", "user", ",", "title", "=", "'Steak and mushrooms'", ")", "ingredient1", "=", "sample_ingredient", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Feta cheese'", ")", "ingredient2", "=", "sample_ingredient", "(", "user", "=", "self", ".", "user", ",", "name", "=", "'Chicken'", ")", "recipe1", ".", "ingredients", ".", "add", "(", "ingredient1", ")", "recipe2", ".", "ingredients", ".", "add", "(", "ingredient2", ")", "res", "=", "self", ".", "client", ".", "get", "(", "RECIPES_URL", ",", "{", "'ingredients'", ":", "f'{ingredient1.id},{ingredient2.id}'", "}", ")", "serializer1", "=", "RecipeSerializer", "(", "recipe1", ")", "serializer2", "=", "RecipeSerializer", "(", "recipe2", ")", "serializer3", "=", "RecipeSerializer", "(", "recipe3", ")", "self", ".", "assertIn", "(", "serializer1", ".", "data", ",", "res", ".", "data", ")", "self", ".", "assertIn", "(", "serializer2", ".", "data", ",", "res", ".", "data", ")", "self", ".", "assertNotIn", "(", "serializer3", ".", "data", ",", "res", ".", "data", ")" ]
[ 265, 4 ]
[ 285, 52 ]
python
en
['en', 'en', 'en']
True
validation_operator
()
Validation Operator operations
Validation Operator operations
def validation_operator(): """Validation Operator operations""" pass
[ "def", "validation_operator", "(", ")", ":", "pass" ]
[ 25, 0 ]
[ 27, 8 ]
python
en
['en', 'ky', 'en']
True
validation_operator_list
(directory)
List known Validation Operators.
List known Validation Operators.
def validation_operator_list(directory): """List known Validation Operators.""" try: context = DataContext(directory) except ge_exceptions.ConfigNotFoundError as err: cli_message("<red>{}</red>".format(err.message)) return try: validation_operators = context.list_validation_operators() if len(validation_operators) == 0: cli_message("No Validation Operators found") return elif len(validation_operators) == 1: list_intro_string = "1 Validation Operator found:" else: list_intro_string = "{} Validation Operators found:".format( len(validation_operators) ) cli_message(list_intro_string) for validation_operator in validation_operators: cli_message("") cli_message_dict(validation_operator) toolkit.send_usage_message( data_context=context, event="cli.validation_operator.list", success=True ) except Exception as e: toolkit.send_usage_message( data_context=context, event="cli.validation_operator.list", success=False ) raise e
[ "def", "validation_operator_list", "(", "directory", ")", ":", "try", ":", "context", "=", "DataContext", "(", "directory", ")", "except", "ge_exceptions", ".", "ConfigNotFoundError", "as", "err", ":", "cli_message", "(", "\"<red>{}</red>\"", ".", "format", "(", "err", ".", "message", ")", ")", "return", "try", ":", "validation_operators", "=", "context", ".", "list_validation_operators", "(", ")", "if", "len", "(", "validation_operators", ")", "==", "0", ":", "cli_message", "(", "\"No Validation Operators found\"", ")", "return", "elif", "len", "(", "validation_operators", ")", "==", "1", ":", "list_intro_string", "=", "\"1 Validation Operator found:\"", "else", ":", "list_intro_string", "=", "\"{} Validation Operators found:\"", ".", "format", "(", "len", "(", "validation_operators", ")", ")", "cli_message", "(", "list_intro_string", ")", "for", "validation_operator", "in", "validation_operators", ":", "cli_message", "(", "\"\"", ")", "cli_message_dict", "(", "validation_operator", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.list\"", ",", "success", "=", "True", ")", "except", "Exception", "as", "e", ":", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.list\"", ",", "success", "=", "False", ")", "raise", "e" ]
[ 38, 0 ]
[ 70, 15 ]
python
en
['en', 'et', 'en']
True
validation_operator_run
(name, run_name, validation_config_file, suite, directory)
Run a validation operator against some data. There are two modes to run this command: 1. Interactive (good for development): Specify the name of the validation operator using the --name argument and the name of the expectation suite using the --suite argument. The cli will help you specify the batch of data that you want to validate interactively. 2. Non-interactive (good for production): Use the `--validation_config_file` argument to specify the path of the validation configuration JSON file. This file can be used to instruct a validation operator to validate multiple batches of data and use multiple expectation suites to validate each batch. Learn how to create a validation config file here: https://great-expectations.readthedocs.io/en/latest/command_line.html#great-expectations-validation-operator-run-validation-config-file-validation-config-file-path This command exits with 0 if the validation operator ran and the "success" attribute in its return object is True. Otherwise, the command exits with 1. To learn more about validation operators, go here: https://great-expectations.readthedocs.io/en/latest/features/validation.html#validation-operators
Run a validation operator against some data.
def validation_operator_run(name, run_name, validation_config_file, suite, directory): # Note though the long lines here aren't pythonic, they look best if Click does the line wraps. """ Run a validation operator against some data. There are two modes to run this command: 1. Interactive (good for development): Specify the name of the validation operator using the --name argument and the name of the expectation suite using the --suite argument. The cli will help you specify the batch of data that you want to validate interactively. 2. Non-interactive (good for production): Use the `--validation_config_file` argument to specify the path of the validation configuration JSON file. This file can be used to instruct a validation operator to validate multiple batches of data and use multiple expectation suites to validate each batch. Learn how to create a validation config file here: https://great-expectations.readthedocs.io/en/latest/command_line.html#great-expectations-validation-operator-run-validation-config-file-validation-config-file-path This command exits with 0 if the validation operator ran and the "success" attribute in its return object is True. Otherwise, the command exits with 1. 
To learn more about validation operators, go here: https://great-expectations.readthedocs.io/en/latest/features/validation.html#validation-operators """ try: context = DataContext(directory) except ge_exceptions.ConfigNotFoundError as err: cli_message("Failed to process <red>{}</red>".format(err.message)) sys.exit(1) try: if validation_config_file is not None: try: with open(validation_config_file) as f: validation_config = json.load(f) except (OSError, json_parse_exception) as e: cli_message( f"Failed to process the --validation_config_file argument: <red>{e}</red>" ) toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False, ) sys.exit(1) validation_config_error_message = _validate_valdiation_config( validation_config ) if validation_config_error_message is not None: cli_message( "<red>The validation config in {:s} is misconfigured: {:s}</red>".format( validation_config_file, validation_config_error_message ) ) toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False, ) sys.exit(1) else: if suite is None: cli_message( """ Please use --suite argument to specify the name of the expectation suite. Call `great_expectation suite list` command to list the expectation suites in your project. """ ) toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False, ) sys.exit(0) suite = toolkit.load_expectation_suite( context, suite, "cli.validation_operator.run" ) if name is None: cli_message( """ Please use --name argument to specify the name of the validation operator. Call `great_expectation validation-operator list` command to list the operators in your project. """ ) toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False, ) sys.exit(1) else: if name not in context.list_validation_operator_names(): cli_message( f""" Could not find a validation operator {name}. 
Call `great_expectation validation-operator list` command to list the operators in your project. """ ) toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False, ) sys.exit(1) batch_kwargs = None cli_message( """ Let us help you specify the batch of data your want the validation operator to validate.""" ) try: data_source = toolkit.select_datasource(context) except ValueError as ve: cli_message("<red>{}</red>".format(ve)) toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False, ) sys.exit(1) if not data_source: cli_message("<red>No datasources found in the context.</red>") toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False, ) sys.exit(1) if batch_kwargs is None: ( datasource_name, batch_kwargs_generator, data_asset, batch_kwargs, ) = get_batch_kwargs( context, datasource_name=data_source.name, batch_kwargs_generator_name=None, data_asset_name=None, additional_batch_kwargs=None, ) validation_config = { "validation_operator_name": name, "batches": [ { "batch_kwargs": batch_kwargs, "expectation_suite_names": [suite.expectation_suite_name], } ], } try: validation_operator_name = validation_config["validation_operator_name"] batches_to_validate = [] for entry in validation_config["batches"]: for expectation_suite_name in entry["expectation_suite_names"]: batch = context.get_batch( entry["batch_kwargs"], expectation_suite_name ) batches_to_validate.append(batch) if run_name is None: run_name = datetime.datetime.now(datetime.timezone.utc).strftime( "%Y%m%dT%H%M%S.%fZ" ) run_id = RunIdentifier(run_name=run_name) if suite is None: results = context.run_validation_operator( validation_operator_name, assets_to_validate=batches_to_validate, run_id=run_id, ) else: if suite.evaluation_parameters is None: results = context.run_validation_operator( validation_operator_name, assets_to_validate=batches_to_validate, run_id=run_id, ) else: results = 
context.run_validation_operator( validation_operator_name, assets_to_validate=batches_to_validate, run_id=run_id, evaluation_parameters=suite.evaluation_parameters, ) except (ge_exceptions.DataContextError, OSError, SQLAlchemyError) as e: cli_message("<red>{}</red>".format(e)) toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False ) sys.exit(1) if not results["success"]: cli_message("Validation failed!") toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=True ) sys.exit(1) else: cli_message("Validation succeeded!") toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=True ) sys.exit(0) except Exception as e: toolkit.send_usage_message( data_context=context, event="cli.validation_operator.run", success=False ) raise e
[ "def", "validation_operator_run", "(", "name", ",", "run_name", ",", "validation_config_file", ",", "suite", ",", "directory", ")", ":", "# Note though the long lines here aren't pythonic, they look best if Click does the line wraps.", "try", ":", "context", "=", "DataContext", "(", "directory", ")", "except", "ge_exceptions", ".", "ConfigNotFoundError", "as", "err", ":", "cli_message", "(", "\"Failed to process <red>{}</red>\"", ".", "format", "(", "err", ".", "message", ")", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "if", "validation_config_file", "is", "not", "None", ":", "try", ":", "with", "open", "(", "validation_config_file", ")", "as", "f", ":", "validation_config", "=", "json", ".", "load", "(", "f", ")", "except", "(", "OSError", ",", "json_parse_exception", ")", "as", "e", ":", "cli_message", "(", "f\"Failed to process the --validation_config_file argument: <red>{e}</red>\"", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ",", ")", "sys", ".", "exit", "(", "1", ")", "validation_config_error_message", "=", "_validate_valdiation_config", "(", "validation_config", ")", "if", "validation_config_error_message", "is", "not", "None", ":", "cli_message", "(", "\"<red>The validation config in {:s} is misconfigured: {:s}</red>\"", ".", "format", "(", "validation_config_file", ",", "validation_config_error_message", ")", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ",", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "if", "suite", "is", "None", ":", "cli_message", "(", "\"\"\"\nPlease use --suite argument to specify the name of the expectation suite.\nCall `great_expectation suite list` command to list the expectation suites in your project.\n\"\"\"", ")", "toolkit", ".", "send_usage_message", "(", "data_context", 
"=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ",", ")", "sys", ".", "exit", "(", "0", ")", "suite", "=", "toolkit", ".", "load_expectation_suite", "(", "context", ",", "suite", ",", "\"cli.validation_operator.run\"", ")", "if", "name", "is", "None", ":", "cli_message", "(", "\"\"\"\nPlease use --name argument to specify the name of the validation operator.\nCall `great_expectation validation-operator list` command to list the operators in your project.\n\"\"\"", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ",", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "if", "name", "not", "in", "context", ".", "list_validation_operator_names", "(", ")", ":", "cli_message", "(", "f\"\"\"\nCould not find a validation operator {name}.\nCall `great_expectation validation-operator list` command to list the operators in your project.\n\"\"\"", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ",", ")", "sys", ".", "exit", "(", "1", ")", "batch_kwargs", "=", "None", "cli_message", "(", "\"\"\"\nLet us help you specify the batch of data your want the validation operator to validate.\"\"\"", ")", "try", ":", "data_source", "=", "toolkit", ".", "select_datasource", "(", "context", ")", "except", "ValueError", "as", "ve", ":", "cli_message", "(", "\"<red>{}</red>\"", ".", "format", "(", "ve", ")", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ",", ")", "sys", ".", "exit", "(", "1", ")", "if", "not", "data_source", ":", "cli_message", "(", "\"<red>No datasources found in the context.</red>\"", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", 
"event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ",", ")", "sys", ".", "exit", "(", "1", ")", "if", "batch_kwargs", "is", "None", ":", "(", "datasource_name", ",", "batch_kwargs_generator", ",", "data_asset", ",", "batch_kwargs", ",", ")", "=", "get_batch_kwargs", "(", "context", ",", "datasource_name", "=", "data_source", ".", "name", ",", "batch_kwargs_generator_name", "=", "None", ",", "data_asset_name", "=", "None", ",", "additional_batch_kwargs", "=", "None", ",", ")", "validation_config", "=", "{", "\"validation_operator_name\"", ":", "name", ",", "\"batches\"", ":", "[", "{", "\"batch_kwargs\"", ":", "batch_kwargs", ",", "\"expectation_suite_names\"", ":", "[", "suite", ".", "expectation_suite_name", "]", ",", "}", "]", ",", "}", "try", ":", "validation_operator_name", "=", "validation_config", "[", "\"validation_operator_name\"", "]", "batches_to_validate", "=", "[", "]", "for", "entry", "in", "validation_config", "[", "\"batches\"", "]", ":", "for", "expectation_suite_name", "in", "entry", "[", "\"expectation_suite_names\"", "]", ":", "batch", "=", "context", ".", "get_batch", "(", "entry", "[", "\"batch_kwargs\"", "]", ",", "expectation_suite_name", ")", "batches_to_validate", ".", "append", "(", "batch", ")", "if", "run_name", "is", "None", ":", "run_name", "=", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", ".", "strftime", "(", "\"%Y%m%dT%H%M%S.%fZ\"", ")", "run_id", "=", "RunIdentifier", "(", "run_name", "=", "run_name", ")", "if", "suite", "is", "None", ":", "results", "=", "context", ".", "run_validation_operator", "(", "validation_operator_name", ",", "assets_to_validate", "=", "batches_to_validate", ",", "run_id", "=", "run_id", ",", ")", "else", ":", "if", "suite", ".", "evaluation_parameters", "is", "None", ":", "results", "=", "context", ".", "run_validation_operator", "(", "validation_operator_name", ",", "assets_to_validate", "=", "batches_to_validate", ",", "run_id", 
"=", "run_id", ",", ")", "else", ":", "results", "=", "context", ".", "run_validation_operator", "(", "validation_operator_name", ",", "assets_to_validate", "=", "batches_to_validate", ",", "run_id", "=", "run_id", ",", "evaluation_parameters", "=", "suite", ".", "evaluation_parameters", ",", ")", "except", "(", "ge_exceptions", ".", "DataContextError", ",", "OSError", ",", "SQLAlchemyError", ")", "as", "e", ":", "cli_message", "(", "\"<red>{}</red>\"", ".", "format", "(", "e", ")", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ")", "sys", ".", "exit", "(", "1", ")", "if", "not", "results", "[", "\"success\"", "]", ":", "cli_message", "(", "\"Validation failed!\"", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "True", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "cli_message", "(", "\"Validation succeeded!\"", ")", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "True", ")", "sys", ".", "exit", "(", "0", ")", "except", "Exception", "as", "e", ":", "toolkit", ".", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.validation_operator.run\"", ",", "success", "=", "False", ")", "raise", "e" ]
[ 110, 0 ]
[ 335, 15 ]
python
en
['en', 'error', 'th']
False
DBReporting.begin
(self)
At the start of the run, we want to record the test execution information in the database.
At the start of the run, we want to record the test execution information in the database.
def begin(self): """ At the start of the run, we want to record the test execution information in the database. """ exec_payload = ExecutionQueryPayload() exec_payload.execution_start_time = int(time.time() * 1000) self.execution_start_time = exec_payload.execution_start_time exec_payload.guid = self.execution_guid exec_payload.username = getpass.getuser() self.testcase_manager.insert_execution_data(exec_payload)
[ "def", "begin", "(", "self", ")", ":", "exec_payload", "=", "ExecutionQueryPayload", "(", ")", "exec_payload", ".", "execution_start_time", "=", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "self", ".", "execution_start_time", "=", "exec_payload", ".", "execution_start_time", "exec_payload", ".", "guid", "=", "self", ".", "execution_guid", "exec_payload", ".", "username", "=", "getpass", ".", "getuser", "(", ")", "self", ".", "testcase_manager", ".", "insert_execution_data", "(", "exec_payload", ")" ]
[ 55, 4 ]
[ 63, 65 ]
python
en
['en', 'en', 'en']
True
DBReporting.startTest
(self, test)
At the start of the test, set the testcase details.
At the start of the test, set the testcase details.
def startTest(self, test): """ At the start of the test, set the testcase details. """ data_payload = TestcaseDataPayload() self.testcase_guid = str(uuid.uuid4()) data_payload.guid = self.testcase_guid data_payload.execution_guid = self.execution_guid if hasattr(test, "browser"): data_payload.browser = test.browser else: data_payload.browser = "N/A" data_payload.test_address = test.id() application = ApplicationManager.generate_application_string(test) data_payload.env = application.split('.')[0] data_payload.start_time = application.split('.')[1] data_payload.state = constants.State.NOTRUN self.testcase_manager.insert_testcase_data(data_payload) self.case_start_time = int(time.time() * 1000) # Make the testcase guid available to other plugins test.testcase_guid = self.testcase_guid
[ "def", "startTest", "(", "self", ",", "test", ")", ":", "data_payload", "=", "TestcaseDataPayload", "(", ")", "self", ".", "testcase_guid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "data_payload", ".", "guid", "=", "self", ".", "testcase_guid", "data_payload", ".", "execution_guid", "=", "self", ".", "execution_guid", "if", "hasattr", "(", "test", ",", "\"browser\"", ")", ":", "data_payload", ".", "browser", "=", "test", ".", "browser", "else", ":", "data_payload", ".", "browser", "=", "\"N/A\"", "data_payload", ".", "test_address", "=", "test", ".", "id", "(", ")", "application", "=", "ApplicationManager", ".", "generate_application_string", "(", "test", ")", "data_payload", ".", "env", "=", "application", ".", "split", "(", "'.'", ")", "[", "0", "]", "data_payload", ".", "start_time", "=", "application", ".", "split", "(", "'.'", ")", "[", "1", "]", "data_payload", ".", "state", "=", "constants", ".", "State", ".", "NOTRUN", "self", ".", "testcase_manager", ".", "insert_testcase_data", "(", "data_payload", ")", "self", ".", "case_start_time", "=", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "# Make the testcase guid available to other plugins", "test", ".", "testcase_guid", "=", "self", ".", "testcase_guid" ]
[ 65, 4 ]
[ 83, 47 ]
python
en
['en', 'en', 'en']
True
DBReporting.finalize
(self, result)
At the end of the run, we want to update the DB row with the execution time.
At the end of the run, we want to update the DB row with the execution time.
def finalize(self, result): """ At the end of the run, we want to update the DB row with the execution time. """ runtime = int(time.time() * 1000) - self.execution_start_time self.testcase_manager.update_execution_data(self.execution_guid, runtime)
[ "def", "finalize", "(", "self", ",", "result", ")", ":", "runtime", "=", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "-", "self", ".", "execution_start_time", "self", ".", "testcase_manager", ".", "update_execution_data", "(", "self", ".", "execution_guid", ",", "runtime", ")" ]
[ 85, 4 ]
[ 90, 60 ]
python
en
['en', 'en', 'en']
True
DBReporting.addSuccess
(self, test, capt)
After test completion, we want to record testcase run information.
After test completion, we want to record testcase run information.
def addSuccess(self, test, capt): """ After test completion, we want to record testcase run information. """ self.__insert_test_result(constants.State.PASS, test)
[ "def", "addSuccess", "(", "self", ",", "test", ",", "capt", ")", ":", "self", ".", "__insert_test_result", "(", "constants", ".", "State", ".", "PASS", ",", "test", ")" ]
[ 92, 4 ]
[ 96, 61 ]
python
en
['en', 'error', 'th']
False
DBReporting.addError
(self, test, err, capt=None)
After a test error, we want to record testcase run information.
After a test error, we want to record testcase run information.
def addError(self, test, err, capt=None): """ After a test error, we want to record testcase run information. """ self.__insert_test_result(constants.State.ERROR, test, err)
[ "def", "addError", "(", "self", ",", "test", ",", "err", ",", "capt", "=", "None", ")", ":", "self", ".", "__insert_test_result", "(", "constants", ".", "State", ".", "ERROR", ",", "test", ",", "err", ")" ]
[ 98, 4 ]
[ 102, 67 ]
python
en
['en', 'error', 'th']
False
DBReporting.handleError
(self, test, err, capt=None)
After a test error, we want to record testcase run information. "Error" also encompasses any states other than Pass or Fail, so we check for those first.
After a test error, we want to record testcase run information. "Error" also encompasses any states other than Pass or Fail, so we check for those first.
def handleError(self, test, err, capt=None): """ After a test error, we want to record testcase run information. "Error" also encompasses any states other than Pass or Fail, so we check for those first. """ if err[0] == errors.BlockedTest: self.__insert_test_result(constants.State.BLOCKED, test, err) self.error_handled = True raise SkipTest(err[1]) return True elif err[0] == errors.DeprecatedTest: self.__insert_test_result(constants.State.DEPRECATED, test, err) self.error_handled = True raise SkipTest(err[1]) return True elif err[0] == errors.SkipTest: self.__insert_test_result(constants.State.SKIP, test, err) self.error_handled = True raise SkipTest(err[1]) return True
[ "def", "handleError", "(", "self", ",", "test", ",", "err", ",", "capt", "=", "None", ")", ":", "if", "err", "[", "0", "]", "==", "errors", ".", "BlockedTest", ":", "self", ".", "__insert_test_result", "(", "constants", ".", "State", ".", "BLOCKED", ",", "test", ",", "err", ")", "self", ".", "error_handled", "=", "True", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True", "elif", "err", "[", "0", "]", "==", "errors", ".", "DeprecatedTest", ":", "self", ".", "__insert_test_result", "(", "constants", ".", "State", ".", "DEPRECATED", ",", "test", ",", "err", ")", "self", ".", "error_handled", "=", "True", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True", "elif", "err", "[", "0", "]", "==", "errors", ".", "SkipTest", ":", "self", ".", "__insert_test_result", "(", "constants", ".", "State", ".", "SKIP", ",", "test", ",", "err", ")", "self", ".", "error_handled", "=", "True", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True" ]
[ 104, 4 ]
[ 126, 23 ]
python
en
['en', 'error', 'th']
False