Dataset columns:

    identifier                       string, length 1 to 155
    parameters                       string, length 2 to 6.09k
    docstring                        string, length 11 to 63.4k
    docstring_summary                string, length 0 to 63.4k
    function                         string, length 29 to 99.8k
    function_tokens                  sequence
    start_point                      sequence
    end_point                        sequence
    language                         string, 1 distinct value
    docstring_language               string, length 2 to 7
    docstring_language_predictions   string, length 18 to 23
    is_langid_reliable               string, 2 distinct values
test_snapshot_BasicSuiteBuilderProfiler_on_titanic_in_demo_mode
()
A snapshot regression test for BasicSuiteBuilderProfiler. We are running the profiler on the Titanic dataset and comparing the EVRs to ones retrieved from a previously stored file.
A snapshot regression test for BasicSuiteBuilderProfiler. We are running the profiler on the Titanic dataset and comparing the EVRs to ones retrieved from a previously stored file.
def test_snapshot_BasicSuiteBuilderProfiler_on_titanic_in_demo_mode():
    """
    A snapshot regression test for BasicSuiteBuilderProfiler.
    We are running the profiler on the Titanic dataset
    and comparing the EVRs to ones retrieved from a
    previously stored file.
    """
    df = ge.read_csv(file_relative_path(__file__, "../test_sets/Titanic.csv"))
    suite, evrs = df.profile(BasicSuiteBuilderProfiler, profiler_configuration="demo")

    # Check to make sure BasicSuiteBuilderProfiler is adding meta.columns with a single "description" field for each column
    assert "columns" in suite.meta
    for k, v in suite.meta["columns"].items():
        assert v == {"description": ""}

    # Note: the above already produces an EVR; rerunning isn't strictly necessary just for EVRs
    evrs = df.validate(result_format="SUMMARY")

    # THIS IS NOT DEAD CODE. UNCOMMENT TO SAVE A SNAPSHOT WHEN UPDATING THIS TEST
    expected_filepath = file_relative_path(
        __file__,
        "./fixtures/expected_evrs_BasicSuiteBuilderProfiler_on_titanic_demo_mode.json",
    )
    # with open(expected_filepath, 'w+') as file:
    #     json.dump(expectationSuiteValidationResultSchema.dump(evrs), file, indent=2)
    # with open(file_relative_path(__file__, '../render/fixtures/BasicSuiteBuilderProfiler_evrs.json'), 'w+') as file:
    #     json.dump(expectationSuiteValidationResultSchema.dump(evrs), file, indent=2)

    with open(
        expected_filepath,
    ) as file:
        expected_evrs = expectationSuiteValidationResultSchema.load(
            json.load(file, object_pairs_hook=OrderedDict)
        )

    # We know that python 2 does not guarantee the order of value_counts, which causes a different
    # order for items in the partial_unexpected_value_counts list
    # Remove those before assertions.
    for result in evrs.results:
        if "partial_unexpected_counts" in result.result:
            result.result.pop("partial_unexpected_counts")
    for result in expected_evrs.results:
        if "partial_unexpected_counts" in result.result:
            result.result.pop("partial_unexpected_counts")

    # Version, run_id, batch id will be different
    del expected_evrs.meta["great_expectations_version"]
    del evrs.meta["great_expectations_version"]
    del expected_evrs.meta["run_id"]
    del evrs.meta["run_id"]
    del expected_evrs.meta["batch_kwargs"]["ge_batch_id"]
    del evrs.meta["batch_kwargs"]["ge_batch_id"]
    del evrs.meta["validation_time"]

    assert evrs == expected_evrs
[ "def", "test_snapshot_BasicSuiteBuilderProfiler_on_titanic_in_demo_mode", "(", ")", ":", "df", "=", "ge", ".", "read_csv", "(", "file_relative_path", "(", "__file__", ",", "\"../test_sets/Titanic.csv\"", ")", ")", "suite", ",", "evrs", "=", "df", ".", "profile", "(", "BasicSuiteBuilderProfiler", ",", "profiler_configuration", "=", "\"demo\"", ")", "# Check to make sure BasicSuiteBuilderProfiler is adding meta.columns with a single \"description\" field for each column", "assert", "\"columns\"", "in", "suite", ".", "meta", "for", "k", ",", "v", "in", "suite", ".", "meta", "[", "\"columns\"", "]", ".", "items", "(", ")", ":", "assert", "v", "==", "{", "\"description\"", ":", "\"\"", "}", "# Note: the above already produces an EVR; rerunning isn't strictly necessary just for EVRs", "evrs", "=", "df", ".", "validate", "(", "result_format", "=", "\"SUMMARY\"", ")", "# THIS IS NOT DEAD CODE. UNCOMMENT TO SAVE A SNAPSHOT WHEN UPDATING THIS TEST", "expected_filepath", "=", "file_relative_path", "(", "__file__", ",", "\"./fixtures/expected_evrs_BasicSuiteBuilderProfiler_on_titanic_demo_mode.json\"", ",", ")", "# with open(expected_filepath, 'w+') as file:", "# json.dump(expectationSuiteValidationResultSchema.dump(evrs), file, indent=2)", "# with open(file_relative_path(__file__, '../render/fixtures/BasicSuiteBuilderProfiler_evrs.json'), 'w+') as file:", "# json.dump(expectationSuiteValidationResultSchema.dump(evrs), file, indent=2)", "with", "open", "(", "expected_filepath", ",", ")", "as", "file", ":", "expected_evrs", "=", "expectationSuiteValidationResultSchema", ".", "load", "(", "json", ".", "load", "(", "file", ",", "object_pairs_hook", "=", "OrderedDict", ")", ")", "# We know that python 2 does not guarantee the order of value_counts, which causes a different", "# order for items in the partial_unexpected_value_counts list", "# Remove those before assertions.", "for", "result", "in", "evrs", ".", "results", ":", "if", "\"partial_unexpected_counts\"", "in", "result", ".", "result", ":", "result", ".", "result", ".", "pop", "(", "\"partial_unexpected_counts\"", ")", "for", "result", "in", "expected_evrs", ".", "results", ":", "if", "\"partial_unexpected_counts\"", "in", "result", ".", "result", ":", "result", ".", "result", ".", "pop", "(", "\"partial_unexpected_counts\"", ")", "# Version, run_id, batch id will be different", "del", "expected_evrs", ".", "meta", "[", "\"great_expectations_version\"", "]", "del", "evrs", ".", "meta", "[", "\"great_expectations_version\"", "]", "del", "expected_evrs", ".", "meta", "[", "\"run_id\"", "]", "del", "evrs", ".", "meta", "[", "\"run_id\"", "]", "del", "expected_evrs", ".", "meta", "[", "\"batch_kwargs\"", "]", "[", "\"ge_batch_id\"", "]", "del", "evrs", ".", "meta", "[", "\"batch_kwargs\"", "]", "[", "\"ge_batch_id\"", "]", "del", "evrs", ".", "meta", "[", "\"validation_time\"", "]", "assert", "evrs", "==", "expected_evrs" ]
[ 442, 0 ]
[ 500, 32 ]
python
en
['en', 'error', 'th']
False
test_snapshot_BasicSuiteBuilderProfiler_on_titanic_with_builder_configuration
()
A snapshot regression test for BasicSuiteBuilderProfiler. We are running the profiler on the Titanic dataset and comparing the EVRs to ones retrieved from a previously stored file.
A snapshot regression test for BasicSuiteBuilderProfiler.
def test_snapshot_BasicSuiteBuilderProfiler_on_titanic_with_builder_configuration():
    """
    A snapshot regression test for BasicSuiteBuilderProfiler.
    We are running the profiler on the Titanic dataset
    and comparing the EVRs to ones retrieved from a
    previously stored file.
    """
    batch = ge.read_csv(file_relative_path(__file__, "../test_sets/Titanic.csv"))
    suite, evrs = BasicSuiteBuilderProfiler().profile(
        batch,
        profiler_configuration={
            "included_columns": ["Name", "PClass", "Age", "Sex", "SexCode"]
        },
    )

    # Check to make sure SuiteBuilderProfiler is adding meta.columns with a single "description" field for each column
    assert "columns" in suite.meta
    for k, v in suite.meta["columns"].items():
        assert v == {"description": ""}

    # Note: the above already produces an EVR; rerunning isn't strictly necessary just for EVRs
    evrs = batch.validate(result_format="SUMMARY")

    expected_filepath = file_relative_path(
        __file__,
        "./fixtures/expected_evrs_SuiteBuilderProfiler_on_titanic_with_configurations.json",
    )
    # THIS IS NOT DEAD CODE. UNCOMMENT TO SAVE A SNAPSHOT WHEN UPDATING THIS TEST
    # with open(expected_filepath, 'w+') as file:
    #     json.dump(expectationSuiteValidationResultSchema.dump(evrs), file, indent=2)
    # with open(file_relative_path(__file__, '../render/fixtures/SuiteBuilderProfiler_evrs.json'), 'w+') as file:
    #     json.dump(expectationSuiteValidationResultSchema.dump(evrs), file, indent=2)

    with open(
        expected_filepath,
    ) as file:
        expected_evrs = expectationSuiteValidationResultSchema.load(
            json.load(file, object_pairs_hook=OrderedDict)
        )

    # Version and RUN-ID will be different
    del expected_evrs.meta["great_expectations_version"]
    del evrs.meta["great_expectations_version"]
    del expected_evrs.meta["run_id"]
    del expected_evrs.meta["batch_kwargs"]["ge_batch_id"]
    del evrs.meta["run_id"]
    del evrs.meta["batch_kwargs"]["ge_batch_id"]
    del evrs.meta["validation_time"]

    assert evrs == expected_evrs
[ "def", "test_snapshot_BasicSuiteBuilderProfiler_on_titanic_with_builder_configuration", "(", ")", ":", "batch", "=", "ge", ".", "read_csv", "(", "file_relative_path", "(", "__file__", ",", "\"../test_sets/Titanic.csv\"", ")", ")", "suite", ",", "evrs", "=", "BasicSuiteBuilderProfiler", "(", ")", ".", "profile", "(", "batch", ",", "profiler_configuration", "=", "{", "\"included_columns\"", ":", "[", "\"Name\"", ",", "\"PClass\"", ",", "\"Age\"", ",", "\"Sex\"", ",", "\"SexCode\"", "]", "}", ",", ")", "# Check to make sure SuiteBuilderProfiler is adding meta.columns with a single \"description\" field for each column", "assert", "\"columns\"", "in", "suite", ".", "meta", "for", "k", ",", "v", "in", "suite", ".", "meta", "[", "\"columns\"", "]", ".", "items", "(", ")", ":", "assert", "v", "==", "{", "\"description\"", ":", "\"\"", "}", "# Note: the above already produces an EVR; rerunning isn't strictly necessary just for EVRs", "evrs", "=", "batch", ".", "validate", "(", "result_format", "=", "\"SUMMARY\"", ")", "expected_filepath", "=", "file_relative_path", "(", "__file__", ",", "\"./fixtures/expected_evrs_SuiteBuilderProfiler_on_titanic_with_configurations.json\"", ",", ")", "# THIS IS NOT DEAD CODE. UNCOMMENT TO SAVE A SNAPSHOT WHEN UPDATING THIS TEST", "# with open(expected_filepath, 'w+') as file:", "# json.dump(expectationSuiteValidationResultSchema.dump(evrs), file, indent=2)", "# with open(file_relative_path(__file__, '../render/fixtures/SuiteBuilderProfiler_evrs.json'), 'w+') as file:", "# json.dump(expectationSuiteValidationResultSchema.dump(evrs), file, indent=2)", "with", "open", "(", "expected_filepath", ",", ")", "as", "file", ":", "expected_evrs", "=", "expectationSuiteValidationResultSchema", ".", "load", "(", "json", ".", "load", "(", "file", ",", "object_pairs_hook", "=", "OrderedDict", ")", ")", "# Version and RUN-ID will be different", "del", "expected_evrs", ".", "meta", "[", "\"great_expectations_version\"", "]", "del", "evrs", ".", "meta", "[", "\"great_expectations_version\"", "]", "del", "expected_evrs", ".", "meta", "[", "\"run_id\"", "]", "del", "expected_evrs", ".", "meta", "[", "\"batch_kwargs\"", "]", "[", "\"ge_batch_id\"", "]", "del", "evrs", ".", "meta", "[", "\"run_id\"", "]", "del", "evrs", ".", "meta", "[", "\"batch_kwargs\"", "]", "[", "\"ge_batch_id\"", "]", "del", "evrs", ".", "meta", "[", "\"validation_time\"", "]", "assert", "evrs", "==", "expected_evrs" ]
[ 1297, 0 ]
[ 1346, 32 ]
python
en
['en', 'error', 'th']
False
test_render_checkpoint_new_notebook_with_available_data_asset
( deterministic_asset_dataconnector_context, titanic_expectation_suite, checkpoint_new_notebook_assets, )
What does this test and why? The CheckpointNewNotebookRenderer should generate a notebook with an example SimpleCheckpoint yaml config based on the first available data asset.
What does this test and why? The CheckpointNewNotebookRenderer should generate a notebook with an example SimpleCheckpoint yaml config based on the first available data asset.
def test_render_checkpoint_new_notebook_with_available_data_asset(
    deterministic_asset_dataconnector_context,
    titanic_expectation_suite,
    checkpoint_new_notebook_assets,
):
    """
    What does this test and why?
    The CheckpointNewNotebookRenderer should generate a notebook with an example
    SimpleCheckpoint yaml config based on the first available data asset.
    """
    context: DataContext = deterministic_asset_dataconnector_context
    assert context.list_checkpoints() == []
    context.save_expectation_suite(titanic_expectation_suite)
    assert context.list_expectation_suite_names() == ["Titanic.warning"]

    checkpoint_new_notebook_renderer = CheckpointNewNotebookRenderer(
        context=context, checkpoint_name="my_checkpoint_name"
    )
    obs: nbformat.NotebookNode = checkpoint_new_notebook_renderer.render()

    assert isinstance(obs, dict)

    expected_cells = (
        checkpoint_new_notebook_assets["header"]
        + checkpoint_new_notebook_assets["imports"]
        + checkpoint_new_notebook_assets[
            "sample_checkpoint_config_markdown_description"
        ]
        # Testing to make sure everything in the notebook but especially this checkpoint config code is correct.
        + checkpoint_new_notebook_assets["sample_checkpoint_config_code_correct"]
        + checkpoint_new_notebook_assets["optional_customize_your_config"]
        + checkpoint_new_notebook_assets["test_and_save_your_checkpoint_configuration"]
        + checkpoint_new_notebook_assets["review_checkpoint"]
        + checkpoint_new_notebook_assets["add_checkpoint"]
        + checkpoint_new_notebook_assets["optional_run_checkpoint"]
    )

    expected = {
        "nbformat": 4,
        "nbformat_minor": 4,
        "metadata": {},
        "cells": expected_cells,
    }

    del expected["nbformat_minor"]
    del obs["nbformat_minor"]
    for obs_cell, expected_cell in zip(obs["cells"], expected["cells"]):
        obs_cell.pop("id", None)
        assert obs_cell == expected_cell
    assert obs == expected
[ "def", "test_render_checkpoint_new_notebook_with_available_data_asset", "(", "deterministic_asset_dataconnector_context", ",", "titanic_expectation_suite", ",", "checkpoint_new_notebook_assets", ",", ")", ":", "context", ":", "DataContext", "=", "deterministic_asset_dataconnector_context", "assert", "context", ".", "list_checkpoints", "(", ")", "==", "[", "]", "context", ".", "save_expectation_suite", "(", "titanic_expectation_suite", ")", "assert", "context", ".", "list_expectation_suite_names", "(", ")", "==", "[", "\"Titanic.warning\"", "]", "checkpoint_new_notebook_renderer", "=", "CheckpointNewNotebookRenderer", "(", "context", "=", "context", ",", "checkpoint_name", "=", "\"my_checkpoint_name\"", ")", "obs", ":", "nbformat", ".", "NotebookNode", "=", "checkpoint_new_notebook_renderer", ".", "render", "(", ")", "assert", "isinstance", "(", "obs", ",", "dict", ")", "expected_cells", "=", "(", "checkpoint_new_notebook_assets", "[", "\"header\"", "]", "+", "checkpoint_new_notebook_assets", "[", "\"imports\"", "]", "+", "checkpoint_new_notebook_assets", "[", "\"sample_checkpoint_config_markdown_description\"", "]", "# Testing to make sure everything in the notebook but especially this checkpoint config code is correct.", "+", "checkpoint_new_notebook_assets", "[", "\"sample_checkpoint_config_code_correct\"", "]", "+", "checkpoint_new_notebook_assets", "[", "\"optional_customize_your_config\"", "]", "+", "checkpoint_new_notebook_assets", "[", "\"test_and_save_your_checkpoint_configuration\"", "]", "+", "checkpoint_new_notebook_assets", "[", "\"review_checkpoint\"", "]", "+", "checkpoint_new_notebook_assets", "[", "\"add_checkpoint\"", "]", "+", "checkpoint_new_notebook_assets", "[", "\"optional_run_checkpoint\"", "]", ")", "expected", "=", "{", "\"nbformat\"", ":", "4", ",", "\"nbformat_minor\"", ":", "4", ",", "\"metadata\"", ":", "{", "}", ",", "\"cells\"", ":", "expected_cells", ",", "}", "del", "expected", "[", "\"nbformat_minor\"", "]", "del", "obs", "[", "\"nbformat_minor\"", "]", "for", "obs_cell", ",", "expected_cell", "in", "zip", "(", "obs", "[", "\"cells\"", "]", ",", "expected", "[", "\"cells\"", "]", ")", ":", "obs_cell", ".", "pop", "(", "\"id\"", ",", "None", ")", "assert", "obs_cell", "==", "expected_cell", "assert", "obs", "==", "expected" ]
[ 256, 0 ]
[ 306, 26 ]
python
en
['en', 'error', 'th']
False
aclose_forcefully
(resource)
Close an async resource or async generator immediately, without blocking to do any graceful cleanup. :class:`~trio.abc.AsyncResource` objects guarantee that if their :meth:`~trio.abc.AsyncResource.aclose` method is cancelled, then they will still close the resource (albeit in a potentially ungraceful fashion). :func:`aclose_forcefully` is a convenience function that exploits this behavior to let you force a resource to be closed without blocking: it works by calling ``await resource.aclose()`` and then cancelling it immediately. Most users won't need this, but it may be useful on cleanup paths where you can't afford to block, or if you want to close a resource and don't care about handling it gracefully. For example, if :class:`~trio.SSLStream` encounters an error and cannot perform its own graceful close, then there's no point in waiting to gracefully shut down the underlying transport either, so it calls ``await aclose_forcefully(self.transport_stream)``. Note that this function is async, and that it acts as a checkpoint, but unlike most async functions it cannot block indefinitely (at least, assuming the underlying resource object is correctly implemented).
Close an async resource or async generator immediately, without blocking to do any graceful cleanup.
async def aclose_forcefully(resource):
    """Close an async resource or async generator immediately, without
    blocking to do any graceful cleanup.

    :class:`~trio.abc.AsyncResource` objects guarantee that if their
    :meth:`~trio.abc.AsyncResource.aclose` method is cancelled, then they will
    still close the resource (albeit in a potentially ungraceful fashion).
    :func:`aclose_forcefully` is a convenience function that exploits this
    behavior to let you force a resource to be closed without blocking: it
    works by calling ``await resource.aclose()`` and then cancelling it
    immediately.

    Most users won't need this, but it may be useful on cleanup paths where
    you can't afford to block, or if you want to close a resource and don't
    care about handling it gracefully. For example, if
    :class:`~trio.SSLStream` encounters an error and cannot perform its own
    graceful close, then there's no point in waiting to gracefully shut down
    the underlying transport either, so it calls
    ``await aclose_forcefully(self.transport_stream)``.

    Note that this function is async, and that it acts as a checkpoint, but
    unlike most async functions it cannot block indefinitely (at least,
    assuming the underlying resource object is correctly implemented).
    """
    with trio.CancelScope() as cs:
        cs.cancel()
        await resource.aclose()
[ "async", "def", "aclose_forcefully", "(", "resource", ")", ":", "with", "trio", ".", "CancelScope", "(", ")", "as", "cs", ":", "cs", ".", "cancel", "(", ")", "await", "resource", ".", "aclose", "(", ")" ]
[ 8, 0 ]
[ 35, 31 ]
python
en
['en', 'en', 'en']
True
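A minimal usage sketch of the force-close pattern above, using trio's in-memory test streams (the stream setup here is illustrative, not part of the record):

    import trio
    from trio.testing import memory_stream_pair

    async def main():
        stream_a, stream_b = memory_stream_pair()  # bidirectional in-memory stream pair
        await stream_a.send_all(b"partial data")
        # aclose() runs inside an already-cancelled scope: the call checkpoints
        # but never blocks waiting for a graceful shutdown.
        await trio.aclose_forcefully(stream_a)

    trio.run(main)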
start_datarun
(orex, experiment, pipeline)
Start executing a Datarun and store the results on DB. Args: orex (OrionExplorer): OrionExplorer instance to use to store the results inside the Database. experiment (Experiment or ObjectId or str): The Experiment to which the created Datarun will belong. pipeline (Pipeline or ObjectId or str): Pipeline to use for the Datarun.
Start executing a Datarun and store the results on DB.
def start_datarun(orex, experiment, pipeline):
    """Start executing a Datarun and store the results on DB.

    Args:
        orex (OrionExplorer):
            OrionExplorer instance to use to store the results inside the Database.
        experiment (Experiment or ObjectId or str):
            The Experiment to which the created Datarun will belong.
        pipeline (Pipeline or ObjectId or str):
            Pipeline to use for the Datarun.
    """
    datarun = orex.add_datarun(experiment, pipeline)
    datarun.start()
    LOGGER.info('Datarun %s started', datarun.id)
    try:
        for signal in experiment.signals:
            start_signalrun(orex, datarun, signal)
        status = datarun.STATUS_SUCCESS
    except Exception:
        LOGGER.exception('Datarun %s crashed', datarun.id)
        status = datarun.STATUS_ERRORED
    datarun.end(status)
[ "def", "start_datarun", "(", "orex", ",", "experiment", ",", "pipeline", ")", ":", "datarun", "=", "orex", ".", "add_datarun", "(", "experiment", ",", "pipeline", ")", "datarun", ".", "start", "(", ")", "LOGGER", ".", "info", "(", "'Datarun %s started'", ",", "datarun", ".", "id", ")", "try", ":", "for", "signal", "in", "experiment", ".", "signals", ":", "start_signalrun", "(", "orex", ",", "datarun", ",", "signal", ")", "status", "=", "datarun", ".", "STATUS_SUCCESS", "except", "Exception", ":", "LOGGER", ".", "exception", "(", "'Datarun %s crashed'", ",", "datarun", ".", "id", ")", "status", "=", "datarun", ".", "STATUS_ERRORED", "datarun", ".", "end", "(", "status", ")" ]
[ 75, 0 ]
[ 102, 23 ]
python
en
['en', 'en', 'en']
True
ValidationOperator.validation_operator_config
(self)
This method builds the config dict of a particular validation operator. The "kwargs" key is what really distinguishes different validation operators. e.g.: { "class_name": "ActionListValidationOperator", "module_name": "great_expectations.validation_operators", "name": self.name, "kwargs": { "action_list": self.action_list }, } { "class_name": "WarningAndFailureExpectationSuitesValidationOperator", "module_name": "great_expectations.validation_operators", "name": self.name, "kwargs": { "action_list": self.action_list, "base_expectation_suite_name": self.base_expectation_suite_name, "expectation_suite_name_suffixes": self.expectation_suite_name_suffixes, "stop_on_first_error": self.stop_on_first_error, "slack_webhook": self.slack_webhook, "notify_on": self.notify_on, }, }
This method builds the config dict of a particular validation operator. The "kwargs" key is what really distinguishes different validation operators.
def validation_operator_config(self):
    """
    This method builds the config dict of a particular validation operator. The "kwargs" key
    is what really distinguishes different validation operators.

    e.g.:
    {
        "class_name": "ActionListValidationOperator",
        "module_name": "great_expectations.validation_operators",
        "name": self.name,
        "kwargs": {
            "action_list": self.action_list
        },
    }
    {
        "class_name": "WarningAndFailureExpectationSuitesValidationOperator",
        "module_name": "great_expectations.validation_operators",
        "name": self.name,
        "kwargs": {
            "action_list": self.action_list,
            "base_expectation_suite_name": self.base_expectation_suite_name,
            "expectation_suite_name_suffixes": self.expectation_suite_name_suffixes,
            "stop_on_first_error": self.stop_on_first_error,
            "slack_webhook": self.slack_webhook,
            "notify_on": self.notify_on,
        },
    }
    """
    raise NotImplementedError
[ "def", "validation_operator_config", "(", "self", ")", ":", "raise", "NotImplementedError" ]
[ 37, 4 ]
[ 67, 33 ]
python
en
['en', 'error', 'th']
False
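A sketch of the contract this abstract method describes, with a stand-in base class and hypothetical names (the real ValidationOperator lives in great_expectations.validation_operators and has a richer constructor):

    class ValidationOperator:  # stand-in for the real base class
        def __init__(self, name, action_list):
            self.name = name
            self.action_list = action_list

        def validation_operator_config(self):
            raise NotImplementedError

    class MyValidationOperator(ValidationOperator):
        # Hypothetical subclass: the "kwargs" key carries whatever distinguishes it.
        def validation_operator_config(self):
            return {
                "class_name": "MyValidationOperator",
                "module_name": "my_plugins.validation_operators",  # hypothetical
                "name": self.name,
                "kwargs": {"action_list": self.action_list},
            }

    op = MyValidationOperator("warn_and_store", action_list=[{"name": "store_result"}])
    print(op.validation_operator_config())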
ActionListValidationOperator._build_batch_from_item
(self, item)
Internal helper method to take an asset to validate, which can be either: (1) a DataAsset; or (2) a tuple of data_asset_name, expectation_suite_name, and batch_kwargs (suitable for passing to get_batch) Args: item: The item to convert to a batch (see above) Returns: A batch of data
Internal helper method to take an asset to validate, which can be either: (1) a DataAsset; or (2) a tuple of data_asset_name, expectation_suite_name, and batch_kwargs (suitable for passing to get_batch)
def _build_batch_from_item(self, item):
    """Internal helper method to take an asset to validate, which can be either:
    (1) a DataAsset; or
    (2) a tuple of data_asset_name, expectation_suite_name, and batch_kwargs
        (suitable for passing to get_batch)

    Args:
        item: The item to convert to a batch (see above)

    Returns:
        A batch of data
    """
    # if not isinstance(item, (DataAsset, Validator)):
    if isinstance(item, tuple):
        if not (
            len(item) == 2
            and isinstance(item[0], dict)
            and isinstance(item[1], str)
        ):
            raise ValueError("Unable to build batch from item.")
        batch = self.data_context.get_batch(
            batch_kwargs=item[0], expectation_suite_name=item[1]
        )
    else:
        batch = item
    return batch
[ "def", "_build_batch_from_item", "(", "self", ",", "item", ")", ":", "# if not isinstance(item, (DataAsset, Validator)):", "if", "isinstance", "(", "item", ",", "tuple", ")", ":", "if", "not", "(", "len", "(", "item", ")", "==", "2", "and", "isinstance", "(", "item", "[", "0", "]", ",", "dict", ")", "and", "isinstance", "(", "item", "[", "1", "]", ",", "str", ")", ")", ":", "raise", "ValueError", "(", "\"Unable to build batch from item.\"", ")", "batch", "=", "self", ".", "data_context", ".", "get_batch", "(", "batch_kwargs", "=", "item", "[", "0", "]", ",", "expectation_suite_name", "=", "item", "[", "1", "]", ")", "else", ":", "batch", "=", "item", "return", "batch" ]
[ 247, 4 ]
[ 273, 20 ]
python
en
['en', 'en', 'en']
True
ActionListValidationOperator._run_actions
( self, batch, expectation_suite_identifier, expectation_suite, batch_validation_result, run_id, )
Runs all actions configured for this operator on the result of validating one batch against one expectation suite. If an action fails with an exception, the method does not continue. :param batch: :param expectation_suite: :param batch_validation_result: :param run_id: :return: a dictionary: {action name -> result returned by the action}
Runs all actions configured for this operator on the result of validating one batch against one expectation suite.
def _run_actions(
    self,
    batch,
    expectation_suite_identifier,
    expectation_suite,
    batch_validation_result,
    run_id,
):
    """
    Runs all actions configured for this operator on the result of validating one
    batch against one expectation suite.

    If an action fails with an exception, the method does not continue.

    :param batch:
    :param expectation_suite:
    :param batch_validation_result:
    :param run_id:
    :return: a dictionary: {action name -> result returned by the action}
    """
    batch_actions_results = {}
    for action in self.action_list:
        # NOTE: Eugene: 2019-09-23: log the info about the batch and the expectation suite
        logger.debug(
            "Processing validation action with name {}".format(action["name"])
        )

        if hasattr(batch, "active_batch_id"):
            batch_identifier = batch.active_batch_id
        else:
            batch_identifier = batch.batch_id

        validation_result_id = ValidationResultIdentifier(
            expectation_suite_identifier=expectation_suite_identifier,
            run_id=run_id,
            batch_identifier=batch_identifier,
        )
        try:
            action_result = self.actions[action["name"]].run(
                validation_result_suite_identifier=validation_result_id,
                validation_result_suite=batch_validation_result,
                data_asset=batch,
                payload=batch_actions_results,
            )

            # add action_result
            batch_actions_results[action["name"]] = (
                {} if action_result is None else action_result
            )
            batch_actions_results[action["name"]]["class"] = action["action"][
                "class_name"
            ]
        except Exception as e:
            logger.exception(
                "Error running action with name {}".format(action["name"])
            )
            raise e

    return batch_actions_results
[ "def", "_run_actions", "(", "self", ",", "batch", ",", "expectation_suite_identifier", ",", "expectation_suite", ",", "batch_validation_result", ",", "run_id", ",", ")", ":", "batch_actions_results", "=", "{", "}", "for", "action", "in", "self", ".", "action_list", ":", "# NOTE: Eugene: 2019-09-23: log the info about the batch and the expectation suite", "logger", ".", "debug", "(", "\"Processing validation action with name {}\"", ".", "format", "(", "action", "[", "\"name\"", "]", ")", ")", "if", "hasattr", "(", "batch", ",", "\"active_batch_id\"", ")", ":", "batch_identifier", "=", "batch", ".", "active_batch_id", "else", ":", "batch_identifier", "=", "batch", ".", "batch_id", "validation_result_id", "=", "ValidationResultIdentifier", "(", "expectation_suite_identifier", "=", "expectation_suite_identifier", ",", "run_id", "=", "run_id", ",", "batch_identifier", "=", "batch_identifier", ",", ")", "try", ":", "action_result", "=", "self", ".", "actions", "[", "action", "[", "\"name\"", "]", "]", ".", "run", "(", "validation_result_suite_identifier", "=", "validation_result_id", ",", "validation_result_suite", "=", "batch_validation_result", ",", "data_asset", "=", "batch", ",", "payload", "=", "batch_actions_results", ",", ")", "# add action_result", "batch_actions_results", "[", "action", "[", "\"name\"", "]", "]", "=", "(", "{", "}", "if", "action_result", "is", "None", "else", "action_result", ")", "batch_actions_results", "[", "action", "[", "\"name\"", "]", "]", "[", "\"class\"", "]", "=", "action", "[", "\"action\"", "]", "[", "\"class_name\"", "]", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "\"Error running action with name {}\"", ".", "format", "(", "action", "[", "\"name\"", "]", ")", ")", "raise", "e", "return", "batch_actions_results" ]
[ 349, 4 ]
[ 408, 36 ]
python
en
['en', 'error', 'th']
False
SuiteEditNotebookRenderer.render
( self, suite: ExpectationSuite, batch_kwargs=None )
Render a notebook dict from an expectation suite.
Render a notebook dict from an expectation suite.
def render(
    self, suite: ExpectationSuite, batch_kwargs=None
) -> nbformat.NotebookNode:
    """
    Render a notebook dict from an expectation suite.
    """
    if not isinstance(suite, ExpectationSuite):
        raise RuntimeWarning("render must be given an ExpectationSuite.")

    self._notebook = nbformat.v4.new_notebook()

    suite_name = suite.expectation_suite_name

    batch_kwargs = self.get_batch_kwargs(suite, batch_kwargs)
    self.add_header(suite_name, batch_kwargs)
    self.add_authoring_intro()
    self.add_expectation_cells_from_suite(suite.expectations)
    self.add_footer()

    return self._notebook
[ "def", "render", "(", "self", ",", "suite", ":", "ExpectationSuite", ",", "batch_kwargs", "=", "None", ")", "->", "nbformat", ".", "NotebookNode", ":", "if", "not", "isinstance", "(", "suite", ",", "ExpectationSuite", ")", ":", "raise", "RuntimeWarning", "(", "\"render must be given an ExpectationSuite.\"", ")", "self", ".", "_notebook", "=", "nbformat", ".", "v4", ".", "new_notebook", "(", ")", "suite_name", "=", "suite", ".", "expectation_suite_name", "batch_kwargs", "=", "self", ".", "get_batch_kwargs", "(", "suite", ",", "batch_kwargs", ")", "self", ".", "add_header", "(", "suite_name", ",", "batch_kwargs", ")", "self", ".", "add_authoring_intro", "(", ")", "self", ".", "add_expectation_cells_from_suite", "(", "suite", ".", "expectations", ")", "self", ".", "add_footer", "(", ")", "return", "self", ".", "_notebook" ]
[ 270, 4 ]
[ 289, 29 ]
python
en
['en', 'error', 'th']
False
SuiteEditNotebookRenderer.render_to_disk
( self, suite: ExpectationSuite, notebook_file_path: str, batch_kwargs=None )
Render a notebook to disk from an expectation suite. If batch_kwargs are passed they will override any found in suite citations.
Render a notebook to disk from an expectation suite.
def render_to_disk(
    self, suite: ExpectationSuite, notebook_file_path: str, batch_kwargs=None
) -> None:
    """
    Render a notebook to disk from an expectation suite.

    If batch_kwargs are passed they will override any found in suite citations.
    """
    self.render(suite, batch_kwargs)
    self.write_notebook_to_disk(self._notebook, notebook_file_path)
[ "def", "render_to_disk", "(", "self", ",", "suite", ":", "ExpectationSuite", ",", "notebook_file_path", ":", "str", ",", "batch_kwargs", "=", "None", ")", "->", "None", ":", "self", ".", "render", "(", "suite", ",", "batch_kwargs", ")", "self", ".", "write_notebook_to_disk", "(", "self", ".", "_notebook", ",", "notebook_file_path", ")" ]
[ 291, 4 ]
[ 301, 71 ]
python
en
['en', 'error', 'th']
False
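Both renderer methods above build on nbformat's v4 API. A standalone sketch of constructing a notebook dict and writing it to disk the same way (cell contents and file name are arbitrary):

    import nbformat

    nb = nbformat.v4.new_notebook()  # same constructor render() uses
    nb.cells.append(nbformat.v4.new_markdown_cell("# Edit your Expectation Suite"))
    nb.cells.append(nbformat.v4.new_code_cell("import great_expectations as ge"))

    with open("suite_edit_demo.ipynb", "w") as f:
        nbformat.write(nb, f)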
Mark.cli_as_experimental
(func: Callable)
Apply as a decorator to CLI commands that are Experimental.
Apply as a decorator to CLI commands that are Experimental.
def cli_as_experimental(func: Callable) -> Callable:
    """Apply as a decorator to CLI commands that are Experimental."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        cli_message(
            "<yellow>Heads up! This feature is Experimental. It may change. "
            "Please give us your feedback!</yellow>"
        )
        func(*args, **kwargs)

    return wrapper
[ "def", "cli_as_experimental", "(", "func", ":", "Callable", ")", "->", "Callable", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cli_message", "(", "\"<yellow>Heads up! This feature is Experimental. It may change. \"", "\"Please give us your feedback!</yellow>\"", ")", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
[ 18, 4 ]
[ 29, 22 ]
python
en
['en', 'en', 'en']
True
Mark.cli_as_beta
(func: Callable)
Apply as a decorator to CLI commands that are beta.
Apply as a decorator to CLI commands that are beta.
def cli_as_beta(func: Callable) -> Callable:
    """Apply as a decorator to CLI commands that are beta."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        cli_message(
            "<yellow>Heads up! This feature is in Beta. Please give us "
            "your feedback!</yellow>"
        )
        func(*args, **kwargs)

    return wrapper
[ "def", "cli_as_beta", "(", "func", ":", "Callable", ")", "->", "Callable", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cli_message", "(", "\"<yellow>Heads up! This feature is in Beta. Please give us \"", "\"your feedback!</yellow>\"", ")", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
[ 32, 4 ]
[ 43, 22 ]
python
en
['en', 'en', 'en']
True
Mark.cli_as_deprecation
( message: str = "<yellow>Heads up! This feature will be deprecated in the next major release</yellow>", )
Apply as a decorator to CLI commands that will be deprecated.
Apply as a decorator to CLI commands that will be deprecated.
def cli_as_deprecation(
    message: str = "<yellow>Heads up! This feature will be deprecated in the next major release</yellow>",
) -> Callable:
    """Apply as a decorator to CLI commands that will be deprecated."""

    def inner_decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            cli_message(message)
            func(*args, **kwargs)

        return wrapped

    return inner_decorator
[ "def", "cli_as_deprecation", "(", "message", ":", "str", "=", "\"<yellow>Heads up! This feature will be deprecated in the next major release</yellow>\"", ",", ")", "->", "Callable", ":", "def", "inner_decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cli_message", "(", "message", ")", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped", "return", "inner_decorator" ]
[ 46, 4 ]
[ 59, 30 ]
python
en
['en', 'en', 'en']
True
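All three marks follow the same decorator (or decorator-factory) shape. A self-contained sketch with a stand-in cli_message; the real helper renders the <yellow>...</yellow> markup:

    from functools import wraps
    from typing import Callable

    def cli_message(msg: str) -> None:
        print(msg)  # stand-in for great_expectations' colorized CLI output

    def cli_as_deprecation(
        message: str = "Heads up! This feature will be deprecated in the next major release",
    ) -> Callable:
        def inner_decorator(func):
            @wraps(func)
            def wrapped(*args, **kwargs):
                cli_message(message)  # emit the notice, then run the command
                func(*args, **kwargs)

            return wrapped

        return inner_decorator

    @cli_as_deprecation()
    def legacy_command():
        print("running legacy command")

    legacy_command()  # prints the deprecation notice first, then the command output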
has_public_binary_operator
(type_, operator_symbol)
returns True, if `type_` has public binary operator, otherwise False
returns True, if `type_` has public binary operator, otherwise False
def has_public_binary_operator(type_, operator_symbol):
    """returns True, if `type_` has public binary operator, otherwise False"""

    type_ = type_traits.remove_alias(type_)
    type_ = type_traits.remove_cv(type_)
    type_ = type_traits.remove_declarated(type_)
    assert isinstance(type_, class_declaration.class_t)

    if type_traits.is_std_string(type_) or type_traits.is_std_wstring(type_):
        # In some case compare operators of std::basic_string are not
        # instantiated
        return True

    operators = type_.member_operators(
        function=matchers.custom_matcher_t(
            lambda decl: not decl.is_artificial) &
        matchers.access_type_matcher_t('public'),
        symbol=operator_symbol, allow_empty=True, recursive=False)
    if operators:
        return True

    declarated = cpptypes.declarated_t(type_)
    const = cpptypes.const_t(declarated)
    reference = cpptypes.reference_t(const)
    operators = type_.top_parent.operators(
        function=lambda decl: not decl.is_artificial,
        arg_types=[reference, None],
        symbol=operator_symbol,
        allow_empty=True,
        recursive=True)
    if operators:
        return True

    for bi in type_.recursive_bases:
        assert isinstance(bi, class_declaration.hierarchy_info_t)
        if bi.access_type != class_declaration.ACCESS_TYPES.PUBLIC:
            continue
        operators = bi.related_class.member_operators(
            function=matchers.custom_matcher_t(
                lambda decl: not decl.is_artificial) &
            matchers.access_type_matcher_t('public'),
            symbol=operator_symbol, allow_empty=True, recursive=False)
        if operators:
            return True

    return False
[ "def", "has_public_binary_operator", "(", "type_", ",", "operator_symbol", ")", ":", "type_", "=", "type_traits", ".", "remove_alias", "(", "type_", ")", "type_", "=", "type_traits", ".", "remove_cv", "(", "type_", ")", "type_", "=", "type_traits", ".", "remove_declarated", "(", "type_", ")", "assert", "isinstance", "(", "type_", ",", "class_declaration", ".", "class_t", ")", "if", "type_traits", ".", "is_std_string", "(", "type_", ")", "or", "type_traits", ".", "is_std_wstring", "(", "type_", ")", ":", "# In some case compare operators of std::basic_string are not", "# instantiated", "return", "True", "operators", "=", "type_", ".", "member_operators", "(", "function", "=", "matchers", ".", "custom_matcher_t", "(", "lambda", "decl", ":", "not", "decl", ".", "is_artificial", ")", "&", "matchers", ".", "access_type_matcher_t", "(", "'public'", ")", ",", "symbol", "=", "operator_symbol", ",", "allow_empty", "=", "True", ",", "recursive", "=", "False", ")", "if", "operators", ":", "return", "True", "declarated", "=", "cpptypes", ".", "declarated_t", "(", "type_", ")", "const", "=", "cpptypes", ".", "const_t", "(", "declarated", ")", "reference", "=", "cpptypes", ".", "reference_t", "(", "const", ")", "operators", "=", "type_", ".", "top_parent", ".", "operators", "(", "function", "=", "lambda", "decl", ":", "not", "decl", ".", "is_artificial", ",", "arg_types", "=", "[", "reference", ",", "None", "]", ",", "symbol", "=", "operator_symbol", ",", "allow_empty", "=", "True", ",", "recursive", "=", "True", ")", "if", "operators", ":", "return", "True", "for", "bi", "in", "type_", ".", "recursive_bases", ":", "assert", "isinstance", "(", "bi", ",", "class_declaration", ".", "hierarchy_info_t", ")", "if", "bi", ".", "access_type", "!=", "class_declaration", ".", "ACCESS_TYPES", ".", "PUBLIC", ":", "continue", "operators", "=", "bi", ".", "related_class", ".", "member_operators", "(", "function", "=", "matchers", ".", "custom_matcher_t", "(", "lambda", "decl", ":", "not", "decl", ".", "is_artificial", ")", "&", "matchers", ".", "access_type_matcher_t", "(", "'public'", ")", ",", "symbol", "=", "operator_symbol", ",", "allow_empty", "=", "True", ",", "recursive", "=", "False", ")", "if", "operators", ":", "return", "True", "return", "False" ]
[ 6, 0 ]
[ 48, 16 ]
python
en
['en', 'en', 'en']
True
has_public_equal
(decl_type)
returns True, if class has public operator==, otherwise False
returns True, if class has public operator==, otherwise False
def has_public_equal(decl_type):
    """returns True, if class has public operator==, otherwise False"""
    return has_public_binary_operator(decl_type, '==')
[ "def", "has_public_equal", "(", "decl_type", ")", ":", "return", "has_public_binary_operator", "(", "decl_type", ",", "'=='", ")" ]
[ 51, 0 ]
[ 53, 54 ]
python
en
['en', 'en', 'en']
True
has_public_less
(decl_type)
returns True, if class has public operator<, otherwise False
returns True, if class has public operator<, otherwise False
def has_public_less(decl_type):
    """returns True, if class has public operator<, otherwise False"""
    return has_public_binary_operator(decl_type, '<')
[ "def", "has_public_less", "(", "decl_type", ")", ":", "return", "has_public_binary_operator", "(", "decl_type", ",", "'<'", ")" ]
[ 56, 0 ]
[ 58, 53 ]
python
en
['en', 'en', 'en']
True
elaborated_info.elaborated_type_specifier
(self)
Elaborated specifier (can be: struct, union, class or enum). Returns: str: elaborated specifier
Elaborated specifier (can be: struct, union, class or enum).
def elaborated_type_specifier(self):
    """
    Elaborated specifier (can be: struct, union, class or enum).

    Returns:
        str: elaborated specifier
    """
    return self._elaborated_type_specifier
[ "def", "elaborated_type_specifier", "(", "self", ")", ":", "return", "self", ".", "_elaborated_type_specifier" ]
[ 16, 4 ]
[ 23, 46 ]
python
en
['en', 'error', 'th']
False
get_set_of_columns_and_expectations_from_suite
( suite: ExpectationSuite, )
Args: suite: An expectation suite Returns: A tuple containing a set of columns and a set of expectations found in a suite
Args: suite: An expectation suite
def get_set_of_columns_and_expectations_from_suite(
    suite: ExpectationSuite,
) -> Tuple[Set[str], Set[str]]:
    """
    Args:
        suite: An expectation suite

    Returns:
        A tuple containing a set of columns and a set of expectations found in a suite
    """
    columns: Set[str] = {
        i.kwargs.get("column") for i in suite.expectations if i.kwargs.get("column")
    }
    expectations: Set[str] = {i.expectation_type for i in suite.expectations}

    return columns, expectations
[ "def", "get_set_of_columns_and_expectations_from_suite", "(", "suite", ":", "ExpectationSuite", ",", ")", "->", "Tuple", "[", "Set", "[", "str", "]", ",", "Set", "[", "str", "]", "]", ":", "columns", ":", "Set", "[", "str", "]", "=", "{", "i", ".", "kwargs", ".", "get", "(", "\"column\"", ")", "for", "i", "in", "suite", ".", "expectations", "if", "i", ".", "kwargs", ".", "get", "(", "\"column\"", ")", "}", "expectations", ":", "Set", "[", "str", "]", "=", "{", "i", ".", "expectation_type", "for", "i", "in", "suite", ".", "expectations", "}", "return", "columns", ",", "expectations" ]
[ 55, 0 ]
[ 70, 32 ]
python
en
['en', 'error', 'th']
False
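A quick check of the set-comprehension logic with stand-in expectation objects; a real ExpectationSuite is not needed to see the behavior:

    from types import SimpleNamespace

    expectations = [
        SimpleNamespace(expectation_type="expect_column_to_exist", kwargs={"column": "Age"}),
        SimpleNamespace(expectation_type="expect_column_values_to_not_be_null", kwargs={"column": "Age"}),
        SimpleNamespace(expectation_type="expect_table_row_count_to_be_between", kwargs={}),
    ]
    suite = SimpleNamespace(expectations=expectations)

    columns = {i.kwargs.get("column") for i in suite.expectations if i.kwargs.get("column")}
    expectation_types = {i.expectation_type for i in suite.expectations}
    print(columns)                 # {'Age'}
    print(len(expectation_types))  # 3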
ape
(y, p)
Absolute Percentage Error (APE). Args: y (float): target p (float): prediction Returns: e (float): APE
Absolute Percentage Error (APE). Args: y (float): target p (float): prediction
def ape(y, p):
    """Absolute Percentage Error (APE).

    Args:
        y (float): target
        p (float): prediction

    Returns:
        e (float): APE
    """
    assert np.abs(y) > EPS
    return np.abs(1 - p / y)
[ "def", "ape", "(", "y", ",", "p", ")", ":", "assert", "np", ".", "abs", "(", "y", ")", ">", "EPS", "return", "np", ".", "abs", "(", "1", "-", "p", "/", "y", ")" ]
[ 12, 0 ]
[ 23, 28 ]
python
en
['en', 'et', 'it']
False
mape
(y, p)
Mean Absolute Percentage Error (MAPE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): MAPE
Mean Absolute Percentage Error (MAPE). Args: y (numpy.array): target p (numpy.array): prediction
def mape(y, p):
    """Mean Absolute Percentage Error (MAPE).

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): MAPE
    """
    filt = np.abs(y) > EPS
    return np.mean(np.abs(1 - p[filt] / y[filt]))
[ "def", "mape", "(", "y", ",", "p", ")", ":", "filt", "=", "np", ".", "abs", "(", "y", ")", ">", "EPS", "return", "np", ".", "mean", "(", "np", ".", "abs", "(", "1", "-", "p", "[", "filt", "]", "/", "y", "[", "filt", "]", ")", ")" ]
[ 26, 0 ]
[ 37, 49 ]
python
en
['en', 'ro', 'it']
False
smape
(y, p)
Symmetric Mean Absolute Percentage Error (sMAPE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): sMAPE
Symmetric Mean Absolute Percentage Error (sMAPE). Args: y (numpy.array): target p (numpy.array): prediction
def smape(y, p):
    """Symmetric Mean Absolute Percentage Error (sMAPE).

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): sMAPE
    """
    return 2. * np.mean(np.abs(y - p) / (np.abs(y) + np.abs(p)))
[ "def", "smape", "(", "y", ",", "p", ")", ":", "return", "2.", "*", "np", ".", "mean", "(", "np", ".", "abs", "(", "y", "-", "p", ")", "/", "(", "np", ".", "abs", "(", "y", ")", "+", "np", ".", "abs", "(", "p", ")", ")", ")" ]
[ 40, 0 ]
[ 49, 64 ]
python
en
['en', 'ro', 'it']
False
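A worked numeric check for the percentage-error metrics. The functions are restated from the records above; EPS is assumed to be a small positive constant, as in the source module:

    import numpy as np

    EPS = 1e-10  # assumed; the source module defines its own EPS

    def mape(y, p):  # as in the record above
        filt = np.abs(y) > EPS
        return np.mean(np.abs(1 - p[filt] / y[filt]))

    def smape(y, p):  # as in the record above
        return 2. * np.mean(np.abs(y - p) / (np.abs(y) + np.abs(p)))

    y = np.array([100.0, 200.0])
    p = np.array([110.0, 180.0])
    print(mape(y, p))   # 0.1: mean(|1 - 110/100|, |1 - 180/200|)
    print(smape(y, p))  # ~0.1003: 2 * mean(10/210, 20/380)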
rmse
(y, p)
Root Mean Squared Error (RMSE). Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): RMSE
Root Mean Squared Error (RMSE). Args: y (numpy.array): target p (numpy.array): prediction
def rmse(y, p):
    """Root Mean Squared Error (RMSE).

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): RMSE
    """
    # check and get number of samples
    assert y.shape == p.shape

    return np.sqrt(mse(y, p))
[ "def", "rmse", "(", "y", ",", "p", ")", ":", "# check and get number of samples", "assert", "y", ".", "shape", "==", "p", ".", "shape", "return", "np", ".", "sqrt", "(", "mse", "(", "y", ",", "p", ")", ")" ]
[ 52, 0 ]
[ 65, 29 ]
python
en
['en', 'en', 'en']
True
gini
(y, p)
Normalized Gini Coefficient. Args: y (numpy.array): target p (numpy.array): prediction Returns: e (numpy.float64): normalized Gini coefficient
Normalized Gini Coefficient.
def gini(y, p):
    """Normalized Gini Coefficient.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): normalized Gini coefficient
    """
    # check and get number of samples
    assert y.shape == p.shape
    n_samples = y.shape[0]

    # sort rows on prediction column
    # (from largest to smallest)
    arr = np.array([y, p]).transpose()
    true_order = arr[arr[:, 0].argsort()][::-1, 0]
    pred_order = arr[arr[:, 1].argsort()][::-1, 0]

    # get Lorenz curves
    l_true = np.cumsum(true_order) / np.sum(true_order)
    l_pred = np.cumsum(pred_order) / np.sum(pred_order)
    l_ones = np.linspace(1/n_samples, 1, n_samples)

    # get Gini coefficients (area between curves)
    g_true = np.sum(l_ones - l_true)
    g_pred = np.sum(l_ones - l_pred)

    # normalize to true Gini coefficient
    return g_pred / g_true
[ "def", "gini", "(", "y", ",", "p", ")", ":", "# check and get number of samples", "assert", "y", ".", "shape", "==", "p", ".", "shape", "n_samples", "=", "y", ".", "shape", "[", "0", "]", "# sort rows on prediction column", "# (from largest to smallest)", "arr", "=", "np", ".", "array", "(", "[", "y", ",", "p", "]", ")", ".", "transpose", "(", ")", "true_order", "=", "arr", "[", "arr", "[", ":", ",", "0", "]", ".", "argsort", "(", ")", "]", "[", ":", ":", "-", "1", ",", "0", "]", "pred_order", "=", "arr", "[", "arr", "[", ":", ",", "1", "]", ".", "argsort", "(", ")", "]", "[", ":", ":", "-", "1", ",", "0", "]", "# get Lorenz curves", "l_true", "=", "np", ".", "cumsum", "(", "true_order", ")", "/", "np", ".", "sum", "(", "true_order", ")", "l_pred", "=", "np", ".", "cumsum", "(", "pred_order", ")", "/", "np", ".", "sum", "(", "pred_order", ")", "l_ones", "=", "np", ".", "linspace", "(", "1", "/", "n_samples", ",", "1", ",", "n_samples", ")", "# get Gini coefficients (area between curves)", "g_true", "=", "np", ".", "sum", "(", "l_ones", "-", "l_true", ")", "g_pred", "=", "np", ".", "sum", "(", "l_ones", "-", "l_pred", ")", "# normalize to true Gini coefficient", "return", "g_pred", "/", "g_true" ]
[ 68, 0 ]
[ 100, 26 ]
python
en
['it', 'en', 'en']
True
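Two end-point checks for the normalized Gini (with gini from the record above in scope): a prediction that ranks the targets perfectly scores 1, and a perfectly inverted ranking scores -1:

    import numpy as np

    y = np.array([1.0, 2.0, 3.0, 4.0])
    print(gini(y, y))        # 1.0: predicted order matches the true order
    print(gini(y, y[::-1]))  # -1.0: predicted order is exactly reversed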
regression_metrics
(y, p, w=None, metrics={'RMSE': rmse, 'sMAPE': smape, 'Gini': gini})
Log metrics for regressors. Args: y (numpy.array): target p (numpy.array): prediction w (numpy.array, optional): a treatment vector (1 or True: treatment, 0 or False: control). If given, log metrics for the treatment and control group separately metrics (dict, optional): a dictionary of the metric names and functions
Log metrics for regressors.
def regression_metrics(y, p, w=None,
                       metrics={'RMSE': rmse, 'sMAPE': smape, 'Gini': gini}):
    """Log metrics for regressors.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction
        w (numpy.array, optional): a treatment vector (1 or True: treatment,
            0 or False: control). If given, log metrics for the treatment and
            control group separately
        metrics (dict, optional): a dictionary of the metric names and functions
    """
    assert metrics
    assert y.shape[0] == p.shape[0]

    for name, func in metrics.items():
        if w is not None:
            assert y.shape[0] == w.shape[0]
            if w.dtype != bool:
                w = w == 1

            logger.info('{:>8s}   (Control): {:10.4f}'.format(name, func(y[~w], p[~w])))
            logger.info('{:>8s} (Treatment): {:10.4f}'.format(name, func(y[w], p[w])))
        else:
            logger.info('{:>8s}: {:10.4f}'.format(name, func(y, p)))
[ "def", "regression_metrics", "(", "y", ",", "p", ",", "w", "=", "None", ",", "metrics", "=", "{", "'RMSE'", ":", "rmse", ",", "'sMAPE'", ":", "smape", ",", "'Gini'", ":", "gini", "}", ")", ":", "assert", "metrics", "assert", "y", ".", "shape", "[", "0", "]", "==", "p", ".", "shape", "[", "0", "]", "for", "name", ",", "func", "in", "metrics", ".", "items", "(", ")", ":", "if", "w", "is", "not", "None", ":", "assert", "y", ".", "shape", "[", "0", "]", "==", "w", ".", "shape", "[", "0", "]", "if", "w", ".", "dtype", "!=", "bool", ":", "w", "=", "w", "==", "1", "logger", ".", "info", "(", "'{:>8s} (Control): {:10.4f}'", ".", "format", "(", "name", ",", "func", "(", "y", "[", "~", "w", "]", ",", "p", "[", "~", "w", "]", ")", ")", ")", "logger", ".", "info", "(", "'{:>8s} (Treatment): {:10.4f}'", ".", "format", "(", "name", ",", "func", "(", "y", "[", "w", "]", ",", "p", "[", "w", "]", ")", ")", ")", "else", ":", "logger", ".", "info", "(", "'{:>8s}: {:10.4f}'", ".", "format", "(", "name", ",", "func", "(", "y", ",", "p", ")", ")", ")" ]
[ 103, 0 ]
[ 124, 68 ]
python
da
['da', 'id', 'en']
False
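A usage sketch, assuming regression_metrics, smape and gini from the records above are in scope (the default metrics dict also includes rmse, which needs the module's mse helper, so an explicit dict is passed here):

    import logging
    import numpy as np

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)  # the source module defines its own logger

    y = np.array([1.0, 2.0, 3.0, 4.0])
    p = np.array([1.1, 1.9, 3.2, 3.8])
    w = np.array([0, 0, 1, 1])  # treatment flags; non-bool arrays are converted via w == 1

    regression_metrics(y, p, w=w, metrics={'sMAPE': smape, 'Gini': gini})
    # logs each metric separately for the control (w == 0) and treatment (w == 1) rows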
mask_comments
(input)
Mask the comments so we skip braces inside comments.
Mask the comments so we skip braces inside comments.
def mask_comments(input):
    """Mask the comments so we skip braces inside comments."""
    search_re = re.compile(r'(.*?)(#)(.*)')
    return [search_re.sub(comment_replace, line) for line in input]
[ "def", "mask_comments", "(", "input", ")", ":", "search_re", "=", "re", ".", "compile", "(", "r'(.*?)(#)(.*)'", ")", "return", "[", "search_re", ".", "sub", "(", "comment_replace", ",", "line", ")", "for", "line", "in", "input", "]" ]
[ 27, 0 ]
[ 30, 65 ]
python
en
['en', 'en', 'en']
True
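comment_replace is defined elsewhere in the source module; a plausible stand-in that preserves line length while blanking the comment body shows the intent (mask_comments is restated so the sketch runs on its own):

    import re

    def comment_replace(matchobj):
        # Assumed stand-in: keep the code prefix and '#', overwrite the comment text.
        return matchobj.group(1) + matchobj.group(2) + 'x' * len(matchobj.group(3))

    def mask_comments(input):
        search_re = re.compile(r'(.*?)(#)(.*)')
        return [search_re.sub(comment_replace, line) for line in input]

    print(mask_comments(["'targets': [  # a { brace in a comment"]))
    # The '{' inside the comment is overwritten, so later brace counting ignores it.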
mask_quotes
(input)
Mask the quoted strings so we skip braces inside quoted strings.
Mask the quoted strings so we skip braces inside quoted strings.
def mask_quotes(input):
    """Mask the quoted strings so we skip braces inside quoted strings."""
    search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
    return [search_re.sub(quote_replace, line) for line in input]
[ "def", "mask_quotes", "(", "input", ")", ":", "search_re", "=", "re", ".", "compile", "(", "r'(.*?)'", "+", "QUOTE_RE_STR", ")", "return", "[", "search_re", ".", "sub", "(", "quote_replace", ",", "line", ")", "for", "line", "in", "input", "]" ]
[ 40, 0 ]
[ 43, 63 ]
python
en
['en', 'en', 'en']
True
split_double_braces
(input)
Masks out the quotes and comments, and then splits appropriate lines (lines that match the double_*_brace re's above) before indenting them below. These are used to split lines which have multiple braces on them, so that the indentation looks prettier when all laid out (e.g. closing braces make a nice diagonal line).
Masks out the quotes and comments, and then splits appropriate lines (lines that match the double_*_brace re's above) before indenting them below.
def split_double_braces(input):
    """Masks out the quotes and comments, and then splits appropriate
    lines (lines that match the double_*_brace re's above) before
    indenting them below.

    These are used to split lines which have multiple braces on them, so
    that the indentation looks prettier when all laid out (e.g. closing
    braces make a nice diagonal line).
    """
    double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
    double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

    masked_input = mask_quotes(input)
    masked_input = mask_comments(masked_input)

    (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
    (output, mask_output) = do_split(output, mask_output, double_close_brace_re)

    return output
[ "def", "split_double_braces", "(", "input", ")", ":", "double_open_brace_re", "=", "re", ".", "compile", "(", "r'(.*?[\\[\\{\\(,])(\\s*)([\\[\\{\\(])'", ")", "double_close_brace_re", "=", "re", ".", "compile", "(", "r'(.*?[\\]\\}\\)],?)(\\s*)([\\]\\}\\)])'", ")", "masked_input", "=", "mask_quotes", "(", "input", ")", "masked_input", "=", "mask_comments", "(", "masked_input", ")", "(", "output", ",", "mask_output", ")", "=", "do_split", "(", "input", ",", "masked_input", ",", "double_open_brace_re", ")", "(", "output", ",", "mask_output", ")", "=", "do_split", "(", "output", ",", "mask_output", ",", "double_close_brace_re", ")", "return", "output" ]
[ 61, 0 ]
[ 79, 15 ]
python
en
['en', 'en', 'en']
True
count_braces
(line)
keeps track of the number of braces on a given line and returns the result. It starts at zero and subtracts for closed braces, and adds for open braces.
keeps track of the number of braces on a given line and returns the result.
def count_braces(line):
    """keeps track of the number of braces on a given line and returns the result.

    It starts at zero and subtracts for closed braces, and adds for open
    braces.
    """
    open_braces = ['[', '(', '{']
    close_braces = [']', ')', '}']
    closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
    cnt = 0
    stripline = COMMENT_RE.sub(r'', line)
    stripline = QUOTE_RE.sub(r"''", stripline)
    for char in stripline:
        for brace in open_braces:
            if char == brace:
                cnt += 1
        for brace in close_braces:
            if char == brace:
                cnt -= 1

    after = False
    if cnt > 0:
        after = True

    # This catches the special case of a closing brace having something
    # other than just whitespace ahead of it -- we don't want to
    # unindent that until after this line is printed so it stays with
    # the previous indentation level.
    if cnt < 0 and closing_prefix_re.match(stripline):
        after = True
    return (cnt, after)
[ "def", "count_braces", "(", "line", ")", ":", "open_braces", "=", "[", "'['", ",", "'('", ",", "'{'", "]", "close_braces", "=", "[", "']'", ",", "')'", ",", "'}'", "]", "closing_prefix_re", "=", "re", ".", "compile", "(", "r'(.*?[^\\s\\]\\}\\)]+.*?)([\\]\\}\\)],?)\\s*$'", ")", "cnt", "=", "0", "stripline", "=", "COMMENT_RE", ".", "sub", "(", "r''", ",", "line", ")", "stripline", "=", "QUOTE_RE", ".", "sub", "(", "r\"''\"", ",", "stripline", ")", "for", "char", "in", "stripline", ":", "for", "brace", "in", "open_braces", ":", "if", "char", "==", "brace", ":", "cnt", "+=", "1", "for", "brace", "in", "close_braces", ":", "if", "char", "==", "brace", ":", "cnt", "-=", "1", "after", "=", "False", "if", "cnt", ">", "0", ":", "after", "=", "True", "# This catches the special case of a closing brace having something", "# other than just whitespace ahead of it -- we don't want to", "# unindent that until after this line is printed so it stays with", "# the previous indentation level.", "if", "cnt", "<", "0", "and", "closing_prefix_re", ".", "match", "(", "stripline", ")", ":", "after", "=", "True", "return", "(", "cnt", ",", "after", ")" ]
[ 82, 0 ]
[ 111, 21 ]
python
en
['en', 'en', 'en']
True
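Two quick checks, with count_braces from the record above in scope. COMMENT_RE and QUOTE_RE are module-level patterns defined elsewhere in the file; the definitions below are assumptions consistent with how count_braces uses them:

    import re

    COMMENT_RE = re.compile(r'\s*#.*')   # assumed
    QUOTE_RE = re.compile(r"'[^']*'")    # assumed; masks single-quoted strings

    print(count_braces("'targets': ["))  # (1, True): net open brace, indent after printing
    print(count_braces("],"))            # (-1, False): net close brace, unindent before printing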
prettyprint_input
(lines)
Does the main work of indenting the input based on the brace counts.
Does the main work of indenting the input based on the brace counts.
def prettyprint_input(lines):
    """Does the main work of indenting the input based on the brace counts."""
    indent = 0
    basic_offset = 2
    last_line = ""
    for line in lines:
        if COMMENT_RE.match(line):
            print line
        else:
            line = line.strip('\r\n\t ')  # Otherwise doesn't strip \r on Unix.
            if len(line) > 0:
                (brace_diff, after) = count_braces(line)
                if brace_diff != 0:
                    if after:
                        print " " * (basic_offset * indent) + line
                        indent += brace_diff
                    else:
                        indent += brace_diff
                        print " " * (basic_offset * indent) + line
                else:
                    print " " * (basic_offset * indent) + line
            else:
                print ""
            last_line = line
[ "def", "prettyprint_input", "(", "lines", ")", ":", "indent", "=", "0", "basic_offset", "=", "2", "last_line", "=", "\"\"", "for", "line", "in", "lines", ":", "if", "COMMENT_RE", ".", "match", "(", "line", ")", ":", "print", "line", "else", ":", "line", "=", "line", ".", "strip", "(", "'\\r\\n\\t '", ")", "# Otherwise doesn't strip \\r on Unix.", "if", "len", "(", "line", ")", ">", "0", ":", "(", "brace_diff", ",", "after", ")", "=", "count_braces", "(", "line", ")", "if", "brace_diff", "!=", "0", ":", "if", "after", ":", "print", "\" \"", "*", "(", "basic_offset", "*", "indent", ")", "+", "line", "indent", "+=", "brace_diff", "else", ":", "indent", "+=", "brace_diff", "print", "\" \"", "*", "(", "basic_offset", "*", "indent", ")", "+", "line", "else", ":", "print", "\" \"", "*", "(", "basic_offset", "*", "indent", ")", "+", "line", "else", ":", "print", "\"\"", "last_line", "=", "line" ]
[ 114, 0 ]
[ 137, 22 ]
python
en
['en', 'en', 'en']
True
_calc_validation_statistics
(validation_results)
Calculate summary statistics for the validation results and return ``ExpectationStatistics``.
Calculate summary statistics for the validation results and return ``ExpectationStatistics``.
def _calc_validation_statistics(validation_results):
    """
    Calculate summary statistics for the validation results and
    return ``ExpectationStatistics``.
    """
    # calc stats
    successful_expectations = sum(exp.success for exp in validation_results)
    evaluated_expectations = len(validation_results)
    unsuccessful_expectations = evaluated_expectations - successful_expectations
    success = successful_expectations == evaluated_expectations
    try:
        success_percent = successful_expectations / evaluated_expectations * 100
    except ZeroDivisionError:
        # success_percent = float("nan")
        success_percent = None

    return ValidationStatistics(
        successful_expectations=successful_expectations,
        evaluated_expectations=evaluated_expectations,
        unsuccessful_expectations=unsuccessful_expectations,
        success=success,
        success_percent=success_percent,
    )
[ "def", "_calc_validation_statistics", "(", "validation_results", ")", ":", "# calc stats", "successful_expectations", "=", "sum", "(", "exp", ".", "success", "for", "exp", "in", "validation_results", ")", "evaluated_expectations", "=", "len", "(", "validation_results", ")", "unsuccessful_expectations", "=", "evaluated_expectations", "-", "successful_expectations", "success", "=", "successful_expectations", "==", "evaluated_expectations", "try", ":", "success_percent", "=", "successful_expectations", "/", "evaluated_expectations", "*", "100", "except", "ZeroDivisionError", ":", "# success_percent = float(\"nan\")", "success_percent", "=", "None", "return", "ValidationStatistics", "(", "successful_expectations", "=", "successful_expectations", ",", "evaluated_expectations", "=", "evaluated_expectations", ",", "unsuccessful_expectations", "=", "unsuccessful_expectations", ",", "success", "=", "success", ",", "success_percent", "=", "success_percent", ",", ")" ]
[ 1416, 0 ]
[ 1438, 5 ]
python
en
['en', 'error', 'th']
False
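A quick worked example of the arithmetic in _calc_validation_statistics, using a stand-in result type (the real inputs are ExpectationValidationResult objects and the real return type is the ValidationStatistics namedtuple):

from collections import namedtuple

# Stand-in result type for illustration only.
FakeResult = namedtuple("FakeResult", ["success"])
results = [FakeResult(True), FakeResult(True), FakeResult(False)]

successful = sum(r.success for r in results)   # 2
evaluated = len(results)                       # 3
unsuccessful = evaluated - successful          # 1
success = successful == evaluated              # False: one expectation failed
# Same guard as the ZeroDivisionError handler above, for an empty result list.
success_percent = successful / evaluated * 100 if evaluated else None
print(successful, evaluated, unsuccessful, success, round(success_percent, 1))
# 2 3 1 False 66.7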
Validator.__init__
( self, execution_engine, interactive_evaluation=True, expectation_suite=None, expectation_suite_name=None, data_context=None, batches=None, **kwargs, )
Initialize the DataAsset. :param profiler (profiler class) = None: The profiler that should be run on the data_asset to build a baseline expectation suite. Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the support for the profiler parameter not obvious from the signature.
Initialize the DataAsset.
def __init__( self, execution_engine, interactive_evaluation=True, expectation_suite=None, expectation_suite_name=None, data_context=None, batches=None, **kwargs, ): """ Initialize the DataAsset. :param profiler (profiler class) = None: The profiler that should be run on the data_asset to build a baseline expectation suite. Note: DataAsset is designed to support multiple inheritance (e.g. PandasDataset inherits from both a Pandas DataFrame and Dataset which inherits from DataAsset), so it accepts generic *args and **kwargs arguments so that they can also be passed to other parent classes. In python 2, there isn't a clean way to include all of *args, **kwargs, and a named kwarg...so we use the inelegant solution of popping from kwargs, leaving the support for the profiler parameter not obvious from the signature. """ self._data_context = data_context self._execution_engine = execution_engine self._expose_dataframe_methods = False self._validator_config = {} if batches is None: batches = tuple() self._batches = dict() for batch in batches: assert isinstance( batch, Batch ), "batches provided to Validator must be Great Expectations Batch objects" self._execution_engine.load_batch_data(batch.id, batch.data) self._batches[batch.id] = batch if len(batches) > 1: logger.warning( f"{len(batches)} batches will be added to this Validator. The batch_identifiers for the active " f"batch are {self.active_batch.batch_definition['batch_identifiers'].items()}" ) self.interactive_evaluation = interactive_evaluation self._initialize_expectations( expectation_suite=expectation_suite, expectation_suite_name=expectation_suite_name, ) self._default_expectation_args = { "include_config": True, "catch_exceptions": False, "result_format": "BASIC", } self._validator_config = {} # This special state variable tracks whether a validation run is going on, which will disable # saving expectation config objects self._active_validation = False if self._data_context and hasattr( self._data_context, "_expectation_explorer_manager" ): # TODO: verify flow of default expectation arguments self.set_default_expectation_argument("include_config", True)
[ "def", "__init__", "(", "self", ",", "execution_engine", ",", "interactive_evaluation", "=", "True", ",", "expectation_suite", "=", "None", ",", "expectation_suite_name", "=", "None", ",", "data_context", "=", "None", ",", "batches", "=", "None", ",", "*", "*", "kwargs", ",", ")", ":", "self", ".", "_data_context", "=", "data_context", "self", ".", "_execution_engine", "=", "execution_engine", "self", ".", "_expose_dataframe_methods", "=", "False", "self", ".", "_validator_config", "=", "{", "}", "if", "batches", "is", "None", ":", "batches", "=", "tuple", "(", ")", "self", ".", "_batches", "=", "dict", "(", ")", "for", "batch", "in", "batches", ":", "assert", "isinstance", "(", "batch", ",", "Batch", ")", ",", "\"batches provided to Validator must be Great Expectations Batch objects\"", "self", ".", "_execution_engine", ".", "load_batch_data", "(", "batch", ".", "id", ",", "batch", ".", "data", ")", "self", ".", "_batches", "[", "batch", ".", "id", "]", "=", "batch", "if", "len", "(", "batches", ")", ">", "1", ":", "logger", ".", "warning", "(", "f\"{len(batches)} batches will be added to this Validator. The batch_identifiers for the active \"", "f\"batch are {self.active_batch.batch_definition['batch_identifiers'].items()}\"", ")", "self", ".", "interactive_evaluation", "=", "interactive_evaluation", "self", ".", "_initialize_expectations", "(", "expectation_suite", "=", "expectation_suite", ",", "expectation_suite_name", "=", "expectation_suite_name", ",", ")", "self", ".", "_default_expectation_args", "=", "{", "\"include_config\"", ":", "True", ",", "\"catch_exceptions\"", ":", "False", ",", "\"result_format\"", ":", "\"BASIC\"", ",", "}", "self", ".", "_validator_config", "=", "{", "}", "# This special state variable tracks whether a validation run is going on, which will disable", "# saving expectation config objects", "self", ".", "_active_validation", "=", "False", "if", "self", ".", "_data_context", "and", "hasattr", "(", "self", ".", "_data_context", ",", "\"_expectation_explorer_manager\"", ")", ":", "# TODO: verify flow of default expectation arguments", "self", ".", "set_default_expectation_argument", "(", "\"include_config\"", ",", "True", ")" ]
[ 61, 4 ]
[ 127, 73 ]
python
en
['en', 'error', 'th']
False
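The docstring above (inherited from DataAsset) refers to popping a named option out of **kwargs so it can coexist with generic *args/**kwargs in a multiple-inheritance chain. A minimal standalone sketch of that pattern; all names here are illustrative, not GE API:

class Base:
    def __init__(self, *args, **kwargs):
        self.extras = kwargs   # whatever the parent is willing to accept

class Child(Base):
    def __init__(self, *args, **kwargs):
        # Pop the named option out of **kwargs before forwarding, so the
        # parent never sees it; this is the "popping from kwargs" workaround
        # the docstring above describes.
        self.profiler = kwargs.pop("profiler", None)
        super().__init__(*args, **kwargs)

c = Child(1, 2, profiler="demo", other="kept")
print(c.profiler, c.extras)   # demo {'other': 'kept'}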
Validator.__dir__
(self)
This custom magic method is used to enable expectation tab completion on Validator objects. It also allows users to call Pandas.DataFrame methods on Validator objects
This custom magic method is used to enable expectation tab completion on Validator objects. It also allows users to call Pandas.DataFrame methods on Validator objects
def __dir__(self): """ This custom magic method is used to enable expectation tab completion on Validator objects. It also allows users to call Pandas.DataFrame methods on Validator objects """ validator_attrs = set(super().__dir__()) class_expectation_impls = set(list_registered_expectation_implementations()) # execution_engine_expectation_impls = ( # { # attr_name # for attr_name in self.execution_engine.__dir__() # if attr_name.startswith("expect_") # } # if self.execution_engine # else set() # ) combined_dir = ( validator_attrs | class_expectation_impls # | execution_engine_expectation_impls ) if self._expose_dataframe_methods: combined_dir | set(dir(pd.DataFrame)) return list(combined_dir)
[ "def", "__dir__", "(", "self", ")", ":", "validator_attrs", "=", "set", "(", "super", "(", ")", ".", "__dir__", "(", ")", ")", "class_expectation_impls", "=", "set", "(", "list_registered_expectation_implementations", "(", ")", ")", "# execution_engine_expectation_impls = (", "# {", "# attr_name", "# for attr_name in self.execution_engine.__dir__()", "# if attr_name.startswith(\"expect_\")", "# }", "# if self.execution_engine", "# else set()", "# )", "combined_dir", "=", "(", "validator_attrs", "|", "class_expectation_impls", "# | execution_engine_expectation_impls", ")", "if", "self", ".", "_expose_dataframe_methods", ":", "combined_dir", "|", "set", "(", "dir", "(", "pd", ".", "DataFrame", ")", ")", "return", "list", "(", "combined_dir", ")" ]
[ 129, 4 ]
[ 155, 33 ]
python
en
['en', 'error', 'th']
False
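__dir__ builds tab completion by unioning attribute sets. Note that in the source above, `combined_dir | set(dir(pd.DataFrame))` discards its result (set union is not in-place), so the DataFrame names are apparently never added; `combined_dir |= ...` would be needed. A standalone sketch of the working pattern, with made-up expectation names:

class Completing:
    def __dir__(self):
        base = set(super().__dir__())
        expectations = {"expect_column_to_exist", "expect_table_row_count_to_be"}
        combined = base | expectations
        # In-place update; a bare `combined | other` would be a no-op.
        combined |= set(dir(dict))  # stand-in for set(dir(pd.DataFrame))
        return list(combined)

print([name for name in dir(Completing()) if name.startswith("expect_")])
# ['expect_column_to_exist', 'expect_table_row_count_to_be']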
Validator.validate_expectation
(self, name)
Given the name of an Expectation, obtains the Class-first Expectation implementation and utilizes the expectation's validate method to obtain a validation result. Also adds in the runtime configuration Args: name (str): The name of the Expectation being validated Returns: The Expectation's validation result
Given the name of an Expectation, obtains the Class-first Expectation implementation and utilizes the expectation's validate method to obtain a validation result. Also adds in the runtime configuration
def validate_expectation(self, name): """ Given the name of an Expectation, obtains the Class-first Expectation implementation and utilizes the expectation's validate method to obtain a validation result. Also adds in the runtime configuration Args: name (str): The name of the Expectation being validated Returns: The Expectation's validation result """ def inst_expectation(*args, **kwargs): try: expectation_impl = get_expectation_impl(name) allowed_config_keys = expectation_impl.get_allowed_config_keys() expectation_kwargs = recursively_convert_to_json_serializable(kwargs) meta = None # This section uses Expectation class' legacy_method_parameters attribute to maintain support for passing # positional arguments to expectation methods legacy_arg_names = expectation_impl.legacy_method_parameters.get( name, tuple() ) for idx, arg in enumerate(args): try: arg_name = legacy_arg_names[idx] if arg_name in allowed_config_keys: expectation_kwargs[arg_name] = arg if arg_name == "meta": meta = arg except IndexError: raise InvalidExpectationConfigurationError( f"Invalid positional argument: {arg}" ) # this is used so that exceptions are caught appropriately when they occur in expectation config basic_runtime_configuration = { k: v for k, v in kwargs.items() if k in ("result_format", "include_config", "catch_exceptions") } configuration = ExpectationConfiguration( expectation_type=name, kwargs=expectation_kwargs, meta=meta ) # runtime_configuration = configuration.get_runtime_kwargs() expectation = expectation_impl(configuration) """Given an implementation and a configuration for any Expectation, returns its validation result""" if not self.interactive_evaluation and not self._active_validation: validation_result = ExpectationValidationResult( expectation_config=copy.deepcopy(expectation.configuration) ) else: validation_result = expectation.validate( validator=self, evaluation_parameters=self._expectation_suite.evaluation_parameters, data_context=self._data_context, runtime_configuration=basic_runtime_configuration, ) # If validate has set active_validation to true, then we do not save the config to avoid # saving updating expectation configs to the same suite during validation runs if self._active_validation is True: stored_config = configuration.get_raw_configuration() else: # Append the expectation to the config. stored_config = self._expectation_suite.add_expectation( configuration.get_raw_configuration() ) # If there was no interactive evaluation, success will not have been computed. if validation_result.success is not None: # Add a "success" object to the config stored_config.success_on_last_run = validation_result.success if self._data_context is not None: validation_result = self._data_context.update_return_obj( self, validation_result ) except Exception as err: if basic_runtime_configuration.get("catch_exceptions"): raised_exception = True exception_traceback = traceback.format_exc() exception_message = "{}: {}".format(type(err).__name__, str(err)) validation_result = ExpectationValidationResult( expectation_config=configuration, success=False, ) validation_result.exception_info = { "raised_exception": raised_exception, "exception_message": exception_message, "exception_traceback": exception_traceback, } else: raise err return validation_result inst_expectation.__name__ = name return inst_expectation
[ "def", "validate_expectation", "(", "self", ",", "name", ")", ":", "def", "inst_expectation", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "expectation_impl", "=", "get_expectation_impl", "(", "name", ")", "allowed_config_keys", "=", "expectation_impl", ".", "get_allowed_config_keys", "(", ")", "expectation_kwargs", "=", "recursively_convert_to_json_serializable", "(", "kwargs", ")", "meta", "=", "None", "# This section uses Expectation class' legacy_method_parameters attribute to maintain support for passing", "# positional arguments to expectation methods", "legacy_arg_names", "=", "expectation_impl", ".", "legacy_method_parameters", ".", "get", "(", "name", ",", "tuple", "(", ")", ")", "for", "idx", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "try", ":", "arg_name", "=", "legacy_arg_names", "[", "idx", "]", "if", "arg_name", "in", "allowed_config_keys", ":", "expectation_kwargs", "[", "arg_name", "]", "=", "arg", "if", "arg_name", "==", "\"meta\"", ":", "meta", "=", "arg", "except", "IndexError", ":", "raise", "InvalidExpectationConfigurationError", "(", "f\"Invalid positional argument: {arg}\"", ")", "# this is used so that exceptions are caught appropriately when they occur in expectation config", "basic_runtime_configuration", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "k", "in", "(", "\"result_format\"", ",", "\"include_config\"", ",", "\"catch_exceptions\"", ")", "}", "configuration", "=", "ExpectationConfiguration", "(", "expectation_type", "=", "name", ",", "kwargs", "=", "expectation_kwargs", ",", "meta", "=", "meta", ")", "# runtime_configuration = configuration.get_runtime_kwargs()", "expectation", "=", "expectation_impl", "(", "configuration", ")", "\"\"\"Given an implementation and a configuration for any Expectation, returns its validation result\"\"\"", "if", "not", "self", ".", "interactive_evaluation", "and", "not", "self", ".", "_active_validation", ":", "validation_result", "=", "ExpectationValidationResult", "(", "expectation_config", "=", "copy", ".", "deepcopy", "(", "expectation", ".", "configuration", ")", ")", "else", ":", "validation_result", "=", "expectation", ".", "validate", "(", "validator", "=", "self", ",", "evaluation_parameters", "=", "self", ".", "_expectation_suite", ".", "evaluation_parameters", ",", "data_context", "=", "self", ".", "_data_context", ",", "runtime_configuration", "=", "basic_runtime_configuration", ",", ")", "# If validate has set active_validation to true, then we do not save the config to avoid", "# saving updating expectation configs to the same suite during validation runs", "if", "self", ".", "_active_validation", "is", "True", ":", "stored_config", "=", "configuration", ".", "get_raw_configuration", "(", ")", "else", ":", "# Append the expectation to the config.", "stored_config", "=", "self", ".", "_expectation_suite", ".", "add_expectation", "(", "configuration", ".", "get_raw_configuration", "(", ")", ")", "# If there was no interactive evaluation, success will not have been computed.", "if", "validation_result", ".", "success", "is", "not", "None", ":", "# Add a \"success\" object to the config", "stored_config", ".", "success_on_last_run", "=", "validation_result", ".", "success", "if", "self", ".", "_data_context", "is", "not", "None", ":", "validation_result", "=", "self", ".", "_data_context", ".", "update_return_obj", "(", "self", ",", "validation_result", ")", "except", "Exception", "as", "err", ":", "if", "basic_runtime_configuration", 
".", "get", "(", "\"catch_exceptions\"", ")", ":", "raised_exception", "=", "True", "exception_traceback", "=", "traceback", ".", "format_exc", "(", ")", "exception_message", "=", "\"{}: {}\"", ".", "format", "(", "type", "(", "err", ")", ".", "__name__", ",", "str", "(", "err", ")", ")", "validation_result", "=", "ExpectationValidationResult", "(", "expectation_config", "=", "configuration", ",", "success", "=", "False", ",", ")", "validation_result", ".", "exception_info", "=", "{", "\"raised_exception\"", ":", "raised_exception", ",", "\"exception_message\"", ":", "exception_message", ",", "\"exception_traceback\"", ":", "exception_traceback", ",", "}", "else", ":", "raise", "err", "return", "validation_result", "inst_expectation", ".", "__name__", "=", "name", "return", "inst_expectation" ]
[ 179, 4 ]
[ 283, 31 ]
python
en
['en', 'error', 'th']
False
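At its core, validate_expectation is a closure factory: it returns a per-expectation function and renames it so tab completion and tracebacks show the expectation's name. A minimal standalone sketch of that shape; the registry and names are illustrative:

def make_runner(name, registry):
    def runner(*args, **kwargs):
        impl = registry[name]   # plays the role of get_expectation_impl(name)
        return impl(*args, **kwargs)
    # Rename the closure so it presents as the expectation itself.
    runner.__name__ = name
    return runner

registry = {"expect_positive": lambda x: x > 0}
expect_positive = make_runner("expect_positive", registry)
print(expect_positive.__name__, expect_positive(5))   # expect_positive True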
Validator.execution_engine
(self)
Returns the execution engine being used by the validator at the given time
Returns the execution engine being used by the validator at the given time
def execution_engine(self): """Returns the execution engine being used by the validator at the given time""" return self._execution_engine
[ "def", "execution_engine", "(", "self", ")", ":", "return", "self", ".", "_execution_engine" ]
[ 286, 4 ]
[ 288, 37 ]
python
en
['en', 'en', 'en']
True
Validator.list_available_expectation_types
(self)
Returns a list of all expectations available to the validator
Returns a list of all expectations available to the validator
def list_available_expectation_types(self): """Returns a list of all expectations available to the validator""" keys = dir(self) return [ expectation for expectation in keys if expectation.startswith("expect_") ]
[ "def", "list_available_expectation_types", "(", "self", ")", ":", "keys", "=", "dir", "(", "self", ")", "return", "[", "expectation", "for", "expectation", "in", "keys", "if", "expectation", ".", "startswith", "(", "\"expect_\"", ")", "]" ]
[ 290, 4 ]
[ 295, 9 ]
python
en
['en', 'en', 'en']
True
Validator.get_metrics
(self, metrics: Dict[str, MetricConfiguration])
Return a dictionary with the requested metrics
Return a dictionary with the requested metrics
def get_metrics(self, metrics: Dict[str, MetricConfiguration]) -> Dict[str, Any]: """Return a dictionary with the requested metrics""" graph = ValidationGraph() resolved_metrics = {} for metric_name, metric_configuration in metrics.items(): provider_cls, _ = get_metric_provider( metric_configuration.metric_name, self.execution_engine ) for key in provider_cls.domain_keys: if ( key not in metric_configuration.metric_domain_kwargs and key in provider_cls.default_kwarg_values ): metric_configuration.metric_domain_kwargs[ key ] = provider_cls.default_kwarg_values[key] for key in provider_cls.value_keys: if ( key not in metric_configuration.metric_value_kwargs and key in provider_cls.default_kwarg_values ): metric_configuration.metric_value_kwargs[ key ] = provider_cls.default_kwarg_values[key] self.build_metric_dependency_graph( graph, child_node=metric_configuration, configuration=None, execution_engine=self._execution_engine, runtime_configuration=None, ) self.resolve_validation_graph(graph, resolved_metrics) return { metric_name: resolved_metrics[metric_configuration.id] for (metric_name, metric_configuration) in metrics.items() }
[ "def", "get_metrics", "(", "self", ",", "metrics", ":", "Dict", "[", "str", ",", "MetricConfiguration", "]", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "graph", "=", "ValidationGraph", "(", ")", "resolved_metrics", "=", "{", "}", "for", "metric_name", ",", "metric_configuration", "in", "metrics", ".", "items", "(", ")", ":", "provider_cls", ",", "_", "=", "get_metric_provider", "(", "metric_configuration", ".", "metric_name", ",", "self", ".", "execution_engine", ")", "for", "key", "in", "provider_cls", ".", "domain_keys", ":", "if", "(", "key", "not", "in", "metric_configuration", ".", "metric_domain_kwargs", "and", "key", "in", "provider_cls", ".", "default_kwarg_values", ")", ":", "metric_configuration", ".", "metric_domain_kwargs", "[", "key", "]", "=", "provider_cls", ".", "default_kwarg_values", "[", "key", "]", "for", "key", "in", "provider_cls", ".", "value_keys", ":", "if", "(", "key", "not", "in", "metric_configuration", ".", "metric_value_kwargs", "and", "key", "in", "provider_cls", ".", "default_kwarg_values", ")", ":", "metric_configuration", ".", "metric_value_kwargs", "[", "key", "]", "=", "provider_cls", ".", "default_kwarg_values", "[", "key", "]", "self", ".", "build_metric_dependency_graph", "(", "graph", ",", "child_node", "=", "metric_configuration", ",", "configuration", "=", "None", ",", "execution_engine", "=", "self", ".", "_execution_engine", ",", "runtime_configuration", "=", "None", ",", ")", "self", ".", "resolve_validation_graph", "(", "graph", ",", "resolved_metrics", ")", "return", "{", "metric_name", ":", "resolved_metrics", "[", "metric_configuration", ".", "id", "]", "for", "(", "metric_name", ",", "metric_configuration", ")", "in", "metrics", ".", "items", "(", ")", "}" ]
[ 297, 4 ]
[ 332, 9 ]
python
en
['en', 'en', 'en']
True
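A hedged usage sketch of get_metrics (and, via the wrapper in the next record, get_metric): it assumes a `validator` already built against a batch (e.g. from a DataContext), and that MetricConfiguration is importable from great_expectations.validator.validation_graph as in this era of the codebase; `table.row_count` is a built-in whole-table metric. Treat this as a sketch, not runnable standalone:

from great_expectations.validator.validation_graph import MetricConfiguration

# `validator` is assumed to exist already (e.g. obtained from a DataContext).
row_count = validator.get_metrics(
    {
        "row_count": MetricConfiguration(
            metric_name="table.row_count",
            metric_domain_kwargs={},   # empty domain kwargs -> whole table
        )
    }
)["row_count"]
print(row_count)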
Validator.get_metric
(self, metric: MetricConfiguration)
Return the value of the requested metric.
Return the value of the requested metric.
def get_metric(self, metric: MetricConfiguration) -> Any: """return the value of the requested metric.""" return self.get_metrics({"_": metric})["_"]
[ "def", "get_metric", "(", "self", ",", "metric", ":", "MetricConfiguration", ")", "->", "Any", ":", "return", "self", ".", "get_metrics", "(", "{", "\"_\"", ":", "metric", "}", ")", "[", "\"_\"", "]" ]
[ 334, 4 ]
[ 336, 51 ]
python
en
['en', 'en', 'en']
True
Validator.build_metric_dependency_graph
( self, graph: ValidationGraph, child_node: MetricConfiguration, configuration: Optional[ExpectationConfiguration], execution_engine: "ExecutionEngine", parent_node: Optional[MetricConfiguration] = None, runtime_configuration: Optional[dict] = None, )
Obtains domain and value keys for metrics and proceeds to add these metrics to the validation graph until all metrics have been added.
Obtains domain and value keys for metrics and proceeds to add these metrics to the validation graph until all metrics have been added.
def build_metric_dependency_graph( self, graph: ValidationGraph, child_node: MetricConfiguration, configuration: Optional[ExpectationConfiguration], execution_engine: "ExecutionEngine", parent_node: Optional[MetricConfiguration] = None, runtime_configuration: Optional[dict] = None, ) -> None: """Obtain domain and value keys for metrics and proceeds to add these metrics to the validation graph until all metrics have been added.""" # metric_kwargs = get_metric_kwargs(metric_name) metric_impl = get_metric_provider( child_node.metric_name, execution_engine=execution_engine )[0] metric_dependencies = metric_impl.get_evaluation_dependencies( metric=child_node, configuration=configuration, execution_engine=execution_engine, runtime_configuration=runtime_configuration, ) child_node.metric_dependencies = metric_dependencies if parent_node: graph.add( MetricEdge( parent_node, child_node, ) ) if len(metric_dependencies) == 0: graph.add( MetricEdge( child_node, None, ) ) else: for metric_dependency in metric_dependencies.values(): if metric_dependency.id == child_node.id: logger.warning( f"Metric {str(child_node.id)} has created a circular dependency" ) continue self.build_metric_dependency_graph( graph, metric_dependency, configuration, execution_engine, child_node, runtime_configuration=runtime_configuration, )
[ "def", "build_metric_dependency_graph", "(", "self", ",", "graph", ":", "ValidationGraph", ",", "child_node", ":", "MetricConfiguration", ",", "configuration", ":", "Optional", "[", "ExpectationConfiguration", "]", ",", "execution_engine", ":", "\"ExecutionEngine\"", ",", "parent_node", ":", "Optional", "[", "MetricConfiguration", "]", "=", "None", ",", "runtime_configuration", ":", "Optional", "[", "dict", "]", "=", "None", ",", ")", "->", "None", ":", "# metric_kwargs = get_metric_kwargs(metric_name)", "metric_impl", "=", "get_metric_provider", "(", "child_node", ".", "metric_name", ",", "execution_engine", "=", "execution_engine", ")", "[", "0", "]", "metric_dependencies", "=", "metric_impl", ".", "get_evaluation_dependencies", "(", "metric", "=", "child_node", ",", "configuration", "=", "configuration", ",", "execution_engine", "=", "execution_engine", ",", "runtime_configuration", "=", "runtime_configuration", ",", ")", "child_node", ".", "metric_dependencies", "=", "metric_dependencies", "if", "parent_node", ":", "graph", ".", "add", "(", "MetricEdge", "(", "parent_node", ",", "child_node", ",", ")", ")", "if", "len", "(", "metric_dependencies", ")", "==", "0", ":", "graph", ".", "add", "(", "MetricEdge", "(", "child_node", ",", "None", ",", ")", ")", "else", ":", "for", "metric_dependency", "in", "metric_dependencies", ".", "values", "(", ")", ":", "if", "metric_dependency", ".", "id", "==", "child_node", ".", "id", ":", "logger", ".", "warning", "(", "f\"Metric {str(child_node.id)} has created a circular dependency\"", ")", "continue", "self", ".", "build_metric_dependency_graph", "(", "graph", ",", "metric_dependency", ",", "configuration", ",", "execution_engine", ",", "child_node", ",", "runtime_configuration", "=", "runtime_configuration", ",", ")" ]
[ 338, 4 ]
[ 392, 17 ]
python
en
['en', 'en', 'en']
True
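The recursion above can be shown on plain tuples: every parent/child pair becomes an edge, leaves get a (node, None) edge (like MetricEdge(child_node, None)), and direct self-dependencies are skipped. A self-contained toy version:

def build_graph(edges, node, deps_of, parent=None):
    """Toy dependency-graph builder mirroring build_metric_dependency_graph."""
    if parent is not None:
        edges.append((parent, node))
    children = deps_of.get(node, [])
    if not children:
        edges.append((node, None))   # leaf: nothing left to resolve
        return
    for child in children:
        if child == node:
            continue   # same guard as the circular-dependency warning above
        build_graph(edges, child, deps_of, parent=node)

edges = []
build_graph(edges, "mean", {"mean": ["sum", "count"]})
print(edges)
# [('mean', 'sum'), ('sum', None), ('mean', 'count'), ('count', None)]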
Validator.graph_validate
( self, configurations: List[ExpectationConfiguration], metrics: dict = None, runtime_configuration: dict = None, )
Obtains validation dependencies for each metric using the implementation of their associated expectation, then proceeds to add these dependencies to the validation graph, supplies readily available metric implementations to fulfill current metric requirements, and validates these metrics. Args: batches (Dict[str, Batch]): A dictionary of batches and their corresponding names that will be used for Expectation Validation. configurations (List[ExpectationConfiguration]): A list of needed Expectation Configurations that will be used to supply domain and values for metrics. execution_engine (ExecutionEngine): An Execution Engine that will be used for extraction of metrics from the registry. metrics (dict): A dictionary of currently registered metrics in the registry. runtime_configuration (dict): A dictionary of runtime keyword arguments, controlling semantics such as the result_format. Returns: A list of Validations, validating that all necessary metrics are available.
Obtains validation dependencies for each metric using the implementation of their associated expectation, then proceeds to add these dependencies to the validation graph, supplies readily available metric implementations to fulfill current metric requirements, and validates these metrics.
def graph_validate( self, configurations: List[ExpectationConfiguration], metrics: dict = None, runtime_configuration: dict = None, ) -> List[ExpectationValidationResult]: """Obtains validation dependencies for each metric using the implementation of their associated expectation, then proceeds to add these dependencies to the validation graph, supply readily available metric implementations to fulfill current metric requirements, and validate these metrics. Args: batches (Dict[str, Batch]): A Dictionary of batches and their corresponding names that will be used for Expectation Validation. configurations(List[ExpectationConfiguration]): A list of needed Expectation Configurations that will be used to supply domain and values for metrics. execution_engine (ExecutionEngine): An Execution Engine that will be used for extraction of metrics from the registry. metrics (dict): A list of currently registered metrics in the registry runtime_configuration (dict): A dictionary of runtime keyword arguments, controlling semantics such as the result_format. Returns: A list of Validations, validating that all necessary metrics are available. """ graph = ValidationGraph() if runtime_configuration is None: runtime_configuration = dict() if runtime_configuration.get("catch_exceptions", True): catch_exceptions = True else: catch_exceptions = False processed_configurations = [] evrs = [] for configuration in configurations: # Validating try: assert ( configuration.expectation_type is not None ), "Given configuration should include expectation type" except AssertionError as e: raise InvalidExpectationConfigurationError(str(e)) expectation_impl = get_expectation_impl(configuration.expectation_type) validation_dependencies = expectation_impl().get_validation_dependencies( configuration, self._execution_engine, runtime_configuration )["metrics"] try: for metric in validation_dependencies.values(): self.build_metric_dependency_graph( graph, metric, configuration, self._execution_engine, runtime_configuration=runtime_configuration, ) processed_configurations.append(configuration) except Exception as err: if catch_exceptions: raised_exception = True exception_traceback = traceback.format_exc() result = ExpectationValidationResult( success=False, exception_info={ "raised_exception": raised_exception, "exception_traceback": exception_traceback, "exception_message": str(err), }, expectation_config=configuration, ) evrs.append(result) else: raise err if metrics is None: metrics = dict() metrics = self.resolve_validation_graph(graph, metrics, runtime_configuration) for configuration in processed_configurations: try: result = configuration.metrics_validate( metrics, execution_engine=self._execution_engine, runtime_configuration=runtime_configuration, ) evrs.append(result) except Exception as err: if catch_exceptions: raised_exception = True exception_traceback = traceback.format_exc() result = ExpectationValidationResult( success=False, exception_info={ "raised_exception": raised_exception, "exception_traceback": exception_traceback, "exception_message": str(err), }, expectation_config=configuration, ) evrs.append(result) else: raise err return evrs
[ "def", "graph_validate", "(", "self", ",", "configurations", ":", "List", "[", "ExpectationConfiguration", "]", ",", "metrics", ":", "dict", "=", "None", ",", "runtime_configuration", ":", "dict", "=", "None", ",", ")", "->", "List", "[", "ExpectationValidationResult", "]", ":", "graph", "=", "ValidationGraph", "(", ")", "if", "runtime_configuration", "is", "None", ":", "runtime_configuration", "=", "dict", "(", ")", "if", "runtime_configuration", ".", "get", "(", "\"catch_exceptions\"", ",", "True", ")", ":", "catch_exceptions", "=", "True", "else", ":", "catch_exceptions", "=", "False", "processed_configurations", "=", "[", "]", "evrs", "=", "[", "]", "for", "configuration", "in", "configurations", ":", "# Validating", "try", ":", "assert", "(", "configuration", ".", "expectation_type", "is", "not", "None", ")", ",", "\"Given configuration should include expectation type\"", "except", "AssertionError", "as", "e", ":", "raise", "InvalidExpectationConfigurationError", "(", "str", "(", "e", ")", ")", "expectation_impl", "=", "get_expectation_impl", "(", "configuration", ".", "expectation_type", ")", "validation_dependencies", "=", "expectation_impl", "(", ")", ".", "get_validation_dependencies", "(", "configuration", ",", "self", ".", "_execution_engine", ",", "runtime_configuration", ")", "[", "\"metrics\"", "]", "try", ":", "for", "metric", "in", "validation_dependencies", ".", "values", "(", ")", ":", "self", ".", "build_metric_dependency_graph", "(", "graph", ",", "metric", ",", "configuration", ",", "self", ".", "_execution_engine", ",", "runtime_configuration", "=", "runtime_configuration", ",", ")", "processed_configurations", ".", "append", "(", "configuration", ")", "except", "Exception", "as", "err", ":", "if", "catch_exceptions", ":", "raised_exception", "=", "True", "exception_traceback", "=", "traceback", ".", "format_exc", "(", ")", "result", "=", "ExpectationValidationResult", "(", "success", "=", "False", ",", "exception_info", "=", "{", "\"raised_exception\"", ":", "raised_exception", ",", "\"exception_traceback\"", ":", "exception_traceback", ",", "\"exception_message\"", ":", "str", "(", "err", ")", ",", "}", ",", "expectation_config", "=", "configuration", ",", ")", "evrs", ".", "append", "(", "result", ")", "else", ":", "raise", "err", "if", "metrics", "is", "None", ":", "metrics", "=", "dict", "(", ")", "metrics", "=", "self", ".", "resolve_validation_graph", "(", "graph", ",", "metrics", ",", "runtime_configuration", ")", "for", "configuration", "in", "processed_configurations", ":", "try", ":", "result", "=", "configuration", ".", "metrics_validate", "(", "metrics", ",", "execution_engine", "=", "self", ".", "_execution_engine", ",", "runtime_configuration", "=", "runtime_configuration", ",", ")", "evrs", ".", "append", "(", "result", ")", "except", "Exception", "as", "err", ":", "if", "catch_exceptions", ":", "raised_exception", "=", "True", "exception_traceback", "=", "traceback", ".", "format_exc", "(", ")", "result", "=", "ExpectationValidationResult", "(", "success", "=", "False", ",", "exception_info", "=", "{", "\"raised_exception\"", ":", "raised_exception", ",", "\"exception_traceback\"", ":", "exception_traceback", ",", "\"exception_message\"", ":", "str", "(", "err", ")", ",", "}", ",", "expectation_config", "=", "configuration", ",", ")", "evrs", ".", "append", "(", "result", ")", "else", ":", "raise", "err", "return", "evrs" ]
[ 394, 4 ]
[ 499, 19 ]
python
en
['en', 'en', 'en']
True
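graph_validate uses the same capture-or-raise error handling twice: when catch_exceptions is set, a failure becomes a failed result carrying exception_info instead of propagating. A standalone sketch of that shape, with a plain dict standing in for ExpectationValidationResult:

import traceback

def run_with_capture(fn, catch_exceptions=True):
    try:
        return {"success": True, "result": fn()}
    except Exception as err:
        if not catch_exceptions:
            raise
        return {
            "success": False,
            "exception_info": {
                "raised_exception": True,
                "exception_traceback": traceback.format_exc(),
                "exception_message": str(err),
            },
        }

evr = run_with_capture(lambda: 1 / 0)
print(evr["success"], evr["exception_info"]["exception_message"])
# False division by zero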
Validator._parse_validation_graph
(self, validation_graph, metrics)
Given a validation graph, returns the ready and needed metrics necessary for validation, found by traversing the validation graph's edges (a graph structure of metric ids).
Given a validation graph, returns the ready and needed metrics necessary for validation, found by traversing the validation graph's edges (a graph structure of metric ids).
def _parse_validation_graph(self, validation_graph, metrics): """Given validation graph, returns the ready and needed metrics necessary for validation using a traversal of validation graph (a graph structure of metric ids) edges""" unmet_dependency_ids = set() unmet_dependency = set() maybe_ready_ids = set() maybe_ready = set() for edge in validation_graph.edges: if edge.left.id not in metrics: if edge.right is None or edge.right.id in metrics: if edge.left.id not in maybe_ready_ids: maybe_ready_ids.add(edge.left.id) maybe_ready.add(edge.left) else: if edge.left.id not in unmet_dependency_ids: unmet_dependency_ids.add(edge.left.id) unmet_dependency.add(edge.left) return maybe_ready - unmet_dependency, unmet_dependency
[ "def", "_parse_validation_graph", "(", "self", ",", "validation_graph", ",", "metrics", ")", ":", "unmet_dependency_ids", "=", "set", "(", ")", "unmet_dependency", "=", "set", "(", ")", "maybe_ready_ids", "=", "set", "(", ")", "maybe_ready", "=", "set", "(", ")", "for", "edge", "in", "validation_graph", ".", "edges", ":", "if", "edge", ".", "left", ".", "id", "not", "in", "metrics", ":", "if", "edge", ".", "right", "is", "None", "or", "edge", ".", "right", ".", "id", "in", "metrics", ":", "if", "edge", ".", "left", ".", "id", "not", "in", "maybe_ready_ids", ":", "maybe_ready_ids", ".", "add", "(", "edge", ".", "left", ".", "id", ")", "maybe_ready", ".", "add", "(", "edge", ".", "left", ")", "else", ":", "if", "edge", ".", "left", ".", "id", "not", "in", "unmet_dependency_ids", ":", "unmet_dependency_ids", ".", "add", "(", "edge", ".", "left", ".", "id", ")", "unmet_dependency", ".", "add", "(", "edge", ".", "left", ")", "return", "maybe_ready", "-", "unmet_dependency", ",", "unmet_dependency" ]
[ 528, 4 ]
[ 547, 63 ]
python
en
['en', 'en', 'en']
True
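The ready/needed split can be demonstrated on (left, right) edge tuples: an edge whose right side is None or already computed marks its left side as maybe ready, but a single unmet dependency anywhere disqualifies it. A self-contained toy version:

def parse_graph(edges, computed):
    maybe_ready, unmet = set(), set()
    for left, right in edges:
        if left in computed:
            continue   # already resolved
        if right is None or right in computed:
            maybe_ready.add(left)
        else:
            unmet.add(left)
    # Ready only if no edge recorded an unmet dependency for the metric.
    return maybe_ready - unmet, unmet

edges = [("mean", "sum"), ("mean", "count"), ("sum", None), ("count", None)]
ready, needed = parse_graph(edges, computed={"sum"})
print(sorted(ready), sorted(needed))   # ['count'] ['mean']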
Validator._resolve_metrics
( self, execution_engine: "ExecutionEngine", metrics_to_resolve: Iterable[MetricConfiguration], metrics: Dict, runtime_configuration: dict = None, )
A means of accessing the Execution Engine's resolve_metrics method, where missing metric configurations are resolved
A means of accessing the Execution Engine's resolve_metrics method, where missing metric configurations are resolved
def _resolve_metrics( self, execution_engine: "ExecutionEngine", metrics_to_resolve: Iterable[MetricConfiguration], metrics: Dict, runtime_configuration: dict = None, ): """A means of accessing the Execution Engine's resolve_metrics method, where missing metric configurations are resolved""" return execution_engine.resolve_metrics( metrics_to_resolve, metrics, runtime_configuration )
[ "def", "_resolve_metrics", "(", "self", ",", "execution_engine", ":", "\"ExecutionEngine\"", ",", "metrics_to_resolve", ":", "Iterable", "[", "MetricConfiguration", "]", ",", "metrics", ":", "Dict", ",", "runtime_configuration", ":", "dict", "=", "None", ",", ")", ":", "return", "execution_engine", ".", "resolve_metrics", "(", "metrics_to_resolve", ",", "metrics", ",", "runtime_configuration", ")" ]
[ 549, 4 ]
[ 560, 9 ]
python
en
['en', 'en', 'en']
True
Validator._initialize_expectations
( self, expectation_suite: ExpectationSuite = None, expectation_suite_name: str = None, )
Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`. In addition, this always sets the `default_expectation_args` to: `include_config`: False, `catch_exceptions`: False, `output_format`: 'BASIC' By default, initializes data_asset_type to the name of the implementing class, but subclasses that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their interoperability. Args: expectation_suite (json): \ A json-serializable expectation config. \ If None, creates default `_expectation_suite` with an empty list of expectations and \ key value `data_asset_name` as `data_asset_name`. expectation_suite_name (string): \ The name to assign to the `expectation_suite.expectation_suite_name` Returns: None
Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`. In addition, this always sets the `default_expectation_args` to: `include_config`: False, `catch_exceptions`: False, `output_format`: 'BASIC'
def _initialize_expectations( self, expectation_suite: ExpectationSuite = None, expectation_suite_name: str = None, ): """Instantiates `_expectation_suite` as empty by default or with a specified expectation `config`. In addition, this always sets the `default_expectation_args` to: `include_config`: False, `catch_exceptions`: False, `output_format`: 'BASIC' By default, initializes data_asset_type to the name of the implementing class, but subclasses that have interoperable semantics (e.g. Dataset) may override that parameter to clarify their interoperability. Args: expectation_suite (json): \ A json-serializable expectation config. \ If None, creates default `_expectation_suite` with an empty list of expectations and \ key value `data_asset_name` as `data_asset_name`. expectation_suite_name (string): \ The name to assign to the `expectation_suite.expectation_suite_name` Returns: None """ # Checking type of expectation_suite. # Check for expectation_suite_name is already done by ExpectationSuiteIdentifier if expectation_suite and not isinstance(expectation_suite, ExpectationSuite): raise TypeError( "expectation_suite must be of type ExpectationSuite, not {}".format( type(expectation_suite) ) ) if expectation_suite is not None: if isinstance(expectation_suite, dict): expectation_suite = expectationSuiteSchema.load(expectation_suite) else: expectation_suite = copy.deepcopy(expectation_suite) self._expectation_suite = expectation_suite if expectation_suite_name is not None: if ( self._expectation_suite.expectation_suite_name != expectation_suite_name ): logger.warning( "Overriding existing expectation_suite_name {n1} with new name {n2}".format( n1=self._expectation_suite.expectation_suite_name, n2=expectation_suite_name, ) ) self._expectation_suite.expectation_suite_name = expectation_suite_name else: if expectation_suite_name is None: expectation_suite_name = "default" self._expectation_suite = ExpectationSuite( expectation_suite_name=expectation_suite_name ) self._expectation_suite.execution_engine_type = type( self.execution_engine ).__name__
[ "def", "_initialize_expectations", "(", "self", ",", "expectation_suite", ":", "ExpectationSuite", "=", "None", ",", "expectation_suite_name", ":", "str", "=", "None", ",", ")", ":", "# Checking type of expectation_suite.", "# Check for expectation_suite_name is already done by ExpectationSuiteIdentifier", "if", "expectation_suite", "and", "not", "isinstance", "(", "expectation_suite", ",", "ExpectationSuite", ")", ":", "raise", "TypeError", "(", "\"expectation_suite must be of type ExpectationSuite, not {}\"", ".", "format", "(", "type", "(", "expectation_suite", ")", ")", ")", "if", "expectation_suite", "is", "not", "None", ":", "if", "isinstance", "(", "expectation_suite", ",", "dict", ")", ":", "expectation_suite", "=", "expectationSuiteSchema", ".", "load", "(", "expectation_suite", ")", "else", ":", "expectation_suite", "=", "copy", ".", "deepcopy", "(", "expectation_suite", ")", "self", ".", "_expectation_suite", "=", "expectation_suite", "if", "expectation_suite_name", "is", "not", "None", ":", "if", "(", "self", ".", "_expectation_suite", ".", "expectation_suite_name", "!=", "expectation_suite_name", ")", ":", "logger", ".", "warning", "(", "\"Overriding existing expectation_suite_name {n1} with new name {n2}\"", ".", "format", "(", "n1", "=", "self", ".", "_expectation_suite", ".", "expectation_suite_name", ",", "n2", "=", "expectation_suite_name", ",", ")", ")", "self", ".", "_expectation_suite", ".", "expectation_suite_name", "=", "expectation_suite_name", "else", ":", "if", "expectation_suite_name", "is", "None", ":", "expectation_suite_name", "=", "\"default\"", "self", ".", "_expectation_suite", "=", "ExpectationSuite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "self", ".", "_expectation_suite", ".", "execution_engine_type", "=", "type", "(", "self", ".", "execution_engine", ")", ".", "__name__" ]
[ 562, 4 ]
[ 626, 18 ]
python
en
['en', 'en', 'en']
True
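The branching in _initialize_expectations reduces to: deep-copy a provided suite and warn when overriding its name, otherwise create an empty suite named "default". A toy standalone version with a stand-in Suite class:

import copy
import logging

logging.basicConfig()
logger = logging.getLogger(__name__)

class Suite:   # stand-in for ExpectationSuite
    def __init__(self, name):
        self.name = name

def init_suite(suite=None, name=None):
    if suite is not None:
        suite = copy.deepcopy(suite)   # never mutate the caller's suite
        if name is not None and suite.name != name:
            logger.warning(
                "Overriding existing expectation_suite_name %s with new name %s",
                suite.name, name,
            )
            suite.name = name
        return suite
    return Suite(name if name is not None else "default")

print(init_suite().name)                      # default
print(init_suite(Suite("old"), "new").name)   # new (plus a warning)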
Validator.append_expectation
(self, expectation_config)
This method is a thin wrapper for ExpectationSuite.append_expectation
This method is a thin wrapper for ExpectationSuite.append_expectation
def append_expectation(self, expectation_config): """This method is a thin wrapper for ExpectationSuite.append_expectation""" warnings.warn( "append_expectation is deprecated, and will be removed in a future release. " + "Please use ExpectationSuite.add_expectation instead.", DeprecationWarning, ) self._expectation_suite.append_expectation(expectation_config)
[ "def", "append_expectation", "(", "self", ",", "expectation_config", ")", ":", "warnings", ".", "warn", "(", "\"append_expectation is deprecated, and will be removed in a future release. \"", "+", "\"Please use ExpectationSuite.add_expectation instead.\"", ",", "DeprecationWarning", ",", ")", "self", ".", "_expectation_suite", ".", "append_expectation", "(", "expectation_config", ")" ]
[ 628, 4 ]
[ 635, 70 ]
python
en
['en', 'en', 'en']
True
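This and the next three records (find_expectation_indexes, find_expectations, remove_expectation) share one shape: emit a DeprecationWarning, then forward to the ExpectationSuite. A self-contained sketch of the pattern with stand-in classes:

import warnings

class Suite:   # stand-in for ExpectationSuite
    def add_expectation(self, config):
        return f"added {config}"

class Wrapper:
    def __init__(self):
        self._suite = Suite()

    def append_expectation(self, config):
        warnings.warn(
            "append_expectation is deprecated; use Suite.add_expectation instead.",
            DeprecationWarning,
        )
        return self._suite.add_expectation(config)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(Wrapper().append_expectation("cfg"), len(caught))   # added cfg 1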
Validator.find_expectation_indexes
( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", )
This method is a thin wrapper for ExpectationSuite.find_expectation_indexes
This method is a thin wrapper for ExpectationSuite.find_expectation_indexes
def find_expectation_indexes( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", ) -> List[int]: """This method is a thin wrapper for ExpectationSuite.find_expectation_indexes""" warnings.warn( "find_expectation_indexes is deprecated, and will be removed in a future release. " + "Please use ExpectationSuite.find_expectation_indexes instead.", DeprecationWarning, ) return self._expectation_suite.find_expectation_indexes( expectation_configuration=expectation_configuration, match_type=match_type )
[ "def", "find_expectation_indexes", "(", "self", ",", "expectation_configuration", ":", "ExpectationConfiguration", ",", "match_type", ":", "str", "=", "\"domain\"", ",", ")", "->", "List", "[", "int", "]", ":", "warnings", ".", "warn", "(", "\"find_expectation_indexes is deprecated, and will be removed in a future release. \"", "+", "\"Please use ExpectationSuite.find_expectation_indexes instead.\"", ",", "DeprecationWarning", ",", ")", "return", "self", ".", "_expectation_suite", ".", "find_expectation_indexes", "(", "expectation_configuration", "=", "expectation_configuration", ",", "match_type", "=", "match_type", ")" ]
[ 637, 4 ]
[ 650, 9 ]
python
en
['en', 'en', 'en']
True
Validator.find_expectations
( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", )
This method is a thin wrapper for ExpectationSuite.find_expectations()
This method is a thin wrapper for ExpectationSuite.find_expectations()
def find_expectations( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", ) -> List[ExpectationConfiguration]: """This method is a thin wrapper for ExpectationSuite.find_expectations()""" warnings.warn( "find_expectations is deprecated, and will be removed in a future release. " + "Please use ExpectationSuite.find_expectation_indexes instead.", DeprecationWarning, ) return self._expectation_suite.find_expectations( expectation_configuration=expectation_configuration, match_type=match_type )
[ "def", "find_expectations", "(", "self", ",", "expectation_configuration", ":", "ExpectationConfiguration", ",", "match_type", ":", "str", "=", "\"domain\"", ",", ")", "->", "List", "[", "ExpectationConfiguration", "]", ":", "warnings", ".", "warn", "(", "\"find_expectations is deprecated, and will be removed in a future release. \"", "+", "\"Please use ExpectationSuite.find_expectation_indexes instead.\"", ",", "DeprecationWarning", ",", ")", "return", "self", ".", "_expectation_suite", ".", "find_expectations", "(", "expectation_configuration", "=", "expectation_configuration", ",", "match_type", "=", "match_type", ")" ]
[ 652, 4 ]
[ 665, 9 ]
python
en
['en', 'en', 'en']
True
Validator.remove_expectation
( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", remove_multiple_matches: bool = False, )
This method is a thin wrapper for ExpectationSuite.remove()
This method is a thin wrapper for ExpectationSuite.remove()
def remove_expectation( self, expectation_configuration: ExpectationConfiguration, match_type: str = "domain", remove_multiple_matches: bool = False, ) -> List[ExpectationConfiguration]: """This method is a thin wrapper for ExpectationSuite.remove()""" warnings.warn( "DataAsset.remove_expectations is deprecated, and will be removed in a future release. " + "Please use ExpectationSuite.remove_expectation instead.", DeprecationWarning, ) return self._expectation_suite.remove_expectation( expectation_configuration=expectation_configuration, match_type=match_type, remove_multiple_matches=remove_multiple_matches, )
[ "def", "remove_expectation", "(", "self", ",", "expectation_configuration", ":", "ExpectationConfiguration", ",", "match_type", ":", "str", "=", "\"domain\"", ",", "remove_multiple_matches", ":", "bool", "=", "False", ",", ")", "->", "List", "[", "ExpectationConfiguration", "]", ":", "warnings", ".", "warn", "(", "\"DataAsset.remove_expectations is deprecated, and will be removed in a future release. \"", "+", "\"Please use ExpectationSuite.remove_expectation instead.\"", ",", "DeprecationWarning", ",", ")", "return", "self", ".", "_expectation_suite", ".", "remove_expectation", "(", "expectation_configuration", "=", "expectation_configuration", ",", "match_type", "=", "match_type", ",", "remove_multiple_matches", "=", "remove_multiple_matches", ",", ")" ]
[ 667, 4 ]
[ 683, 9 ]
python
en
['en', 'en', 'en']
True
Validator.set_config_value
(self, key, value)
Setter for config value
Setter for config value
def set_config_value(self, key, value): """Setter for config value""" self._validator_config[key] = value
[ "def", "set_config_value", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "_validator_config", "[", "key", "]", "=", "value" ]
[ 685, 4 ]
[ 687, 43 ]
python
da
['da', 'da', 'en']
True
Validator.get_config_value
(self, key)
Getter for config value
Getter for config value
def get_config_value(self, key): """Getter for config value""" return self._validator_config.get(key)
[ "def", "get_config_value", "(", "self", ",", "key", ")", ":", "return", "self", ".", "_validator_config", ".", "get", "(", "key", ")" ]
[ 689, 4 ]
[ 691, 46 ]
python
da
['da', 'da', 'en']
True
Validator.batches
(self)
Getter for batches
Getter for batches
def batches(self) -> Dict[str, Batch]: """Getter for batches""" return self._batches
[ "def", "batches", "(", "self", ")", "->", "Dict", "[", "str", ",", "Batch", "]", ":", "return", "self", ".", "_batches" ]
[ 704, 4 ]
[ 706, 28 ]
python
en
['en', 'en', 'en']
True
Validator.active_batch
(self)
Getter for active batch
Getter for active batch
def active_batch(self) -> Batch: """Getter for active batch""" active_batch_id: str = self.execution_engine.active_batch_data_id batch: Batch = self.batches.get(active_batch_id) if active_batch_id else None return batch
[ "def", "active_batch", "(", "self", ")", "->", "Batch", ":", "active_batch_id", ":", "str", "=", "self", ".", "execution_engine", ".", "active_batch_data_id", "batch", ":", "Batch", "=", "self", ".", "batches", ".", "get", "(", "active_batch_id", ")", "if", "active_batch_id", "else", "None", "return", "batch" ]
[ 713, 4 ]
[ 717, 20 ]
python
en
['en', 'en', 'en']
True
Validator.active_batch_spec
(self)
Getter for active batch's batch_spec
Getter for active batch's batch_spec
def active_batch_spec(self) -> Optional[BatchSpec]: """Getter for active batch's batch_spec""" if not self.active_batch: return None else: return self.active_batch.batch_spec
[ "def", "active_batch_spec", "(", "self", ")", "->", "Optional", "[", "BatchSpec", "]", ":", "if", "not", "self", ".", "active_batch", ":", "return", "None", "else", ":", "return", "self", ".", "active_batch", ".", "batch_spec" ]
[ 720, 4 ]
[ 725, 47 ]
python
en
['en', 'en', 'en']
True
Validator.active_batch_id
(self)
Getter for active batch id
Getter for active batch id
def active_batch_id(self) -> str: """Getter for active batch id""" return self.execution_engine.active_batch_data_id
[ "def", "active_batch_id", "(", "self", ")", "->", "str", ":", "return", "self", ".", "execution_engine", ".", "active_batch_data_id" ]
[ 728, 4 ]
[ 730, 57 ]
python
en
['en', 'en', 'en']
True
Validator.active_batch_markers
(self)
Getter for active batch's batch markers
Getter for active batch's batch markers
def active_batch_markers(self): """Getter for active batch's batch markers""" if not self.active_batch: return None else: return self.active_batch.batch_markers
[ "def", "active_batch_markers", "(", "self", ")", ":", "if", "not", "self", ".", "active_batch", ":", "return", "None", "else", ":", "return", "self", ".", "active_batch", ".", "batch_markers" ]
[ 748, 4 ]
[ 753, 50 ]
python
en
['en', 'en', 'en']
True
Validator.active_batch_definition
(self)
Getter for the active batch's batch definition
Getter for the active batch's batch definition
def active_batch_definition(self): """Getter for the active batch's batch definition""" if not self.active_batch: return None else: return self.active_batch.batch_definition
[ "def", "active_batch_definition", "(", "self", ")", ":", "if", "not", "self", ".", "active_batch", ":", "return", "None", "else", ":", "return", "self", ".", "active_batch", ".", "batch_definition" ]
[ 756, 4 ]
[ 761, 53 ]
python
en
['en', 'en', 'en']
True
Validator.discard_failing_expectations
(self)
Removes any expectations from the validator where the validation has failed
Removes any expectations from the validator where the validation has failed
def discard_failing_expectations(self): """Removes any expectations from the validator where the validation has failed""" res = self.validate(only_return_failures=True).results if any(res): for item in res: self.remove_expectation( expectation_configuration=item.expectation_config, match_type="runtime", ) warnings.warn("Removed %s expectations that were 'False'" % len(res))
[ "def", "discard_failing_expectations", "(", "self", ")", ":", "res", "=", "self", ".", "validate", "(", "only_return_failures", "=", "True", ")", ".", "results", "if", "any", "(", "res", ")", ":", "for", "item", "in", "res", ":", "self", ".", "remove_expectation", "(", "expectation_configuration", "=", "item", ".", "expectation_config", ",", "match_type", "=", "\"runtime\"", ",", ")", "warnings", ".", "warn", "(", "\"Removed %s expectations that were 'False'\"", "%", "len", "(", "res", ")", ")" ]
[ 763, 4 ]
[ 772, 81 ]
python
en
['en', 'en', 'en']
True
Validator.get_default_expectation_arguments
(self)
Fetch default expectation arguments for this data_asset Returns: A dictionary containing all the current default expectation arguments for a data_asset Ex:: { "include_config" : True, "catch_exceptions" : False, "result_format" : 'BASIC' } See also: set_default_expectation_arguments
Fetch default expectation arguments for this data_asset
def get_default_expectation_arguments(self): """Fetch default expectation arguments for this data_asset Returns: A dictionary containing all the current default expectation arguments for a data_asset Ex:: { "include_config" : True, "catch_exceptions" : False, "result_format" : 'BASIC' } See also: set_default_expectation_arguments """ return self._default_expectation_args
[ "def", "get_default_expectation_arguments", "(", "self", ")", ":", "return", "self", ".", "_default_expectation_args" ]
[ 774, 4 ]
[ 791, 45 ]
python
en
['en', 'fr', 'en']
True
Validator.default_expectation_args
(self)
A getter for default Expectation arguments
A getter for default Expectation arguments
def default_expectation_args(self): """A getter for default Expectation arguments""" return self._default_expectation_args
[ "def", "default_expectation_args", "(", "self", ")", ":", "return", "self", ".", "_default_expectation_args" ]
[ 794, 4 ]
[ 796, 45 ]
python
da
['da', 'fr', 'en']
False
Validator.set_default_expectation_argument
(self, argument, value)
Set a default expectation argument for this data_asset Args: argument (string): The argument to be replaced value: The new argument to use for replacement Returns: None See also: get_default_expectation_arguments
Set a default expectation argument for this data_asset
def set_default_expectation_argument(self, argument, value): """ Set a default expectation argument for this data_asset Args: argument (string): The argument to be replaced value : The New argument to use for replacement Returns: None See also: get_default_expectation_arguments """ self._default_expectation_args[argument] = value
[ "def", "set_default_expectation_argument", "(", "self", ",", "argument", ",", "value", ")", ":", "self", ".", "_default_expectation_args", "[", "argument", "]", "=", "value" ]
[ 798, 4 ]
[ 813, 56 ]
python
en
['en', 'error', 'th']
False
Validator.get_expectations_config
( self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, )
Returns an expectation configuration, providing an option to discard failed expectations and to discard or include different result aspects, such as exceptions and result format.
Returns an expectation configuration, providing an option to discard failed expectations and to discard or include different result aspects, such as exceptions and result format.
def get_expectations_config( self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, ): """ Returns an expectation configuration, providing an option to discard failed expectation and discard/ include' different result aspects, such as exceptions and result format. """ warnings.warn( "get_expectations_config is deprecated, and will be removed in a future release. " + "Please use get_expectation_suite instead.", DeprecationWarning, ) return self.get_expectation_suite( discard_failed_expectations, discard_result_format_kwargs, discard_include_config_kwargs, discard_catch_exceptions_kwargs, suppress_warnings, )
[ "def", "get_expectations_config", "(", "self", ",", "discard_failed_expectations", "=", "True", ",", "discard_result_format_kwargs", "=", "True", ",", "discard_include_config_kwargs", "=", "True", ",", "discard_catch_exceptions_kwargs", "=", "True", ",", "suppress_warnings", "=", "False", ",", ")", ":", "warnings", ".", "warn", "(", "\"get_expectations_config is deprecated, and will be removed in a future release. \"", "+", "\"Please use get_expectation_suite instead.\"", ",", "DeprecationWarning", ",", ")", "return", "self", ".", "get_expectation_suite", "(", "discard_failed_expectations", ",", "discard_result_format_kwargs", ",", "discard_include_config_kwargs", ",", "discard_catch_exceptions_kwargs", ",", "suppress_warnings", ",", ")" ]
[ 815, 4 ]
[ 838, 9 ]
python
en
['en', 'error', 'th']
False
Validator.get_expectation_suite
( self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False, )
Returns _expectation_config as a JSON object, and performs some cleaning along the way.
Returns _expectation_config as a JSON object, and performs some cleaning along the way.
def get_expectation_suite( self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, suppress_logging=False, ): """Returns _expectation_config as a JSON object, and performs some cleaning along the way. Args: discard_failed_expectations (boolean): \ Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`. discard_result_format_kwargs (boolean): \ In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`. discard_include_config_kwargs (boolean): \ In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`. discard_catch_exceptions_kwargs (boolean): \ In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`. suppress_warnings (boolean): \ If true, do not include warnings in logging information about the operation. suppress_logging (boolean): \ If true, do not create a log entry (useful when using get_expectation_suite programmatically) Returns: An expectation suite. Note: get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \ copy of _expectation_suite, not the original object. """ expectation_suite = copy.deepcopy(self._expectation_suite) expectations = expectation_suite.expectations discards = defaultdict(int) if discard_failed_expectations: new_expectations = [] for expectation in expectations: # Note: This is conservative logic. # Instead of retaining expectations IFF success==True, it discard expectations IFF success==False. # In cases where expectation.success is missing or None, expectations are *retained*. # Such a case could occur if expectations were loaded from a config file and never run. if expectation.success_on_last_run is False: discards["failed_expectations"] += 1 else: new_expectations.append(expectation) expectations = new_expectations message = "\t%d expectation(s) included in expectation_suite." % len( expectations ) if discards["failed_expectations"] > 0 and not suppress_warnings: message += ( " Omitting %d expectation(s) that failed when last run; set " "discard_failed_expectations=False to include them." % discards["failed_expectations"] ) for expectation in expectations: # FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation, # which calls _copy_and_clean_up_expectation expectation.success_on_last_run = None if discard_result_format_kwargs: if "result_format" in expectation.kwargs: del expectation.kwargs["result_format"] discards["result_format"] += 1 if discard_include_config_kwargs: if "include_config" in expectation.kwargs: del expectation.kwargs["include_config"] discards["include_config"] += 1 if discard_catch_exceptions_kwargs: if "catch_exceptions" in expectation.kwargs: del expectation.kwargs["catch_exceptions"] discards["catch_exceptions"] += 1 settings_message = "" if discards["result_format"] > 0 and not suppress_warnings: settings_message += " result_format" if discards["include_config"] > 0 and not suppress_warnings: settings_message += " include_config" if discards["catch_exceptions"] > 0 and not suppress_warnings: settings_message += " catch_exceptions" if ( len(settings_message) > 1 ): # Only add this if we added one of the settings above. settings_message += " settings filtered." expectation_suite.expectations = expectations if not suppress_logging: logger.info(message + settings_message) return expectation_suite
[ "def", "get_expectation_suite", "(", "self", ",", "discard_failed_expectations", "=", "True", ",", "discard_result_format_kwargs", "=", "True", ",", "discard_include_config_kwargs", "=", "True", ",", "discard_catch_exceptions_kwargs", "=", "True", ",", "suppress_warnings", "=", "False", ",", "suppress_logging", "=", "False", ",", ")", ":", "expectation_suite", "=", "copy", ".", "deepcopy", "(", "self", ".", "_expectation_suite", ")", "expectations", "=", "expectation_suite", ".", "expectations", "discards", "=", "defaultdict", "(", "int", ")", "if", "discard_failed_expectations", ":", "new_expectations", "=", "[", "]", "for", "expectation", "in", "expectations", ":", "# Note: This is conservative logic.", "# Instead of retaining expectations IFF success==True, it discard expectations IFF success==False.", "# In cases where expectation.success is missing or None, expectations are *retained*.", "# Such a case could occur if expectations were loaded from a config file and never run.", "if", "expectation", ".", "success_on_last_run", "is", "False", ":", "discards", "[", "\"failed_expectations\"", "]", "+=", "1", "else", ":", "new_expectations", ".", "append", "(", "expectation", ")", "expectations", "=", "new_expectations", "message", "=", "\"\\t%d expectation(s) included in expectation_suite.\"", "%", "len", "(", "expectations", ")", "if", "discards", "[", "\"failed_expectations\"", "]", ">", "0", "and", "not", "suppress_warnings", ":", "message", "+=", "(", "\" Omitting %d expectation(s) that failed when last run; set \"", "\"discard_failed_expectations=False to include them.\"", "%", "discards", "[", "\"failed_expectations\"", "]", ")", "for", "expectation", "in", "expectations", ":", "# FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,", "# which calls _copy_and_clean_up_expectation", "expectation", ".", "success_on_last_run", "=", "None", "if", "discard_result_format_kwargs", ":", "if", "\"result_format\"", "in", "expectation", ".", "kwargs", ":", "del", "expectation", ".", "kwargs", "[", "\"result_format\"", "]", "discards", "[", "\"result_format\"", "]", "+=", "1", "if", "discard_include_config_kwargs", ":", "if", "\"include_config\"", "in", "expectation", ".", "kwargs", ":", "del", "expectation", ".", "kwargs", "[", "\"include_config\"", "]", "discards", "[", "\"include_config\"", "]", "+=", "1", "if", "discard_catch_exceptions_kwargs", ":", "if", "\"catch_exceptions\"", "in", "expectation", ".", "kwargs", ":", "del", "expectation", ".", "kwargs", "[", "\"catch_exceptions\"", "]", "discards", "[", "\"catch_exceptions\"", "]", "+=", "1", "settings_message", "=", "\"\"", "if", "discards", "[", "\"result_format\"", "]", ">", "0", "and", "not", "suppress_warnings", ":", "settings_message", "+=", "\" result_format\"", "if", "discards", "[", "\"include_config\"", "]", ">", "0", "and", "not", "suppress_warnings", ":", "settings_message", "+=", "\" include_config\"", "if", "discards", "[", "\"catch_exceptions\"", "]", ">", "0", "and", "not", "suppress_warnings", ":", "settings_message", "+=", "\" catch_exceptions\"", "if", "(", "len", "(", "settings_message", ")", ">", "1", ")", ":", "# Only add this if we added one of the settings above.", "settings_message", "+=", "\" settings filtered.\"", "expectation_suite", ".", "expectations", "=", "expectations", "if", "not", "suppress_logging", ":", "logger", ".", "info", "(", "message", "+", "settings_message", ")", "return", "expectation_suite" ]
[ 840, 4 ]
[ 943, 32 ]
python
en
['en', 'en', 'en']
True
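A sketch of how the discard flags interact, again with a hypothetical `validator`. The defaults strip failed expectations and the result_format/include_config/catch_exceptions kwargs; passing False everywhere returns the full picture:

# Default call: failed expectations and bookkeeping kwargs are dropped from the copy.
cleaned = validator.get_expectation_suite()

# Keep everything, including expectations that failed on the last run:
full = validator.get_expectation_suite(
    discard_failed_expectations=False,
    discard_result_format_kwargs=False,
    discard_include_config_kwargs=False,
    discard_catch_exceptions_kwargs=False,
    suppress_logging=True,  # skip the "... expectation(s) included" log entry
)

# Both calls operate on a deep copy; validator._expectation_suite is untouched.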
Validator.save_expectation_suite
( self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, )
Writes ``_expectation_config`` to a JSON file. Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \ can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \ pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \ the JSON expectations config. Args: filepath (string): \ The location and name to write the JSON config file to. discard_failed_expectations (boolean): \ If True, excludes expectations that do not return ``success = True``. \ If False, all expectations are written to the JSON config file. discard_result_format_kwargs (boolean): \ If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \ file. discard_include_config_kwargs (boolean): \ If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \ file. discard_catch_exceptions_kwargs (boolean): \ If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \ config file. suppress_warnings (boolean): \ If True, all warnings raised by Great Expectations, as a result of dropped expectations, are \ suppressed.
Writes ``_expectation_config`` to a JSON file.
def save_expectation_suite( self, filepath=None, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_config_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False, ): """Writes ``_expectation_config`` to a JSON file. Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \ can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \ pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \ the JSON expectations config. Args: filepath (string): \ The location and name to write the JSON config file to. discard_failed_expectations (boolean): \ If True, excludes expectations that do not return ``success = True``. \ If False, all expectations are written to the JSON config file. discard_result_format_kwargs (boolean): \ If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \ file. discard_include_config_kwargs (boolean): \ If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \ file. discard_catch_exceptions_kwargs (boolean): \ If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \ config file. suppress_warnings (boolean): \ If True, all warnings raised by Great Expectations, as a result of dropped expectations, are \ suppressed. """ expectation_suite = self.get_expectation_suite( discard_failed_expectations, discard_result_format_kwargs, discard_include_config_kwargs, discard_catch_exceptions_kwargs, suppress_warnings, ) if filepath is None and self._data_context is not None: self._data_context.save_expectation_suite(expectation_suite) elif filepath is not None: with open(filepath, "w") as outfile: json.dump( expectationSuiteSchema.dump(expectation_suite), outfile, indent=2, sort_keys=True, ) else: raise ValueError( "Unable to save config: filepath or data_context must be available." )
[ "def", "save_expectation_suite", "(", "self", ",", "filepath", "=", "None", ",", "discard_failed_expectations", "=", "True", ",", "discard_result_format_kwargs", "=", "True", ",", "discard_include_config_kwargs", "=", "True", ",", "discard_catch_exceptions_kwargs", "=", "True", ",", "suppress_warnings", "=", "False", ",", ")", ":", "expectation_suite", "=", "self", ".", "get_expectation_suite", "(", "discard_failed_expectations", ",", "discard_result_format_kwargs", ",", "discard_include_config_kwargs", ",", "discard_catch_exceptions_kwargs", ",", "suppress_warnings", ",", ")", "if", "filepath", "is", "None", "and", "self", ".", "_data_context", "is", "not", "None", ":", "self", ".", "_data_context", ".", "save_expectation_suite", "(", "expectation_suite", ")", "elif", "filepath", "is", "not", "None", ":", "with", "open", "(", "filepath", ",", "\"w\"", ")", "as", "outfile", ":", "json", ".", "dump", "(", "expectationSuiteSchema", ".", "dump", "(", "expectation_suite", ")", ",", "outfile", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ",", ")", "else", ":", "raise", "ValueError", "(", "\"Unable to save config: filepath or data_context must be available.\"", ")" ]
[ 945, 4 ]
[ 1001, 13 ]
python
en
['en', 'en', 'en']
True
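A usage sketch (hypothetical `validator` and path): with an explicit filepath the cleaned suite is written as indented, key-sorted JSON; with filepath=None it is routed to the attached data context, and a ValueError is raised if neither is available:

# Persist the suite to disk, keeping failed expectations in the export.
validator.save_expectation_suite(
    "expectations/my_suite.json", discard_failed_expectations=False
)

# validator.save_expectation_suite() with no filepath requires a data context;
# otherwise: ValueError("Unable to save config: filepath or data_context must be available.")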
Validator.validate
( self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None, )
Generates a JSON-formatted report describing the outcome of all expectations. Use the default expectation_suite=None to validate the expectations config associated with the DataAsset. Args: expectation_suite (json or None): \ If None, uses the expectations config generated with the DataAsset during the current session. \ If a JSON file, validates those expectations. run_name (str): \ Used to identify this validation result as part of a collection of validations. \ See DataContext for more information. data_context (DataContext): \ A DataContext object to use as part of validation for binding evaluation parameters and \ registering validation results. evaluation_parameters (dict or None): \ If None, uses the evaluation_parameters from the expectation_suite provided or as part of the \ data_asset. If a dict, uses the evaluation parameters in the dictionary. catch_exceptions (boolean): \ If True, exceptions raised by tests will not end validation and will be described in the returned \ report. result_format (string or None): \ If None, uses the default value ('BASIC' or as specified). \ If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \ etc.). only_return_failures (boolean): \ If True, expectation results are only returned when ``success = False`` \ Returns: A JSON-formatted dictionary containing a list of the validation results. \ An example of the returned format:: { "results": [ { "unexpected_list": [unexpected_value_1, unexpected_value_2], "expectation_type": "expect_*", "kwargs": { "column": "Column_Name", "output_format": "SUMMARY" }, "success": true, "raised_exception": false, "exception_traceback": null }, { ... (Second expectation results) }, ... (More expectations results) ], "success": true, "statistics": { "evaluated_expectations": n, "successful_expectations": m, "unsuccessful_expectations": n - m, "success_percent": m / n } } Notes: A warning is issued if the configuration object was built with a different version of Great Expectations than the current environment, or if no version was found in the configuration file. Raises: AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
Generates a JSON-formatted report describing the outcome of all expectations.
def validate( self, expectation_suite=None, run_id=None, data_context=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False, run_name=None, run_time=None, ): """Generates a JSON-formatted report describing the outcome of all expectations. Use the default expectation_suite=None to validate the expectations config associated with the DataAsset. Args: expectation_suite (json or None): \ If None, uses the expectations config generated with the DataAsset during the current session. \ If a JSON file, validates those expectations. run_name (str): \ Used to identify this validation result as part of a collection of validations. \ See DataContext for more information. data_context (DataContext): \ A DataContext object to use as part of validation for binding evaluation parameters and \ registering validation results. evaluation_parameters (dict or None): \ If None, uses the evaluation_parameters from the expectation_suite provided or as part of the \ data_asset. If a dict, uses the evaluation parameters in the dictionary. catch_exceptions (boolean): \ If True, exceptions raised by tests will not end validation and will be described in the returned \ report. result_format (string or None): \ If None, uses the default value ('BASIC' or as specified). \ If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \ etc.). only_return_failures (boolean): \ If True, expectation results are only returned when ``success = False`` \ Returns: A JSON-formatted dictionary containing a list of the validation results. \ An example of the returned format:: { "results": [ { "unexpected_list": [unexpected_value_1, unexpected_value_2], "expectation_type": "expect_*", "kwargs": { "column": "Column_Name", "output_format": "SUMMARY" }, "success": true, "raised_exception": false, "exception_traceback": null }, { ... (Second expectation results) }, ... (More expectations results) ], "success": true, "statistics": { "evaluated_expectations": n, "successful_expectations": m, "unsuccessful_expectations": n - m, "success_percent": m / n } } Notes: A warning is issued if the configuration object was built with a different version of Great Expectations than the current environment, or if no version was found in the configuration file. Raises: AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError """ try: validation_time = datetime.datetime.now(datetime.timezone.utc).strftime( "%Y%m%dT%H%M%S.%fZ" ) assert not (run_id and run_name) and not ( run_id and run_time ), "Please provide either a run_id or run_name and/or run_time." if isinstance(run_id, str) and not run_name: warnings.warn( "String run_ids will be deprecated in the future. Please provide a run_id of type " "RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name " "and run_time (both optional). Instead of providing a run_id, you may also provide " "run_name and run_time separately.", DeprecationWarning, ) try: run_time = parse(run_id) except (ValueError, TypeError): pass run_id = RunIdentifier(run_name=run_id, run_time=run_time) elif isinstance(run_id, dict): run_id = RunIdentifier(**run_id) elif not isinstance(run_id, RunIdentifier): run_id = RunIdentifier(run_name=run_name, run_time=run_time) self._active_validation = True if result_format is None: result_format = {"result_format": "BASIC"} # If a different validation data context was provided, override validate__data_context = self._data_context if data_context is None and self._data_context is not None: data_context = self._data_context elif data_context is not None: # temporarily set self._data_context so it is used inside the expectation decorator self._data_context = data_context if expectation_suite is None: expectation_suite = self.get_expectation_suite( discard_failed_expectations=False, discard_result_format_kwargs=False, discard_include_config_kwargs=False, discard_catch_exceptions_kwargs=False, ) elif isinstance(expectation_suite, str): try: with open(expectation_suite) as infile: expectation_suite = expectationSuiteSchema.loads(infile.read()) except ValidationError: raise except OSError: raise GreatExpectationsError( "Unable to load expectation suite: IO error while reading %s" % expectation_suite ) elif not isinstance(expectation_suite, ExpectationSuite): logger.error( "Unable to validate using the provided value for expectation suite; does it need to be " "loaded from a dictionary?" ) if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info( self ), success=False, ) return ExpectationValidationResult(success=False) # Evaluation parameter priority is # 1. from provided parameters # 2. from expectation configuration # 3. from data context # So, we load them in reverse order if data_context is not None: runtime_evaluation_parameters = ( data_context.evaluation_parameter_store.get_bind_params(run_id) ) else: runtime_evaluation_parameters = {} if expectation_suite.evaluation_parameters: runtime_evaluation_parameters.update( expectation_suite.evaluation_parameters ) if evaluation_parameters is not None: runtime_evaluation_parameters.update(evaluation_parameters) # Convert evaluation parameters to be json-serializable runtime_evaluation_parameters = recursively_convert_to_json_serializable( runtime_evaluation_parameters ) # Warn if our version is different from the version in the configuration # TODO: Deprecate "great_expectations.__version__" suite_ge_version = expectation_suite.meta.get( "great_expectations_version" ) or expectation_suite.meta.get("great_expectations.__version__") # Group expectations by column columns = {} for expectation in expectation_suite.expectations: expectation.process_evaluation_parameters( evaluation_parameters=runtime_evaluation_parameters, interactive_evaluation=self.interactive_evaluation, data_context=self._data_context, ) if "column" in expectation.kwargs and isinstance( expectation.kwargs["column"], Hashable ): column = expectation.kwargs["column"] else: column = "_nocolumn" if column not in columns: columns[column] = [] columns[column].append(expectation) expectations_to_evaluate = [] for col in columns: expectations_to_evaluate.extend(columns[col]) results = self.graph_validate( expectations_to_evaluate, runtime_configuration={ "catch_exceptions": catch_exceptions, "result_format": result_format, }, ) statistics = _calc_validation_statistics(results) if only_return_failures: abbrev_results = [] for exp in results: if not exp.success: abbrev_results.append(exp) results = abbrev_results expectation_suite_name = expectation_suite.expectation_suite_name result = ExpectationSuiteValidationResult( results=results, success=statistics.success, statistics={ "evaluated_expectations": statistics.evaluated_expectations, "successful_expectations": statistics.successful_expectations, "unsuccessful_expectations": statistics.unsuccessful_expectations, "success_percent": statistics.success_percent, }, evaluation_parameters=runtime_evaluation_parameters, meta={ "great_expectations_version": ge_version, "expectation_suite_name": expectation_suite_name, "run_id": run_id, "batch_spec": self.active_batch_spec, "batch_markers": self.active_batch_markers, "active_batch_definition": self.active_batch_definition, "validation_time": validation_time, }, ) self._data_context = validate__data_context except Exception as e: if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=False, ) raise finally: self._active_validation = False if getattr(data_context, "_usage_statistics_handler", None): handler = data_context._usage_statistics_handler handler.send_usage_message( event="data_asset.validate", event_payload=handler._batch_anonymizer.anonymize_batch_info(self), success=True, ) return result
[ "def", "validate", "(", "self", ",", "expectation_suite", "=", "None", ",", "run_id", "=", "None", ",", "data_context", "=", "None", ",", "evaluation_parameters", "=", "None", ",", "catch_exceptions", "=", "True", ",", "result_format", "=", "None", ",", "only_return_failures", "=", "False", ",", "run_name", "=", "None", ",", "run_time", "=", "None", ",", ")", ":", "try", ":", "validation_time", "=", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", ".", "strftime", "(", "\"%Y%m%dT%H%M%S.%fZ\"", ")", "assert", "not", "(", "run_id", "and", "run_name", ")", "and", "not", "(", "run_id", "and", "run_time", ")", ",", "\"Please provide either a run_id or run_name and/or run_time.\"", "if", "isinstance", "(", "run_id", ",", "str", ")", "and", "not", "run_name", ":", "warnings", ".", "warn", "(", "\"String run_ids will be deprecated in the future. Please provide a run_id of type \"", "\"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name \"", "\"and run_time (both optional). Instead of providing a run_id, you may also provide\"", "\"run_name and run_time separately.\"", ",", "DeprecationWarning", ",", ")", "try", ":", "run_time", "=", "parse", "(", "run_id", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "run_id", "=", "RunIdentifier", "(", "run_name", "=", "run_id", ",", "run_time", "=", "run_time", ")", "elif", "isinstance", "(", "run_id", ",", "dict", ")", ":", "run_id", "=", "RunIdentifier", "(", "*", "*", "run_id", ")", "elif", "not", "isinstance", "(", "run_id", ",", "RunIdentifier", ")", ":", "run_id", "=", "RunIdentifier", "(", "run_name", "=", "run_name", ",", "run_time", "=", "run_time", ")", "self", ".", "_active_validation", "=", "True", "if", "result_format", "is", "None", ":", "result_format", "=", "{", "\"result_format\"", ":", "\"BASIC\"", "}", "# If a different validation data context was provided, override", "validate__data_context", "=", "self", ".", "_data_context", "if", "data_context", "is", "None", "and", "self", ".", "_data_context", "is", "not", "None", ":", "data_context", "=", "self", ".", "_data_context", "elif", "data_context", "is", "not", "None", ":", "# temporarily set self._data_context so it is used inside the expectation decorator", "self", ".", "_data_context", "=", "data_context", "if", "expectation_suite", "is", "None", ":", "expectation_suite", "=", "self", ".", "get_expectation_suite", "(", "discard_failed_expectations", "=", "False", ",", "discard_result_format_kwargs", "=", "False", ",", "discard_include_config_kwargs", "=", "False", ",", "discard_catch_exceptions_kwargs", "=", "False", ",", ")", "elif", "isinstance", "(", "expectation_suite", ",", "str", ")", ":", "try", ":", "with", "open", "(", "expectation_suite", ")", "as", "infile", ":", "expectation_suite", "=", "expectationSuiteSchema", ".", "loads", "(", "infile", ".", "read", "(", ")", ")", "except", "ValidationError", ":", "raise", "except", "OSError", ":", "raise", "GreatExpectationsError", "(", "\"Unable to load expectation suite: IO error while reading %s\"", "%", "expectation_suite", ")", "elif", "not", "isinstance", "(", "expectation_suite", ",", "ExpectationSuite", ")", ":", "logger", ".", "error", "(", "\"Unable to validate using the provided value for expectation suite; does it need to be \"", "\"loaded from a dictionary?\"", ")", "if", "getattr", "(", "data_context", ",", "\"_usage_statistics_handler\"", ",", "None", ")", ":", "handler", "=", "data_context", ".", "_usage_statistics_handler", 
"handler", ".", "send_usage_message", "(", "event", "=", "\"data_asset.validate\"", ",", "event_payload", "=", "handler", ".", "_batch_anonymizer", ".", "anonymize_batch_info", "(", "self", ")", ",", "success", "=", "False", ",", ")", "return", "ExpectationValidationResult", "(", "success", "=", "False", ")", "# Evaluation parameter priority is", "# 1. from provided parameters", "# 2. from expectation configuration", "# 3. from data context", "# So, we load them in reverse order", "if", "data_context", "is", "not", "None", ":", "runtime_evaluation_parameters", "=", "(", "data_context", ".", "evaluation_parameter_store", ".", "get_bind_params", "(", "run_id", ")", ")", "else", ":", "runtime_evaluation_parameters", "=", "{", "}", "if", "expectation_suite", ".", "evaluation_parameters", ":", "runtime_evaluation_parameters", ".", "update", "(", "expectation_suite", ".", "evaluation_parameters", ")", "if", "evaluation_parameters", "is", "not", "None", ":", "runtime_evaluation_parameters", ".", "update", "(", "evaluation_parameters", ")", "# Convert evaluation parameters to be json-serializable", "runtime_evaluation_parameters", "=", "recursively_convert_to_json_serializable", "(", "runtime_evaluation_parameters", ")", "# Warn if our version is different from the version in the configuration", "# TODO: Deprecate \"great_expectations.__version__\"", "suite_ge_version", "=", "expectation_suite", ".", "meta", ".", "get", "(", "\"great_expectations_version\"", ")", "or", "expectation_suite", ".", "meta", ".", "get", "(", "\"great_expectations.__version__\"", ")", "# Group expectations by column", "columns", "=", "{", "}", "for", "expectation", "in", "expectation_suite", ".", "expectations", ":", "expectation", ".", "process_evaluation_parameters", "(", "evaluation_parameters", "=", "runtime_evaluation_parameters", ",", "interactive_evaluation", "=", "self", ".", "interactive_evaluation", ",", "data_context", "=", "self", ".", "_data_context", ",", ")", "if", "\"column\"", "in", "expectation", ".", "kwargs", "and", "isinstance", "(", "expectation", ".", "kwargs", "[", "\"column\"", "]", ",", "Hashable", ")", ":", "column", "=", "expectation", ".", "kwargs", "[", "\"column\"", "]", "else", ":", "column", "=", "\"_nocolumn\"", "if", "column", "not", "in", "columns", ":", "columns", "[", "column", "]", "=", "[", "]", "columns", "[", "column", "]", ".", "append", "(", "expectation", ")", "expectations_to_evaluate", "=", "[", "]", "for", "col", "in", "columns", ":", "expectations_to_evaluate", ".", "extend", "(", "columns", "[", "col", "]", ")", "results", "=", "self", ".", "graph_validate", "(", "expectations_to_evaluate", ",", "runtime_configuration", "=", "{", "\"catch_exceptions\"", ":", "catch_exceptions", ",", "\"result_format\"", ":", "result_format", ",", "}", ",", ")", "statistics", "=", "_calc_validation_statistics", "(", "results", ")", "if", "only_return_failures", ":", "abbrev_results", "=", "[", "]", "for", "exp", "in", "results", ":", "if", "not", "exp", ".", "success", ":", "abbrev_results", ".", "append", "(", "exp", ")", "results", "=", "abbrev_results", "expectation_suite_name", "=", "expectation_suite", ".", "expectation_suite_name", "result", "=", "ExpectationSuiteValidationResult", "(", "results", "=", "results", ",", "success", "=", "statistics", ".", "success", ",", "statistics", "=", "{", "\"evaluated_expectations\"", ":", "statistics", ".", "evaluated_expectations", ",", "\"successful_expectations\"", ":", "statistics", ".", "successful_expectations", ",", 
"\"unsuccessful_expectations\"", ":", "statistics", ".", "unsuccessful_expectations", ",", "\"success_percent\"", ":", "statistics", ".", "success_percent", ",", "}", ",", "evaluation_parameters", "=", "runtime_evaluation_parameters", ",", "meta", "=", "{", "\"great_expectations_version\"", ":", "ge_version", ",", "\"expectation_suite_name\"", ":", "expectation_suite_name", ",", "\"run_id\"", ":", "run_id", ",", "\"batch_spec\"", ":", "self", ".", "active_batch_spec", ",", "\"batch_markers\"", ":", "self", ".", "active_batch_markers", ",", "\"active_batch_definition\"", ":", "self", ".", "active_batch_definition", ",", "\"validation_time\"", ":", "validation_time", ",", "}", ",", ")", "self", ".", "_data_context", "=", "validate__data_context", "except", "Exception", "as", "e", ":", "if", "getattr", "(", "data_context", ",", "\"_usage_statistics_handler\"", ",", "None", ")", ":", "handler", "=", "data_context", ".", "_usage_statistics_handler", "handler", ".", "send_usage_message", "(", "event", "=", "\"data_asset.validate\"", ",", "event_payload", "=", "handler", ".", "_batch_anonymizer", ".", "anonymize_batch_info", "(", "self", ")", ",", "success", "=", "False", ",", ")", "raise", "finally", ":", "self", ".", "_active_validation", "=", "False", "if", "getattr", "(", "data_context", ",", "\"_usage_statistics_handler\"", ",", "None", ")", ":", "handler", "=", "data_context", ".", "_usage_statistics_handler", "handler", ".", "send_usage_message", "(", "event", "=", "\"data_asset.validate\"", ",", "event_payload", "=", "handler", ".", "_batch_anonymizer", ".", "anonymize_batch_info", "(", "self", ")", ",", "success", "=", "True", ",", ")", "return", "result" ]
[ 1003, 4 ]
[ 1263, 21 ]
python
en
['en', 'en', 'en']
True
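A typical call, sketched against a hypothetical `validator`. Note that run_id may not be combined with run_name/run_time, that a bare string run_id takes the deprecation path above, and that result_format=None defaults to {"result_format": "BASIC"}:

results = validator.validate(
    run_name="nightly",        # becomes RunIdentifier(run_name="nightly", run_time=None)
    result_format="SUMMARY",   # string values are passed through unchanged
    catch_exceptions=True,     # exceptions become described results instead of aborting
    only_return_failures=False,
)

print(results.success)
print(results.statistics["success_percent"])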
Validator.get_evaluation_parameter
(self, parameter_name, default_value=None)
Get an evaluation parameter value that has been stored in meta. Args: parameter_name (string): The name of the parameter to retrieve. default_value (any): The default value to be returned if the parameter is not found. Returns: The current value of the evaluation parameter.
Get an evaluation parameter value that has been stored in meta.
def get_evaluation_parameter(self, parameter_name, default_value=None): """ Get an evaluation parameter value that has been stored in meta. Args: parameter_name (string): The name of the parameter to retrieve. default_value (any): The default value to be returned if the parameter is not found. Returns: The current value of the evaluation parameter. """ if parameter_name in self._expectation_suite.evaluation_parameters: return self._expectation_suite.evaluation_parameters[parameter_name] else: return default_value
[ "def", "get_evaluation_parameter", "(", "self", ",", "parameter_name", ",", "default_value", "=", "None", ")", ":", "if", "parameter_name", "in", "self", ".", "_expectation_suite", ".", "evaluation_parameters", ":", "return", "self", ".", "_expectation_suite", ".", "evaluation_parameters", "[", "parameter_name", "]", "else", ":", "return", "default_value" ]
[ 1265, 4 ]
[ 1279, 32 ]
python
en
['en', 'error', 'th']
False
Validator.set_evaluation_parameter
(self, parameter_name, parameter_value)
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations. Args: parameter_name (string): The name of the kwarg to be replaced at evaluation time parameter_value (any): The value to be used
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations.
def set_evaluation_parameter(self, parameter_name, parameter_value): """ Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations. Args: parameter_name (string): The name of the kwarg to be replaced at evaluation time parameter_value (any): The value to be used """ self._expectation_suite.evaluation_parameters.update( {parameter_name: parameter_value} )
[ "def", "set_evaluation_parameter", "(", "self", ",", "parameter_name", ",", "parameter_value", ")", ":", "self", ".", "_expectation_suite", ".", "evaluation_parameters", ".", "update", "(", "{", "parameter_name", ":", "parameter_value", "}", ")" ]
[ 1281, 4 ]
[ 1292, 9 ]
python
en
['en', 'error', 'th']
False
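A roundtrip sketch covering both evaluation-parameter accessors above (hypothetical `validator`):

# Store a value for parameterized expectations to reference at evaluation time...
validator.set_evaluation_parameter("expected_min_rows", 1000)

# ...and read it back; missing keys fall through to default_value.
assert validator.get_evaluation_parameter("expected_min_rows") == 1000
assert validator.get_evaluation_parameter("no_such_key", default_value=0) == 0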
Validator.add_citation
( self, comment, batch_spec=None, batch_markers=None, batch_definition=None, citation_date=None, )
Adds a citation to an existing Expectation Suite within the validator
Adds a citation to an existing Expectation Suite within the validator
def add_citation( self, comment, batch_spec=None, batch_markers=None, batch_definition=None, citation_date=None, ): """Adds a citation to an existing Expectation Suite within the validator""" if batch_spec is None: batch_spec = self.batch_spec if batch_markers is None: batch_markers = self.active_batch_markers if batch_definition is None: batch_definition = self.active_batch_definition self._expectation_suite.add_citation( comment, batch_spec=batch_spec, batch_markers=batch_markers, batch_definition=batch_definition, citation_date=citation_date, )
[ "def", "add_citation", "(", "self", ",", "comment", ",", "batch_spec", "=", "None", ",", "batch_markers", "=", "None", ",", "batch_definition", "=", "None", ",", "citation_date", "=", "None", ",", ")", ":", "if", "batch_spec", "is", "None", ":", "batch_spec", "=", "self", ".", "batch_spec", "if", "batch_markers", "is", "None", ":", "batch_markers", "=", "self", ".", "active_batch_markers", "if", "batch_definition", "is", "None", ":", "batch_definition", "=", "self", ".", "active_batch_definition", "self", ".", "_expectation_suite", ".", "add_citation", "(", "comment", ",", "batch_spec", "=", "batch_spec", ",", "batch_markers", "=", "batch_markers", ",", "batch_definition", "=", "batch_definition", ",", "citation_date", "=", "citation_date", ",", ")" ]
[ 1294, 4 ]
[ 1315, 9 ]
python
en
['en', 'en', 'en']
True
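A minimal sketch (hypothetical `validator`); every batch argument left as None is backfilled from the validator's active batch before the citation is attached to the suite:

validator.add_citation("Reviewed against the January extract; nulls in user_id expected.")
# Equivalent to passing batch_spec=validator.batch_spec,
# batch_markers=validator.active_batch_markers, and
# batch_definition=validator.active_batch_definition explicitly.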
Validator.expectation_suite_name
(self)
Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.
Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.
def expectation_suite_name(self): """Gets the current expectation_suite name of this data_asset as stored in the expectations configuration.""" return self._expectation_suite.expectation_suite_name
[ "def", "expectation_suite_name", "(", "self", ")", ":", "return", "self", ".", "_expectation_suite", ".", "expectation_suite_name" ]
[ 1318, 4 ]
[ 1320, 61 ]
python
en
['en', 'en', 'en']
True
Validator.expectation_suite_name
(self, expectation_suite_name)
Sets the expectation_suite name of this data_asset as stored in the expectations configuration.
Sets the expectation_suite name of this data_asset as stored in the expectations configuration.
def expectation_suite_name(self, expectation_suite_name): """Sets the expectation_suite name of this data_asset as stored in the expectations configuration.""" self._expectation_suite.expectation_suite_name = expectation_suite_name
[ "def", "expectation_suite_name", "(", "self", ",", "expectation_suite_name", ")", ":", "self", ".", "_expectation_suite", ".", "expectation_suite_name", "=", "expectation_suite_name" ]
[ 1323, 4 ]
[ 1325, 79 ]
python
en
['en', 'en', 'en']
True
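The getter/setter pair above behaves as a plain property; a roundtrip sketch with a hypothetical `validator`:

validator.expectation_suite_name = "warehouse.users.basic"            # setter
assert validator.expectation_suite_name == "warehouse.users.basic"    # getter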
Validator.test_expectation_function
(self, function, *args, **kwargs)
Test a generic expectation function Args: function (func): The function to be tested. (Must be a valid expectation function.) *args : Positional arguments to be passed to the function **kwargs : Keyword arguments to be passed to the function Returns: A JSON-serializable expectation result object. Notes: This function is a thin layer to allow quick testing of new expectation functions, without having to \ define custom classes, etc. To use developed expectations from the command-line tool, you will still need \ to define custom classes, etc. Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
Test a generic expectation function
def test_expectation_function(self, function, *args, **kwargs): """Test a generic expectation function Args: function (func): The function to be tested. (Must be a valid expectation function.) *args : Positional arguments to be passed to the function **kwargs : Keyword arguments to be passed to the function Returns: A JSON-serializable expectation result object. Notes: This function is a thin layer to allow quick testing of new expectation functions, without having to \ define custom classes, etc. To use developed expectations from the command-line tool, you will still need \ to define custom classes, etc. Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information. """ argspec = inspect.getfullargspec(function)[0][1:] new_function = self.expectation(argspec)(function) return new_function(self, *args, **kwargs)
[ "def", "test_expectation_function", "(", "self", ",", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "function", ")", "[", "0", "]", "[", "1", ":", "]", "new_function", "=", "self", ".", "expectation", "(", "argspec", ")", "(", "function", ")", "return", "new_function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
[ 1327, 4 ]
[ 1349, 50 ]
python
en
['ro', 'en', 'en']
True
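A sketch of exercising an ad-hoc expectation function without writing a custom class. The function body, its accessor, and `validator` are illustrative assumptions; the only firm contract above is that `function` must be a valid expectation function, whose non-self argspec is wrapped by `self.expectation`:

# An ad-hoc check following the expectation-function shape (illustrative only).
def expect_column_values_to_be_positive(self, column):
    series = self.get_column(column)  # hypothetical data accessor
    return {"success": bool((series > 0).all())}

result = validator.test_expectation_function(
    expect_column_values_to_be_positive, "revenue"
)
print(result)  # a JSON-serializable expectation result object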
BridgeValidator.__init__
(self, batch, expectation_suite, expectation_engine=None, **kwargs)
Builds an expectation_engine object using an expectation suite and a batch, with the expectation engine being determined either by the user or by the type of batch data (pandas dataframe, SqlAlchemy table, etc.) Args: batch (Batch): A Batch in Pandas, Spark, or SQL format expectation_suite (ExpectationSuite): The Expectation Suite available to the validator within the current Data Context expectation_engine (ExecutionEngine): The current Execution Engine being utilized. If this is not set, it is determined by the type of data within the given batch
Builds an expectation_engine object using an expectation suite and a batch, with the expectation engine being determined either by the user or by the type of batch data (pandas dataframe, SqlAlchemy table, etc.)
def __init__(self, batch, expectation_suite, expectation_engine=None, **kwargs): """Builds an expectation_engine object using an expectation suite and a batch, with the expectation engine being determined either by the user or by the type of batch data (pandas dataframe, SqlAlchemy table, etc.) Args: batch (Batch): A Batch in Pandas, Spark, or SQL format expectation_suite (ExpectationSuite): The Expectation Suite available to the validator within the current Data Context expectation_engine (ExecutionEngine): The current Execution Engine being utilized. If this is not set, it is determined by the type of data within the given batch """ self.batch = batch self.expectation_suite = expectation_suite if isinstance(expectation_engine, dict): expectation_engine = ClassConfig(**expectation_engine) if isinstance(expectation_engine, ClassConfig): module_name = expectation_engine.module_name or "great_expectations.dataset" verify_dynamic_loading_support(module_name=module_name) expectation_engine = load_class( class_name=expectation_engine.class_name, module_name=module_name ) self.expectation_engine = expectation_engine if self.expectation_engine is None: # Guess the engine try: import pandas as pd if isinstance(batch.data, pd.DataFrame): self.expectation_engine = PandasDataset except ImportError: pass if self.expectation_engine is None: if isinstance(batch.data, SqlAlchemyBatchReference): self.expectation_engine = SqlAlchemyDataset if self.expectation_engine is None: try: import pyspark if isinstance(batch.data, pyspark.sql.DataFrame): self.expectation_engine = SparkDFDataset except ImportError: pass if self.expectation_engine is None: raise ValueError( "Unable to identify expectation_engine. It must be a subclass of DataAsset." ) self.init_kwargs = kwargs
[ "def", "__init__", "(", "self", ",", "batch", ",", "expectation_suite", ",", "expectation_engine", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "batch", "=", "batch", "self", ".", "expectation_suite", "=", "expectation_suite", "if", "isinstance", "(", "expectation_engine", ",", "dict", ")", ":", "expectation_engine", "=", "ClassConfig", "(", "*", "*", "expectation_engine", ")", "if", "isinstance", "(", "expectation_engine", ",", "ClassConfig", ")", ":", "module_name", "=", "expectation_engine", ".", "module_name", "or", "\"great_expectations.dataset\"", "verify_dynamic_loading_support", "(", "module_name", "=", "module_name", ")", "expectation_engine", "=", "load_class", "(", "class_name", "=", "expectation_engine", ".", "class_name", ",", "module_name", "=", "module_name", ")", "self", ".", "expectation_engine", "=", "expectation_engine", "if", "self", ".", "expectation_engine", "is", "None", ":", "# Guess the engine", "try", ":", "import", "pandas", "as", "pd", "if", "isinstance", "(", "batch", ".", "data", ",", "pd", ".", "DataFrame", ")", ":", "self", ".", "expectation_engine", "=", "PandasDataset", "except", "ImportError", ":", "pass", "if", "self", ".", "expectation_engine", "is", "None", ":", "if", "isinstance", "(", "batch", ".", "data", ",", "SqlAlchemyBatchReference", ")", ":", "self", ".", "expectation_engine", "=", "SqlAlchemyDataset", "if", "self", ".", "expectation_engine", "is", "None", ":", "try", ":", "import", "pyspark", "if", "isinstance", "(", "batch", ".", "data", ",", "pyspark", ".", "sql", ".", "DataFrame", ")", ":", "self", ".", "expectation_engine", "=", "SparkDFDataset", "except", "ImportError", ":", "pass", "if", "self", ".", "expectation_engine", "is", "None", ":", "raise", "ValueError", "(", "\"Unable to identify expectation_engine. It must be a subclass of DataAsset.\"", ")", "self", ".", "init_kwargs", "=", "kwargs" ]
[ 1444, 4 ]
[ 1496, 33 ]
python
en
['en', 'en', 'en']
True
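One non-obvious path above: `expectation_engine` may be a plain dict, which is promoted to a ClassConfig and loaded dynamically (module_name defaults to "great_expectations.dataset"). A hedged sketch, with `batch` and `suite` assumed to exist:

bridge = BridgeValidator(
    batch,                                      # hypothetical Batch of pandas data
    expectation_suite=suite,                    # hypothetical ExpectationSuite
    expectation_engine={"class_name": "PandasDataset"},  # dict -> ClassConfig -> load_class
)
assert bridge.expectation_engine is PandasDataset

# With expectation_engine=None, the constructor instead sniffs batch.data:
# pandas DataFrame -> PandasDataset, SqlAlchemyBatchReference -> SqlAlchemyDataset,
# pyspark DataFrame -> SparkDFDataset, else ValueError.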
BridgeValidator.get_dataset
(self)
Bridges between Execution Engines in providing access to the batch data. Validates that Dataset classes contain the proper type of data (i.e. a Pandas Dataset does not contain SqlAlchemy data)
Bridges between Execution Engines in providing access to the batch data. Validates that Dataset classes contain the proper type of data (i.e. a Pandas Dataset does not contain SqlAlchemy data)
def get_dataset(self): """ Bridges between Execution Engines in providing access to the batch data. Validates that Dataset classes contain the proper type of data (i.e. a Pandas Dataset does not contain SqlAlchemy data) """ if issubclass(self.expectation_engine, PandasDataset): import pandas as pd if not isinstance(self.batch["data"], pd.DataFrame): raise ValueError( "PandasDataset expectation_engine requires a Pandas Dataframe for its batch" ) return self.expectation_engine( self.batch.data, expectation_suite=self.expectation_suite, batch_kwargs=self.batch.batch_kwargs, batch_parameters=self.batch.batch_parameters, batch_markers=self.batch.batch_markers, data_context=self.batch.data_context, **self.init_kwargs, **self.batch.batch_kwargs.get("dataset_options", {}), ) elif issubclass(self.expectation_engine, SqlAlchemyDataset): if not isinstance(self.batch.data, SqlAlchemyBatchReference): raise ValueError( "SqlAlchemyDataset expectation_engine requires a SqlAlchemyBatchReference for its batch" ) init_kwargs = self.batch.data.get_init_kwargs() init_kwargs.update(self.init_kwargs) return self.expectation_engine( batch_kwargs=self.batch.batch_kwargs, batch_parameters=self.batch.batch_parameters, batch_markers=self.batch.batch_markers, data_context=self.batch.data_context, expectation_suite=self.expectation_suite, **init_kwargs, **self.batch.batch_kwargs.get("dataset_options", {}), ) elif issubclass(self.expectation_engine, SparkDFDataset): import pyspark if not isinstance(self.batch.data, pyspark.sql.DataFrame): raise ValueError( "SparkDFDataset expectation_engine requires a spark DataFrame for its batch" ) return self.expectation_engine( spark_df=self.batch.data, expectation_suite=self.expectation_suite, batch_kwargs=self.batch.batch_kwargs, batch_parameters=self.batch.batch_parameters, batch_markers=self.batch.batch_markers, data_context=self.batch.data_context, **self.init_kwargs, **self.batch.batch_kwargs.get("dataset_options", {}), )
[ "def", "get_dataset", "(", "self", ")", ":", "if", "issubclass", "(", "self", ".", "expectation_engine", ",", "PandasDataset", ")", ":", "import", "pandas", "as", "pd", "if", "not", "isinstance", "(", "self", ".", "batch", "[", "\"data\"", "]", ",", "pd", ".", "DataFrame", ")", ":", "raise", "ValueError", "(", "\"PandasDataset expectation_engine requires a Pandas Dataframe for its batch\"", ")", "return", "self", ".", "expectation_engine", "(", "self", ".", "batch", ".", "data", ",", "expectation_suite", "=", "self", ".", "expectation_suite", ",", "batch_kwargs", "=", "self", ".", "batch", ".", "batch_kwargs", ",", "batch_parameters", "=", "self", ".", "batch", ".", "batch_parameters", ",", "batch_markers", "=", "self", ".", "batch", ".", "batch_markers", ",", "data_context", "=", "self", ".", "batch", ".", "data_context", ",", "*", "*", "self", ".", "init_kwargs", ",", "*", "*", "self", ".", "batch", ".", "batch_kwargs", ".", "get", "(", "\"dataset_options\"", ",", "{", "}", ")", ",", ")", "elif", "issubclass", "(", "self", ".", "expectation_engine", ",", "SqlAlchemyDataset", ")", ":", "if", "not", "isinstance", "(", "self", ".", "batch", ".", "data", ",", "SqlAlchemyBatchReference", ")", ":", "raise", "ValueError", "(", "\"SqlAlchemyDataset expectation_engine requires a SqlAlchemyBatchReference for its batch\"", ")", "init_kwargs", "=", "self", ".", "batch", ".", "data", ".", "get_init_kwargs", "(", ")", "init_kwargs", ".", "update", "(", "self", ".", "init_kwargs", ")", "return", "self", ".", "expectation_engine", "(", "batch_kwargs", "=", "self", ".", "batch", ".", "batch_kwargs", ",", "batch_parameters", "=", "self", ".", "batch", ".", "batch_parameters", ",", "batch_markers", "=", "self", ".", "batch", ".", "batch_markers", ",", "data_context", "=", "self", ".", "batch", ".", "data_context", ",", "expectation_suite", "=", "self", ".", "expectation_suite", ",", "*", "*", "init_kwargs", ",", "*", "*", "self", ".", "batch", ".", "batch_kwargs", ".", "get", "(", "\"dataset_options\"", ",", "{", "}", ")", ",", ")", "elif", "issubclass", "(", "self", ".", "expectation_engine", ",", "SparkDFDataset", ")", ":", "import", "pyspark", "if", "not", "isinstance", "(", "self", ".", "batch", ".", "data", ",", "pyspark", ".", "sql", ".", "DataFrame", ")", ":", "raise", "ValueError", "(", "\"SparkDFDataset expectation_engine requires a spark DataFrame for its batch\"", ")", "return", "self", ".", "expectation_engine", "(", "spark_df", "=", "self", ".", "batch", ".", "data", ",", "expectation_suite", "=", "self", ".", "expectation_suite", ",", "batch_kwargs", "=", "self", ".", "batch", ".", "batch_kwargs", ",", "batch_parameters", "=", "self", ".", "batch", ".", "batch_parameters", ",", "batch_markers", "=", "self", ".", "batch", ".", "batch_markers", ",", "data_context", "=", "self", ".", "batch", ".", "data_context", ",", "*", "*", "self", ".", "init_kwargs", ",", "*", "*", "self", ".", "batch", ".", "batch_kwargs", ".", "get", "(", "\"dataset_options\"", ",", "{", "}", ")", ",", ")" ]
[ 1498, 4 ]
[ 1557, 13 ]
python
en
['en', 'error', 'th']
False
project_optional_config_comment
()
Default value for PROJECT_OPTIONAL_CONFIG_COMMENT
Default value for PROJECT_OPTIONAL_CONFIG_COMMENT
def project_optional_config_comment(): """ Default value for PROJECT_OPTIONAL_CONFIG_COMMENT """ PROJECT_OPTIONAL_CONFIG_COMMENT = ( templates.CONFIG_VARIABLES_INTRO + """ config_variables_file_path: uncommitted/config_variables.yml # The plugins_directory will be added to your python path for custom modules # used to override and extend Great Expectations. plugins_directory: plugins/ stores: # Stores are configurable places to store things like Expectations, Validations # Data Docs, and more. These are for advanced users only - most users can simply # leave this section alone. # # Three stores are required: expectations, validations, and # evaluation_parameters, and must exist with a valid store entry. Additional # stores can be configured for uses such as data_docs, etc. expectations_store: class_name: ExpectationsStore store_backend: class_name: TupleFilesystemStoreBackend base_directory: expectations/ validations_store: class_name: ValidationsStore store_backend: class_name: TupleFilesystemStoreBackend base_directory: uncommitted/validations/ evaluation_parameter_store: # Evaluation Parameters enable dynamic expectations. Read more here: # https://docs.greatexpectations.io/en/latest/reference/core_concepts/evaluation_parameters.html class_name: EvaluationParameterStore checkpoint_store: class_name: CheckpointStore store_backend: class_name: TupleFilesystemStoreBackend suppress_store_backend_id: true base_directory: checkpoints/ expectations_store_name: expectations_store validations_store_name: validations_store evaluation_parameter_store_name: evaluation_parameter_store checkpoint_store_name: checkpoint_store data_docs_sites: # Data Docs make it simple to visualize data quality in your project. These # include Expectations, Validations & Profiles. The are built for all # Datasources from JSON artifacts in the local repo including validations & # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/latest/reference/core_concepts/data_docs.html local_site: class_name: SiteBuilder # set to false to hide how-to buttons in Data Docs show_how_to_buttons: true store_backend: class_name: TupleFilesystemStoreBackend base_directory: uncommitted/data_docs/local_site/ site_index_builder: class_name: DefaultSiteIndexBuilder """ ) return PROJECT_OPTIONAL_CONFIG_COMMENT
[ "def", "project_optional_config_comment", "(", ")", ":", "PROJECT_OPTIONAL_CONFIG_COMMENT", "=", "(", "templates", ".", "CONFIG_VARIABLES_INTRO", "+", "\"\"\"\nconfig_variables_file_path: uncommitted/config_variables.yml\n\n# The plugins_directory will be added to your python path for custom modules\n# used to override and extend Great Expectations.\nplugins_directory: plugins/\n\nstores:\n# Stores are configurable places to store things like Expectations, Validations\n# Data Docs, and more. These are for advanced users only - most users can simply\n# leave this section alone.\n#\n# Three stores are required: expectations, validations, and\n# evaluation_parameters, and must exist with a valid store entry. Additional\n# stores can be configured for uses such as data_docs, etc.\n expectations_store:\n class_name: ExpectationsStore\n store_backend:\n class_name: TupleFilesystemStoreBackend\n base_directory: expectations/\n\n validations_store:\n class_name: ValidationsStore\n store_backend:\n class_name: TupleFilesystemStoreBackend\n base_directory: uncommitted/validations/\n\n evaluation_parameter_store:\n # Evaluation Parameters enable dynamic expectations. Read more here:\n # https://docs.greatexpectations.io/en/latest/reference/core_concepts/evaluation_parameters.html\n class_name: EvaluationParameterStore\n\n checkpoint_store:\n class_name: CheckpointStore\n store_backend:\n class_name: TupleFilesystemStoreBackend\n suppress_store_backend_id: true\n base_directory: checkpoints/\n\nexpectations_store_name: expectations_store\nvalidations_store_name: validations_store\nevaluation_parameter_store_name: evaluation_parameter_store\ncheckpoint_store_name: checkpoint_store\n\ndata_docs_sites:\n # Data Docs make it simple to visualize data quality in your project. These\n # include Expectations, Validations & Profiles. The are built for all\n # Datasources from JSON artifacts in the local repo including validations &\n # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/latest/reference/core_concepts/data_docs.html\n local_site:\n class_name: SiteBuilder\n # set to false to hide how-to buttons in Data Docs\n show_how_to_buttons: true\n store_backend:\n class_name: TupleFilesystemStoreBackend\n base_directory: uncommitted/data_docs/local_site/\n site_index_builder:\n class_name: DefaultSiteIndexBuilder\n\"\"\"", ")", "return", "PROJECT_OPTIONAL_CONFIG_COMMENT" ]
[ 6, 0 ]
[ 72, 42 ]
python
en
['en', 'error', 'th']
False
test_project_optional_config_comment_matches_default
( project_optional_config_comment, )
What does this test and why? Make sure that the templates built on data_context.types.base.DataContextConfigDefaults match the desired default.
What does this test and why? Make sure that the templates built on data_context.types.base.DataContextConfigDefaults match the desired default.
def test_project_optional_config_comment_matches_default( project_optional_config_comment, ): """ What does this test and why? Make sure that the templates built on data_context.types.base.DataContextConfigDefaults match the desired default. """ assert templates.PROJECT_OPTIONAL_CONFIG_COMMENT == project_optional_config_comment
[ "def", "test_project_optional_config_comment_matches_default", "(", "project_optional_config_comment", ",", ")", ":", "assert", "templates", ".", "PROJECT_OPTIONAL_CONFIG_COMMENT", "==", "project_optional_config_comment" ]
[ 98, 0 ]
[ 106, 87 ]
python
en
['en', 'error', 'th']
False
test_project_help_comment_matches_default
(project_help_comment)
What does this test and why? Make sure that the templates built on data_context.types.base.DataContextConfigDefaults match the desired default.
What does this test and why? Make sure that the templates built on data_context.types.base.DataContextConfigDefaults match the desired default.
def test_project_help_comment_matches_default(project_help_comment): """ What does this test and why? Make sure that the templates built on data_context.types.base.DataContextConfigDefaults match the desired default. """ assert templates.PROJECT_HELP_COMMENT == project_help_comment
[ "def", "test_project_help_comment_matches_default", "(", "project_help_comment", ")", ":", "assert", "templates", ".", "PROJECT_HELP_COMMENT", "==", "project_help_comment" ]
[ 109, 0 ]
[ 115, 65 ]
python
en
['en', 'error', 'th']
False
lex
(code, lexer)
Lex ``code`` with ``lexer`` and return an iterable of tokens.
Lex ``code`` with ``lexer`` and return an iterable of tokens.
def lex(code, lexer): """ Lex ``code`` with ``lexer`` and return an iterable of tokens. """ try: return lexer.get_tokens(code) except TypeError as err: if isinstance(err.args[0], str) and \ ('unbound method get_tokens' in err.args[0] or 'missing 1 required positional argument' in err.args[0]): raise TypeError('lex() argument must be a lexer instance, ' 'not a class') raise
[ "def", "lex", "(", "code", ",", "lexer", ")", ":", "try", ":", "return", "lexer", ".", "get_tokens", "(", "code", ")", "except", "TypeError", "as", "err", ":", "if", "isinstance", "(", "err", ".", "args", "[", "0", "]", ",", "str", ")", "and", "(", "'unbound method get_tokens'", "in", "err", ".", "args", "[", "0", "]", "or", "'missing 1 required positional argument'", "in", "err", ".", "args", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "'lex() argument must be a lexer instance, '", "'not a class'", ")", "raise" ]
[ 39, 0 ]
[ 51, 13 ]
python
en
['en', 'error', 'th']
False
format
(tokens, formatter, outfile=None)
Format a tokenlist ``tokens`` with the formatter ``formatter``. If ``outfile`` is given and a valid file object (an object with a ``write`` method), the result will be written to it, otherwise it is returned as a string.
Format a tokenlist ``tokens`` with the formatter ``formatter``.
def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """
    Format a tokenlist ``tokens`` with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object with a
    ``write`` method), the result will be written to it, otherwise it
    is returned as a string.
    """
    try:
        if not outfile:
            realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError as err:
        if isinstance(err.args[0], str) and \
           ('unbound method format' in err.args[0] or
            'missing 1 required positional argument' in err.args[0]):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
[ "def", "format", "(", "tokens", ",", "formatter", ",", "outfile", "=", "None", ")", ":", "# pylint: disable=redefined-builtin", "try", ":", "if", "not", "outfile", ":", "realoutfile", "=", "getattr", "(", "formatter", ",", "'encoding'", ",", "None", ")", "and", "BytesIO", "(", ")", "or", "StringIO", "(", ")", "formatter", ".", "format", "(", "tokens", ",", "realoutfile", ")", "return", "realoutfile", ".", "getvalue", "(", ")", "else", ":", "formatter", ".", "format", "(", "tokens", ",", "outfile", ")", "except", "TypeError", "as", "err", ":", "if", "isinstance", "(", "err", ".", "args", "[", "0", "]", ",", "str", ")", "and", "(", "'unbound method format'", "in", "err", ".", "args", "[", "0", "]", "or", "'missing 1 required positional argument'", "in", "err", ".", "args", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "'format() argument must be a formatter instance, '", "'not a class'", ")", "raise" ]
[ 54, 0 ]
[ 75, 13 ]
python
en
['en', 'error', 'th']
False
highlight
(code, lexer, formatter, outfile=None)
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``. If ``outfile`` is given and a valid file object (an object with a ``write`` method), the result will be written to it, otherwise it is returned as a string.
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
def highlight(code, lexer, formatter, outfile=None):
    """
    Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.

    If ``outfile`` is given and a valid file object (an object with a
    ``write`` method), the result will be written to it, otherwise it
    is returned as a string.
    """
    return format(lex(code, lexer), formatter, outfile)
[ "def", "highlight", "(", "code", ",", "lexer", ",", "formatter", ",", "outfile", "=", "None", ")", ":", "return", "format", "(", "lex", "(", "code", ",", "lexer", ")", ",", "formatter", ",", "outfile", ")" ]
[ 78, 0 ]
[ 86, 55 ]
python
en
['en', 'error', 'th']
False
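The three helpers above compose: highlight() is simply lex() piped into format(). A minimal usage sketch, using only the public Pygments API (all names below are standard Pygments classes):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# With no outfile argument, the formatted result is returned as a string.
html = highlight("print('hello')", PythonLexer(), HtmlFormatter())
print(html)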
location
(mod_name)
Return the file and directory that the code for *mod_name* is in.
Return the file and directory that the code for *mod_name* is in.
def location(mod_name):
    """
    Return the file and directory that the code for *mod_name* is in.
    """
    source = mod_name.endswith("pyc") and mod_name[:-1] or mod_name
    source = os.path.abspath(source)
    return source, os.path.dirname(source)
[ "def", "location", "(", "mod_name", ")", ":", "source", "=", "mod_name", ".", "endswith", "(", "\"pyc\"", ")", "and", "mod_name", "[", ":", "-", "1", "]", "or", "mod_name", "source", "=", "os", ".", "path", ".", "abspath", "(", "source", ")", "return", "source", ",", "os", ".", "path", ".", "dirname", "(", "source", ")" ]
[ 10, 0 ]
[ 16, 42 ]
python
en
['en', 'error', 'th']
False
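A brief illustration of the helper above; the module path is hypothetical. Note that the trailing-"pyc" check strips only the final character, mapping a compiled file back to its .py source:

import os

# Hypothetical path: "pkg/mod.pyc" resolves to the absolute path of "pkg/mod.py".
source, source_dir = location("pkg/mod.pyc")
assert source.endswith(os.path.join("pkg", "mod.py"))
assert source_dir == os.path.dirname(source)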
load_mpi_test
(file_path, seq, baseline_normalize)
Usage: Load a section once. :param file_path: path of the .npz file to load. :param seq: There are six sequences (seq=0,1,2,3,4,5) and 2935 poses in a unique set (seq==7). If you want to evaluate by scene setting, you can use the sequence-wise evaluation to convert to these numbers: #1 Studio with Green Screen: (TS1*603 + TS2*540)/(603+540); #2 Studio without Green Screen: (TS3*505 + TS4*553)/(505+553); #3 Outdoor: (TS5*276 + TS6*452)/(276+452). :param baseline_normalize: if True, root-center the 3D pose and normalize the 2D pose to screen coordinates; otherwise apply pixel normalization via norm_to_pixel. :return: Normalized 2d/3d pose, normalization params and camera intrinsics. All types: List
Usage: Load a section once. :param file_path: path of the .npz file to load. :param seq: There are six sequences (seq=0,1,2,3,4,5) and 2935 poses in a unique set (seq==7). If you want to evaluate by scene setting, you can use the sequence-wise evaluation to convert to these numbers: #1 Studio with Green Screen: (TS1*603 + TS2*540)/(603+540); #2 Studio without Green Screen: (TS3*505 + TS4*553)/(505+553); #3 Outdoor: (TS5*276 + TS6*452)/(276+452). :param baseline_normalize: if True, root-center the 3D pose and normalize the 2D pose to screen coordinates; otherwise apply pixel normalization via norm_to_pixel. :return: Normalized 2d/3d pose, normalization params and camera intrinsics. All types: List
def load_mpi_test(file_path, seq, baseline_normalize):
    """
    Usage: Load a section once.
    :param file_path: path of the .npz file to load.
    :param seq: There are six sequences (seq=0,1,2,3,4,5) and 2935 poses in a
        unique set (seq==7). If you want to evaluate by scene setting, you can
        use the sequence-wise evaluation to convert to these numbers:
        #1 Studio with Green Screen: (TS1*603 + TS2*540)/(603+540)
        #2 Studio without Green Screen: (TS3*505 + TS4*553)/(505+553)
        #3 Outdoor: (TS5*276 + TS6*452)/(276+452)
    :param baseline_normalize: if True, root-center the 3D pose and normalize
        the 2D pose to screen coordinates; otherwise apply pixel normalization
        via norm_to_pixel.
    :return: Normalized 2d/3d pose, normalization params and camera intrinsics. All types: List
    """
    info = np.load(file_path, allow_pickle=True)
    if seq in range(0, 6):
        pose_3d = info['pose3d_univ'][seq]
        pose_2d = info['pose2d'][seq]
        if seq in [0, 1, 2, 3]:
            img_w, img_h = 2048, 2048
            cam_intri = np.array([1500.0686135995716, 1500.6590966853348, 1017.3794860438494,
                                  1043.062824876024, 1, 1, 1, 1, 1])
        elif seq in [4, 5]:
            img_w, img_h = 1920, 1080
            cam_intri = np.array([1683.482559482185, 1671.927242063379, 939.9278168524228,
                                  560.2072491988034, 1, 1, 1, 1, 1])
    elif seq == 7:
        pose_3d = info['pose3d_univ'][0]
        pose_2d = info['pose2d'][0]
        img_w, img_h = 2048, 2048
        cam_intri = np.array([1504.1479043534127, 1556.86936732066, 991.7469587022122,
                              872.994958045596, 1, 1, 1, 1, 1])
    params = {}
    if baseline_normalize:
        # Remove global offset, but keep trajectory in first position
        pose_3d[:, 1:] -= pose_3d[:, :1]
        normed_pose_3d = pose_3d / 1000
        normed_pose_2d = normalize_screen_coordinates(pose_2d[..., :2], w=img_w, h=img_h)
        params['intrinsic'] = cam_intri
    else:
        normed_pose_3d, normed_pose_2d, pixel_ratio, rescale_ratio, offset_2d, abs_root_Z = \
            norm_to_pixel(pose_3d / 1000, pose_2d, cam_intri, norm)
        norm_params = np.concatenate((pixel_ratio, rescale_ratio, offset_2d, abs_root_Z), axis=-1)  # [T, 1, 5], len()==4
        params['intrinsic'] = cam_intri
        params['normalization_params'] = norm_params
    return normed_pose_3d, normed_pose_2d, params
[ "def", "load_mpi_test", "(", "file_path", ",", "seq", ",", "baseline_normalize", ")", ":", "info", "=", "np", ".", "load", "(", "file_path", ",", "allow_pickle", "=", "True", ")", "if", "seq", "in", "range", "(", "0", ",", "6", ")", ":", "pose_3d", "=", "info", "[", "'pose3d_univ'", "]", "[", "seq", "]", "pose_2d", "=", "info", "[", "'pose2d'", "]", "[", "seq", "]", "if", "seq", "in", "[", "0", ",", "1", ",", "2", ",", "3", "]", ":", "img_w", ",", "img_h", "=", "2048", ",", "2048", "cam_intri", "=", "np", ".", "array", "(", "[", "1500.0686135995716", ",", "1500.6590966853348", ",", "1017.3794860438494", ",", "1043.062824876024", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", "]", ")", "elif", "seq", "in", "[", "4", ",", "5", "]", ":", "img_w", ",", "img_h", "=", "1920", ",", "1080", "cam_intri", "=", "np", ".", "array", "(", "[", "1683.482559482185", ",", "1671.927242063379", ",", "939.9278168524228", ",", "560.2072491988034", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", "]", ")", "elif", "seq", "==", "7", ":", "pose_3d", "=", "info", "[", "'pose3d_univ'", "]", "[", "0", "]", "pose_2d", "=", "info", "[", "'pose2d'", "]", "[", "0", "]", "img_w", ",", "img_h", "=", "2048", ",", "2048", "cam_intri", "=", "np", ".", "array", "(", "[", "1504.1479043534127", ",", "1556.86936732066", ",", "991.7469587022122", ",", "872.994958045596", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", "]", ")", "params", "=", "{", "}", "if", "baseline_normalize", ":", "# Remove global offset, but keep trajectory in first position", "pose_3d", "[", ":", ",", "1", ":", "]", "-=", "pose_3d", "[", ":", ",", ":", "1", "]", "normed_pose_3d", "=", "pose_3d", "/", "1000", "normed_pose_2d", "=", "normalize_screen_coordinates", "(", "pose_2d", "[", "...", ",", ":", "2", "]", ",", "w", "=", "img_w", ",", "h", "=", "img_h", ")", "params", "[", "'intrinsic'", "]", "=", "cam_intri", "else", ":", "normed_pose_3d", ",", "normed_pose_2d", ",", "pixel_ratio", ",", "rescale_ratio", ",", "offset_2d", ",", "abs_root_Z", "=", "norm_to_pixel", "(", "pose_3d", "/", "1000", ",", "pose_2d", ",", "cam_intri", ",", "norm", ")", "norm_params", "=", "np", ".", "concatenate", "(", "(", "pixel_ratio", ",", "rescale_ratio", ",", "offset_2d", ",", "abs_root_Z", ")", ",", "axis", "=", "-", "1", ")", "# [T, 1, 5], len()==4", "params", "[", "'intrinsic'", "]", "=", "cam_intri", "params", "[", "'normalization_params'", "]", "=", "norm_params", "return", "normed_pose_3d", ",", "normed_pose_2d", ",", "params" ]
[ 3, 0 ]
[ 43, 49 ]
python
en
['en', 'error', 'th']
False
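A hedged usage sketch for the loader above. The .npz filename is a placeholder, and the function relies on module-level helpers (normalize_screen_coordinates, norm_to_pixel, norm) defined alongside it:

# seq=0 selects the first test sequence; baseline_normalize=True root-centers
# the 3D pose and screen-normalizes the 2D pose.
pose_3d, pose_2d, params = load_mpi_test("mpi_inf_3dhp_test_set.npz", seq=0,
                                         baseline_normalize=True)
print(pose_3d.shape, pose_2d.shape, params["intrinsic"][:4])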
ApplicationManager.generate_application_string
(cls, test)
Generate an application string based on some of the given information that can be pulled from the test object: app_env, start_time.
Generate an application string based on some of the given information that can be pulled from the test object: app_env, start_time.
    def generate_application_string(cls, test):
        """
        Generate an application string based on some of the given
        information that can be pulled from the test object:
        app_env, start_time.
        """
        app_env = 'test'
        if hasattr(test, 'env'):
            app_env = test.env
        elif hasattr(test, 'environment'):
            app_env = test.environment

        start_time = int(time.time() * 1000)
        return "%s.%s" % (app_env, start_time)
[ "def", "generate_application_string", "(", "cls", ",", "test", ")", ":", "app_env", "=", "'test'", "if", "hasattr", "(", "test", ",", "'env'", ")", ":", "app_env", "=", "test", ".", "env", "elif", "hasattr", "(", "test", ",", "'environment'", ")", ":", "app_env", "=", "test", ".", "environment", "start_time", "=", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "return", "\"%s.%s\"", "%", "(", "app_env", ",", "start_time", ")" ]
[ 13, 4 ]
[ 25, 46 ]
python
en
['en', 'en', 'en']
True
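A small sketch of calling the method above; the test object is a stand-in exposing only the attribute the helper inspects, and invoking it directly on ApplicationManager assumes the method is a classmethod, as the cls parameter suggests:

class _FakeTest:
    env = "staging"  # the helper falls back to 'environment', then to 'test'

app_string = ApplicationManager.generate_application_string(_FakeTest())
# -> something like "staging.1700000000000" (env name plus millisecond timestamp)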
xarray_concat_and_merge
(*args, concat_dim='time', sort_dim='time')
Given parameters that are each a list of `xarray.Dataset` objects, merge each list into an `xarray.Dataset` object and return all such objects in the same order. Parameters ---------- *args: list of lists of `xarray.Dataset`. A list of lists of `xarray.Dataset` objects to merge. concat_dim, sort_dim: str or list of str The string name of the dimension to concatenate or sort the data by. If a list, must be same length as `*args`, where each element of these variables corresponds to the same element in `*args` by index. Returns ------- merged: list of `xarray.Dataset` A list of the same length as `*args`, containing the merged data.
Given parameters that are each a list of `xarray.Dataset` objects, merge each list into an `xarray.Dataset` object and return all such objects in the same order.
def xarray_concat_and_merge(*args, concat_dim='time', sort_dim='time'):
    """
    Given parameters that are each a list of `xarray.Dataset` objects,
    merge each list into an `xarray.Dataset` object and return all such
    objects in the same order.

    Parameters
    ----------
    *args: list of lists of `xarray.Dataset`.
        A list of lists of `xarray.Dataset` objects to merge.
    concat_dim, sort_dim: str or list of str
        The string name of the dimension to concatenate or sort the data by.
        If a list, must be same length as `*args`, where each element of these
        variables corresponds to the same element in `*args` by index.

    Returns
    -------
    merged: list of `xarray.Dataset`
        A list of the same length as `*args`, containing the merged data.
    """
    merged = []
    for i, arg in enumerate(args):
        current_concat_dim = concat_dim[i] if isinstance(concat_dim, list) else concat_dim
        current_sort_dim = sort_dim[i] if isinstance(sort_dim, list) else sort_dim
        dataset_temp = xr.concat(arg, dim=current_concat_dim)
        merged.append(xarray_sortby_coord(dataset_temp, coord=current_sort_dim))
    return merged
[ "def", "xarray_concat_and_merge", "(", "*", "args", ",", "concat_dim", "=", "'time'", ",", "sort_dim", "=", "'time'", ")", ":", "merged", "=", "[", "]", "for", "i", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "current_concat_dim", "=", "concat_dim", "[", "i", "]", "if", "isinstance", "(", "concat_dim", ",", "list", ")", "else", "concat_dim", "current_sort_dim", "=", "sort_dim", "[", "i", "]", "if", "isinstance", "(", "sort_dim", ",", "list", ")", "else", "sort_dim", "dataset_temp", "=", "xr", ".", "concat", "(", "arg", ",", "dim", "=", "concat_dim", ")", "merged", ".", "append", "(", "xarray_sortby_coord", "(", "dataset_temp", ",", "coord", "=", "sort_dim", ")", ")", "return", "merged" ]
[ 14, 0 ]
[ 39, 17 ]
python
en
['en', 'error', 'th']
False
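A runnable-style sketch of the merge helper above, assuming numpy/xarray are installed and the module's xarray_sortby_coord is importable. Two out-of-order single-timestep datasets come back as one dataset sorted along 'time':

import numpy as np
import xarray as xr

def _one_step(day, value):
    # One dataset with a single 'time' coordinate.
    return xr.Dataset({"v": ("time", [value])},
                      coords={"time": [np.datetime64(day)]})

parts = [_one_step("2020-01-03", 1.0), _one_step("2020-01-01", 2.0)]
(merged,) = xarray_concat_and_merge(parts)
print(merged.time.values)  # chronological order after sorting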
merge_datasets
(datasets_temp, clean_masks_temp, masks_per_platform=None, x_coord='longitude', y_coord='latitude')
Merges dictionaries of platforms mapping to datasets, dataset clean masks, and lists of other masks into one dataset, one dataset clean mask, and one of each type of other mask, ordering all by time. Parameters ---------- datasets_temp, clean_masks_temp, masks_per_platform: dict Dictionaries that map, respectively, platforms to `xarray.Dataset` or `xarray.DataArray` objects to merge, `xarray.DataArray` masks to merge, and lists of `xarray.DataArray` masks to merge separately. All entries must have a 'time' dimension. x_coord, y_coord: str Names of the x and y coordinates in the datasets in `datasets_temp`. Returns ------- dataset: xarray.Dataset or xarray.DataArray The raw data requested. Can be cleaned with `dataset.where(clean_mask)`. clean_mask: xarray.DataArray The clean mask. masks: list of xarray.DataArray A list of individual masks. Raises ------ AssertionError: If no data was retrieved for any query (i.e. `len(datasets_temp) == 0`). :Authors: John Rattz ([email protected])
Merges dictionaries of platforms mapping to datasets, dataset clean masks, and lists of other masks into one dataset, one dataset clean mask, and one of each type of other mask, ordering all by time.
def merge_datasets(datasets_temp, clean_masks_temp, masks_per_platform=None,
                   x_coord='longitude', y_coord='latitude'):
    """
    Merges dictionaries of platforms mapping to datasets,
    dataset clean masks, and lists of other masks into one
    dataset, one dataset clean mask, and one of each type of other mask,
    ordering all by time.

    Parameters
    ----------
    datasets_temp, clean_masks_temp, masks_per_platform: dict
        Dictionaries that map, respectively, platforms to `xarray.Dataset`
        or `xarray.DataArray` objects to merge, `xarray.DataArray` masks to merge,
        and lists of `xarray.DataArray` masks to merge separately.
        All entries must have a 'time' dimension.
    x_coord, y_coord: str
        Names of the x and y coordinates in the datasets in `datasets_temp`.

    Returns
    -------
    dataset: xarray.Dataset or xarray.DataArray
        The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
    clean_mask: xarray.DataArray
        The clean mask.
    masks: list of xarray.DataArray
        A list of individual masks.

    Raises
    ------
    AssertionError: If no data was retrieved for any query
                    (i.e. `len(datasets_temp) == 0`).

    :Authors: John Rattz ([email protected])
    """
    def xr_set_same_coords(datasets):
        first_ds = datasets[0]
        for i, ds in enumerate(datasets):
            datasets[i] = \
                ds.assign_coords(**{x_coord: first_ds[x_coord],
                                    y_coord: first_ds[y_coord]})

    assert len(datasets_temp) > 0, "No data was retrieved."  # No data for any query.
    # If multiple non-empty datasets were retrieved, merge them and sort by time.
    masks = None
    if len(datasets_temp) > 1:
        # Merge datasets.
        datasets_temp_list = list(datasets_temp.values())
        # Set same x and y coords so `xr.concat()` concatenates as intended.
        xr_set_same_coords(datasets_temp_list)
        dataset = xr.concat(datasets_temp_list, dim='time')
        dataset = xarray_sortby_coord(dataset, 'time')
        # Merge clean masks.
        clean_masks_temp_list = list(clean_masks_temp.values())
        xr_set_same_coords(clean_masks_temp_list)
        clean_mask = xr.concat(clean_masks_temp_list, dim='time')
        clean_mask = xarray_sortby_coord(clean_mask, 'time')
        # Merge masks.
        if masks_per_platform is not None:
            num_platforms = len(masks_per_platform.keys())
            num_masks = len(list(masks_per_platform.values())[0])
            np_platform_masks = np.empty((num_platforms, num_masks), dtype=object)
            for i, mask_list in enumerate(masks_per_platform.values()):
                np_platform_masks[i] = mask_list
            masks = []
            for j in range(num_masks):
                masks.append(xr.concat(list(np_platform_masks[:, j]), dim='time'))
    else:  # Select the only dataset.
        dataset = datasets_temp[list(datasets_temp.keys())[0]]
        clean_mask = clean_masks_temp[list(clean_masks_temp.keys())[0]]
        if masks_per_platform is not None:
            masks = masks_per_platform[list(masks_per_platform.keys())[0]]
    return dataset, clean_mask, masks
[ "def", "merge_datasets", "(", "datasets_temp", ",", "clean_masks_temp", ",", "masks_per_platform", "=", "None", ",", "x_coord", "=", "'longitude'", ",", "y_coord", "=", "'latitude'", ")", ":", "def", "xr_set_same_coords", "(", "datasets", ")", ":", "first_ds", "=", "datasets", "[", "0", "]", "for", "i", ",", "ds", "in", "enumerate", "(", "datasets", ")", ":", "datasets", "[", "i", "]", "=", "ds", ".", "assign_coords", "(", "*", "*", "{", "x_coord", ":", "first_ds", "[", "x_coord", "]", ",", "y_coord", ":", "first_ds", "[", "y_coord", "]", "}", ")", "assert", "len", "(", "datasets_temp", ")", ">", "0", ",", "\"No data was retrieved.\"", "# No data for any query.", "# If multiple non-empty datasets were retrieved, merge them and sort by time.", "masks", "=", "None", "if", "len", "(", "datasets_temp", ")", ">", "1", ":", "# Merge datasets.", "datasets_temp_list", "=", "list", "(", "datasets_temp", ".", "values", "(", ")", ")", "# Set same x and y coords so `xr.concat()` concatenates as intended.", "xr_set_same_coords", "(", "datasets_temp_list", ")", "dataset", "=", "xr", ".", "concat", "(", "datasets_temp_list", ",", "dim", "=", "'time'", ")", "dataset", "=", "xarray_sortby_coord", "(", "dataset", ",", "'time'", ")", "# Merge clean masks.", "clean_masks_temp_list", "=", "list", "(", "clean_masks_temp", ".", "values", "(", ")", ")", "xr_set_same_coords", "(", "clean_masks_temp_list", ")", "clean_mask", "=", "xr", ".", "concat", "(", "clean_masks_temp_list", ",", "dim", "=", "'time'", ")", "clean_mask", "=", "xarray_sortby_coord", "(", "clean_mask", ",", "'time'", ")", "# Merge masks.", "if", "masks_per_platform", "is", "not", "None", ":", "num_platforms", "=", "len", "(", "masks_per_platform", ".", "keys", "(", ")", ")", "num_masks", "=", "len", "(", "list", "(", "masks_per_platform", ".", "values", "(", ")", ")", "[", "0", "]", ")", "np_platform_masks", "=", "np", ".", "empty", "(", "(", "num_platforms", ",", "num_masks", ")", ",", "dtype", "=", "object", ")", "for", "i", ",", "mask_list", "in", "enumerate", "(", "masks_per_platform", ".", "values", "(", ")", ")", ":", "np_platform_masks", "[", "i", "]", "=", "mask_list", "masks", "=", "[", "]", "for", "j", "in", "range", "(", "num_masks", ")", ":", "masks", ".", "append", "(", "xr", ".", "concat", "(", "list", "(", "np_platform_masks", "[", ":", ",", "j", "]", ")", ",", "dim", "=", "'time'", ")", ")", "else", ":", "# Select the only dataset.", "dataset", "=", "datasets_temp", "[", "list", "(", "datasets_temp", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "clean_mask", "=", "clean_masks_temp", "[", "list", "(", "clean_masks_temp", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "if", "masks_per_platform", "is", "not", "None", ":", "masks", "=", "masks_per_platform", "[", "list", "(", "masks_per_platform", ".", "keys", "(", ")", ")", "[", "0", "]", "]", "return", "dataset", ",", "clean_mask", ",", "masks" ]
[ 41, 0 ]
[ 112, 37 ]
python
en
['en', 'error', 'th']
False
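A hedged sketch of calling the merger above; ds7/ds8 and mask7/mask8 are placeholders for per-platform xarray objects produced elsewhere (for instance by the loaders later in this module):

datasets_temp = {"LANDSAT_7": ds7, "LANDSAT_8": ds8}         # xarray.Datasets
clean_masks_temp = {"LANDSAT_7": mask7, "LANDSAT_8": mask8}  # boolean DataArrays
dataset, clean_mask, masks = merge_datasets(datasets_temp, clean_masks_temp)
clean = dataset.where(clean_mask)  # apply the combined clean mask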
load_simple
(dc, platform, product, frac_res=None, abs_res=None, load_params={}, masking_params={}, indiv_masks=None)
Simplifies loading from the Data Cube by retrieving a dataset along with its mask. Parameters ---------- dc: datacube.api.core.Datacube The Datacube instance to load data with. platform, product: str Strings denoting the platform and product to retrieve data for. frac_res: float The fraction of the original resolution to scale to. Must be positive. Note that this can be greater than 1.0, in which case the resolution is upsampled. abs_res: list-like A list-like of the number of pixels for the x and y axes, respectively. Overrides `frac_res` if specified. load_params: dict, optional A dictionary of parameters for `dc.load()`. Here are some common load parameters: *lat, lon: list-like 2-tuples of minimum and maximum values for latitude and longitude, respectively.* *time: list-like A 2-tuple of the minimum and maximum times for acquisitions.* *measurements: list-like The list of measurements to retrieve from the Datacube.* masking_params: dict, optional A dictionary of keyword arguments for corresponding masking functions. For example: {'cover_types': ['cloud']} would retain only clouds for Landsat products, because `landsat_qa_clean_mask()` is used for the Landsat family of platforms. indiv_masks: list A list of masks to return (e.g. ['water']). These do not have to be the same ones used to create `clean_mask`. Returns ------- dataset: xarray.Dataset The raw data requested. Can be cleaned with `dataset.where(clean_mask)`. clean_mask: xarray.DataArray The clean mask, formed as a logical AND of all masks used. masks: list of xarray.DataArray A list of the masks requested by `indiv_masks`, or `None` if `indiv_masks` is not specified. Raises ------ AssertionError: If no data is retrieved for any platform query. :Authors: John Rattz ([email protected])
Simplifies loading from the Data Cube by retrieving a dataset along with its mask.
def load_simple(dc, platform, product, frac_res=None, abs_res=None,
                load_params={}, masking_params={}, indiv_masks=None):
    """
    Simplifies loading from the Data Cube by retrieving a dataset along with its mask.

    Parameters
    ----------
    dc: datacube.api.core.Datacube
        The Datacube instance to load data with.
    platform, product: str
        Strings denoting the platform and product to retrieve data for.
    frac_res: float
        The fraction of the original resolution to scale to. Must be positive.
        Note that this can be greater than 1.0, in which case the resolution
        is upsampled.
    abs_res: list-like
        A list-like of the number of pixels for the x and y axes, respectively.
        Overrides `frac_res` if specified.
    load_params: dict, optional
        A dictionary of parameters for `dc.load()`.
        Here are some common load parameters:
        *lat, lon: list-like 2-tuples of minimum and maximum values for
                   latitude and longitude, respectively.*
        *time: list-like A 2-tuple of the minimum and maximum times for acquisitions.*
        *measurements: list-like The list of measurements to retrieve from the Datacube.*
    masking_params: dict, optional
        A dictionary of keyword arguments for corresponding masking functions.
        For example: {'cover_types': ['cloud']} would retain only clouds for
        Landsat products, because `landsat_qa_clean_mask()` is used for the
        Landsat family of platforms.
    indiv_masks: list
        A list of masks to return (e.g. ['water']). These do not have to be
        the same ones used to create `clean_mask`.

    Returns
    -------
    dataset: xarray.Dataset
        The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
    clean_mask: xarray.DataArray
        The clean mask, formed as a logical AND of all masks used.
    masks: list of xarray.DataArray
        A list of the masks requested by `indiv_masks`,
        or `None` if `indiv_masks` is not specified.

    Raises
    ------
    AssertionError: If no data is retrieved for any platform query.

    :Authors: John Rattz ([email protected])
    """
    current_load_params = dict(platform=platform, product=product)
    current_load_params.update(load_params)
    dataset = dc.load(**current_load_params)
    assert len(dataset.dims) > 0, "No data was retrieved."
    # Scale resolution if specified.
    if frac_res is not None or abs_res is not None:
        dataset = xr_scale_res(dataset, frac_res=frac_res, abs_res=abs_res)
    # Get the clean mask for the appropriate LANDSAT satellite platform.
    clean_mask = landsat_qa_clean_mask(dataset, platform, **masking_params)
    # Get the mask for removing data outside the accepted range of LANDSAT 7 and 8.
    clean_mask = xr_and(clean_mask, landsat_clean_mask_invalid(dataset))
    # Retrieve individual masks.
    if indiv_masks is None:
        masks = None
    else:
        masks = []
        for mask in indiv_masks:
            masks.append(landsat_qa_clean_mask(dataset, platform, cover_types=[mask]))
    return dataset, clean_mask, masks
[ "def", "load_simple", "(", "dc", ",", "platform", ",", "product", ",", "frac_res", "=", "None", ",", "abs_res", "=", "None", ",", "load_params", "=", "{", "}", ",", "masking_params", "=", "{", "}", ",", "indiv_masks", "=", "None", ")", ":", "current_load_params", "=", "dict", "(", "platform", "=", "platform", ",", "product", "=", "product", ")", "current_load_params", ".", "update", "(", "load_params", ")", "dataset", "=", "dc", ".", "load", "(", "*", "*", "current_load_params", ")", "assert", "len", "(", "dataset", ".", "dims", ")", ">", "0", ",", "\"No data was retrieved.\"", "# Scale resolution if specified.", "if", "frac_res", "is", "not", "None", "or", "abs_res", "is", "not", "None", ":", "dataset", "=", "xr_scale_res", "(", "dataset", ",", "frac_res", "=", "frac_res", ",", "abs_res", "=", "abs_res", ")", "# Get the clean mask for the appropriate LANDSAT satellite platform.", "clean_mask", "=", "landsat_qa_clean_mask", "(", "dataset", ",", "platform", ",", "*", "*", "masking_params", ")", "# Get the mask for removing data ouside the accepted range of LANDSAT 7 and 8.", "clean_mask", "=", "xr_and", "(", "clean_mask", ",", "landsat_clean_mask_invalid", "(", "dataset", ")", ")", "# Retrieve individual masks.", "if", "indiv_masks", "is", "None", ":", "masks", "=", "None", "else", ":", "masks", "=", "[", "]", "for", "mask", "in", "indiv_masks", ":", "masks", ".", "append", "(", "landsat_qa_clean_mask", "(", "dataset", ",", "platform", ",", "cover_types", "=", "[", "mask", "]", ")", ")", "return", "dataset", ",", "clean_mask", ",", "masks" ]
[ 114, 0 ]
[ 185, 37 ]
python
en
['en', 'error', 'th']
False
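A hedged sketch of a single-platform load; the product name and extents below are placeholders, while `datacube.Datacube` is the standard Open Data Cube entry point (it requires a configured datacube installation to actually run):

import datacube

dc = datacube.Datacube(app="load_simple_demo")
load_params = dict(lat=(0.0, 1.0), lon=(34.0, 35.0),
                   time=("2017-01-01", "2017-12-31"),
                   measurements=["red", "green", "blue", "pixel_qa"])
dataset, clean_mask, masks = load_simple(dc, "LANDSAT_8", "ls8_usgs_sr_scene",
                                         load_params=load_params,
                                         indiv_masks=["water"])
cleaned = dataset.where(clean_mask)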
load_multiplatform
(dc, platforms, products, frac_res=None, abs_res=None, load_params={}, masking_params={}, indiv_masks=None)
Load and merge data as well as clean masks given a list of platforms and products. Currently only tested on Landsat data. Parameters ---------- dc: datacube.api.core.Datacube The Datacube instance to load data with. platforms, products: list-like A list-like of platforms and products. Both must have the same length. frac_res: float The fraction of the original resolution to scale to. Must be positive. Note that this can be greater than 1.0, in which case the resolution is upsampled. The resolution used for all products will be the minimum resolution for latitude and longitude among any of them. abs_res: list-like A list-like of the number of pixels for the x and y axes, respectively. Overrides `frac_res` if specified. load_params: dict, optional A dictionary of parameters for `dc.load()` or a dictionary of dictionaries of such parameters, mapping platform names to parameter dictionaries (primarily useful for selecting different time ranges). Here are some common load parameters: *lat, lon: list-like 2-tuples of minimum and maximum values for latitude and longitude, respectively.* *time: list-like A pair of the minimum and maximum times for acquisitions or a list of such pairs.* *measurements: list-like The list of measurements to retrieve from the Datacube.* For example, to load data with different time ranges for different platforms, we could pass the following: `{'LANDSAT_7': dict(**common_load_params, time=ls7_date_range), 'LANDSAT_8': dict(**common_load_params, time=ls8_date_range)}`, where `common_load_params` is a dictionary of load parameters common to both - most notably 'lat', 'lon', and 'measurements' - and the 'date_range' variables are the time ranges for the corresponding platforms. masking_params: dict, optional A dictionary mapping platform names to a dictionary of keyword arguments for corresponding masking functions. For example: {'LANDSAT_7': {'cover_types': ['cloud']}, 'LANDSAT_8': {'cover_types': ['cloud']}} would retain only clouds, because `landsat_qa_clean_mask()` is used to create clean masks for the Landsat family of platforms. indiv_masks: list A list of masks to return (e.g. ['water']). These do not have to be the same ones used to create the returned clean mask. Returns ------- dataset: xarray.Dataset The raw data requested. Can be cleaned with `dataset.where(clean_mask)`. clean_mask: xarray.DataArray The clean mask, formed as a logical AND of all masks used. masks: list of xarray.DataArray A list of the masks requested by `indiv_masks`, or `None` if `indiv_masks` is not specified. Raises ------ AssertionError: If no data is retrieved from any product. :Authors: John Rattz ([email protected])
Load and merge data as well as clean masks given a list of platforms and products. Currently only tested on Landsat data. Parameters ---------- dc: datacube.api.core.Datacube The Datacube instance to load data with. platforms, products: list-like A list-like of platforms and products. Both must have the same length. frac_res: float The fraction of the original resolution to scale to. Must be positive. Note that this can be greater than 1.0, in which case the resolution is upsampled. The resolution used for all products will be the minimum resolution for latitude and longitude among any of them. abs_res: list-like A list-like of the number of pixels for the x and y axes, respectively. Overrides `frac_res` if specified. load_params: dict, optional A dictionary of parameters for `dc.load()` or a dictionary of dictionaries of such parameters, mapping platform names to parameter dictionaries (primarily useful for selecting different time ranges). Here are some common load parameters: *lat, lon: list-like 2-tuples of minimum and maximum values for latitude and longitude, respectively.* *time: list-like A pair of the minimum and maximum times for acquisitions or a list of such pairs.* *measurements: list-like The list of measurements to retrieve from the Datacube.* For example, to load data with different time ranges for different platforms, we could pass the following: `{'LANDSAT_7': dict(**common_load_params, time=ls7_date_range), 'LANDSAT_8': dict(**common_load_params, time=ls8_date_range)}`, where `common_load_params` is a dictionary of load parameters common to both - most notably 'lat', 'lon', and 'measurements' - and the 'date_range' variables are the time ranges for the corresponding platforms. masking_params: dict, optional A dictionary mapping platform names to a dictionary of keyword arguments for corresponding masking functions. For example: {'LANDSAT_7': {'cover_types': ['cloud']}, 'LANDSAT_8': {'cover_types': ['cloud']}} would retain only clouds, because `landsat_qa_clean_mask()` is used to create clean masks for the Landsat family of platforms. indiv_masks: list A list of masks to return (e.g. ['water']). These do not have to be the same ones used to create the returned clean mask. Returns ------- dataset: xarray.Dataset The raw data requested. Can be cleaned with `dataset.where(clean_mask)`. clean_mask: xarray.DataArray The clean mask, formed as a logical AND of all masks used. masks: list of xarray.DataArray A list of the masks requested by `indiv_masks`, or `None` if `indiv_masks` is not specified. Raises ------ AssertionError: If no data is retrieved from any product. :Authors: John Rattz ([email protected])
def load_multiplatform(dc, platforms, products, frac_res=None, abs_res=None,
                       load_params={}, masking_params={}, indiv_masks=None):
    """
    Load and merge data as well as clean masks given a list of platforms
    and products. Currently only tested on Landsat data.

    Parameters
    ----------
    dc: datacube.api.core.Datacube
        The Datacube instance to load data with.
    platforms, products: list-like
        A list-like of platforms and products. Both must have the same length.
    frac_res: float
        The fraction of the original resolution to scale to. Must be positive.
        Note that this can be greater than 1.0, in which case the resolution
        is upsampled. The resolution used for all products will be the minimum
        resolution for latitude and longitude among any of them.
    abs_res: list-like
        A list-like of the number of pixels for the x and y axes, respectively.
        Overrides `frac_res` if specified.
    load_params: dict, optional
        A dictionary of parameters for `dc.load()` or a dictionary of
        dictionaries of such parameters, mapping platform names to parameter
        dictionaries (primarily useful for selecting different time ranges).
        Here are some common load parameters:
        *lat, lon: list-like 2-tuples of minimum and maximum values for
                   latitude and longitude, respectively.*
        *time: list-like A pair of the minimum and maximum times for
               acquisitions or a list of such pairs.*
        *measurements: list-like The list of measurements to retrieve
                       from the Datacube.*
        For example, to load data with different time ranges for different
        platforms, we could pass the following:
        `{'LANDSAT_7': dict(**common_load_params, time=ls7_date_range),
          'LANDSAT_8': dict(**common_load_params, time=ls8_date_range)}`,
        where `common_load_params` is a dictionary of load parameters common
        to both - most notably 'lat', 'lon', and 'measurements' - and the
        'date_range' variables are the time ranges for the corresponding
        platforms.
    masking_params: dict, optional
        A dictionary mapping platform names to a dictionary of keyword
        arguments for corresponding masking functions.
        For example: {'LANDSAT_7': {'cover_types': ['cloud']},
        'LANDSAT_8': {'cover_types': ['cloud']}} would retain only clouds,
        because `landsat_qa_clean_mask()` is used to create clean masks for
        the Landsat family of platforms.
    indiv_masks: list
        A list of masks to return (e.g. ['water']). These do not have to be
        the same ones used to create the returned clean mask.

    Returns
    -------
    dataset: xarray.Dataset
        The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
    clean_mask: xarray.DataArray
        The clean mask, formed as a logical AND of all masks used.
    masks: list of xarray.DataArray
        A list of the masks requested by `indiv_masks`,
        or `None` if `indiv_masks` is not specified.

    Raises
    ------
    AssertionError: If no data is retrieved from any product.

    :Authors: John Rattz ([email protected])
    """
    # Determine what resolution the data will be scaled to.
    if frac_res is not None and abs_res is None:
        prod_info = dc.list_products()
        resolutions = prod_info[prod_info['name'].isin(products)]\
                      ['resolution'].values
        # Determine the minimum resolution, which is actually the maximum
        # value resolution, since resolution is measured in degrees per pixel.
        # The first resolution is for latitude (y) and is negative.
        # The second resolution is for longitude (x) and is positive.
        min_res = [0]*2
        for res in resolutions:
            min_res[0] = res[0] if res[0] < min_res[0] else min_res[0]
            min_res[1] = res[1] if min_res[1] < res[1] else min_res[1]
        # Take the reciprocal to convert degrees per pixel to pixels per degree.
        # Reverse to be in order (x, y).
        min_res = [abs(frac_res*(1/res)) for res in min_res][::-1]
        # Calculate the absolute resolution.
        x, y = load_params.get('lon', None), load_params.get('lat', None)
        x, y = load_params.get('longitude', x), load_params.get('latitude', y)
        x_y_rng = abs(x[1] - x[0]), abs(y[1] - y[0])
        abs_res = [round(res*rng) for res, rng in zip(min_res, x_y_rng)]

    datasets_temp = {}  # Maps platforms to datasets to merge.
    clean_masks_temp = {}  # Maps platforms to clean masks to merge.
    masks_per_platform = {} if indiv_masks is not None else None  # Maps platforms to lists of masks.
    for product, platform in zip(products, platforms):
        current_load_params = dict(platform=platform, product=product)
        current_masking_params = masking_params.get(platform, masking_params)

        # Handle `load_params` as a dict of dicts of platforms mapping to load params.
        if isinstance(list(load_params.values())[0], dict):
            current_load_params.update(load_params.get(platform, {}))
        else:  # Handle `load_params` as a dict of load params.
            current_load_params.update(load_params)

        # Load each time range of data.
        time = current_load_params.get('time')
        if isinstance(time[0], tuple) or \
           isinstance(time[0], list):  # Handle `time` as a list of time ranges.
            datasets_time_parts = []
            clean_masks_time_parts = []
            masks_time_parts = np.empty((len(time), len(indiv_masks)), dtype=object)\
                               if indiv_masks is not None else None
            for i, time_range in enumerate(time):
                time_range_load_params = current_load_params
                time_range_load_params['time'] = time_range
                try:
                    dataset_time_part, clean_mask_time_part, masks_time_part = \
                        load_simple(dc, platform, product, abs_res=abs_res,
                                    load_params=time_range_load_params,
                                    masking_params=current_masking_params,
                                    indiv_masks=indiv_masks)
                    datasets_time_parts.append(dataset_time_part)
                    clean_masks_time_parts.append(clean_mask_time_part)
                    if indiv_masks is not None:
                        masks_time_parts[i] = masks_time_part
                except (AssertionError):
                    continue
            datasets_temp[platform], clean_masks_temp[platform] = \
                xarray_concat_and_merge(datasets_time_parts, clean_masks_time_parts)
            if indiv_masks is not None:
                masks_per_platform[platform] = xarray_concat_and_merge(*masks_time_parts.T)
        else:  # Handle `time` as a single time range.
            try:
                datasets_temp[platform], clean_masks_temp[platform], masks = \
                    load_simple(dc, platform, product, abs_res=abs_res,
                                load_params=current_load_params,
                                masking_params=current_masking_params,
                                indiv_masks=indiv_masks)
                if indiv_masks is not None:
                    masks_per_platform[platform] = masks
            except (AssertionError):
                continue
    return merge_datasets(datasets_temp, clean_masks_temp, masks_per_platform)
[ "def", "load_multiplatform", "(", "dc", ",", "platforms", ",", "products", ",", "frac_res", "=", "None", ",", "abs_res", "=", "None", ",", "load_params", "=", "{", "}", ",", "masking_params", "=", "{", "}", ",", "indiv_masks", "=", "None", ")", ":", "# Determine what resolution the data will be scaled to.", "if", "frac_res", "is", "not", "None", "and", "abs_res", "is", "None", ":", "prod_info", "=", "dc", ".", "list_products", "(", ")", "resolutions", "=", "prod_info", "[", "prod_info", "[", "'name'", "]", ".", "isin", "(", "products", ")", "]", "[", "'resolution'", "]", ".", "values", "# Determine the minimum resolution, which is actually the maximum", "# value resolution, since resolution is measured in degrees per pixel.", "# The first resolution is for latitude (y) and is negative.", "# The second resolution is for longitude (x) and is positive.", "min_res", "=", "[", "0", "]", "*", "2", "for", "res", "in", "resolutions", ":", "min_res", "[", "0", "]", "=", "res", "[", "0", "]", "if", "res", "[", "0", "]", "<", "min_res", "[", "0", "]", "else", "min_res", "[", "0", "]", "min_res", "[", "1", "]", "=", "res", "[", "1", "]", "if", "min_res", "[", "1", "]", "<", "res", "[", "1", "]", "else", "min_res", "[", "1", "]", "# Take reciprocal to convert pixels per degree to degrees per pixel.", "# Reverse to be in order (x, y).", "min_res", "=", "[", "abs", "(", "frac_res", "*", "(", "1", "/", "res", ")", ")", "for", "res", "in", "min_res", "]", "[", ":", ":", "-", "1", "]", "# Calculate the absolute resolution.", "x", ",", "y", "=", "load_params", ".", "get", "(", "'lon'", ",", "None", ")", ",", "load_params", ".", "get", "(", "'lat'", ",", "None", ")", "x", ",", "y", "=", "load_params", ".", "get", "(", "'longitude'", ",", "x", ")", ",", "load_params", ".", "get", "(", "'latitude'", ",", "y", ")", "x_y_rng", "=", "abs", "(", "x", "[", "1", "]", "-", "x", "[", "0", "]", ")", ",", "abs", "(", "y", "[", "1", "]", "-", "y", "[", "0", "]", ")", "abs_res", "=", "[", "round", "(", "res", "*", "rng", ")", "for", "res", ",", "rng", "in", "zip", "(", "min_res", ",", "x_y_rng", ")", "]", "datasets_temp", "=", "{", "}", "# Maps platforms to datasets to merge.", "clean_masks_temp", "=", "{", "}", "# Maps platforms to clean masks to merge.", "masks_per_platform", "=", "{", "}", "if", "indiv_masks", "is", "not", "None", "else", "None", "# Maps platforms to lists of masks.", "for", "product", ",", "platform", "in", "zip", "(", "products", ",", "platforms", ")", ":", "current_load_params", "=", "dict", "(", "platform", "=", "platform", ",", "product", "=", "product", ")", "current_masking_params", "=", "masking_params", ".", "get", "(", "platform", ",", "masking_params", ")", "# Handle `load_params` as a dict of dicts of platforms mapping to load params.", "if", "isinstance", "(", "list", "(", "load_params", ".", "values", "(", ")", ")", "[", "0", "]", ",", "dict", ")", ":", "current_load_params", ".", "update", "(", "load_params", ".", "get", "(", "platform", ",", "{", "}", ")", ")", "else", ":", "# Handle `load_params` as a dict of load params.", "current_load_params", ".", "update", "(", "load_params", ")", "# Load each time range of data.", "time", "=", "current_load_params", ".", "get", "(", "'time'", ")", "if", "isinstance", "(", "time", "[", "0", "]", ",", "tuple", ")", "or", "isinstance", "(", "time", "[", "0", "]", ",", "list", ")", ":", "# Handle `time` as a list of time ranges.", "datasets_time_parts", "=", "[", "]", "clean_masks_time_parts", "=", "[", "]", "masks_time_parts", "=", "np", ".", 
"empty", "(", "(", "len", "(", "time", ")", ",", "len", "(", "indiv_masks", ")", ")", ",", "dtype", "=", "object", ")", "if", "indiv_masks", "is", "not", "None", "else", "None", "for", "i", ",", "time_range", "in", "enumerate", "(", "time", ")", ":", "time_range_load_params", "=", "current_load_params", "time_range_load_params", "[", "'time'", "]", "=", "time_range", "try", ":", "dataset_time_part", ",", "clean_mask_time_part", ",", "masks_time_part", "=", "load_simple", "(", "dc", ",", "platform", ",", "product", ",", "abs_res", "=", "abs_res", ",", "load_params", "=", "time_range_load_params", ",", "masking_params", "=", "masking_params", ",", "indiv_masks", "=", "indiv_masks", ")", "datasets_time_parts", ".", "append", "(", "dataset_time_part", ")", "clean_masks_time_parts", ".", "append", "(", "clean_mask_time_part", ")", "if", "indiv_masks", "is", "not", "None", ":", "masks_time_parts", "[", "i", "]", "=", "masks_time_part", "except", "(", "AssertionError", ")", ":", "continue", "datasets_temp", "[", "platform", "]", ",", "clean_masks_temp", "[", "platform", "]", "=", "xarray_concat_and_merge", "(", "datasets_time_parts", ",", "clean_masks_time_parts", ")", "if", "indiv_masks", "is", "not", "None", ":", "masks_per_platform", "[", "platform", "]", "=", "xarray_concat_and_merge", "(", "*", "masks_time_parts", ".", "T", ")", "else", ":", "# Handle `time` as a single time range.", "try", ":", "datasets_temp", "[", "platform", "]", ",", "clean_masks_temp", "[", "platform", "]", ",", "masks", "=", "load_simple", "(", "dc", ",", "platform", ",", "product", ",", "abs_res", "=", "abs_res", ",", "load_params", "=", "current_load_params", ",", "masking_params", "=", "masking_params", ",", "indiv_masks", "=", "indiv_masks", ")", "if", "indiv_masks", "is", "not", "None", ":", "masks_per_platform", "[", "platform", "]", "=", "masks", "except", "(", "AssertionError", ")", ":", "continue", "return", "merge_datasets", "(", "datasets_temp", ",", "clean_masks_temp", ",", "masks_per_platform", ")" ]
[ 187, 0 ]
[ 326, 78 ]
python
en
['en', 'error', 'th']
False
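Continuing the earlier sketch: per-platform time ranges via the dict-of-dicts form of load_params. The dc instance comes from the previous example, and platform/product names remain placeholders:

common = dict(lat=(0.0, 1.0), lon=(34.0, 35.0),
              measurements=["red", "green", "blue", "pixel_qa"])
load_params = {
    "LANDSAT_7": dict(**common, time=("2010-01-01", "2013-01-01")),
    "LANDSAT_8": dict(**common, time=("2013-01-01", "2018-01-01")),
}
dataset, clean_mask, masks = load_multiplatform(
    dc, ["LANDSAT_7", "LANDSAT_8"],
    ["ls7_usgs_sr_scene", "ls8_usgs_sr_scene"],
    load_params=load_params)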
get_product_extents
(api, platform, product)
Returns the minimum and maximum latitude, longitude, and date range of a product. Parameters ---------- api: DataAccessApi An instance of `DataAccessApi` to get query metadata from. platform, product: str Names of the platform and product to query extent information for. Returns ------- full_lat, full_lon: tuple Two 2-tuples of the minimum and maximum latitude and longitude, respectively. min_max_dates: tuple of datetime.datetime A 2-tuple of the minimum and maximum time available.
Returns the minimum and maximum latitude, longitude, and date range of a product.
def get_product_extents(api, platform, product):
    """
    Returns the minimum and maximum latitude, longitude, and date range of a product.

    Parameters
    ----------
    api: DataAccessApi
        An instance of `DataAccessApi` to get query metadata from.
    platform, product: str
        Names of the platform and product to query extent information for.

    Returns
    -------
    full_lat, full_lon: tuple
        Two 2-tuples of the minimum and maximum latitude and longitude, respectively.
    min_max_dates: tuple of datetime.datetime
        A 2-tuple of the minimum and maximum time available.
    """
    # Get the extents of the cube
    descriptor = api.get_query_metadata(platform=platform, product=product, measurements=[])
    min_max_lat = descriptor['lat_extents']
    min_max_lon = descriptor['lon_extents']
    min_max_dates = descriptor['time_extents']
    return min_max_lat, min_max_lon, min_max_dates
[ "def", "get_product_extents", "(", "api", ",", "platform", ",", "product", ")", ":", "# Get the extents of the cube", "descriptor", "=", "api", ".", "get_query_metadata", "(", "platform", "=", "platform", ",", "product", "=", "product", ",", "measurements", "=", "[", "]", ")", "min_max_lat", "=", "descriptor", "[", "'lat_extents'", "]", "min_max_lon", "=", "descriptor", "[", "'lon_extents'", "]", "min_max_dates", "=", "descriptor", "[", "'time_extents'", "]", "return", "min_max_lat", ",", "min_max_lon", ",", "min_max_dates" ]
[ 328, 0 ]
[ 351, 50 ]
python
en
['en', 'error', 'th']
False
get_overlapping_area
(api, platforms, products)
Returns the minimum and maximum latitude, longitude, and date range of the overlapping area for a set of products. Parameters ---------- api: DataAccessApi An instance of `DataAccessApi` to get query metadata from. platforms, products: list-like of str A list-like of names of platforms and products to query extent information for. These lists must have the same length. Returns ------- full_lat, full_lon: tuple Two 2-tuples of the minimum and maximum latitude and longitude, respectively. min_max_dates: numpy.ndarray of datetime.datetime A 2D NumPy array with shape (len(products), 2), in which rows contain the minimum and maximum time available for corresponding products.
Returns the minimum and maximum latitude, longitude, and date range of the overlapping area for a set of products. Parameters ---------- api: DataAccessApi An instance of `DataAccessApi` to get query metadata from. platforms, products: list-like of str A list-like of names of platforms and products to query extent information for. These lists must have the same length. Returns ------- full_lat, full_lon: tuple Two 2-tuples of the minimum and maximum latitude and longitude, respectively. min_max_dates: numpy.ndarray of datetime.datetime A 2D NumPy array with shape (len(products), 2), in which rows contain the minimum and maximum time available for corresponding products.
def get_overlapping_area(api, platforms, products):
    """
    Returns the minimum and maximum latitude, longitude, and date range of the
    overlapping area for a set of products.

    Parameters
    ----------
    api: DataAccessApi
        An instance of `DataAccessApi` to get query metadata from.
    platforms, products: list-like of str
        A list-like of names of platforms and products to query extent
        information for. These lists must have the same length.

    Returns
    -------
    full_lat, full_lon: tuple
        Two 2-tuples of the minimum and maximum latitude and longitude, respectively.
    min_max_dates: numpy.ndarray of datetime.datetime
        A 2D NumPy array with shape (len(products), 2), in which rows contain
        the minimum and maximum time available for corresponding products.
    """
    min_max_dates = np.empty((len(platforms), 2), dtype=object)
    min_max_lat = np.empty((len(platforms), 2))
    min_max_lon = np.empty((len(platforms), 2))
    for i, (platform, product) in enumerate(zip(platforms, products)):
        min_max_lat[i], min_max_lon[i], min_max_dates[i] = \
            get_product_extents(api, platform, product)
    # Determine minimum and maximum lat and lon extents that bound a common area among the
    # products, which are the greatest minimums and smallest maximums.
    min_lon, max_lon = np.max(min_max_lon[:, 0]), np.min(min_max_lon[:, 1])
    min_lat, max_lat = np.max(min_max_lat[:, 0]), np.min(min_max_lat[:, 1])
    full_lon = (min_lon, max_lon)
    full_lat = (min_lat, max_lat)
    return full_lat, full_lon, min_max_dates
[ "def", "get_overlapping_area", "(", "api", ",", "platforms", ",", "products", ")", ":", "min_max_dates", "=", "np", ".", "empty", "(", "(", "len", "(", "platforms", ")", ",", "2", ")", ",", "dtype", "=", "object", ")", "min_max_lat", "=", "np", ".", "empty", "(", "(", "len", "(", "platforms", ")", ",", "2", ")", ")", "min_max_lon", "=", "np", ".", "empty", "(", "(", "len", "(", "platforms", ")", ",", "2", ")", ")", "for", "i", ",", "(", "platform", ",", "product", ")", "in", "enumerate", "(", "zip", "(", "platforms", ",", "products", ")", ")", ":", "min_max_lat", "[", "i", "]", ",", "min_max_lon", "[", "i", "]", ",", "min_max_dates", "[", "i", "]", "=", "get_product_extents", "(", "api", ",", "platform", ",", "product", ")", "# Determine minimum and maximum lat and lon extents that bound a common area among the", "# products, which are the greatest minimums and smallest maximums.", "min_lon", ",", "max_lon", "=", "np", ".", "max", "(", "min_max_lon", "[", ":", ",", "0", "]", ")", ",", "np", ".", "min", "(", "min_max_lon", "[", ":", ",", "1", "]", ")", "min_lat", ",", "max_lat", "=", "np", ".", "max", "(", "min_max_lat", "[", ":", ",", "0", "]", ")", ",", "np", ".", "min", "(", "min_max_lat", "[", ":", ",", "1", "]", ")", "full_lon", "=", "(", "min_lon", ",", "max_lon", ")", "full_lat", "=", "(", "min_lat", ",", "max_lat", ")", "return", "full_lat", ",", "full_lon", ",", "min_max_dates" ]
[ 353, 0 ]
[ 386, 44 ]
python
en
['en', 'error', 'th']
False
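A hedged sketch of querying common extents with the helper above. How DataAccessApi is constructed depends on the surrounding utilities module, so the zero-argument constructor here is an assumption, and the product names are placeholders:

api = DataAccessApi()  # assumption: default construction
full_lat, full_lon, min_max_dates = get_overlapping_area(
    api, ["LANDSAT_7", "LANDSAT_8"],
    ["ls7_usgs_sr_scene", "ls8_usgs_sr_scene"])
print(full_lat, full_lon)   # lat/lon bounds shared by both products
print(min_max_dates[:, 0])  # earliest acquisition per product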
xml_generators.__init__
(self, logger, gccxml_cvs_revision=None, castxml_format=None)
Create a new xml_generators object. Args: logger (logging.Logger): a logger for debugging output gccxml_cvs_revision (str|None): the xml output version castxml_format (str|None): the xml output version
Create a new xml_generators object.
    def __init__(self, logger, gccxml_cvs_revision=None, castxml_format=None):
        """
        Create a new xml_generators object.

        Args:
            logger (logging.Logger): a logger for debugging output
            gccxml_cvs_revision (str|None): the xml output version
            castxml_format (str|None): the xml output version
        """
        if castxml_format is not None and gccxml_cvs_revision is not None:
            raise RuntimeError("Setting both gccxml_cvs_revision and "
                               "castxml_format is not allowed!")

        self._is_castxml1 = False
        self._is_castxml = False
        self._is_gccxml = False

        if castxml_format is not None:
            self._xml_generator_version = self.__castxml
            self._xml_output_version = castxml_format
            self._is_castxml = True
            self._is_castxml1 = True
        elif gccxml_cvs_revision is not None:
            self._xml_generator_version, self._xml_output_version = \
                self.__extract_versions(logger, gccxml_cvs_revision)
            self._is_gccxml = "GCC-XML" in self._xml_generator_version
            self._is_castxml = "CastXML" in self._xml_generator_version
        else:
            raise RuntimeError("Either castxml_format or gccxml_cvs_revision"
                               " needs to be defined!")
[ "def", "__init__", "(", "self", ",", "logger", ",", "gccxml_cvs_revision", "=", "None", ",", "castxml_format", "=", "None", ")", ":", "if", "castxml_format", "is", "not", "None", "and", "gccxml_cvs_revision", "is", "not", "None", ":", "raise", "RuntimeError", "(", "\"Setting both gccxml_cvs_revision and\"", "\"castxml_format is not allowed!\"", ")", "self", ".", "_is_castxml1", "=", "False", "self", ".", "_is_castxml", "=", "False", "self", ".", "_is_gccxml", "=", "False", "if", "castxml_format", "is", "not", "None", ":", "self", ".", "_xml_generator_version", "=", "self", ".", "__castxml", "self", ".", "_xml_output_version", "=", "castxml_format", "self", ".", "_is_castxml", "=", "True", "self", ".", "_is_castxml1", "=", "True", "elif", "gccxml_cvs_revision", "is", "not", "None", ":", "self", ".", "_xml_generator_version", ",", "self", ".", "_xml_output_version", "=", "self", ".", "__extract_versions", "(", "logger", ",", "gccxml_cvs_revision", ")", "self", ".", "_is_gccxml", "=", "\"GCC-XML\"", "in", "self", ".", "_xml_generator_version", "self", ".", "_is_castxml", "=", "\"CastXML\"", "in", "self", ".", "_xml_generator_version", "else", ":", "raise", "RuntimeError", "(", "\"Either castxml_format or gccxml_cvs_revision\"", "\"need to be defined!\"", ")" ]
[ 18, 4 ]
[ 48, 53 ]
python
en
['en', 'error', 'th']
False
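A small sketch of constructing the version helper above; the accessors in the following records then report which generator is in use. Importing the class from pygccxml's parser subpackage is an assumption, as is the format string value:

import logging

logger = logging.getLogger(__name__)
# Exactly one of castxml_format / gccxml_cvs_revision may be given;
# passing both raises RuntimeError.
gen = xml_generators(logger, castxml_format="1.1.0")
print(gen.get_string_repr())  # generator name + separator + output version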
xml_generators.get_string_repr
(self)
Get a string identifier for the current type of xml generator Returns: str: identifier
Get a string identifier for the current type of xml generator
    def get_string_repr(self):
        """
        Get a string identifier for the current type of xml generator

        Returns:
            str: identifier
        """
        return \
            self._xml_generator_version + \
            self.__separator + \
            str(self._xml_output_version)
[ "def", "get_string_repr", "(", "self", ")", ":", "return", "self", ".", "_xml_generator_version", "+", "self", ".", "__separator", "+", "str", "(", "self", ".", "_xml_output_version", ")" ]
[ 79, 4 ]
[ 89, 41 ]
python
en
['en', 'error', 'th']
False
xml_generators.is_gccxml
(self)
Is the current xml generator gccxml? Returns: bool: is gccxml being used?
Is the current xml generator gccxml?
    def is_gccxml(self):
        """
        Is the current xml generator gccxml?

        Returns:
            bool: is gccxml being used?
        """
        return self._is_gccxml
[ "def", "is_gccxml", "(", "self", ")", ":", "return", "self", ".", "_is_gccxml" ]
[ 92, 4 ]
[ 99, 30 ]
python
en
['en', 'error', 'th']
False
xml_generators.is_castxml
(self)
Is the current xml generator castxml? Returns: bool: is castxml being used?
Is the current xml generator castxml?
    def is_castxml(self):
        """
        Is the current xml generator castxml?

        Returns:
            bool: is castxml being used?
        """
        return self._is_castxml
[ "def", "is_castxml", "(", "self", ")", ":", "return", "self", ".", "_is_castxml" ]
[ 102, 4 ]
[ 109, 31 ]
python
en
['en', 'error', 'th']
False
xml_generators.is_castxml1
(self)
Is the current xml generator castxml (with output format version 1)? Returns: bool: is castxml (with output format version 1) being used?
Is the current xml generator castxml (with output format version 1)?
    def is_castxml1(self):
        """
        Is the current xml generator castxml (with output format version 1)?

        Returns:
            bool: is castxml (with output format version 1) being used?
        """
        return self._is_castxml1
[ "def", "is_castxml1", "(", "self", ")", ":", "return", "self", ".", "_is_castxml1" ]
[ 112, 4 ]
[ 119, 32 ]
python
en
['en', 'error', 'th']
False
xml_generators.is_gccxml_06
(self)
Is the current xml generator gccxml (version 0.6)? Returns: bool: is gccxml 0.6 being used?
Is the current xml generator gccxml (version 0.6)?
    def is_gccxml_06(self):
        """
        Is the current xml generator gccxml (version 0.6)?

        Returns:
            bool: is gccxml 0.6 being used?
        """
        return self._xml_generator_version == self.__gccxml_06
[ "def", "is_gccxml_06", "(", "self", ")", ":", "return", "self", ".", "_xml_generator_version", "==", "self", ".", "__gccxml_06" ]
[ 122, 4 ]
[ 129, 62 ]
python
en
['en', 'error', 'th']
False
xml_generators.is_gccxml_07
(self)
Is the current xml generator gccxml (version 0.7)? Returns: bool: is gccxml 0.7 being used?
Is the current xml generator gccxml (version 0.7)?
    def is_gccxml_07(self):
        """
        Is the current xml generator gccxml (version 0.7)?

        Returns:
            bool: is gccxml 0.7 being used?
        """
        return self._xml_generator_version == self.__gccxml_07
[ "def", "is_gccxml_07", "(", "self", ")", ":", "return", "self", ".", "_xml_generator_version", "==", "self", ".", "__gccxml_07" ]
[ 132, 4 ]
[ 139, 62 ]
python
en
['en', 'error', 'th']
False
xml_generators.is_gccxml_09
(self)
Is the current xml generator gccxml (version 0.9)? Returns: bool: is gccxml 0.9 being used?
Is the current xml generator gccxml (version 0.9)?
    def is_gccxml_09(self):
        """
        Is the current xml generator gccxml (version 0.9)?

        Returns:
            bool: is gccxml 0.9 being used?
        """
        return self._xml_generator_version == self.__gccxml_09
[ "def", "is_gccxml_09", "(", "self", ")", ":", "return", "self", ".", "_xml_generator_version", "==", "self", ".", "__gccxml_09" ]
[ 142, 4 ]
[ 149, 62 ]
python
en
['en', 'error', 'th']
False
xml_generators.is_gccxml_09_buggy
(self)
Is the current xml generator gccxml (version 0.9 - buggy)? Returns: bool: is gccxml 0.9 (buggy) being used?
Is the current xml generator gccxml (version 0.9 - buggy)?
    def is_gccxml_09_buggy(self):
        """
        Is the current xml generator gccxml (version 0.9 - buggy)?

        Returns:
            bool: is gccxml 0.9 (buggy) being used?
        """
        return self._xml_generator_version == self.__gccxml_09_buggy
[ "def", "is_gccxml_09_buggy", "(", "self", ")", ":", "return", "self", ".", "_xml_generator_version", "==", "self", ".", "__gccxml_09_buggy" ]
[ 152, 4 ]
[ 159, 68 ]
python
en
['en', 'error', 'th']
False
xml_generators.xml_output_version
(self)
The current xml output version for the parsed file. Returns: str: the xml output version
The current xml output version for the parsed file.
def xml_output_version(self):
    """
    The current xml output version for the parsed file.

    Returns:
        str: the xml output version
    """
    return self._xml_output_version
[ "def", "xml_output_version", "(", "self", ")", ":", "return", "self", ".", "_xml_output_version" ]
[ 162, 4 ]
[ 169, 39 ]
python
en
['en', 'error', 'th']
False
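Together, these predicates and the xml_output_version property let client code branch on which generator produced the XML without comparing raw version strings. A minimal, self-contained sketch of that dispatch pattern (the GeneratorInfo stand-in and describe_generator helper are hypothetical, not part of the library):

class GeneratorInfo:
    """Hypothetical stand-in for the xml_generators object documented above."""

    def __init__(self, is_castxml=False, is_gccxml=False, version=None):
        self.is_castxml = is_castxml
        self.is_gccxml = is_gccxml
        self.xml_output_version = version


def describe_generator(info):
    # Branch on the boolean predicates rather than on raw version strings.
    if info.is_castxml:
        return "castxml, xml output version %s" % info.xml_output_version
    if info.is_gccxml:
        return "gccxml, xml output version %s" % info.xml_output_version
    return "unknown xml generator"


print(describe_generator(GeneratorInfo(is_castxml=True, version="1")))
# castxml, xml output version 1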
apply_filters
(stream, filters, lexer=None)
Use this method to apply an iterable of filters to a stream. If lexer is given it's forwarded to the filter, otherwise the filter receives `None`.
Use this method to apply an iterable of filters to a stream. If lexer is given it's forwarded to the filter, otherwise the filter receives `None`.
def apply_filters(stream, filters, lexer=None):
    """
    Use this method to apply an iterable of filters to
    a stream. If lexer is given it's forwarded to the
    filter, otherwise the filter receives `None`.
    """
    def _apply(filter_, stream):
        for token in filter_.filter(lexer, stream):
            yield token
    for filter_ in filters:
        stream = _apply(filter_, stream)
    return stream
[ "def", "apply_filters", "(", "stream", ",", "filters", ",", "lexer", "=", "None", ")", ":", "def", "_apply", "(", "filter_", ",", "stream", ")", ":", "for", "token", "in", "filter_", ".", "filter", "(", "lexer", ",", "stream", ")", ":", "yield", "token", "for", "filter_", "in", "filters", ":", "stream", "=", "_apply", "(", "filter_", ",", "stream", ")", "return", "stream" ]
[ 12, 0 ]
[ 23, 17 ]
python
en
['en', 'error', 'th']
False
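This is the low-level chaining helper from Pygments' filter module: each filter wraps the previous generator lazily, so no tokens move through the chain until the final result is iterated. A short usage sketch against the public Pygments API (assuming a standard Pygments install):

from pygments.filter import apply_filters
from pygments.filters import KeywordCaseFilter
from pygments.lexers import PythonLexer

lexer = PythonLexer()
stream = lexer.get_tokens("def f():\n    return None\n")

# Uppercase all keywords; nothing runs until the loop consumes tokens.
for ttype, value in apply_filters(stream, [KeywordCaseFilter(case="upper")], lexer):
    print(ttype, repr(value))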
simplefilter
(f)
Decorator that converts a function into a filter:: @simplefilter def lowercase(self, lexer, stream, options): for ttype, value in stream: yield ttype, value.lower()
Decorator that converts a function into a filter::
def simplefilter(f):
    """
    Decorator that converts a function into a filter::

        @simplefilter
        def lowercase(self, lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    return type(f.__name__, (FunctionFilter,), {
        'function': f,
        '__module__': getattr(f, '__module__'),
        '__doc__': f.__doc__
    })
[ "def", "simplefilter", "(", "f", ")", ":", "return", "type", "(", "f", ".", "__name__", ",", "(", "FunctionFilter", ",", ")", ",", "{", "'function'", ":", "f", ",", "'__module__'", ":", "getattr", "(", "f", ",", "'__module__'", ")", ",", "'__doc__'", ":", "f", ".", "__doc__", "}", ")" ]
[ 26, 0 ]
[ 39, 14 ]
python
en
['en', 'error', 'th']
False
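Because the decorator manufactures a FunctionFilter subclass, the decorated name is a class: instantiate it (optionally with filter options) and register it on a lexer. A sketch of that usage (standard Pygments API assumed):

from pygments.filter import simplefilter
from pygments.lexers import PythonLexer


@simplefilter
def uppercase(self, lexer, stream, options):
    # `stream` yields (tokentype, value) pairs; per-instance settings
    # arrive through `options`.
    for ttype, value in stream:
        yield ttype, value.upper()


lexer = PythonLexer()
lexer.add_filter(uppercase())  # note: instantiate the generated class
for ttype, value in lexer.get_tokens("x = 1\n"):
    print(ttype, repr(value))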
non_numeric_low_card_dataset
(test_backend)
Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework
Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework
def non_numeric_low_card_dataset(test_backend):
    """Provide dataset fixtures that have special values and/or are otherwise
    useful outside the standard json testing framework"""
    data = {
        # "a" plus 105 repeats of "b" (flattened from the original explicit list)
        "lowcardnonnum": ["a"] + ["b"] * 105,
    }
    schemas = {
        "pandas": {
            "lowcardnonnum": "str",
        },
        "postgresql": {
            "lowcardnonnum": "TEXT",
        },
        "sqlite": {
            "lowcardnonnum": "VARCHAR",
        },
        "mysql": {
            "lowcardnonnum": "TEXT",
        },
        "mssql": {
            "lowcardnonnum": "VARCHAR",
        },
        "spark": {
            "lowcardnonnum": "StringType",
        },
    }
    return get_dataset(test_backend, data, schemas=schemas)
[ "def", "non_numeric_low_card_dataset", "(", "test_backend", ")", ":", "data", "=", "{", "\"lowcardnonnum\"", ":", "[", "\"a\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "\"b\"", ",", "]", "}", "schemas", "=", "{", "\"pandas\"", ":", "{", "\"lowcardnonnum\"", ":", "\"str\"", ",", "}", ",", "\"postgresql\"", ":", "{", "\"lowcardnonnum\"", ":", "\"TEXT\"", ",", "}", ",", "\"sqlite\"", ":", "{", "\"lowcardnonnum\"", ":", "\"VARCHAR\"", ",", "}", ",", "\"mysql\"", ":", "{", "\"lowcardnonnum\"", ":", "\"TEXT\"", ",", "}", ",", "\"mssql\"", ":", "{", "\"lowcardnonnum\"", ":", "\"VARCHAR\"", ",", "}", ",", "\"spark\"", ":", "{", "\"lowcardnonnum\"", ":", "\"StringType\"", ",", "}", ",", "}", "return", "get_dataset", "(", "test_backend", ",", "data", ",", "schemas", "=", "schemas", ")" ]
[ 1367, 0 ]
[ 1500, 59 ]
python
en
['en', 'en', 'en']
True
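As a pytest fixture, this is consumed by naming it as a test argument; the returned object is a backend-specific Great Expectations Dataset, so expectation methods work uniformly across pandas, SQL, and Spark. A hypothetical test using it (the test name and expectation choice are illustrative):

def test_lowcardnonnum_is_binary(non_numeric_low_card_dataset):
    # One "a" plus many "b" values: exactly two distinct entries.
    result = non_numeric_low_card_dataset.expect_column_distinct_values_to_be_in_set(
        "lowcardnonnum", ["a", "b"]
    )
    assert result.success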
non_numeric_high_card_dataset
(test_backend)
Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework
Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework
def non_numeric_high_card_dataset(test_backend): """Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework""" data = { "highcardnonnum": [ "CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb", "cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ", "4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7", "ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz", "AaqMhdYukVdexTk6LlWvzXYXTp5upPuf", "ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR", "F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2", "coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq", "3IzmbSJF525qtn7O4AvfKONnz7eFgnyU", "gLCtw7435gaR532PNFVCtvk14lNJpZXv", "hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R", "IqKC2auGTNehP8y24HzDQOdt9oysgFyx", "TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg", "cIfDv6ieTAobe84P84InzDKrJrccmqbq", "m1979gfI6lVF9ijJA245bchYFd1EaMap", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "7wcR161jyKYhFLEZkhFqSXLwXW46I5x8", "IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn", "hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg", "vwZyG0jGUys3HQdUiOocIbzhUdUugwKX", "rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6", "p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA", "VzgAIYNKHA0APN0oZtzMAfmbCzJenswy", "IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG", "eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp", "4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU", "ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u", "nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "CP22IFHDX1maoSjTEdtBfrMHWQKACGDB", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6", "OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT", "JQbXIcgwUhttfPIGB7VGGfL2KiElabrO", "eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57", "GW2JuUJmuCebia7RUiCNl2BTjukIzZWj", "oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC", "zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ", "eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y", "xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77", "1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01", "uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG", "agIk8H2nFa0K27IFr0VM2RNp6saihYI3", "cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N", "fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj", "HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8", "938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev", "PyZetp4izgE4ymPcUXyImF5mm7I6zbta", "FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs", "PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd", "eSQIxFqyYVf55UMzMEZrotPO74i3Sh03", "2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR", "3svDRnrELyAsC69Phpnl2Os89856tFBJ", "ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN", "m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1", "wZTwJmMX5Q58DhDdmScdigTSyUUC04sO", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs", "0S4iueoqKNjvS55O57BdY3DbfwhIDwKc", "ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF", "Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i", "pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU", "6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM", "puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB", "jOI4E43wA3lYBWbV0nMxqix885Tye1Pf", "YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7", "24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ItvI4l02oAIZEd5cPtDf4OnyBazji0PL", "DW4oLNP49MNNENFoFf7jDTI04xdvCiWg", "vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn", "bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6", "UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c", "He7xIY2BMNZ7vSO47KfKoYskVJeeedI7", "G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR", "hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF", "mlYdlfei13P6JrT7ZbSZdsudhE24aPYr", "gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4", "xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo", "kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx", "7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg", "Wkh43H7t95kRb9oOMjTSqC7163SrI4rU", "x586wCHsLsOaXl3F9cYeaROwdFc2pbU1", "oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh", "suns0vGgaMzasYpwDEEof2Ktovy0o4os", "of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC", "mmTiWVje9SotwPgmRxrGrNeI9DssAaCj", "pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54", 
"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2", "prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG", "JL38Vw7yERPC4gBplBaixlbpDg8V7gC6", "MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI", "hmr0LNyYObqe5sURs408IhRb50Lnek5K", "CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb", "cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ", "4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7", "ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz", "AaqMhdYukVdexTk6LlWvzXYXTp5upPuf", "ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR", "F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2", "coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq", "3IzmbSJF525qtn7O4AvfKONnz7eFgnyU", "gLCtw7435gaR532PNFVCtvk14lNJpZXv", "hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R", "IqKC2auGTNehP8y24HzDQOdt9oysgFyx", "TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg", "cIfDv6ieTAobe84P84InzDKrJrccmqbq", "m1979gfI6lVF9ijJA245bchYFd1EaMap", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "7wcR161jyKYhFLEZkhFqSXLwXW46I5x8", "IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn", "hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg", "vwZyG0jGUys3HQdUiOocIbzhUdUugwKX", "rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6", "p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA", "VzgAIYNKHA0APN0oZtzMAfmbCzJenswy", "IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG", "eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp", "4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU", "ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u", "nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "CP22IFHDX1maoSjTEdtBfrMHWQKACGDB", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6", "OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT", "JQbXIcgwUhttfPIGB7VGGfL2KiElabrO", "eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57", "GW2JuUJmuCebia7RUiCNl2BTjukIzZWj", "oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC", "zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ", "eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y", "xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77", "1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01", "uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG", "agIk8H2nFa0K27IFr0VM2RNp6saihYI3", "cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N", "fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj", "HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8", "938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev", "PyZetp4izgE4ymPcUXyImF5mm7I6zbta", "FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs", "PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd", "eSQIxFqyYVf55UMzMEZrotPO74i3Sh03", "2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR", "3svDRnrELyAsC69Phpnl2Os89856tFBJ", "ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN", "m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1", "wZTwJmMX5Q58DhDdmScdigTSyUUC04sO", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs", "0S4iueoqKNjvS55O57BdY3DbfwhIDwKc", "ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF", "Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i", "pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU", "6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM", "puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB", "jOI4E43wA3lYBWbV0nMxqix885Tye1Pf", "YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7", "24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ItvI4l02oAIZEd5cPtDf4OnyBazji0PL", "DW4oLNP49MNNENFoFf7jDTI04xdvCiWg", "vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn", "bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6", "UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c", "He7xIY2BMNZ7vSO47KfKoYskVJeeedI7", "G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR", "hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF", "mlYdlfei13P6JrT7ZbSZdsudhE24aPYr", "gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4", "xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo", "kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx", "7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg", "Wkh43H7t95kRb9oOMjTSqC7163SrI4rU", "x586wCHsLsOaXl3F9cYeaROwdFc2pbU1", "oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh", "suns0vGgaMzasYpwDEEof2Ktovy0o4os", "of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC", 
"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj", "pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54", "nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2", "prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG", "JL38Vw7yERPC4gBplBaixlbpDg8V7gC6", "MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI", "hmr0LNyYObqe5sURs408IhRb50Lnek5K", ], # Built from highcardnonnum using the following: # vals = pd.Series(data["highcardnonnum"]) # sample_vals = vals.sample(n=10, random_state=42) # weights = np.random.RandomState(42).rand(10) # weights = weights / np.sum(weights) # new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11) "medcardnonnum": [ "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", 
"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", 
"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", ], } schemas = { "pandas": { "highcardnonnum": "str", "medcardnonnum": "str", }, "postgresql": { "highcardnonnum": "TEXT", "medcardnonnum": "TEXT", }, "sqlite": { "highcardnonnum": "VARCHAR", "medcardnonnum": "VARCHAR", }, "mysql": { "highcardnonnum": "TEXT", "medcardnonnum": "TEXT", }, "mssql": { "highcardnonnum": "VARCHAR", "medcardnonnum": "VARCHAR", }, "spark": { "highcardnonnum": "StringType", "medcardnonnum": "StringType", }, } return get_dataset(test_backend, data, schemas=schemas)
[ "def", "non_numeric_high_card_dataset", "(", "test_backend", ")", ":", "data", "=", "{", "\"highcardnonnum\"", ":", "[", "\"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\"", ",", "\"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\"", ",", "\"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\"", ",", "\"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\"", ",", "\"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\"", ",", "\"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\"", ",", "\"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\"", ",", "\"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\"", ",", "\"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\"", ",", "\"gLCtw7435gaR532PNFVCtvk14lNJpZXv\"", ",", "\"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\"", ",", "\"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\"", ",", "\"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\"", ",", "\"cIfDv6ieTAobe84P84InzDKrJrccmqbq\"", ",", "\"m1979gfI6lVF9ijJA245bchYFd1EaMap\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\"", ",", "\"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\"", ",", "\"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\"", ",", "\"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\"", ",", "\"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\"", ",", "\"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\"", ",", "\"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\"", ",", "\"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\"", ",", "\"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\"", ",", "\"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\"", ",", "\"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\"", ",", "\"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\"", ",", "\"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\"", ",", "\"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\"", ",", "\"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\"", ",", "\"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\"", ",", "\"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\"", ",", "\"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\"", ",", "\"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\"", ",", "\"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\"", ",", "\"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\"", ",", "\"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\"", ",", "\"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\"", ",", "\"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\"", ",", "\"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\"", ",", "\"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\"", ",", "\"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\"", ",", "\"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\"", ",", "\"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\"", ",", "\"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\"", ",", "\"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\"", ",", "\"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\"", ",", "\"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\"", ",", "\"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\"", ",", "\"3svDRnrELyAsC69Phpnl2Os89856tFBJ\"", ",", "\"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\"", ",", "\"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\"", ",", "\"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\"", ",", "\"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\"", ",", "\"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\"", ",", "\"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\"", ",", "\"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\"", ",", "\"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\"", ",", "\"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\"", ",", "\"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\"", ",", "\"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\"", ",", "\"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\"", ",", "\"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\"", ",", "\"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\"", ",", "\"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\"", ",", 
"\"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\"", ",", "\"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\"", ",", "\"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\"", ",", "\"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\"", ",", "\"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\"", ",", "\"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\"", ",", "\"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\"", ",", "\"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\"", ",", "\"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\"", ",", "\"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\"", ",", "\"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\"", ",", "\"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\"", ",", "\"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\"", ",", "\"suns0vGgaMzasYpwDEEof2Ktovy0o4os\"", ",", "\"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\"", ",", "\"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\"", ",", "\"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\"", ",", "\"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\"", ",", "\"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\"", ",", "\"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\"", ",", "\"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\"", ",", "\"hmr0LNyYObqe5sURs408IhRb50Lnek5K\"", ",", "\"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\"", ",", "\"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\"", ",", "\"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\"", ",", "\"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\"", ",", "\"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\"", ",", "\"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\"", ",", "\"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\"", ",", "\"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\"", ",", "\"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\"", ",", "\"gLCtw7435gaR532PNFVCtvk14lNJpZXv\"", ",", "\"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\"", ",", "\"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\"", ",", "\"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\"", ",", "\"cIfDv6ieTAobe84P84InzDKrJrccmqbq\"", ",", "\"m1979gfI6lVF9ijJA245bchYFd1EaMap\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\"", ",", "\"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\"", ",", "\"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\"", ",", "\"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\"", ",", "\"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\"", ",", "\"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\"", ",", "\"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\"", ",", "\"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\"", ",", "\"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\"", ",", "\"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\"", ",", "\"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\"", ",", "\"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\"", ",", "\"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\"", ",", "\"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\"", ",", "\"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\"", ",", "\"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\"", ",", "\"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\"", ",", "\"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\"", ",", "\"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\"", ",", "\"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\"", ",", "\"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\"", ",", "\"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\"", ",", "\"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\"", ",", "\"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\"", ",", "\"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\"", ",", "\"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\"", ",", "\"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\"", ",", "\"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\"", ",", "\"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\"", ",", "\"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\"", ",", "\"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\"", ",", "\"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\"", ",", "\"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\"", ",", "\"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\"", ",", "\"3svDRnrELyAsC69Phpnl2Os89856tFBJ\"", ",", 
"\"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\"", ",", "\"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\"", ",", "\"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\"", ",", "\"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\"", ",", "\"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\"", ",", "\"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\"", ",", "\"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\"", ",", "\"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\"", ",", "\"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\"", ",", "\"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\"", ",", "\"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\"", ",", "\"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\"", ",", "\"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\"", ",", "\"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\"", ",", "\"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\"", ",", "\"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\"", ",", "\"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\"", ",", "\"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\"", ",", "\"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\"", ",", "\"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\"", ",", "\"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\"", ",", "\"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\"", ",", "\"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\"", ",", "\"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\"", ",", "\"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\"", ",", "\"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\"", ",", "\"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\"", ",", "\"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\"", ",", "\"suns0vGgaMzasYpwDEEof2Ktovy0o4os\"", ",", "\"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\"", ",", "\"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\"", ",", "\"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\"", ",", "\"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\"", ",", "\"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\"", ",", "\"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\"", ",", "\"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\"", ",", "\"hmr0LNyYObqe5sURs408IhRb50Lnek5K\"", ",", "]", ",", "# Built from highcardnonnum using the following:", "# vals = pd.Series(data[\"highcardnonnum\"])", "# sample_vals = vals.sample(n=10, random_state=42)", "# weights = np.random.RandomState(42).rand(10)", "# weights = weights / np.sum(weights)", "# new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)", "\"medcardnonnum\"", ":", "[", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", 
"\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", 
"\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", 
"\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\"", ",", "\"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\"", ",", "\"NhTsracusfp5V6zVeWqLZnychDl7jjO4\"", ",", "\"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\"", ",", "\"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\"", ",", "\"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\"", ",", "]", ",", "}", "schemas", "=", "{", "\"pandas\"", ":", "{", "\"highcardnonnum\"", ":", "\"str\"", ",", "\"medcardnonnum\"", ":", "\"str\"", ",", "}", ",", "\"postgresql\"", ":", "{", "\"highcardnonnum\"", ":", "\"TEXT\"", ",", "\"medcardnonnum\"", ":", "\"TEXT\"", ",", "}", ",", "\"sqlite\"", ":", "{", "\"highcardnonnum\"", ":", "\"VARCHAR\"", ",", "\"medcardnonnum\"", ":", "\"VARCHAR\"", ",", "}", ",", "\"mysql\"", ":", "{", "\"highcardnonnum\"", ":", "\"TEXT\"", ",", "\"medcardnonnum\"", ":", "\"TEXT\"", ",", "}", ",", "\"mssql\"", ":", "{", "\"highcardnonnum\"", ":", "\"VARCHAR\"", ",", "\"medcardnonnum\"", ":", "\"VARCHAR\"", ",", "}", ",", "\"spark\"", ":", "{", "\"highcardnonnum\"", ":", "\"StringType\"", ",", "\"medcardnonnum\"", ":", "\"StringType\"", ",", "}", ",", "}", "return", "get_dataset", "(", "test_backend", ",", "data", ",", "schemas", "=", "schemas", ")" ]
[ 1504, 0 ]
[ 1946, 59 ]
python
en
['en', 'en', 'en']
True
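The inline comment in the fixture above records how medcardnonnum was derived from highcardnonnum. A runnable reconstruction of that recipe, with a synthetic stand-in for the real string list (only the sampling logic is taken from the source; the stand-in values are not):

import numpy as np
import pandas as pd

# Synthetic stand-in for the 200-element highcardnonnum list above.
highcardnonnum = ["v%03d" % i for i in range(100)] * 2

vals = pd.Series(highcardnonnum)
sample_vals = vals.sample(n=10, random_state=42)
weights = np.random.RandomState(42).rand(10)
weights = weights / np.sum(weights)
new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)
print(new_vals.nunique(), "distinct values in a 200-row medium-cardinality column")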
dataset
(test_backend)
Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework
Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework
def dataset(test_backend):
    """Provide dataset fixtures that have special values and/or are otherwise
    useful outside the standard json testing framework"""
    data, schemas = dataset_sample_data(test_backend)
    return get_dataset(test_backend, data, schemas=schemas)
[ "def", "dataset", "(", "test_backend", ")", ":", "data", ",", "schemas", "=", "dataset_sample_data", "(", "test_backend", ")", "return", "get_dataset", "(", "test_backend", ",", "data", ",", "schemas", "=", "schemas", ")" ]
[ 2108, 0 ]
[ 2112, 59 ]
python
en
['en', 'en', 'en']
True
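The same usage pattern applies to this general-purpose fixture: request it by name and call expectation methods on the returned Dataset. A hypothetical example (the column name is illustrative; the real columns come from dataset_sample_data):

def test_dataset_fixture_smoke(dataset):
    # `dataset` resolves to whichever backend the test run selects.
    result = dataset.expect_column_to_exist("naturals")  # hypothetical column
    assert result.success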