Search is not available for this dataset
identifier
stringlengths
1
155
parameters
stringlengths
2
6.09k
docstring
stringlengths
11
63.4k
docstring_summary
stringlengths
0
63.4k
function
stringlengths
29
99.8k
function_tokens
sequence
start_point
sequence
end_point
sequence
language
stringclasses
1 value
docstring_language
stringlengths
2
7
docstring_language_predictions
stringlengths
18
23
is_langid_reliable
stringclasses
2 values
select_datasource
( context: DataContext, datasource_name: str = None )
Select a datasource interactively.
Select a datasource interactively.
def select_datasource(
    context: DataContext, datasource_name: Optional[str] = None
) -> Optional[BaseDatasource]:
    """Select a datasource interactively.

    Args:
        context: DataContext whose configured datasources are offered.
        datasource_name: if given, bypass the interactive prompt and fetch
            this datasource directly from the context.

    Returns:
        The chosen datasource, or None when the context has no datasources
        and no name was supplied.
    """
    # TODO consolidate all the myriad CLI tests into this
    data_source: Optional[BaseDatasource] = None

    if datasource_name is None:
        # sorted() already returns a list; the former list(...) wrapper was redundant.
        data_sources: List[BaseDatasource] = cast(
            List[BaseDatasource],
            sorted(context.datasources.values(), key=lambda x: x.name),
        )
        if not data_sources:
            cli_message(
                string="<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>"
            )
        elif len(data_sources) == 1:
            # Exactly one datasource configured: select it without prompting.
            datasource_name = data_sources[0].name
        else:
            choices: str = "\n".join(
                f" {i}. {data_source.name}"
                for i, data_source in enumerate(data_sources, 1)
            )
            option_selection: str = click.prompt(
                f"Select a datasource\n{choices}\n",
                type=click.Choice(
                    [str(i) for i in range(1, len(data_sources) + 1)]
                ),
                show_choices=False,
            )
            datasource_name = data_sources[int(option_selection) - 1].name

    if datasource_name is not None:
        data_source = context.get_datasource(datasource_name=datasource_name)

    return data_source
[ "def", "select_datasource", "(", "context", ":", "DataContext", ",", "datasource_name", ":", "str", "=", "None", ")", "->", "BaseDatasource", ":", "# TODO consolidate all the myriad CLI tests into this", "data_source", ":", "Optional", "[", "BaseDatasource", "]", "=", "None", "if", "datasource_name", "is", "None", ":", "data_sources", ":", "List", "[", "BaseDatasource", "]", "=", "cast", "(", "List", "[", "BaseDatasource", "]", ",", "list", "(", "sorted", "(", "context", ".", "datasources", ".", "values", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", ".", "name", ")", ",", ")", ",", ")", "if", "len", "(", "data_sources", ")", "==", "0", ":", "cli_message", "(", "string", "=", "\"<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>\"", ")", "elif", "len", "(", "data_sources", ")", "==", "1", ":", "datasource_name", "=", "data_sources", "[", "0", "]", ".", "name", "else", ":", "choices", ":", "str", "=", "\"\\n\"", ".", "join", "(", "[", "\" {}. {}\"", ".", "format", "(", "i", ",", "data_source", ".", "name", ")", "for", "i", ",", "data_source", "in", "enumerate", "(", "data_sources", ",", "1", ")", "]", ")", "option_selection", ":", "str", "=", "click", ".", "prompt", "(", "\"Select a datasource\"", "+", "\"\\n\"", "+", "choices", "+", "\"\\n\"", ",", "type", "=", "click", ".", "Choice", "(", "[", "str", "(", "i", ")", "for", "i", ",", "data_source", "in", "enumerate", "(", "data_sources", ",", "1", ")", "]", ")", ",", "show_choices", "=", "False", ",", ")", "datasource_name", "=", "data_sources", "[", "int", "(", "option_selection", ")", "-", "1", "]", ".", "name", "if", "datasource_name", "is", "not", "None", ":", "data_source", "=", "context", ".", "get_datasource", "(", "datasource_name", "=", "datasource_name", ")", "return", "data_source" ]
[ 365, 0 ]
[ 404, 22 ]
python
en
['en', 'en', 'en']
True
load_data_context_with_error_handling
( directory: str, from_cli_upgrade_command: bool = False )
Return a DataContext with good error handling and exit codes.
Return a DataContext with good error handling and exit codes.
def load_data_context_with_error_handling(
    directory: str, from_cli_upgrade_command: bool = False
) -> DataContext:
    """Return a DataContext with good error handling and exit codes."""
    try:
        context: DataContext = DataContext(context_root_dir=directory)
        if from_cli_upgrade_command:
            # Best-effort usage-stats ping; never block the CLI on it.
            try:
                send_usage_message(
                    data_context=context,
                    event="cli.project.upgrade.begin",
                    success=True,
                )
            except Exception:
                # Don't fail for usage stats
                pass
        ge_config_version: int = context.get_config().config_version
        needs_upgrade = (
            from_cli_upgrade_command
            and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION
        )
        if needs_upgrade:
            directory = directory or context.root_directory
            (
                increment_version,
                exception_occurred,
            ) = upgrade_project_one_version_increment(
                context_root_dir=directory,
                ge_config_version=ge_config_version,
                continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
                from_cli_upgrade_command=from_cli_upgrade_command,
            )
            if not exception_occurred and increment_version:
                # Re-load so the context reflects the upgraded config.
                context = DataContext(context_root_dir=directory)
                if from_cli_upgrade_command:
                    send_usage_message(
                        data_context=context,
                        event="cli.project.upgrade.end",
                        success=True,
                    )
        return context
    except ge_exceptions.UnsupportedConfigVersionError as err:
        directory = directory or DataContext.find_context_root_dir()
        ge_config_version = DataContext.get_ge_config_version(
            context_root_dir=directory
        )
        upgrade_helper_class = (
            GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
            if ge_config_version
            else None
        )
        if upgrade_helper_class and ge_config_version < CURRENT_GE_CONFIG_VERSION:
            upgrade_project(
                context_root_dir=directory,
                ge_config_version=ge_config_version,
                from_cli_upgrade_command=from_cli_upgrade_command,
            )
        else:
            # No upgrade path available for this config version.
            cli_message(string="<red>{}</red>".format(err.message))
            sys.exit(1)
    except (
        ge_exceptions.ConfigNotFoundError,
        ge_exceptions.InvalidConfigError,
    ) as err:
        cli_message(string="<red>{}</red>".format(err.message))
        sys.exit(1)
    except ge_exceptions.PluginModuleNotFoundError as err:
        cli_message(string=err.cli_colored_message)
        sys.exit(1)
    except ge_exceptions.PluginClassNotFoundError as err:
        cli_message(string=err.cli_colored_message)
        sys.exit(1)
    except ge_exceptions.InvalidConfigurationYamlError as err:
        cli_message(string=f"<red>{str(err)}</red>")
        sys.exit(1)
[ "def", "load_data_context_with_error_handling", "(", "directory", ":", "str", ",", "from_cli_upgrade_command", ":", "bool", "=", "False", ")", "->", "DataContext", ":", "try", ":", "context", ":", "DataContext", "=", "DataContext", "(", "context_root_dir", "=", "directory", ")", "if", "from_cli_upgrade_command", ":", "try", ":", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.project.upgrade.begin\"", ",", "success", "=", "True", ",", ")", "except", "Exception", ":", "# Don't fail for usage stats", "pass", "ge_config_version", ":", "int", "=", "context", ".", "get_config", "(", ")", ".", "config_version", "if", "(", "from_cli_upgrade_command", "and", "int", "(", "ge_config_version", ")", "<", "CURRENT_GE_CONFIG_VERSION", ")", ":", "directory", "=", "directory", "or", "context", ".", "root_directory", "(", "increment_version", ",", "exception_occurred", ",", ")", "=", "upgrade_project_one_version_increment", "(", "context_root_dir", "=", "directory", ",", "ge_config_version", "=", "ge_config_version", ",", "continuation_message", "=", "EXIT_UPGRADE_CONTINUATION_MESSAGE", ",", "from_cli_upgrade_command", "=", "from_cli_upgrade_command", ",", ")", "if", "not", "exception_occurred", "and", "increment_version", ":", "context", "=", "DataContext", "(", "context_root_dir", "=", "directory", ")", "if", "from_cli_upgrade_command", ":", "send_usage_message", "(", "data_context", "=", "context", ",", "event", "=", "\"cli.project.upgrade.end\"", ",", "success", "=", "True", ",", ")", "return", "context", "except", "ge_exceptions", ".", "UnsupportedConfigVersionError", "as", "err", ":", "directory", "=", "directory", "or", "DataContext", ".", "find_context_root_dir", "(", ")", "ge_config_version", "=", "DataContext", ".", "get_ge_config_version", "(", "context_root_dir", "=", "directory", ")", "upgrade_helper_class", "=", "(", "GE_UPGRADE_HELPER_VERSION_MAP", ".", "get", "(", "int", "(", "ge_config_version", ")", ")", "if", 
"ge_config_version", "else", "None", ")", "if", "upgrade_helper_class", "and", "ge_config_version", "<", "CURRENT_GE_CONFIG_VERSION", ":", "upgrade_project", "(", "context_root_dir", "=", "directory", ",", "ge_config_version", "=", "ge_config_version", ",", "from_cli_upgrade_command", "=", "from_cli_upgrade_command", ",", ")", "else", ":", "cli_message", "(", "string", "=", "\"<red>{}</red>\"", ".", "format", "(", "err", ".", "message", ")", ")", "sys", ".", "exit", "(", "1", ")", "except", "(", "ge_exceptions", ".", "ConfigNotFoundError", ",", "ge_exceptions", ".", "InvalidConfigError", ",", ")", "as", "err", ":", "cli_message", "(", "string", "=", "\"<red>{}</red>\"", ".", "format", "(", "err", ".", "message", ")", ")", "sys", ".", "exit", "(", "1", ")", "except", "ge_exceptions", ".", "PluginModuleNotFoundError", "as", "err", ":", "cli_message", "(", "string", "=", "err", ".", "cli_colored_message", ")", "sys", ".", "exit", "(", "1", ")", "except", "ge_exceptions", ".", "PluginClassNotFoundError", "as", "err", ":", "cli_message", "(", "string", "=", "err", ".", "cli_colored_message", ")", "sys", ".", "exit", "(", "1", ")", "except", "ge_exceptions", ".", "InvalidConfigurationYamlError", "as", "err", ":", "cli_message", "(", "string", "=", "f\"<red>{str(err)}</red>\"", ")", "sys", ".", "exit", "(", "1", ")" ]
[ 407, 0 ]
[ 480, 19 ]
python
en
['en', 'en', 'en']
True
confirm_proceed_or_exit
( confirm_prompt: str = "Would you like to proceed?", continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !", exit_on_no: bool = True, exit_code: int = 0, data_context: Optional[DataContext] = None, usage_stats_event: Optional[str] = None, )
Every CLI command that starts a potentially lengthy (>1 sec) computation or modifies some resources (e.g., edits the config file, adds objects to the stores) must follow this pattern: 1. Explain which resources will be created/modified/deleted 2. Use this method to ask for user's confirmation The goal of this standardization is for the users to expect consistency - if you saw one command, you know what to expect from all others. If the user does not confirm, the program should exit. The purpose of the exit_on_no parameter is to provide the option to perform cleanup actions before exiting outside of the function.
Every CLI command that starts a potentially lengthy (>1 sec) computation or modifies some resources (e.g., edits the config file, adds objects to the stores) must follow this pattern: 1. Explain which resources will be created/modified/deleted 2. Use this method to ask for user's confirmation
def confirm_proceed_or_exit(
    confirm_prompt: str = "Would you like to proceed?",
    continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
    exit_on_no: bool = True,
    exit_code: int = 0,
    data_context: Optional[DataContext] = None,
    usage_stats_event: Optional[str] = None,
) -> Optional[bool]:
    """
    Every CLI command that starts a potentially lengthy (>1 sec) computation
    or modifies some resources (e.g., edits the config file, adds objects to
    the stores) must follow this pattern:
    1. Explain which resources will be created/modified/deleted
    2. Use this method to ask for user's confirmation

    The goal of this standardization is for the users to expect consistency -
    if you saw one command, you know what to expect from all others.

    If the user does not confirm, the program should exit. The purpose of the
    exit_on_no parameter is to provide the option to perform cleanup actions
    before exiting outside of the function.
    """
    confirm_prompt_colorized = cli_colorize_string(confirm_prompt)
    continuation_message_colorized = cli_colorize_string(continuation_message)
    if not click.confirm(confirm_prompt_colorized, default=True):
        if exit_on_no:
            # Fix: the continuation message was previously emitted twice in a
            # row here; print it once.
            cli_message(string=continuation_message_colorized)
            if (usage_stats_event is not None) and (data_context is not None):
                # noinspection PyBroadException
                try:
                    send_usage_message(
                        data_context=data_context,
                        event=usage_stats_event,
                        event_payload={"cancelled": True},
                        success=True,
                    )
                except Exception:
                    # Don't fail on usage stats
                    pass
            sys.exit(exit_code)
        else:
            # Caller opted to handle cleanup itself; report the refusal.
            return False
    return True
[ "def", "confirm_proceed_or_exit", "(", "confirm_prompt", ":", "str", "=", "\"Would you like to proceed?\"", ",", "continuation_message", ":", "str", "=", "\"Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !\"", ",", "exit_on_no", ":", "bool", "=", "True", ",", "exit_code", ":", "int", "=", "0", ",", "data_context", ":", "Optional", "[", "DataContext", "]", "=", "None", ",", "usage_stats_event", ":", "Optional", "[", "str", "]", "=", "None", ",", ")", "->", "Optional", "[", "bool", "]", ":", "confirm_prompt_colorized", "=", "cli_colorize_string", "(", "confirm_prompt", ")", "continuation_message_colorized", "=", "cli_colorize_string", "(", "continuation_message", ")", "if", "not", "click", ".", "confirm", "(", "confirm_prompt_colorized", ",", "default", "=", "True", ")", ":", "if", "exit_on_no", ":", "cli_message", "(", "string", "=", "continuation_message_colorized", ")", "cli_message", "(", "string", "=", "continuation_message_colorized", ")", "if", "(", "usage_stats_event", "is", "not", "None", ")", "and", "(", "data_context", "is", "not", "None", ")", ":", "# noinspection PyBroadException", "try", ":", "send_usage_message", "(", "data_context", "=", "data_context", ",", "event", "=", "usage_stats_event", ",", "event_payload", "=", "{", "\"cancelled\"", ":", "True", "}", ",", "success", "=", "True", ",", ")", "except", "Exception", ":", "# Don't fail on usage stats", "pass", "sys", ".", "exit", "(", "exit_code", ")", "else", ":", "return", "False", "return", "True" ]
[ 633, 0 ]
[ 675, 15 ]
python
en
['en', 'error', 'th']
False
parse_cli_config_file_location
(config_file_location: str)
Parse CLI yaml config file or directory location into directory and filename. Uses pathlib to handle windows paths. Args: config_file_location: string of config_file_location Returns: { "directory": "directory/where/config/file/is/located", "filename": "great_expectations.yml" }
Parse CLI yaml config file or directory location into directory and filename. Uses pathlib to handle windows paths. Args: config_file_location: string of config_file_location
def parse_cli_config_file_location(config_file_location: str) -> dict:
    """
    Parse CLI yaml config file or directory location into directory and filename.
    Uses pathlib to handle windows paths.

    Args:
        config_file_location: string of config_file_location

    Returns:
        {
            "directory": "directory/where/config/file/is/located",
            "filename": "great_expectations.yml"
        }

    Raises:
        ge_exceptions.ConfigNotFoundError: when a non-empty location is
            neither an existing file nor an existing directory.
    """
    if config_file_location is not None and config_file_location != "":
        config_file_location_path = Path(config_file_location)

        # If the file or directory exists, treat it appropriately.
        # This handles files without extensions.
        if config_file_location_path.is_file():
            # Path.name is already a str and Path.parent needs only a plain
            # str() conversion; the former fr"" wrappers were redundant.
            filename: Optional[str] = config_file_location_path.name
            directory: Optional[str] = str(config_file_location_path.parent)
        elif config_file_location_path.is_dir():
            filename = None
            directory = config_file_location
        else:
            raise ge_exceptions.ConfigNotFoundError()
    else:
        # Return None if config_file_location is empty rather than default output of ""
        directory = None
        filename = None

    return {"directory": directory, "filename": filename}
[ "def", "parse_cli_config_file_location", "(", "config_file_location", ":", "str", ")", "->", "dict", ":", "if", "config_file_location", "is", "not", "None", "and", "config_file_location", "!=", "\"\"", ":", "config_file_location_path", "=", "Path", "(", "config_file_location", ")", "# If the file or directory exists, treat it appropriately", "# This handles files without extensions", "if", "config_file_location_path", ".", "is_file", "(", ")", ":", "filename", ":", "Optional", "[", "str", "]", "=", "fr\"{str(config_file_location_path.name)}\"", "directory", ":", "Optional", "[", "str", "]", "=", "fr\"{str(config_file_location_path.parent)}\"", "elif", "config_file_location_path", ".", "is_dir", "(", ")", ":", "filename", ":", "Optional", "[", "str", "]", "=", "None", "directory", ":", "Optional", "[", "str", "]", "=", "config_file_location", "else", ":", "raise", "ge_exceptions", ".", "ConfigNotFoundError", "(", ")", "else", ":", "# Return None if config_file_location is empty rather than default output of \"\"", "directory", "=", "None", "filename", "=", "None", "return", "{", "\"directory\"", ":", "directory", ",", "\"filename\"", ":", "filename", "}" ]
[ 678, 0 ]
[ 713, 57 ]
python
en
['en', 'error', 'th']
False
is_cloud_file_url
(file_path: str)
Check for commonly used cloud urls.
Check for commonly used cloud urls.
def is_cloud_file_url(file_path: str) -> bool:
    """Check for commonly used cloud urls.

    Returns True for s3://, gs://, ftp://, http://, https:// and wasb://
    URLs; explicit file:// URLs (and anything else) return False.
    """
    sanitized = file_path.strip()
    # The local-file scheme is explicitly not a cloud URL.
    if sanitized.startswith("file://"):
        return False
    # str.startswith accepts a tuple of prefixes, replacing the former
    # manual slice comparisons with a single call.
    return sanitized.startswith(
        ("s3://", "gs://", "ftp://", "http://", "wasb://", "https://")
    )
[ "def", "is_cloud_file_url", "(", "file_path", ":", "str", ")", "->", "bool", ":", "sanitized", "=", "file_path", ".", "strip", "(", ")", "if", "sanitized", "[", "0", ":", "7", "]", "==", "\"file://\"", ":", "return", "False", "if", "(", "sanitized", "[", "0", ":", "5", "]", "in", "[", "\"s3://\"", ",", "\"gs://\"", "]", "or", "sanitized", "[", "0", ":", "6", "]", "==", "\"ftp://\"", "or", "sanitized", "[", "0", ":", "7", "]", "in", "[", "\"http://\"", ",", "\"wasb://\"", "]", "or", "sanitized", "[", "0", ":", "8", "]", "==", "\"https://\"", ")", ":", "return", "True", "return", "False" ]
[ 734, 0 ]
[ 746, 16 ]
python
en
['en', 'en', 'en']
True
get_relative_path_from_config_file_to_base_path
( context_root_directory: str, data_path: str )
This function determines the relative path from a given data path relative to the great_expectations.yml file independent of the current working directory. This allows a user to use the CLI from any directory, type a relative path from their current working directory and have the correct relative path be put in the great_expectations.yml file.
This function determines the relative path from a given data path relative to the great_expectations.yml file independent of the current working directory.
def get_relative_path_from_config_file_to_base_path(
    context_root_directory: str, data_path: str
) -> str:
    """
    This function determines the relative path from a given data path relative
    to the great_expectations.yml file independent of the current working
    directory.

    This allows a user to use the CLI from any directory, type a relative path
    from their current working directory and have the correct relative path be
    put in the great_expectations.yml file.
    """
    # Normalize both locations against the current working directory first,
    # then express the data path relative to the context root.
    rel_data_path = os.path.relpath(data_path)
    rel_context_root = os.path.relpath(context_root_directory)
    return os.path.relpath(rel_data_path, start=rel_context_root)
[ "def", "get_relative_path_from_config_file_to_base_path", "(", "context_root_directory", ":", "str", ",", "data_path", ":", "str", ")", "->", "str", ":", "data_from_working_dir", "=", "os", ".", "path", ".", "relpath", "(", "data_path", ")", "context_dir_from_working_dir", "=", "os", ".", "path", ".", "relpath", "(", "context_root_directory", ")", "return", "os", ".", "path", ".", "relpath", "(", "data_from_working_dir", ",", "context_dir_from_working_dir", ")" ]
[ 749, 0 ]
[ 763, 79 ]
python
en
['en', 'error', 'th']
False
ValidationAction.run
( self, validation_result_suite, validation_result_suite_identifier, data_asset, **kwargs, )
:param validation_result_suite: :param validation_result_suite_identifier: :param data_asset: :param: kwargs - any additional arguments the child might use :return:
def run(
    self,
    validation_result_suite,
    validation_result_suite_identifier,
    data_asset,
    **kwargs,
):
    """Public entry point; delegates to the subclass-specific ``_run``.

    :param validation_result_suite:
    :param validation_result_suite_identifier:
    :param data_asset:
    :param: kwargs - any additional arguments the child might use
    :return: whatever the subclass ``_run`` implementation returns
    """
    result = self._run(
        validation_result_suite,
        validation_result_suite_identifier,
        data_asset,
        **kwargs,
    )
    return result
[ "def", "run", "(", "self", ",", "validation_result_suite", ",", "validation_result_suite_identifier", ",", "data_asset", ",", "*", "*", "kwargs", ",", ")", ":", "return", "self", ".", "_run", "(", "validation_result_suite", ",", "validation_result_suite_identifier", ",", "data_asset", ",", "*", "*", "kwargs", ",", ")" ]
[ 42, 4 ]
[ 62, 9 ]
python
en
['en', 'error', 'th']
False
SlackNotificationAction.__init__
( self, data_context, renderer, slack_webhook, notify_on="all", notify_with=None, )
Construct a SlackNotificationAction Args: data_context: renderer: dictionary specifying the renderer used to generate a query consumable by Slack API, for example: { "module_name": "great_expectations.render.renderer.slack_renderer", "class_name": "SlackRenderer", } slack_webhook: incoming Slack webhook to which to send notification notify_on: "all", "failure", "success" - specifies validation status that will trigger notification payload: *Optional* payload from other ValidationActions
Construct a SlackNotificationAction
def __init__(
    self,
    data_context,
    renderer,
    slack_webhook,
    notify_on="all",
    notify_with=None,
):
    """Construct a SlackNotificationAction

    Args:
        data_context:
        renderer: dictionary specifying the renderer used to generate a query consumable by Slack API, for example:
            {
                "module_name": "great_expectations.render.renderer.slack_renderer",
                "class_name": "SlackRenderer",
            }
        slack_webhook: incoming Slack webhook to which to send notification
        notify_on: "all", "failure", "success" - specifies validation status that will trigger notification
        payload: *Optional* payload from other ValidationActions
    """
    super().__init__(data_context)
    # Build the renderer from its config dict; a falsy result signals that
    # the configured class could not be instantiated.
    self.renderer = instantiate_class_from_config(
        config=renderer,
        runtime_environment={},
        config_defaults={},
    )
    module_name = renderer["module_name"]
    if not self.renderer:
        raise ClassInstantiationError(
            module_name=module_name,
            package_name=None,
            class_name=renderer["class_name"],
        )
    self.slack_webhook = slack_webhook
    assert slack_webhook, "No Slack webhook found in action config."
    self.notify_on = notify_on
    self.notify_with = notify_with
[ "def", "__init__", "(", "self", ",", "data_context", ",", "renderer", ",", "slack_webhook", ",", "notify_on", "=", "\"all\"", ",", "notify_with", "=", "None", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "self", ".", "renderer", "=", "instantiate_class_from_config", "(", "config", "=", "renderer", ",", "runtime_environment", "=", "{", "}", ",", "config_defaults", "=", "{", "}", ",", ")", "module_name", "=", "renderer", "[", "\"module_name\"", "]", "if", "not", "self", ".", "renderer", ":", "raise", "ClassInstantiationError", "(", "module_name", "=", "module_name", ",", "package_name", "=", "None", ",", "class_name", "=", "renderer", "[", "\"class_name\"", "]", ",", ")", "self", ".", "slack_webhook", "=", "slack_webhook", "assert", "slack_webhook", ",", "\"No Slack webhook found in action config.\"", "self", ".", "notify_on", "=", "notify_on", "self", ".", "notify_with", "=", "notify_with" ]
[ 108, 4 ]
[ 145, 38 ]
python
en
['en', 'ca', 'en']
True
PagerdutyAlertAction.__init__
( self, data_context, api_key, routing_key, notify_on="failure", )
Construct a PagerdutyAlertAction Args: data_context: api_key: Events API v2 key for pagerduty. routing_key: The 32 character Integration Key for an integration on a service or on a global ruleset. notify_on: "all", "failure", "success" - specifies validation status that will trigger notification
Construct a PagerdutyAlertAction
def __init__(
    self,
    data_context,
    api_key,
    routing_key,
    notify_on="failure",
):
    """Construct a PagerdutyAlertAction

    Args:
        data_context:
        api_key: Events API v2 key for pagerduty.
        routing_key: The 32 character Integration Key for an integration on a service or on a global ruleset.
        notify_on: "all", "failure", "success" - specifies validation status that will trigger notification
    """
    super().__init__(data_context)
    # pypd is an optional dependency; fail fast when it is not importable.
    if not pypd:
        raise DataContextError("ModuleNotFoundError: No module named 'pypd'")
    self.api_key = api_key
    assert api_key, "No Pagerduty api_key found in action config."
    self.routing_key = routing_key
    assert routing_key, "No Pagerduty routing_key found in action config."
    self.notify_on = notify_on
[ "def", "__init__", "(", "self", ",", "data_context", ",", "api_key", ",", "routing_key", ",", "notify_on", "=", "\"failure\"", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "if", "not", "pypd", ":", "raise", "DataContextError", "(", "\"ModuleNotFoundError: No module named 'pypd'\"", ")", "self", ".", "api_key", "=", "api_key", "assert", "api_key", ",", "\"No Pagerduty api_key found in action config.\"", "self", ".", "routing_key", "=", "routing_key", "assert", "routing_key", ",", "\"No Pagerduty routing_key found in action config.\"", "self", ".", "notify_on", "=", "notify_on" ]
[ 215, 4 ]
[ 237, 34 ]
python
en
['en', 'lb', 'it']
False
MicrosoftTeamsNotificationAction.__init__
( self, data_context, renderer, microsoft_teams_webhook, notify_on="all", )
Construct a MicrosoftTeamsNotificationAction Args: data_context: renderer: dictionary specifying the renderer used to generate a query consumable by teams API, for example: { "module_name": "great_expectations.render.renderer.microsoft_teams_renderer", "class_name": "MicrosoftTeamsRenderer", } microsoft_teams_webhook: incoming Microsoft Teams webhook to which to send notifications notify_on: "all", "failure", "success" - specifies validation status that will trigger notification payload: *Optional* payload from other ValidationActions
Construct a MicrosoftTeamsNotificationAction
def __init__(
    self,
    data_context,
    renderer,
    microsoft_teams_webhook,
    notify_on="all",
):
    """Construct a MicrosoftTeamsNotificationAction

    Args:
        data_context:
        renderer: dictionary specifying the renderer used to generate a query consumable by teams API, for example:
            {
                "module_name": "great_expectations.render.renderer.microsoft_teams_renderer",
                "class_name": "MicrosoftTeamsRenderer",
            }
        microsoft_teams_webhook: incoming Microsoft Teams webhook to which to send notifications
        notify_on: "all", "failure", "success" - specifies validation status that will trigger notification
        payload: *Optional* payload from other ValidationActions
    """
    super().__init__(data_context)
    # Build the renderer from its config dict; a falsy result signals that
    # the configured class could not be instantiated.
    self.renderer = instantiate_class_from_config(
        config=renderer,
        runtime_environment={},
        config_defaults={},
    )
    module_name = renderer["module_name"]
    if not self.renderer:
        raise ClassInstantiationError(
            module_name=module_name,
            package_name=None,
            class_name=renderer["class_name"],
        )
    self.teams_webhook = microsoft_teams_webhook
    assert (
        microsoft_teams_webhook
    ), "No Microsoft teams webhook found in action config."
    self.notify_on = notify_on
[ "def", "__init__", "(", "self", ",", "data_context", ",", "renderer", ",", "microsoft_teams_webhook", ",", "notify_on", "=", "\"all\"", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "self", ".", "renderer", "=", "instantiate_class_from_config", "(", "config", "=", "renderer", ",", "runtime_environment", "=", "{", "}", ",", "config_defaults", "=", "{", "}", ",", ")", "module_name", "=", "renderer", "[", "\"module_name\"", "]", "if", "not", "self", ".", "renderer", ":", "raise", "ClassInstantiationError", "(", "module_name", "=", "module_name", ",", "package_name", "=", "None", ",", "class_name", "=", "renderer", "[", "\"class_name\"", "]", ",", ")", "self", ".", "teams_webhook", "=", "microsoft_teams_webhook", "assert", "(", "microsoft_teams_webhook", ")", ",", "\"No Microsoft teams webhook found in action config.\"", "self", ".", "notify_on", "=", "notify_on" ]
[ 314, 4 ]
[ 351, 34 ]
python
en
['en', 'en', 'en']
True
OpsgenieAlertAction.__init__
( self, data_context, renderer, api_key, region=None, priority="P3", notify_on="failure", )
Construct a OpsgenieAlertAction Args: data_context: api_key: Opsgenie API key region: specifies the Opsgenie region. Populate 'EU' for Europe otherwise do not set priority: specify the priority of the alert (P1 - P5) defaults to P3 notify_on: "all", "failure", "success" - specifies validation status that will trigger notification
Construct a OpsgenieAlertAction
def __init__(
    self,
    data_context,
    renderer,
    api_key,
    region=None,
    priority="P3",
    notify_on="failure",
):
    """Construct a OpsgenieAlertAction

    Args:
        data_context:
        api_key: Opsgenie API key
        region: specifies the Opsgenie region. Populate 'EU' for Europe otherwise do not set
        priority: specify the priority of the alert (P1 - P5) defaults to P3
        notify_on: "all", "failure", "success" - specifies validation status that will trigger notification
    """
    super().__init__(data_context)
    # Build the renderer from its config dict; a falsy result signals that
    # the configured class could not be instantiated.
    self.renderer = instantiate_class_from_config(
        config=renderer,
        runtime_environment={},
        config_defaults={},
    )
    module_name = renderer["module_name"]
    if not self.renderer:
        raise ClassInstantiationError(
            module_name=module_name,
            package_name=None,
            class_name=renderer["class_name"],
        )
    self.api_key = api_key
    assert api_key, "opsgenie_api_key missing in config_variables.yml"
    self.region = region
    self.priority = priority
    self.notify_on = notify_on
[ "def", "__init__", "(", "self", ",", "data_context", ",", "renderer", ",", "api_key", ",", "region", "=", "None", ",", "priority", "=", "\"P3\"", ",", "notify_on", "=", "\"failure\"", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "self", ".", "renderer", "=", "instantiate_class_from_config", "(", "config", "=", "renderer", ",", "runtime_environment", "=", "{", "}", ",", "config_defaults", "=", "{", "}", ",", ")", "module_name", "=", "renderer", "[", "\"module_name\"", "]", "if", "not", "self", ".", "renderer", ":", "raise", "ClassInstantiationError", "(", "module_name", "=", "module_name", ",", "package_name", "=", "None", ",", "class_name", "=", "renderer", "[", "\"class_name\"", "]", ",", ")", "self", ".", "api_key", "=", "api_key", "assert", "api_key", ",", "\"opsgenie_api_key missing in config_variables.yml\"", "self", ".", "region", "=", "region", "self", ".", "priority", "=", "priority", "self", ".", "notify_on", "=", "notify_on" ]
[ 423, 4 ]
[ 459, 34 ]
python
en
['en', 'lb', 'it']
False
EmailAction.__init__
( self, data_context, renderer, smtp_address, smtp_port, sender_login, sender_password, receiver_emails, sender_alias=None, use_tls=None, use_ssl=None, notify_on="all", notify_with=None, )
Construct an EmailAction Args: data_context: renderer: dictionary specifying the renderer used to generate an email, for example: { "module_name": "great_expectations.render.renderer.email_renderer", "class_name": "EmailRenderer", } smtp_address: address of the SMTP server used to send the email smtp_address: port of the SMTP server used to send the email sender_login: login used send the email sender_password: password used to send the email sender_alias: optional alias used to send the email (default = sender_login) receiver_emails: email addresses that will be receive the email (separated by commas) use_tls: optional use of TLS to send the email (using either TLS or SSL is highly recommended) use_ssl: optional use of SSL to send the email (using either TLS or SSL is highly recommended) notify_on: "all", "failure", "success" - specifies validation status that will trigger notification notify_with: optional list of DataDocs site names to display in the email message
Construct an EmailAction Args: data_context: renderer: dictionary specifying the renderer used to generate an email, for example: { "module_name": "great_expectations.render.renderer.email_renderer", "class_name": "EmailRenderer", } smtp_address: address of the SMTP server used to send the email smtp_address: port of the SMTP server used to send the email sender_login: login used send the email sender_password: password used to send the email sender_alias: optional alias used to send the email (default = sender_login) receiver_emails: email addresses that will be receive the email (separated by commas) use_tls: optional use of TLS to send the email (using either TLS or SSL is highly recommended) use_ssl: optional use of SSL to send the email (using either TLS or SSL is highly recommended) notify_on: "all", "failure", "success" - specifies validation status that will trigger notification notify_with: optional list of DataDocs site names to display in the email message
def __init__( self, data_context, renderer, smtp_address, smtp_port, sender_login, sender_password, receiver_emails, sender_alias=None, use_tls=None, use_ssl=None, notify_on="all", notify_with=None, ): """Construct an EmailAction Args: data_context: renderer: dictionary specifying the renderer used to generate an email, for example: { "module_name": "great_expectations.render.renderer.email_renderer", "class_name": "EmailRenderer", } smtp_address: address of the SMTP server used to send the email smtp_address: port of the SMTP server used to send the email sender_login: login used send the email sender_password: password used to send the email sender_alias: optional alias used to send the email (default = sender_login) receiver_emails: email addresses that will be receive the email (separated by commas) use_tls: optional use of TLS to send the email (using either TLS or SSL is highly recommended) use_ssl: optional use of SSL to send the email (using either TLS or SSL is highly recommended) notify_on: "all", "failure", "success" - specifies validation status that will trigger notification notify_with: optional list of DataDocs site names to display in the email message """ super().__init__(data_context) self.renderer = instantiate_class_from_config( config=renderer, runtime_environment={}, config_defaults={}, ) module_name = renderer["module_name"] if not self.renderer: raise ClassInstantiationError( module_name=module_name, package_name=None, class_name=renderer["class_name"], ) self.smtp_address = smtp_address self.smtp_port = smtp_port self.sender_login = sender_login self.sender_password = sender_password if not sender_alias: self.sender_alias = sender_login else: self.sender_alias = sender_alias self.receiver_emails_list = list( map(lambda x: x.strip(), receiver_emails.split(",")) ) self.use_tls = use_tls self.use_ssl = use_ssl assert smtp_address, "No SMTP server address found in action config." assert smtp_port, "No SMTP server port found in action config." 
assert sender_login, "No login found for sending the email in action config." assert ( sender_password ), "No password found for sending the email in action config." assert ( receiver_emails ), "No email addresses to send the email to in action config." self.notify_on = notify_on self.notify_with = notify_with
[ "def", "__init__", "(", "self", ",", "data_context", ",", "renderer", ",", "smtp_address", ",", "smtp_port", ",", "sender_login", ",", "sender_password", ",", "receiver_emails", ",", "sender_alias", "=", "None", ",", "use_tls", "=", "None", ",", "use_ssl", "=", "None", ",", "notify_on", "=", "\"all\"", ",", "notify_with", "=", "None", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "self", ".", "renderer", "=", "instantiate_class_from_config", "(", "config", "=", "renderer", ",", "runtime_environment", "=", "{", "}", ",", "config_defaults", "=", "{", "}", ",", ")", "module_name", "=", "renderer", "[", "\"module_name\"", "]", "if", "not", "self", ".", "renderer", ":", "raise", "ClassInstantiationError", "(", "module_name", "=", "module_name", ",", "package_name", "=", "None", ",", "class_name", "=", "renderer", "[", "\"class_name\"", "]", ",", ")", "self", ".", "smtp_address", "=", "smtp_address", "self", ".", "smtp_port", "=", "smtp_port", "self", ".", "sender_login", "=", "sender_login", "self", ".", "sender_password", "=", "sender_password", "if", "not", "sender_alias", ":", "self", ".", "sender_alias", "=", "sender_login", "else", ":", "self", ".", "sender_alias", "=", "sender_alias", "self", ".", "receiver_emails_list", "=", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "receiver_emails", ".", "split", "(", "\",\"", ")", ")", ")", "self", ".", "use_tls", "=", "use_tls", "self", ".", "use_ssl", "=", "use_ssl", "assert", "smtp_address", ",", "\"No SMTP server address found in action config.\"", "assert", "smtp_port", ",", "\"No SMTP server port found in action config.\"", "assert", "sender_login", ",", "\"No login found for sending the email in action config.\"", "assert", "(", "sender_password", ")", ",", "\"No password found for sending the email in action config.\"", "assert", "(", "receiver_emails", ")", ",", "\"No email addresses to send the email to in action config.\"", "self", ".", 
"notify_on", "=", "notify_on", "self", ".", "notify_with", "=", "notify_with" ]
[ 540, 4 ]
[ 610, 38 ]
python
en
['en', 'en', 'en']
True
StoreValidationResultAction.__init__
( self, data_context, target_store_name=None, )
:param data_context: Data Context :param target_store_name: the name of the param_store in the Data Context which should be used to param_store the validation result
def __init__( self, data_context, target_store_name=None, ): """ :param data_context: Data Context :param target_store_name: the name of the param_store in the Data Context which should be used to param_store the validation result """ super().__init__(data_context) if target_store_name is None: self.target_store = data_context.stores[data_context.validations_store_name] else: self.target_store = data_context.stores[target_store_name]
[ "def", "__init__", "(", "self", ",", "data_context", ",", "target_store_name", "=", "None", ",", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "if", "target_store_name", "is", "None", ":", "self", ".", "target_store", "=", "data_context", ".", "stores", "[", "data_context", ".", "validations_store_name", "]", "else", ":", "self", ".", "target_store", "=", "data_context", ".", "stores", "[", "target_store_name", "]" ]
[ 687, 4 ]
[ 703, 70 ]
python
en
['en', 'error', 'th']
False
StoreEvaluationParametersAction.__init__
(self, data_context, target_store_name=None)
Args: data_context: Data Context target_store_name: the name of the store in the Data Context which should be used to store the evaluation parameters
def __init__(self, data_context, target_store_name=None): """ Args: data_context: Data Context target_store_name: the name of the store in the Data Context which should be used to store the evaluation parameters """ super().__init__(data_context) if target_store_name is None: self.target_store = data_context.evaluation_parameter_store else: self.target_store = data_context.stores[target_store_name]
[ "def", "__init__", "(", "self", ",", "data_context", ",", "target_store_name", "=", "None", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "if", "target_store_name", "is", "None", ":", "self", ".", "target_store", "=", "data_context", ".", "evaluation_parameter_store", "else", ":", "self", ".", "target_store", "=", "data_context", ".", "stores", "[", "target_store_name", "]" ]
[ 752, 4 ]
[ 765, 70 ]
python
en
['en', 'error', 'th']
False
StoreMetricsAction.__init__
( self, data_context, requested_metrics, target_store_name="metrics_store" )
Args: data_context: Data Context requested_metrics: dictionary of metrics to store. Dictionary should have the following structure: expectation_suite_name: metric_name: - metric_kwargs_id You may use "*" to denote that any expectation suite should match. target_store_name: the name of the store in the Data Context which should be used to store the metrics
def __init__( self, data_context, requested_metrics, target_store_name="metrics_store" ): """ Args: data_context: Data Context requested_metrics: dictionary of metrics to store. Dictionary should have the following structure: expectation_suite_name: metric_name: - metric_kwargs_id You may use "*" to denote that any expectation suite should match. target_store_name: the name of the store in the Data Context which should be used to store the metrics """ super().__init__(data_context) self._requested_metrics = requested_metrics self._target_store_name = target_store_name try: store = data_context.stores[target_store_name] except KeyError: raise DataContextError( "Unable to find store {} in your DataContext configuration.".format( target_store_name ) ) if not isinstance(store, MetricStore): raise DataContextError( "StoreMetricsAction must have a valid MetricsStore for its target store." )
[ "def", "__init__", "(", "self", ",", "data_context", ",", "requested_metrics", ",", "target_store_name", "=", "\"metrics_store\"", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "self", ".", "_requested_metrics", "=", "requested_metrics", "self", ".", "_target_store_name", "=", "target_store_name", "try", ":", "store", "=", "data_context", ".", "stores", "[", "target_store_name", "]", "except", "KeyError", ":", "raise", "DataContextError", "(", "\"Unable to find store {} in your DataContext configuration.\"", ".", "format", "(", "target_store_name", ")", ")", "if", "not", "isinstance", "(", "store", ",", "MetricStore", ")", ":", "raise", "DataContextError", "(", "\"StoreMetricsAction must have a valid MetricsStore for its target store.\"", ")" ]
[ 809, 4 ]
[ 840, 13 ]
python
en
['en', 'error', 'th']
False
UpdateDataDocsAction.__init__
(self, data_context, site_names=None, target_site_names=None)
:param data_context: Data Context :param site_names: *optional* List of site names for building data docs
:param data_context: Data Context :param site_names: *optional* List of site names for building data docs
def __init__(self, data_context, site_names=None, target_site_names=None): """ :param data_context: Data Context :param site_names: *optional* List of site names for building data docs """ super().__init__(data_context) if target_site_names: warnings.warn( "target_site_names is deprecated. Please use site_names instead.", DeprecationWarning, ) if site_names: raise DataContextError( "Invalid configuration: legacy key target_site_names and site_names key are " "both present in UpdateDataDocsAction configuration" ) site_names = target_site_names self._site_names = site_names
[ "def", "__init__", "(", "self", ",", "data_context", ",", "site_names", "=", "None", ",", "target_site_names", "=", "None", ")", ":", "super", "(", ")", ".", "__init__", "(", "data_context", ")", "if", "target_site_names", ":", "warnings", ".", "warn", "(", "\"target_site_names is deprecated. Please use site_names instead.\"", ",", "DeprecationWarning", ",", ")", "if", "site_names", ":", "raise", "DataContextError", "(", "\"Invalid configuration: legacy key target_site_names and site_names key are \"", "\"both present in UpdateDataDocsAction configuration\"", ")", "site_names", "=", "target_site_names", "self", ".", "_site_names", "=", "site_names" ]
[ 893, 4 ]
[ 910, 37 ]
python
en
['en', 'error', 'th']
False
test_parse_cli_config_file_location_posix_paths
(tmp_path_factory)
What does this test and why? We want to parse posix paths into their directory and filename parts so that we can pass the directory to our data context constructor. We need to be able to do that with all versions of path that can be input. This tests for posix paths for files/dirs that don't exist and files/dirs that do. Other tests handle testing for windows support.
What does this test and why? We want to parse posix paths into their directory and filename parts so that we can pass the directory to our data context constructor. We need to be able to do that with all versions of path that can be input. This tests for posix paths for files/dirs that don't exist and files/dirs that do. Other tests handle testing for windows support.
def test_parse_cli_config_file_location_posix_paths(tmp_path_factory): """ What does this test and why? We want to parse posix paths into their directory and filename parts so that we can pass the directory to our data context constructor. We need to be able to do that with all versions of path that can be input. This tests for posix paths for files/dirs that don't exist and files/dirs that do. Other tests handle testing for windows support. """ filename_fixtures = [ { "input_path": "just_a_file.yml", "expected": { "directory": "", "filename": "just_a_file.yml", }, }, ] absolute_path_fixtures = [ { "input_path": "/path/to/file/filename.yml", "expected": { "directory": "/path/to/file", "filename": "filename.yml", }, }, { "input_path": "/absolute/directory/ending/slash/", "expected": { "directory": "/absolute/directory/ending/slash/", "filename": None, }, }, { "input_path": "/absolute/directory/ending/no/slash", "expected": { "directory": "/absolute/directory/ending/no/slash", "filename": None, }, }, ] relative_path_fixtures = [ { "input_path": "relative/path/to/file.yml", "expected": { "directory": "relative/path/to", "filename": "file.yml", }, }, { "input_path": "relative/path/to/directory/slash/", "expected": { "directory": "relative/path/to/directory/slash/", "filename": None, }, }, { "input_path": "relative/path/to/directory/no_slash", "expected": { "directory": "relative/path/to/directory/no_slash", "filename": None, }, }, ] fixtures = filename_fixtures + absolute_path_fixtures + relative_path_fixtures for fixture in fixtures: with pytest.raises(ge_exceptions.ConfigNotFoundError): toolkit.parse_cli_config_file_location(fixture["input_path"]) # Create files and re-run assertions root_dir = tmp_path_factory.mktemp("posix") root_dir = str(root_dir) for fixture in fixtures: expected_dir = fixture.get("expected").get("directory") # Make non-absolute path if expected_dir is not None and expected_dir.startswith("/"): expected_dir = expected_dir[1:] expected_filename = 
fixture.get("expected").get("filename") if expected_dir: test_directory = os.path.join(root_dir, expected_dir) os.makedirs(test_directory, exist_ok=True) if expected_filename: expected_filepath = os.path.join(test_directory, expected_filename) with open(expected_filepath, "w") as fp: pass output = toolkit.parse_cli_config_file_location(expected_filepath) assert output == { "directory": os.path.join(root_dir, expected_dir), "filename": expected_filename, }
[ "def", "test_parse_cli_config_file_location_posix_paths", "(", "tmp_path_factory", ")", ":", "filename_fixtures", "=", "[", "{", "\"input_path\"", ":", "\"just_a_file.yml\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "\"\"", ",", "\"filename\"", ":", "\"just_a_file.yml\"", ",", "}", ",", "}", ",", "]", "absolute_path_fixtures", "=", "[", "{", "\"input_path\"", ":", "\"/path/to/file/filename.yml\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "\"/path/to/file\"", ",", "\"filename\"", ":", "\"filename.yml\"", ",", "}", ",", "}", ",", "{", "\"input_path\"", ":", "\"/absolute/directory/ending/slash/\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "\"/absolute/directory/ending/slash/\"", ",", "\"filename\"", ":", "None", ",", "}", ",", "}", ",", "{", "\"input_path\"", ":", "\"/absolute/directory/ending/no/slash\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "\"/absolute/directory/ending/no/slash\"", ",", "\"filename\"", ":", "None", ",", "}", ",", "}", ",", "]", "relative_path_fixtures", "=", "[", "{", "\"input_path\"", ":", "\"relative/path/to/file.yml\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "\"relative/path/to\"", ",", "\"filename\"", ":", "\"file.yml\"", ",", "}", ",", "}", ",", "{", "\"input_path\"", ":", "\"relative/path/to/directory/slash/\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "\"relative/path/to/directory/slash/\"", ",", "\"filename\"", ":", "None", ",", "}", ",", "}", ",", "{", "\"input_path\"", ":", "\"relative/path/to/directory/no_slash\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "\"relative/path/to/directory/no_slash\"", ",", "\"filename\"", ":", "None", ",", "}", ",", "}", ",", "]", "fixtures", "=", "filename_fixtures", "+", "absolute_path_fixtures", "+", "relative_path_fixtures", "for", "fixture", "in", "fixtures", ":", "with", "pytest", ".", "raises", "(", "ge_exceptions", ".", "ConfigNotFoundError", ")", ":", "toolkit", ".", "parse_cli_config_file_location", "(", 
"fixture", "[", "\"input_path\"", "]", ")", "# Create files and re-run assertions", "root_dir", "=", "tmp_path_factory", ".", "mktemp", "(", "\"posix\"", ")", "root_dir", "=", "str", "(", "root_dir", ")", "for", "fixture", "in", "fixtures", ":", "expected_dir", "=", "fixture", ".", "get", "(", "\"expected\"", ")", ".", "get", "(", "\"directory\"", ")", "# Make non-absolute path", "if", "expected_dir", "is", "not", "None", "and", "expected_dir", ".", "startswith", "(", "\"/\"", ")", ":", "expected_dir", "=", "expected_dir", "[", "1", ":", "]", "expected_filename", "=", "fixture", ".", "get", "(", "\"expected\"", ")", ".", "get", "(", "\"filename\"", ")", "if", "expected_dir", ":", "test_directory", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "expected_dir", ")", "os", ".", "makedirs", "(", "test_directory", ",", "exist_ok", "=", "True", ")", "if", "expected_filename", ":", "expected_filepath", "=", "os", ".", "path", ".", "join", "(", "test_directory", ",", "expected_filename", ")", "with", "open", "(", "expected_filepath", ",", "\"w\"", ")", "as", "fp", ":", "pass", "output", "=", "toolkit", ".", "parse_cli_config_file_location", "(", "expected_filepath", ")", "assert", "output", "==", "{", "\"directory\"", ":", "os", ".", "path", ".", "join", "(", "root_dir", ",", "expected_dir", ")", ",", "\"filename\"", ":", "expected_filename", ",", "}" ]
[ 46, 0 ]
[ 142, 17 ]
python
en
['en', 'error', 'th']
False
test_parse_cli_config_file_location_windows_paths
(tmp_path_factory)
What does this test and why? Since we are unable to test windows paths on our unix CI, this just tests that if a file doesn't exist we raise an error. Args: tmp_path_factory: Returns:
What does this test and why? Since we are unable to test windows paths on our unix CI, this just tests that if a file doesn't exist we raise an error. Args: tmp_path_factory: Returns:
def test_parse_cli_config_file_location_windows_paths(tmp_path_factory): """ What does this test and why? Since we are unable to test windows paths on our unix CI, this just tests that if a file doesn't exist we raise an error. Args: tmp_path_factory: Returns: """ filename_fixtures = [ { "input_path": "just_a_file.yml", "expected": { "directory": "", "filename": "just_a_file.yml", }, }, ] absolute_path_fixtures = [ { "input_path": r"C:\absolute\windows\path\to\file.yml", "expected": { "directory": r"C:\absolute\windows\path\to", "filename": "file.yml", }, }, { "input_path": r"C:\absolute\windows\directory\ending\slash\\", "expected": { "directory": r"C:\absolute\windows\directory\ending\slash\\", "filename": None, }, }, { "input_path": r"C:\absolute\windows\directory\ending\no_slash", "expected": { "directory": r"C:\absolute\windows\directory\ending\no_slash", "filename": None, }, }, ] relative_path_fixtures = [ { "input_path": r"relative\windows\path\to\file.yml", "expected": { "directory": r"relative\windows\path\to", "filename": "file.yml", }, }, # Double slash at end of raw string to escape slash { "input_path": r"relative\windows\path\to\directory\slash\\", "expected": { "directory": r"relative\windows\path\to\directory\slash\\", "filename": None, }, }, { "input_path": r"relative\windows\path\to\directory\no_slash", "expected": { "directory": r"relative\windows\path\to\directory\no_slash", "filename": None, }, }, ] fixtures = filename_fixtures + absolute_path_fixtures + relative_path_fixtures for fixture in fixtures: with pytest.raises(ge_exceptions.ConfigNotFoundError): toolkit.parse_cli_config_file_location(fixture["input_path"])
[ "def", "test_parse_cli_config_file_location_windows_paths", "(", "tmp_path_factory", ")", ":", "filename_fixtures", "=", "[", "{", "\"input_path\"", ":", "\"just_a_file.yml\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "\"\"", ",", "\"filename\"", ":", "\"just_a_file.yml\"", ",", "}", ",", "}", ",", "]", "absolute_path_fixtures", "=", "[", "{", "\"input_path\"", ":", "r\"C:\\absolute\\windows\\path\\to\\file.yml\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "r\"C:\\absolute\\windows\\path\\to\"", ",", "\"filename\"", ":", "\"file.yml\"", ",", "}", ",", "}", ",", "{", "\"input_path\"", ":", "r\"C:\\absolute\\windows\\directory\\ending\\slash\\\\\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "r\"C:\\absolute\\windows\\directory\\ending\\slash\\\\\"", ",", "\"filename\"", ":", "None", ",", "}", ",", "}", ",", "{", "\"input_path\"", ":", "r\"C:\\absolute\\windows\\directory\\ending\\no_slash\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "r\"C:\\absolute\\windows\\directory\\ending\\no_slash\"", ",", "\"filename\"", ":", "None", ",", "}", ",", "}", ",", "]", "relative_path_fixtures", "=", "[", "{", "\"input_path\"", ":", "r\"relative\\windows\\path\\to\\file.yml\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "r\"relative\\windows\\path\\to\"", ",", "\"filename\"", ":", "\"file.yml\"", ",", "}", ",", "}", ",", "# Double slash at end of raw string to escape slash", "{", "\"input_path\"", ":", "r\"relative\\windows\\path\\to\\directory\\slash\\\\\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "r\"relative\\windows\\path\\to\\directory\\slash\\\\\"", ",", "\"filename\"", ":", "None", ",", "}", ",", "}", ",", "{", "\"input_path\"", ":", "r\"relative\\windows\\path\\to\\directory\\no_slash\"", ",", "\"expected\"", ":", "{", "\"directory\"", ":", "r\"relative\\windows\\path\\to\\directory\\no_slash\"", ",", "\"filename\"", ":", "None", ",", "}", ",", "}", ",", "]", "fixtures", "=", "filename_fixtures", "+", 
"absolute_path_fixtures", "+", "relative_path_fixtures", "for", "fixture", "in", "fixtures", ":", "with", "pytest", ".", "raises", "(", "ge_exceptions", ".", "ConfigNotFoundError", ")", ":", "toolkit", ".", "parse_cli_config_file_location", "(", "fixture", "[", "\"input_path\"", "]", ")" ]
[ 233, 0 ]
[ 303, 73 ]
python
en
['en', 'error', 'th']
False
simulated_project_directories
(tmp_path_factory)
Using a wacky simulated directory structure allows testing of permutations of relative, absolute, and current working directories. /random/pytest/dir/projects/pipeline1/great_expectations /random/pytest/dir/projects/data/pipeline1
Using a wacky simulated directory structure allows testing of permutations of relative, absolute, and current working directories.
def simulated_project_directories(tmp_path_factory): """ Using a wacky simulated directory structure allows testing of permutations of relative, absolute, and current working directories. /random/pytest/dir/projects/pipeline1/great_expectations /random/pytest/dir/projects/data/pipeline1 """ test_dir = tmp_path_factory.mktemp("projects", numbered=False) assert os.path.isabs(test_dir) ge_dir = os.path.join(test_dir, "pipeline1", "great_expectations") os.makedirs(ge_dir) assert os.path.isdir(ge_dir) data_dir = os.path.join(test_dir, "data", "pipeline1") os.makedirs(data_dir) assert os.path.isdir(data_dir) yield ge_dir, data_dir shutil.rmtree(test_dir)
[ "def", "simulated_project_directories", "(", "tmp_path_factory", ")", ":", "test_dir", "=", "tmp_path_factory", ".", "mktemp", "(", "\"projects\"", ",", "numbered", "=", "False", ")", "assert", "os", ".", "path", ".", "isabs", "(", "test_dir", ")", "ge_dir", "=", "os", ".", "path", ".", "join", "(", "test_dir", ",", "\"pipeline1\"", ",", "\"great_expectations\"", ")", "os", ".", "makedirs", "(", "ge_dir", ")", "assert", "os", ".", "path", ".", "isdir", "(", "ge_dir", ")", "data_dir", "=", "os", ".", "path", ".", "join", "(", "test_dir", ",", "\"data\"", ",", "\"pipeline1\"", ")", "os", ".", "makedirs", "(", "data_dir", ")", "assert", "os", ".", "path", ".", "isdir", "(", "data_dir", ")", "yield", "ge_dir", ",", "data_dir", "shutil", ".", "rmtree", "(", "test_dir", ")" ]
[ 351, 0 ]
[ 371, 27 ]
python
en
['en', 'error', 'th']
False
test_get_relative_path_from_config_file_to_data_base_file_path_from_within_ge_directory_and_relative_data_path
( monkeypatch, simulated_project_directories )
This test simulates using the CLI from within the great_expectations directory. /projects/pipeline1/great_expectations /projects/data/pipeline1 cwd: /projects/pipeline1/great_expectations data: ../../data/pipeline1 expected results in yaml: ../../data/pipeline1
This test simulates using the CLI from within the great_expectations directory.
def test_get_relative_path_from_config_file_to_data_base_file_path_from_within_ge_directory_and_relative_data_path( monkeypatch, simulated_project_directories ): """ This test simulates using the CLI from within the great_expectations directory. /projects/pipeline1/great_expectations /projects/data/pipeline1 cwd: /projects/pipeline1/great_expectations data: ../../data/pipeline1 expected results in yaml: ../../data/pipeline1 """ ge_dir, data_dir = simulated_project_directories monkeypatch.chdir(ge_dir) assert str(os.path.abspath(os.path.curdir)) == str(ge_dir) obs = get_relative_path_from_config_file_to_base_path( ge_dir, os.path.join("..", "..", "data", "pipeline1") ) assert obs == os.path.join("..", "..", "data", "pipeline1")
[ "def", "test_get_relative_path_from_config_file_to_data_base_file_path_from_within_ge_directory_and_relative_data_path", "(", "monkeypatch", ",", "simulated_project_directories", ")", ":", "ge_dir", ",", "data_dir", "=", "simulated_project_directories", "monkeypatch", ".", "chdir", "(", "ge_dir", ")", "assert", "str", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", ")", "==", "str", "(", "ge_dir", ")", "obs", "=", "get_relative_path_from_config_file_to_base_path", "(", "ge_dir", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")", ")", "assert", "obs", "==", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")" ]
[ 374, 0 ]
[ 395, 63 ]
python
en
['en', 'error', 'th']
False
test_get_relative_path_from_config_file_to_data_base_file_path_from_within_ge_directory_and_absolute_data_path
( monkeypatch, simulated_project_directories )
This test simulates using the CLI from within the great_expectations directory and using an absolute path. /projects/pipeline1/great_expectations /projects/data/pipeline1 cwd: /projects/pipeline1/great_expectations data: /projects/data/pipeline1 expected results in yaml: ../../data/pipeline1
This test simulates using the CLI from within the great_expectations directory and using an absolute path.
def test_get_relative_path_from_config_file_to_data_base_file_path_from_within_ge_directory_and_absolute_data_path( monkeypatch, simulated_project_directories ): """ This test simulates using the CLI from within the great_expectations directory and using an absolute path. /projects/pipeline1/great_expectations /projects/data/pipeline1 cwd: /projects/pipeline1/great_expectations data: /projects/data/pipeline1 expected results in yaml: ../../data/pipeline1 """ ge_dir, data_dir = simulated_project_directories monkeypatch.chdir(ge_dir) assert str(os.path.abspath(os.path.curdir)) == str(ge_dir) absolute_path = os.path.abspath(os.path.join("..", "..", "data", "pipeline1")) obs = get_relative_path_from_config_file_to_base_path(ge_dir, absolute_path) assert obs == os.path.join("..", "..", "data", "pipeline1")
[ "def", "test_get_relative_path_from_config_file_to_data_base_file_path_from_within_ge_directory_and_absolute_data_path", "(", "monkeypatch", ",", "simulated_project_directories", ")", ":", "ge_dir", ",", "data_dir", "=", "simulated_project_directories", "monkeypatch", ".", "chdir", "(", "ge_dir", ")", "assert", "str", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", ")", "==", "str", "(", "ge_dir", ")", "absolute_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")", ")", "obs", "=", "get_relative_path_from_config_file_to_base_path", "(", "ge_dir", ",", "absolute_path", ")", "assert", "obs", "==", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")" ]
[ 398, 0 ]
[ 418, 63 ]
python
en
['en', 'error', 'th']
False
test_get_relative_path_from_config_file_to_data_base_file_path_from_adjacent_directory_and_relative_data_path
( monkeypatch, simulated_project_directories )
This test simulates using the CLI from a directory containing the great_expectations directory. /projects/pipeline1/great_expectations /projects/data/pipeline1 cwd: /projects/pipeline1 data: ../data/pipeline1 expected results in yaml: ../../data/pipeline1
This test simulates using the CLI from a directory containing the great_expectations directory.
def test_get_relative_path_from_config_file_to_data_base_file_path_from_adjacent_directory_and_relative_data_path( monkeypatch, simulated_project_directories ): """ This test simulates using the CLI from a directory containing the great_expectations directory. /projects/pipeline1/great_expectations /projects/data/pipeline1 cwd: /projects/pipeline1 data: ../data/pipeline1 expected results in yaml: ../../data/pipeline1 """ ge_dir, data_dir = simulated_project_directories adjacent_dir = os.path.dirname(ge_dir) monkeypatch.chdir(adjacent_dir) assert str(os.path.abspath(os.path.curdir)) == str(adjacent_dir) obs = get_relative_path_from_config_file_to_base_path( ge_dir, os.path.join("..", "data", "pipeline1") ) assert obs == os.path.join("..", "..", "data", "pipeline1")
[ "def", "test_get_relative_path_from_config_file_to_data_base_file_path_from_adjacent_directory_and_relative_data_path", "(", "monkeypatch", ",", "simulated_project_directories", ")", ":", "ge_dir", ",", "data_dir", "=", "simulated_project_directories", "adjacent_dir", "=", "os", ".", "path", ".", "dirname", "(", "ge_dir", ")", "monkeypatch", ".", "chdir", "(", "adjacent_dir", ")", "assert", "str", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", ")", "==", "str", "(", "adjacent_dir", ")", "obs", "=", "get_relative_path_from_config_file_to_base_path", "(", "ge_dir", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")", ")", "assert", "obs", "==", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")" ]
[ 421, 0 ]
[ 443, 63 ]
python
en
['en', 'error', 'th']
False
test_get_relative_path_from_config_file_to_data_base_file_path_from_adjacent_directory_and_absolute_data_path
( monkeypatch, simulated_project_directories )
This test simulates using the CLI from a directory containing the great_expectations directory and using an absolute path. /projects/pipeline1/great_expectations /projects/data/pipeline1 cwd: /projects/pipeline1 data: /projects/data/pipeline1 expected results in yaml: ../../data/pipeline1
This test simulates using the CLI from a directory containing the great_expectations directory and using an absolute path.
def test_get_relative_path_from_config_file_to_data_base_file_path_from_adjacent_directory_and_absolute_data_path( monkeypatch, simulated_project_directories ): """ This test simulates using the CLI from a directory containing the great_expectations directory and using an absolute path. /projects/pipeline1/great_expectations /projects/data/pipeline1 cwd: /projects/pipeline1 data: /projects/data/pipeline1 expected results in yaml: ../../data/pipeline1 """ ge_dir, data_dir = simulated_project_directories adjacent_dir = os.path.dirname(ge_dir) monkeypatch.chdir(adjacent_dir) assert str(os.path.abspath(os.path.curdir)) == str(adjacent_dir) absolute_path = os.path.abspath(os.path.join("..", "data", "pipeline1")) obs = get_relative_path_from_config_file_to_base_path(ge_dir, absolute_path) assert obs == os.path.join("..", "..", "data", "pipeline1")
[ "def", "test_get_relative_path_from_config_file_to_data_base_file_path_from_adjacent_directory_and_absolute_data_path", "(", "monkeypatch", ",", "simulated_project_directories", ")", ":", "ge_dir", ",", "data_dir", "=", "simulated_project_directories", "adjacent_dir", "=", "os", ".", "path", ".", "dirname", "(", "ge_dir", ")", "monkeypatch", ".", "chdir", "(", "adjacent_dir", ")", "assert", "str", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", ")", "==", "str", "(", "adjacent_dir", ")", "absolute_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")", ")", "obs", "=", "get_relative_path_from_config_file_to_base_path", "(", "ge_dir", ",", "absolute_path", ")", "assert", "obs", "==", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")" ]
[ 446, 0 ]
[ 467, 63 ]
python
en
['en', 'error', 'th']
False
test_get_relative_path_from_config_file_to_data_base_file_path_from_misc_directory_and_relative_data_path
( monkeypatch, misc_directory, simulated_project_directories )
This test simulates using the CLI with the --config flag operating from a random directory /projects/pipeline1/great_expectations /projects/data/pipeline1 /tmp_path/misc cwd: /tmp_path/random data: ../../projects/data/pipeline1 expected results in yaml: ../../data/pipeline1
This test simulates using the CLI with the --config flag operating from a random directory
def test_get_relative_path_from_config_file_to_data_base_file_path_from_misc_directory_and_relative_data_path( monkeypatch, misc_directory, simulated_project_directories ): """ This test simulates using the CLI with the --config flag operating from a random directory /projects/pipeline1/great_expectations /projects/data/pipeline1 /tmp_path/misc cwd: /tmp_path/random data: ../../projects/data/pipeline1 expected results in yaml: ../../data/pipeline1 """ ge_dir, data_dir = simulated_project_directories monkeypatch.chdir(misc_directory) assert str(os.path.abspath(os.path.curdir)) == str(misc_directory) obs = get_relative_path_from_config_file_to_base_path( ge_dir, os.path.join("..", "..", "projects", "data", "pipeline1") ) assert obs == os.path.join("..", "..", "data", "pipeline1")
[ "def", "test_get_relative_path_from_config_file_to_data_base_file_path_from_misc_directory_and_relative_data_path", "(", "monkeypatch", ",", "misc_directory", ",", "simulated_project_directories", ")", ":", "ge_dir", ",", "data_dir", "=", "simulated_project_directories", "monkeypatch", ".", "chdir", "(", "misc_directory", ")", "assert", "str", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", ")", "==", "str", "(", "misc_directory", ")", "obs", "=", "get_relative_path_from_config_file_to_base_path", "(", "ge_dir", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"projects\"", ",", "\"data\"", ",", "\"pipeline1\"", ")", ")", "assert", "obs", "==", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")" ]
[ 470, 0 ]
[ 492, 63 ]
python
en
['en', 'error', 'th']
False
test_get_relative_path_from_config_file_to_data_base_file_path_from_misc_directory_and_absolute_data_path
( monkeypatch, misc_directory, simulated_project_directories )
This test simulates using the CLI with the --config flag operating from a random directory and using an absolute path. /projects/pipeline1/great_expectations /projects/data/pipeline1 /tmp_path/misc cwd: /tmp_path/misc data: /projects/data/pipeline1 expected results in yaml: ../../data/pipeline1
This test simulates using the CLI with the --config flag operating from a random directory and using an absolute path.
def test_get_relative_path_from_config_file_to_data_base_file_path_from_misc_directory_and_absolute_data_path( monkeypatch, misc_directory, simulated_project_directories ): """ This test simulates using the CLI with the --config flag operating from a random directory and using an absolute path. /projects/pipeline1/great_expectations /projects/data/pipeline1 /tmp_path/misc cwd: /tmp_path/misc data: /projects/data/pipeline1 expected results in yaml: ../../data/pipeline1 """ ge_dir, data_dir = simulated_project_directories monkeypatch.chdir(misc_directory) assert str(os.path.abspath(os.path.curdir)) == str(misc_directory) absolute_path = os.path.abspath( os.path.join("..", "..", "projects", "data", "pipeline1") ) obs = get_relative_path_from_config_file_to_base_path(ge_dir, absolute_path) assert obs == os.path.join("..", "..", "data", "pipeline1")
[ "def", "test_get_relative_path_from_config_file_to_data_base_file_path_from_misc_directory_and_absolute_data_path", "(", "monkeypatch", ",", "misc_directory", ",", "simulated_project_directories", ")", ":", "ge_dir", ",", "data_dir", "=", "simulated_project_directories", "monkeypatch", ".", "chdir", "(", "misc_directory", ")", "assert", "str", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", ")", "==", "str", "(", "misc_directory", ")", "absolute_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"projects\"", ",", "\"data\"", ",", "\"pipeline1\"", ")", ")", "obs", "=", "get_relative_path_from_config_file_to_base_path", "(", "ge_dir", ",", "absolute_path", ")", "assert", "obs", "==", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"..\"", ",", "\"data\"", ",", "\"pipeline1\"", ")" ]
[ 495, 0 ]
[ 518, 63 ]
python
en
['en', 'error', 'th']
False
NDWI
(data, normalize=False, band_pair=0)
Computes various versions of the Normalized Difference Water Index for an `xarray.Dataset`. Values should be in the range [-1,1] for valid LANDSAT data (the bands are positive). Parameters ---------- data: xarray.Dataset or numpy.ndarray An `xarray.Dataset` containing the bands specified by `band_pair` or a 2D NumPy array with two columns - the band pair. normalize: bool Whether or not to normalize to the range [0,1]. band_pair: int The band pair to use. Band pair 0 uses 'nir' and 'swir1': (nir - swir1)/(nir + swir1). Band pair 1 uses 'green' and 'nir': (green - nir)/(green + nir). Returns ------- ndwi: xarray.DataArray An `xarray.DataArray` with the same shape as `dataset` - the same coordinates in the same order.
Computes various versions of the Normalized Difference Water Index for an `xarray.Dataset`. Values should be in the range [-1,1] for valid LANDSAT data (the bands are positive). Parameters ---------- data: xarray.Dataset or numpy.ndarray An `xarray.Dataset` containing the bands specified by `band_pair` or a 2D NumPy array with two columns - the band pair. normalize: bool Whether or not to normalize to the range [0,1]. band_pair: int The band pair to use. Band pair 0 uses 'nir' and 'swir1': (nir - swir1)/(nir + swir1). Band pair 1 uses 'green' and 'nir': (green - nir)/(green + nir).
def NDWI(data, normalize=False, band_pair=0): """ Computes various versions of the Normalized Difference Water Index for an `xarray.Dataset`. Values should be in the range [-1,1] for valid LANDSAT data (the bands are positive). Parameters ---------- data: xarray.Dataset or numpy.ndarray An `xarray.Dataset` containing the bands specified by `band_pair` or a 2D NumPy array with two columns - the band pair. normalize: bool Whether or not to normalize to the range [0,1]. band_pair: int The band pair to use. Band pair 0 uses 'nir' and 'swir1': (nir - swir1)/(nir + swir1). Band pair 1 uses 'green' and 'nir': (green - nir)/(green + nir). Returns ------- ndwi: xarray.DataArray An `xarray.DataArray` with the same shape as `dataset` - the same coordinates in the same order. """ bands = [None] * 2 if band_pair == 0: bands = ['nir', 'swir1'] elif band_pair == 1: bands = ['green', 'nir'] else: raise AssertionError('The band_pair parameter must be in [0,1]') if isinstance(data, xr.Dataset): ndwi = (data[bands[0]] - data[bands[1]]) / (data[bands[0]] + data[bands[1]]) if normalize: ndwi = (ndwi - ndwi.min())/(ndwi.max() - ndwi.min()) else: ndwi = data[:,0] - data[:,1] if normalize: ndwi = (ndwi - np.nanmin(ndwi))/(np.nanmax(ndwi) - np.nanmin(ndwi)) return ndwi
[ "def", "NDWI", "(", "data", ",", "normalize", "=", "False", ",", "band_pair", "=", "0", ")", ":", "bands", "=", "[", "None", "]", "*", "2", "if", "band_pair", "==", "0", ":", "bands", "=", "[", "'nir'", ",", "'swir1'", "]", "elif", "band_pair", "==", "1", ":", "bands", "=", "[", "'green'", ",", "'nir'", "]", "else", ":", "raise", "AssertionError", "(", "'The band_pair parameter must be in [0,1]'", ")", "if", "isinstance", "(", "data", ",", "xr", ".", "Dataset", ")", ":", "ndwi", "=", "(", "data", "[", "bands", "[", "0", "]", "]", "-", "data", "[", "bands", "[", "1", "]", "]", ")", "/", "(", "data", "[", "bands", "[", "0", "]", "]", "+", "data", "[", "bands", "[", "1", "]", "]", ")", "if", "normalize", ":", "ndwi", "=", "(", "ndwi", "-", "ndwi", ".", "min", "(", ")", ")", "/", "(", "ndwi", ".", "max", "(", ")", "-", "ndwi", ".", "min", "(", ")", ")", "else", ":", "ndwi", "=", "data", "[", ":", ",", "0", "]", "-", "data", "[", ":", ",", "1", "]", "if", "normalize", ":", "ndwi", "=", "(", "ndwi", "-", "np", ".", "nanmin", "(", "ndwi", ")", ")", "/", "(", "np", ".", "nanmax", "(", "ndwi", ")", "-", "np", ".", "nanmin", "(", "ndwi", ")", ")", "return", "ndwi" ]
[ 43, 0 ]
[ 82, 15 ]
python
en
['en', 'error', 'th']
False
wofs_classify
(dataset_in, clean_mask=None, x_coord='longitude', y_coord='latitude', time_coord='time', no_data=-9999, mosaic=False, enforce_float64=False)
Description: Performs WOfS algorithm on given dataset. Assumption: - The WOfS algorithm is defined for Landsat 5/Landsat 7 References: - Mueller, et al. (2015) "Water observations from space: Mapping surface water from 25 years of Landsat imagery across Australia." Remote Sensing of Environment. - https://github.com/GeoscienceAustralia/eo-tools/blob/stable/eotools/water_classifier.py ----- Inputs: dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain coordinates: time, latitude, longitude variables: blue, green, red, nir, swir1, swir2 x_coord, y_coord, time_coord: (str) - Names of DataArrays in `dataset_in` to use as x, y, and time coordinates. Optional Inputs: clean_mask (nd numpy array with dtype boolean) - true for values user considers clean; if user does not provide a clean mask, all values will be considered clean no_data (int/float) - no data pixel value; default: -9999 mosaic (boolean) - flag to indicate if dataset_in is a mosaic. If mosaic = False, dataset_in should have a time coordinate and wofs will run over each time slice; otherwise, dataset_in should not have a time coordinate and wofs will run over the single mosaicked image enforce_float64 (boolean) - flag to indicate whether or not to enforce float64 calculations; will use float32 if false Output: dataset_out (xarray.DataArray) - wofs water classification results: 0 - not water; 1 - water Throws: ValueError - if dataset_in is an empty xarray.Dataset.
Description: Performs WOfS algorithm on given dataset. Assumption: - The WOfS algorithm is defined for Landsat 5/Landsat 7 References: - Mueller, et al. (2015) "Water observations from space: Mapping surface water from 25 years of Landsat imagery across Australia." Remote Sensing of Environment. - https://github.com/GeoscienceAustralia/eo-tools/blob/stable/eotools/water_classifier.py ----- Inputs: dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain coordinates: time, latitude, longitude variables: blue, green, red, nir, swir1, swir2 x_coord, y_coord, time_coord: (str) - Names of DataArrays in `dataset_in` to use as x, y, and time coordinates. Optional Inputs: clean_mask (nd numpy array with dtype boolean) - true for values user considers clean; if user does not provide a clean mask, all values will be considered clean no_data (int/float) - no data pixel value; default: -9999 mosaic (boolean) - flag to indicate if dataset_in is a mosaic. If mosaic = False, dataset_in should have a time coordinate and wofs will run over each time slice; otherwise, dataset_in should not have a time coordinate and wofs will run over the single mosaicked image enforce_float64 (boolean) - flag to indicate whether or not to enforce float64 calculations; will use float32 if false Output: dataset_out (xarray.DataArray) - wofs water classification results: 0 - not water; 1 - water Throws: ValueError - if dataset_in is an empty xarray.Dataset.
def wofs_classify(dataset_in, clean_mask=None, x_coord='longitude', y_coord='latitude', time_coord='time', no_data=-9999, mosaic=False, enforce_float64=False): """ Description: Performs WOfS algorithm on given dataset. Assumption: - The WOfS algorithm is defined for Landsat 5/Landsat 7 References: - Mueller, et al. (2015) "Water observations from space: Mapping surface water from 25 years of Landsat imagery across Australia." Remote Sensing of Environment. - https://github.com/GeoscienceAustralia/eo-tools/blob/stable/eotools/water_classifier.py ----- Inputs: dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain coordinates: time, latitude, longitude variables: blue, green, red, nir, swir1, swir2 x_coord, y_coord, time_coord: (str) - Names of DataArrays in `dataset_in` to use as x, y, and time coordinates. Optional Inputs: clean_mask (nd numpy array with dtype boolean) - true for values user considers clean; if user does not provide a clean mask, all values will be considered clean no_data (int/float) - no data pixel value; default: -9999 mosaic (boolean) - flag to indicate if dataset_in is a mosaic. If mosaic = False, dataset_in should have a time coordinate and wofs will run over each time slice; otherwise, dataset_in should not have a time coordinate and wofs will run over the single mosaicked image enforce_float64 (boolean) - flag to indicate whether or not to enforce float64 calculations; will use float32 if false Output: dataset_out (xarray.DataArray) - wofs water classification results: 0 - not water; 1 - water Throws: ValueError - if dataset_in is an empty xarray.Dataset. 
""" def _band_ratio(a, b): """ Calculates a normalized ratio index """ return (a - b) / (a + b) def _run_regression(band1, band2, band3, band4, band5, band7): """ Regression analysis based on Australia's training data TODO: Return type """ # Compute normalized ratio indices ndi_52 = _band_ratio(band5, band2) ndi_43 = _band_ratio(band4, band3) ndi_72 = _band_ratio(band7, band2) #classified = np.ones(shape, dtype='uint8') classified = np.full(shape, no_data, dtype='uint8') # Start with the tree's left branch, finishing nodes as needed # Left branch r1 = ndi_52 <= -0.01 r2 = band1 <= 2083.5 classified[r1 & ~r2] = 0 #Node 3 r3 = band7 <= 323.5 _tmp = r1 & r2 _tmp2 = _tmp & r3 _tmp &= ~r3 r4 = ndi_43 <= 0.61 classified[_tmp2 & r4] = 1 #Node 6 classified[_tmp2 & ~r4] = 0 #Node 7 r5 = band1 <= 1400.5 _tmp2 = _tmp & ~r5 r6 = ndi_43 <= -0.01 classified[_tmp2 & r6] = 1 #Node 10 classified[_tmp2 & ~r6] = 0 #Node 11 _tmp &= r5 r7 = ndi_72 <= -0.23 _tmp2 = _tmp & ~r7 r8 = band1 <= 379 classified[_tmp2 & r8] = 1 #Node 14 classified[_tmp2 & ~r8] = 0 #Node 15 _tmp &= r7 r9 = ndi_43 <= 0.22 classified[_tmp & r9] = 1 #Node 17 _tmp &= ~r9 r10 = band1 <= 473 classified[_tmp & r10] = 1 #Node 19 classified[_tmp & ~r10] = 0 #Node 20 # Left branch complete; cleanup del r2, r3, r4, r5, r6, r7, r8, r9, r10 gc.collect() # Right branch of regression tree r1 = ~r1 r11 = ndi_52 <= 0.23 _tmp = r1 & r11 r12 = band1 <= 334.5 _tmp2 = _tmp & ~r12 classified[_tmp2] = 0 #Node 23 _tmp &= r12 r13 = ndi_43 <= 0.54 _tmp2 = _tmp & ~r13 classified[_tmp2] = 0 #Node 25 _tmp &= r13 r14 = ndi_52 <= 0.12 _tmp2 = _tmp & r14 classified[_tmp2] = 1 #Node 27 _tmp &= ~r14 r15 = band3 <= 364.5 _tmp2 = _tmp & r15 r16 = band1 <= 129.5 classified[_tmp2 & r16] = 1 #Node 31 classified[_tmp2 & ~r16] = 0 #Node 32 _tmp &= ~r15 r17 = band1 <= 300.5 _tmp2 = _tmp & ~r17 _tmp &= r17 classified[_tmp] = 1 #Node 33 classified[_tmp2] = 0 #Node 34 _tmp = r1 & ~r11 r18 = ndi_52 <= 0.34 classified[_tmp & ~r18] = 0 #Node 36 _tmp &= r18 
r19 = band1 <= 249.5 classified[_tmp & ~r19] = 0 #Node 38 _tmp &= r19 r20 = ndi_43 <= 0.45 classified[_tmp & ~r20] = 0 #Node 40 _tmp &= r20 r21 = band3 <= 364.5 classified[_tmp & ~r21] = 0 #Node 42 _tmp &= r21 r22 = band1 <= 129.5 classified[_tmp & r22] = 1 #Node 44 classified[_tmp & ~r22] = 0 #Node 45 # Completed regression tree return classified # Default to masking nothing. if clean_mask is None: clean_mask = create_default_clean_mask(dataset_in) # Extract dataset bands needed for calculations blue = dataset_in.blue green = dataset_in.green red = dataset_in.red nir = dataset_in.nir swir1 = dataset_in.swir1 swir2 = dataset_in.swir2 # Enforce float calculations - float64 if user specified, otherwise float32 will do dtype = blue.values.dtype # This assumes all dataset bands will have # the same dtype (should be a reasonable # assumption) if enforce_float64: if dtype != 'float64': blue.values = blue.values.astype('float64') green.values = green.values.astype('float64') red.values = red.values.astype('float64') nir.values = nir.values.astype('float64') swir1.values = swir1.values.astype('float64') swir2.values = swir2.values.astype('float64') else: if dtype == 'float64': pass elif dtype != 'float32': blue.values = blue.values.astype('float32') green.values = green.values.astype('float32') red.values = red.values.astype('float32') nir.values = nir.values.astype('float32') swir1.values = swir1.values.astype('float32') swir2.values = swir2.values.astype('float32') shape = blue.values.shape classified = _run_regression(blue.values, green.values, red.values, nir.values, swir1.values, swir2.values) classified_clean = np.full(classified.shape, no_data, dtype='float64') classified_clean[clean_mask] = classified[clean_mask] # Contains data for clear pixels # Create xarray of data x_coords = dataset_in[x_coord] y_coords = dataset_in[y_coord] time = None coords = None dims = None if mosaic: coords = [y_coords, x_coords] dims = [y_coord, x_coord] else: time_coords = 
dataset_in[time_coord] coords = [time_coords, y_coords, x_coords] dims = [time_coord, y_coord, x_coord] data_array = xr.DataArray(classified_clean, coords=coords, dims=dims) if mosaic: dataset_out = xr.Dataset({'wofs': data_array}, coords={y_coord: y_coords, x_coord: x_coords}) else: dataset_out = xr.Dataset( {'wofs': data_array}, coords={time_coord: time_coords, y_coord: y_coords, x_coord: x_coords}) return dataset_out
[ "def", "wofs_classify", "(", "dataset_in", ",", "clean_mask", "=", "None", ",", "x_coord", "=", "'longitude'", ",", "y_coord", "=", "'latitude'", ",", "time_coord", "=", "'time'", ",", "no_data", "=", "-", "9999", ",", "mosaic", "=", "False", ",", "enforce_float64", "=", "False", ")", ":", "def", "_band_ratio", "(", "a", ",", "b", ")", ":", "\"\"\"\n Calculates a normalized ratio index\n \"\"\"", "return", "(", "a", "-", "b", ")", "/", "(", "a", "+", "b", ")", "def", "_run_regression", "(", "band1", ",", "band2", ",", "band3", ",", "band4", ",", "band5", ",", "band7", ")", ":", "\"\"\"\n Regression analysis based on Australia's training data\n TODO: Return type\n \"\"\"", "# Compute normalized ratio indices", "ndi_52", "=", "_band_ratio", "(", "band5", ",", "band2", ")", "ndi_43", "=", "_band_ratio", "(", "band4", ",", "band3", ")", "ndi_72", "=", "_band_ratio", "(", "band7", ",", "band2", ")", "#classified = np.ones(shape, dtype='uint8')", "classified", "=", "np", ".", "full", "(", "shape", ",", "no_data", ",", "dtype", "=", "'uint8'", ")", "# Start with the tree's left branch, finishing nodes as needed", "# Left branch", "r1", "=", "ndi_52", "<=", "-", "0.01", "r2", "=", "band1", "<=", "2083.5", "classified", "[", "r1", "&", "~", "r2", "]", "=", "0", "#Node 3", "r3", "=", "band7", "<=", "323.5", "_tmp", "=", "r1", "&", "r2", "_tmp2", "=", "_tmp", "&", "r3", "_tmp", "&=", "~", "r3", "r4", "=", "ndi_43", "<=", "0.61", "classified", "[", "_tmp2", "&", "r4", "]", "=", "1", "#Node 6", "classified", "[", "_tmp2", "&", "~", "r4", "]", "=", "0", "#Node 7", "r5", "=", "band1", "<=", "1400.5", "_tmp2", "=", "_tmp", "&", "~", "r5", "r6", "=", "ndi_43", "<=", "-", "0.01", "classified", "[", "_tmp2", "&", "r6", "]", "=", "1", "#Node 10", "classified", "[", "_tmp2", "&", "~", "r6", "]", "=", "0", "#Node 11", "_tmp", "&=", "r5", "r7", "=", "ndi_72", "<=", "-", "0.23", "_tmp2", "=", "_tmp", "&", "~", "r7", "r8", "=", "band1", "<=", "379", "classified", "[", "_tmp2", "&", "r8", 
"]", "=", "1", "#Node 14", "classified", "[", "_tmp2", "&", "~", "r8", "]", "=", "0", "#Node 15", "_tmp", "&=", "r7", "r9", "=", "ndi_43", "<=", "0.22", "classified", "[", "_tmp", "&", "r9", "]", "=", "1", "#Node 17", "_tmp", "&=", "~", "r9", "r10", "=", "band1", "<=", "473", "classified", "[", "_tmp", "&", "r10", "]", "=", "1", "#Node 19", "classified", "[", "_tmp", "&", "~", "r10", "]", "=", "0", "#Node 20", "# Left branch complete; cleanup", "del", "r2", ",", "r3", ",", "r4", ",", "r5", ",", "r6", ",", "r7", ",", "r8", ",", "r9", ",", "r10", "gc", ".", "collect", "(", ")", "# Right branch of regression tree", "r1", "=", "~", "r1", "r11", "=", "ndi_52", "<=", "0.23", "_tmp", "=", "r1", "&", "r11", "r12", "=", "band1", "<=", "334.5", "_tmp2", "=", "_tmp", "&", "~", "r12", "classified", "[", "_tmp2", "]", "=", "0", "#Node 23", "_tmp", "&=", "r12", "r13", "=", "ndi_43", "<=", "0.54", "_tmp2", "=", "_tmp", "&", "~", "r13", "classified", "[", "_tmp2", "]", "=", "0", "#Node 25", "_tmp", "&=", "r13", "r14", "=", "ndi_52", "<=", "0.12", "_tmp2", "=", "_tmp", "&", "r14", "classified", "[", "_tmp2", "]", "=", "1", "#Node 27", "_tmp", "&=", "~", "r14", "r15", "=", "band3", "<=", "364.5", "_tmp2", "=", "_tmp", "&", "r15", "r16", "=", "band1", "<=", "129.5", "classified", "[", "_tmp2", "&", "r16", "]", "=", "1", "#Node 31", "classified", "[", "_tmp2", "&", "~", "r16", "]", "=", "0", "#Node 32", "_tmp", "&=", "~", "r15", "r17", "=", "band1", "<=", "300.5", "_tmp2", "=", "_tmp", "&", "~", "r17", "_tmp", "&=", "r17", "classified", "[", "_tmp", "]", "=", "1", "#Node 33", "classified", "[", "_tmp2", "]", "=", "0", "#Node 34", "_tmp", "=", "r1", "&", "~", "r11", "r18", "=", "ndi_52", "<=", "0.34", "classified", "[", "_tmp", "&", "~", "r18", "]", "=", "0", "#Node 36", "_tmp", "&=", "r18", "r19", "=", "band1", "<=", "249.5", "classified", "[", "_tmp", "&", "~", "r19", "]", "=", "0", "#Node 38", "_tmp", "&=", "r19", "r20", "=", "ndi_43", "<=", "0.45", "classified", "[", "_tmp", "&", 
"~", "r20", "]", "=", "0", "#Node 40", "_tmp", "&=", "r20", "r21", "=", "band3", "<=", "364.5", "classified", "[", "_tmp", "&", "~", "r21", "]", "=", "0", "#Node 42", "_tmp", "&=", "r21", "r22", "=", "band1", "<=", "129.5", "classified", "[", "_tmp", "&", "r22", "]", "=", "1", "#Node 44", "classified", "[", "_tmp", "&", "~", "r22", "]", "=", "0", "#Node 45", "# Completed regression tree", "return", "classified", "# Default to masking nothing.", "if", "clean_mask", "is", "None", ":", "clean_mask", "=", "create_default_clean_mask", "(", "dataset_in", ")", "# Extract dataset bands needed for calculations", "blue", "=", "dataset_in", ".", "blue", "green", "=", "dataset_in", ".", "green", "red", "=", "dataset_in", ".", "red", "nir", "=", "dataset_in", ".", "nir", "swir1", "=", "dataset_in", ".", "swir1", "swir2", "=", "dataset_in", ".", "swir2", "# Enforce float calculations - float64 if user specified, otherwise float32 will do", "dtype", "=", "blue", ".", "values", ".", "dtype", "# This assumes all dataset bands will have", "# the same dtype (should be a reasonable", "# assumption)", "if", "enforce_float64", ":", "if", "dtype", "!=", "'float64'", ":", "blue", ".", "values", "=", "blue", ".", "values", ".", "astype", "(", "'float64'", ")", "green", ".", "values", "=", "green", ".", "values", ".", "astype", "(", "'float64'", ")", "red", ".", "values", "=", "red", ".", "values", ".", "astype", "(", "'float64'", ")", "nir", ".", "values", "=", "nir", ".", "values", ".", "astype", "(", "'float64'", ")", "swir1", ".", "values", "=", "swir1", ".", "values", ".", "astype", "(", "'float64'", ")", "swir2", ".", "values", "=", "swir2", ".", "values", ".", "astype", "(", "'float64'", ")", "else", ":", "if", "dtype", "==", "'float64'", ":", "pass", "elif", "dtype", "!=", "'float32'", ":", "blue", ".", "values", "=", "blue", ".", "values", ".", "astype", "(", "'float32'", ")", "green", ".", "values", "=", "green", ".", "values", ".", "astype", "(", "'float32'", ")", "red", ".", 
"values", "=", "red", ".", "values", ".", "astype", "(", "'float32'", ")", "nir", ".", "values", "=", "nir", ".", "values", ".", "astype", "(", "'float32'", ")", "swir1", ".", "values", "=", "swir1", ".", "values", ".", "astype", "(", "'float32'", ")", "swir2", ".", "values", "=", "swir2", ".", "values", ".", "astype", "(", "'float32'", ")", "shape", "=", "blue", ".", "values", ".", "shape", "classified", "=", "_run_regression", "(", "blue", ".", "values", ",", "green", ".", "values", ",", "red", ".", "values", ",", "nir", ".", "values", ",", "swir1", ".", "values", ",", "swir2", ".", "values", ")", "classified_clean", "=", "np", ".", "full", "(", "classified", ".", "shape", ",", "no_data", ",", "dtype", "=", "'float64'", ")", "classified_clean", "[", "clean_mask", "]", "=", "classified", "[", "clean_mask", "]", "# Contains data for clear pixels", "# Create xarray of data", "x_coords", "=", "dataset_in", "[", "x_coord", "]", "y_coords", "=", "dataset_in", "[", "y_coord", "]", "time", "=", "None", "coords", "=", "None", "dims", "=", "None", "if", "mosaic", ":", "coords", "=", "[", "y_coords", ",", "x_coords", "]", "dims", "=", "[", "y_coord", ",", "x_coord", "]", "else", ":", "time_coords", "=", "dataset_in", "[", "time_coord", "]", "coords", "=", "[", "time_coords", ",", "y_coords", ",", "x_coords", "]", "dims", "=", "[", "time_coord", ",", "y_coord", ",", "x_coord", "]", "data_array", "=", "xr", ".", "DataArray", "(", "classified_clean", ",", "coords", "=", "coords", ",", "dims", "=", "dims", ")", "if", "mosaic", ":", "dataset_out", "=", "xr", ".", "Dataset", "(", "{", "'wofs'", ":", "data_array", "}", ",", "coords", "=", "{", "y_coord", ":", "y_coords", ",", "x_coord", ":", "x_coords", "}", ")", "else", ":", "dataset_out", "=", "xr", ".", "Dataset", "(", "{", "'wofs'", ":", "data_array", "}", ",", "coords", "=", "{", "time_coord", ":", "time_coords", ",", "y_coord", ":", "y_coords", ",", "x_coord", ":", "x_coords", "}", ")", "return", "dataset_out" ]
[ 84, 0 ]
[ 318, 22 ]
python
en
['en', 'error', 'th']
False
main
(classifier, platform, product_type, min_lon, max_lon, min_lat, max_lat, start_date, end_date, dc_config)
Description: Command-line water detection tool - creates a time-series from water analysis performed on data retrieved by the Data Cube, shows plots of the normalized water observations (total water observations / total clear observations), total water observations, and total clear observations, and saves a GeoTIFF of the results Assumptions: The command-line tool assumes there is a measurement called cf_mask Inputs: classifier (str) platform (str) product_type (str) min_lon (str) max_lon (str) min_lat (str) max_lat (str) start_date (str) end_date (str) dc_config (str)
Description: Command-line water detection tool - creates a time-series from water analysis performed on data retrieved by the Data Cube, shows plots of the normalized water observations (total water observations / total clear observations), total water observations, and total clear observations, and saves a GeoTIFF of the results Assumptions: The command-line tool assumes there is a measurement called cf_mask Inputs: classifier (str) platform (str) product_type (str) min_lon (str) max_lon (str) min_lat (str) max_lat (str) start_date (str) end_date (str) dc_config (str)
def main(classifier, platform, product_type, min_lon, max_lon, min_lat, max_lat, start_date, end_date, dc_config): """ Description: Command-line water detection tool - creates a time-series from water analysis performed on data retrieved by the Data Cube, shows plots of the normalized water observations (total water observations / total clear observations), total water observations, and total clear observations, and saves a GeoTIFF of the results Assumptions: The command-line tool assumes there is a measurement called cf_mask Inputs: classifier (str) platform (str) product_type (str) min_lon (str) max_lon (str) min_lat (str) max_lat (str) start_date (str) end_date (str) dc_config (str) """ # Initialize data cube object dc = datacube.Datacube(config=dc_config, app='dc-mosaicker') # Validate arguments if classifier not in ['cfmask', 'ledaps', 'wofs']: print('ERROR: Unknown water classifier. Classifier options: cfmask, ledaps, wofs') return products = dc.list_products() platform_names = set([product[6] for product in products.values]) if platform not in platform_names: print('ERROR: Invalid platform.') print('Valid platforms are:') for name in platform_names: print(name) return product_names = [product[0] for product in products.values] if product_type not in product_names: print('ERROR: Invalid product type.') print('Valid product types are:') for name in product_names: print(name) return try: min_lon = float(args.min_lon) max_lon = float(args.max_lon) min_lat = float(args.min_lat) max_lat = float(args.max_lat) except: print('ERROR: Longitudes/Latitudes must be float values') return try: start_date_str = start_date end_date_str = end_date start_date = datetime.strptime(start_date, '%Y-%m-%d') end_date = datetime.strptime(end_date, '%Y-%m-%d') except: print('ERROR: Invalid date format. 
Date format: YYYY-MM-DD') return if not os.path.exists(dc_config): print('ERROR: Invalid file path for dc_config') return # Retrieve data from Data Cube dataset_in = dc.load( platform=platform, product=product_type, time=(start_date, end_date), lon=(min_lon, max_lon), lat=(min_lat, max_lat)) # Get information needed for saving as GeoTIFF # Spatial ref crs = dataset_in.crs spatial_ref = utilities.get_spatial_ref(crs) # Upper left coordinates ul_lon = dataset_in.longitude.values[0] ul_lat = dataset_in.latitude.values[0] # Resolution products = dc.list_products() resolution = products.resolution[products.name == 'ls7_ledaps'] lon_dist = resolution.values[0][1] lat_dist = resolution.values[0][0] # Rotation lon_rtn = 0 lat_rtn = 0 geotransform = (ul_lon, lon_dist, lon_rtn, ul_lat, lat_rtn, lat_dist) # Run desired classifier water_class = None if classifier == 'cfmask': #TODO: implement when cfmask_classify is refactored return elif classifier == 'ledaps': #TODO: implement when cfmask_classify is refactored return elif classifier == 'wofs': water_class = wofs_classify(dataset_in) dataset_out = utilities.perform_timeseries_analysis(water_class) print(dataset_out) out_file = ( str(min_lon) + '_' + str(min_lat) + '_' + start_date_str + '_' + end_date_str + '_' + classifier + '_.tif') utilities.save_to_geotiff(out_file, gdal.GDT_Float32, dataset_out, geotransform, spatial_ref)
[ "def", "main", "(", "classifier", ",", "platform", ",", "product_type", ",", "min_lon", ",", "max_lon", ",", "min_lat", ",", "max_lat", ",", "start_date", ",", "end_date", ",", "dc_config", ")", ":", "# Initialize data cube object", "dc", "=", "datacube", ".", "Datacube", "(", "config", "=", "dc_config", ",", "app", "=", "'dc-mosaicker'", ")", "# Validate arguments", "if", "classifier", "not", "in", "[", "'cfmask'", ",", "'ledaps'", ",", "'wofs'", "]", ":", "print", "(", "'ERROR: Unknown water classifier. Classifier options: cfmask, ledaps, wofs'", ")", "return", "products", "=", "dc", ".", "list_products", "(", ")", "platform_names", "=", "set", "(", "[", "product", "[", "6", "]", "for", "product", "in", "products", ".", "values", "]", ")", "if", "platform", "not", "in", "platform_names", ":", "print", "(", "'ERROR: Invalid platform.'", ")", "print", "(", "'Valid platforms are:'", ")", "for", "name", "in", "platform_names", ":", "print", "(", "name", ")", "return", "product_names", "=", "[", "product", "[", "0", "]", "for", "product", "in", "products", ".", "values", "]", "if", "product_type", "not", "in", "product_names", ":", "print", "(", "'ERROR: Invalid product type.'", ")", "print", "(", "'Valid product types are:'", ")", "for", "name", "in", "product_names", ":", "print", "(", "name", ")", "return", "try", ":", "min_lon", "=", "float", "(", "args", ".", "min_lon", ")", "max_lon", "=", "float", "(", "args", ".", "max_lon", ")", "min_lat", "=", "float", "(", "args", ".", "min_lat", ")", "max_lat", "=", "float", "(", "args", ".", "max_lat", ")", "except", ":", "print", "(", "'ERROR: Longitudes/Latitudes must be float values'", ")", "return", "try", ":", "start_date_str", "=", "start_date", "end_date_str", "=", "end_date", "start_date", "=", "datetime", ".", "strptime", "(", "start_date", ",", "'%Y-%m-%d'", ")", "end_date", "=", "datetime", ".", "strptime", "(", "end_date", ",", "'%Y-%m-%d'", ")", "except", ":", "print", "(", "'ERROR: Invalid date format. 
Date format: YYYY-MM-DD'", ")", "return", "if", "not", "os", ".", "path", ".", "exists", "(", "dc_config", ")", ":", "print", "(", "'ERROR: Invalid file path for dc_config'", ")", "return", "# Retrieve data from Data Cube", "dataset_in", "=", "dc", ".", "load", "(", "platform", "=", "platform", ",", "product", "=", "product_type", ",", "time", "=", "(", "start_date", ",", "end_date", ")", ",", "lon", "=", "(", "min_lon", ",", "max_lon", ")", ",", "lat", "=", "(", "min_lat", ",", "max_lat", ")", ")", "# Get information needed for saving as GeoTIFF", "# Spatial ref", "crs", "=", "dataset_in", ".", "crs", "spatial_ref", "=", "utilities", ".", "get_spatial_ref", "(", "crs", ")", "# Upper left coordinates", "ul_lon", "=", "dataset_in", ".", "longitude", ".", "values", "[", "0", "]", "ul_lat", "=", "dataset_in", ".", "latitude", ".", "values", "[", "0", "]", "# Resolution", "products", "=", "dc", ".", "list_products", "(", ")", "resolution", "=", "products", ".", "resolution", "[", "products", ".", "name", "==", "'ls7_ledaps'", "]", "lon_dist", "=", "resolution", ".", "values", "[", "0", "]", "[", "1", "]", "lat_dist", "=", "resolution", ".", "values", "[", "0", "]", "[", "0", "]", "# Rotation", "lon_rtn", "=", "0", "lat_rtn", "=", "0", "geotransform", "=", "(", "ul_lon", ",", "lon_dist", ",", "lon_rtn", ",", "ul_lat", ",", "lat_rtn", ",", "lat_dist", ")", "# Run desired classifier", "water_class", "=", "None", "if", "classifier", "==", "'cfmask'", ":", "#TODO: implement when cfmask_classify is refactored", "return", "elif", "classifier", "==", "'ledaps'", ":", "#TODO: implement when cfmask_classify is refactored", "return", "elif", "classifier", "==", "'wofs'", ":", "water_class", "=", "wofs_classify", "(", "dataset_in", ")", "dataset_out", "=", "utilities", ".", "perform_timeseries_analysis", "(", "water_class", ")", "print", "(", "dataset_out", ")", "out_file", "=", "(", "str", "(", "min_lon", ")", "+", "'_'", "+", "str", "(", "min_lat", ")", "+", "'_'", "+", 
"start_date_str", "+", "'_'", "+", "end_date_str", "+", "'_'", "+", "classifier", "+", "'_.tif'", ")", "utilities", ".", "save_to_geotiff", "(", "out_file", ",", "gdal", ".", "GDT_Float32", ",", "dataset_out", ",", "geotransform", ",", "spatial_ref", ")" ]
[ 367, 0 ]
[ 483, 97 ]
python
en
['en', 'error', 'th']
False
test_validation_operator_run_interactive_golden_path
( caplog, data_context_simple_expectation_suite, filesystem_csv_2 )
Interactive mode golden path - pass an existing suite name and an existing validation operator name, select an existing file.
Interactive mode golden path - pass an existing suite name and an existing validation operator name, select an existing file.
def test_validation_operator_run_interactive_golden_path( caplog, data_context_simple_expectation_suite, filesystem_csv_2 ): """ Interactive mode golden path - pass an existing suite name and an existing validation operator name, select an existing file. """ not_so_empty_data_context = data_context_simple_expectation_suite root_dir = not_so_empty_data_context.root_directory os.mkdir(os.path.join(root_dir, "uncommitted")) runner = CliRunner(mix_stderr=False) csv_path = os.path.join(filesystem_csv_2, "f1.csv") result = runner.invoke( cli, [ "validation-operator", "run", "-d", root_dir, "--name", "default", "--suite", "default", ], input=f"{csv_path}\n", catch_exceptions=False, ) stdout = result.stdout assert "Validation failed" in stdout assert result.exit_code == 1 assert_no_logging_messages_or_tracebacks(caplog, result)
[ "def", "test_validation_operator_run_interactive_golden_path", "(", "caplog", ",", "data_context_simple_expectation_suite", ",", "filesystem_csv_2", ")", ":", "not_so_empty_data_context", "=", "data_context_simple_expectation_suite", "root_dir", "=", "not_so_empty_data_context", ".", "root_directory", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ")", ")", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "csv_path", "=", "os", ".", "path", ".", "join", "(", "filesystem_csv_2", ",", "\"f1.csv\"", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"validation-operator\"", ",", "\"run\"", ",", "\"-d\"", ",", "root_dir", ",", "\"--name\"", ",", "\"default\"", ",", "\"--suite\"", ",", "\"default\"", ",", "]", ",", "input", "=", "f\"{csv_path}\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "\"Validation failed\"", "in", "stdout", "assert", "result", ".", "exit_code", "==", "1", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")" ]
[ 13, 0 ]
[ 44, 60 ]
python
en
['en', 'error', 'th']
False
test_validation_operator_run_interactive_pass_non_existing_expectation_suite
( caplog, data_context_parameterized_expectation_suite_no_checkpoint_store, filesystem_csv_2, )
Interactive mode: pass an non-existing suite name and an existing validation operator name, select an existing file.
Interactive mode: pass an non-existing suite name and an existing validation operator name, select an existing file.
def test_validation_operator_run_interactive_pass_non_existing_expectation_suite( caplog, data_context_parameterized_expectation_suite_no_checkpoint_store, filesystem_csv_2, ): """ Interactive mode: pass an non-existing suite name and an existing validation operator name, select an existing file. """ not_so_empty_data_context = ( data_context_parameterized_expectation_suite_no_checkpoint_store ) root_dir = not_so_empty_data_context.root_directory os.mkdir(os.path.join(root_dir, "uncommitted")) runner = CliRunner(mix_stderr=False) csv_path = os.path.join(filesystem_csv_2, "f1.csv") result = runner.invoke( cli, [ "validation-operator", "run", "-d", root_dir, "--name", "default", "--suite", "this.suite.does.not.exist", ], input=f"{csv_path}\n", catch_exceptions=False, ) stdout = result.stdout assert "Could not find a suite named" in stdout assert result.exit_code == 1 assert_no_logging_messages_or_tracebacks(caplog, result)
[ "def", "test_validation_operator_run_interactive_pass_non_existing_expectation_suite", "(", "caplog", ",", "data_context_parameterized_expectation_suite_no_checkpoint_store", ",", "filesystem_csv_2", ",", ")", ":", "not_so_empty_data_context", "=", "(", "data_context_parameterized_expectation_suite_no_checkpoint_store", ")", "root_dir", "=", "not_so_empty_data_context", ".", "root_directory", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ")", ")", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "csv_path", "=", "os", ".", "path", ".", "join", "(", "filesystem_csv_2", ",", "\"f1.csv\"", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"validation-operator\"", ",", "\"run\"", ",", "\"-d\"", ",", "root_dir", ",", "\"--name\"", ",", "\"default\"", ",", "\"--suite\"", ",", "\"this.suite.does.not.exist\"", ",", "]", ",", "input", "=", "f\"{csv_path}\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "\"Could not find a suite named\"", "in", "stdout", "assert", "result", ".", "exit_code", "==", "1", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")" ]
[ 47, 0 ]
[ 82, 60 ]
python
en
['en', 'error', 'th']
False
test_validation_operator_run_interactive_pass_non_existing_operator_name
( caplog, data_context_parameterized_expectation_suite_no_checkpoint_store, filesystem_csv_2, )
Interactive mode: pass an non-existing suite name and an existing validation operator name, select an existing file.
Interactive mode: pass an non-existing suite name and an existing validation operator name, select an existing file.
def test_validation_operator_run_interactive_pass_non_existing_operator_name( caplog, data_context_parameterized_expectation_suite_no_checkpoint_store, filesystem_csv_2, ): """ Interactive mode: pass an non-existing suite name and an existing validation operator name, select an existing file. """ not_so_empty_data_context = ( data_context_parameterized_expectation_suite_no_checkpoint_store ) root_dir = not_so_empty_data_context.root_directory os.mkdir(os.path.join(root_dir, "uncommitted")) runner = CliRunner(mix_stderr=False) csv_path = os.path.join(filesystem_csv_2, "f1.csv") result = runner.invoke( cli, [ "validation-operator", "run", "-d", root_dir, "--name", "this_val_op_does_not_exist", "--suite", "my_dag_node.default", ], input=f"{csv_path}\n", catch_exceptions=False, ) stdout = result.stdout assert "Could not find a validation operator" in stdout assert result.exit_code == 1 assert_no_logging_messages_or_tracebacks(caplog, result)
[ "def", "test_validation_operator_run_interactive_pass_non_existing_operator_name", "(", "caplog", ",", "data_context_parameterized_expectation_suite_no_checkpoint_store", ",", "filesystem_csv_2", ",", ")", ":", "not_so_empty_data_context", "=", "(", "data_context_parameterized_expectation_suite_no_checkpoint_store", ")", "root_dir", "=", "not_so_empty_data_context", ".", "root_directory", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ")", ")", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "csv_path", "=", "os", ".", "path", ".", "join", "(", "filesystem_csv_2", ",", "\"f1.csv\"", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"validation-operator\"", ",", "\"run\"", ",", "\"-d\"", ",", "root_dir", ",", "\"--name\"", ",", "\"this_val_op_does_not_exist\"", ",", "\"--suite\"", ",", "\"my_dag_node.default\"", ",", "]", ",", "input", "=", "f\"{csv_path}\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "\"Could not find a validation operator\"", "in", "stdout", "assert", "result", ".", "exit_code", "==", "1", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")" ]
[ 85, 0 ]
[ 120, 60 ]
python
en
['en', 'error', 'th']
False
test_validation_operator_run_noninteractive_golden_path
( caplog, data_context_simple_expectation_suite, filesystem_csv_2 )
Non-nteractive mode golden path - use the --validation_config_file argument to pass the path to a valid validation config file
Non-nteractive mode golden path - use the --validation_config_file argument to pass the path to a valid validation config file
def test_validation_operator_run_noninteractive_golden_path( caplog, data_context_simple_expectation_suite, filesystem_csv_2 ): """ Non-nteractive mode golden path - use the --validation_config_file argument to pass the path to a valid validation config file """ not_so_empty_data_context = data_context_simple_expectation_suite root_dir = not_so_empty_data_context.root_directory os.mkdir(os.path.join(root_dir, "uncommitted")) csv_path = os.path.join(filesystem_csv_2, "f1.csv") validation_config = { "validation_operator_name": "default", "batches": [ { "batch_kwargs": { "path": csv_path, "datasource": "mydatasource", "reader_method": "read_csv", }, "expectation_suite_names": ["default"], } ], } validation_config_file_path = os.path.join( root_dir, "uncommitted", "validation_config_1.json" ) with open(validation_config_file_path, "w") as f: json.dump(validation_config, f) runner = CliRunner(mix_stderr=False) result = runner.invoke( cli, [ "validation-operator", "run", "-d", root_dir, "--validation_config_file", validation_config_file_path, ], catch_exceptions=False, ) stdout = result.stdout assert "Validation failed" in stdout assert result.exit_code == 1 assert_no_logging_messages_or_tracebacks(caplog, result)
[ "def", "test_validation_operator_run_noninteractive_golden_path", "(", "caplog", ",", "data_context_simple_expectation_suite", ",", "filesystem_csv_2", ")", ":", "not_so_empty_data_context", "=", "data_context_simple_expectation_suite", "root_dir", "=", "not_so_empty_data_context", ".", "root_directory", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ")", ")", "csv_path", "=", "os", ".", "path", ".", "join", "(", "filesystem_csv_2", ",", "\"f1.csv\"", ")", "validation_config", "=", "{", "\"validation_operator_name\"", ":", "\"default\"", ",", "\"batches\"", ":", "[", "{", "\"batch_kwargs\"", ":", "{", "\"path\"", ":", "csv_path", ",", "\"datasource\"", ":", "\"mydatasource\"", ",", "\"reader_method\"", ":", "\"read_csv\"", ",", "}", ",", "\"expectation_suite_names\"", ":", "[", "\"default\"", "]", ",", "}", "]", ",", "}", "validation_config_file_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ",", "\"validation_config_1.json\"", ")", "with", "open", "(", "validation_config_file_path", ",", "\"w\"", ")", "as", "f", ":", "json", ".", "dump", "(", "validation_config", ",", "f", ")", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"validation-operator\"", ",", "\"run\"", ",", "\"-d\"", ",", "root_dir", ",", "\"--validation_config_file\"", ",", "validation_config_file_path", ",", "]", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "\"Validation failed\"", "in", "stdout", "assert", "result", ".", "exit_code", "==", "1", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")" ]
[ 123, 0 ]
[ 171, 60 ]
python
en
['en', 'error', 'th']
False
test_validation_operator_run_noninteractive_validation_config_file_does_not_exist
( caplog, data_context_parameterized_expectation_suite_no_checkpoint_store, filesystem_csv_2, )
Non-nteractive mode. Use the --validation_config_file argument to pass the path to a validation config file that does not exist.
Non-nteractive mode. Use the --validation_config_file argument to pass the path to a validation config file that does not exist.
def test_validation_operator_run_noninteractive_validation_config_file_does_not_exist( caplog, data_context_parameterized_expectation_suite_no_checkpoint_store, filesystem_csv_2, ): """ Non-nteractive mode. Use the --validation_config_file argument to pass the path to a validation config file that does not exist. """ not_so_empty_data_context = ( data_context_parameterized_expectation_suite_no_checkpoint_store ) root_dir = not_so_empty_data_context.root_directory os.mkdir(os.path.join(root_dir, "uncommitted")) validation_config_file_path = os.path.join( root_dir, "uncommitted", "validation_config_1.json" ) runner = CliRunner(mix_stderr=False) result = runner.invoke( cli, [ "validation-operator", "run", "-d", root_dir, "--validation_config_file", validation_config_file_path, ], catch_exceptions=False, ) stdout = result.stdout assert "Failed to process the --validation_config_file argument" in stdout assert result.exit_code == 1 assert_no_logging_messages_or_tracebacks(caplog, result)
[ "def", "test_validation_operator_run_noninteractive_validation_config_file_does_not_exist", "(", "caplog", ",", "data_context_parameterized_expectation_suite_no_checkpoint_store", ",", "filesystem_csv_2", ",", ")", ":", "not_so_empty_data_context", "=", "(", "data_context_parameterized_expectation_suite_no_checkpoint_store", ")", "root_dir", "=", "not_so_empty_data_context", ".", "root_directory", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ")", ")", "validation_config_file_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ",", "\"validation_config_1.json\"", ")", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"validation-operator\"", ",", "\"run\"", ",", "\"-d\"", ",", "root_dir", ",", "\"--validation_config_file\"", ",", "validation_config_file_path", ",", "]", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "\"Failed to process the --validation_config_file argument\"", "in", "stdout", "assert", "result", ".", "exit_code", "==", "1", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")" ]
[ 174, 0 ]
[ 209, 60 ]
python
en
['en', 'error', 'th']
False
test_validation_operator_run_noninteractive_validation_config_file_does_is_misconfigured
( caplog, data_context_parameterized_expectation_suite_no_checkpoint_store, filesystem_csv_2, )
Non-nteractive mode. Use the --validation_config_file argument to pass the path to a validation config file that is misconfigured - one of the batches does not have expectation_suite_names attribute
Non-nteractive mode. Use the --validation_config_file argument to pass the path to a validation config file that is misconfigured - one of the batches does not have expectation_suite_names attribute
def test_validation_operator_run_noninteractive_validation_config_file_does_is_misconfigured( caplog, data_context_parameterized_expectation_suite_no_checkpoint_store, filesystem_csv_2, ): """ Non-nteractive mode. Use the --validation_config_file argument to pass the path to a validation config file that is misconfigured - one of the batches does not have expectation_suite_names attribute """ not_so_empty_data_context = ( data_context_parameterized_expectation_suite_no_checkpoint_store ) root_dir = not_so_empty_data_context.root_directory os.mkdir(os.path.join(root_dir, "uncommitted")) csv_path = os.path.join(filesystem_csv_2, "f1.csv") validation_config = { "validation_operator_name": "default", "batches": [ { "batch_kwargs": { "path": csv_path, "datasource": "mydatasource", "reader_method": "read_csv", }, "wrong_attribute_expectation_suite_names": ["my_dag_node.default1"], } ], } validation_config_file_path = os.path.join( root_dir, "uncommitted", "validation_config_1.json" ) with open(validation_config_file_path, "w") as f: json.dump(validation_config, f) runner = CliRunner(mix_stderr=False) result = runner.invoke( cli, [ "validation-operator", "run", "-d", root_dir, "--validation_config_file", validation_config_file_path, ], catch_exceptions=False, ) stdout = result.stdout assert ( "is misconfigured: Each batch must have a list of expectation suite names" in stdout ) assert result.exit_code == 1 assert_no_logging_messages_or_tracebacks(caplog, result)
[ "def", "test_validation_operator_run_noninteractive_validation_config_file_does_is_misconfigured", "(", "caplog", ",", "data_context_parameterized_expectation_suite_no_checkpoint_store", ",", "filesystem_csv_2", ",", ")", ":", "not_so_empty_data_context", "=", "(", "data_context_parameterized_expectation_suite_no_checkpoint_store", ")", "root_dir", "=", "not_so_empty_data_context", ".", "root_directory", "os", ".", "mkdir", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ")", ")", "csv_path", "=", "os", ".", "path", ".", "join", "(", "filesystem_csv_2", ",", "\"f1.csv\"", ")", "validation_config", "=", "{", "\"validation_operator_name\"", ":", "\"default\"", ",", "\"batches\"", ":", "[", "{", "\"batch_kwargs\"", ":", "{", "\"path\"", ":", "csv_path", ",", "\"datasource\"", ":", "\"mydatasource\"", ",", "\"reader_method\"", ":", "\"read_csv\"", ",", "}", ",", "\"wrong_attribute_expectation_suite_names\"", ":", "[", "\"my_dag_node.default1\"", "]", ",", "}", "]", ",", "}", "validation_config_file_path", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ",", "\"validation_config_1.json\"", ")", "with", "open", "(", "validation_config_file_path", ",", "\"w\"", ")", "as", "f", ":", "json", ".", "dump", "(", "validation_config", ",", "f", ")", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"validation-operator\"", ",", "\"run\"", ",", "\"-d\"", ",", "root_dir", ",", "\"--validation_config_file\"", ",", "validation_config_file_path", ",", "]", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "(", "\"is misconfigured: Each batch must have a list of expectation suite names\"", "in", "stdout", ")", "assert", "result", ".", "exit_code", "==", "1", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")" ]
[ 212, 0 ]
[ 268, 60 ]
python
en
['en', 'error', 'th']
False
_capture_ansi_codes_to_file
(result)
Use this to capture the ANSI color codes when updating snapshots. NOT DEAD CODE.
Use this to capture the ANSI color codes when updating snapshots. NOT DEAD CODE.
def _capture_ansi_codes_to_file(result): """ Use this to capture the ANSI color codes when updating snapshots. NOT DEAD CODE. """ with open("ansi.txt", "w") as f: f.write(result.output.strip())
[ "def", "_capture_ansi_codes_to_file", "(", "result", ")", ":", "with", "open", "(", "\"ansi.txt\"", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "result", ".", "output", ".", "strip", "(", ")", ")" ]
[ 388, 0 ]
[ 394, 38 ]
python
en
['en', 'error', 'th']
False
BaseYamlConfig.to_yaml
(self, outfile)
:returns None (but writes a YAML file containing the project configuration)
:returns None (but writes a YAML file containing the project configuration)
def to_yaml(self, outfile): """ :returns None (but writes a YAML file containing the project configuration) """ yaml.dump(self.commented_map, outfile)
[ "def", "to_yaml", "(", "self", ",", "outfile", ")", ":", "yaml", ".", "dump", "(", "self", ".", "commented_map", ",", "outfile", ")" ]
[ 98, 4 ]
[ 102, 46 ]
python
en
['en', 'error', 'th']
False
BaseYamlConfig.to_yaml_str
(self)
:returns a YAML string containing the project configuration
:returns a YAML string containing the project configuration
def to_yaml_str(self) -> str: """ :returns a YAML string containing the project configuration """ return object_to_yaml_str(self.commented_map)
[ "def", "to_yaml_str", "(", "self", ")", "->", "str", ":", "return", "object_to_yaml_str", "(", "self", ".", "commented_map", ")" ]
[ 104, 4 ]
[ 108, 53 ]
python
en
['en', 'error', 'th']
False
BaseYamlConfig.to_json_dict
(self)
:returns a JSON-serialiable dict containing the project configuration
:returns a JSON-serialiable dict containing the project configuration
def to_json_dict(self) -> dict: """ :returns a JSON-serialiable dict containing the project configuration """ commented_map: CommentedMap = self.commented_map return convert_to_json_serializable(data=commented_map)
[ "def", "to_json_dict", "(", "self", ")", "->", "dict", ":", "commented_map", ":", "CommentedMap", "=", "self", ".", "commented_map", "return", "convert_to_json_serializable", "(", "data", "=", "commented_map", ")" ]
[ 110, 4 ]
[ 115, 63 ]
python
en
['en', 'error', 'th']
False
DataContextConfigSchema.handle_error
(self, exc, data, **kwargs)
Log and raise our custom exception when (de)serialization fails.
Log and raise our custom exception when (de)serialization fails.
def handle_error(self, exc, data, **kwargs): """Log and raise our custom exception when (de)serialization fails.""" if ( exc and exc.messages and isinstance(exc.messages, dict) and all([key is None for key in exc.messages.keys()]) ): exc.messages = list(itertools.chain.from_iterable(exc.messages.values())) message: str = ( f"Error while processing DataContextConfig: {' '.join(exc.messages)}" ) logger.error(message) raise ge_exceptions.InvalidDataContextConfigError( message=message, )
[ "def", "handle_error", "(", "self", ",", "exc", ",", "data", ",", "*", "*", "kwargs", ")", ":", "if", "(", "exc", "and", "exc", ".", "messages", "and", "isinstance", "(", "exc", ".", "messages", ",", "dict", ")", "and", "all", "(", "[", "key", "is", "None", "for", "key", "in", "exc", ".", "messages", ".", "keys", "(", ")", "]", ")", ")", ":", "exc", ".", "messages", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "exc", ".", "messages", ".", "values", "(", ")", ")", ")", "message", ":", "str", "=", "(", "f\"Error while processing DataContextConfig: {' '.join(exc.messages)}\"", ")", "logger", ".", "error", "(", "message", ")", "raise", "ge_exceptions", ".", "InvalidDataContextConfigError", "(", "message", "=", "message", ",", ")" ]
[ 930, 4 ]
[ 946, 9 ]
python
en
['en', 'en', 'en']
True
docker_pull
(tag: str)
`docker pull` and then return the image ID
`docker pull` and then return the image ID
def docker_pull(tag: str) -> str: """`docker pull` and then return the image ID""" run(['docker', 'pull', tag]) return run_txtcapture(['docker', 'image', 'inspect', tag, '--format={{.Id}}'])
[ "def", "docker_pull", "(", "tag", ":", "str", ")", "->", "str", ":", "run", "(", "[", "'docker'", ",", "'pull'", ",", "tag", "]", ")", "return", "run_txtcapture", "(", "[", "'docker'", ",", "'image'", ",", "'inspect'", ",", "tag", ",", "'--format={{.Id}}'", "]", ")" ]
[ 17, 0 ]
[ 20, 82 ]
python
en
['en', 'en', 'en']
True
xr_scale
(data, data_vars=None, min_max=None, scaling='norm', copy=False)
Scales an xarray Dataset or DataArray with standard scaling or norm scaling. Parameters ---------- data: xarray.Dataset or xarray.DataArray The NumPy array to scale. data_vars: list The names of the data variables to scale. min_max: tuple A 2-tuple which specifies the desired range of the final output - the minimum and the maximum, in that order. If all values are the same, all values will become min_max[0]. scaling: str The options are ['std', 'norm']. The option 'std' standardizes. The option 'norm' normalizes (min-max scales). copy: bool Whether or not to copy `data` before scaling.
Scales an xarray Dataset or DataArray with standard scaling or norm scaling. Parameters ---------- data: xarray.Dataset or xarray.DataArray The NumPy array to scale. data_vars: list The names of the data variables to scale. min_max: tuple A 2-tuple which specifies the desired range of the final output - the minimum and the maximum, in that order. If all values are the same, all values will become min_max[0]. scaling: str The options are ['std', 'norm']. The option 'std' standardizes. The option 'norm' normalizes (min-max scales). copy: bool Whether or not to copy `data` before scaling.
def xr_scale(data, data_vars=None, min_max=None, scaling='norm', copy=False): """ Scales an xarray Dataset or DataArray with standard scaling or norm scaling. Parameters ---------- data: xarray.Dataset or xarray.DataArray The NumPy array to scale. data_vars: list The names of the data variables to scale. min_max: tuple A 2-tuple which specifies the desired range of the final output - the minimum and the maximum, in that order. If all values are the same, all values will become min_max[0]. scaling: str The options are ['std', 'norm']. The option 'std' standardizes. The option 'norm' normalizes (min-max scales). copy: bool Whether or not to copy `data` before scaling. """ data = data.copy() if copy else data if isinstance(data, xr.Dataset): data_arr_names = list(data.data_vars) if data_vars is None else data_vars for data_arr_name in data_arr_names: data_arr = data[data_arr_name] data_arr.values = np_scale(data_arr.values, min_max=min_max, scaling=scaling) elif isinstance(data, xr.DataArray): data.values = np_scale(data.values, min_max=min_max, scaling=scaling) return data
[ "def", "xr_scale", "(", "data", ",", "data_vars", "=", "None", ",", "min_max", "=", "None", ",", "scaling", "=", "'norm'", ",", "copy", "=", "False", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "copy", "else", "data", "if", "isinstance", "(", "data", ",", "xr", ".", "Dataset", ")", ":", "data_arr_names", "=", "list", "(", "data", ".", "data_vars", ")", "if", "data_vars", "is", "None", "else", "data_vars", "for", "data_arr_name", "in", "data_arr_names", ":", "data_arr", "=", "data", "[", "data_arr_name", "]", "data_arr", ".", "values", "=", "np_scale", "(", "data_arr", ".", "values", ",", "min_max", "=", "min_max", ",", "scaling", "=", "scaling", ")", "elif", "isinstance", "(", "data", ",", "xr", ".", "DataArray", ")", ":", "data", ".", "values", "=", "np_scale", "(", "data", ".", "values", ",", "min_max", "=", "min_max", ",", "scaling", "=", "scaling", ")", "return", "data" ]
[ 3, 0 ]
[ 30, 15 ]
python
en
['en', 'error', 'th']
False
np_scale
(arr, pop_arr=None, pop_min_max=None, pop_mean_std=None, min_max=None, scaling='norm')
Scales a NumPy array with standard scaling or norm scaling, default to norm scaling. Parameters ---------- arr: numpy.ndarray The NumPy array to scale. pop_arr: numpy.ndarray, optional The NumPy array to treat as the population. If specified, all members of `arr` must be within the range of `pop_arr` or `min_max` must be specified. pop_min_max: list-like, optional The population minimum and maximum, in that order. Supercedes `pop_arr` when normalizing. pop_mean_std: list-like, optional The population mean and standard deviation, in that order. Supercedes `pop_arr` when standard scaling. min_max: list-like, optional The desired minimum and maximum of the final output, in that order. If all values are the same, all values will become `min_max[0]`. scaling: str, optional The options are ['std', 'norm']. The option 'std' standardizes. The option 'norm' normalizes (min-max scales).
Scales a NumPy array with standard scaling or norm scaling, default to norm scaling. Parameters ---------- arr: numpy.ndarray The NumPy array to scale. pop_arr: numpy.ndarray, optional The NumPy array to treat as the population. If specified, all members of `arr` must be within the range of `pop_arr` or `min_max` must be specified. pop_min_max: list-like, optional The population minimum and maximum, in that order. Supercedes `pop_arr` when normalizing. pop_mean_std: list-like, optional The population mean and standard deviation, in that order. Supercedes `pop_arr` when standard scaling. min_max: list-like, optional The desired minimum and maximum of the final output, in that order. If all values are the same, all values will become `min_max[0]`. scaling: str, optional The options are ['std', 'norm']. The option 'std' standardizes. The option 'norm' normalizes (min-max scales).
def np_scale(arr, pop_arr=None, pop_min_max=None, pop_mean_std=None, min_max=None, scaling='norm'): """ Scales a NumPy array with standard scaling or norm scaling, default to norm scaling. Parameters ---------- arr: numpy.ndarray The NumPy array to scale. pop_arr: numpy.ndarray, optional The NumPy array to treat as the population. If specified, all members of `arr` must be within the range of `pop_arr` or `min_max` must be specified. pop_min_max: list-like, optional The population minimum and maximum, in that order. Supercedes `pop_arr` when normalizing. pop_mean_std: list-like, optional The population mean and standard deviation, in that order. Supercedes `pop_arr` when standard scaling. min_max: list-like, optional The desired minimum and maximum of the final output, in that order. If all values are the same, all values will become `min_max[0]`. scaling: str, optional The options are ['std', 'norm']. The option 'std' standardizes. The option 'norm' normalizes (min-max scales). """ pop_arr = arr if pop_arr is None else pop_arr if scaling == 'norm': pop_min, pop_max = (pop_min_max[0], pop_min_max[1]) if pop_min_max is not None else (np.nanmin(pop_arr), np.nanmax(pop_arr)) numerator, denominator = arr - pop_min, pop_max - pop_min elif scaling == 'std': mean, std = mean_std if mean_std is not None else (np.nanmean(pop_arr), np.nanstd(pop_arr)) numerator, denominator = arr - mean, std # Primary scaling new_arr = arr if denominator > 0: new_arr = numerator / denominator # Optional final scaling. if min_max is not None: new_arr = np.interp(new_arr, (np.nanmin(new_arr), np.nanmax(new_arr)), min_max) if denominator > 0 else \ np.full_like(new_arr, min_max[0]) # The values are identical - set all values to the low end of the desired range. return new_arr
[ "def", "np_scale", "(", "arr", ",", "pop_arr", "=", "None", ",", "pop_min_max", "=", "None", ",", "pop_mean_std", "=", "None", ",", "min_max", "=", "None", ",", "scaling", "=", "'norm'", ")", ":", "pop_arr", "=", "arr", "if", "pop_arr", "is", "None", "else", "pop_arr", "if", "scaling", "==", "'norm'", ":", "pop_min", ",", "pop_max", "=", "(", "pop_min_max", "[", "0", "]", ",", "pop_min_max", "[", "1", "]", ")", "if", "pop_min_max", "is", "not", "None", "else", "(", "np", ".", "nanmin", "(", "pop_arr", ")", ",", "np", ".", "nanmax", "(", "pop_arr", ")", ")", "numerator", ",", "denominator", "=", "arr", "-", "pop_min", ",", "pop_max", "-", "pop_min", "elif", "scaling", "==", "'std'", ":", "mean", ",", "std", "=", "mean_std", "if", "mean_std", "is", "not", "None", "else", "(", "np", ".", "nanmean", "(", "pop_arr", ")", ",", "np", ".", "nanstd", "(", "pop_arr", ")", ")", "numerator", ",", "denominator", "=", "arr", "-", "mean", ",", "std", "# Primary scaling", "new_arr", "=", "arr", "if", "denominator", ">", "0", ":", "new_arr", "=", "numerator", "/", "denominator", "# Optional final scaling.", "if", "min_max", "is", "not", "None", ":", "new_arr", "=", "np", ".", "interp", "(", "new_arr", ",", "(", "np", ".", "nanmin", "(", "new_arr", ")", ",", "np", ".", "nanmax", "(", "new_arr", ")", ")", ",", "min_max", ")", "if", "denominator", ">", "0", "else", "np", ".", "full_like", "(", "new_arr", ",", "min_max", "[", "0", "]", ")", "# The values are identical - set all values to the low end of the desired range.", "return", "new_arr" ]
[ 32, 0 ]
[ 72, 18 ]
python
en
['en', 'error', 'th']
False
qrot
(q, v)
Rotate vector(s) v about the rotation described by quaternion(s) q. Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, where * denotes any number of dimensions. Returns a tensor of shape (*, 3).
Rotate vector(s) v about the rotation described by quaternion(s) q. Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, where * denotes any number of dimensions. Returns a tensor of shape (*, 3).
def qrot(q, v): """ Rotate vector(s) v about the rotation described by quaternion(s) q. Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v, where * denotes any number of dimensions. Returns a tensor of shape (*, 3). """ assert q.shape[-1] == 4 assert v.shape[-1] == 3 assert q.shape[:-1] == v.shape[:-1] qvec = q[..., 1:] uv = torch.cross(qvec, v, dim=len(q.shape) - 1) uuv = torch.cross(qvec, uv, dim=len(q.shape) - 1) return (v + 2 * (q[..., :1] * uv + uuv))
[ "def", "qrot", "(", "q", ",", "v", ")", ":", "assert", "q", ".", "shape", "[", "-", "1", "]", "==", "4", "assert", "v", ".", "shape", "[", "-", "1", "]", "==", "3", "assert", "q", ".", "shape", "[", ":", "-", "1", "]", "==", "v", ".", "shape", "[", ":", "-", "1", "]", "qvec", "=", "q", "[", "...", ",", "1", ":", "]", "uv", "=", "torch", ".", "cross", "(", "qvec", ",", "v", ",", "dim", "=", "len", "(", "q", ".", "shape", ")", "-", "1", ")", "uuv", "=", "torch", ".", "cross", "(", "qvec", ",", "uv", ",", "dim", "=", "len", "(", "q", ".", "shape", ")", "-", "1", ")", "return", "(", "v", "+", "2", "*", "(", "q", "[", "...", ",", ":", "1", "]", "*", "uv", "+", "uuv", ")", ")" ]
[ 9, 0 ]
[ 23, 44 ]
python
en
['en', 'error', 'th']
False
MacTool.Dispatch
(self, args)
Dispatches a string command to a method.
Dispatches a string command to a method.
def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) return getattr(self, method)(*args[1:])
[ "def", "Dispatch", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "1", ":", "raise", "Exception", "(", "\"Not enough arguments\"", ")", "method", "=", "\"Exec%s\"", "%", "self", ".", "_CommandifyName", "(", "args", "[", "0", "]", ")", "return", "getattr", "(", "self", ",", "method", ")", "(", "*", "args", "[", "1", ":", "]", ")" ]
[ 35, 2 ]
[ 41, 43 ]
python
en
['en', 'en', 'en']
True
MacTool._CommandifyName
(self, name_string)
Transforms a tool name like copy-info-plist to CopyInfoPlist
Transforms a tool name like copy-info-plist to CopyInfoPlist
def _CommandifyName(self, name_string): """Transforms a tool name like copy-info-plist to CopyInfoPlist""" return name_string.title().replace('-', '')
[ "def", "_CommandifyName", "(", "self", ",", "name_string", ")", ":", "return", "name_string", ".", "title", "(", ")", ".", "replace", "(", "'-'", ",", "''", ")" ]
[ 43, 2 ]
[ 45, 47 ]
python
en
['en', 'pl', 'en']
True
MacTool.ExecCopyBundleResource
(self, source, dest, convert_to_binary)
Copies a resource file to the bundle/Resources directory, performing any necessary compilation on each resource.
Copies a resource file to the bundle/Resources directory, performing any necessary compilation on each resource.
def ExecCopyBundleResource(self, source, dest, convert_to_binary): """Copies a resource file to the bundle/Resources directory, performing any necessary compilation on each resource.""" extension = os.path.splitext(source)[1].lower() if os.path.isdir(source): # Copy tree. # TODO(thakis): This copies file attributes like mtime, while the # single-file branch below doesn't. This should probably be changed to # be consistent with the single-file branch. if os.path.exists(dest): shutil.rmtree(dest) shutil.copytree(source, dest) elif extension == '.xib': return self._CopyXIBFile(source, dest) elif extension == '.storyboard': return self._CopyXIBFile(source, dest) elif extension == '.strings': self._CopyStringsFile(source, dest, convert_to_binary) else: shutil.copy(source, dest)
[ "def", "ExecCopyBundleResource", "(", "self", ",", "source", ",", "dest", ",", "convert_to_binary", ")", ":", "extension", "=", "os", ".", "path", ".", "splitext", "(", "source", ")", "[", "1", "]", ".", "lower", "(", ")", "if", "os", ".", "path", ".", "isdir", "(", "source", ")", ":", "# Copy tree.", "# TODO(thakis): This copies file attributes like mtime, while the", "# single-file branch below doesn't. This should probably be changed to", "# be consistent with the single-file branch.", "if", "os", ".", "path", ".", "exists", "(", "dest", ")", ":", "shutil", ".", "rmtree", "(", "dest", ")", "shutil", ".", "copytree", "(", "source", ",", "dest", ")", "elif", "extension", "==", "'.xib'", ":", "return", "self", ".", "_CopyXIBFile", "(", "source", ",", "dest", ")", "elif", "extension", "==", "'.storyboard'", ":", "return", "self", ".", "_CopyXIBFile", "(", "source", ",", "dest", ")", "elif", "extension", "==", "'.strings'", ":", "self", ".", "_CopyStringsFile", "(", "source", ",", "dest", ",", "convert_to_binary", ")", "else", ":", "shutil", ".", "copy", "(", "source", ",", "dest", ")" ]
[ 47, 2 ]
[ 66, 31 ]
python
en
['en', 'en', 'en']
True
MacTool._CopyXIBFile
(self, source, dest)
Compiles a XIB file with ibtool into a binary plist in the bundle.
Compiles a XIB file with ibtool into a binary plist in the bundle.
def _CopyXIBFile(self, source, dest): """Compiles a XIB file with ibtool into a binary plist in the bundle.""" # ibtool sometimes crashes with relative paths. See crbug.com/314728. base = os.path.dirname(os.path.realpath(__file__)) if os.path.relpath(source): source = os.path.join(base, source) if os.path.relpath(dest): dest = os.path.join(base, dest) args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices', '--output-format', 'human-readable-text', '--compile', dest, source] ibtool_section_re = re.compile(r'/\*.*\*/') ibtool_re = re.compile(r'.*note:.*is clipping its content') ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE) current_section_header = None for line in ibtoolout.stdout: if ibtool_section_re.match(line): current_section_header = line elif not ibtool_re.match(line): if current_section_header: sys.stdout.write(current_section_header) current_section_header = None sys.stdout.write(line) return ibtoolout.returncode
[ "def", "_CopyXIBFile", "(", "self", ",", "source", ",", "dest", ")", ":", "# ibtool sometimes crashes with relative paths. See crbug.com/314728.", "base", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "if", "os", ".", "path", ".", "relpath", "(", "source", ")", ":", "source", "=", "os", ".", "path", ".", "join", "(", "base", ",", "source", ")", "if", "os", ".", "path", ".", "relpath", "(", "dest", ")", ":", "dest", "=", "os", ".", "path", ".", "join", "(", "base", ",", "dest", ")", "args", "=", "[", "'xcrun'", ",", "'ibtool'", ",", "'--errors'", ",", "'--warnings'", ",", "'--notices'", ",", "'--output-format'", ",", "'human-readable-text'", ",", "'--compile'", ",", "dest", ",", "source", "]", "ibtool_section_re", "=", "re", ".", "compile", "(", "r'/\\*.*\\*/'", ")", "ibtool_re", "=", "re", ".", "compile", "(", "r'.*note:.*is clipping its content'", ")", "ibtoolout", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "current_section_header", "=", "None", "for", "line", "in", "ibtoolout", ".", "stdout", ":", "if", "ibtool_section_re", ".", "match", "(", "line", ")", ":", "current_section_header", "=", "line", "elif", "not", "ibtool_re", ".", "match", "(", "line", ")", ":", "if", "current_section_header", ":", "sys", ".", "stdout", ".", "write", "(", "current_section_header", ")", "current_section_header", "=", "None", "sys", ".", "stdout", ".", "write", "(", "line", ")", "return", "ibtoolout", ".", "returncode" ]
[ 68, 2 ]
[ 92, 31 ]
python
en
['en', 'en', 'en']
True
MacTool._CopyStringsFile
(self, source, dest, convert_to_binary)
Copies a .strings file using iconv to reconvert the input into UTF-16.
Copies a .strings file using iconv to reconvert the input into UTF-16.
def _CopyStringsFile(self, source, dest, convert_to_binary): """Copies a .strings file using iconv to reconvert the input into UTF-16.""" input_code = self._DetectInputEncoding(source) or "UTF-8" # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints # CFPropertyListCreateFromXMLData(): Old-style plist parser: missing # semicolon in dictionary. # on invalid files. Do the same kind of validation. import CoreFoundation s = open(source, 'rb').read() d = CoreFoundation.CFDataCreate(None, s, len(s)) _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None) if error: return fp = open(dest, 'wb') fp.write(s.decode(input_code).encode('UTF-16')) fp.close() if convert_to_binary == 'True': self._ConvertToBinary(dest)
[ "def", "_CopyStringsFile", "(", "self", ",", "source", ",", "dest", ",", "convert_to_binary", ")", ":", "input_code", "=", "self", ".", "_DetectInputEncoding", "(", "source", ")", "or", "\"UTF-8\"", "# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call", "# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints", "# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing", "# semicolon in dictionary.", "# on invalid files. Do the same kind of validation.", "import", "CoreFoundation", "s", "=", "open", "(", "source", ",", "'rb'", ")", ".", "read", "(", ")", "d", "=", "CoreFoundation", ".", "CFDataCreate", "(", "None", ",", "s", ",", "len", "(", "s", ")", ")", "_", ",", "error", "=", "CoreFoundation", ".", "CFPropertyListCreateFromXMLData", "(", "None", ",", "d", ",", "0", ",", "None", ")", "if", "error", ":", "return", "fp", "=", "open", "(", "dest", ",", "'wb'", ")", "fp", ".", "write", "(", "s", ".", "decode", "(", "input_code", ")", ".", "encode", "(", "'UTF-16'", ")", ")", "fp", ".", "close", "(", ")", "if", "convert_to_binary", "==", "'True'", ":", "self", ".", "_ConvertToBinary", "(", "dest", ")" ]
[ 98, 2 ]
[ 119, 33 ]
python
en
['en', 'en', 'en']
True
MacTool._DetectInputEncoding
(self, file_name)
Reads the first few bytes from file_name and tries to guess the text encoding. Returns None as a guess if it can't detect it.
Reads the first few bytes from file_name and tries to guess the text encoding. Returns None as a guess if it can't detect it.
def _DetectInputEncoding(self, file_name): """Reads the first few bytes from file_name and tries to guess the text encoding. Returns None as a guess if it can't detect it.""" fp = open(file_name, 'rb') try: header = fp.read(3) except e: fp.close() return None fp.close() if header.startswith("\xFE\xFF"): return "UTF-16" elif header.startswith("\xFF\xFE"): return "UTF-16" elif header.startswith("\xEF\xBB\xBF"): return "UTF-8" else: return None
[ "def", "_DetectInputEncoding", "(", "self", ",", "file_name", ")", ":", "fp", "=", "open", "(", "file_name", ",", "'rb'", ")", "try", ":", "header", "=", "fp", ".", "read", "(", "3", ")", "except", "e", ":", "fp", ".", "close", "(", ")", "return", "None", "fp", ".", "close", "(", ")", "if", "header", ".", "startswith", "(", "\"\\xFE\\xFF\"", ")", ":", "return", "\"UTF-16\"", "elif", "header", ".", "startswith", "(", "\"\\xFF\\xFE\"", ")", ":", "return", "\"UTF-16\"", "elif", "header", ".", "startswith", "(", "\"\\xEF\\xBB\\xBF\"", ")", ":", "return", "\"UTF-8\"", "else", ":", "return", "None" ]
[ 121, 2 ]
[ 138, 17 ]
python
en
['en', 'en', 'en']
True
MacTool.ExecCopyInfoPlist
(self, source, dest, convert_to_binary, *keys)
Copies the |source| Info.plist to the destination directory |dest|.
Copies the |source| Info.plist to the destination directory |dest|.
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys): """Copies the |source| Info.plist to the destination directory |dest|.""" # Read the source Info.plist into memory. fd = open(source, 'r') lines = fd.read() fd.close() # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild). plist = plistlib.readPlistFromString(lines) if keys: plist = dict(plist.items() + json.loads(keys[0]).items()) lines = plistlib.writePlistToString(plist) # Go through all the environment variables and replace them as variables in # the file. IDENT_RE = re.compile(r'[/\s]') for key in os.environ: if key.startswith('_'): continue evar = '${%s}' % key evalue = os.environ[key] lines = string.replace(lines, evar, evalue) # Xcode supports various suffices on environment variables, which are # all undocumented. :rfc1034identifier is used in the standard project # template these days, and :identifier was used earlier. They are used to # convert non-url characters into things that look like valid urls -- # except that the replacement character for :identifier, '_' isn't valid # in a URL either -- oops, hence :rfc1034identifier was born. evar = '${%s:identifier}' % key evalue = IDENT_RE.sub('_', os.environ[key]) lines = string.replace(lines, evar, evalue) evar = '${%s:rfc1034identifier}' % key evalue = IDENT_RE.sub('-', os.environ[key]) lines = string.replace(lines, evar, evalue) # Remove any keys with values that haven't been replaced. lines = lines.split('\n') for i in range(len(lines)): if lines[i].strip().startswith("<string>${"): lines[i] = None lines[i - 1] = None lines = '\n'.join(filter(lambda x: x is not None, lines)) # Write out the file with variables replaced. fd = open(dest, 'w') fd.write(lines) fd.close() # Now write out PkgInfo file now that the Info.plist file has been # "compiled". self._WritePkgInfo(dest) if convert_to_binary == 'True': self._ConvertToBinary(dest)
[ "def", "ExecCopyInfoPlist", "(", "self", ",", "source", ",", "dest", ",", "convert_to_binary", ",", "*", "keys", ")", ":", "# Read the source Info.plist into memory.", "fd", "=", "open", "(", "source", ",", "'r'", ")", "lines", "=", "fd", ".", "read", "(", ")", "fd", ".", "close", "(", ")", "# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).", "plist", "=", "plistlib", ".", "readPlistFromString", "(", "lines", ")", "if", "keys", ":", "plist", "=", "dict", "(", "plist", ".", "items", "(", ")", "+", "json", ".", "loads", "(", "keys", "[", "0", "]", ")", ".", "items", "(", ")", ")", "lines", "=", "plistlib", ".", "writePlistToString", "(", "plist", ")", "# Go through all the environment variables and replace them as variables in", "# the file.", "IDENT_RE", "=", "re", ".", "compile", "(", "r'[/\\s]'", ")", "for", "key", "in", "os", ".", "environ", ":", "if", "key", ".", "startswith", "(", "'_'", ")", ":", "continue", "evar", "=", "'${%s}'", "%", "key", "evalue", "=", "os", ".", "environ", "[", "key", "]", "lines", "=", "string", ".", "replace", "(", "lines", ",", "evar", ",", "evalue", ")", "# Xcode supports various suffices on environment variables, which are", "# all undocumented. :rfc1034identifier is used in the standard project", "# template these days, and :identifier was used earlier. 
They are used to", "# convert non-url characters into things that look like valid urls --", "# except that the replacement character for :identifier, '_' isn't valid", "# in a URL either -- oops, hence :rfc1034identifier was born.", "evar", "=", "'${%s:identifier}'", "%", "key", "evalue", "=", "IDENT_RE", ".", "sub", "(", "'_'", ",", "os", ".", "environ", "[", "key", "]", ")", "lines", "=", "string", ".", "replace", "(", "lines", ",", "evar", ",", "evalue", ")", "evar", "=", "'${%s:rfc1034identifier}'", "%", "key", "evalue", "=", "IDENT_RE", ".", "sub", "(", "'-'", ",", "os", ".", "environ", "[", "key", "]", ")", "lines", "=", "string", ".", "replace", "(", "lines", ",", "evar", ",", "evalue", ")", "# Remove any keys with values that haven't been replaced.", "lines", "=", "lines", ".", "split", "(", "'\\n'", ")", "for", "i", "in", "range", "(", "len", "(", "lines", ")", ")", ":", "if", "lines", "[", "i", "]", ".", "strip", "(", ")", ".", "startswith", "(", "\"<string>${\"", ")", ":", "lines", "[", "i", "]", "=", "None", "lines", "[", "i", "-", "1", "]", "=", "None", "lines", "=", "'\\n'", ".", "join", "(", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", ",", "lines", ")", ")", "# Write out the file with variables replaced.", "fd", "=", "open", "(", "dest", ",", "'w'", ")", "fd", ".", "write", "(", "lines", ")", "fd", ".", "close", "(", ")", "# Now write out PkgInfo file now that the Info.plist file has been", "# \"compiled\".", "self", ".", "_WritePkgInfo", "(", "dest", ")", "if", "convert_to_binary", "==", "'True'", ":", "self", ".", "_ConvertToBinary", "(", "dest", ")" ]
[ 140, 2 ]
[ 195, 33 ]
python
en
['en', 'fr', 'en']
True
MacTool._WritePkgInfo
(self, info_plist)
This writes the PkgInfo file from the data stored in Info.plist.
This writes the PkgInfo file from the data stored in Info.plist.
def _WritePkgInfo(self, info_plist): """This writes the PkgInfo file from the data stored in Info.plist.""" plist = plistlib.readPlist(info_plist) if not plist: return # Only create PkgInfo for executable types. package_type = plist['CFBundlePackageType'] if package_type != 'APPL': return # The format of PkgInfo is eight characters, representing the bundle type # and bundle signature, each four characters. If that is missing, four # '?' characters are used instead. signature_code = plist.get('CFBundleSignature', '????') if len(signature_code) != 4: # Wrong length resets everything, too. signature_code = '?' * 4 dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo') fp = open(dest, 'w') fp.write('%s%s' % (package_type, signature_code)) fp.close()
[ "def", "_WritePkgInfo", "(", "self", ",", "info_plist", ")", ":", "plist", "=", "plistlib", ".", "readPlist", "(", "info_plist", ")", "if", "not", "plist", ":", "return", "# Only create PkgInfo for executable types.", "package_type", "=", "plist", "[", "'CFBundlePackageType'", "]", "if", "package_type", "!=", "'APPL'", ":", "return", "# The format of PkgInfo is eight characters, representing the bundle type", "# and bundle signature, each four characters. If that is missing, four", "# '?' characters are used instead.", "signature_code", "=", "plist", ".", "get", "(", "'CFBundleSignature'", ",", "'????'", ")", "if", "len", "(", "signature_code", ")", "!=", "4", ":", "# Wrong length resets everything, too.", "signature_code", "=", "'?'", "*", "4", "dest", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "info_plist", ")", ",", "'PkgInfo'", ")", "fp", "=", "open", "(", "dest", ",", "'w'", ")", "fp", ".", "write", "(", "'%s%s'", "%", "(", "package_type", ",", "signature_code", ")", ")", "fp", ".", "close", "(", ")" ]
[ 197, 2 ]
[ 218, 14 ]
python
en
['en', 'en', 'en']
True
MacTool.ExecFlock
(self, lockfile, *cmd_list)
Emulates the most basic behavior of Linux's flock(1).
Emulates the most basic behavior of Linux's flock(1).
def ExecFlock(self, lockfile, *cmd_list): """Emulates the most basic behavior of Linux's flock(1).""" # Rely on exception handling to report errors. fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666) fcntl.flock(fd, fcntl.LOCK_EX) return subprocess.call(cmd_list)
[ "def", "ExecFlock", "(", "self", ",", "lockfile", ",", "*", "cmd_list", ")", ":", "# Rely on exception handling to report errors.", "fd", "=", "os", ".", "open", "(", "lockfile", ",", "os", ".", "O_RDONLY", "|", "os", ".", "O_NOCTTY", "|", "os", ".", "O_CREAT", ",", "0o666", ")", "fcntl", ".", "flock", "(", "fd", ",", "fcntl", ".", "LOCK_EX", ")", "return", "subprocess", ".", "call", "(", "cmd_list", ")" ]
[ 220, 2 ]
[ 225, 36 ]
python
en
['en', 'da', 'en']
True
MacTool.ExecFilterLibtool
(self, *cmd_list)
Calls libtool and filters out '/path/to/libtool: file: foo.o has no symbols'.
Calls libtool and filters out '/path/to/libtool: file: foo.o has no symbols'.
def ExecFilterLibtool(self, *cmd_list): """Calls libtool and filters out '/path/to/libtool: file: foo.o has no symbols'.""" libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$') libtool_re5 = re.compile( r'^.*libtool: warning for library: ' + r'.* the table of contents is empty ' + r'\(no object file members in the library define global symbols\)$') env = os.environ.copy() # Ref: # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c # The problem with this flag is that it resets the file mtime on the file to # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone. env['ZERO_AR_DATE'] = '1' libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env) _, err = libtoolout.communicate() for line in err.splitlines(): if not libtool_re.match(line) and not libtool_re5.match(line): print >>sys.stderr, line # Unconditionally touch the output .a file on the command line if present # and the command succeeded. A bit hacky. if not libtoolout.returncode: for i in range(len(cmd_list) - 1): if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'): os.utime(cmd_list[i+1], None) break return libtoolout.returncode
[ "def", "ExecFilterLibtool", "(", "self", ",", "*", "cmd_list", ")", ":", "libtool_re", "=", "re", ".", "compile", "(", "r'^.*libtool: file: .* has no symbols$'", ")", "libtool_re5", "=", "re", ".", "compile", "(", "r'^.*libtool: warning for library: '", "+", "r'.* the table of contents is empty '", "+", "r'\\(no object file members in the library define global symbols\\)$'", ")", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "# Ref:", "# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c", "# The problem with this flag is that it resets the file mtime on the file to", "# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.", "env", "[", "'ZERO_AR_DATE'", "]", "=", "'1'", "libtoolout", "=", "subprocess", ".", "Popen", "(", "cmd_list", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "env", "=", "env", ")", "_", ",", "err", "=", "libtoolout", ".", "communicate", "(", ")", "for", "line", "in", "err", ".", "splitlines", "(", ")", ":", "if", "not", "libtool_re", ".", "match", "(", "line", ")", "and", "not", "libtool_re5", ".", "match", "(", "line", ")", ":", "print", ">>", "sys", ".", "stderr", ",", "line", "# Unconditionally touch the output .a file on the command line if present", "# and the command succeeded. A bit hacky.", "if", "not", "libtoolout", ".", "returncode", ":", "for", "i", "in", "range", "(", "len", "(", "cmd_list", ")", "-", "1", ")", ":", "if", "cmd_list", "[", "i", "]", "==", "\"-o\"", "and", "cmd_list", "[", "i", "+", "1", "]", ".", "endswith", "(", "'.a'", ")", ":", "os", ".", "utime", "(", "cmd_list", "[", "i", "+", "1", "]", ",", "None", ")", "break", "return", "libtoolout", ".", "returncode" ]
[ 227, 2 ]
[ 253, 32 ]
python
en
['en', 'en', 'en']
True
MacTool.ExecPackageFramework
(self, framework, version)
Takes a path to Something.framework and the Current version of that and sets up all the symlinks.
Takes a path to Something.framework and the Current version of that and sets up all the symlinks.
def ExecPackageFramework(self, framework, version): """Takes a path to Something.framework and the Current version of that and sets up all the symlinks.""" # Find the name of the binary based on the part before the ".framework". binary = os.path.basename(framework).split('.')[0] CURRENT = 'Current' RESOURCES = 'Resources' VERSIONS = 'Versions' if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)): # Binary-less frameworks don't seem to contain symlinks (see e.g. # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle). return # Move into the framework directory to set the symlinks correctly. pwd = os.getcwd() os.chdir(framework) # Set up the Current version. self._Relink(version, os.path.join(VERSIONS, CURRENT)) # Set up the root symlinks. self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary) self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES) # Back to where we were before! os.chdir(pwd)
[ "def", "ExecPackageFramework", "(", "self", ",", "framework", ",", "version", ")", ":", "# Find the name of the binary based on the part before the \".framework\".", "binary", "=", "os", ".", "path", ".", "basename", "(", "framework", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "CURRENT", "=", "'Current'", "RESOURCES", "=", "'Resources'", "VERSIONS", "=", "'Versions'", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "framework", ",", "VERSIONS", ",", "version", ",", "binary", ")", ")", ":", "# Binary-less frameworks don't seem to contain symlinks (see e.g.", "# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).", "return", "# Move into the framework directory to set the symlinks correctly.", "pwd", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "framework", ")", "# Set up the Current version.", "self", ".", "_Relink", "(", "version", ",", "os", ".", "path", ".", "join", "(", "VERSIONS", ",", "CURRENT", ")", ")", "# Set up the root symlinks.", "self", ".", "_Relink", "(", "os", ".", "path", ".", "join", "(", "VERSIONS", ",", "CURRENT", ",", "binary", ")", ",", "binary", ")", "self", ".", "_Relink", "(", "os", ".", "path", ".", "join", "(", "VERSIONS", ",", "CURRENT", ",", "RESOURCES", ")", ",", "RESOURCES", ")", "# Back to where we were before!", "os", ".", "chdir", "(", "pwd", ")" ]
[ 255, 2 ]
[ 282, 17 ]
python
en
['en', 'en', 'en']
True
MacTool._Relink
(self, dest, link)
Creates a symlink to |dest| named |link|. If |link| already exists, it is overwritten.
Creates a symlink to |dest| named |link|. If |link| already exists, it is overwritten.
def _Relink(self, dest, link): """Creates a symlink to |dest| named |link|. If |link| already exists, it is overwritten.""" if os.path.lexists(link): os.remove(link) os.symlink(dest, link)
[ "def", "_Relink", "(", "self", ",", "dest", ",", "link", ")", ":", "if", "os", ".", "path", ".", "lexists", "(", "link", ")", ":", "os", ".", "remove", "(", "link", ")", "os", ".", "symlink", "(", "dest", ",", "link", ")" ]
[ 284, 2 ]
[ 289, 26 ]
python
en
['en', 'en', 'en']
True
MacTool.ExecCompileXcassets
(self, keys, *inputs)
Compiles multiple .xcassets files into a single .car file. This invokes 'actool' to compile all the inputs .xcassets files. The |keys| arguments is a json-encoded dictionary of extra arguments to pass to 'actool' when the asset catalogs contains an application icon or a launch image. Note that 'actool' does not create the Assets.car file if the asset catalogs does not contains imageset.
Compiles multiple .xcassets files into a single .car file.
def ExecCompileXcassets(self, keys, *inputs): """Compiles multiple .xcassets files into a single .car file. This invokes 'actool' to compile all the inputs .xcassets files. The |keys| arguments is a json-encoded dictionary of extra arguments to pass to 'actool' when the asset catalogs contains an application icon or a launch image. Note that 'actool' does not create the Assets.car file if the asset catalogs does not contains imageset. """ command_line = [ 'xcrun', 'actool', '--output-format', 'human-readable-text', '--compress-pngs', '--notices', '--warnings', '--errors', ] is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ if is_iphone_target: platform = os.environ['CONFIGURATION'].split('-')[-1] if platform not in ('iphoneos', 'iphonesimulator'): platform = 'iphonesimulator' command_line.extend([ '--platform', platform, '--target-device', 'iphone', '--target-device', 'ipad', '--minimum-deployment-target', os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile', os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']), ]) else: command_line.extend([ '--platform', 'macosx', '--target-device', 'mac', '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'], '--compile', os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']), ]) if keys: keys = json.loads(keys) for key, value in keys.iteritems(): arg_name = '--' + key if isinstance(value, bool): if value: command_line.append(arg_name) elif isinstance(value, list): for v in value: command_line.append(arg_name) command_line.append(str(v)) else: command_line.append(arg_name) command_line.append(str(value)) # Note: actool crashes if inputs path are relative, so use os.path.abspath # to get absolute path name for inputs. command_line.extend(map(os.path.abspath, inputs)) subprocess.check_call(command_line)
[ "def", "ExecCompileXcassets", "(", "self", ",", "keys", ",", "*", "inputs", ")", ":", "command_line", "=", "[", "'xcrun'", ",", "'actool'", ",", "'--output-format'", ",", "'human-readable-text'", ",", "'--compress-pngs'", ",", "'--notices'", ",", "'--warnings'", ",", "'--errors'", ",", "]", "is_iphone_target", "=", "'IPHONEOS_DEPLOYMENT_TARGET'", "in", "os", ".", "environ", "if", "is_iphone_target", ":", "platform", "=", "os", ".", "environ", "[", "'CONFIGURATION'", "]", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", "if", "platform", "not", "in", "(", "'iphoneos'", ",", "'iphonesimulator'", ")", ":", "platform", "=", "'iphonesimulator'", "command_line", ".", "extend", "(", "[", "'--platform'", ",", "platform", ",", "'--target-device'", ",", "'iphone'", ",", "'--target-device'", ",", "'ipad'", ",", "'--minimum-deployment-target'", ",", "os", ".", "environ", "[", "'IPHONEOS_DEPLOYMENT_TARGET'", "]", ",", "'--compile'", ",", "os", ".", "path", ".", "abspath", "(", "os", ".", "environ", "[", "'CONTENTS_FOLDER_PATH'", "]", ")", ",", "]", ")", "else", ":", "command_line", ".", "extend", "(", "[", "'--platform'", ",", "'macosx'", ",", "'--target-device'", ",", "'mac'", ",", "'--minimum-deployment-target'", ",", "os", ".", "environ", "[", "'MACOSX_DEPLOYMENT_TARGET'", "]", ",", "'--compile'", ",", "os", ".", "path", ".", "abspath", "(", "os", ".", "environ", "[", "'UNLOCALIZED_RESOURCES_FOLDER_PATH'", "]", ")", ",", "]", ")", "if", "keys", ":", "keys", "=", "json", ".", "loads", "(", "keys", ")", "for", "key", ",", "value", "in", "keys", ".", "iteritems", "(", ")", ":", "arg_name", "=", "'--'", "+", "key", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "if", "value", ":", "command_line", ".", "append", "(", "arg_name", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "for", "v", "in", "value", ":", "command_line", ".", "append", "(", "arg_name", ")", "command_line", ".", "append", "(", "str", "(", "v", ")", ")", "else", ":", "command_line", 
".", "append", "(", "arg_name", ")", "command_line", ".", "append", "(", "str", "(", "value", ")", ")", "# Note: actool crashes if inputs path are relative, so use os.path.abspath", "# to get absolute path name for inputs.", "command_line", ".", "extend", "(", "map", "(", "os", ".", "path", ".", "abspath", ",", "inputs", ")", ")", "subprocess", ".", "check_call", "(", "command_line", ")" ]
[ 291, 2 ]
[ 341, 39 ]
python
en
['en', 'en', 'en']
True
MacTool.ExecMergeInfoPlist
(self, output, *inputs)
Merge multiple .plist files into a single .plist file.
Merge multiple .plist files into a single .plist file.
def ExecMergeInfoPlist(self, output, *inputs): """Merge multiple .plist files into a single .plist file.""" merged_plist = {} for path in inputs: plist = self._LoadPlistMaybeBinary(path) self._MergePlist(merged_plist, plist) plistlib.writePlist(merged_plist, output)
[ "def", "ExecMergeInfoPlist", "(", "self", ",", "output", ",", "*", "inputs", ")", ":", "merged_plist", "=", "{", "}", "for", "path", "in", "inputs", ":", "plist", "=", "self", ".", "_LoadPlistMaybeBinary", "(", "path", ")", "self", ".", "_MergePlist", "(", "merged_plist", ",", "plist", ")", "plistlib", ".", "writePlist", "(", "merged_plist", ",", "output", ")" ]
[ 343, 2 ]
[ 349, 45 ]
python
en
['en', 'et', 'en']
True
MacTool.ExecCodeSignBundle
(self, key, resource_rules, entitlements, provisioning)
Code sign a bundle. This function tries to code sign an iOS bundle, following the same algorithm as Xcode: 1. copy ResourceRules.plist from the user or the SDK into the bundle, 2. pick the provisioning profile that best match the bundle identifier, and copy it into the bundle as embedded.mobileprovision, 3. copy Entitlements.plist from user or SDK next to the bundle, 4. code sign the bundle.
Code sign a bundle.
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning): """Code sign a bundle. This function tries to code sign an iOS bundle, following the same algorithm as Xcode: 1. copy ResourceRules.plist from the user or the SDK into the bundle, 2. pick the provisioning profile that best match the bundle identifier, and copy it into the bundle as embedded.mobileprovision, 3. copy Entitlements.plist from user or SDK next to the bundle, 4. code sign the bundle. """ resource_rules_path = self._InstallResourceRules(resource_rules) substitutions, overrides = self._InstallProvisioningProfile( provisioning, self._GetCFBundleIdentifier()) entitlements_path = self._InstallEntitlements( entitlements, substitutions, overrides) subprocess.check_call([ 'codesign', '--force', '--sign', key, '--resource-rules', resource_rules_path, '--entitlements', entitlements_path, os.path.join( os.environ['TARGET_BUILD_DIR'], os.environ['FULL_PRODUCT_NAME'])])
[ "def", "ExecCodeSignBundle", "(", "self", ",", "key", ",", "resource_rules", ",", "entitlements", ",", "provisioning", ")", ":", "resource_rules_path", "=", "self", ".", "_InstallResourceRules", "(", "resource_rules", ")", "substitutions", ",", "overrides", "=", "self", ".", "_InstallProvisioningProfile", "(", "provisioning", ",", "self", ".", "_GetCFBundleIdentifier", "(", ")", ")", "entitlements_path", "=", "self", ".", "_InstallEntitlements", "(", "entitlements", ",", "substitutions", ",", "overrides", ")", "subprocess", ".", "check_call", "(", "[", "'codesign'", ",", "'--force'", ",", "'--sign'", ",", "key", ",", "'--resource-rules'", ",", "resource_rules_path", ",", "'--entitlements'", ",", "entitlements_path", ",", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'TARGET_BUILD_DIR'", "]", ",", "os", ".", "environ", "[", "'FULL_PRODUCT_NAME'", "]", ")", "]", ")" ]
[ 351, 2 ]
[ 372, 46 ]
python
en
['en', 'su', 'en']
True
MacTool._InstallResourceRules
(self, resource_rules)
Installs ResourceRules.plist from user or SDK into the bundle. Args: resource_rules: string, optional, path to the ResourceRules.plist file to use, default to "${SDKROOT}/ResourceRules.plist" Returns: Path to the copy of ResourceRules.plist into the bundle.
Installs ResourceRules.plist from user or SDK into the bundle.
def _InstallResourceRules(self, resource_rules): """Installs ResourceRules.plist from user or SDK into the bundle. Args: resource_rules: string, optional, path to the ResourceRules.plist file to use, default to "${SDKROOT}/ResourceRules.plist" Returns: Path to the copy of ResourceRules.plist into the bundle. """ source_path = resource_rules target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['CONTENTS_FOLDER_PATH'], 'ResourceRules.plist') if not source_path: source_path = os.path.join( os.environ['SDKROOT'], 'ResourceRules.plist') shutil.copy2(source_path, target_path) return target_path
[ "def", "_InstallResourceRules", "(", "self", ",", "resource_rules", ")", ":", "source_path", "=", "resource_rules", "target_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'BUILT_PRODUCTS_DIR'", "]", ",", "os", ".", "environ", "[", "'CONTENTS_FOLDER_PATH'", "]", ",", "'ResourceRules.plist'", ")", "if", "not", "source_path", ":", "source_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'SDKROOT'", "]", ",", "'ResourceRules.plist'", ")", "shutil", ".", "copy2", "(", "source_path", ",", "target_path", ")", "return", "target_path" ]
[ 374, 2 ]
[ 393, 22 ]
python
en
['en', 'en', 'en']
True
MacTool._InstallProvisioningProfile
(self, profile, bundle_identifier)
Installs embedded.mobileprovision into the bundle. Args: profile: string, optional, short name of the .mobileprovision file to use, if empty or the file is missing, the best file installed will be used bundle_identifier: string, value of CFBundleIdentifier from Info.plist Returns: A tuple containing two dictionary: variables substitutions and values to overrides when generating the entitlements file.
Installs embedded.mobileprovision into the bundle.
def _InstallProvisioningProfile(self, profile, bundle_identifier): """Installs embedded.mobileprovision into the bundle. Args: profile: string, optional, short name of the .mobileprovision file to use, if empty or the file is missing, the best file installed will be used bundle_identifier: string, value of CFBundleIdentifier from Info.plist Returns: A tuple containing two dictionary: variables substitutions and values to overrides when generating the entitlements file. """ source_path, provisioning_data, team_id = self._FindProvisioningProfile( profile, bundle_identifier) target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['CONTENTS_FOLDER_PATH'], 'embedded.mobileprovision') shutil.copy2(source_path, target_path) substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.') return substitutions, provisioning_data['Entitlements']
[ "def", "_InstallProvisioningProfile", "(", "self", ",", "profile", ",", "bundle_identifier", ")", ":", "source_path", ",", "provisioning_data", ",", "team_id", "=", "self", ".", "_FindProvisioningProfile", "(", "profile", ",", "bundle_identifier", ")", "target_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'BUILT_PRODUCTS_DIR'", "]", ",", "os", ".", "environ", "[", "'CONTENTS_FOLDER_PATH'", "]", ",", "'embedded.mobileprovision'", ")", "shutil", ".", "copy2", "(", "source_path", ",", "target_path", ")", "substitutions", "=", "self", ".", "_GetSubstitutions", "(", "bundle_identifier", ",", "team_id", "+", "'.'", ")", "return", "substitutions", ",", "provisioning_data", "[", "'Entitlements'", "]" ]
[ 395, 2 ]
[ 416, 59 ]
python
en
['en', 'en', 'en']
True
MacTool._FindProvisioningProfile
(self, profile, bundle_identifier)
Finds the .mobileprovision file to use for signing the bundle. Checks all the installed provisioning profiles (or if the user specified the PROVISIONING_PROFILE variable, only consult it) and select the most specific that correspond to the bundle identifier. Args: profile: string, optional, short name of the .mobileprovision file to use, if empty or the file is missing, the best file installed will be used bundle_identifier: string, value of CFBundleIdentifier from Info.plist Returns: A tuple of the path to the selected provisioning profile, the data of the embedded plist in the provisioning profile and the team identifier to use for code signing. Raises: SystemExit: if no .mobileprovision can be used to sign the bundle.
Finds the .mobileprovision file to use for signing the bundle.
def _FindProvisioningProfile(self, profile, bundle_identifier): """Finds the .mobileprovision file to use for signing the bundle. Checks all the installed provisioning profiles (or if the user specified the PROVISIONING_PROFILE variable, only consult it) and select the most specific that correspond to the bundle identifier. Args: profile: string, optional, short name of the .mobileprovision file to use, if empty or the file is missing, the best file installed will be used bundle_identifier: string, value of CFBundleIdentifier from Info.plist Returns: A tuple of the path to the selected provisioning profile, the data of the embedded plist in the provisioning profile and the team identifier to use for code signing. Raises: SystemExit: if no .mobileprovision can be used to sign the bundle. """ profiles_dir = os.path.join( os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles') if not os.path.isdir(profiles_dir): print >>sys.stderr, ( 'cannot find mobile provisioning for %s' % bundle_identifier) sys.exit(1) provisioning_profiles = None if profile: profile_path = os.path.join(profiles_dir, profile + '.mobileprovision') if os.path.exists(profile_path): provisioning_profiles = [profile_path] if not provisioning_profiles: provisioning_profiles = glob.glob( os.path.join(profiles_dir, '*.mobileprovision')) valid_provisioning_profiles = {} for profile_path in provisioning_profiles: profile_data = self._LoadProvisioningProfile(profile_path) app_id_pattern = profile_data.get( 'Entitlements', {}).get('application-identifier', '') for team_identifier in profile_data.get('TeamIdentifier', []): app_id = '%s.%s' % (team_identifier, bundle_identifier) if fnmatch.fnmatch(app_id, app_id_pattern): valid_provisioning_profiles[app_id_pattern] = ( profile_path, profile_data, team_identifier) if not valid_provisioning_profiles: print >>sys.stderr, ( 'cannot find mobile provisioning for %s' % bundle_identifier) sys.exit(1) # If the user has multiple provisioning profiles 
installed that can be # used for ${bundle_identifier}, pick the most specific one (ie. the # provisioning profile whose pattern is the longest). selected_key = max(valid_provisioning_profiles, key=lambda v: len(v)) return valid_provisioning_profiles[selected_key]
[ "def", "_FindProvisioningProfile", "(", "self", ",", "profile", ",", "bundle_identifier", ")", ":", "profiles_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'HOME'", "]", ",", "'Library'", ",", "'MobileDevice'", ",", "'Provisioning Profiles'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "profiles_dir", ")", ":", "print", ">>", "sys", ".", "stderr", ",", "(", "'cannot find mobile provisioning for %s'", "%", "bundle_identifier", ")", "sys", ".", "exit", "(", "1", ")", "provisioning_profiles", "=", "None", "if", "profile", ":", "profile_path", "=", "os", ".", "path", ".", "join", "(", "profiles_dir", ",", "profile", "+", "'.mobileprovision'", ")", "if", "os", ".", "path", ".", "exists", "(", "profile_path", ")", ":", "provisioning_profiles", "=", "[", "profile_path", "]", "if", "not", "provisioning_profiles", ":", "provisioning_profiles", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "profiles_dir", ",", "'*.mobileprovision'", ")", ")", "valid_provisioning_profiles", "=", "{", "}", "for", "profile_path", "in", "provisioning_profiles", ":", "profile_data", "=", "self", ".", "_LoadProvisioningProfile", "(", "profile_path", ")", "app_id_pattern", "=", "profile_data", ".", "get", "(", "'Entitlements'", ",", "{", "}", ")", ".", "get", "(", "'application-identifier'", ",", "''", ")", "for", "team_identifier", "in", "profile_data", ".", "get", "(", "'TeamIdentifier'", ",", "[", "]", ")", ":", "app_id", "=", "'%s.%s'", "%", "(", "team_identifier", ",", "bundle_identifier", ")", "if", "fnmatch", ".", "fnmatch", "(", "app_id", ",", "app_id_pattern", ")", ":", "valid_provisioning_profiles", "[", "app_id_pattern", "]", "=", "(", "profile_path", ",", "profile_data", ",", "team_identifier", ")", "if", "not", "valid_provisioning_profiles", ":", "print", ">>", "sys", ".", "stderr", ",", "(", "'cannot find mobile provisioning for %s'", "%", "bundle_identifier", ")", "sys", ".", "exit", "(", "1", ")", "# If the user has 
multiple provisioning profiles installed that can be", "# used for ${bundle_identifier}, pick the most specific one (ie. the", "# provisioning profile whose pattern is the longest).", "selected_key", "=", "max", "(", "valid_provisioning_profiles", ",", "key", "=", "lambda", "v", ":", "len", "(", "v", ")", ")", "return", "valid_provisioning_profiles", "[", "selected_key", "]" ]
[ 418, 2 ]
[ 471, 52 ]
python
en
['en', 'en', 'en']
True
MacTool._LoadProvisioningProfile
(self, profile_path)
Extracts the plist embedded in a provisioning profile. Args: profile_path: string, path to the .mobileprovision file Returns: Content of the plist embedded in the provisioning profile as a dictionary.
Extracts the plist embedded in a provisioning profile.
def _LoadProvisioningProfile(self, profile_path): """Extracts the plist embedded in a provisioning profile. Args: profile_path: string, path to the .mobileprovision file Returns: Content of the plist embedded in the provisioning profile as a dictionary. """ with tempfile.NamedTemporaryFile() as temp: subprocess.check_call([ 'security', 'cms', '-D', '-i', profile_path, '-o', temp.name]) return self._LoadPlistMaybeBinary(temp.name)
[ "def", "_LoadProvisioningProfile", "(", "self", ",", "profile_path", ")", ":", "with", "tempfile", ".", "NamedTemporaryFile", "(", ")", "as", "temp", ":", "subprocess", ".", "check_call", "(", "[", "'security'", ",", "'cms'", ",", "'-D'", ",", "'-i'", ",", "profile_path", ",", "'-o'", ",", "temp", ".", "name", "]", ")", "return", "self", ".", "_LoadPlistMaybeBinary", "(", "temp", ".", "name", ")" ]
[ 473, 2 ]
[ 485, 50 ]
python
en
['en', 'en', 'en']
True
MacTool._MergePlist
(self, merged_plist, plist)
Merge |plist| into |merged_plist|.
Merge |plist| into |merged_plist|.
def _MergePlist(self, merged_plist, plist): """Merge |plist| into |merged_plist|.""" for key, value in plist.iteritems(): if isinstance(value, dict): merged_value = merged_plist.get(key, {}) if isinstance(merged_value, dict): self._MergePlist(merged_value, value) merged_plist[key] = merged_value else: merged_plist[key] = value else: merged_plist[key] = value
[ "def", "_MergePlist", "(", "self", ",", "merged_plist", ",", "plist", ")", ":", "for", "key", ",", "value", "in", "plist", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "merged_value", "=", "merged_plist", ".", "get", "(", "key", ",", "{", "}", ")", "if", "isinstance", "(", "merged_value", ",", "dict", ")", ":", "self", ".", "_MergePlist", "(", "merged_value", ",", "value", ")", "merged_plist", "[", "key", "]", "=", "merged_value", "else", ":", "merged_plist", "[", "key", "]", "=", "value", "else", ":", "merged_plist", "[", "key", "]", "=", "value" ]
[ 487, 2 ]
[ 498, 33 ]
python
en
['it', 'et', 'en']
False
MacTool._LoadPlistMaybeBinary
(self, plist_path)
Loads into a memory a plist possibly encoded in binary format. This is a wrapper around plistlib.readPlist that tries to convert the plist to the XML format if it can't be parsed (assuming that it is in the binary format). Args: plist_path: string, path to a plist file, in XML or binary format Returns: Content of the plist as a dictionary.
Loads into a memory a plist possibly encoded in binary format.
def _LoadPlistMaybeBinary(self, plist_path): """Loads into a memory a plist possibly encoded in binary format. This is a wrapper around plistlib.readPlist that tries to convert the plist to the XML format if it can't be parsed (assuming that it is in the binary format). Args: plist_path: string, path to a plist file, in XML or binary format Returns: Content of the plist as a dictionary. """ try: # First, try to read the file using plistlib that only supports XML, # and if an exception is raised, convert a temporary copy to XML and # load that copy. return plistlib.readPlist(plist_path) except: pass with tempfile.NamedTemporaryFile() as temp: shutil.copy2(plist_path, temp.name) subprocess.check_call(['plutil', '-convert', 'xml1', temp.name]) return plistlib.readPlist(temp.name)
[ "def", "_LoadPlistMaybeBinary", "(", "self", ",", "plist_path", ")", ":", "try", ":", "# First, try to read the file using plistlib that only supports XML,", "# and if an exception is raised, convert a temporary copy to XML and", "# load that copy.", "return", "plistlib", ".", "readPlist", "(", "plist_path", ")", "except", ":", "pass", "with", "tempfile", ".", "NamedTemporaryFile", "(", ")", "as", "temp", ":", "shutil", ".", "copy2", "(", "plist_path", ",", "temp", ".", "name", ")", "subprocess", ".", "check_call", "(", "[", "'plutil'", ",", "'-convert'", ",", "'xml1'", ",", "temp", ".", "name", "]", ")", "return", "plistlib", ".", "readPlist", "(", "temp", ".", "name", ")" ]
[ 500, 2 ]
[ 523, 42 ]
python
en
['en', 'en', 'en']
True
MacTool._GetSubstitutions
(self, bundle_identifier, app_identifier_prefix)
Constructs a dictionary of variable substitutions for Entitlements.plist. Args: bundle_identifier: string, value of CFBundleIdentifier from Info.plist app_identifier_prefix: string, value for AppIdentifierPrefix Returns: Dictionary of substitutions to apply when generating Entitlements.plist.
Constructs a dictionary of variable substitutions for Entitlements.plist.
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix): """Constructs a dictionary of variable substitutions for Entitlements.plist. Args: bundle_identifier: string, value of CFBundleIdentifier from Info.plist app_identifier_prefix: string, value for AppIdentifierPrefix Returns: Dictionary of substitutions to apply when generating Entitlements.plist. """ return { 'CFBundleIdentifier': bundle_identifier, 'AppIdentifierPrefix': app_identifier_prefix, }
[ "def", "_GetSubstitutions", "(", "self", ",", "bundle_identifier", ",", "app_identifier_prefix", ")", ":", "return", "{", "'CFBundleIdentifier'", ":", "bundle_identifier", ",", "'AppIdentifierPrefix'", ":", "app_identifier_prefix", ",", "}" ]
[ 525, 2 ]
[ 538, 5 ]
python
en
['en', 'en', 'en']
True
MacTool._GetCFBundleIdentifier
(self)
Extracts CFBundleIdentifier value from Info.plist in the bundle. Returns: Value of CFBundleIdentifier in the Info.plist located in the bundle.
Extracts CFBundleIdentifier value from Info.plist in the bundle.
def _GetCFBundleIdentifier(self): """Extracts CFBundleIdentifier value from Info.plist in the bundle. Returns: Value of CFBundleIdentifier in the Info.plist located in the bundle. """ info_plist_path = os.path.join( os.environ['TARGET_BUILD_DIR'], os.environ['INFOPLIST_PATH']) info_plist_data = self._LoadPlistMaybeBinary(info_plist_path) return info_plist_data['CFBundleIdentifier']
[ "def", "_GetCFBundleIdentifier", "(", "self", ")", ":", "info_plist_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'TARGET_BUILD_DIR'", "]", ",", "os", ".", "environ", "[", "'INFOPLIST_PATH'", "]", ")", "info_plist_data", "=", "self", ".", "_LoadPlistMaybeBinary", "(", "info_plist_path", ")", "return", "info_plist_data", "[", "'CFBundleIdentifier'", "]" ]
[ 540, 2 ]
[ 550, 48 ]
python
en
['en', 'en', 'en']
True
MacTool._InstallEntitlements
(self, entitlements, substitutions, overrides)
Generates and install the ${BundleName}.xcent entitlements file. Expands variables "$(variable)" pattern in the source entitlements file, add extra entitlements defined in the .mobileprovision file and the copy the generated plist to "${BundlePath}.xcent". Args: entitlements: string, optional, path to the Entitlements.plist template to use, defaults to "${SDKROOT}/Entitlements.plist" substitutions: dictionary, variable substitutions overrides: dictionary, values to add to the entitlements Returns: Path to the generated entitlements file.
Generates and install the ${BundleName}.xcent entitlements file.
def _InstallEntitlements(self, entitlements, substitutions, overrides): """Generates and install the ${BundleName}.xcent entitlements file. Expands variables "$(variable)" pattern in the source entitlements file, add extra entitlements defined in the .mobileprovision file and the copy the generated plist to "${BundlePath}.xcent". Args: entitlements: string, optional, path to the Entitlements.plist template to use, defaults to "${SDKROOT}/Entitlements.plist" substitutions: dictionary, variable substitutions overrides: dictionary, values to add to the entitlements Returns: Path to the generated entitlements file. """ source_path = entitlements target_path = os.path.join( os.environ['BUILT_PRODUCTS_DIR'], os.environ['PRODUCT_NAME'] + '.xcent') if not source_path: source_path = os.path.join( os.environ['SDKROOT'], 'Entitlements.plist') shutil.copy2(source_path, target_path) data = self._LoadPlistMaybeBinary(target_path) data = self._ExpandVariables(data, substitutions) if overrides: for key in overrides: if key not in data: data[key] = overrides[key] plistlib.writePlist(data, target_path) return target_path
[ "def", "_InstallEntitlements", "(", "self", ",", "entitlements", ",", "substitutions", ",", "overrides", ")", ":", "source_path", "=", "entitlements", "target_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'BUILT_PRODUCTS_DIR'", "]", ",", "os", ".", "environ", "[", "'PRODUCT_NAME'", "]", "+", "'.xcent'", ")", "if", "not", "source_path", ":", "source_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'SDKROOT'", "]", ",", "'Entitlements.plist'", ")", "shutil", ".", "copy2", "(", "source_path", ",", "target_path", ")", "data", "=", "self", ".", "_LoadPlistMaybeBinary", "(", "target_path", ")", "data", "=", "self", ".", "_ExpandVariables", "(", "data", ",", "substitutions", ")", "if", "overrides", ":", "for", "key", "in", "overrides", ":", "if", "key", "not", "in", "data", ":", "data", "[", "key", "]", "=", "overrides", "[", "key", "]", "plistlib", ".", "writePlist", "(", "data", ",", "target_path", ")", "return", "target_path" ]
[ 552, 2 ]
[ 584, 22 ]
python
en
['en', 'en', 'en']
True
MacTool._ExpandVariables
(self, data, substitutions)
Expands variables "$(variable)" in data. Args: data: object, can be either string, list or dictionary substitutions: dictionary, variable substitutions to perform Returns: Copy of data where each references to "$(variable)" has been replaced by the corresponding value found in substitutions, or left intact if the key was not found.
Expands variables "$(variable)" in data.
def _ExpandVariables(self, data, substitutions): """Expands variables "$(variable)" in data. Args: data: object, can be either string, list or dictionary substitutions: dictionary, variable substitutions to perform Returns: Copy of data where each references to "$(variable)" has been replaced by the corresponding value found in substitutions, or left intact if the key was not found. """ if isinstance(data, str): for key, value in substitutions.iteritems(): data = data.replace('$(%s)' % key, value) return data if isinstance(data, list): return [self._ExpandVariables(v, substitutions) for v in data] if isinstance(data, dict): return {k: self._ExpandVariables(data[k], substitutions) for k in data} return data
[ "def", "_ExpandVariables", "(", "self", ",", "data", ",", "substitutions", ")", ":", "if", "isinstance", "(", "data", ",", "str", ")", ":", "for", "key", ",", "value", "in", "substitutions", ".", "iteritems", "(", ")", ":", "data", "=", "data", ".", "replace", "(", "'$(%s)'", "%", "key", ",", "value", ")", "return", "data", "if", "isinstance", "(", "data", ",", "list", ")", ":", "return", "[", "self", ".", "_ExpandVariables", "(", "v", ",", "substitutions", ")", "for", "v", "in", "data", "]", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "return", "{", "k", ":", "self", ".", "_ExpandVariables", "(", "data", "[", "k", "]", ",", "substitutions", ")", "for", "k", "in", "data", "}", "return", "data" ]
[ 586, 2 ]
[ 606, 15 ]
python
en
['nl', 'en', 'en']
True
ValidationResultsPageRenderer_render_with_run_info_at_end
()
Rendered validation results with run info at the end Returns: json string of rendered validation results
Rendered validation results with run info at the end Returns: json string of rendered validation results
def ValidationResultsPageRenderer_render_with_run_info_at_end(): """ Rendered validation results with run info at the end Returns: json string of rendered validation results """ fixture_filename = file_relative_path( __file__, "./fixtures/ValidationResultsPageRenderer_render_with_run_info_at_end.json", ) with open(fixture_filename) as infile: rendered_validation_results = json.load(infile) return rendered_validation_results
[ "def", "ValidationResultsPageRenderer_render_with_run_info_at_end", "(", ")", ":", "fixture_filename", "=", "file_relative_path", "(", "__file__", ",", "\"./fixtures/ValidationResultsPageRenderer_render_with_run_info_at_end.json\"", ",", ")", "with", "open", "(", "fixture_filename", ")", "as", "infile", ":", "rendered_validation_results", "=", "json", ".", "load", "(", "infile", ")", "return", "rendered_validation_results" ]
[ 478, 0 ]
[ 490, 42 ]
python
en
['en', 'error', 'th']
False
ValidationResultsPageRenderer_render_with_run_info_at_start
()
Rendered validation results with run info at the start Returns: json string of rendered validation results
Rendered validation results with run info at the start Returns: json string of rendered validation results
def ValidationResultsPageRenderer_render_with_run_info_at_start(): """ Rendered validation results with run info at the start Returns: json string of rendered validation results """ fixture_filename = file_relative_path( __file__, "./fixtures/ValidationResultsPageRenderer_render_with_run_info_at_start.json", ) with open(fixture_filename) as infile: rendered_validation_results = json.load(infile) return rendered_validation_results
[ "def", "ValidationResultsPageRenderer_render_with_run_info_at_start", "(", ")", ":", "fixture_filename", "=", "file_relative_path", "(", "__file__", ",", "\"./fixtures/ValidationResultsPageRenderer_render_with_run_info_at_start.json\"", ",", ")", "with", "open", "(", "fixture_filename", ")", "as", "infile", ":", "rendered_validation_results", "=", "json", ".", "load", "(", "infile", ")", "return", "rendered_validation_results" ]
[ 494, 0 ]
[ 506, 42 ]
python
en
['en', 'error', 'th']
False
construct_data_context_config
()
Construct a DataContextConfig fixture given the modifications in the input parameters Returns: Dictionary representation of a DataContextConfig to compare in tests
Construct a DataContextConfig fixture given the modifications in the input parameters Returns: Dictionary representation of a DataContextConfig to compare in tests
def construct_data_context_config(): """ Construct a DataContextConfig fixture given the modifications in the input parameters Returns: Dictionary representation of a DataContextConfig to compare in tests """ def _construct_data_context_config( data_context_id: str, datasources: Dict, config_version: float = float( DataContextConfigDefaults.DEFAULT_CONFIG_VERSION.value ), expectations_store_name: str = DataContextConfigDefaults.DEFAULT_EXPECTATIONS_STORE_NAME.value, validations_store_name: str = DataContextConfigDefaults.DEFAULT_VALIDATIONS_STORE_NAME.value, evaluation_parameter_store_name: str = DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value, checkpoint_store_name: str = DataContextConfigDefaults.DEFAULT_CHECKPOINT_STORE_NAME.value, plugins_directory: Optional[str] = None, stores: Optional[Dict] = None, validation_operators: Optional[Dict] = None, data_docs_sites: Optional[Dict] = None, ): if stores is None: stores = copy.deepcopy(DataContextConfigDefaults.DEFAULT_STORES.value) if data_docs_sites is None: data_docs_sites = copy.deepcopy( DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITES.value ) return { "config_version": config_version, "datasources": datasources, "expectations_store_name": expectations_store_name, "validations_store_name": validations_store_name, "evaluation_parameter_store_name": evaluation_parameter_store_name, "checkpoint_store_name": checkpoint_store_name, "plugins_directory": plugins_directory, "validation_operators": validation_operators, "stores": stores, "data_docs_sites": data_docs_sites, "notebooks": None, "config_variables_file_path": None, "anonymous_usage_statistics": { "data_context_id": data_context_id, "enabled": True, }, } return _construct_data_context_config
[ "def", "construct_data_context_config", "(", ")", ":", "def", "_construct_data_context_config", "(", "data_context_id", ":", "str", ",", "datasources", ":", "Dict", ",", "config_version", ":", "float", "=", "float", "(", "DataContextConfigDefaults", ".", "DEFAULT_CONFIG_VERSION", ".", "value", ")", ",", "expectations_store_name", ":", "str", "=", "DataContextConfigDefaults", ".", "DEFAULT_EXPECTATIONS_STORE_NAME", ".", "value", ",", "validations_store_name", ":", "str", "=", "DataContextConfigDefaults", ".", "DEFAULT_VALIDATIONS_STORE_NAME", ".", "value", ",", "evaluation_parameter_store_name", ":", "str", "=", "DataContextConfigDefaults", ".", "DEFAULT_EVALUATION_PARAMETER_STORE_NAME", ".", "value", ",", "checkpoint_store_name", ":", "str", "=", "DataContextConfigDefaults", ".", "DEFAULT_CHECKPOINT_STORE_NAME", ".", "value", ",", "plugins_directory", ":", "Optional", "[", "str", "]", "=", "None", ",", "stores", ":", "Optional", "[", "Dict", "]", "=", "None", ",", "validation_operators", ":", "Optional", "[", "Dict", "]", "=", "None", ",", "data_docs_sites", ":", "Optional", "[", "Dict", "]", "=", "None", ",", ")", ":", "if", "stores", "is", "None", ":", "stores", "=", "copy", ".", "deepcopy", "(", "DataContextConfigDefaults", ".", "DEFAULT_STORES", ".", "value", ")", "if", "data_docs_sites", "is", "None", ":", "data_docs_sites", "=", "copy", ".", "deepcopy", "(", "DataContextConfigDefaults", ".", "DEFAULT_DATA_DOCS_SITES", ".", "value", ")", "return", "{", "\"config_version\"", ":", "config_version", ",", "\"datasources\"", ":", "datasources", ",", "\"expectations_store_name\"", ":", "expectations_store_name", ",", "\"validations_store_name\"", ":", "validations_store_name", ",", "\"evaluation_parameter_store_name\"", ":", "evaluation_parameter_store_name", ",", "\"checkpoint_store_name\"", ":", "checkpoint_store_name", ",", "\"plugins_directory\"", ":", "plugins_directory", ",", "\"validation_operators\"", ":", "validation_operators", ",", "\"stores\"", ":", 
"stores", ",", "\"data_docs_sites\"", ":", "data_docs_sites", ",", "\"notebooks\"", ":", "None", ",", "\"config_variables_file_path\"", ":", "None", ",", "\"anonymous_usage_statistics\"", ":", "{", "\"data_context_id\"", ":", "data_context_id", ",", "\"enabled\"", ":", "True", ",", "}", ",", "}", "return", "_construct_data_context_config" ]
[ 30, 0 ]
[ 78, 41 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_BaseStoreBackendDefaults_and_simple_defaults
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Ensure that a very simple DataContextConfig setup with many defaults is created accurately and produces a valid DataContextConfig
What does this test and why? Ensure that a very simple DataContextConfig setup with many defaults is created accurately and produces a valid DataContextConfig
def test_DataContextConfig_with_BaseStoreBackendDefaults_and_simple_defaults( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Ensure that a very simple DataContextConfig setup with many defaults is created accurately and produces a valid DataContextConfig """ store_backend_defaults = BaseStoreBackendDefaults() data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, checkpoint_store_name=store_backend_defaults.checkpoint_store_name, ) desired_config = construct_data_context_config( data_context_id=data_context_config.anonymous_usage_statistics.data_context_id, datasources=default_pandas_datasource_config, ) data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_BaseStoreBackendDefaults_and_simple_defaults", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "store_backend_defaults", "=", "BaseStoreBackendDefaults", "(", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", "checkpoint_store_name", "=", "store_backend_defaults", ".", "checkpoint_store_name", ",", ")", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ",", ")", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 116, 0 ]
[ 155, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_S3StoreBackendDefaults
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Make sure that using S3StoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_bucket_name getting propagated to all stores.
What does this test and why? Make sure that using S3StoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_bucket_name getting propagated to all stores.
def test_DataContextConfig_with_S3StoreBackendDefaults( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Make sure that using S3StoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_bucket_name getting propagated to all stores. """ store_backend_defaults = S3StoreBackendDefaults( default_bucket_name="my_default_bucket" ) data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, ) # Create desired config desired_stores_config = { "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"}, "expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "bucket": "my_default_bucket", "class_name": "TupleS3StoreBackend", "prefix": "expectations", }, }, "validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "bucket": "my_default_bucket", "class_name": "TupleS3StoreBackend", "prefix": "validations", }, }, "checkpoint_S3_store": { "class_name": "CheckpointStore", "store_backend": { "bucket": "my_default_bucket", "class_name": "TupleS3StoreBackend", "prefix": "checkpoints", }, }, } desired_data_docs_sites_config = { "s3_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "bucket": "my_default_bucket", "class_name": "TupleS3StoreBackend", "prefix": "data_docs", }, } } desired_config = construct_data_context_config( data_context_id=data_context_config.anonymous_usage_statistics.data_context_id, datasources=default_pandas_datasource_config, expectations_store_name="expectations_S3_store", validations_store_name="validations_S3_store", 
evaluation_parameter_store_name=DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value, checkpoint_store_name="checkpoint_S3_store", stores=desired_stores_config, data_docs_sites=desired_data_docs_sites_config, ) data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_S3StoreBackendDefaults", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "store_backend_defaults", "=", "S3StoreBackendDefaults", "(", "default_bucket_name", "=", "\"my_default_bucket\"", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", ")", "# Create desired config", "desired_stores_config", "=", "{", "\"evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"expectations\"", ",", "}", ",", "}", ",", "\"validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"validations\"", ",", "}", ",", "}", ",", "\"checkpoint_S3_store\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"checkpoints\"", ",", "}", ",", "}", ",", "}", "desired_data_docs_sites_config", "=", "{", "\"s3_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", 
"\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"data_docs\"", ",", "}", ",", "}", "}", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ",", "expectations_store_name", "=", "\"expectations_S3_store\"", ",", "validations_store_name", "=", "\"validations_S3_store\"", ",", "evaluation_parameter_store_name", "=", "DataContextConfigDefaults", ".", "DEFAULT_EVALUATION_PARAMETER_STORE_NAME", ".", "value", ",", "checkpoint_store_name", "=", "\"checkpoint_S3_store\"", ",", "stores", "=", "desired_stores_config", ",", "data_docs_sites", "=", "desired_data_docs_sites_config", ",", ")", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 158, 0 ]
[ 247, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_S3StoreBackendDefaults_using_all_parameters
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Make sure that S3StoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_bucket_name is ignored if individual bucket names are passed
What does this test and why? Make sure that S3StoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_bucket_name is ignored if individual bucket names are passed
def test_DataContextConfig_with_S3StoreBackendDefaults_using_all_parameters( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Make sure that S3StoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_bucket_name is ignored if individual bucket names are passed """ store_backend_defaults = S3StoreBackendDefaults( default_bucket_name="custom_default_bucket_name", expectations_store_bucket_name="custom_expectations_store_bucket_name", validations_store_bucket_name="custom_validations_store_bucket_name", data_docs_bucket_name="custom_data_docs_store_bucket_name", checkpoint_store_bucket_name="custom_checkpoint_store_bucket_name", expectations_store_prefix="custom_expectations_store_prefix", validations_store_prefix="custom_validations_store_prefix", data_docs_prefix="custom_data_docs_prefix", checkpoint_store_prefix="custom_checkpoint_store_prefix", expectations_store_name="custom_expectations_S3_store_name", validations_store_name="custom_validations_S3_store_name", evaluation_parameter_store_name="custom_evaluation_parameter_store_name", checkpoint_store_name="custom_checkpoint_S3_store_name", ) data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", module_name="great_expectations.datasource", data_asset_type={ "module_name": "great_expectations.dataset", "class_name": "PandasDataset", }, batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, ) # Create desired config desired_stores_config = { "custom_evaluation_parameter_store_name": { "class_name": "EvaluationParameterStore" }, "custom_expectations_S3_store_name": { "class_name": "ExpectationsStore", "store_backend": { "bucket": "custom_expectations_store_bucket_name", "class_name": "TupleS3StoreBackend", "prefix": 
"custom_expectations_store_prefix", }, }, "custom_validations_S3_store_name": { "class_name": "ValidationsStore", "store_backend": { "bucket": "custom_validations_store_bucket_name", "class_name": "TupleS3StoreBackend", "prefix": "custom_validations_store_prefix", }, }, "custom_checkpoint_S3_store_name": { "class_name": "CheckpointStore", "store_backend": { "bucket": "custom_checkpoint_store_bucket_name", "class_name": "TupleS3StoreBackend", "prefix": "custom_checkpoint_store_prefix", }, }, } desired_data_docs_sites_config = { "s3_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "bucket": "custom_data_docs_store_bucket_name", "class_name": "TupleS3StoreBackend", "prefix": "custom_data_docs_prefix", }, } } desired_config = construct_data_context_config( data_context_id=data_context_config.anonymous_usage_statistics.data_context_id, datasources=default_pandas_datasource_config, expectations_store_name="custom_expectations_S3_store_name", validations_store_name="custom_validations_S3_store_name", evaluation_parameter_store_name="custom_evaluation_parameter_store_name", checkpoint_store_name="custom_checkpoint_S3_store_name", stores=desired_stores_config, data_docs_sites=desired_data_docs_sites_config, ) data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_S3StoreBackendDefaults_using_all_parameters", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "store_backend_defaults", "=", "S3StoreBackendDefaults", "(", "default_bucket_name", "=", "\"custom_default_bucket_name\"", ",", "expectations_store_bucket_name", "=", "\"custom_expectations_store_bucket_name\"", ",", "validations_store_bucket_name", "=", "\"custom_validations_store_bucket_name\"", ",", "data_docs_bucket_name", "=", "\"custom_data_docs_store_bucket_name\"", ",", "checkpoint_store_bucket_name", "=", "\"custom_checkpoint_store_bucket_name\"", ",", "expectations_store_prefix", "=", "\"custom_expectations_store_prefix\"", ",", "validations_store_prefix", "=", "\"custom_validations_store_prefix\"", ",", "data_docs_prefix", "=", "\"custom_data_docs_prefix\"", ",", "checkpoint_store_prefix", "=", "\"custom_checkpoint_store_prefix\"", ",", "expectations_store_name", "=", "\"custom_expectations_S3_store_name\"", ",", "validations_store_name", "=", "\"custom_validations_S3_store_name\"", ",", "evaluation_parameter_store_name", "=", "\"custom_evaluation_parameter_store_name\"", ",", "checkpoint_store_name", "=", "\"custom_checkpoint_S3_store_name\"", ",", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "module_name", "=", "\"great_expectations.datasource\"", ",", "data_asset_type", "=", "{", "\"module_name\"", ":", "\"great_expectations.dataset\"", ",", "\"class_name\"", ":", "\"PandasDataset\"", ",", "}", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", ")", "# Create desired config", "desired_stores_config", "=", "{", 
"\"custom_evaluation_parameter_store_name\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"custom_expectations_S3_store_name\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"custom_expectations_store_bucket_name\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"custom_expectations_store_prefix\"", ",", "}", ",", "}", ",", "\"custom_validations_S3_store_name\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"custom_validations_store_bucket_name\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"custom_validations_store_prefix\"", ",", "}", ",", "}", ",", "\"custom_checkpoint_S3_store_name\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"custom_checkpoint_store_bucket_name\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"custom_checkpoint_store_prefix\"", ",", "}", ",", "}", ",", "}", "desired_data_docs_sites_config", "=", "{", "\"s3_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"custom_data_docs_store_bucket_name\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"custom_data_docs_prefix\"", ",", "}", ",", "}", "}", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ",", "expectations_store_name", "=", "\"custom_expectations_S3_store_name\"", ",", "validations_store_name", "=", 
"\"custom_validations_S3_store_name\"", ",", "evaluation_parameter_store_name", "=", "\"custom_evaluation_parameter_store_name\"", ",", "checkpoint_store_name", "=", "\"custom_checkpoint_S3_store_name\"", ",", "stores", "=", "desired_stores_config", ",", "data_docs_sites", "=", "desired_data_docs_sites_config", ",", ")", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 250, 0 ]
[ 358, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_FilesystemStoreBackendDefaults_and_simple_defaults
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Ensure that a very simple DataContextConfig setup using FilesystemStoreBackendDefaults is created accurately This test sets the root_dir parameter
What does this test and why? Ensure that a very simple DataContextConfig setup using FilesystemStoreBackendDefaults is created accurately This test sets the root_dir parameter
def test_DataContextConfig_with_FilesystemStoreBackendDefaults_and_simple_defaults( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Ensure that a very simple DataContextConfig setup using FilesystemStoreBackendDefaults is created accurately This test sets the root_dir parameter """ test_root_directory = "test_root_dir" store_backend_defaults = FilesystemStoreBackendDefaults( root_directory=test_root_directory ) data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, ) # Create desired config data_context_id = data_context_config.anonymous_usage_statistics.data_context_id desired_config = construct_data_context_config( data_context_id=data_context_id, datasources=default_pandas_datasource_config ) # Add root_directory to stores and data_docs desired_config["stores"][desired_config["expectations_store_name"]][ "store_backend" ]["root_directory"] = test_root_directory desired_config["stores"][desired_config["validations_store_name"]]["store_backend"][ "root_directory" ] = test_root_directory desired_config["stores"][desired_config["checkpoint_store_name"]]["store_backend"][ "root_directory" ] = test_root_directory desired_config["data_docs_sites"]["local_site"]["store_backend"][ "root_directory" ] = test_root_directory data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_FilesystemStoreBackendDefaults_and_simple_defaults", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "test_root_directory", "=", "\"test_root_dir\"", "store_backend_defaults", "=", "FilesystemStoreBackendDefaults", "(", "root_directory", "=", "test_root_directory", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", ")", "# Create desired config", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ")", "# Add root_directory to stores and data_docs", "desired_config", "[", "\"stores\"", "]", "[", "desired_config", "[", "\"expectations_store_name\"", "]", "]", "[", "\"store_backend\"", "]", "[", "\"root_directory\"", "]", "=", "test_root_directory", "desired_config", "[", "\"stores\"", "]", "[", "desired_config", "[", "\"validations_store_name\"", "]", "]", "[", "\"store_backend\"", "]", "[", "\"root_directory\"", "]", "=", "test_root_directory", "desired_config", "[", "\"stores\"", "]", "[", "desired_config", "[", "\"checkpoint_store_name\"", "]", "]", "[", "\"store_backend\"", "]", "[", "\"root_directory\"", "]", "=", "test_root_directory", "desired_config", "[", "\"data_docs_sites\"", "]", "[", "\"local_site\"", "]", "[", "\"store_backend\"", "]", "[", "\"root_directory\"", "]", "=", "test_root_directory", "data_context_config_schema", "=", 
"DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 361, 0 ]
[ 417, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_FilesystemStoreBackendDefaults_and_simple_defaults_no_root_directory
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Ensure that a very simple DataContextConfig setup using FilesystemStoreBackendDefaults is created accurately This test does not set the optional root_directory parameter
What does this test and why? Ensure that a very simple DataContextConfig setup using FilesystemStoreBackendDefaults is created accurately This test does not set the optional root_directory parameter
def test_DataContextConfig_with_FilesystemStoreBackendDefaults_and_simple_defaults_no_root_directory( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Ensure that a very simple DataContextConfig setup using FilesystemStoreBackendDefaults is created accurately This test does not set the optional root_directory parameter """ store_backend_defaults = FilesystemStoreBackendDefaults() data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, checkpoint_store_name=store_backend_defaults.checkpoint_store_name, ) # Create desired config data_context_id = data_context_config.anonymous_usage_statistics.data_context_id desired_config = construct_data_context_config( data_context_id=data_context_id, datasources=default_pandas_datasource_config ) data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_FilesystemStoreBackendDefaults_and_simple_defaults_no_root_directory", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "store_backend_defaults", "=", "FilesystemStoreBackendDefaults", "(", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", "checkpoint_store_name", "=", "store_backend_defaults", ".", "checkpoint_store_name", ",", ")", "# Create desired config", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ")", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 420, 0 ]
[ 460, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_GCSStoreBackendDefaults
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Make sure that using GCSStoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_bucket_name & default_project_name getting propagated to all stores.
What does this test and why? Make sure that using GCSStoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_bucket_name & default_project_name getting propagated to all stores.
def test_DataContextConfig_with_GCSStoreBackendDefaults( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Make sure that using GCSStoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_bucket_name & default_project_name getting propagated to all stores. """ store_backend_defaults = GCSStoreBackendDefaults( default_bucket_name="my_default_bucket", default_project_name="my_default_project", ) data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", module_name="great_expectations.datasource", data_asset_type={ "module_name": "great_expectations.dataset", "class_name": "PandasDataset", }, batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, ) # Create desired config data_context_id = data_context_config.anonymous_usage_statistics.data_context_id desired_stores_config = { "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"}, "expectations_GCS_store": { "class_name": "ExpectationsStore", "store_backend": { "bucket": "my_default_bucket", "project": "my_default_project", "class_name": "TupleGCSStoreBackend", "prefix": "expectations", }, }, "validations_GCS_store": { "class_name": "ValidationsStore", "store_backend": { "bucket": "my_default_bucket", "project": "my_default_project", "class_name": "TupleGCSStoreBackend", "prefix": "validations", }, }, "checkpoint_GCS_store": { "class_name": "CheckpointStore", "store_backend": { "bucket": "my_default_bucket", "project": "my_default_project", "class_name": "TupleGCSStoreBackend", "prefix": "checkpoints", }, }, } desired_data_docs_sites_config = { "gcs_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "bucket": 
"my_default_bucket", "project": "my_default_project", "class_name": "TupleGCSStoreBackend", "prefix": "data_docs", }, } } desired_config = construct_data_context_config( data_context_id=data_context_id, datasources=default_pandas_datasource_config, expectations_store_name="expectations_GCS_store", validations_store_name="validations_GCS_store", checkpoint_store_name="checkpoint_GCS_store", evaluation_parameter_store_name=DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value, stores=desired_stores_config, data_docs_sites=desired_data_docs_sites_config, ) data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_GCSStoreBackendDefaults", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "store_backend_defaults", "=", "GCSStoreBackendDefaults", "(", "default_bucket_name", "=", "\"my_default_bucket\"", ",", "default_project_name", "=", "\"my_default_project\"", ",", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "module_name", "=", "\"great_expectations.datasource\"", ",", "data_asset_type", "=", "{", "\"module_name\"", ":", "\"great_expectations.dataset\"", ",", "\"class_name\"", ":", "\"PandasDataset\"", ",", "}", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", ")", "# Create desired config", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", "desired_stores_config", "=", "{", "\"evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"expectations_GCS_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"project\"", ":", "\"my_default_project\"", ",", "\"class_name\"", ":", "\"TupleGCSStoreBackend\"", ",", "\"prefix\"", ":", "\"expectations\"", ",", "}", ",", "}", ",", "\"validations_GCS_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"project\"", ":", "\"my_default_project\"", ",", "\"class_name\"", ":", "\"TupleGCSStoreBackend\"", ",", "\"prefix\"", ":", "\"validations\"", ",", "}", ",", "}", ",", 
"\"checkpoint_GCS_store\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"project\"", ":", "\"my_default_project\"", ",", "\"class_name\"", ":", "\"TupleGCSStoreBackend\"", ",", "\"prefix\"", ":", "\"checkpoints\"", ",", "}", ",", "}", ",", "}", "desired_data_docs_sites_config", "=", "{", "\"gcs_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"project\"", ":", "\"my_default_project\"", ",", "\"class_name\"", ":", "\"TupleGCSStoreBackend\"", ",", "\"prefix\"", ":", "\"data_docs\"", ",", "}", ",", "}", "}", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ",", "expectations_store_name", "=", "\"expectations_GCS_store\"", ",", "validations_store_name", "=", "\"validations_GCS_store\"", ",", "checkpoint_store_name", "=", "\"checkpoint_GCS_store\"", ",", "evaluation_parameter_store_name", "=", "DataContextConfigDefaults", ".", "DEFAULT_EVALUATION_PARAMETER_STORE_NAME", ".", "value", ",", "stores", "=", "desired_stores_config", ",", "data_docs_sites", "=", "desired_data_docs_sites_config", ",", ")", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 463, 0 ]
[ 564, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_GCSStoreBackendDefaults_using_all_parameters
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Make sure that GCSStoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_bucket_name is ignored if individual bucket names are passed
What does this test and why? Make sure that GCSStoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_bucket_name is ignored if individual bucket names are passed
def test_DataContextConfig_with_GCSStoreBackendDefaults_using_all_parameters( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Make sure that GCSStoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_bucket_name is ignored if individual bucket names are passed """ store_backend_defaults = GCSStoreBackendDefaults( default_bucket_name="custom_default_bucket_name", default_project_name="custom_default_project_name", expectations_store_bucket_name="custom_expectations_store_bucket_name", validations_store_bucket_name="custom_validations_store_bucket_name", data_docs_bucket_name="custom_data_docs_store_bucket_name", checkpoint_store_bucket_name="custom_checkpoint_store_bucket_name", expectations_store_project_name="custom_expectations_store_project_name", validations_store_project_name="custom_validations_store_project_name", data_docs_project_name="custom_data_docs_store_project_name", checkpoint_store_project_name="custom_checkpoint_store_project_name", expectations_store_prefix="custom_expectations_store_prefix", validations_store_prefix="custom_validations_store_prefix", data_docs_prefix="custom_data_docs_prefix", checkpoint_store_prefix="custom_checkpoint_store_prefix", expectations_store_name="custom_expectations_GCS_store_name", validations_store_name="custom_validations_GCS_store_name", evaluation_parameter_store_name="custom_evaluation_parameter_store_name", checkpoint_store_name="custom_checkpoint_GCS_store_name", ) data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", module_name="great_expectations.datasource", data_asset_type={ "module_name": "great_expectations.dataset", "class_name": "PandasDataset", }, batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, ) # Create desired 
config desired_stores_config = { "custom_evaluation_parameter_store_name": { "class_name": "EvaluationParameterStore" }, "custom_expectations_GCS_store_name": { "class_name": "ExpectationsStore", "store_backend": { "bucket": "custom_expectations_store_bucket_name", "project": "custom_expectations_store_project_name", "class_name": "TupleGCSStoreBackend", "prefix": "custom_expectations_store_prefix", }, }, "custom_validations_GCS_store_name": { "class_name": "ValidationsStore", "store_backend": { "bucket": "custom_validations_store_bucket_name", "project": "custom_validations_store_project_name", "class_name": "TupleGCSStoreBackend", "prefix": "custom_validations_store_prefix", }, }, "custom_checkpoint_GCS_store_name": { "class_name": "CheckpointStore", "store_backend": { "bucket": "custom_checkpoint_store_bucket_name", "project": "custom_checkpoint_store_project_name", "class_name": "TupleGCSStoreBackend", "prefix": "custom_checkpoint_store_prefix", }, }, } desired_data_docs_sites_config = { "gcs_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "bucket": "custom_data_docs_store_bucket_name", "project": "custom_data_docs_store_project_name", "class_name": "TupleGCSStoreBackend", "prefix": "custom_data_docs_prefix", }, } } desired_config = construct_data_context_config( data_context_id=data_context_config.anonymous_usage_statistics.data_context_id, datasources=default_pandas_datasource_config, expectations_store_name="custom_expectations_GCS_store_name", validations_store_name="custom_validations_GCS_store_name", evaluation_parameter_store_name="custom_evaluation_parameter_store_name", checkpoint_store_name="custom_checkpoint_GCS_store_name", stores=desired_stores_config, data_docs_sites=desired_data_docs_sites_config, ) data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( 
properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_GCSStoreBackendDefaults_using_all_parameters", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "store_backend_defaults", "=", "GCSStoreBackendDefaults", "(", "default_bucket_name", "=", "\"custom_default_bucket_name\"", ",", "default_project_name", "=", "\"custom_default_project_name\"", ",", "expectations_store_bucket_name", "=", "\"custom_expectations_store_bucket_name\"", ",", "validations_store_bucket_name", "=", "\"custom_validations_store_bucket_name\"", ",", "data_docs_bucket_name", "=", "\"custom_data_docs_store_bucket_name\"", ",", "checkpoint_store_bucket_name", "=", "\"custom_checkpoint_store_bucket_name\"", ",", "expectations_store_project_name", "=", "\"custom_expectations_store_project_name\"", ",", "validations_store_project_name", "=", "\"custom_validations_store_project_name\"", ",", "data_docs_project_name", "=", "\"custom_data_docs_store_project_name\"", ",", "checkpoint_store_project_name", "=", "\"custom_checkpoint_store_project_name\"", ",", "expectations_store_prefix", "=", "\"custom_expectations_store_prefix\"", ",", "validations_store_prefix", "=", "\"custom_validations_store_prefix\"", ",", "data_docs_prefix", "=", "\"custom_data_docs_prefix\"", ",", "checkpoint_store_prefix", "=", "\"custom_checkpoint_store_prefix\"", ",", "expectations_store_name", "=", "\"custom_expectations_GCS_store_name\"", ",", "validations_store_name", "=", "\"custom_validations_GCS_store_name\"", ",", "evaluation_parameter_store_name", "=", "\"custom_evaluation_parameter_store_name\"", ",", "checkpoint_store_name", "=", "\"custom_checkpoint_GCS_store_name\"", ",", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "module_name", "=", "\"great_expectations.datasource\"", ",", "data_asset_type", "=", "{", "\"module_name\"", ":", 
"\"great_expectations.dataset\"", ",", "\"class_name\"", ":", "\"PandasDataset\"", ",", "}", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", ")", "# Create desired config", "desired_stores_config", "=", "{", "\"custom_evaluation_parameter_store_name\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"custom_expectations_GCS_store_name\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"custom_expectations_store_bucket_name\"", ",", "\"project\"", ":", "\"custom_expectations_store_project_name\"", ",", "\"class_name\"", ":", "\"TupleGCSStoreBackend\"", ",", "\"prefix\"", ":", "\"custom_expectations_store_prefix\"", ",", "}", ",", "}", ",", "\"custom_validations_GCS_store_name\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"custom_validations_store_bucket_name\"", ",", "\"project\"", ":", "\"custom_validations_store_project_name\"", ",", "\"class_name\"", ":", "\"TupleGCSStoreBackend\"", ",", "\"prefix\"", ":", "\"custom_validations_store_prefix\"", ",", "}", ",", "}", ",", "\"custom_checkpoint_GCS_store_name\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"custom_checkpoint_store_bucket_name\"", ",", "\"project\"", ":", "\"custom_checkpoint_store_project_name\"", ",", "\"class_name\"", ":", "\"TupleGCSStoreBackend\"", ",", "\"prefix\"", ":", "\"custom_checkpoint_store_prefix\"", ",", "}", ",", "}", ",", "}", "desired_data_docs_sites_config", "=", "{", "\"gcs_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", 
"\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"custom_data_docs_store_bucket_name\"", ",", "\"project\"", ":", "\"custom_data_docs_store_project_name\"", ",", "\"class_name\"", ":", "\"TupleGCSStoreBackend\"", ",", "\"prefix\"", ":", "\"custom_data_docs_prefix\"", ",", "}", ",", "}", "}", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ",", "expectations_store_name", "=", "\"custom_expectations_GCS_store_name\"", ",", "validations_store_name", "=", "\"custom_validations_GCS_store_name\"", ",", "evaluation_parameter_store_name", "=", "\"custom_evaluation_parameter_store_name\"", ",", "checkpoint_store_name", "=", "\"custom_checkpoint_GCS_store_name\"", ",", "stores", "=", "desired_stores_config", ",", "data_docs_sites", "=", "desired_data_docs_sites_config", ",", ")", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 567, 0 ]
[ 683, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_DatabaseStoreBackendDefaults
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Make sure that using DatabaseStoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_credentials getting propagated to stores and not data_docs
What does this test and why? Make sure that using DatabaseStoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_credentials getting propagated to stores and not data_docs
def test_DataContextConfig_with_DatabaseStoreBackendDefaults( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Make sure that using DatabaseStoreBackendDefaults as the store_backend_defaults applies appropriate defaults, including default_credentials getting propagated to stores and not data_docs """ store_backend_defaults = DatabaseStoreBackendDefaults( default_credentials={ "drivername": "postgresql", "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "port": "65432", "username": "ge_tutorials", "password": "ge_tutorials", "database": "ge_tutorials", }, ) data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", module_name="great_expectations.datasource", data_asset_type={ "module_name": "great_expectations.dataset", "class_name": "PandasDataset", }, batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, ) # Create desired config desired_stores_config = { "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"}, "expectations_database_store": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "DatabaseStoreBackend", "credentials": { "drivername": "postgresql", "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "port": "65432", "username": "ge_tutorials", "password": "ge_tutorials", "database": "ge_tutorials", }, }, }, "validations_database_store": { "class_name": "ValidationsStore", "store_backend": { "class_name": "DatabaseStoreBackend", "credentials": { "drivername": "postgresql", "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "port": "65432", "username": "ge_tutorials", "password": "ge_tutorials", "database": "ge_tutorials", }, }, }, "checkpoint_database_store": { "class_name": "CheckpointStore", "store_backend": { "class_name": 
"DatabaseStoreBackend", "credentials": { "drivername": "postgresql", "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "port": "65432", "username": "ge_tutorials", "password": "ge_tutorials", "database": "ge_tutorials", }, }, }, } desired_data_docs_sites_config = { "local_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "base_directory": "uncommitted/data_docs/local_site/", "class_name": "TupleFilesystemStoreBackend", }, } } desired_config = construct_data_context_config( data_context_id=data_context_config.anonymous_usage_statistics.data_context_id, datasources=default_pandas_datasource_config, expectations_store_name="expectations_database_store", validations_store_name="validations_database_store", checkpoint_store_name="checkpoint_database_store", evaluation_parameter_store_name=DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value, stores=desired_stores_config, data_docs_sites=desired_data_docs_sites_config, ) data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_DatabaseStoreBackendDefaults", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "store_backend_defaults", "=", "DatabaseStoreBackendDefaults", "(", "default_credentials", "=", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"port\"", ":", "\"65432\"", ",", "\"username\"", ":", "\"ge_tutorials\"", ",", "\"password\"", ":", "\"ge_tutorials\"", ",", "\"database\"", ":", "\"ge_tutorials\"", ",", "}", ",", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "module_name", "=", "\"great_expectations.datasource\"", ",", "data_asset_type", "=", "{", "\"module_name\"", ":", "\"great_expectations.dataset\"", ",", "\"class_name\"", ":", "\"PandasDataset\"", ",", "}", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", ")", "# Create desired config", "desired_stores_config", "=", "{", "\"evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"expectations_database_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"DatabaseStoreBackend\"", ",", "\"credentials\"", ":", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"port\"", ":", "\"65432\"", ",", "\"username\"", ":", "\"ge_tutorials\"", ",", "\"password\"", ":", "\"ge_tutorials\"", ",", "\"database\"", ":", "\"ge_tutorials\"", ",", "}", 
",", "}", ",", "}", ",", "\"validations_database_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"DatabaseStoreBackend\"", ",", "\"credentials\"", ":", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"port\"", ":", "\"65432\"", ",", "\"username\"", ":", "\"ge_tutorials\"", ",", "\"password\"", ":", "\"ge_tutorials\"", ",", "\"database\"", ":", "\"ge_tutorials\"", ",", "}", ",", "}", ",", "}", ",", "\"checkpoint_database_store\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"DatabaseStoreBackend\"", ",", "\"credentials\"", ":", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"port\"", ":", "\"65432\"", ",", "\"username\"", ":", "\"ge_tutorials\"", ",", "\"password\"", ":", "\"ge_tutorials\"", ",", "\"database\"", ":", "\"ge_tutorials\"", ",", "}", ",", "}", ",", "}", ",", "}", "desired_data_docs_sites_config", "=", "{", "\"local_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"base_directory\"", ":", "\"uncommitted/data_docs/local_site/\"", ",", "\"class_name\"", ":", "\"TupleFilesystemStoreBackend\"", ",", "}", ",", "}", "}", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ",", "expectations_store_name", "=", "\"expectations_database_store\"", ",", "validations_store_name", "=", "\"validations_database_store\"", ",", 
"checkpoint_store_name", "=", "\"checkpoint_database_store\"", ",", "evaluation_parameter_store_name", "=", "DataContextConfigDefaults", ".", "DEFAULT_EVALUATION_PARAMETER_STORE_NAME", ".", "value", ",", "stores", "=", "desired_stores_config", ",", "data_docs_sites", "=", "desired_data_docs_sites_config", ",", ")", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 686, 0 ]
[ 804, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_DatabaseStoreBackendDefaults_using_all_parameters
( construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Make sure that DatabaseStoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_credentials is ignored if individual store credentials are passed
What does this test and why? Make sure that DatabaseStoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_credentials is ignored if individual store credentials are passed
def test_DataContextConfig_with_DatabaseStoreBackendDefaults_using_all_parameters( construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Make sure that DatabaseStoreBackendDefaults parameters are handled appropriately E.g. Make sure that default_credentials is ignored if individual store credentials are passed """ store_backend_defaults = DatabaseStoreBackendDefaults( default_credentials={ "drivername": "postgresql", "host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"), "port": "65432", "username": "ge_tutorials", "password": "ge_tutorials", "database": "ge_tutorials", }, expectations_store_credentials={ "drivername": "custom_expectations_store_drivername", "host": "custom_expectations_store_host", "port": "custom_expectations_store_port", "username": "custom_expectations_store_username", "password": "custom_expectations_store_password", "database": "custom_expectations_store_database", }, validations_store_credentials={ "drivername": "custom_validations_store_drivername", "host": "custom_validations_store_host", "port": "custom_validations_store_port", "username": "custom_validations_store_username", "password": "custom_validations_store_password", "database": "custom_validations_store_database", }, checkpoint_store_credentials={ "drivername": "custom_checkpoint_store_drivername", "host": "custom_checkpoint_store_host", "port": "custom_checkpoint_store_port", "username": "custom_checkpoint_store_username", "password": "custom_checkpoint_store_password", "database": "custom_checkpoint_store_database", }, expectations_store_name="custom_expectations_database_store_name", validations_store_name="custom_validations_database_store_name", evaluation_parameter_store_name="custom_evaluation_parameter_store_name", checkpoint_store_name="custom_checkpoint_database_store_name", ) data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", 
module_name="great_expectations.datasource", data_asset_type={ "module_name": "great_expectations.dataset", "class_name": "PandasDataset", }, batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ) }, store_backend_defaults=store_backend_defaults, ) # Create desired config desired_stores_config = { "custom_evaluation_parameter_store_name": { "class_name": "EvaluationParameterStore" }, "custom_expectations_database_store_name": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "DatabaseStoreBackend", "credentials": { "database": "custom_expectations_store_database", "drivername": "custom_expectations_store_drivername", "host": "custom_expectations_store_host", "password": "custom_expectations_store_password", "port": "custom_expectations_store_port", "username": "custom_expectations_store_username", }, }, }, "custom_validations_database_store_name": { "class_name": "ValidationsStore", "store_backend": { "class_name": "DatabaseStoreBackend", "credentials": { "database": "custom_validations_store_database", "drivername": "custom_validations_store_drivername", "host": "custom_validations_store_host", "password": "custom_validations_store_password", "port": "custom_validations_store_port", "username": "custom_validations_store_username", }, }, }, "custom_checkpoint_database_store_name": { "class_name": "CheckpointStore", "store_backend": { "class_name": "DatabaseStoreBackend", "credentials": { "database": "custom_checkpoint_store_database", "drivername": "custom_checkpoint_store_drivername", "host": "custom_checkpoint_store_host", "password": "custom_checkpoint_store_password", "port": "custom_checkpoint_store_port", "username": "custom_checkpoint_store_username", }, }, }, } desired_data_docs_sites_config = { "local_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { 
"base_directory": "uncommitted/data_docs/local_site/", "class_name": "TupleFilesystemStoreBackend", }, } } desired_config = construct_data_context_config( data_context_id=data_context_config.anonymous_usage_statistics.data_context_id, datasources=default_pandas_datasource_config, expectations_store_name="custom_expectations_database_store_name", validations_store_name="custom_validations_database_store_name", evaluation_parameter_store_name="custom_evaluation_parameter_store_name", checkpoint_store_name="custom_checkpoint_database_store_name", stores=desired_stores_config, data_docs_sites=desired_data_docs_sites_config, ) data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_DataContextConfig_with_DatabaseStoreBackendDefaults_using_all_parameters", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "store_backend_defaults", "=", "DatabaseStoreBackendDefaults", "(", "default_credentials", "=", "{", "\"drivername\"", ":", "\"postgresql\"", ",", "\"host\"", ":", "os", ".", "getenv", "(", "\"GE_TEST_LOCAL_DB_HOSTNAME\"", ",", "\"localhost\"", ")", ",", "\"port\"", ":", "\"65432\"", ",", "\"username\"", ":", "\"ge_tutorials\"", ",", "\"password\"", ":", "\"ge_tutorials\"", ",", "\"database\"", ":", "\"ge_tutorials\"", ",", "}", ",", "expectations_store_credentials", "=", "{", "\"drivername\"", ":", "\"custom_expectations_store_drivername\"", ",", "\"host\"", ":", "\"custom_expectations_store_host\"", ",", "\"port\"", ":", "\"custom_expectations_store_port\"", ",", "\"username\"", ":", "\"custom_expectations_store_username\"", ",", "\"password\"", ":", "\"custom_expectations_store_password\"", ",", "\"database\"", ":", "\"custom_expectations_store_database\"", ",", "}", ",", "validations_store_credentials", "=", "{", "\"drivername\"", ":", "\"custom_validations_store_drivername\"", ",", "\"host\"", ":", "\"custom_validations_store_host\"", ",", "\"port\"", ":", "\"custom_validations_store_port\"", ",", "\"username\"", ":", "\"custom_validations_store_username\"", ",", "\"password\"", ":", "\"custom_validations_store_password\"", ",", "\"database\"", ":", "\"custom_validations_store_database\"", ",", "}", ",", "checkpoint_store_credentials", "=", "{", "\"drivername\"", ":", "\"custom_checkpoint_store_drivername\"", ",", "\"host\"", ":", "\"custom_checkpoint_store_host\"", ",", "\"port\"", ":", "\"custom_checkpoint_store_port\"", ",", "\"username\"", ":", "\"custom_checkpoint_store_username\"", ",", "\"password\"", ":", "\"custom_checkpoint_store_password\"", ",", "\"database\"", ":", "\"custom_checkpoint_store_database\"", ",", "}", ",", "expectations_store_name", "=", 
"\"custom_expectations_database_store_name\"", ",", "validations_store_name", "=", "\"custom_validations_database_store_name\"", ",", "evaluation_parameter_store_name", "=", "\"custom_evaluation_parameter_store_name\"", ",", "checkpoint_store_name", "=", "\"custom_checkpoint_database_store_name\"", ",", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "module_name", "=", "\"great_expectations.datasource\"", ",", "data_asset_type", "=", "{", "\"module_name\"", ":", "\"great_expectations.dataset\"", ",", "\"class_name\"", ":", "\"PandasDataset\"", ",", "}", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", ")", "# Create desired config", "desired_stores_config", "=", "{", "\"custom_evaluation_parameter_store_name\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"custom_expectations_database_store_name\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"DatabaseStoreBackend\"", ",", "\"credentials\"", ":", "{", "\"database\"", ":", "\"custom_expectations_store_database\"", ",", "\"drivername\"", ":", "\"custom_expectations_store_drivername\"", ",", "\"host\"", ":", "\"custom_expectations_store_host\"", ",", "\"password\"", ":", "\"custom_expectations_store_password\"", ",", "\"port\"", ":", "\"custom_expectations_store_port\"", ",", "\"username\"", ":", "\"custom_expectations_store_username\"", ",", "}", ",", "}", ",", "}", ",", "\"custom_validations_database_store_name\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", 
"\"DatabaseStoreBackend\"", ",", "\"credentials\"", ":", "{", "\"database\"", ":", "\"custom_validations_store_database\"", ",", "\"drivername\"", ":", "\"custom_validations_store_drivername\"", ",", "\"host\"", ":", "\"custom_validations_store_host\"", ",", "\"password\"", ":", "\"custom_validations_store_password\"", ",", "\"port\"", ":", "\"custom_validations_store_port\"", ",", "\"username\"", ":", "\"custom_validations_store_username\"", ",", "}", ",", "}", ",", "}", ",", "\"custom_checkpoint_database_store_name\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"DatabaseStoreBackend\"", ",", "\"credentials\"", ":", "{", "\"database\"", ":", "\"custom_checkpoint_store_database\"", ",", "\"drivername\"", ":", "\"custom_checkpoint_store_drivername\"", ",", "\"host\"", ":", "\"custom_checkpoint_store_host\"", ",", "\"password\"", ":", "\"custom_checkpoint_store_password\"", ",", "\"port\"", ":", "\"custom_checkpoint_store_port\"", ",", "\"username\"", ":", "\"custom_checkpoint_store_username\"", ",", "}", ",", "}", ",", "}", ",", "}", "desired_data_docs_sites_config", "=", "{", "\"local_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"base_directory\"", ":", "\"uncommitted/data_docs/local_site/\"", ",", "\"class_name\"", ":", "\"TupleFilesystemStoreBackend\"", ",", "}", ",", "}", "}", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ",", "expectations_store_name", "=", "\"custom_expectations_database_store_name\"", ",", "validations_store_name", "=", "\"custom_validations_database_store_name\"", ",", 
"evaluation_parameter_store_name", "=", "\"custom_evaluation_parameter_store_name\"", ",", "checkpoint_store_name", "=", "\"custom_checkpoint_database_store_name\"", ",", "stores", "=", "desired_stores_config", ",", "data_docs_sites", "=", "desired_data_docs_sites_config", ",", ")", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 807, 0 ]
[ 955, 74 ]
python
en
['en', 'error', 'th']
False
test_override_general_defaults
( construct_data_context_config, default_pandas_datasource_config, default_spark_datasource_config, )
What does this test and why? A DataContextConfig should be able to be created by passing items into the constructor that override any defaults. It should also be able to handle multiple datasources, even if they are configured with a dictionary or a DatasourceConfig.
What does this test and why? A DataContextConfig should be able to be created by passing items into the constructor that override any defaults. It should also be able to handle multiple datasources, even if they are configured with a dictionary or a DatasourceConfig.
def test_override_general_defaults( construct_data_context_config, default_pandas_datasource_config, default_spark_datasource_config, ): """ What does this test and why? A DataContextConfig should be able to be created by passing items into the constructor that override any defaults. It should also be able to handle multiple datasources, even if they are configured with a dictionary or a DatasourceConfig. """ data_context_config = DataContextConfig( config_version=999, plugins_directory="custom_plugins_directory", config_variables_file_path="custom_config_variables_file_path", datasources={ "my_spark_datasource": { "data_asset_type": { "class_name": "SparkDFDataset", "module_name": "great_expectations.dataset", }, "class_name": "SparkDFDatasource", "module_name": "great_expectations.datasource", "batch_kwargs_generators": {}, }, "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "../data/", } }, ), }, stores={ "expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": "REPLACE_ME", "prefix": "REPLACE_ME", }, }, "expectations_S3_store2": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": "REPLACE_ME", "prefix": "REPLACE_ME", }, }, "validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": "REPLACE_ME", "prefix": "REPLACE_ME", }, }, "validations_S3_store2": { "class_name": "ValidationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": "REPLACE_ME", "prefix": "REPLACE_ME", }, }, "custom_evaluation_parameter_store": { "class_name": "EvaluationParameterStore" }, "checkpoint_S3_store": { "class_name": "CheckpointStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": "REPLACE_ME", "prefix": "REPLACE_ME", }, }, }, 
expectations_store_name="custom_expectations_store_name", validations_store_name="custom_validations_store_name", evaluation_parameter_store_name="custom_evaluation_parameter_store_name", checkpoint_store_name="checkpoint_S3_store", data_docs_sites={ "s3_site": { "class_name": "SiteBuilder", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": "REPLACE_ME", }, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, }, "local_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "base_directory": "uncommitted/data_docs/local_site/", "class_name": "TupleFilesystemStoreBackend", }, }, }, validation_operators={ "custom_action_list_operator": { "class_name": "ActionListValidationOperator", "action_list": [ { "name": "custom_store_validation_result", "action": {"class_name": "CustomStoreValidationResultAction"}, }, { "name": "store_evaluation_params", "action": {"class_name": "StoreEvaluationParametersAction"}, }, { "name": "update_data_docs", "action": {"class_name": "UpdateDataDocsAction"}, }, ], } }, anonymous_usage_statistics={"enabled": True}, ) desired_stores = { "custom_evaluation_parameter_store": {"class_name": "EvaluationParameterStore"}, "expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "bucket": "REPLACE_ME", "class_name": "TupleS3StoreBackend", "prefix": "REPLACE_ME", }, }, "expectations_S3_store2": { "class_name": "ExpectationsStore", "store_backend": { "bucket": "REPLACE_ME", "class_name": "TupleS3StoreBackend", "prefix": "REPLACE_ME", }, }, "validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "bucket": "REPLACE_ME", "class_name": "TupleS3StoreBackend", "prefix": "REPLACE_ME", }, }, "validations_S3_store2": { "class_name": "ValidationsStore", "store_backend": { "bucket": "REPLACE_ME", "class_name": "TupleS3StoreBackend", "prefix": "REPLACE_ME", }, }, "checkpoint_S3_store": { 
"class_name": "CheckpointStore", "store_backend": { "bucket": "REPLACE_ME", "class_name": "TupleS3StoreBackend", "prefix": "REPLACE_ME", }, }, } desired_data_docs_sites_config = { "local_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "base_directory": "uncommitted/data_docs/local_site/", "class_name": "TupleFilesystemStoreBackend", }, }, "s3_site": { "class_name": "SiteBuilder", "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "bucket": "REPLACE_ME", "class_name": "TupleS3StoreBackend", }, }, } desired_validation_operators = { "custom_action_list_operator": { "class_name": "ActionListValidationOperator", "action_list": [ { "name": "custom_store_validation_result", "action": {"class_name": "CustomStoreValidationResultAction"}, }, { "name": "store_evaluation_params", "action": {"class_name": "StoreEvaluationParametersAction"}, }, { "name": "update_data_docs", "action": {"class_name": "UpdateDataDocsAction"}, }, ], } } desired_config = construct_data_context_config( data_context_id=data_context_config.anonymous_usage_statistics.data_context_id, datasources={ **default_pandas_datasource_config, **default_spark_datasource_config, }, config_version=999.0, expectations_store_name="custom_expectations_store_name", validations_store_name="custom_validations_store_name", evaluation_parameter_store_name="custom_evaluation_parameter_store_name", checkpoint_store_name="checkpoint_S3_store", stores=desired_stores, validation_operators=desired_validation_operators, data_docs_sites=desired_data_docs_sites_config, plugins_directory="custom_plugins_directory", ) desired_config["config_variables_file_path"] = "custom_config_variables_file_path" data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( 
properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config)
[ "def", "test_override_general_defaults", "(", "construct_data_context_config", ",", "default_pandas_datasource_config", ",", "default_spark_datasource_config", ",", ")", ":", "data_context_config", "=", "DataContextConfig", "(", "config_version", "=", "999", ",", "plugins_directory", "=", "\"custom_plugins_directory\"", ",", "config_variables_file_path", "=", "\"custom_config_variables_file_path\"", ",", "datasources", "=", "{", "\"my_spark_datasource\"", ":", "{", "\"data_asset_type\"", ":", "{", "\"class_name\"", ":", "\"SparkDFDataset\"", ",", "\"module_name\"", ":", "\"great_expectations.dataset\"", ",", "}", ",", "\"class_name\"", ":", "\"SparkDFDatasource\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource\"", ",", "\"batch_kwargs_generators\"", ":", "{", "}", ",", "}", ",", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"../data/\"", ",", "}", "}", ",", ")", ",", "}", ",", "stores", "=", "{", "\"expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "\"expectations_S3_store2\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "\"validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", 
"\"validations_S3_store2\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "\"custom_evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"checkpoint_S3_store\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "}", ",", "expectations_store_name", "=", "\"custom_expectations_store_name\"", ",", "validations_store_name", "=", "\"custom_validations_store_name\"", ",", "evaluation_parameter_store_name", "=", "\"custom_evaluation_parameter_store_name\"", ",", "checkpoint_store_name", "=", "\"checkpoint_S3_store\"", ",", "data_docs_sites", "=", "{", "\"s3_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "}", ",", "\"local_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"base_directory\"", ":", "\"uncommitted/data_docs/local_site/\"", ",", "\"class_name\"", ":", "\"TupleFilesystemStoreBackend\"", ",", "}", ",", "}", ",", "}", ",", "validation_operators", "=", "{", "\"custom_action_list_operator\"", ":", "{", "\"class_name\"", ":", "\"ActionListValidationOperator\"", ",", "\"action_list\"", ":", "[", "{", "\"name\"", ":", "\"custom_store_validation_result\"", ",", 
"\"action\"", ":", "{", "\"class_name\"", ":", "\"CustomStoreValidationResultAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"store_evaluation_params\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"StoreEvaluationParametersAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"update_data_docs\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"UpdateDataDocsAction\"", "}", ",", "}", ",", "]", ",", "}", "}", ",", "anonymous_usage_statistics", "=", "{", "\"enabled\"", ":", "True", "}", ",", ")", "desired_stores", "=", "{", "\"custom_evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "\"expectations_S3_store2\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "\"validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "\"validations_S3_store2\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "\"checkpoint_S3_store\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"class_name\"", ":", 
"\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"REPLACE_ME\"", ",", "}", ",", "}", ",", "}", "desired_data_docs_sites_config", "=", "{", "\"local_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"base_directory\"", ":", "\"uncommitted/data_docs/local_site/\"", ",", "\"class_name\"", ":", "\"TupleFilesystemStoreBackend\"", ",", "}", ",", "}", ",", "\"s3_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"REPLACE_ME\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "}", ",", "}", ",", "}", "desired_validation_operators", "=", "{", "\"custom_action_list_operator\"", ":", "{", "\"class_name\"", ":", "\"ActionListValidationOperator\"", ",", "\"action_list\"", ":", "[", "{", "\"name\"", ":", "\"custom_store_validation_result\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"CustomStoreValidationResultAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"store_evaluation_params\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"StoreEvaluationParametersAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"update_data_docs\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"UpdateDataDocsAction\"", "}", ",", "}", ",", "]", ",", "}", "}", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", ",", "datasources", "=", "{", "*", "*", "default_pandas_datasource_config", ",", "*", "*", "default_spark_datasource_config", ",", "}", ",", "config_version", "=", "999.0", ",", "expectations_store_name", "=", "\"custom_expectations_store_name\"", ",", 
"validations_store_name", "=", "\"custom_validations_store_name\"", ",", "evaluation_parameter_store_name", "=", "\"custom_evaluation_parameter_store_name\"", ",", "checkpoint_store_name", "=", "\"checkpoint_S3_store\"", ",", "stores", "=", "desired_stores", ",", "validation_operators", "=", "desired_validation_operators", ",", "data_docs_sites", "=", "desired_data_docs_sites_config", ",", "plugins_directory", "=", "\"custom_plugins_directory\"", ",", ")", "desired_config", "[", "\"config_variables_file_path\"", "]", "=", "\"custom_config_variables_file_path\"", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")" ]
[ 958, 0 ]
[ 1200, 74 ]
python
en
['en', 'error', 'th']
False
test_DataContextConfig_with_S3StoreBackendDefaults_and_simple_defaults_with_variable_sub
( monkeypatch, construct_data_context_config, default_pandas_datasource_config )
What does this test and why? Ensure that a very simple DataContextConfig setup with many defaults is created accurately and produces a valid DataContextConfig
What does this test and why? Ensure that a very simple DataContextConfig setup with many defaults is created accurately and produces a valid DataContextConfig
def test_DataContextConfig_with_S3StoreBackendDefaults_and_simple_defaults_with_variable_sub( monkeypatch, construct_data_context_config, default_pandas_datasource_config ): """ What does this test and why? Ensure that a very simple DataContextConfig setup with many defaults is created accurately and produces a valid DataContextConfig """ monkeypatch.setenv("SUBSTITUTED_BASE_DIRECTORY", "../data/") store_backend_defaults = S3StoreBackendDefaults( default_bucket_name="my_default_bucket" ) data_context_config = DataContextConfig( datasources={ "my_pandas_datasource": DatasourceConfig( class_name="PandasDatasource", batch_kwargs_generators={ "subdir_reader": { "class_name": "SubdirReaderBatchKwargsGenerator", "base_directory": "${SUBSTITUTED_BASE_DIRECTORY}", } }, ) }, store_backend_defaults=store_backend_defaults, ) # Create desired config desired_stores_config = { "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"}, "expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "bucket": "my_default_bucket", "class_name": "TupleS3StoreBackend", "prefix": "expectations", }, }, "validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "bucket": "my_default_bucket", "class_name": "TupleS3StoreBackend", "prefix": "validations", }, }, "checkpoint_S3_store": { "class_name": "CheckpointStore", "store_backend": { "bucket": "my_default_bucket", "class_name": "TupleS3StoreBackend", "prefix": "checkpoints", }, }, } desired_data_docs_sites_config = { "s3_site": { "class_name": "SiteBuilder", "show_how_to_buttons": True, "site_index_builder": { "class_name": "DefaultSiteIndexBuilder", }, "store_backend": { "bucket": "my_default_bucket", "class_name": "TupleS3StoreBackend", "prefix": "data_docs", }, } } desired_config = construct_data_context_config( data_context_id=data_context_config.anonymous_usage_statistics.data_context_id, datasources=default_pandas_datasource_config, 
expectations_store_name="expectations_S3_store", validations_store_name="validations_S3_store", checkpoint_store_name="checkpoint_S3_store", evaluation_parameter_store_name=DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value, stores=desired_stores_config, data_docs_sites=desired_data_docs_sites_config, ) desired_config["datasources"]["my_pandas_datasource"]["batch_kwargs_generators"][ "subdir_reader" ]["base_directory"] = "${SUBSTITUTED_BASE_DIRECTORY}" data_context_config_schema = DataContextConfigSchema() assert filter_properties_dict( properties=data_context_config_schema.dump(data_context_config), clean_falsy=True, ) == filter_properties_dict( properties=desired_config, clean_falsy=True, ) assert DataContext.validate_config(project_config=data_context_config) data_context = BaseDataContext(project_config=data_context_config) assert ( data_context.datasources["my_pandas_datasource"] .get_batch_kwargs_generator("subdir_reader") ._base_directory == "../data/" )
[ "def", "test_DataContextConfig_with_S3StoreBackendDefaults_and_simple_defaults_with_variable_sub", "(", "monkeypatch", ",", "construct_data_context_config", ",", "default_pandas_datasource_config", ")", ":", "monkeypatch", ".", "setenv", "(", "\"SUBSTITUTED_BASE_DIRECTORY\"", ",", "\"../data/\"", ")", "store_backend_defaults", "=", "S3StoreBackendDefaults", "(", "default_bucket_name", "=", "\"my_default_bucket\"", ")", "data_context_config", "=", "DataContextConfig", "(", "datasources", "=", "{", "\"my_pandas_datasource\"", ":", "DatasourceConfig", "(", "class_name", "=", "\"PandasDatasource\"", ",", "batch_kwargs_generators", "=", "{", "\"subdir_reader\"", ":", "{", "\"class_name\"", ":", "\"SubdirReaderBatchKwargsGenerator\"", ",", "\"base_directory\"", ":", "\"${SUBSTITUTED_BASE_DIRECTORY}\"", ",", "}", "}", ",", ")", "}", ",", "store_backend_defaults", "=", "store_backend_defaults", ",", ")", "# Create desired config", "desired_stores_config", "=", "{", "\"evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"expectations\"", ",", "}", ",", "}", ",", "\"validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"validations\"", ",", "}", ",", "}", ",", "\"checkpoint_S3_store\"", ":", "{", "\"class_name\"", ":", "\"CheckpointStore\"", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"checkpoints\"", ",", "}", ",", "}", ",", "}", "desired_data_docs_sites_config", "=", "{", 
"\"s3_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"show_how_to_buttons\"", ":", "True", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "\"store_backend\"", ":", "{", "\"bucket\"", ":", "\"my_default_bucket\"", ",", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"prefix\"", ":", "\"data_docs\"", ",", "}", ",", "}", "}", "desired_config", "=", "construct_data_context_config", "(", "data_context_id", "=", "data_context_config", ".", "anonymous_usage_statistics", ".", "data_context_id", ",", "datasources", "=", "default_pandas_datasource_config", ",", "expectations_store_name", "=", "\"expectations_S3_store\"", ",", "validations_store_name", "=", "\"validations_S3_store\"", ",", "checkpoint_store_name", "=", "\"checkpoint_S3_store\"", ",", "evaluation_parameter_store_name", "=", "DataContextConfigDefaults", ".", "DEFAULT_EVALUATION_PARAMETER_STORE_NAME", ".", "value", ",", "stores", "=", "desired_stores_config", ",", "data_docs_sites", "=", "desired_data_docs_sites_config", ",", ")", "desired_config", "[", "\"datasources\"", "]", "[", "\"my_pandas_datasource\"", "]", "[", "\"batch_kwargs_generators\"", "]", "[", "\"subdir_reader\"", "]", "[", "\"base_directory\"", "]", "=", "\"${SUBSTITUTED_BASE_DIRECTORY}\"", "data_context_config_schema", "=", "DataContextConfigSchema", "(", ")", "assert", "filter_properties_dict", "(", "properties", "=", "data_context_config_schema", ".", "dump", "(", "data_context_config", ")", ",", "clean_falsy", "=", "True", ",", ")", "==", "filter_properties_dict", "(", "properties", "=", "desired_config", ",", "clean_falsy", "=", "True", ",", ")", "assert", "DataContext", ".", "validate_config", "(", "project_config", "=", "data_context_config", ")", "data_context", "=", "BaseDataContext", "(", "project_config", "=", "data_context_config", ")", "assert", "(", "data_context", ".", "datasources", "[", "\"my_pandas_datasource\"", "]", ".", 
"get_batch_kwargs_generator", "(", "\"subdir_reader\"", ")", ".", "_base_directory", "==", "\"../data/\"", ")" ]
[ 1203, 0 ]
[ 1306, 5 ]
python
en
['en', 'error', 'th']
False
dt_to_str
(date, fmt='%Y-%m-%d')
Converts a datetime object to a string.
Converts a datetime object to a string.
def dt_to_str(date, fmt='%Y-%m-%d'): """ Converts a datetime object to a string. """ return date.strftime(fmt)
[ "def", "dt_to_str", "(", "date", ",", "fmt", "=", "'%Y-%m-%d'", ")", ":", "return", "date", ".", "strftime", "(", "fmt", ")" ]
[ 3, 0 ]
[ 7, 29 ]
python
en
['en', 'ja', 'th']
False
_n64_to_datetime
(n64)
Converts Numpy 64 bit timestamps to datetime objects. Units in seconds
Converts Numpy 64 bit timestamps to datetime objects. Units in seconds
def _n64_to_datetime(n64): """ Converts Numpy 64 bit timestamps to datetime objects. Units in seconds """ return datetime.utcfromtimestamp(n64.tolist() / 1e9)
[ "def", "_n64_to_datetime", "(", "n64", ")", ":", "return", "datetime", ".", "utcfromtimestamp", "(", "n64", ".", "tolist", "(", ")", "/", "1e9", ")" ]
[ 9, 0 ]
[ 13, 56 ]
python
en
['en', 'ja', 'th']
False
_n64_datetime_to_scalar
(dt64)
Converts a NumPy datetime64 object to the number of seconds since midnight, January 1, 1970, as a NumPy float64. Returns ------- scalar: numpy.float64 The number of seconds since midnight, January 1, 1970, as a NumPy float64.
Converts a NumPy datetime64 object to the number of seconds since midnight, January 1, 1970, as a NumPy float64. Returns ------- scalar: numpy.float64 The number of seconds since midnight, January 1, 1970, as a NumPy float64.
def _n64_datetime_to_scalar(dt64): """ Converts a NumPy datetime64 object to the number of seconds since midnight, January 1, 1970, as a NumPy float64. Returns ------- scalar: numpy.float64 The number of seconds since midnight, January 1, 1970, as a NumPy float64. """ return (dt64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
[ "def", "_n64_datetime_to_scalar", "(", "dt64", ")", ":", "return", "(", "dt64", "-", "np", ".", "datetime64", "(", "'1970-01-01T00:00:00Z'", ")", ")", "/", "np", ".", "timedelta64", "(", "1", ",", "'s'", ")" ]
[ 15, 0 ]
[ 25, 82 ]
python
en
['en', 'ja', 'th']
False
_scalar_to_n64_datetime
(scalar)
Converts a floating point number to a NumPy datetime64 object. Returns ------- dt64: numpy.datetime64 The NumPy datetime64 object representing the datetime of the scalar argument.
Converts a floating point number to a NumPy datetime64 object. Returns ------- dt64: numpy.datetime64 The NumPy datetime64 object representing the datetime of the scalar argument.
def _scalar_to_n64_datetime(scalar): """ Converts a floating point number to a NumPy datetime64 object. Returns ------- dt64: numpy.datetime64 The NumPy datetime64 object representing the datetime of the scalar argument. """ return (scalar * np.timedelta64(1, 's')) + np.datetime64('1970-01-01T00:00:00Z')
[ "def", "_scalar_to_n64_datetime", "(", "scalar", ")", ":", "return", "(", "scalar", "*", "np", ".", "timedelta64", "(", "1", ",", "'s'", ")", ")", "+", "np", ".", "datetime64", "(", "'1970-01-01T00:00:00Z'", ")" ]
[ 27, 0 ]
[ 36, 84 ]
python
en
['en', 'ja', 'th']
False
TransformersReader.__init__
( self, model_name_or_path: str = "distilbert-base-uncased-distilled-squad", model_version: Optional[str] = None, tokenizer: Optional[str] = None, context_window_size: int = 70, use_gpu: int = 0, top_k_per_candidate: int = 4, return_no_answers: bool = True, max_seq_len: int = 256, doc_stride: int = 128 )
Load a QA model from Transformers. Available models include: - ``'distilbert-base-uncased-distilled-squad`'`` - ``'bert-large-cased-whole-word-masking-finetuned-squad``' - ``'bert-large-uncased-whole-word-masking-finetuned-squad``' See https://huggingface.co/models for full list of available QA models :param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'bert-base-cased', 'deepset/bert-base-cased-squad2', 'deepset/bert-base-cased-squad2', 'distilbert-base-uncased-distilled-squad'. See https://huggingface.co/models for full list of available models. :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash. :param tokenizer: Name of the tokenizer (usually the same as model) :param context_window_size: Num of chars (before and after the answer) to return as "context" for each answer. The context usually helps users to understand if the answer really makes sense. :param use_gpu: If < 0, then use cpu. If >= 0, this is the ordinal of the gpu to use :param top_k_per_candidate: How many answers to extract for each candidate doc that is coming from the retriever (might be a long text). Note that this is not the number of "final answers" you will receive (see `top_k` in TransformersReader.predict() or Finder.get_answers() for that) and that no_answer can be included in the sorted list of predictions. :param return_no_answers: If True, the HuggingFace Transformers model could return a "no_answer" (i.e. when there is an unanswerable question) If False, it cannot return a "no_answer". Note that `no_answer_boost` is unfortunately not available with TransformersReader. If you would like to set no_answer_boost, use a `FARMReader`. :param max_seq_len: max sequence length of one input text for the model :param doc_stride: length of striding window for splitting long texts (used if len(text) > max_seq_len)
Load a QA model from Transformers. Available models include:
def __init__( self, model_name_or_path: str = "distilbert-base-uncased-distilled-squad", model_version: Optional[str] = None, tokenizer: Optional[str] = None, context_window_size: int = 70, use_gpu: int = 0, top_k_per_candidate: int = 4, return_no_answers: bool = True, max_seq_len: int = 256, doc_stride: int = 128 ): """ Load a QA model from Transformers. Available models include: - ``'distilbert-base-uncased-distilled-squad`'`` - ``'bert-large-cased-whole-word-masking-finetuned-squad``' - ``'bert-large-uncased-whole-word-masking-finetuned-squad``' See https://huggingface.co/models for full list of available QA models :param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'bert-base-cased', 'deepset/bert-base-cased-squad2', 'deepset/bert-base-cased-squad2', 'distilbert-base-uncased-distilled-squad'. See https://huggingface.co/models for full list of available models. :param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash. :param tokenizer: Name of the tokenizer (usually the same as model) :param context_window_size: Num of chars (before and after the answer) to return as "context" for each answer. The context usually helps users to understand if the answer really makes sense. :param use_gpu: If < 0, then use cpu. If >= 0, this is the ordinal of the gpu to use :param top_k_per_candidate: How many answers to extract for each candidate doc that is coming from the retriever (might be a long text). Note that this is not the number of "final answers" you will receive (see `top_k` in TransformersReader.predict() or Finder.get_answers() for that) and that no_answer can be included in the sorted list of predictions. :param return_no_answers: If True, the HuggingFace Transformers model could return a "no_answer" (i.e. when there is an unanswerable question) If False, it cannot return a "no_answer". 
Note that `no_answer_boost` is unfortunately not available with TransformersReader. If you would like to set no_answer_boost, use a `FARMReader`. :param max_seq_len: max sequence length of one input text for the model :param doc_stride: length of striding window for splitting long texts (used if len(text) > max_seq_len) """ self.model = pipeline('question-answering', model=model_name_or_path, tokenizer=tokenizer, device=use_gpu, revision=model_version) self.context_window_size = context_window_size self.top_k_per_candidate = top_k_per_candidate self.return_no_answers = return_no_answers self.max_seq_len = max_seq_len self.doc_stride = doc_stride
[ "def", "__init__", "(", "self", ",", "model_name_or_path", ":", "str", "=", "\"distilbert-base-uncased-distilled-squad\"", ",", "model_version", ":", "Optional", "[", "str", "]", "=", "None", ",", "tokenizer", ":", "Optional", "[", "str", "]", "=", "None", ",", "context_window_size", ":", "int", "=", "70", ",", "use_gpu", ":", "int", "=", "0", ",", "top_k_per_candidate", ":", "int", "=", "4", ",", "return_no_answers", ":", "bool", "=", "True", ",", "max_seq_len", ":", "int", "=", "256", ",", "doc_stride", ":", "int", "=", "128", ")", ":", "self", ".", "model", "=", "pipeline", "(", "'question-answering'", ",", "model", "=", "model_name_or_path", ",", "tokenizer", "=", "tokenizer", ",", "device", "=", "use_gpu", ",", "revision", "=", "model_version", ")", "self", ".", "context_window_size", "=", "context_window_size", "self", ".", "top_k_per_candidate", "=", "top_k_per_candidate", "self", ".", "return_no_answers", "=", "return_no_answers", "self", ".", "max_seq_len", "=", "max_seq_len", "self", ".", "doc_stride", "=", "doc_stride" ]
[ 16, 4 ]
[ 62, 36 ]
python
en
['en', 'error', 'th']
False
TransformersReader.predict
(self, query: str, documents: List[Document], top_k: Optional[int] = None)
Use loaded QA model to find answers for a query in the supplied list of Document. Returns dictionaries containing answers sorted by (desc.) probability. Example: ```python |{ | 'query': 'Who is the father of Arya Stark?', | 'answers':[ | {'answer': 'Eddard,', | 'context': " She travels with her father, Eddard, to King's Landing when he is ", | 'offset_answer_start': 147, | 'offset_answer_end': 154, | 'probability': 0.9787139466668613, | 'score': None, | 'document_id': '1337' | },... | ] |} ``` :param query: Query string :param documents: List of Document in which to search for the answer :param top_k: The maximum number of answers to return :return: Dict containing query and answers
Use loaded QA model to find answers for a query in the supplied list of Document.
def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None): """ Use loaded QA model to find answers for a query in the supplied list of Document. Returns dictionaries containing answers sorted by (desc.) probability. Example: ```python |{ | 'query': 'Who is the father of Arya Stark?', | 'answers':[ | {'answer': 'Eddard,', | 'context': " She travels with her father, Eddard, to King's Landing when he is ", | 'offset_answer_start': 147, | 'offset_answer_end': 154, | 'probability': 0.9787139466668613, | 'score': None, | 'document_id': '1337' | },... | ] |} ``` :param query: Query string :param documents: List of Document in which to search for the answer :param top_k: The maximum number of answers to return :return: Dict containing query and answers """ # get top-answers for each candidate passage answers = [] no_ans_gaps = [] best_overall_score = 0 for doc in documents: transformers_query = {"context": doc.text, "question": query} predictions = self.model(transformers_query, topk=self.top_k_per_candidate, handle_impossible_answer=self.return_no_answers, max_seq_len=self.max_seq_len, doc_stride=self.doc_stride) # for single preds (e.g. 
via top_k=1) transformers returns a dict instead of a list if type(predictions) == dict: predictions = [predictions] # assemble and format all answers best_doc_score = 0 # because we cannot ensure a "no answer" prediction coming back from transformers we initialize it here with 0 no_ans_doc_score = 0 # TODO add no answer bias on haystack side after getting "no answer" scores from transformers for pred in predictions: if pred["answer"]: if pred["score"] > best_doc_score: best_doc_score = pred["score"] context_start = max(0, pred["start"] - self.context_window_size) context_end = min(len(doc.text), pred["end"] + self.context_window_size) answers.append({ "answer": pred["answer"], "context": doc.text[context_start:context_end], "offset_start": pred["start"], "offset_end": pred["end"], "probability": pred["score"], "score": None, "document_id": doc.id, "meta": doc.meta }) else: no_ans_doc_score = pred["score"] if best_doc_score > best_overall_score: best_overall_score = best_doc_score no_ans_gaps.append(no_ans_doc_score - best_doc_score) # Calculate the score for predicting "no answer", relative to our best positive answer score no_ans_prediction, max_no_ans_gap = self._calc_no_answer(no_ans_gaps, best_overall_score) if self.return_no_answers: answers.append(no_ans_prediction) # sort answers by their `probability` and select top-k answers = sorted( answers, key=lambda k: k["probability"], reverse=True ) answers = answers[:top_k] results = {"query": query, "answers": answers} return results
[ "def", "predict", "(", "self", ",", "query", ":", "str", ",", "documents", ":", "List", "[", "Document", "]", ",", "top_k", ":", "Optional", "[", "int", "]", "=", "None", ")", ":", "# get top-answers for each candidate passage", "answers", "=", "[", "]", "no_ans_gaps", "=", "[", "]", "best_overall_score", "=", "0", "for", "doc", "in", "documents", ":", "transformers_query", "=", "{", "\"context\"", ":", "doc", ".", "text", ",", "\"question\"", ":", "query", "}", "predictions", "=", "self", ".", "model", "(", "transformers_query", ",", "topk", "=", "self", ".", "top_k_per_candidate", ",", "handle_impossible_answer", "=", "self", ".", "return_no_answers", ",", "max_seq_len", "=", "self", ".", "max_seq_len", ",", "doc_stride", "=", "self", ".", "doc_stride", ")", "# for single preds (e.g. via top_k=1) transformers returns a dict instead of a list", "if", "type", "(", "predictions", ")", "==", "dict", ":", "predictions", "=", "[", "predictions", "]", "# assemble and format all answers", "best_doc_score", "=", "0", "# because we cannot ensure a \"no answer\" prediction coming back from transformers we initialize it here with 0", "no_ans_doc_score", "=", "0", "# TODO add no answer bias on haystack side after getting \"no answer\" scores from transformers", "for", "pred", "in", "predictions", ":", "if", "pred", "[", "\"answer\"", "]", ":", "if", "pred", "[", "\"score\"", "]", ">", "best_doc_score", ":", "best_doc_score", "=", "pred", "[", "\"score\"", "]", "context_start", "=", "max", "(", "0", ",", "pred", "[", "\"start\"", "]", "-", "self", ".", "context_window_size", ")", "context_end", "=", "min", "(", "len", "(", "doc", ".", "text", ")", ",", "pred", "[", "\"end\"", "]", "+", "self", ".", "context_window_size", ")", "answers", ".", "append", "(", "{", "\"answer\"", ":", "pred", "[", "\"answer\"", "]", ",", "\"context\"", ":", "doc", ".", "text", "[", "context_start", ":", "context_end", "]", ",", "\"offset_start\"", ":", "pred", "[", "\"start\"", "]", ",", 
"\"offset_end\"", ":", "pred", "[", "\"end\"", "]", ",", "\"probability\"", ":", "pred", "[", "\"score\"", "]", ",", "\"score\"", ":", "None", ",", "\"document_id\"", ":", "doc", ".", "id", ",", "\"meta\"", ":", "doc", ".", "meta", "}", ")", "else", ":", "no_ans_doc_score", "=", "pred", "[", "\"score\"", "]", "if", "best_doc_score", ">", "best_overall_score", ":", "best_overall_score", "=", "best_doc_score", "no_ans_gaps", ".", "append", "(", "no_ans_doc_score", "-", "best_doc_score", ")", "# Calculate the score for predicting \"no answer\", relative to our best positive answer score", "no_ans_prediction", ",", "max_no_ans_gap", "=", "self", ".", "_calc_no_answer", "(", "no_ans_gaps", ",", "best_overall_score", ")", "if", "self", ".", "return_no_answers", ":", "answers", ".", "append", "(", "no_ans_prediction", ")", "# sort answers by their `probability` and select top-k", "answers", "=", "sorted", "(", "answers", ",", "key", "=", "lambda", "k", ":", "k", "[", "\"probability\"", "]", ",", "reverse", "=", "True", ")", "answers", "=", "answers", "[", ":", "top_k", "]", "results", "=", "{", "\"query\"", ":", "query", ",", "\"answers\"", ":", "answers", "}", "return", "results" ]
[ 66, 4 ]
[ 153, 22 ]
python
en
['en', 'error', 'th']
False
IRAmbassadorTLS.__init__
(self, ir: 'IR', aconf: Config, rkey: str="ir.tlsmodule", kind: str="IRTLSModule", name: str="ir.tlsmodule", enabled: bool=True, **kwargs)
Initialize an IRAmbassadorTLS from the raw fields of its Resource.
Initialize an IRAmbassadorTLS from the raw fields of its Resource.
def __init__(self, ir: 'IR', aconf: Config, rkey: str="ir.tlsmodule", kind: str="IRTLSModule", name: str="ir.tlsmodule", enabled: bool=True, **kwargs) -> None: """ Initialize an IRAmbassadorTLS from the raw fields of its Resource. """ ir.logger.debug("IRAmbassadorTLS __init__ (%s %s %s)" % (kind, name, kwargs)) super().__init__( ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name, enabled=enabled, **kwargs )
[ "def", "__init__", "(", "self", ",", "ir", ":", "'IR'", ",", "aconf", ":", "Config", ",", "rkey", ":", "str", "=", "\"ir.tlsmodule\"", ",", "kind", ":", "str", "=", "\"IRTLSModule\"", ",", "name", ":", "str", "=", "\"ir.tlsmodule\"", ",", "enabled", ":", "bool", "=", "True", ",", "*", "*", "kwargs", ")", "->", "None", ":", "ir", ".", "logger", ".", "debug", "(", "\"IRAmbassadorTLS __init__ (%s %s %s)\"", "%", "(", "kind", ",", "name", ",", "kwargs", ")", ")", "super", "(", ")", ".", "__init__", "(", "ir", "=", "ir", ",", "aconf", "=", "aconf", ",", "rkey", "=", "rkey", ",", "kind", "=", "kind", ",", "name", "=", "name", ",", "enabled", "=", "enabled", ",", "*", "*", "kwargs", ")" ]
[ 40, 4 ]
[ 57, 9 ]
python
en
['en', 'error', 'th']
False
critical_suite_with_citations
()
This hand made fixture has a wide range of expectations, and has a mix of metadata including an BasicSuiteBuilderProfiler entry, and citations.
This hand made fixture has a wide range of expectations, and has a mix of metadata including an BasicSuiteBuilderProfiler entry, and citations.
def critical_suite_with_citations() -> ExpectationSuite: """ This hand made fixture has a wide range of expectations, and has a mix of metadata including an BasicSuiteBuilderProfiler entry, and citations. """ schema: ExpectationSuiteSchema = ExpectationSuiteSchema() critical_suite: dict = { "expectation_suite_name": "critical", "meta": { "great_expectations_version": "0.13.15+7252.g32fa98e2a.dirty", "columns": { "npi": {"description": ""}, "nppes_provider_last_org_name": {"description": ""}, "nppes_provider_first_name": {"description": ""}, "nppes_provider_mi": {"description": ""}, "nppes_credentials": {"description": ""}, "nppes_provider_gender": {"description": ""}, "nppes_entity_code": {"description": ""}, "nppes_provider_street1": {"description": ""}, "nppes_provider_street2": {"description": ""}, "nppes_provider_city": {"description": ""}, }, "citations": [ { "citation_date": "2020-02-28T17:34:31.307271", "batch_request": { "datasource_name": "files_datasource", "data_connector_name": "files_data_connector", "data_asset_name": "10k", }, "batch_markers": { "ge_load_time": "20200229T013430.655026Z", "pandas_data_fingerprint": "f6037d92eb4c01f976513bc0aec2420d", }, "batch_parameters": None, "comment": "BasicSuiteBuilderProfiler added a citation based on the current batch.", } ], "notes": { "format": "markdown", "content": [ "#### This is an _example_ suite\n\n- This suite was made by quickly glancing at 1000 rows of your data.\n- This is **not a production suite**. 
It is meant to show examples of expectations.\n- Because this suite was auto-generated using a very basic profiler that does not know your data like you do, many of the expectations may not be meaningful.\n" ], }, "BasicSuiteBuilderProfiler": { "created_by": "BasicSuiteBuilderProfiler", "created_at": 1582838223.843476, "batch_request": { "datasource_name": "files_datasource", "data_connector_name": "files_data_connector", "data_asset_name": "10k", }, }, }, "expectations": [ { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "npi"}, "meta": { "question": True, "Notes": "There are empty strings that should probably be nulls", "BasicSuiteBuilderProfiler": {"confidence": "very low"}, }, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "provider_type"}, }, ], "data_asset_type": "Dataset", } return schema.loads(json.dumps(critical_suite))
[ "def", "critical_suite_with_citations", "(", ")", "->", "ExpectationSuite", ":", "schema", ":", "ExpectationSuiteSchema", "=", "ExpectationSuiteSchema", "(", ")", "critical_suite", ":", "dict", "=", "{", "\"expectation_suite_name\"", ":", "\"critical\"", ",", "\"meta\"", ":", "{", "\"great_expectations_version\"", ":", "\"0.13.15+7252.g32fa98e2a.dirty\"", ",", "\"columns\"", ":", "{", "\"npi\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_provider_last_org_name\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_provider_first_name\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_provider_mi\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_credentials\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_provider_gender\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_entity_code\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_provider_street1\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_provider_street2\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "\"nppes_provider_city\"", ":", "{", "\"description\"", ":", "\"\"", "}", ",", "}", ",", "\"citations\"", ":", "[", "{", "\"citation_date\"", ":", "\"2020-02-28T17:34:31.307271\"", ",", "\"batch_request\"", ":", "{", "\"datasource_name\"", ":", "\"files_datasource\"", ",", "\"data_connector_name\"", ":", "\"files_data_connector\"", ",", "\"data_asset_name\"", ":", "\"10k\"", ",", "}", ",", "\"batch_markers\"", ":", "{", "\"ge_load_time\"", ":", "\"20200229T013430.655026Z\"", ",", "\"pandas_data_fingerprint\"", ":", "\"f6037d92eb4c01f976513bc0aec2420d\"", ",", "}", ",", "\"batch_parameters\"", ":", "None", ",", "\"comment\"", ":", "\"BasicSuiteBuilderProfiler added a citation based on the current batch.\"", ",", "}", "]", ",", "\"notes\"", ":", "{", "\"format\"", ":", "\"markdown\"", ",", "\"content\"", ":", "[", "\"#### This is an _example_ suite\\n\\n- This suite was made by quickly 
glancing at 1000 rows of your data.\\n- This is **not a production suite**. It is meant to show examples of expectations.\\n- Because this suite was auto-generated using a very basic profiler that does not know your data like you do, many of the expectations may not be meaningful.\\n\"", "]", ",", "}", ",", "\"BasicSuiteBuilderProfiler\"", ":", "{", "\"created_by\"", ":", "\"BasicSuiteBuilderProfiler\"", ",", "\"created_at\"", ":", "1582838223.843476", ",", "\"batch_request\"", ":", "{", "\"datasource_name\"", ":", "\"files_datasource\"", ",", "\"data_connector_name\"", ":", "\"files_data_connector\"", ",", "\"data_asset_name\"", ":", "\"10k\"", ",", "}", ",", "}", ",", "}", ",", "\"expectations\"", ":", "[", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"npi\"", "}", ",", "\"meta\"", ":", "{", "\"question\"", ":", "True", ",", "\"Notes\"", ":", "\"There are empty strings that should probably be nulls\"", ",", "\"BasicSuiteBuilderProfiler\"", ":", "{", "\"confidence\"", ":", "\"very low\"", "}", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"provider_type\"", "}", ",", "}", ",", "]", ",", "\"data_asset_type\"", ":", "\"Dataset\"", ",", "}", "return", "schema", ".", "loads", "(", "json", ".", "dumps", "(", "critical_suite", ")", ")" ]
[ 27, 0 ]
[ 98, 51 ]
python
en
['en', 'error', 'th']
False
suite_with_multiple_citations
()
A handmade suite with multiple citations each with different batch_request. The most recent citation does not have batch_request
A handmade suite with multiple citations each with different batch_request.
def suite_with_multiple_citations() -> ExpectationSuite: """ A handmade suite with multiple citations each with different batch_request. The most recent citation does not have batch_request """ schema: ExpectationSuiteSchema = ExpectationSuiteSchema() critical_suite: dict = { "expectation_suite_name": "critical", "meta": { "great_expectations_version": "0.13.15+7252.g32fa98e2a.dirty", "citations": [ { "citation_date": "2000-01-01T00:00:01.000001", "batch_request": { "datasource_name": "files_datasource", "data_connector_name": "files_data_connector", "data_asset_name": "1k", }, }, # This citation is the most recent and has no batch_request { "citation_date": "2020-01-01T00:00:01.000001", }, { "citation_date": "1999-01-01T00:00:01.000001", "batch_request": { "datasource_name": "files_datasource", "data_connector_name": "files_data_connector", "data_asset_name": "2k", }, }, ], }, "expectations": [ { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "npi"}, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "provider_type"}, }, ], "data_asset_type": "Dataset", } return schema.loads(json.dumps(critical_suite))
[ "def", "suite_with_multiple_citations", "(", ")", "->", "ExpectationSuite", ":", "schema", ":", "ExpectationSuiteSchema", "=", "ExpectationSuiteSchema", "(", ")", "critical_suite", ":", "dict", "=", "{", "\"expectation_suite_name\"", ":", "\"critical\"", ",", "\"meta\"", ":", "{", "\"great_expectations_version\"", ":", "\"0.13.15+7252.g32fa98e2a.dirty\"", ",", "\"citations\"", ":", "[", "{", "\"citation_date\"", ":", "\"2000-01-01T00:00:01.000001\"", ",", "\"batch_request\"", ":", "{", "\"datasource_name\"", ":", "\"files_datasource\"", ",", "\"data_connector_name\"", ":", "\"files_data_connector\"", ",", "\"data_asset_name\"", ":", "\"1k\"", ",", "}", ",", "}", ",", "# This citation is the most recent and has no batch_request", "{", "\"citation_date\"", ":", "\"2020-01-01T00:00:01.000001\"", ",", "}", ",", "{", "\"citation_date\"", ":", "\"1999-01-01T00:00:01.000001\"", ",", "\"batch_request\"", ":", "{", "\"datasource_name\"", ":", "\"files_datasource\"", ",", "\"data_connector_name\"", ":", "\"files_data_connector\"", ",", "\"data_asset_name\"", ":", "\"2k\"", ",", "}", ",", "}", ",", "]", ",", "}", ",", "\"expectations\"", ":", "[", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"npi\"", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"provider_type\"", "}", ",", "}", ",", "]", ",", "\"data_asset_type\"", ":", "\"Dataset\"", ",", "}", "return", "schema", ".", "loads", "(", "json", ".", "dumps", "(", "critical_suite", ")", ")" ]
[ 102, 0 ]
[ 148, 51 ]
python
en
['en', 'error', 'th']
False
warning_suite
()
This hand made fixture has a wide range of expectations, and has a mix of metadata including BasicSuiteBuilderProfiler entries.
This hand made fixture has a wide range of expectations, and has a mix of metadata including BasicSuiteBuilderProfiler entries.
def warning_suite() -> ExpectationSuite: """ This hand made fixture has a wide range of expectations, and has a mix of metadata including BasicSuiteBuilderProfiler entries. """ schema: ExpectationSuiteSchema = ExpectationSuiteSchema() warning_suite: dict = { "expectation_suite_name": "warning", "meta": { "great_expectations_version": "0.13.15+7252.g32fa98e2a.dirty", "citations": [ { "citation_date": "2020-02-28T17:34:31.307271", "batch_request": { "datasource_name": "files_datasource", "data_connector_name": "files_data_connector", "data_asset_name": "10k", }, "batch_markers": { "ge_load_time": "20200229T013430.655026Z", "pandas_data_fingerprint": "f6037d92eb4c01f976513bc0aec2420d", }, "batch_parameters": None, "comment": "BasicSuiteBuilderProfiler added a citation based on the current batch.", } ], }, "expectations": [ { "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"min_value": 800000, "max_value": 1200000}, }, { "expectation_type": "expect_table_column_count_to_equal", "kwargs": {"value": 71}, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "npi"}, "meta": {"BasicSuiteBuilderProfiler": {"confidence": "very low"}}, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "provider_type"}, "meta": {"BasicSuiteBuilderProfiler": {"confidence": "very low"}}, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "nppes_provider_last_org_name"}, }, { "expectation_type": "expect_column_values_to_be_in_set", "kwargs": { "column": "nppes_provider_gender", "value_set": ["M", "F", ""], }, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "nppes_entity_code"}, }, { "expectation_type": "expect_column_values_to_be_in_set", "kwargs": {"column": "nppes_entity_code", "value_set": ["I", "O"]}, }, { "expectation_type": "expect_column_kl_divergence_to_be_less_than", "kwargs": { "column": "nppes_entity_code", "partition_object": { 
"values": ["I", "O"], "weights": [0.9431769750233306, 0.056823024976669335], }, "threshold": 0.1, }, }, { "expectation_type": "expect_column_values_to_be_in_set", "kwargs": { "column": "nppes_provider_state", "value_set": [ "AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY", "DC", "PR", "AE", "VI", ], "mostly": 0.999, }, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "medicare_participation_indicator"}, }, { "expectation_type": "expect_column_values_to_be_in_set", "kwargs": { "column": "medicare_participation_indicator", "value_set": ["Y", "N"], }, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "number_of_hcpcs"}, }, { "expectation_type": "expect_column_values_to_be_between", "kwargs": { "column": "number_of_hcpcs", "min_value": 0, "max_value": 500, "mostly": 0.999, }, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "total_unique_benes"}, }, { "expectation_type": "expect_column_values_to_be_between", "kwargs": { "column": "total_unique_benes", "min_value": 0, "max_value": 2000, "mostly": 0.95, }, }, { "expectation_type": "expect_column_values_to_be_null", "kwargs": {"column": "med_suppress_indicator", "mostly": 0.85}, }, { "expectation_type": "expect_column_values_to_be_in_set", "kwargs": {"column": "med_suppress_indicator", "value_set": ["#", "*"]}, }, { "expectation_type": "expect_column_values_to_be_between", "kwargs": { "column": "beneficiary_average_age", "min_value": 40, "max_value": 90, "mostly": 0.995, }, }, { "expectation_type": "expect_column_kl_divergence_to_be_less_than", "kwargs": { "column": "beneficiary_average_age", "partition_object": { "bins": [8, 16.5, 25, 33.5, 42, 50.5, 59, 67.5, 76, 84.5, 
93], "weights": [ 0.00025259576594384474, 0.00013318685840675451, 0.0009653750909344757, 0.0012363414580378728, 0.01081660996274442, 0.030813927854975127, 0.13495227317818748, 0.6919590041664524, 0.1244213260634741, 0.004449359600843578, ], }, "threshold": 0.9, }, }, { "expectation_type": "expect_column_values_to_be_between", "kwargs": { "column": "total_submitted_chrg_amt", "min_value": 2000, "max_value": 5000000, "mostly": 0.98, }, }, { "expectation_type": "expect_column_values_to_not_be_null", "kwargs": {"column": "nppes_provider_first_name", "mostly": 0.9}, }, { "expectation_type": "expect_column_values_to_match_regex", "kwargs": { "column": "nppes_provider_zip", "regex": "^\\d*$", "mostly": 0.999, }, }, ], "data_asset_type": "Dataset", } return schema.loads(json.dumps(warning_suite))
[ "def", "warning_suite", "(", ")", "->", "ExpectationSuite", ":", "schema", ":", "ExpectationSuiteSchema", "=", "ExpectationSuiteSchema", "(", ")", "warning_suite", ":", "dict", "=", "{", "\"expectation_suite_name\"", ":", "\"warning\"", ",", "\"meta\"", ":", "{", "\"great_expectations_version\"", ":", "\"0.13.15+7252.g32fa98e2a.dirty\"", ",", "\"citations\"", ":", "[", "{", "\"citation_date\"", ":", "\"2020-02-28T17:34:31.307271\"", ",", "\"batch_request\"", ":", "{", "\"datasource_name\"", ":", "\"files_datasource\"", ",", "\"data_connector_name\"", ":", "\"files_data_connector\"", ",", "\"data_asset_name\"", ":", "\"10k\"", ",", "}", ",", "\"batch_markers\"", ":", "{", "\"ge_load_time\"", ":", "\"20200229T013430.655026Z\"", ",", "\"pandas_data_fingerprint\"", ":", "\"f6037d92eb4c01f976513bc0aec2420d\"", ",", "}", ",", "\"batch_parameters\"", ":", "None", ",", "\"comment\"", ":", "\"BasicSuiteBuilderProfiler added a citation based on the current batch.\"", ",", "}", "]", ",", "}", ",", "\"expectations\"", ":", "[", "{", "\"expectation_type\"", ":", "\"expect_table_row_count_to_be_between\"", ",", "\"kwargs\"", ":", "{", "\"min_value\"", ":", "800000", ",", "\"max_value\"", ":", "1200000", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_table_column_count_to_equal\"", ",", "\"kwargs\"", ":", "{", "\"value\"", ":", "71", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"npi\"", "}", ",", "\"meta\"", ":", "{", "\"BasicSuiteBuilderProfiler\"", ":", "{", "\"confidence\"", ":", "\"very low\"", "}", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"provider_type\"", "}", ",", "\"meta\"", ":", "{", "\"BasicSuiteBuilderProfiler\"", ":", "{", "\"confidence\"", ":", "\"very low\"", "}", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", 
"\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"nppes_provider_last_org_name\"", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_in_set\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"nppes_provider_gender\"", ",", "\"value_set\"", ":", "[", "\"M\"", ",", "\"F\"", ",", "\"\"", "]", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"nppes_entity_code\"", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_in_set\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"nppes_entity_code\"", ",", "\"value_set\"", ":", "[", "\"I\"", ",", "\"O\"", "]", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_kl_divergence_to_be_less_than\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"nppes_entity_code\"", ",", "\"partition_object\"", ":", "{", "\"values\"", ":", "[", "\"I\"", ",", "\"O\"", "]", ",", "\"weights\"", ":", "[", "0.9431769750233306", ",", "0.056823024976669335", "]", ",", "}", ",", "\"threshold\"", ":", "0.1", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_in_set\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"nppes_provider_state\"", ",", "\"value_set\"", ":", "[", "\"AL\"", ",", "\"AK\"", ",", "\"AZ\"", ",", "\"AR\"", ",", "\"CA\"", ",", "\"CO\"", ",", "\"CT\"", ",", "\"DE\"", ",", "\"FL\"", ",", "\"GA\"", ",", "\"HI\"", ",", "\"ID\"", ",", "\"IL\"", ",", "\"IN\"", ",", "\"IA\"", ",", "\"KS\"", ",", "\"KY\"", ",", "\"LA\"", ",", "\"ME\"", ",", "\"MD\"", ",", "\"MA\"", ",", "\"MI\"", ",", "\"MN\"", ",", "\"MS\"", ",", "\"MO\"", ",", "\"MT\"", ",", "\"NE\"", ",", "\"NV\"", ",", "\"NH\"", ",", "\"NJ\"", ",", "\"NM\"", ",", "\"NY\"", ",", "\"NC\"", ",", "\"ND\"", ",", "\"OH\"", ",", "\"OK\"", ",", "\"OR\"", ",", "\"PA\"", ",", "\"RI\"", ",", "\"SC\"", ",", "\"SD\"", ",", 
"\"TN\"", ",", "\"TX\"", ",", "\"UT\"", ",", "\"VT\"", ",", "\"VA\"", ",", "\"WA\"", ",", "\"WV\"", ",", "\"WI\"", ",", "\"WY\"", ",", "\"DC\"", ",", "\"PR\"", ",", "\"AE\"", ",", "\"VI\"", ",", "]", ",", "\"mostly\"", ":", "0.999", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"medicare_participation_indicator\"", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_in_set\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"medicare_participation_indicator\"", ",", "\"value_set\"", ":", "[", "\"Y\"", ",", "\"N\"", "]", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"number_of_hcpcs\"", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_between\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"number_of_hcpcs\"", ",", "\"min_value\"", ":", "0", ",", "\"max_value\"", ":", "500", ",", "\"mostly\"", ":", "0.999", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"total_unique_benes\"", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_between\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"total_unique_benes\"", ",", "\"min_value\"", ":", "0", ",", "\"max_value\"", ":", "2000", ",", "\"mostly\"", ":", "0.95", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"med_suppress_indicator\"", ",", "\"mostly\"", ":", "0.85", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_in_set\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"med_suppress_indicator\"", ",", "\"value_set\"", ":", "[", "\"#\"", ",", "\"*\"", "]", "}", 
",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_between\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"beneficiary_average_age\"", ",", "\"min_value\"", ":", "40", ",", "\"max_value\"", ":", "90", ",", "\"mostly\"", ":", "0.995", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_kl_divergence_to_be_less_than\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"beneficiary_average_age\"", ",", "\"partition_object\"", ":", "{", "\"bins\"", ":", "[", "8", ",", "16.5", ",", "25", ",", "33.5", ",", "42", ",", "50.5", ",", "59", ",", "67.5", ",", "76", ",", "84.5", ",", "93", "]", ",", "\"weights\"", ":", "[", "0.00025259576594384474", ",", "0.00013318685840675451", ",", "0.0009653750909344757", ",", "0.0012363414580378728", ",", "0.01081660996274442", ",", "0.030813927854975127", ",", "0.13495227317818748", ",", "0.6919590041664524", ",", "0.1244213260634741", ",", "0.004449359600843578", ",", "]", ",", "}", ",", "\"threshold\"", ":", "0.9", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_be_between\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"total_submitted_chrg_amt\"", ",", "\"min_value\"", ":", "2000", ",", "\"max_value\"", ":", "5000000", ",", "\"mostly\"", ":", "0.98", ",", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_not_be_null\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"nppes_provider_first_name\"", ",", "\"mostly\"", ":", "0.9", "}", ",", "}", ",", "{", "\"expectation_type\"", ":", "\"expect_column_values_to_match_regex\"", ",", "\"kwargs\"", ":", "{", "\"column\"", ":", "\"nppes_provider_zip\"", ",", "\"regex\"", ":", "\"^\\\\d*$\"", ",", "\"mostly\"", ":", "0.999", ",", "}", ",", "}", ",", "]", ",", "\"data_asset_type\"", ":", "\"Dataset\"", ",", "}", "return", "schema", ".", "loads", "(", "json", ".", "dumps", "(", "warning_suite", ")", ")" ]
[ 152, 0 ]
[ 391, 50 ]
python
en
['en', 'error', 'th']
False
test_notebook_execution_with_pandas_backend
( titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, )
To set this test up we: - create a suite - add a few expectations (both table and column level) - verify that no validations have happened - create the suite edit notebook by hijacking the private cli method We then: - execute that notebook (Note this will raise various errors like CellExecutionError if any cell in the notebook fails - create a new context from disk - verify that a validation has been run with our expectation suite
To set this test up we:
def test_notebook_execution_with_pandas_backend( titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ To set this test up we: - create a suite - add a few expectations (both table and column level) - verify that no validations have happened - create the suite edit notebook by hijacking the private cli method We then: - execute that notebook (Note this will raise various errors like CellExecutionError if any cell in the notebook fails - create a new context from disk - verify that a validation has been run with our expectation suite """ # Since we'll run the notebook, we use a context with no data docs to avoid the renderer's default # behavior of building and opening docs, which is not part of this test. context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled root_dir: str = context.root_directory uncommitted_dir: str = os.path.join(root_dir, "uncommitted") expectation_suite_name: str = "warning" context.create_expectation_suite(expectation_suite_name=expectation_suite_name) batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1912", } validator: Validator = context.get_validator( batch_request=BatchRequest(**batch_request), expectation_suite_name=expectation_suite_name, ) validator.expect_table_column_count_to_equal(1) validator.expect_table_row_count_to_equal(1313) validator.expect_column_values_to_be_in_set("Sex", ["female", "male"]) validator.save_expectation_suite(discard_failed_expectations=False) # Sanity check test setup original_suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert len(original_suite.expectations) == 3 assert context.list_expectation_suite_names() == [expectation_suite_name] assert context.list_datasources() == [ { "name": "my_datasource", "class_name": "Datasource", 
"module_name": "great_expectations.datasource", "execution_engine": { "class_name": "PandasExecutionEngine", "module_name": "great_expectations.execution_engine", }, "data_connectors": { "my_basic_data_connector": { "module_name": "great_expectations.datasource.data_connector", "base_directory": f"{root_dir}/../data/titanic", "default_regex": { "pattern": "(.*)\\.csv", "group_names": ["data_asset_name"], }, "class_name": "InferredAssetFilesystemDataConnector", }, "my_special_data_connector": { "glob_directive": "*.csv", "assets": { "users": { "pattern": "(.+)_(\\d+)_(\\d+)\\.csv", "group_names": ["name", "timestamp", "size"], "class_name": "Asset", "base_directory": f"{root_dir}/../data/titanic", "module_name": "great_expectations.datasource.data_connector.asset", } }, "module_name": "great_expectations.datasource.data_connector", "base_directory": f"{root_dir}/../data/titanic", "default_regex": {"pattern": "(.+)\\.csv", "group_names": ["name"]}, "class_name": "ConfiguredAssetFilesystemDataConnector", }, "my_other_data_connector": { "glob_directive": "*.csv", "assets": { "users": { "class_name": "Asset", "module_name": "great_expectations.datasource.data_connector.asset", } }, "module_name": "great_expectations.datasource.data_connector", "base_directory": f"{root_dir}/../data/titanic", "default_regex": {"pattern": "(.+)\\.csv", "group_names": ["name"]}, "class_name": "ConfiguredAssetFilesystemDataConnector", }, "my_runtime_data_connector": { "module_name": "great_expectations.datasource.data_connector", "batch_identifiers": ["pipeline_stage_name", "airflow_run_id"], "class_name": "RuntimeDataConnector", }, }, }, { "name": "my_additional_datasource", "class_name": "Datasource", "module_name": "great_expectations.datasource", "execution_engine": { "module_name": "great_expectations.execution_engine", "class_name": "PandasExecutionEngine", }, "data_connectors": { "my_additional_data_connector": { "module_name": "great_expectations.datasource.data_connector", 
"default_regex": { "pattern": "(.*)\\.csv", "group_names": ["data_asset_name"], }, "base_directory": f"{root_dir}/../data/titanic", "class_name": "InferredAssetFilesystemDataConnector", } }, }, ] assert context.get_validation_result(expectation_suite_name="warning") == {} # Create notebook # do not want to actually send usage_message, since the function call is not the result of actual usage _suite_edit_workflow( context=context, expectation_suite_name=expectation_suite_name, profile=False, usage_event="test_notebook_execution", interactive=False, no_jupyter=True, create_if_not_exist=False, datasource_name=None, batch_request=batch_request, additional_batch_request_args=None, suppress_usage_message=True, ) edit_notebook_path: str = os.path.join(uncommitted_dir, "edit_warning.ipynb") assert os.path.isfile(edit_notebook_path) run_notebook( notebook_path=edit_notebook_path, notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) # Assertions about output context = DataContext(context_root_dir=root_dir) obs_validation_result: ExpectationSuiteValidationResult = ( context.get_validation_result(expectation_suite_name="warning") ) assert obs_validation_result.statistics == { "evaluated_expectations": 3, "successful_expectations": 2, "unsuccessful_expectations": 1, "success_percent": 66.66666666666666, } suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) suite["meta"].pop("citations", None) assert suite == original_suite
[ "def", "test_notebook_execution_with_pandas_backend", "(", "titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", ",", ")", ":", "# Since we'll run the notebook, we use a context with no data docs to avoid the renderer's default", "# behavior of building and opening docs, which is not part of this test.", "context", ":", "DataContext", "=", "titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", "root_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"warning\"", "context", ".", "create_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_datasource\"", ",", "\"data_connector_name\"", ":", "\"my_basic_data_connector\"", ",", "\"data_asset_name\"", ":", "\"Titanic_1912\"", ",", "}", "validator", ":", "Validator", "=", "context", ".", "get_validator", "(", "batch_request", "=", "BatchRequest", "(", "*", "*", "batch_request", ")", ",", "expectation_suite_name", "=", "expectation_suite_name", ",", ")", "validator", ".", "expect_table_column_count_to_equal", "(", "1", ")", "validator", ".", "expect_table_row_count_to_equal", "(", "1313", ")", "validator", ".", "expect_column_values_to_be_in_set", "(", "\"Sex\"", ",", "[", "\"female\"", ",", "\"male\"", "]", ")", "validator", ".", "save_expectation_suite", "(", "discard_failed_expectations", "=", "False", ")", "# Sanity check test setup", "original_suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "len", "(", "original_suite", ".", "expectations", ")", "==", "3", "assert", "context", ".", "list_expectation_suite_names", "(", ")", "==", "[", 
"expectation_suite_name", "]", "assert", "context", ".", "list_datasources", "(", ")", "==", "[", "{", "\"name\"", ":", "\"my_datasource\"", ",", "\"class_name\"", ":", "\"Datasource\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource\"", ",", "\"execution_engine\"", ":", "{", "\"class_name\"", ":", "\"PandasExecutionEngine\"", ",", "\"module_name\"", ":", "\"great_expectations.execution_engine\"", ",", "}", ",", "\"data_connectors\"", ":", "{", "\"my_basic_data_connector\"", ":", "{", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"default_regex\"", ":", "{", "\"pattern\"", ":", "\"(.*)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"data_asset_name\"", "]", ",", "}", ",", "\"class_name\"", ":", "\"InferredAssetFilesystemDataConnector\"", ",", "}", ",", "\"my_special_data_connector\"", ":", "{", "\"glob_directive\"", ":", "\"*.csv\"", ",", "\"assets\"", ":", "{", "\"users\"", ":", "{", "\"pattern\"", ":", "\"(.+)_(\\\\d+)_(\\\\d+)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"name\"", ",", "\"timestamp\"", ",", "\"size\"", "]", ",", "\"class_name\"", ":", "\"Asset\"", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector.asset\"", ",", "}", "}", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"default_regex\"", ":", "{", "\"pattern\"", ":", "\"(.+)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"name\"", "]", "}", ",", "\"class_name\"", ":", "\"ConfiguredAssetFilesystemDataConnector\"", ",", "}", ",", "\"my_other_data_connector\"", ":", "{", "\"glob_directive\"", ":", "\"*.csv\"", ",", "\"assets\"", ":", "{", "\"users\"", ":", "{", "\"class_name\"", ":", "\"Asset\"", ",", "\"module_name\"", ":", 
"\"great_expectations.datasource.data_connector.asset\"", ",", "}", "}", ",", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"default_regex\"", ":", "{", "\"pattern\"", ":", "\"(.+)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"name\"", "]", "}", ",", "\"class_name\"", ":", "\"ConfiguredAssetFilesystemDataConnector\"", ",", "}", ",", "\"my_runtime_data_connector\"", ":", "{", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"batch_identifiers\"", ":", "[", "\"pipeline_stage_name\"", ",", "\"airflow_run_id\"", "]", ",", "\"class_name\"", ":", "\"RuntimeDataConnector\"", ",", "}", ",", "}", ",", "}", ",", "{", "\"name\"", ":", "\"my_additional_datasource\"", ",", "\"class_name\"", ":", "\"Datasource\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource\"", ",", "\"execution_engine\"", ":", "{", "\"module_name\"", ":", "\"great_expectations.execution_engine\"", ",", "\"class_name\"", ":", "\"PandasExecutionEngine\"", ",", "}", ",", "\"data_connectors\"", ":", "{", "\"my_additional_data_connector\"", ":", "{", "\"module_name\"", ":", "\"great_expectations.datasource.data_connector\"", ",", "\"default_regex\"", ":", "{", "\"pattern\"", ":", "\"(.*)\\\\.csv\"", ",", "\"group_names\"", ":", "[", "\"data_asset_name\"", "]", ",", "}", ",", "\"base_directory\"", ":", "f\"{root_dir}/../data/titanic\"", ",", "\"class_name\"", ":", "\"InferredAssetFilesystemDataConnector\"", ",", "}", "}", ",", "}", ",", "]", "assert", "context", ".", "get_validation_result", "(", "expectation_suite_name", "=", "\"warning\"", ")", "==", "{", "}", "# Create notebook", "# do not want to actually send usage_message, since the function call is not the result of actual usage", "_suite_edit_workflow", "(", "context", "=", "context", ",", "expectation_suite_name", "=", "expectation_suite_name", ",", "profile", "=", "False", ",", "usage_event", "=", 
"\"test_notebook_execution\"", ",", "interactive", "=", "False", ",", "no_jupyter", "=", "True", ",", "create_if_not_exist", "=", "False", ",", "datasource_name", "=", "None", ",", "batch_request", "=", "batch_request", ",", "additional_batch_request_args", "=", "None", ",", "suppress_usage_message", "=", "True", ",", ")", "edit_notebook_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "\"edit_warning.ipynb\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "edit_notebook_path", ")", "run_notebook", "(", "notebook_path", "=", "edit_notebook_path", ",", "notebook_dir", "=", "uncommitted_dir", ",", "string_to_be_replaced", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", "replacement_string", "=", "\"\"", ",", ")", "# Assertions about output", "context", "=", "DataContext", "(", "context_root_dir", "=", "root_dir", ")", "obs_validation_result", ":", "ExpectationSuiteValidationResult", "=", "(", "context", ".", "get_validation_result", "(", "expectation_suite_name", "=", "\"warning\"", ")", ")", "assert", "obs_validation_result", ".", "statistics", "==", "{", "\"evaluated_expectations\"", ":", "3", ",", "\"successful_expectations\"", ":", "2", ",", "\"unsuccessful_expectations\"", ":", "1", ",", "\"success_percent\"", ":", "66.66666666666666", ",", "}", "suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "suite", "[", "\"meta\"", "]", ".", "pop", "(", "\"citations\"", ",", "None", ")", "assert", "suite", "==", "original_suite" ]
[ 818, 0 ]
[ 982, 34 ]
python
en
['en', 'error', 'th']
False
test_notebook_execution_with_custom_notebooks_wrong_module
( suite_with_multiple_citations, data_context_v3_custom_bad_notebooks )
Test the error message in case of "bad" custom module is clear
Test the error message in case of "bad" custom module is clear
def test_notebook_execution_with_custom_notebooks_wrong_module( suite_with_multiple_citations, data_context_v3_custom_bad_notebooks ): """ Test the error message in case of "bad" custom module is clear """ with pytest.raises( SuiteEditNotebookCustomTemplateModuleNotFoundError, match=r"invalid\.module" ): SuiteEditNotebookRenderer.from_data_context( data_context=data_context_v3_custom_bad_notebooks ).render(suite=suite_with_multiple_citations)
[ "def", "test_notebook_execution_with_custom_notebooks_wrong_module", "(", "suite_with_multiple_citations", ",", "data_context_v3_custom_bad_notebooks", ")", ":", "with", "pytest", ".", "raises", "(", "SuiteEditNotebookCustomTemplateModuleNotFoundError", ",", "match", "=", "r\"invalid\\.module\"", ")", ":", "SuiteEditNotebookRenderer", ".", "from_data_context", "(", "data_context", "=", "data_context_v3_custom_bad_notebooks", ")", ".", "render", "(", "suite", "=", "suite_with_multiple_citations", ")" ]
[ 985, 0 ]
[ 996, 53 ]
python
en
['en', 'error', 'th']
False
test_notebook_execution_with_custom_notebooks
( suite_with_multiple_citations, data_context_v3_custom_notebooks )
Test the different parts of the notebooks can be modified
Test the different parts of the notebooks can be modified
def test_notebook_execution_with_custom_notebooks( suite_with_multiple_citations, data_context_v3_custom_notebooks ): """ Test the different parts of the notebooks can be modified """ batch_request: dict = { "datasource_name": "files_datasource", "data_connector_name": "files_data_connector", "data_asset_name": "1k", } obs: NotebookNode = SuiteEditNotebookRenderer.from_data_context( data_context=data_context_v3_custom_notebooks ).render(suite=suite_with_multiple_citations, batch_request=batch_request) expected: dict = { "nbformat": 4, "nbformat_minor": 5, "metadata": {}, "cells": [ { "id": "intellectual-inspection", "cell_type": "markdown", "source": "# Edit Your Expectation Suite\nUse this notebook to recreate and modify your expectation suite:\n\n**Expectation Suite Name**: `critical`", "metadata": {}, }, { "id": "collaborative-transfer", "cell_type": "code", "metadata": {}, "execution_count": None, "source": 'import datetime\n\nimport pandas as pd\n\nimport great_expectations as ge\nimport great_expectations.jupyter_ux\nfrom great_expectations.core.batch import BatchRequest\nfrom great_expectations.checkpoint import SimpleCheckpoint\nfrom great_expectations.exceptions import DataContextError\n\ncontext = ge.data_context.DataContext()\n\nbatch_request = {\n "datasource_name": "files_datasource",\n "data_connector_name": "files_data_connector",\n "data_asset_name": "1k",\n}\n\n\n# Feel free to change the name of your suite here. 
Renaming this will not remove the other one.\nexpectation_suite_name = "critical"\ntry:\n suite = context.get_expectation_suite(expectation_suite_name=expectation_suite_name)\n print(\n f\'Loaded ExpectationSuite "{suite.expectation_suite_name}" containing {len(suite.expectations)} expectations.\'\n )\nexcept DataContextError:\n suite = context.create_expectation_suite(\n expectation_suite_name=expectation_suite_name\n )\n print(f\'Created ExpectationSuite "{suite.expectation_suite_name}".\')\n\n\nvalidator = context.get_validator(\n batch_request=BatchRequest(**batch_request),\n expectation_suite_name=expectation_suite_name,\n)\ncolumn_names = [f\'"{column_name}"\' for column_name in validator.columns()]\nprint(f"Columns: {\', \'.join(column_names)}.")\nvalidator.head(n_rows=5, fetch_all=False)', "outputs": [], }, { "id": "legal-beauty", "cell_type": "markdown", "source": "## Create & Edit Expectations\n\n\nAdd expectations by calling specific expectation methods on the `validator` object. They all begin with `.expect_` which makes autocompleting easy using tab.\n\nBecause you selected interactive mode, you are now creating or editing an Expectation Suite with validator feedback from the sample batch of data that you specified (see `batch_request`).\n\nNote that if you select manual mode you may still create or edit an Expectation Suite directly, without feedback from the `validator`. 
See our documentation for more info and examples: [How to create a new Expectation Suite without a sample batch](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/creating_and_editing_expectations/how_to_create_a_new_expectation_suite_without_a_sample_batch.html).\n\n\n\nYou can see all the available expectations in the **[expectation glossary](https://docs.greatexpectations.io/en/latest/reference/glossary_of_expectations.html?utm_source=notebook&utm_medium=create_expectations)**.", "metadata": {}, }, { "id": "smoking-bangladesh", "cell_type": "markdown", "source": "### Table Expectation(s)", "metadata": {}, }, { "id": "irish-storm", "cell_type": "markdown", "source": "No table level expectations are in this suite. Feel free to add some here.\n\nThey all begin with `validator.expect_table_...`.\n", "metadata": {}, }, { "id": "injured-differential", "cell_type": "markdown", "source": "### Column Expectation(s)", "metadata": {}, }, { "id": "eleven-football", "cell_type": "markdown", "source": "#### `npi`", "metadata": {}, }, { "id": "eleven-solid", "cell_type": "code", "metadata": {}, "execution_count": None, "source": 'validator.expect_column_values_to_not_be_null(column="npi")', "outputs": [], }, { "id": "responsible-coverage", "cell_type": "markdown", "source": "#### `provider_type`", "metadata": {}, }, { "id": "compressed-indiana", "cell_type": "code", "metadata": {}, "execution_count": None, "source": 'validator.expect_column_values_to_not_be_null(column="provider_type")', "outputs": [], }, { "id": "scheduled-freeware", "cell_type": "markdown", "source": "## Save & Review Your Expectations\n\nLet's save the expectation suite as a JSON file in the `great_expectations/expectations` directory of your project.\n\nLet's now rebuild your Data Docs, which helps you communicate about your data with both machines and humans.", "metadata": {}, }, { "id": "useful-hearts", "cell_type": "code", "metadata": {}, "execution_count": None, "source": 
'print(validator.get_expectation_suite(discard_failed_expectations=False))\nvalidator.save_expectation_suite(discard_failed_expectations=False)\n\ncheckpoint_config = {\n "class_name": "SimpleCheckpoint",\n "validations": [\n {\n "batch_request": batch_request,\n "expectation_suite_name": expectation_suite_name\n }\n ]\n}\ncheckpoint = SimpleCheckpoint(\n f"_tmp_checkpoint_{expectation_suite_name}",\n context,\n **checkpoint_config\n)\ncheckpoint_result = checkpoint.run()\n\ncontext.build_data_docs()\n\nvalidation_result_identifier = checkpoint_result.list_validation_result_identifiers()[0]\ncontext.open_data_docs(resource_identifier=validation_result_identifier)', "outputs": [], }, ], } del expected["nbformat_minor"] del obs["nbformat_minor"] for obs_cell, expected_cell in zip(obs["cells"], expected["cells"]): obs_cell.pop("id", None) expected_cell.pop("id", None) assert obs_cell == expected_cell assert obs == expected
[ "def", "test_notebook_execution_with_custom_notebooks", "(", "suite_with_multiple_citations", ",", "data_context_v3_custom_notebooks", ")", ":", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"files_datasource\"", ",", "\"data_connector_name\"", ":", "\"files_data_connector\"", ",", "\"data_asset_name\"", ":", "\"1k\"", ",", "}", "obs", ":", "NotebookNode", "=", "SuiteEditNotebookRenderer", ".", "from_data_context", "(", "data_context", "=", "data_context_v3_custom_notebooks", ")", ".", "render", "(", "suite", "=", "suite_with_multiple_citations", ",", "batch_request", "=", "batch_request", ")", "expected", ":", "dict", "=", "{", "\"nbformat\"", ":", "4", ",", "\"nbformat_minor\"", ":", "5", ",", "\"metadata\"", ":", "{", "}", ",", "\"cells\"", ":", "[", "{", "\"id\"", ":", "\"intellectual-inspection\"", ",", "\"cell_type\"", ":", "\"markdown\"", ",", "\"source\"", ":", "\"# Edit Your Expectation Suite\\nUse this notebook to recreate and modify your expectation suite:\\n\\n**Expectation Suite Name**: `critical`\"", ",", "\"metadata\"", ":", "{", "}", ",", "}", ",", "{", "\"id\"", ":", "\"collaborative-transfer\"", ",", "\"cell_type\"", ":", "\"code\"", ",", "\"metadata\"", ":", "{", "}", ",", "\"execution_count\"", ":", "None", ",", "\"source\"", ":", "'import datetime\\n\\nimport pandas as pd\\n\\nimport great_expectations as ge\\nimport great_expectations.jupyter_ux\\nfrom great_expectations.core.batch import BatchRequest\\nfrom great_expectations.checkpoint import SimpleCheckpoint\\nfrom great_expectations.exceptions import DataContextError\\n\\ncontext = ge.data_context.DataContext()\\n\\nbatch_request = {\\n \"datasource_name\": \"files_datasource\",\\n \"data_connector_name\": \"files_data_connector\",\\n \"data_asset_name\": \"1k\",\\n}\\n\\n\\n# Feel free to change the name of your suite here. 
Renaming this will not remove the other one.\\nexpectation_suite_name = \"critical\"\\ntry:\\n suite = context.get_expectation_suite(expectation_suite_name=expectation_suite_name)\\n print(\\n f\\'Loaded ExpectationSuite \"{suite.expectation_suite_name}\" containing {len(suite.expectations)} expectations.\\'\\n )\\nexcept DataContextError:\\n suite = context.create_expectation_suite(\\n expectation_suite_name=expectation_suite_name\\n )\\n print(f\\'Created ExpectationSuite \"{suite.expectation_suite_name}\".\\')\\n\\n\\nvalidator = context.get_validator(\\n batch_request=BatchRequest(**batch_request),\\n expectation_suite_name=expectation_suite_name,\\n)\\ncolumn_names = [f\\'\"{column_name}\"\\' for column_name in validator.columns()]\\nprint(f\"Columns: {\\', \\'.join(column_names)}.\")\\nvalidator.head(n_rows=5, fetch_all=False)'", ",", "\"outputs\"", ":", "[", "]", ",", "}", ",", "{", "\"id\"", ":", "\"legal-beauty\"", ",", "\"cell_type\"", ":", "\"markdown\"", ",", "\"source\"", ":", "\"## Create & Edit Expectations\\n\\n\\nAdd expectations by calling specific expectation methods on the `validator` object. They all begin with `.expect_` which makes autocompleting easy using tab.\\n\\nBecause you selected interactive mode, you are now creating or editing an Expectation Suite with validator feedback from the sample batch of data that you specified (see `batch_request`).\\n\\nNote that if you select manual mode you may still create or edit an Expectation Suite directly, without feedback from the `validator`. 
See our documentation for more info and examples: [How to create a new Expectation Suite without a sample batch](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/creating_and_editing_expectations/how_to_create_a_new_expectation_suite_without_a_sample_batch.html).\\n\\n\\n\\nYou can see all the available expectations in the **[expectation glossary](https://docs.greatexpectations.io/en/latest/reference/glossary_of_expectations.html?utm_source=notebook&utm_medium=create_expectations)**.\"", ",", "\"metadata\"", ":", "{", "}", ",", "}", ",", "{", "\"id\"", ":", "\"smoking-bangladesh\"", ",", "\"cell_type\"", ":", "\"markdown\"", ",", "\"source\"", ":", "\"### Table Expectation(s)\"", ",", "\"metadata\"", ":", "{", "}", ",", "}", ",", "{", "\"id\"", ":", "\"irish-storm\"", ",", "\"cell_type\"", ":", "\"markdown\"", ",", "\"source\"", ":", "\"No table level expectations are in this suite. Feel free to add some here.\\n\\nThey all begin with `validator.expect_table_...`.\\n\"", ",", "\"metadata\"", ":", "{", "}", ",", "}", ",", "{", "\"id\"", ":", "\"injured-differential\"", ",", "\"cell_type\"", ":", "\"markdown\"", ",", "\"source\"", ":", "\"### Column Expectation(s)\"", ",", "\"metadata\"", ":", "{", "}", ",", "}", ",", "{", "\"id\"", ":", "\"eleven-football\"", ",", "\"cell_type\"", ":", "\"markdown\"", ",", "\"source\"", ":", "\"#### `npi`\"", ",", "\"metadata\"", ":", "{", "}", ",", "}", ",", "{", "\"id\"", ":", "\"eleven-solid\"", ",", "\"cell_type\"", ":", "\"code\"", ",", "\"metadata\"", ":", "{", "}", ",", "\"execution_count\"", ":", "None", ",", "\"source\"", ":", "'validator.expect_column_values_to_not_be_null(column=\"npi\")'", ",", "\"outputs\"", ":", "[", "]", ",", "}", ",", "{", "\"id\"", ":", "\"responsible-coverage\"", ",", "\"cell_type\"", ":", "\"markdown\"", ",", "\"source\"", ":", "\"#### `provider_type`\"", ",", "\"metadata\"", ":", "{", "}", ",", "}", ",", "{", "\"id\"", ":", "\"compressed-indiana\"", ",", "\"cell_type\"", ":", 
"\"code\"", ",", "\"metadata\"", ":", "{", "}", ",", "\"execution_count\"", ":", "None", ",", "\"source\"", ":", "'validator.expect_column_values_to_not_be_null(column=\"provider_type\")'", ",", "\"outputs\"", ":", "[", "]", ",", "}", ",", "{", "\"id\"", ":", "\"scheduled-freeware\"", ",", "\"cell_type\"", ":", "\"markdown\"", ",", "\"source\"", ":", "\"## Save & Review Your Expectations\\n\\nLet's save the expectation suite as a JSON file in the `great_expectations/expectations` directory of your project.\\n\\nLet's now rebuild your Data Docs, which helps you communicate about your data with both machines and humans.\"", ",", "\"metadata\"", ":", "{", "}", ",", "}", ",", "{", "\"id\"", ":", "\"useful-hearts\"", ",", "\"cell_type\"", ":", "\"code\"", ",", "\"metadata\"", ":", "{", "}", ",", "\"execution_count\"", ":", "None", ",", "\"source\"", ":", "'print(validator.get_expectation_suite(discard_failed_expectations=False))\\nvalidator.save_expectation_suite(discard_failed_expectations=False)\\n\\ncheckpoint_config = {\\n \"class_name\": \"SimpleCheckpoint\",\\n \"validations\": [\\n {\\n \"batch_request\": batch_request,\\n \"expectation_suite_name\": expectation_suite_name\\n }\\n ]\\n}\\ncheckpoint = SimpleCheckpoint(\\n f\"_tmp_checkpoint_{expectation_suite_name}\",\\n context,\\n **checkpoint_config\\n)\\ncheckpoint_result = checkpoint.run()\\n\\ncontext.build_data_docs()\\n\\nvalidation_result_identifier = checkpoint_result.list_validation_result_identifiers()[0]\\ncontext.open_data_docs(resource_identifier=validation_result_identifier)'", ",", "\"outputs\"", ":", "[", "]", ",", "}", ",", "]", ",", "}", "del", "expected", "[", "\"nbformat_minor\"", "]", "del", "obs", "[", "\"nbformat_minor\"", "]", "for", "obs_cell", ",", "expected_cell", "in", "zip", "(", "obs", "[", "\"cells\"", "]", ",", "expected", "[", "\"cells\"", "]", ")", ":", "obs_cell", ".", "pop", "(", "\"id\"", ",", "None", ")", "expected_cell", ".", "pop", "(", "\"id\"", ",", "None", ")", 
"assert", "obs_cell", "==", "expected_cell", "assert", "obs", "==", "expected" ]
[ 999, 0 ]
[ 1107, 26 ]
python
en
['en', 'error', 'th']
False
ProfilingColumnPropertiesTableContentBlockRenderer.render
(cls, ge_object, header_row=None)
Each expectation method should return a list of rows
Each expectation method should return a list of rows
def render(cls, ge_object, header_row=None): """Each expectation method should return a list of rows""" if header_row is None: header_row = [] table_rows = [] if isinstance(ge_object, list): for sub_object in ge_object: expectation_type = cls._get_expectation_type(sub_object) if expectation_type in cls.expectation_renderers: new_rows = [ get_renderer_impl(expectation_type, renderer_type)[1]( result=sub_object ) for renderer_type in cls.expectation_renderers.get( expectation_type ) ] table_rows.extend(new_rows) else: expectation_type = cls._get_expectation_type(ge_object) if expectation_type in cls.expectation_renderers: new_rows = [ get_renderer_impl(expectation_type, renderer_type)[1]( result=ge_object ) for renderer_type in cls.expectation_renderers.get(expectation_type) ] table_rows.extend(new_rows) return RenderedTableContent( **{ "content_block_type": "table", "header_row": header_row, "table": table_rows, } )
[ "def", "render", "(", "cls", ",", "ge_object", ",", "header_row", "=", "None", ")", ":", "if", "header_row", "is", "None", ":", "header_row", "=", "[", "]", "table_rows", "=", "[", "]", "if", "isinstance", "(", "ge_object", ",", "list", ")", ":", "for", "sub_object", "in", "ge_object", ":", "expectation_type", "=", "cls", ".", "_get_expectation_type", "(", "sub_object", ")", "if", "expectation_type", "in", "cls", ".", "expectation_renderers", ":", "new_rows", "=", "[", "get_renderer_impl", "(", "expectation_type", ",", "renderer_type", ")", "[", "1", "]", "(", "result", "=", "sub_object", ")", "for", "renderer_type", "in", "cls", ".", "expectation_renderers", ".", "get", "(", "expectation_type", ")", "]", "table_rows", ".", "extend", "(", "new_rows", ")", "else", ":", "expectation_type", "=", "cls", ".", "_get_expectation_type", "(", "ge_object", ")", "if", "expectation_type", "in", "cls", ".", "expectation_renderers", ":", "new_rows", "=", "[", "get_renderer_impl", "(", "expectation_type", ",", "renderer_type", ")", "[", "1", "]", "(", "result", "=", "ge_object", ")", "for", "renderer_type", "in", "cls", ".", "expectation_renderers", ".", "get", "(", "expectation_type", ")", "]", "table_rows", ".", "extend", "(", "new_rows", ")", "return", "RenderedTableContent", "(", "*", "*", "{", "\"content_block_type\"", ":", "\"table\"", ",", "\"header_row\"", ":", "header_row", ",", "\"table\"", ":", "table_rows", ",", "}", ")" ]
[ 27, 4 ]
[ 64, 9 ]
python
en
['en', 'ga', 'en']
True
member_calldef_t._get__cmp__call_items
(self)
implementation details
implementation details
def _get__cmp__call_items(self): """implementation details""" return [self.virtuality, self.has_static, self.has_const]
[ "def", "_get__cmp__call_items", "(", "self", ")", ":", "return", "[", "self", ".", "virtuality", ",", "self", ".", "has_static", ",", "self", ".", "has_const", "]" ]
[ 53, 4 ]
[ 55, 65 ]
python
da
['eo', 'da', 'en']
False
member_calldef_t.virtuality
(self)
Describes the "virtuality" of the member (as defined by the string constants in the class :class:VIRTUALITY_TYPES). @type: str
Describes the "virtuality" of the member (as defined by the string constants in the class :class:VIRTUALITY_TYPES).
def virtuality(self): """Describes the "virtuality" of the member (as defined by the string constants in the class :class:VIRTUALITY_TYPES). @type: str""" return self._virtuality
[ "def", "virtuality", "(", "self", ")", ":", "return", "self", ".", "_virtuality" ]
[ 68, 4 ]
[ 72, 31 ]
python
en
['en', 'en', 'en']
True
member_calldef_t.access_type
(self)
Return the access type of the member (as defined by the string constants in the class :class:ACCESS_TYPES. @type: str
Return the access type of the member (as defined by the string constants in the class :class:ACCESS_TYPES.
def access_type(self): """Return the access type of the member (as defined by the string constants in the class :class:ACCESS_TYPES. @type: str""" return self.parent.find_out_member_access_type(self)
[ "def", "access_type", "(", "self", ")", ":", "return", "self", ".", "parent", ".", "find_out_member_access_type", "(", "self", ")" ]
[ 80, 4 ]
[ 84, 60 ]
python
en
['en', 'en', 'en']
True
member_calldef_t.has_const
(self)
describes, whether "callable" has const modifier or not
describes, whether "callable" has const modifier or not
def has_const(self): """describes, whether "callable" has const modifier or not""" return self._has_const
[ "def", "has_const", "(", "self", ")", ":", "return", "self", ".", "_has_const" ]
[ 87, 4 ]
[ 89, 30 ]
python
en
['en', 'gl', 'en']
True