Dataset fields:
identifier: string, length 1 to 155
parameters: string, length 2 to 6.09k
docstring: string, length 11 to 63.4k
docstring_summary: string, length 0 to 63.4k
function: string, length 29 to 99.8k
function_tokens: sequence
start_point: sequence
end_point: sequence
language: string class, 1 value
docstring_language: string, length 2 to 7
docstring_language_predictions: string, length 18 to 23
is_langid_reliable: string class, 2 values
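The rows below follow this schema. As a minimal, hypothetical sketch of how such records could be loaded and filtered in Python (the file name is an assumption; the records are treated as if stored one JSON object per line):

import json

# Hypothetical file name; each line holds one record with the fields listed above.
with open("code_docstring_rows.jsonl", encoding="utf-8") as handle:
    rows = [json.loads(line) for line in handle]

# Keep rows whose docstring language was reliably identified as English.
english_rows = [
    row for row in rows
    if row["docstring_language"] == "en" and row["is_langid_reliable"] == "True"
]

for row in english_rows[:3]:
    print(row["identifier"], row["start_point"], row["end_point"])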
sqlalchemy_dataset
(test_backends)
Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework
Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework
def sqlalchemy_dataset(test_backends):
    """Provide dataset fixtures that have special values and/or are otherwise useful outside the standard json testing framework"""
    if "postgresql" in test_backends:
        backend = "postgresql"
    elif "sqlite" in test_backends:
        backend = "sqlite"
    else:
        return

    data = {
        "infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],
        "nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None],
        "naturals": [1, 2, 3, 4, 5, 6, 7],
    }
    schemas = {
        "postgresql": {
            "infinities": "DOUBLE_PRECISION",
            "nulls": "DOUBLE_PRECISION",
            "naturals": "DOUBLE_PRECISION",
        },
        "sqlite": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"},
    }
    return get_dataset(backend, data, schemas=schemas, profiler=None)
[ "def", "sqlalchemy_dataset", "(", "test_backends", ")", ":", "if", "\"postgresql\"", "in", "test_backends", ":", "backend", "=", "\"postgresql\"", "elif", "\"sqlite\"", "in", "test_backends", ":", "backend", "=", "\"sqlite\"", "else", ":", "return", "data", "=", "{", "\"infinities\"", ":", "[", "-", "np", ".", "inf", ",", "-", "10", ",", "-", "np", ".", "pi", ",", "0", ",", "np", ".", "pi", ",", "10", "/", "2.2", ",", "np", ".", "inf", "]", ",", "\"nulls\"", ":", "[", "np", ".", "nan", ",", "None", ",", "0", ",", "1.1", ",", "2.2", ",", "3.3", ",", "None", "]", ",", "\"naturals\"", ":", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", "]", ",", "}", "schemas", "=", "{", "\"postgresql\"", ":", "{", "\"infinities\"", ":", "\"DOUBLE_PRECISION\"", ",", "\"nulls\"", ":", "\"DOUBLE_PRECISION\"", ",", "\"naturals\"", ":", "\"DOUBLE_PRECISION\"", ",", "}", ",", "\"sqlite\"", ":", "{", "\"infinities\"", ":", "\"FLOAT\"", ",", "\"nulls\"", ":", "\"FLOAT\"", ",", "\"naturals\"", ":", "\"FLOAT\"", "}", ",", "}", "return", "get_dataset", "(", "backend", ",", "data", ",", "schemas", "=", "schemas", ",", "profiler", "=", "None", ")" ]
[ 2123, 0 ]
[ 2146, 69 ]
python
en
['en', 'en', 'en']
True
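A short, hypothetical example of a test consuming this fixture: pytest injects it by name, and the guard covers the case where neither SQL backend is configured. The test name and the specific expectation call are illustrative, not taken from the source.

import pytest

def test_naturals_column_has_no_nulls(sqlalchemy_dataset):
    # The fixture returns None when neither postgresql nor sqlite is available.
    if sqlalchemy_dataset is None:
        pytest.skip("no sqlalchemy backend configured")
    # Illustrative expectation on the "naturals" column defined by the fixture.
    result = sqlalchemy_dataset.expect_column_values_to_not_be_null("naturals")
    assert result.success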
YamlLexer.something
(token_class)
Do not produce empty tokens.
Do not produce empty tokens.
def something(token_class):
    """Do not produce empty tokens."""
    def callback(lexer, match, context):
        text = match.group()
        if not text:
            return
        yield match.start(), token_class, text
        context.pos = match.end()
    return callback
[ "def", "something", "(", "token_class", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "if", "not", "text", ":", "return", "yield", "match", ".", "start", "(", ")", ",", "token_class", ",", "text", "context", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 45, 4 ]
[ 53, 23 ]
python
en
['en', 'mg', 'en']
True
YamlLexer.reset_indent
(token_class)
Reset the indentation levels.
Reset the indentation levels.
def reset_indent(token_class):
    """Reset the indentation levels."""
    def callback(lexer, match, context):
        text = match.group()
        context.indent_stack = []
        context.indent = -1
        context.next_indent = 0
        context.block_scalar_indent = None
        yield match.start(), token_class, text
        context.pos = match.end()
    return callback
[ "def", "reset_indent", "(", "token_class", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "context", ".", "indent_stack", "=", "[", "]", "context", ".", "indent", "=", "-", "1", "context", ".", "next_indent", "=", "0", "context", ".", "block_scalar_indent", "=", "None", "yield", "match", ".", "start", "(", ")", ",", "token_class", ",", "text", "context", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 55, 4 ]
[ 65, 23 ]
python
en
['en', 'da', 'en']
True
YamlLexer.save_indent
(token_class, start=False)
Save a possible indentation level.
Save a possible indentation level.
def save_indent(token_class, start=False):
    """Save a possible indentation level."""
    def callback(lexer, match, context):
        text = match.group()
        extra = ''
        if start:
            context.next_indent = len(text)
            if context.next_indent < context.indent:
                while context.next_indent < context.indent:
                    context.indent = context.indent_stack.pop()
            if context.next_indent > context.indent:
                extra = text[context.indent:]
                text = text[:context.indent]
        else:
            context.next_indent += len(text)
        if text:
            yield match.start(), token_class, text
        if extra:
            yield match.start()+len(text), token_class.Error, extra
        context.pos = match.end()
    return callback
[ "def", "save_indent", "(", "token_class", ",", "start", "=", "False", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "extra", "=", "''", "if", "start", ":", "context", ".", "next_indent", "=", "len", "(", "text", ")", "if", "context", ".", "next_indent", "<", "context", ".", "indent", ":", "while", "context", ".", "next_indent", "<", "context", ".", "indent", ":", "context", ".", "indent", "=", "context", ".", "indent_stack", ".", "pop", "(", ")", "if", "context", ".", "next_indent", ">", "context", ".", "indent", ":", "extra", "=", "text", "[", "context", ".", "indent", ":", "]", "text", "=", "text", "[", ":", "context", ".", "indent", "]", "else", ":", "context", ".", "next_indent", "+=", "len", "(", "text", ")", "if", "text", ":", "yield", "match", ".", "start", "(", ")", ",", "token_class", ",", "text", "if", "extra", ":", "yield", "match", ".", "start", "(", ")", "+", "len", "(", "text", ")", ",", "token_class", ".", "Error", ",", "extra", "context", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 67, 4 ]
[ 87, 23 ]
python
en
['en', 'en', 'en']
True
YamlLexer.set_indent
(token_class, implicit=False)
Set the previously saved indentation level.
Set the previously saved indentation level.
def set_indent(token_class, implicit=False):
    """Set the previously saved indentation level."""
    def callback(lexer, match, context):
        text = match.group()
        if context.indent < context.next_indent:
            context.indent_stack.append(context.indent)
            context.indent = context.next_indent
        if not implicit:
            context.next_indent += len(text)
        yield match.start(), token_class, text
        context.pos = match.end()
    return callback
[ "def", "set_indent", "(", "token_class", ",", "implicit", "=", "False", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "if", "context", ".", "indent", "<", "context", ".", "next_indent", ":", "context", ".", "indent_stack", ".", "append", "(", "context", ".", "indent", ")", "context", ".", "indent", "=", "context", ".", "next_indent", "if", "not", "implicit", ":", "context", ".", "next_indent", "+=", "len", "(", "text", ")", "yield", "match", ".", "start", "(", ")", ",", "token_class", ",", "text", "context", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 89, 4 ]
[ 100, 23 ]
python
en
['en', 'en', 'en']
True
YamlLexer.set_block_scalar_indent
(token_class)
Set an explicit indentation level for a block scalar.
Set an explicit indentation level for a block scalar.
def set_block_scalar_indent(token_class):
    """Set an explicit indentation level for a block scalar."""
    def callback(lexer, match, context):
        text = match.group()
        context.block_scalar_indent = None
        if not text:
            return
        increment = match.group(1)
        if increment:
            current_indent = max(context.indent, 0)
            increment = int(increment)
            context.block_scalar_indent = current_indent + increment
        if text:
            yield match.start(), token_class, text
            context.pos = match.end()
    return callback
[ "def", "set_block_scalar_indent", "(", "token_class", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "context", ".", "block_scalar_indent", "=", "None", "if", "not", "text", ":", "return", "increment", "=", "match", ".", "group", "(", "1", ")", "if", "increment", ":", "current_indent", "=", "max", "(", "context", ".", "indent", ",", "0", ")", "increment", "=", "int", "(", "increment", ")", "context", ".", "block_scalar_indent", "=", "current_indent", "+", "increment", "if", "text", ":", "yield", "match", ".", "start", "(", ")", ",", "token_class", ",", "text", "context", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 102, 4 ]
[ 117, 23 ]
python
en
['en', 'su', 'en']
True
YamlLexer.parse_block_scalar_empty_line
(indent_token_class, content_token_class)
Process an empty line in a block scalar.
Process an empty line in a block scalar.
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
    """Process an empty line in a block scalar."""
    def callback(lexer, match, context):
        text = match.group()
        if (context.block_scalar_indent is None or
                len(text) <= context.block_scalar_indent):
            if text:
                yield match.start(), indent_token_class, text
        else:
            indentation = text[:context.block_scalar_indent]
            content = text[context.block_scalar_indent:]
            yield match.start(), indent_token_class, indentation
            yield (match.start()+context.block_scalar_indent,
                   content_token_class, content)
        context.pos = match.end()
    return callback
[ "def", "parse_block_scalar_empty_line", "(", "indent_token_class", ",", "content_token_class", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "if", "(", "context", ".", "block_scalar_indent", "is", "None", "or", "len", "(", "text", ")", "<=", "context", ".", "block_scalar_indent", ")", ":", "if", "text", ":", "yield", "match", ".", "start", "(", ")", ",", "indent_token_class", ",", "text", "else", ":", "indentation", "=", "text", "[", ":", "context", ".", "block_scalar_indent", "]", "content", "=", "text", "[", "context", ".", "block_scalar_indent", ":", "]", "yield", "match", ".", "start", "(", ")", ",", "indent_token_class", ",", "indentation", "yield", "(", "match", ".", "start", "(", ")", "+", "context", ".", "block_scalar_indent", ",", "content_token_class", ",", "content", ")", "context", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 119, 4 ]
[ 134, 23 ]
python
en
['en', 'en', 'en']
True
YamlLexer.parse_block_scalar_indent
(token_class)
Process indentation spaces in a block scalar.
Process indentation spaces in a block scalar.
def parse_block_scalar_indent(token_class):
    """Process indentation spaces in a block scalar."""
    def callback(lexer, match, context):
        text = match.group()
        if context.block_scalar_indent is None:
            if len(text) <= max(context.indent, 0):
                context.stack.pop()
                context.stack.pop()
                return
            context.block_scalar_indent = len(text)
        else:
            if len(text) < context.block_scalar_indent:
                context.stack.pop()
                context.stack.pop()
                return
        if text:
            yield match.start(), token_class, text
            context.pos = match.end()
    return callback
[ "def", "parse_block_scalar_indent", "(", "token_class", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "if", "context", ".", "block_scalar_indent", "is", "None", ":", "if", "len", "(", "text", ")", "<=", "max", "(", "context", ".", "indent", ",", "0", ")", ":", "context", ".", "stack", ".", "pop", "(", ")", "context", ".", "stack", ".", "pop", "(", ")", "return", "context", ".", "block_scalar_indent", "=", "len", "(", "text", ")", "else", ":", "if", "len", "(", "text", ")", "<", "context", ".", "block_scalar_indent", ":", "context", ".", "stack", ".", "pop", "(", ")", "context", ".", "stack", ".", "pop", "(", ")", "return", "if", "text", ":", "yield", "match", ".", "start", "(", ")", ",", "token_class", ",", "text", "context", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 136, 4 ]
[ 154, 23 ]
python
en
['en', 'en', 'en']
True
YamlLexer.parse_plain_scalar_indent
(token_class)
Process indentation spaces in a plain scalar.
Process indentation spaces in a plain scalar.
def parse_plain_scalar_indent(token_class):
    """Process indentation spaces in a plain scalar."""
    def callback(lexer, match, context):
        text = match.group()
        if len(text) <= context.indent:
            context.stack.pop()
            context.stack.pop()
            return
        if text:
            yield match.start(), token_class, text
            context.pos = match.end()
    return callback
[ "def", "parse_plain_scalar_indent", "(", "token_class", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "if", "len", "(", "text", ")", "<=", "context", ".", "indent", ":", "context", ".", "stack", ".", "pop", "(", ")", "context", ".", "stack", ".", "pop", "(", ")", "return", "if", "text", ":", "yield", "match", ".", "start", "(", ")", ",", "token_class", ",", "text", "context", ".", "pos", "=", "match", ".", "end", "(", ")", "return", "callback" ]
[ 156, 4 ]
[ 167, 23 ]
python
en
['en', 'en', 'en']
True
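All of these factories follow the same Pygments ExtendedRegexLexer convention: the returned callback takes (lexer, match, context), yields (position, token, text) tuples, and advances context.pos itself, with the indentation bookkeeping stored on the lexer context. Below is a minimal sketch of how such a factory might be wired into a rule table, assuming a context subclass that initializes the indent fields the callbacks expect; the lexer name, regexes, and context class are illustrative, not part of the source.

from pygments.lexer import ExtendedRegexLexer, LexerContext
from pygments.token import Text, Whitespace

class IndentContext(LexerContext):
    """Context subclass carrying the indentation state the callbacks read and write."""
    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        self.indent_stack = []
        self.indent = -1
        self.next_indent = 0
        self.block_scalar_indent = None

class TinyIndentLexer(ExtendedRegexLexer):
    name = "TinyIndent"
    tokens = {
        "root": [
            # The factory runs once at class-definition time; the callback it
            # returns is invoked for every match of the pattern.
            (r"[ ]+", save_indent(Whitespace, start=True)),
            (r"[^ \n]+", Text),
            (r"\n", Whitespace),
        ],
    }

    def get_tokens_unprocessed(self, text=None, context=None):
        # Supply the custom context so the callbacks find the indent fields.
        if context is None:
            context = IndentContext(text, 0)
        return super().get_tokens_unprocessed(text, context)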
build_in_code_data_context_project_config
( bucket: str = "leakybucket", expectations_store_prefix: str = "expectations_store_prefix", validations_store_prefix: str = "validations_store_prefix", data_docs_store_prefix: str = "data_docs_store_prefix", stores: Optional[Dict] = None, )
Create a project config for an in-code data context. Not a fixture because we want to control when this is built (after the expectation store). Args: expectations_store_prefix: prefix for expectations store validations_store_prefix: prefix for validations store data_docs_store_prefix: prefix for data docs bucket: name of the s3 bucket stores: optional overwrite of the default stores Returns: DataContextConfig using s3 for all stores.
Create a project config for an in-code data context. Not a fixture because we want to control when this is built (after the expectation store). Args: expectations_store_prefix: prefix for expectations store validations_store_prefix: prefix for validations store data_docs_store_prefix: prefix for data docs bucket: name of the s3 bucket stores: optional overwrite of the default stores
def build_in_code_data_context_project_config(
    bucket: str = "leakybucket",
    expectations_store_prefix: str = "expectations_store_prefix",
    validations_store_prefix: str = "validations_store_prefix",
    data_docs_store_prefix: str = "data_docs_store_prefix",
    stores: Optional[Dict] = None,
) -> DataContextConfig:
    """
    Create a project config for an in-code data context.
    Not a fixture because we want to control when this is built (after the expectation store).
    Args:
        expectations_store_prefix: prefix for expectations store
        validations_store_prefix: prefix for validations store
        data_docs_store_prefix: prefix for data docs
        bucket: name of the s3 bucket
        stores: optional overwrite of the default stores

    Returns:
        DataContextConfig using s3 for all stores.
    """
    if stores is None:
        stores = {
            "expectations_S3_store": {
                "class_name": "ExpectationsStore",
                "store_backend": {
                    "class_name": "TupleS3StoreBackend",
                    "bucket": bucket,
                    "prefix": expectations_store_prefix,
                },
            },
            "validations_S3_store": {
                "class_name": "ValidationsStore",
                "store_backend": {
                    "class_name": "TupleS3StoreBackend",
                    "bucket": bucket,
                    "prefix": validations_store_prefix,
                },
            },
            "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
        }
    project_config = DataContextConfig(
        config_version=2,
        plugins_directory=None,
        config_variables_file_path=None,
        datasources={
            "my_spark_datasource": {
                "data_asset_type": {
                    "class_name": "SparkDFDataset",
                    "module_name": "great_expectations.dataset",
                },
                "class_name": "SparkDFDatasource",
                "module_name": "great_expectations.datasource",
                "batch_kwargs_generators": {},
            }
        },
        stores=stores,
        expectations_store_name="expectations_S3_store",
        validations_store_name="validations_S3_store",
        evaluation_parameter_store_name="evaluation_parameter_store",
        data_docs_sites={
            "s3_site": {
                "class_name": "SiteBuilder",
                "store_backend": {
                    "class_name": "TupleS3StoreBackend",
                    "bucket": bucket,
                    "prefix": data_docs_store_prefix,
                },
                "site_index_builder": {
                    "class_name": "DefaultSiteIndexBuilder",
                },
            }
        },
        validation_operators={
            "action_list_operator": {
                "class_name": "ActionListValidationOperator",
                "action_list": [
                    {
                        "name": "store_validation_result",
                        "action": {"class_name": "StoreValidationResultAction"},
                    },
                    {
                        "name": "store_evaluation_params",
                        "action": {"class_name": "StoreEvaluationParametersAction"},
                    },
                    {
                        "name": "update_data_docs",
                        "action": {"class_name": "UpdateDataDocsAction"},
                    },
                ],
            }
        },
        anonymous_usage_statistics={
            "enabled": True,
            # NOTE: No data_context_id set here
            "usage_statistics_url": USAGE_STATISTICS_QA_URL,
        },
    )
    return project_config
[ "def", "build_in_code_data_context_project_config", "(", "bucket", ":", "str", "=", "\"leakybucket\"", ",", "expectations_store_prefix", ":", "str", "=", "\"expectations_store_prefix\"", ",", "validations_store_prefix", ":", "str", "=", "\"validations_store_prefix\"", ",", "data_docs_store_prefix", ":", "str", "=", "\"data_docs_store_prefix\"", ",", "stores", ":", "Optional", "[", "Dict", "]", "=", "None", ",", ")", "->", "DataContextConfig", ":", "if", "stores", "is", "None", ":", "stores", "=", "{", "\"expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "bucket", ",", "\"prefix\"", ":", "expectations_store_prefix", ",", "}", ",", "}", ",", "\"validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "bucket", ",", "\"prefix\"", ":", "validations_store_prefix", ",", "}", ",", "}", ",", "\"evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "}", "project_config", "=", "DataContextConfig", "(", "config_version", "=", "2", ",", "plugins_directory", "=", "None", ",", "config_variables_file_path", "=", "None", ",", "datasources", "=", "{", "\"my_spark_datasource\"", ":", "{", "\"data_asset_type\"", ":", "{", "\"class_name\"", ":", "\"SparkDFDataset\"", ",", "\"module_name\"", ":", "\"great_expectations.dataset\"", ",", "}", ",", "\"class_name\"", ":", "\"SparkDFDatasource\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource\"", ",", "\"batch_kwargs_generators\"", ":", "{", "}", ",", "}", "}", ",", "stores", "=", "stores", ",", "expectations_store_name", "=", "\"expectations_S3_store\"", ",", "validations_store_name", "=", "\"validations_S3_store\"", ",", "evaluation_parameter_store_name", "=", "\"evaluation_parameter_store\"", ",", "data_docs_sites", "=", "{", "\"s3_site\"", ":", "{", "\"class_name\"", ":", "\"SiteBuilder\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "bucket", ",", "\"prefix\"", ":", "data_docs_store_prefix", ",", "}", ",", "\"site_index_builder\"", ":", "{", "\"class_name\"", ":", "\"DefaultSiteIndexBuilder\"", ",", "}", ",", "}", "}", ",", "validation_operators", "=", "{", "\"action_list_operator\"", ":", "{", "\"class_name\"", ":", "\"ActionListValidationOperator\"", ",", "\"action_list\"", ":", "[", "{", "\"name\"", ":", "\"store_validation_result\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"StoreValidationResultAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"store_evaluation_params\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"StoreEvaluationParametersAction\"", "}", ",", "}", ",", "{", "\"name\"", ":", "\"update_data_docs\"", ",", "\"action\"", ":", "{", "\"class_name\"", ":", "\"UpdateDataDocsAction\"", "}", ",", "}", ",", "]", ",", "}", "}", ",", "anonymous_usage_statistics", "=", "{", "\"enabled\"", ":", "True", ",", "# NOTE: No data_context_id set here", "\"usage_statistics_url\"", ":", "USAGE_STATISTICS_QA_URL", ",", "}", ",", ")", "return", "project_config" ]
[ 13, 0 ]
[ 110, 25 ]
python
en
['en', 'error', 'th']
False
get_store_backend_id_from_s3
(bucket: str, prefix: str, key: str)
Return the UUID store_backend_id from a given s3 file Args: bucket: s3 bucket prefix: prefix for s3 bucket key: filename in s3 bucket Returns:
Return the UUID store_backend_id from a given s3 file Args: bucket: s3 bucket prefix: prefix for s3 bucket key: filename in s3 bucket
def get_store_backend_id_from_s3(bucket: str, prefix: str, key: str) -> str:
    """
    Return the UUID store_backend_id from a given s3 file
    Args:
        bucket: s3 bucket
        prefix: prefix for s3 bucket
        key: filename in s3 bucket

    Returns:

    """
    s3_response_object = boto3.client("s3").get_object(
        Bucket=bucket, Key=f"{prefix}/{key}"
    )
    s3_response_object_body = (
        s3_response_object["Body"]
        .read()
        .decode(s3_response_object.get("ContentEncoding", "utf-8"))
    )
    store_backend_id_from_s3_file = s3_response_object_body.replace(
        StoreBackend.STORE_BACKEND_ID_PREFIX, ""
    )
    return store_backend_id_from_s3_file
[ "def", "get_store_backend_id_from_s3", "(", "bucket", ":", "str", ",", "prefix", ":", "str", ",", "key", ":", "str", ")", "->", "str", ":", "s3_response_object", "=", "boto3", ".", "client", "(", "\"s3\"", ")", ".", "get_object", "(", "Bucket", "=", "bucket", ",", "Key", "=", "f\"{prefix}/{key}\"", ")", "s3_response_object_body", "=", "(", "s3_response_object", "[", "\"Body\"", "]", ".", "read", "(", ")", ".", "decode", "(", "s3_response_object", ".", "get", "(", "\"ContentEncoding\"", ",", "\"utf-8\"", ")", ")", ")", "store_backend_id_from_s3_file", "=", "s3_response_object_body", ".", "replace", "(", "StoreBackend", ".", "STORE_BACKEND_ID_PREFIX", ",", "\"\"", ")", "return", "store_backend_id_from_s3_file" ]
[ 113, 0 ]
[ 135, 40 ]
python
en
['en', 'error', 'th']
False
list_s3_bucket_contents
(bucket: str, prefix: str)
List the contents of an s3 bucket as a set of strings given bucket name and prefix Args: bucket: s3 bucket prefix: prefix for s3 bucket Returns: set of filepath strings
List the contents of an s3 bucket as a set of strings given bucket name and prefix Args: bucket: s3 bucket prefix: prefix for s3 bucket
def list_s3_bucket_contents(bucket: str, prefix: str) -> Set[str]:
    """
    List the contents of an s3 bucket as a set of strings given bucket name and prefix
    Args:
        bucket: s3 bucket
        prefix: prefix for s3 bucket

    Returns:
        set of filepath strings
    """
    return {
        s3_object_info["Key"]
        for s3_object_info in boto3.client("s3").list_objects_v2(
            Bucket=bucket, Prefix=prefix
        )["Contents"]
    }
[ "def", "list_s3_bucket_contents", "(", "bucket", ":", "str", ",", "prefix", ":", "str", ")", "->", "Set", "[", "str", "]", ":", "return", "{", "s3_object_info", "[", "\"Key\"", "]", "for", "s3_object_info", "in", "boto3", ".", "client", "(", "\"s3\"", ")", ".", "list_objects_v2", "(", "Bucket", "=", "bucket", ",", "Prefix", "=", "prefix", ")", "[", "\"Contents\"", "]", "}" ]
[ 138, 0 ]
[ 153, 5 ]
python
en
['en', 'error', 'th']
False
test_DataContext_construct_data_context_id_uses_id_of_currently_configured_expectations_store
()
What does this test and why? A DataContext should have an id. This ID should come from either: 1. configured expectations store store_backend_id 2. great_expectations.yml 3. new generated id from DataContextConfig This test verifies that DataContext._construct_data_context_id uses the store_backend_id from the currently configured expectations store when instantiating the DataContext
What does this test and why?
def test_DataContext_construct_data_context_id_uses_id_of_currently_configured_expectations_store(): """ What does this test and why? A DataContext should have an id. This ID should come from either: 1. configured expectations store store_backend_id 2. great_expectations.yml 3. new generated id from DataContextConfig This test verifies that DataContext._construct_data_context_id uses the store_backend_id from the currently configured expectations store when instantiating the DataContext """ store_backend_id_filename = StoreBackend.STORE_BACKEND_ID_KEY[0] bucket = "leakybucket" expectations_store_prefix = "expectations_store_prefix" validations_store_prefix = "validations_store_prefix" data_docs_store_prefix = "data_docs_store_prefix" data_context_prefix = "" # Create a bucket in Moto's mock AWS environment conn = boto3.resource("s3", region_name="us-east-1") conn.create_bucket(Bucket=bucket) # Create a TupleS3StoreBackend # Initialize without store_backend_id and check that the store_backend_id is generated correctly s3_expectations_store_backend = TupleS3StoreBackend( filepath_template="my_file_{0}", bucket=bucket, prefix=expectations_store_prefix, ) # Make sure store_backend_id is not the error string store_error_uuid = "00000000-0000-0000-0000-00000000e003" s3_expectations_store_backend_id = s3_expectations_store_backend.store_backend_id assert s3_expectations_store_backend_id != store_error_uuid # Make sure the bucket contents are as expected bucket_contents_after_creating_expectation_store = list_s3_bucket_contents( bucket=bucket, prefix=data_context_prefix ) assert bucket_contents_after_creating_expectation_store == { f"{expectations_store_prefix}/{store_backend_id_filename}" } # Make sure the store_backend_id from the file is equal to reading from the property expectations_store_backend_id_from_s3_file = get_store_backend_id_from_s3( bucket=bucket, prefix=expectations_store_prefix, key=store_backend_id_filename, ) assert ( expectations_store_backend_id_from_s3_file == s3_expectations_store_backend_id ) # Create a DataContext (note existing expectations store already set up) in_code_data_context_project_config = build_in_code_data_context_project_config( bucket="leakybucket", expectations_store_prefix=expectations_store_prefix, validations_store_prefix=validations_store_prefix, data_docs_store_prefix=data_docs_store_prefix, ) in_code_data_context = BaseDataContext( project_config=in_code_data_context_project_config ) bucket_contents_after_instantiating_BaseDataContext = list_s3_bucket_contents( bucket=bucket, prefix=data_context_prefix ) assert bucket_contents_after_instantiating_BaseDataContext == { f"{expectations_store_prefix}/{store_backend_id_filename}", f"{validations_store_prefix}/{store_backend_id_filename}", } # Make sure ids are consistent in_code_data_context_expectations_store_store_backend_id = ( in_code_data_context.stores["expectations_S3_store"].store_backend_id ) in_code_data_context_data_context_id = in_code_data_context.data_context_id constructed_data_context_id = in_code_data_context._construct_data_context_id() assert ( in_code_data_context_expectations_store_store_backend_id == in_code_data_context_data_context_id == expectations_store_backend_id_from_s3_file == s3_expectations_store_backend_id == constructed_data_context_id )
[ "def", "test_DataContext_construct_data_context_id_uses_id_of_currently_configured_expectations_store", "(", ")", ":", "store_backend_id_filename", "=", "StoreBackend", ".", "STORE_BACKEND_ID_KEY", "[", "0", "]", "bucket", "=", "\"leakybucket\"", "expectations_store_prefix", "=", "\"expectations_store_prefix\"", "validations_store_prefix", "=", "\"validations_store_prefix\"", "data_docs_store_prefix", "=", "\"data_docs_store_prefix\"", "data_context_prefix", "=", "\"\"", "# Create a bucket in Moto's mock AWS environment", "conn", "=", "boto3", ".", "resource", "(", "\"s3\"", ",", "region_name", "=", "\"us-east-1\"", ")", "conn", ".", "create_bucket", "(", "Bucket", "=", "bucket", ")", "# Create a TupleS3StoreBackend", "# Initialize without store_backend_id and check that the store_backend_id is generated correctly", "s3_expectations_store_backend", "=", "TupleS3StoreBackend", "(", "filepath_template", "=", "\"my_file_{0}\"", ",", "bucket", "=", "bucket", ",", "prefix", "=", "expectations_store_prefix", ",", ")", "# Make sure store_backend_id is not the error string", "store_error_uuid", "=", "\"00000000-0000-0000-0000-00000000e003\"", "s3_expectations_store_backend_id", "=", "s3_expectations_store_backend", ".", "store_backend_id", "assert", "s3_expectations_store_backend_id", "!=", "store_error_uuid", "# Make sure the bucket contents are as expected", "bucket_contents_after_creating_expectation_store", "=", "list_s3_bucket_contents", "(", "bucket", "=", "bucket", ",", "prefix", "=", "data_context_prefix", ")", "assert", "bucket_contents_after_creating_expectation_store", "==", "{", "f\"{expectations_store_prefix}/{store_backend_id_filename}\"", "}", "# Make sure the store_backend_id from the file is equal to reading from the property", "expectations_store_backend_id_from_s3_file", "=", "get_store_backend_id_from_s3", "(", "bucket", "=", "bucket", ",", "prefix", "=", "expectations_store_prefix", ",", "key", "=", "store_backend_id_filename", ",", ")", "assert", "(", "expectations_store_backend_id_from_s3_file", "==", "s3_expectations_store_backend_id", ")", "# Create a DataContext (note existing expectations store already set up)", "in_code_data_context_project_config", "=", "build_in_code_data_context_project_config", "(", "bucket", "=", "\"leakybucket\"", ",", "expectations_store_prefix", "=", "expectations_store_prefix", ",", "validations_store_prefix", "=", "validations_store_prefix", ",", "data_docs_store_prefix", "=", "data_docs_store_prefix", ",", ")", "in_code_data_context", "=", "BaseDataContext", "(", "project_config", "=", "in_code_data_context_project_config", ")", "bucket_contents_after_instantiating_BaseDataContext", "=", "list_s3_bucket_contents", "(", "bucket", "=", "bucket", ",", "prefix", "=", "data_context_prefix", ")", "assert", "bucket_contents_after_instantiating_BaseDataContext", "==", "{", "f\"{expectations_store_prefix}/{store_backend_id_filename}\"", ",", "f\"{validations_store_prefix}/{store_backend_id_filename}\"", ",", "}", "# Make sure ids are consistent", "in_code_data_context_expectations_store_store_backend_id", "=", "(", "in_code_data_context", ".", "stores", "[", "\"expectations_S3_store\"", "]", ".", "store_backend_id", ")", "in_code_data_context_data_context_id", "=", "in_code_data_context", ".", "data_context_id", "constructed_data_context_id", "=", "in_code_data_context", ".", "_construct_data_context_id", "(", ")", "assert", "(", "in_code_data_context_expectations_store_store_backend_id", "==", "in_code_data_context_data_context_id", "==", 
"expectations_store_backend_id_from_s3_file", "==", "s3_expectations_store_backend_id", "==", "constructed_data_context_id", ")" ]
[ 157, 0 ]
[ 241, 5 ]
python
en
['en', 'error', 'th']
False
test_DataContext_construct_data_context_id_uses_id_stored_in_DataContextConfig_if_no_configured_expectations_store
( monkeypatch, )
What does this test and why? A DataContext should have an id. This ID should come from either: 1. configured expectations store store_backend_id 2. great_expectations.yml 3. new generated id from DataContextConfig This test verifies that DataContext._construct_data_context_id uses the data_context_id from DataContextConfig when there is no configured expectations store when instantiating the DataContext, and also that this data_context_id is used to configure the expectations_store.store_backend_id
What does this test and why?
def test_DataContext_construct_data_context_id_uses_id_stored_in_DataContextConfig_if_no_configured_expectations_store(
    monkeypatch,
):
    """
    What does this test and why?

    A DataContext should have an id. This ID should come from either:
    1. configured expectations store store_backend_id
    2. great_expectations.yml
    3. new generated id from DataContextConfig
    This test verifies that DataContext._construct_data_context_id
    uses the data_context_id from DataContextConfig when there is no configured expectations store
    when instantiating the DataContext,
    and also that this data_context_id is used to configure the expectations_store.store_backend_id
    """
    monkeypatch.delenv(
        "GE_USAGE_STATS", raising=False
    )  # Undo the project-wide test default
    bucket = "leakybucket"
    expectations_store_prefix = "expectations_store_prefix"
    validations_store_prefix = "validations_store_prefix"
    data_docs_store_prefix = "data_docs_store_prefix"
    manually_created_uuid = "00000000-0000-0000-0000-000000000eee"

    # Create a bucket in Moto's mock AWS environment
    conn = boto3.resource("s3", region_name="us-east-1")
    conn.create_bucket(Bucket=bucket)

    # Create a DataContext (note NO existing expectations store already set up)
    in_code_data_context_project_config = build_in_code_data_context_project_config(
        bucket="leakybucket",
        expectations_store_prefix=expectations_store_prefix,
        validations_store_prefix=validations_store_prefix,
        data_docs_store_prefix=data_docs_store_prefix,
    )
    # Manually set the data_context_id in the project_config
    in_code_data_context_project_config.anonymous_usage_statistics.data_context_id = (
        manually_created_uuid
    )
    in_code_data_context = BaseDataContext(
        project_config=in_code_data_context_project_config
    )

    # Make sure the manually set data_context_id is propagated to all the appropriate places
    assert (
        manually_created_uuid
        == in_code_data_context.data_context_id
        == in_code_data_context.stores[
            in_code_data_context.expectations_store_name
        ].store_backend_id
    )
[ "def", "test_DataContext_construct_data_context_id_uses_id_stored_in_DataContextConfig_if_no_configured_expectations_store", "(", "monkeypatch", ",", ")", ":", "monkeypatch", ".", "delenv", "(", "\"GE_USAGE_STATS\"", ",", "raising", "=", "False", ")", "# Undo the project-wide test default", "bucket", "=", "\"leakybucket\"", "expectations_store_prefix", "=", "\"expectations_store_prefix\"", "validations_store_prefix", "=", "\"validations_store_prefix\"", "data_docs_store_prefix", "=", "\"data_docs_store_prefix\"", "manually_created_uuid", "=", "\"00000000-0000-0000-0000-000000000eee\"", "# Create a bucket in Moto's mock AWS environment", "conn", "=", "boto3", ".", "resource", "(", "\"s3\"", ",", "region_name", "=", "\"us-east-1\"", ")", "conn", ".", "create_bucket", "(", "Bucket", "=", "bucket", ")", "# Create a DataContext (note NO existing expectations store already set up)", "in_code_data_context_project_config", "=", "build_in_code_data_context_project_config", "(", "bucket", "=", "\"leakybucket\"", ",", "expectations_store_prefix", "=", "expectations_store_prefix", ",", "validations_store_prefix", "=", "validations_store_prefix", ",", "data_docs_store_prefix", "=", "data_docs_store_prefix", ",", ")", "# Manually set the data_context_id in the project_config", "in_code_data_context_project_config", ".", "anonymous_usage_statistics", ".", "data_context_id", "=", "(", "manually_created_uuid", ")", "in_code_data_context", "=", "BaseDataContext", "(", "project_config", "=", "in_code_data_context_project_config", ")", "# Make sure the manually set data_context_id is propagated to all the appropriate places", "assert", "(", "manually_created_uuid", "==", "in_code_data_context", ".", "data_context_id", "==", "in_code_data_context", ".", "stores", "[", "in_code_data_context", ".", "expectations_store_name", "]", ".", "store_backend_id", ")" ]
[ 245, 0 ]
[ 296, 5 ]
python
en
['en', 'error', 'th']
False
test_DataContext_construct_data_context_id_uses_id_stored_in_env_var_GE_DATA_CONTEXT_ID_if_no_configured_expectations_store
( monkeypatch, )
What does this test and why? A DataContext should have an id. This ID should come from either: 1. configured expectations store store_backend_id 2. great_expectations.yml 3. new generated id from DataContextConfig This test verifies that DataContext._construct_data_context_id uses the store_backend_id from the env variable GE_DATA_CONTEXT_ID when there is no configured expectations store when instantiating the DataContext
What does this test and why?
def test_DataContext_construct_data_context_id_uses_id_stored_in_env_var_GE_DATA_CONTEXT_ID_if_no_configured_expectations_store(
    monkeypatch,
):
    """
    What does this test and why?

    A DataContext should have an id. This ID should come from either:
    1. configured expectations store store_backend_id
    2. great_expectations.yml
    3. new generated id from DataContextConfig
    This test verifies that DataContext._construct_data_context_id
    uses the store_backend_id from the env variable GE_DATA_CONTEXT_ID
    when there is no configured expectations store
    when instantiating the DataContext
    """
    bucket = "leakybucket"
    expectations_store_prefix = "expectations_store_prefix"
    validations_store_prefix = "validations_store_prefix"
    data_docs_store_prefix = "data_docs_store_prefix"
    manually_created_uuid = "00000000-0000-0000-0000-000000000fff"
    monkeypatch.setenv("GE_DATA_CONTEXT_ID", manually_created_uuid)

    # Create a bucket in Moto's mock AWS environment
    conn = boto3.resource("s3", region_name="us-east-1")
    conn.create_bucket(Bucket=bucket)

    # Create a DataContext (note NO existing expectations store already set up)
    in_code_data_context_project_config = build_in_code_data_context_project_config(
        bucket="leakybucket",
        expectations_store_prefix=expectations_store_prefix,
        validations_store_prefix=validations_store_prefix,
        data_docs_store_prefix=data_docs_store_prefix,
    )
    in_code_data_context = BaseDataContext(
        project_config=in_code_data_context_project_config
    )

    # Make sure the manually set data_context_id is propagated to all the appropriate places
    assert (
        manually_created_uuid
        == in_code_data_context.data_context_id
        == in_code_data_context.stores[
            in_code_data_context.expectations_store_name
        ].store_backend_id
    )
[ "def", "test_DataContext_construct_data_context_id_uses_id_stored_in_env_var_GE_DATA_CONTEXT_ID_if_no_configured_expectations_store", "(", "monkeypatch", ",", ")", ":", "bucket", "=", "\"leakybucket\"", "expectations_store_prefix", "=", "\"expectations_store_prefix\"", "validations_store_prefix", "=", "\"validations_store_prefix\"", "data_docs_store_prefix", "=", "\"data_docs_store_prefix\"", "manually_created_uuid", "=", "\"00000000-0000-0000-0000-000000000fff\"", "monkeypatch", ".", "setenv", "(", "\"GE_DATA_CONTEXT_ID\"", ",", "manually_created_uuid", ")", "# Create a bucket in Moto's mock AWS environment", "conn", "=", "boto3", ".", "resource", "(", "\"s3\"", ",", "region_name", "=", "\"us-east-1\"", ")", "conn", ".", "create_bucket", "(", "Bucket", "=", "bucket", ")", "# Create a DataContext (note NO existing expectations store already set up)", "in_code_data_context_project_config", "=", "build_in_code_data_context_project_config", "(", "bucket", "=", "\"leakybucket\"", ",", "expectations_store_prefix", "=", "expectations_store_prefix", ",", "validations_store_prefix", "=", "validations_store_prefix", ",", "data_docs_store_prefix", "=", "data_docs_store_prefix", ",", ")", "in_code_data_context", "=", "BaseDataContext", "(", "project_config", "=", "in_code_data_context_project_config", ")", "# Make sure the manually set data_context_id is propagated to all the appropriate places", "assert", "(", "manually_created_uuid", "==", "in_code_data_context", ".", "data_context_id", "==", "in_code_data_context", ".", "stores", "[", "in_code_data_context", ".", "expectations_store_name", "]", ".", "store_backend_id", ")" ]
[ 300, 0 ]
[ 344, 5 ]
python
en
['en', 'error', 'th']
False
test_suppress_store_backend_id_is_true_for_inactive_stores
()
What does this test and why? Trying to read / set the store_backend_id for inactive stores should not be attempted during DataContext initialization. This test ensures that the _suppress_store_backend_id parameter is set to True for inactive stores.
What does this test and why?
def test_suppress_store_backend_id_is_true_for_inactive_stores(): """ What does this test and why? Trying to read / set the store_backend_id for inactive stores should not be attempted during DataContext initialization. This test ensures that the _suppress_store_backend_id parameter is set to True for inactive stores. """ bucket = "leakybucket" expectations_store_prefix = "expectations_store_prefix" validations_store_prefix = "validations_store_prefix" data_docs_store_prefix = "data_docs_store_prefix" # Create a bucket in Moto's mock AWS environment conn = boto3.resource("s3", region_name="us-east-1") conn.create_bucket(Bucket=bucket) # Create a DataContext # Add inactive stores inactive_bucket = "inactive_leakybucket" stores = { "expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": bucket, "prefix": expectations_store_prefix, }, }, "validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": bucket, "prefix": validations_store_prefix, }, }, "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"}, "inactive_expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": inactive_bucket, "prefix": expectations_store_prefix, }, }, "inactive_validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": inactive_bucket, "prefix": validations_store_prefix, }, }, "inactive_evaluation_parameter_store": { "class_name": "EvaluationParameterStore" }, } in_code_data_context_project_config = build_in_code_data_context_project_config( bucket="leakybucket", expectations_store_prefix=expectations_store_prefix, validations_store_prefix=validations_store_prefix, data_docs_store_prefix=data_docs_store_prefix, stores=stores, ) in_code_data_context = BaseDataContext( project_config=in_code_data_context_project_config ) # Check here that suppress_store_backend_id == True for inactive stores # and False for active stores assert ( in_code_data_context.stores.get( "inactive_expectations_S3_store" ).store_backend._suppress_store_backend_id is True ) assert ( in_code_data_context.stores.get( "inactive_validations_S3_store" ).store_backend._suppress_store_backend_id is True ) assert ( in_code_data_context.stores.get( "expectations_S3_store" ).store_backend._suppress_store_backend_id is False ) assert ( in_code_data_context.stores.get( "validations_S3_store" ).store_backend._suppress_store_backend_id is False ) # InMemoryStoreBackend created for evaluation_parameters_store & inactive_evaluation_parameters_store assert ( in_code_data_context.stores.get( "inactive_evaluation_parameter_store" ).store_backend._suppress_store_backend_id is False ) assert ( in_code_data_context.stores.get( "evaluation_parameter_store" ).store_backend._suppress_store_backend_id is False )
[ "def", "test_suppress_store_backend_id_is_true_for_inactive_stores", "(", ")", ":", "bucket", "=", "\"leakybucket\"", "expectations_store_prefix", "=", "\"expectations_store_prefix\"", "validations_store_prefix", "=", "\"validations_store_prefix\"", "data_docs_store_prefix", "=", "\"data_docs_store_prefix\"", "# Create a bucket in Moto's mock AWS environment", "conn", "=", "boto3", ".", "resource", "(", "\"s3\"", ",", "region_name", "=", "\"us-east-1\"", ")", "conn", ".", "create_bucket", "(", "Bucket", "=", "bucket", ")", "# Create a DataContext", "# Add inactive stores", "inactive_bucket", "=", "\"inactive_leakybucket\"", "stores", "=", "{", "\"expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "bucket", ",", "\"prefix\"", ":", "expectations_store_prefix", ",", "}", ",", "}", ",", "\"validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "bucket", ",", "\"prefix\"", ":", "validations_store_prefix", ",", "}", ",", "}", ",", "\"evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"inactive_expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "inactive_bucket", ",", "\"prefix\"", ":", "expectations_store_prefix", ",", "}", ",", "}", ",", "\"inactive_validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "inactive_bucket", ",", "\"prefix\"", ":", "validations_store_prefix", ",", "}", ",", "}", ",", "\"inactive_evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "}", "in_code_data_context_project_config", "=", "build_in_code_data_context_project_config", "(", "bucket", "=", "\"leakybucket\"", ",", "expectations_store_prefix", "=", "expectations_store_prefix", ",", "validations_store_prefix", "=", "validations_store_prefix", ",", "data_docs_store_prefix", "=", "data_docs_store_prefix", ",", "stores", "=", "stores", ",", ")", "in_code_data_context", "=", "BaseDataContext", "(", "project_config", "=", "in_code_data_context_project_config", ")", "# Check here that suppress_store_backend_id == True for inactive stores", "# and False for active stores", "assert", "(", "in_code_data_context", ".", "stores", ".", "get", "(", "\"inactive_expectations_S3_store\"", ")", ".", "store_backend", ".", "_suppress_store_backend_id", "is", "True", ")", "assert", "(", "in_code_data_context", ".", "stores", ".", "get", "(", "\"inactive_validations_S3_store\"", ")", ".", "store_backend", ".", "_suppress_store_backend_id", "is", "True", ")", "assert", "(", "in_code_data_context", ".", "stores", ".", "get", "(", "\"expectations_S3_store\"", ")", ".", "store_backend", ".", "_suppress_store_backend_id", "is", "False", ")", "assert", "(", "in_code_data_context", ".", "stores", ".", "get", "(", "\"validations_S3_store\"", ")", ".", "store_backend", ".", "_suppress_store_backend_id", "is", "False", ")", "# InMemoryStoreBackend created for evaluation_parameters_store & inactive_evaluation_parameters_store", "assert", "(", "in_code_data_context", ".", "stores", ".", 
"get", "(", "\"inactive_evaluation_parameter_store\"", ")", ".", "store_backend", ".", "_suppress_store_backend_id", "is", "False", ")", "assert", "(", "in_code_data_context", ".", "stores", ".", "get", "(", "\"evaluation_parameter_store\"", ")", ".", "store_backend", ".", "_suppress_store_backend_id", "is", "False", ")" ]
[ 348, 0 ]
[ 455, 5 ]
python
en
['en', 'error', 'th']
False
test_inaccessible_active_bucket_warning_messages
(caplog)
What does this test do and why? Trying to create a data context with unreachable ACTIVE stores should show a warning message once per store e.g. Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named expectations_S3_store Active stores are those named in: "expectations_store_name", "validations_store_name", "evaluation_parameter_store_name"
What does this test do and why?
def test_inaccessible_active_bucket_warning_messages(caplog): """ What does this test do and why? Trying to create a data context with unreachable ACTIVE stores should show a warning message once per store e.g. Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named expectations_S3_store Active stores are those named in: "expectations_store_name", "validations_store_name", "evaluation_parameter_store_name" """ bucket = "leakybucket" expectations_store_prefix = "expectations_store_prefix" validations_store_prefix = "validations_store_prefix" data_docs_store_prefix = "data_docs_store_prefix" # Create a bucket in Moto's mock AWS environment conn = boto3.resource("s3", region_name="us-east-1") conn.create_bucket(Bucket=bucket) # Create a DataContext # Add inactive stores inactive_bucket = "inactive_leakybucket" stores = { "expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": inactive_bucket, "prefix": expectations_store_prefix, }, }, "validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": inactive_bucket, "prefix": validations_store_prefix, }, }, "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"}, } in_code_data_context_project_config = build_in_code_data_context_project_config( bucket="leakybucket", expectations_store_prefix=expectations_store_prefix, validations_store_prefix=validations_store_prefix, data_docs_store_prefix=data_docs_store_prefix, stores=stores, ) _ = BaseDataContext(project_config=in_code_data_context_project_config) assert ( caplog.messages.count( "Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named expectations_S3_store" ) == 1 ) assert ( caplog.messages.count( "Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named validations_S3_store" ) == 1 )
[ "def", "test_inaccessible_active_bucket_warning_messages", "(", "caplog", ")", ":", "bucket", "=", "\"leakybucket\"", "expectations_store_prefix", "=", "\"expectations_store_prefix\"", "validations_store_prefix", "=", "\"validations_store_prefix\"", "data_docs_store_prefix", "=", "\"data_docs_store_prefix\"", "# Create a bucket in Moto's mock AWS environment", "conn", "=", "boto3", ".", "resource", "(", "\"s3\"", ",", "region_name", "=", "\"us-east-1\"", ")", "conn", ".", "create_bucket", "(", "Bucket", "=", "bucket", ")", "# Create a DataContext", "# Add inactive stores", "inactive_bucket", "=", "\"inactive_leakybucket\"", "stores", "=", "{", "\"expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "inactive_bucket", ",", "\"prefix\"", ":", "expectations_store_prefix", ",", "}", ",", "}", ",", "\"validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "inactive_bucket", ",", "\"prefix\"", ":", "validations_store_prefix", ",", "}", ",", "}", ",", "\"evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "}", "in_code_data_context_project_config", "=", "build_in_code_data_context_project_config", "(", "bucket", "=", "\"leakybucket\"", ",", "expectations_store_prefix", "=", "expectations_store_prefix", ",", "validations_store_prefix", "=", "validations_store_prefix", ",", "data_docs_store_prefix", "=", "data_docs_store_prefix", ",", "stores", "=", "stores", ",", ")", "_", "=", "BaseDataContext", "(", "project_config", "=", "in_code_data_context_project_config", ")", "assert", "(", "caplog", ".", "messages", ".", "count", "(", "\"Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named expectations_S3_store\"", ")", "==", "1", ")", "assert", "(", "caplog", ".", "messages", ".", "count", "(", "\"Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named validations_S3_store\"", ")", "==", "1", ")" ]
[ 459, 0 ]
[ 519, 5 ]
python
en
['en', 'error', 'th']
False
test_inaccessible_inactive_bucket_no_warning_messages
(caplog)
What does this test do and why? Trying to create a data context with unreachable INACTIVE stores should show no warning messages Inactive stores are those NOT named in: "expectations_store_name", "validations_store_name", "evaluation_parameter_store_name"
What does this test do and why?
def test_inaccessible_inactive_bucket_no_warning_messages(caplog): """ What does this test do and why? Trying to create a data context with unreachable INACTIVE stores should show no warning messages Inactive stores are those NOT named in: "expectations_store_name", "validations_store_name", "evaluation_parameter_store_name" """ bucket = "leakybucket" expectations_store_prefix = "expectations_store_prefix" validations_store_prefix = "validations_store_prefix" data_docs_store_prefix = "data_docs_store_prefix" # Create a bucket in Moto's mock AWS environment conn = boto3.resource("s3", region_name="us-east-1") conn.create_bucket(Bucket=bucket) # Create a DataContext # Add inactive stores inactive_bucket = "inactive_leakybucket" stores = { "expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": bucket, "prefix": expectations_store_prefix, }, }, "validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": bucket, "prefix": validations_store_prefix, }, }, "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"}, "inactive_expectations_S3_store": { "class_name": "ExpectationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": inactive_bucket, "prefix": expectations_store_prefix, }, }, "inactive_validations_S3_store": { "class_name": "ValidationsStore", "store_backend": { "class_name": "TupleS3StoreBackend", "bucket": inactive_bucket, "prefix": validations_store_prefix, }, }, "inactive_evaluation_parameter_store": { "class_name": "EvaluationParameterStore" }, } in_code_data_context_project_config = build_in_code_data_context_project_config( bucket="leakybucket", expectations_store_prefix=expectations_store_prefix, validations_store_prefix=validations_store_prefix, data_docs_store_prefix=data_docs_store_prefix, stores=stores, ) _ = BaseDataContext(project_config=in_code_data_context_project_config) assert ( caplog.messages.count( "Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named expectations_S3_store" ) == 0 ) assert ( caplog.messages.count( "Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named validations_S3_store" ) == 0 )
[ "def", "test_inaccessible_inactive_bucket_no_warning_messages", "(", "caplog", ")", ":", "bucket", "=", "\"leakybucket\"", "expectations_store_prefix", "=", "\"expectations_store_prefix\"", "validations_store_prefix", "=", "\"validations_store_prefix\"", "data_docs_store_prefix", "=", "\"data_docs_store_prefix\"", "# Create a bucket in Moto's mock AWS environment", "conn", "=", "boto3", ".", "resource", "(", "\"s3\"", ",", "region_name", "=", "\"us-east-1\"", ")", "conn", ".", "create_bucket", "(", "Bucket", "=", "bucket", ")", "# Create a DataContext", "# Add inactive stores", "inactive_bucket", "=", "\"inactive_leakybucket\"", "stores", "=", "{", "\"expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "bucket", ",", "\"prefix\"", ":", "expectations_store_prefix", ",", "}", ",", "}", ",", "\"validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "bucket", ",", "\"prefix\"", ":", "validations_store_prefix", ",", "}", ",", "}", ",", "\"evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "\"inactive_expectations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ExpectationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "inactive_bucket", ",", "\"prefix\"", ":", "expectations_store_prefix", ",", "}", ",", "}", ",", "\"inactive_validations_S3_store\"", ":", "{", "\"class_name\"", ":", "\"ValidationsStore\"", ",", "\"store_backend\"", ":", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "inactive_bucket", ",", "\"prefix\"", ":", "validations_store_prefix", ",", "}", ",", "}", ",", "\"inactive_evaluation_parameter_store\"", ":", "{", "\"class_name\"", ":", "\"EvaluationParameterStore\"", "}", ",", "}", "in_code_data_context_project_config", "=", "build_in_code_data_context_project_config", "(", "bucket", "=", "\"leakybucket\"", ",", "expectations_store_prefix", "=", "expectations_store_prefix", ",", "validations_store_prefix", "=", "validations_store_prefix", ",", "data_docs_store_prefix", "=", "data_docs_store_prefix", ",", "stores", "=", "stores", ",", ")", "_", "=", "BaseDataContext", "(", "project_config", "=", "in_code_data_context_project_config", ")", "assert", "(", "caplog", ".", "messages", ".", "count", "(", "\"Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named expectations_S3_store\"", ")", "==", "0", ")", "assert", "(", "caplog", ".", "messages", ".", "count", "(", "\"Invalid store configuration: Please check the configuration of your TupleS3StoreBackend named validations_S3_store\"", ")", "==", "0", ")" ]
[ 523, 0 ]
[ 601, 5 ]
python
en
['en', 'error', 'th']
False
NeuralNetwork.makeModel
(self,inputShape,outputShape)
overrides base function Create and return a Keras Model
overrides base function Create and return a Keras Model
def makeModel(self,inputShape,outputShape):
    """
    overrides base function
    Create and return a Keras Model
    """
    dropoutStrength = 0.1

    _input = Input(shape=inputShape)

    #1
    x = Conv2D(128, (3, 3), activation='relu',padding='same', use_bias=False)(_input)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(64, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (3, 3),strides=(2,2), activation='relu', use_bias=False,padding='same')(x)
    x = BatchNormalization()(x)
    x1 = Dropout(dropoutStrength)(x)

    #2
    x = Conv2D(128, (5,5), activation='relu',padding='same', use_bias=False)(_input)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(64, (5,5), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (5,5), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (5,5),strides=(2,2), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x2 = Dropout(dropoutStrength)(x)

    #3
    x = Conv2D(128, (8,8), activation='relu',padding='same', use_bias=False)(_input)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(64, (8,8), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (8,8), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (8,8),strides=(2,2), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x3 = Dropout(dropoutStrength)(x)

    #Merge
    concat = concatenate([x1,x2,x3])
    concat = Conv2D(128, (3, 3), activation='relu',padding='same', use_bias=False)(concat)
    concat = BatchNormalization()(concat)
    concat = Conv2D(64, (3, 3), activation='relu',padding='same', use_bias=False)(concat)
    concat = BatchNormalization()(concat)
    concat = Conv2D(32, (3, 3), activation='relu',padding='same', use_bias=False)(concat)
    concat = BatchNormalization()(concat)

    #1
    x = Conv2D(32, (3, 3), activation='relu',padding='same', use_bias=False)(concat)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(64, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(128, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x1 = Dropout(dropoutStrength)(x)

    #2
    x = Conv2D(32, (5, 5), activation='relu',padding='same', use_bias=False)(concat)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (5, 5), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(64, (5,5), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(128, (5,5), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x2 = Dropout(dropoutStrength)(x)

    #3
    x = Conv2D(32, (8,8), activation='relu',padding='same', use_bias=False)(concat)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(32, (8,8), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(64, (8,8), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x = Dropout(dropoutStrength)(x)
    x = Conv2D(128, (8,8), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = UpSampling2D((2,2))(x)
    x3 = Dropout(dropoutStrength)(x)

    #Merge
    concat = concatenate([x1,x2,x3])
    x = Conv2D(128, (3, 3), activation='relu',padding='same', use_bias=False)(concat)
    x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(32, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(16, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Conv2D(8, (3, 3), activation='relu',padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)

    out = Conv2D(1, (3, 3), activation='relu',padding='same')(x)

    model = Model(_input,out)
    model.compile(optimizer='adam', loss='mse',metrics=["mae"])
    return model
[ "def", "makeModel", "(", "self", ",", "inputShape", ",", "outputShape", ")", ":", "dropoutStrength", "=", "0.1", "_input", "=", "Input", "(", "shape", "=", "inputShape", ")", "#1", "x", "=", "Conv2D", "(", "128", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "_input", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "64", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "3", ",", "3", ")", ",", "strides", "=", "(", "2", ",", "2", ")", ",", "activation", "=", "'relu'", ",", "use_bias", "=", "False", ",", "padding", "=", "'same'", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x1", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "#2", "x", "=", "Conv2D", "(", "128", ",", "(", "5", ",", "5", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "_input", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "64", ",", "(", "5", ",", "5", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "5", ",", "5", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "5", ",", "5", ")", ",", "strides", "=", "(", "2", ",", "2", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x2", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "#3", "x", "=", "Conv2D", "(", "128", ",", "(", "8", ",", "8", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "_input", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", 
"64", ",", "(", "8", ",", "8", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "8", ",", "8", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "MaxPooling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "8", ",", "8", ")", ",", "strides", "=", "(", "2", ",", "2", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x3", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "#Merge", "concat", "=", "concatenate", "(", "[", "x1", ",", "x2", ",", "x3", "]", ")", "concat", "=", "Conv2D", "(", "128", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "concat", ")", "concat", "=", "BatchNormalization", "(", ")", "(", "concat", ")", "concat", "=", "Conv2D", "(", "64", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "concat", ")", "concat", "=", "BatchNormalization", "(", ")", "(", "concat", ")", "concat", "=", "Conv2D", "(", "32", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "concat", ")", "concat", "=", "BatchNormalization", "(", ")", "(", "concat", ")", "#1", "x", "=", "Conv2D", "(", "32", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "concat", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "64", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "128", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x1", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "#2", "x", "=", "Conv2D", "(", "32", ",", "(", "5", ",", "5", ")", ",", "activation", "=", "'relu'", ",", "padding", 
"=", "'same'", ",", "use_bias", "=", "False", ")", "(", "concat", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "5", ",", "5", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "64", ",", "(", "5", ",", "5", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "128", ",", "(", "5", ",", "5", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x2", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "#3", "x", "=", "Conv2D", "(", "32", ",", "(", "8", ",", "8", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "concat", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "8", ",", "8", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "64", ",", "(", "8", ",", "8", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "128", ",", "(", "8", ",", "8", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "UpSampling2D", "(", "(", "2", ",", "2", ")", ")", "(", "x", ")", "x3", "=", "Dropout", "(", "dropoutStrength", ")", "(", "x", ")", "#Merge", "concat", "=", "concatenate", "(", "[", "x1", ",", "x2", ",", "x3", "]", ")", "x", "=", "Conv2D", "(", "128", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "concat", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "64", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "32", ",", "(", "3", ",", "3", ")", ",", "activation", 
"=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "16", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "x", "=", "Conv2D", "(", "8", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ",", "use_bias", "=", "False", ")", "(", "x", ")", "x", "=", "BatchNormalization", "(", ")", "(", "x", ")", "out", "=", "Conv2D", "(", "1", ",", "(", "3", ",", "3", ")", ",", "activation", "=", "'relu'", ",", "padding", "=", "'same'", ")", "(", "x", ")", "model", "=", "Model", "(", "_input", ",", "out", ")", "model", ".", "compile", "(", "optimizer", "=", "'adam'", ",", "loss", "=", "'mse'", ",", "metrics", "=", "[", "\"mae\"", "]", ")", "return", "model" ]
[ 11, 4 ]
[ 169, 20 ]
python
en
['en', 'error', 'th']
False
bind_aliases
(decls)
This function binds between class and it's typedefs. :param decls: list of all declarations :rtype: None
This function binds between class and it's typedefs.
def bind_aliases(decls):
    """
    This function binds between class and it's typedefs.

    :param decls: list of all declarations

    :rtype: None

    """
    visited = set()
    typedefs = [
        decl for decl in decls if isinstance(decl, declarations.typedef_t)]
    for decl in typedefs:
        type_ = declarations.remove_alias(decl.decl_type)
        if not isinstance(type_, declarations.declarated_t):
            continue
        cls_inst = type_.declaration
        if not isinstance(cls_inst, declarations.class_types):
            continue
        if id(cls_inst) not in visited:
            visited.add(id(cls_inst))
            del cls_inst.aliases[:]
        cls_inst.aliases.append(decl)
[ "def", "bind_aliases", "(", "decls", ")", ":", "visited", "=", "set", "(", ")", "typedefs", "=", "[", "decl", "for", "decl", "in", "decls", "if", "isinstance", "(", "decl", ",", "declarations", ".", "typedef_t", ")", "]", "for", "decl", "in", "typedefs", ":", "type_", "=", "declarations", ".", "remove_alias", "(", "decl", ".", "decl_type", ")", "if", "not", "isinstance", "(", "type_", ",", "declarations", ".", "declarated_t", ")", ":", "continue", "cls_inst", "=", "type_", ".", "declaration", "if", "not", "isinstance", "(", "cls_inst", ",", "declarations", ".", "class_types", ")", ":", "continue", "if", "id", "(", "cls_inst", ")", "not", "in", "visited", ":", "visited", ".", "add", "(", "id", "(", "cls_inst", ")", ")", "del", "cls_inst", ".", "aliases", "[", ":", "]", "cls_inst", ".", "aliases", ".", "append", "(", "decl", ")" ]
[ 8, 0 ]
[ 31, 37 ]
python
en
['en', 'error', 'th']
False
titanic_validator
(titanic_data_context_modular_api)
What does this test do and why? Ensures that all available expectation types work as expected
What does this test do and why? Ensures that all available expectation types work as expected
def titanic_validator(titanic_data_context_modular_api):
    """
    What does this test do and why?
    Ensures that all available expectation types work as expected
    """
    df = ge.read_csv(file_relative_path(__file__, "../test_sets/Titanic.csv"))

    return get_pandas_runtime_validator(titanic_data_context_modular_api, df)
[ "def", "titanic_validator", "(", "titanic_data_context_modular_api", ")", ":", "df", "=", "ge", ".", "read_csv", "(", "file_relative_path", "(", "__file__", ",", "\"../test_sets/Titanic.csv\"", ")", ")", "return", "get_pandas_runtime_validator", "(", "titanic_data_context_modular_api", ",", "df", ")" ]
[ 177, 0 ]
[ 184, 77 ]
python
en
['en', 'error', 'th']
False
taxi_validator_pandas
(titanic_data_context_modular_api)
What does this test do and why? Ensures that all available expectation types work as expected
What does this test do and why? Ensures that all available expectation types work as expected
def taxi_validator_pandas(titanic_data_context_modular_api):
    """
    What does this test do and why?
    Ensures that all available expectation types work as expected
    """

    df = ge.read_csv(
        file_relative_path(
            __file__,
            "../test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv",
        ),
        parse_dates=["pickup_datetime", "dropoff_datetime"],
    )

    return get_pandas_runtime_validator(titanic_data_context_modular_api, df)
[ "def", "taxi_validator_pandas", "(", "titanic_data_context_modular_api", ")", ":", "df", "=", "ge", ".", "read_csv", "(", "file_relative_path", "(", "__file__", ",", "\"../test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv\"", ",", ")", ",", "parse_dates", "=", "[", "\"pickup_datetime\"", ",", "\"dropoff_datetime\"", "]", ",", ")", "return", "get_pandas_runtime_validator", "(", "titanic_data_context_modular_api", ",", "df", ")" ]
[ 188, 0 ]
[ 202, 77 ]
python
en
['en', 'error', 'th']
False
taxi_validator_spark
(spark_session, titanic_data_context_modular_api)
What does this test do and why? Ensures that all available expectation types work as expected
What does this test do and why? Ensures that all available expectation types work as expected
def taxi_validator_spark(spark_session, titanic_data_context_modular_api):
    """
    What does this test do and why?
    Ensures that all available expectation types work as expected
    """
    df = ge.read_csv(
        file_relative_path(
            __file__,
            "../test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv",
        ),
        parse_dates=["pickup_datetime", "dropoff_datetime"],
    )
    return get_spark_runtime_validator(titanic_data_context_modular_api, df)
[ "def", "taxi_validator_spark", "(", "spark_session", ",", "titanic_data_context_modular_api", ")", ":", "df", "=", "ge", ".", "read_csv", "(", "file_relative_path", "(", "__file__", ",", "\"../test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv\"", ",", ")", ",", "parse_dates", "=", "[", "\"pickup_datetime\"", ",", "\"dropoff_datetime\"", "]", ",", ")", "return", "get_spark_runtime_validator", "(", "titanic_data_context_modular_api", ",", "df", ")" ]
[ 206, 0 ]
[ 218, 76 ]
python
en
['en', 'error', 'th']
False
taxi_validator_sqlalchemy
(sa, titanic_data_context_modular_api)
What does this test do and why? Ensures that all available expectation types work as expected
What does this test do and why? Ensures that all available expectation types work as expected
def taxi_validator_sqlalchemy(sa, titanic_data_context_modular_api):
    """
    What does this test do and why?
    Ensures that all available expectation types work as expected
    """
    df = ge.read_csv(
        file_relative_path(
            __file__,
            "../test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv",
        ),
        parse_dates=["pickup_datetime", "dropoff_datetime"],
    )
    return get_sqlalchemy_runtime_validator_postgresql(df)
[ "def", "taxi_validator_sqlalchemy", "(", "sa", ",", "titanic_data_context_modular_api", ")", ":", "df", "=", "ge", ".", "read_csv", "(", "file_relative_path", "(", "__file__", ",", "\"../test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv\"", ",", ")", ",", "parse_dates", "=", "[", "\"pickup_datetime\"", ",", "\"dropoff_datetime\"", "]", ",", ")", "return", "get_sqlalchemy_runtime_validator_postgresql", "(", "df", ")" ]
[ 222, 0 ]
[ 234, 58 ]
python
en
['en', 'error', 'th']
False
cardinality_validator
(titanic_data_context_modular_api)
What does this test do and why? Ensures that all available expectation types work as expected
What does this test do and why? Ensures that all available expectation types work as expected
def cardinality_validator(titanic_data_context_modular_api):
    """
    What does this test do and why?
    Ensures that all available expectation types work as expected
    """
    df = pd.DataFrame(
        {
            # TODO: Uncomment assertions that use col_none when proportion_of_unique_values bug is fixed for columns
            #  that are all NULL/None
            # "col_none": [None for i in range(0, 1000)],
            "col_one": [0 for i in range(0, 1000)],
            "col_two": [i % 2 for i in range(0, 1000)],
            "col_very_few": [i % 10 for i in range(0, 1000)],
            "col_few": [i % 50 for i in range(0, 1000)],
            "col_many": [i % 100 for i in range(0, 1000)],
            "col_very_many": [i % 500 for i in range(0, 1000)],
            "col_unique": [i for i in range(0, 1000)],
        }
    )
    return get_pandas_runtime_validator(titanic_data_context_modular_api, df)
[ "def", "cardinality_validator", "(", "titanic_data_context_modular_api", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "{", "# TODO: Uncomment assertions that use col_none when proportion_of_unique_values bug is fixed for columns", "# that are all NULL/None", "# \"col_none\": [None for i in range(0, 1000)],", "\"col_one\"", ":", "[", "0", "for", "i", "in", "range", "(", "0", ",", "1000", ")", "]", ",", "\"col_two\"", ":", "[", "i", "%", "2", "for", "i", "in", "range", "(", "0", ",", "1000", ")", "]", ",", "\"col_very_few\"", ":", "[", "i", "%", "10", "for", "i", "in", "range", "(", "0", ",", "1000", ")", "]", ",", "\"col_few\"", ":", "[", "i", "%", "50", "for", "i", "in", "range", "(", "0", ",", "1000", ")", "]", ",", "\"col_many\"", ":", "[", "i", "%", "100", "for", "i", "in", "range", "(", "0", ",", "1000", ")", "]", ",", "\"col_very_many\"", ":", "[", "i", "%", "500", "for", "i", "in", "range", "(", "0", ",", "1000", ")", "]", ",", "\"col_unique\"", ":", "[", "i", "for", "i", "in", "range", "(", "0", ",", "1000", ")", "]", ",", "}", ")", "return", "get_pandas_runtime_validator", "(", "titanic_data_context_modular_api", ",", "df", ")" ]
[ 252, 0 ]
[ 271, 77 ]
python
en
['en', 'error', 'th']
False
test_profiler_init_no_config
( cardinality_validator, )
What does this test do and why? Confirms that profiler can initialize with no config.
What does this test do and why? Confirms that profiler can initialize with no config.
def test_profiler_init_no_config(
    cardinality_validator,
):
    """
    What does this test do and why?
    Confirms that profiler can initialize with no config.
    """
    profiler = UserConfigurableProfiler(cardinality_validator)

    assert profiler.primary_or_compound_key == []
    assert profiler.ignored_columns == []
    assert profiler.value_set_threshold == "MANY"
    assert not profiler.table_expectations_only
    assert profiler.excluded_expectations == []
[ "def", "test_profiler_init_no_config", "(", "cardinality_validator", ",", ")", ":", "profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ")", "assert", "profiler", ".", "primary_or_compound_key", "==", "[", "]", "assert", "profiler", ".", "ignored_columns", "==", "[", "]", "assert", "profiler", ".", "value_set_threshold", "==", "\"MANY\"", "assert", "not", "profiler", ".", "table_expectations_only", "assert", "profiler", ".", "excluded_expectations", "==", "[", "]" ]
[ 304, 0 ]
[ 316, 47 ]
python
en
['en', 'error', 'th']
False
test_profiler_init_full_config_no_semantic_types
(cardinality_validator)
What does this test do and why? Confirms that profiler initializes properly with a full config, without a semantic_types dict
What does this test do and why? Confirms that profiler initializes properly with a full config, without a semantic_types dict
def test_profiler_init_full_config_no_semantic_types(cardinality_validator):
    """
    What does this test do and why?
    Confirms that profiler initializes properly with a full config, without a semantic_types dict
    """

    profiler = UserConfigurableProfiler(
        cardinality_validator,
        primary_or_compound_key=["col_unique"],
        ignored_columns=["col_one"],
        value_set_threshold="UNIQUE",
        table_expectations_only=False,
        excluded_expectations=["expect_column_values_to_not_be_null"],
    )

    assert profiler.primary_or_compound_key == ["col_unique"]
    assert profiler.ignored_columns == [
        "col_one",
    ]
    assert profiler.value_set_threshold == "UNIQUE"
    assert not profiler.table_expectations_only
    assert profiler.excluded_expectations == ["expect_column_values_to_not_be_null"]

    assert "col_one" not in profiler.column_info
[ "def", "test_profiler_init_full_config_no_semantic_types", "(", "cardinality_validator", ")", ":", "profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "primary_or_compound_key", "=", "[", "\"col_unique\"", "]", ",", "ignored_columns", "=", "[", "\"col_one\"", "]", ",", "value_set_threshold", "=", "\"UNIQUE\"", ",", "table_expectations_only", "=", "False", ",", "excluded_expectations", "=", "[", "\"expect_column_values_to_not_be_null\"", "]", ",", ")", "assert", "profiler", ".", "primary_or_compound_key", "==", "[", "\"col_unique\"", "]", "assert", "profiler", ".", "ignored_columns", "==", "[", "\"col_one\"", ",", "]", "assert", "profiler", ".", "value_set_threshold", "==", "\"UNIQUE\"", "assert", "not", "profiler", ".", "table_expectations_only", "assert", "profiler", ".", "excluded_expectations", "==", "[", "\"expect_column_values_to_not_be_null\"", "]", "assert", "\"col_one\"", "not", "in", "profiler", ".", "column_info" ]
[ 319, 0 ]
[ 341, 48 ]
python
en
['en', 'error', 'th']
False
test_init_with_semantic_types
(cardinality_validator)
What does this test do and why? Confirms that profiler initializes properly with a full config and a semantic_types dict
What does this test do and why? Confirms that profiler initializes properly with a full config and a semantic_types dict
def test_init_with_semantic_types(cardinality_validator):
    """
    What does this test do and why?
    Confirms that profiler initializes properly with a full config and a semantic_types dict
    """
    semantic_types = {
        "numeric": ["col_few", "col_many", "col_very_many"],
        "value_set": ["col_two", "col_very_few"],
    }
    profiler = UserConfigurableProfiler(
        cardinality_validator,
        semantic_types_dict=semantic_types,
        primary_or_compound_key=["col_unique"],
        ignored_columns=["col_one"],
        value_set_threshold="unique",
        table_expectations_only=False,
        excluded_expectations=["expect_column_values_to_not_be_null"],
    )

    assert "col_one" not in profiler.column_info

    # assert profiler.column_info.get("col_none") == {
    #     "cardinality": "NONE",
    #     "type": "NUMERIC",
    #     "semantic_types": [],
    # }
    assert profiler.column_info.get("col_two") == {
        "cardinality": "TWO",
        "type": "INT",
        "semantic_types": ["VALUE_SET"],
    }
    assert profiler.column_info.get("col_very_few") == {
        "cardinality": "VERY_FEW",
        "type": "INT",
        "semantic_types": ["VALUE_SET"],
    }
    assert profiler.column_info.get("col_few") == {
        "cardinality": "FEW",
        "type": "INT",
        "semantic_types": ["NUMERIC"],
    }
    assert profiler.column_info.get("col_many") == {
        "cardinality": "MANY",
        "type": "INT",
        "semantic_types": ["NUMERIC"],
    }
    assert profiler.column_info.get("col_very_many") == {
        "cardinality": "VERY_MANY",
        "type": "INT",
        "semantic_types": ["NUMERIC"],
    }
    assert profiler.column_info.get("col_unique") == {
        "cardinality": "UNIQUE",
        "type": "INT",
        "semantic_types": [],
    }
[ "def", "test_init_with_semantic_types", "(", "cardinality_validator", ")", ":", "semantic_types", "=", "{", "\"numeric\"", ":", "[", "\"col_few\"", ",", "\"col_many\"", ",", "\"col_very_many\"", "]", ",", "\"value_set\"", ":", "[", "\"col_two\"", ",", "\"col_very_few\"", "]", ",", "}", "profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "semantic_types_dict", "=", "semantic_types", ",", "primary_or_compound_key", "=", "[", "\"col_unique\"", "]", ",", "ignored_columns", "=", "[", "\"col_one\"", "]", ",", "value_set_threshold", "=", "\"unique\"", ",", "table_expectations_only", "=", "False", ",", "excluded_expectations", "=", "[", "\"expect_column_values_to_not_be_null\"", "]", ",", ")", "assert", "\"col_one\"", "not", "in", "profiler", ".", "column_info", "# assert profiler.column_info.get(\"col_none\") == {", "# \"cardinality\": \"NONE\",", "# \"type\": \"NUMERIC\",", "# \"semantic_types\": [],", "# }", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"col_two\"", ")", "==", "{", "\"cardinality\"", ":", "\"TWO\"", ",", "\"type\"", ":", "\"INT\"", ",", "\"semantic_types\"", ":", "[", "\"VALUE_SET\"", "]", ",", "}", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"col_very_few\"", ")", "==", "{", "\"cardinality\"", ":", "\"VERY_FEW\"", ",", "\"type\"", ":", "\"INT\"", ",", "\"semantic_types\"", ":", "[", "\"VALUE_SET\"", "]", ",", "}", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"col_few\"", ")", "==", "{", "\"cardinality\"", ":", "\"FEW\"", ",", "\"type\"", ":", "\"INT\"", ",", "\"semantic_types\"", ":", "[", "\"NUMERIC\"", "]", ",", "}", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"col_many\"", ")", "==", "{", "\"cardinality\"", ":", "\"MANY\"", ",", "\"type\"", ":", "\"INT\"", ",", "\"semantic_types\"", ":", "[", "\"NUMERIC\"", "]", ",", "}", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"col_very_many\"", ")", "==", "{", "\"cardinality\"", ":", "\"VERY_MANY\"", ",", "\"type\"", ":", "\"INT\"", ",", "\"semantic_types\"", ":", "[", "\"NUMERIC\"", "]", ",", "}", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"col_unique\"", ")", "==", "{", "\"cardinality\"", ":", "\"UNIQUE\"", ",", "\"type\"", ":", "\"INT\"", ",", "\"semantic_types\"", ":", "[", "]", ",", "}" ]
[ 344, 0 ]
[ 400, 5 ]
python
en
['en', 'error', 'th']
False
test__validate_config
(cardinality_validator)
What does this test do and why? Tests the validate config function on the profiler
What does this test do and why? Tests the validate config function on the profiler
def test__validate_config(cardinality_validator):
    """
    What does this test do and why?
    Tests the validate config function on the profiler
    """

    with pytest.raises(AssertionError) as e:
        UserConfigurableProfiler(cardinality_validator, ignored_columns="col_name")
    assert e.typename == "AssertionError"

    with pytest.raises(AssertionError) as e:
        UserConfigurableProfiler(cardinality_validator, table_expectations_only="True")
    assert e.typename == "AssertionError"
[ "def", "test__validate_config", "(", "cardinality_validator", ")", ":", "with", "pytest", ".", "raises", "(", "AssertionError", ")", "as", "e", ":", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "ignored_columns", "=", "\"col_name\"", ")", "assert", "e", ".", "typename", "==", "\"AssertionError\"", "with", "pytest", ".", "raises", "(", "AssertionError", ")", "as", "e", ":", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "table_expectations_only", "=", "\"True\"", ")", "assert", "e", ".", "typename", "==", "\"AssertionError\"" ]
[ 403, 0 ]
[ 415, 41 ]
python
en
['en', 'error', 'th']
False
test__validate_semantic_types_dict
(cardinality_validator)
What does this test do and why? Tests that _validate_semantic_types_dict function errors when not formatted correctly
What does this test do and why? Tests that _validate_semantic_types_dict function errors when not formatted correctly
def test__validate_semantic_types_dict(cardinality_validator):
    """
    What does this test do and why?
    Tests that _validate_semantic_types_dict function errors when not formatted correctly
    """

    bad_semantic_types_dict_type = {"value_set": "col_few"}
    with pytest.raises(AssertionError) as e:
        UserConfigurableProfiler(
            cardinality_validator, semantic_types_dict=bad_semantic_types_dict_type
        )
    assert e.value.args[0] == (
        "Entries in semantic type dict must be lists of column names e.g. "
        "{'semantic_types': {'numeric': ['number_of_transactions']}}"
    )

    bad_semantic_types_incorrect_type = {"incorrect_type": ["col_few"]}
    with pytest.raises(ValueError) as e:
        UserConfigurableProfiler(
            cardinality_validator, semantic_types_dict=bad_semantic_types_incorrect_type
        )
    assert e.value.args[0] == (
        f"incorrect_type is not a recognized semantic_type. Please only include one of "
        f"{profiler_semantic_types}"
    )

    # Error if column is specified for both semantic_types and ignored
    working_semantic_type = {"numeric": ["col_few"]}
    with pytest.raises(ValueError) as e:
        UserConfigurableProfiler(
            cardinality_validator,
            semantic_types_dict=working_semantic_type,
            ignored_columns=["col_few"],
        )
    assert e.value.args[0] == (
        f"Column col_few is specified in both the semantic_types_dict and the list of ignored columns. Please remove "
        f"one of these entries to proceed."
    )
[ "def", "test__validate_semantic_types_dict", "(", "cardinality_validator", ")", ":", "bad_semantic_types_dict_type", "=", "{", "\"value_set\"", ":", "\"col_few\"", "}", "with", "pytest", ".", "raises", "(", "AssertionError", ")", "as", "e", ":", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "semantic_types_dict", "=", "bad_semantic_types_dict_type", ")", "assert", "e", ".", "value", ".", "args", "[", "0", "]", "==", "(", "\"Entries in semantic type dict must be lists of column names e.g. \"", "\"{'semantic_types': {'numeric': ['number_of_transactions']}}\"", ")", "bad_semantic_types_incorrect_type", "=", "{", "\"incorrect_type\"", ":", "[", "\"col_few\"", "]", "}", "with", "pytest", ".", "raises", "(", "ValueError", ")", "as", "e", ":", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "semantic_types_dict", "=", "bad_semantic_types_incorrect_type", ")", "assert", "e", ".", "value", ".", "args", "[", "0", "]", "==", "(", "f\"incorrect_type is not a recognized semantic_type. Please only include one of \"", "f\"{profiler_semantic_types}\"", ")", "# Error if column is specified for both semantic_types and ignored", "working_semantic_type", "=", "{", "\"numeric\"", ":", "[", "\"col_few\"", "]", "}", "with", "pytest", ".", "raises", "(", "ValueError", ")", "as", "e", ":", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "semantic_types_dict", "=", "working_semantic_type", ",", "ignored_columns", "=", "[", "\"col_few\"", "]", ",", ")", "assert", "e", ".", "value", ".", "args", "[", "0", "]", "==", "(", "f\"Column col_few is specified in both the semantic_types_dict and the list of ignored columns. Please remove \"", "f\"one of these entries to proceed.\"", ")" ]
[ 418, 0 ]
[ 455, 5 ]
python
en
['en', 'error', 'th']
False
test_build_suite_no_config
(titanic_validator, possible_expectations_set)
What does this test do and why? Tests that the build_suite function works as expected with no config
What does this test do and why? Tests that the build_suite function works as expected with no config
def test_build_suite_no_config(titanic_validator, possible_expectations_set):
    """
    What does this test do and why?
    Tests that the build_suite function works as expected with no config
    """
    profiler = UserConfigurableProfiler(titanic_validator)
    suite = profiler.build_suite()
    expectations_from_suite = {i.expectation_type for i in suite.expectations}

    assert expectations_from_suite.issubset(possible_expectations_set)
    assert len(suite.expectations) == 48
[ "def", "test_build_suite_no_config", "(", "titanic_validator", ",", "possible_expectations_set", ")", ":", "profiler", "=", "UserConfigurableProfiler", "(", "titanic_validator", ")", "suite", "=", "profiler", ".", "build_suite", "(", ")", "expectations_from_suite", "=", "{", "i", ".", "expectation_type", "for", "i", "in", "suite", ".", "expectations", "}", "assert", "expectations_from_suite", ".", "issubset", "(", "possible_expectations_set", ")", "assert", "len", "(", "suite", ".", "expectations", ")", "==", "48" ]
[ 458, 0 ]
[ 468, 40 ]
python
en
['en', 'error', 'th']
False
test_build_suite_with_config_and_no_semantic_types_dict
( titanic_validator, possible_expectations_set )
What does this test do and why? Tests that the build_suite function works as expected with a config and without a semantic_types dict
What does this test do and why? Tests that the build_suite function works as expected with a config and without a semantic_types dict
def test_build_suite_with_config_and_no_semantic_types_dict(
    titanic_validator, possible_expectations_set
):
    """
    What does this test do and why?
    Tests that the build_suite function works as expected with a config and without a semantic_types dict
    """
    profiler = UserConfigurableProfiler(
        titanic_validator,
        ignored_columns=["Survived", "Unnamed: 0"],
        excluded_expectations=["expect_column_mean_to_be_between"],
        primary_or_compound_key=["Name"],
        table_expectations_only=False,
        value_set_threshold="very_few",
    )
    suite = profiler.build_suite()
    (
        columns_with_expectations,
        expectations_from_suite,
    ) = get_set_of_columns_and_expectations_from_suite(suite)

    columns_expected_in_suite = {"Name", "PClass", "Age", "Sex", "SexCode"}
    assert columns_with_expectations == columns_expected_in_suite
    assert expectations_from_suite.issubset(possible_expectations_set)
    assert "expect_column_mean_to_be_between" not in expectations_from_suite
    assert len(suite.expectations) == 29
[ "def", "test_build_suite_with_config_and_no_semantic_types_dict", "(", "titanic_validator", ",", "possible_expectations_set", ")", ":", "profiler", "=", "UserConfigurableProfiler", "(", "titanic_validator", ",", "ignored_columns", "=", "[", "\"Survived\"", ",", "\"Unnamed: 0\"", "]", ",", "excluded_expectations", "=", "[", "\"expect_column_mean_to_be_between\"", "]", ",", "primary_or_compound_key", "=", "[", "\"Name\"", "]", ",", "table_expectations_only", "=", "False", ",", "value_set_threshold", "=", "\"very_few\"", ",", ")", "suite", "=", "profiler", ".", "build_suite", "(", ")", "(", "columns_with_expectations", ",", "expectations_from_suite", ",", ")", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite", ")", "columns_expected_in_suite", "=", "{", "\"Name\"", ",", "\"PClass\"", ",", "\"Age\"", ",", "\"Sex\"", ",", "\"SexCode\"", "}", "assert", "columns_with_expectations", "==", "columns_expected_in_suite", "assert", "expectations_from_suite", ".", "issubset", "(", "possible_expectations_set", ")", "assert", "\"expect_column_mean_to_be_between\"", "not", "in", "expectations_from_suite", "assert", "len", "(", "suite", ".", "expectations", ")", "==", "29" ]
[ 515, 0 ]
[ 540, 40 ]
python
en
['en', 'error', 'th']
False
test_build_suite_with_semantic_types_dict
( cardinality_validator, possible_expectations_set, )
What does this test do and why? Tests that the build_suite function works as expected with a semantic_types dict
What does this test do and why? Tests that the build_suite function works as expected with a semantic_types dict
def test_build_suite_with_semantic_types_dict(
    cardinality_validator,
    possible_expectations_set,
):
    """
    What does this test do and why?
    Tests that the build_suite function works as expected with a semantic_types dict
    """

    semantic_types = {
        "numeric": ["col_few", "col_many", "col_very_many"],
        "value_set": ["col_two", "col_very_few"],
    }

    profiler = UserConfigurableProfiler(
        cardinality_validator,
        semantic_types_dict=semantic_types,
        primary_or_compound_key=["col_unique"],
        ignored_columns=["col_one"],
        value_set_threshold="unique",
        table_expectations_only=False,
        excluded_expectations=["expect_column_values_to_not_be_null"],
    )
    suite = profiler.build_suite()
    (
        columns_with_expectations,
        expectations_from_suite,
    ) = get_set_of_columns_and_expectations_from_suite(suite)

    assert "column_one" not in columns_with_expectations
    assert "expect_column_values_to_not_be_null" not in expectations_from_suite
    assert expectations_from_suite.issubset(possible_expectations_set)
    assert len(suite.expectations) == 32

    value_set_expectations = [
        i
        for i in suite.expectations
        if i.expectation_type == "expect_column_values_to_be_in_set"
    ]
    value_set_columns = {i.kwargs.get("column") for i in value_set_expectations}

    assert len(value_set_columns) == 2
    assert value_set_columns == {"col_two", "col_very_few"}
[ "def", "test_build_suite_with_semantic_types_dict", "(", "cardinality_validator", ",", "possible_expectations_set", ",", ")", ":", "semantic_types", "=", "{", "\"numeric\"", ":", "[", "\"col_few\"", ",", "\"col_many\"", ",", "\"col_very_many\"", "]", ",", "\"value_set\"", ":", "[", "\"col_two\"", ",", "\"col_very_few\"", "]", ",", "}", "profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "semantic_types_dict", "=", "semantic_types", ",", "primary_or_compound_key", "=", "[", "\"col_unique\"", "]", ",", "ignored_columns", "=", "[", "\"col_one\"", "]", ",", "value_set_threshold", "=", "\"unique\"", ",", "table_expectations_only", "=", "False", ",", "excluded_expectations", "=", "[", "\"expect_column_values_to_not_be_null\"", "]", ",", ")", "suite", "=", "profiler", ".", "build_suite", "(", ")", "(", "columns_with_expectations", ",", "expectations_from_suite", ",", ")", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite", ")", "assert", "\"column_one\"", "not", "in", "columns_with_expectations", "assert", "\"expect_column_values_to_not_be_null\"", "not", "in", "expectations_from_suite", "assert", "expectations_from_suite", ".", "issubset", "(", "possible_expectations_set", ")", "assert", "len", "(", "suite", ".", "expectations", ")", "==", "32", "value_set_expectations", "=", "[", "i", "for", "i", "in", "suite", ".", "expectations", "if", "i", ".", "expectation_type", "==", "\"expect_column_values_to_be_in_set\"", "]", "value_set_columns", "=", "{", "i", ".", "kwargs", ".", "get", "(", "\"column\"", ")", "for", "i", "in", "value_set_expectations", "}", "assert", "len", "(", "value_set_columns", ")", "==", "2", "assert", "value_set_columns", "==", "{", "\"col_two\"", ",", "\"col_very_few\"", "}" ]
[ 543, 0 ]
[ 585, 59 ]
python
en
['en', 'error', 'th']
False
test_build_suite_when_suite_already_exists
(cardinality_validator)
What does this test do and why? Confirms that creating a new suite on an existing profiler wipes the previous suite
What does this test do and why? Confirms that creating a new suite on an existing profiler wipes the previous suite
def test_build_suite_when_suite_already_exists(cardinality_validator):
    """
    What does this test do and why?
    Confirms that creating a new suite on an existing profiler wipes the previous suite
    """
    profiler = UserConfigurableProfiler(
        cardinality_validator,
        table_expectations_only=True,
        excluded_expectations=["expect_table_row_count_to_be_between"],
    )

    suite = profiler.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(suite)
    assert len(suite.expectations) == 1
    assert "expect_table_columns_to_match_ordered_list" in expectations

    profiler.excluded_expectations = ["expect_table_columns_to_match_ordered_list"]
    suite = profiler.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(suite)
    assert len(suite.expectations) == 1
    assert "expect_table_row_count_to_be_between" in expectations
[ "def", "test_build_suite_when_suite_already_exists", "(", "cardinality_validator", ")", ":", "profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "table_expectations_only", "=", "True", ",", "excluded_expectations", "=", "[", "\"expect_table_row_count_to_be_between\"", "]", ",", ")", "suite", "=", "profiler", ".", "build_suite", "(", ")", "_", ",", "expectations", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite", ")", "assert", "len", "(", "suite", ".", "expectations", ")", "==", "1", "assert", "\"expect_table_columns_to_match_ordered_list\"", "in", "expectations", "profiler", ".", "excluded_expectations", "=", "[", "\"expect_table_columns_to_match_ordered_list\"", "]", "suite", "=", "profiler", ".", "build_suite", "(", ")", "_", ",", "expectations", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite", ")", "assert", "len", "(", "suite", ".", "expectations", ")", "==", "1", "assert", "\"expect_table_row_count_to_be_between\"", "in", "expectations" ]
[ 588, 0 ]
[ 608, 65 ]
python
en
['en', 'error', 'th']
False
test_primary_or_compound_key_not_found_in_columns
(cardinality_validator)
What does this test do and why? Confirms that an error is raised if a primary_or_compound key is specified with a column not found in the validator
What does this test do and why? Confirms that an error is raised if a primary_or_compound key is specified with a column not found in the validator
def test_primary_or_compound_key_not_found_in_columns(cardinality_validator):
    """
    What does this test do and why?
    Confirms that an error is raised if a primary_or_compound key is specified with a column not found in the validator
    """
    # regular case, should pass
    working_profiler = UserConfigurableProfiler(
        cardinality_validator, primary_or_compound_key=["col_unique"]
    )
    assert working_profiler.primary_or_compound_key == ["col_unique"]

    # key includes a non-existent column, should fail
    with pytest.raises(ValueError) as e:
        bad_key_profiler = UserConfigurableProfiler(
            cardinality_validator,
            primary_or_compound_key=["col_unique", "col_that_does_not_exist"],
        )
    assert e.value.args[0] == (
        f"Column col_that_does_not_exist not found. Please ensure that this column is in the Validator if "
        f"you would like to use it as a primary_or_compound_key."
    )

    # key includes a column that exists, but is in ignored_columns, should pass
    ignored_column_profiler = UserConfigurableProfiler(
        cardinality_validator,
        primary_or_compound_key=["col_unique", "col_one"],
        ignored_columns=["col_none", "col_one"],
    )
    assert ignored_column_profiler.primary_or_compound_key == ["col_unique", "col_one"]
[ "def", "test_primary_or_compound_key_not_found_in_columns", "(", "cardinality_validator", ")", ":", "# regular case, should pass", "working_profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "primary_or_compound_key", "=", "[", "\"col_unique\"", "]", ")", "assert", "working_profiler", ".", "primary_or_compound_key", "==", "[", "\"col_unique\"", "]", "# key includes a non-existent column, should fail", "with", "pytest", ".", "raises", "(", "ValueError", ")", "as", "e", ":", "bad_key_profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "primary_or_compound_key", "=", "[", "\"col_unique\"", ",", "\"col_that_does_not_exist\"", "]", ",", ")", "assert", "e", ".", "value", ".", "args", "[", "0", "]", "==", "(", "f\"Column col_that_does_not_exist not found. Please ensure that this column is in the Validator if \"", "f\"you would like to use it as a primary_or_compound_key.\"", ")", "# key includes a column that exists, but is in ignored_columns, should pass", "ignored_column_profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "primary_or_compound_key", "=", "[", "\"col_unique\"", ",", "\"col_one\"", "]", ",", "ignored_columns", "=", "[", "\"col_none\"", ",", "\"col_one\"", "]", ",", ")", "assert", "ignored_column_profiler", ".", "primary_or_compound_key", "==", "[", "\"col_unique\"", ",", "\"col_one\"", "]" ]
[ 611, 0 ]
[ 639, 87 ]
python
en
['en', 'error', 'th']
False
test_config_with_not_null_only
( titanic_data_context_modular_api, nulls_validator, possible_expectations_set )
What does this test do and why? Confirms that the not_null_only key in config works as expected.
What does this test do and why? Confirms that the not_null_only key in config works as expected.
def test_config_with_not_null_only(
    titanic_data_context_modular_api, nulls_validator, possible_expectations_set
):
    """
    What does this test do and why?
    Confirms that the not_null_only key in config works as expected.
    """

    excluded_expectations = [i for i in possible_expectations_set if "null" not in i]

    validator = nulls_validator

    profiler_without_not_null_only = UserConfigurableProfiler(
        validator, excluded_expectations, not_null_only=False
    )
    suite_without_not_null_only = profiler_without_not_null_only.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(
        suite_without_not_null_only
    )
    assert expectations == {
        "expect_column_values_to_be_null",
        "expect_column_values_to_not_be_null",
    }

    profiler_with_not_null_only = UserConfigurableProfiler(
        validator, excluded_expectations, not_null_only=True
    )
    not_null_only_suite = profiler_with_not_null_only.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(
        not_null_only_suite
    )
    assert expectations == {"expect_column_values_to_not_be_null"}

    no_config_profiler = UserConfigurableProfiler(validator)
    no_config_suite = no_config_profiler.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(no_config_suite)
    assert "expect_column_values_to_be_null" in expectations
[ "def", "test_config_with_not_null_only", "(", "titanic_data_context_modular_api", ",", "nulls_validator", ",", "possible_expectations_set", ")", ":", "excluded_expectations", "=", "[", "i", "for", "i", "in", "possible_expectations_set", "if", "\"null\"", "not", "in", "i", "]", "validator", "=", "nulls_validator", "profiler_without_not_null_only", "=", "UserConfigurableProfiler", "(", "validator", ",", "excluded_expectations", ",", "not_null_only", "=", "False", ")", "suite_without_not_null_only", "=", "profiler_without_not_null_only", ".", "build_suite", "(", ")", "_", ",", "expectations", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite_without_not_null_only", ")", "assert", "expectations", "==", "{", "\"expect_column_values_to_be_null\"", ",", "\"expect_column_values_to_not_be_null\"", ",", "}", "profiler_with_not_null_only", "=", "UserConfigurableProfiler", "(", "validator", ",", "excluded_expectations", ",", "not_null_only", "=", "True", ")", "not_null_only_suite", "=", "profiler_with_not_null_only", ".", "build_suite", "(", ")", "_", ",", "expectations", "=", "get_set_of_columns_and_expectations_from_suite", "(", "not_null_only_suite", ")", "assert", "expectations", "==", "{", "\"expect_column_values_to_not_be_null\"", "}", "no_config_profiler", "=", "UserConfigurableProfiler", "(", "validator", ")", "no_config_suite", "=", "no_config_profiler", ".", "build_suite", "(", ")", "_", ",", "expectations", "=", "get_set_of_columns_and_expectations_from_suite", "(", "no_config_suite", ")", "assert", "\"expect_column_values_to_be_null\"", "in", "expectations" ]
[ 642, 0 ]
[ 678, 60 ]
python
en
['en', 'error', 'th']
False
test_profiled_dataset_passes_own_validation
( cardinality_validator, titanic_data_context )
What does this test do and why? Confirms that a suite created on a validator with no config will pass when validated against itself
What does this test do and why? Confirms that a suite created on a validator with no config will pass when validated against itself
def test_profiled_dataset_passes_own_validation(
    cardinality_validator, titanic_data_context
):
    """
    What does this test do and why?
    Confirms that a suite created on a validator with no config will pass when validated against itself
    """
    context = titanic_data_context
    profiler = UserConfigurableProfiler(
        cardinality_validator, ignored_columns=["col_none"]
    )
    suite = profiler.build_suite()

    context.save_expectation_suite(suite)
    results = context.run_validation_operator(
        "action_list_operator", assets_to_validate=[cardinality_validator]
    )

    assert results["success"]
[ "def", "test_profiled_dataset_passes_own_validation", "(", "cardinality_validator", ",", "titanic_data_context", ")", ":", "context", "=", "titanic_data_context", "profiler", "=", "UserConfigurableProfiler", "(", "cardinality_validator", ",", "ignored_columns", "=", "[", "\"col_none\"", "]", ")", "suite", "=", "profiler", ".", "build_suite", "(", ")", "context", ".", "save_expectation_suite", "(", "suite", ")", "results", "=", "context", ".", "run_validation_operator", "(", "\"action_list_operator\"", ",", "assets_to_validate", "=", "[", "cardinality_validator", "]", ")", "assert", "results", "[", "\"success\"", "]" ]
[ 697, 0 ]
[ 715, 29 ]
python
en
['en', 'error', 'th']
False
test_profiler_all_expectation_types_pandas
( titanic_data_context_modular_api, taxi_validator_pandas, possible_expectations_set, taxi_data_semantic_types, taxi_data_ignored_columns, )
What does this test do and why? Ensures that all available expectation types work as expected for pandas
What does this test do and why? Ensures that all available expectation types work as expected for pandas
def test_profiler_all_expectation_types_pandas(
    titanic_data_context_modular_api,
    taxi_validator_pandas,
    possible_expectations_set,
    taxi_data_semantic_types,
    taxi_data_ignored_columns,
):
    """
    What does this test do and why?
    Ensures that all available expectation types work as expected for pandas
    """
    context = titanic_data_context_modular_api

    profiler = UserConfigurableProfiler(
        taxi_validator_pandas,
        semantic_types_dict=taxi_data_semantic_types,
        ignored_columns=taxi_data_ignored_columns,
        # TODO: Add primary_or_compound_key test
        #  primary_or_compound_key=[
        #     "vendor_id",
        #     "pickup_datetime",
        #     "dropoff_datetime",
        #     "trip_distance",
        #     "pickup_location_id",
        #     "dropoff_location_id",
        # ],
    )

    assert profiler.column_info.get("rate_code_id")
    suite = profiler.build_suite()
    assert len(suite.expectations) == 45
    (
        columns_with_expectations,
        expectations_from_suite,
    ) = get_set_of_columns_and_expectations_from_suite(suite)

    unexpected_expectations = {
        "expect_column_values_to_be_unique",
        "expect_column_values_to_be_null",
        "expect_compound_columns_to_be_unique",
    }
    assert expectations_from_suite == {
        i for i in possible_expectations_set if i not in unexpected_expectations
    }

    ignored_included_columns_overlap = [
        i for i in columns_with_expectations if i in taxi_data_ignored_columns
    ]
    assert len(ignored_included_columns_overlap) == 0

    results = context.run_validation_operator(
        "action_list_operator", assets_to_validate=[taxi_validator_pandas]
    )

    assert results["success"]
[ "def", "test_profiler_all_expectation_types_pandas", "(", "titanic_data_context_modular_api", ",", "taxi_validator_pandas", ",", "possible_expectations_set", ",", "taxi_data_semantic_types", ",", "taxi_data_ignored_columns", ",", ")", ":", "context", "=", "titanic_data_context_modular_api", "profiler", "=", "UserConfigurableProfiler", "(", "taxi_validator_pandas", ",", "semantic_types_dict", "=", "taxi_data_semantic_types", ",", "ignored_columns", "=", "taxi_data_ignored_columns", ",", "# TODO: Add primary_or_compound_key test", "# primary_or_compound_key=[", "# \"vendor_id\",", "# \"pickup_datetime\",", "# \"dropoff_datetime\",", "# \"trip_distance\",", "# \"pickup_location_id\",", "# \"dropoff_location_id\",", "# ],", ")", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"rate_code_id\"", ")", "suite", "=", "profiler", ".", "build_suite", "(", ")", "assert", "len", "(", "suite", ".", "expectations", ")", "==", "45", "(", "columns_with_expectations", ",", "expectations_from_suite", ",", ")", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite", ")", "unexpected_expectations", "=", "{", "\"expect_column_values_to_be_unique\"", ",", "\"expect_column_values_to_be_null\"", ",", "\"expect_compound_columns_to_be_unique\"", ",", "}", "assert", "expectations_from_suite", "==", "{", "i", "for", "i", "in", "possible_expectations_set", "if", "i", "not", "in", "unexpected_expectations", "}", "ignored_included_columns_overlap", "=", "[", "i", "for", "i", "in", "columns_with_expectations", "if", "i", "in", "taxi_data_ignored_columns", "]", "assert", "len", "(", "ignored_included_columns_overlap", ")", "==", "0", "results", "=", "context", ".", "run_validation_operator", "(", "\"action_list_operator\"", ",", "assets_to_validate", "=", "[", "taxi_validator_pandas", "]", ")", "assert", "results", "[", "\"success\"", "]" ]
[ 749, 0 ]
[ 803, 29 ]
python
en
['en', 'error', 'th']
False
test_profiler_all_expectation_types_spark
( titanic_data_context_modular_api, taxi_validator_spark, possible_expectations_set, taxi_data_semantic_types, taxi_data_ignored_columns, )
What does this test do and why? Ensures that all available expectation types work as expected for spark
What does this test do and why? Ensures that all available expectation types work as expected for spark
def test_profiler_all_expectation_types_spark( titanic_data_context_modular_api, taxi_validator_spark, possible_expectations_set, taxi_data_semantic_types, taxi_data_ignored_columns, ): """ What does this test do and why? Ensures that all available expectation types work as expected for spark """ context = titanic_data_context_modular_api profiler = UserConfigurableProfiler( taxi_validator_spark, semantic_types_dict=taxi_data_semantic_types, ignored_columns=taxi_data_ignored_columns, # TODO: Add primary_or_compound_key test # primary_or_compound_key=[ # "vendor_id", # "pickup_datetime", # "dropoff_datetime", # "trip_distance", # "pickup_location_id", # "dropoff_location_id", # ], ) assert profiler.column_info.get("rate_code_id") suite = profiler.build_suite() assert len(suite.expectations) == 45 ( columns_with_expectations, expectations_from_suite, ) = get_set_of_columns_and_expectations_from_suite(suite) unexpected_expectations = { "expect_column_values_to_be_unique", "expect_column_values_to_be_null", "expect_compound_columns_to_be_unique", } assert expectations_from_suite == { i for i in possible_expectations_set if i not in unexpected_expectations } ignored_included_columns_overlap = [ i for i in columns_with_expectations if i in taxi_data_ignored_columns ] assert len(ignored_included_columns_overlap) == 0 results = context.run_validation_operator( "action_list_operator", assets_to_validate=[taxi_validator_spark] ) assert results["success"]
[ "def", "test_profiler_all_expectation_types_spark", "(", "titanic_data_context_modular_api", ",", "taxi_validator_spark", ",", "possible_expectations_set", ",", "taxi_data_semantic_types", ",", "taxi_data_ignored_columns", ",", ")", ":", "context", "=", "titanic_data_context_modular_api", "profiler", "=", "UserConfigurableProfiler", "(", "taxi_validator_spark", ",", "semantic_types_dict", "=", "taxi_data_semantic_types", ",", "ignored_columns", "=", "taxi_data_ignored_columns", ",", "# TODO: Add primary_or_compound_key test", "# primary_or_compound_key=[", "# \"vendor_id\",", "# \"pickup_datetime\",", "# \"dropoff_datetime\",", "# \"trip_distance\",", "# \"pickup_location_id\",", "# \"dropoff_location_id\",", "# ],", ")", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"rate_code_id\"", ")", "suite", "=", "profiler", ".", "build_suite", "(", ")", "assert", "len", "(", "suite", ".", "expectations", ")", "==", "45", "(", "columns_with_expectations", ",", "expectations_from_suite", ",", ")", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite", ")", "unexpected_expectations", "=", "{", "\"expect_column_values_to_be_unique\"", ",", "\"expect_column_values_to_be_null\"", ",", "\"expect_compound_columns_to_be_unique\"", ",", "}", "assert", "expectations_from_suite", "==", "{", "i", "for", "i", "in", "possible_expectations_set", "if", "i", "not", "in", "unexpected_expectations", "}", "ignored_included_columns_overlap", "=", "[", "i", "for", "i", "in", "columns_with_expectations", "if", "i", "in", "taxi_data_ignored_columns", "]", "assert", "len", "(", "ignored_included_columns_overlap", ")", "==", "0", "results", "=", "context", ".", "run_validation_operator", "(", "\"action_list_operator\"", ",", "assets_to_validate", "=", "[", "taxi_validator_spark", "]", ")", "assert", "results", "[", "\"success\"", "]" ]
[ 810, 0 ]
[ 864, 29 ]
python
en
['en', 'error', 'th']
False
test_profiler_all_expectation_types_sqlalchemy
( titanic_data_context_modular_api, taxi_validator_sqlalchemy, possible_expectations_set, taxi_data_semantic_types, taxi_data_ignored_columns, )
What does this test do and why? Ensures that all available expectation types work as expected for sqlalchemy
What does this test do and why? Ensures that all available expectation types work as expected for sqlalchemy
def test_profiler_all_expectation_types_sqlalchemy( titanic_data_context_modular_api, taxi_validator_sqlalchemy, possible_expectations_set, taxi_data_semantic_types, taxi_data_ignored_columns, ): """ What does this test do and why? Ensures that all available expectation types work as expected for sqlalchemy """ if taxi_validator_sqlalchemy == None: pytest.skip("a message") context = titanic_data_context_modular_api profiler = UserConfigurableProfiler( taxi_validator_sqlalchemy, semantic_types_dict=taxi_data_semantic_types, ignored_columns=taxi_data_ignored_columns, # TODO: Add primary_or_compound_key test # primary_or_compound_key=[ # "vendor_id", # "pickup_datetime", # "dropoff_datetime", # "trip_distance", # "pickup_location_id", # "dropoff_location_id", # ], ) assert profiler.column_info.get("rate_code_id") suite = profiler.build_suite() assert len(suite.expectations) == 45 ( columns_with_expectations, expectations_from_suite, ) = get_set_of_columns_and_expectations_from_suite(suite) unexpected_expectations = { "expect_column_values_to_be_unique", "expect_column_values_to_be_null", "expect_compound_columns_to_be_unique", } assert expectations_from_suite == { i for i in possible_expectations_set if i not in unexpected_expectations } ignored_included_columns_overlap = [ i for i in columns_with_expectations if i in taxi_data_ignored_columns ] assert len(ignored_included_columns_overlap) == 0 results = context.run_validation_operator( "action_list_operator", assets_to_validate=[taxi_validator_sqlalchemy] ) assert results["success"]
[ "def", "test_profiler_all_expectation_types_sqlalchemy", "(", "titanic_data_context_modular_api", ",", "taxi_validator_sqlalchemy", ",", "possible_expectations_set", ",", "taxi_data_semantic_types", ",", "taxi_data_ignored_columns", ",", ")", ":", "if", "taxi_validator_sqlalchemy", "==", "None", ":", "pytest", ".", "skip", "(", "\"a message\"", ")", "context", "=", "titanic_data_context_modular_api", "profiler", "=", "UserConfigurableProfiler", "(", "taxi_validator_sqlalchemy", ",", "semantic_types_dict", "=", "taxi_data_semantic_types", ",", "ignored_columns", "=", "taxi_data_ignored_columns", ",", "# TODO: Add primary_or_compound_key test", "# primary_or_compound_key=[", "# \"vendor_id\",", "# \"pickup_datetime\",", "# \"dropoff_datetime\",", "# \"trip_distance\",", "# \"pickup_location_id\",", "# \"dropoff_location_id\",", "# ],", ")", "assert", "profiler", ".", "column_info", ".", "get", "(", "\"rate_code_id\"", ")", "suite", "=", "profiler", ".", "build_suite", "(", ")", "assert", "len", "(", "suite", ".", "expectations", ")", "==", "45", "(", "columns_with_expectations", ",", "expectations_from_suite", ",", ")", "=", "get_set_of_columns_and_expectations_from_suite", "(", "suite", ")", "unexpected_expectations", "=", "{", "\"expect_column_values_to_be_unique\"", ",", "\"expect_column_values_to_be_null\"", ",", "\"expect_compound_columns_to_be_unique\"", ",", "}", "assert", "expectations_from_suite", "==", "{", "i", "for", "i", "in", "possible_expectations_set", "if", "i", "not", "in", "unexpected_expectations", "}", "ignored_included_columns_overlap", "=", "[", "i", "for", "i", "in", "columns_with_expectations", "if", "i", "in", "taxi_data_ignored_columns", "]", "assert", "len", "(", "ignored_included_columns_overlap", ")", "==", "0", "results", "=", "context", ".", "run_validation_operator", "(", "\"action_list_operator\"", ",", "assets_to_validate", "=", "[", "taxi_validator_sqlalchemy", "]", ")", "assert", "results", "[", "\"success\"", "]" ]
[ 871, 0 ]
[ 928, 29 ]
python
en
['en', 'error', 'th']
False
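The three taxi-data records above drive the profiler through the same configuration surface. A hedged sketch of that configuration, with invented column names standing in for the real taxi_data_semantic_types and taxi_data_ignored_columns fixtures; the semantic-type keys shown are the usual categories the UserConfigurableProfiler understands, not values copied from this dataset:

    # Column names below are placeholders; `taxi_validator` is an assumed validator object.
    semantic_types = {
        "numeric": ["fare_amount", "tip_amount"],
        "value_set": ["payment_type"],
        "datetime": ["pickup_datetime"],
    }
    profiler = UserConfigurableProfiler(
        taxi_validator,
        semantic_types_dict=semantic_types,
        ignored_columns=["store_and_fwd_flag"],
    )
    suite = profiler.build_suite()             # the tests above expect 45 expectations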
_get_dialect_type_module
(dialect)
Given a dialect, returns the dialect type, which defines the engine/system that is used to communicate with the database/database implementation. Currently checks for RedShift/BigQuery dialects
Given a dialect, returns the dialect type, which defines the engine/system that is used to communicate with the database/database implementation. Currently checks for RedShift/BigQuery dialects
def _get_dialect_type_module(dialect): """Given a dialect, returns the dialect type, which is defines the engine/system that is used to communicates with the database/database implementation. Currently checks for RedShift/BigQuery dialects""" if dialect is None: logger.warning( "No sqlalchemy dialect found; relying in top-level sqlalchemy types." ) return sa try: # Redshift does not (yet) export types to top level; only recognize base SA types if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect): return dialect.sa except (TypeError, AttributeError): pass # Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple try: if ( isinstance( dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect, ) and bigquery_types_tuple is not None ): return bigquery_types_tuple except (TypeError, AttributeError): pass return dialect
[ "def", "_get_dialect_type_module", "(", "dialect", ")", ":", "if", "dialect", "is", "None", ":", "logger", ".", "warning", "(", "\"No sqlalchemy dialect found; relying in top-level sqlalchemy types.\"", ")", "return", "sa", "try", ":", "# Redshift does not (yet) export types to top level; only recognize base SA types", "if", "isinstance", "(", "dialect", ",", "sqlalchemy_redshift", ".", "dialect", ".", "RedshiftDialect", ")", ":", "return", "dialect", ".", "sa", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "pass", "# Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple", "try", ":", "if", "(", "isinstance", "(", "dialect", ",", "pybigquery", ".", "sqlalchemy_bigquery", ".", "BigQueryDialect", ",", ")", "and", "bigquery_types_tuple", "is", "not", "None", ")", ":", "return", "bigquery_types_tuple", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "pass", "return", "dialect" ]
[ 111, 0 ]
[ 139, 18 ]
python
en
['en', 'en', 'en']
True
SqlAlchemyExecutionEngine.__init__
( self, name=None, credentials=None, data_context=None, engine=None, connection_string=None, url=None, batch_data_dict=None, create_temp_table=True, **kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine )
Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the desired database. Also initializes the dialect to be used and configures usage statistics. Args: name (str): \ The name of the SqlAlchemyExecutionEngine credentials: \ If the Execution Engine is not provided, the credentials can be used to build the Execution Engine. If the Engine is provided, it will be used instead data_context (DataContext): \ An object representing a Great Expectations project that can be used to access Expectation Suites and the Project Data itself engine (Engine): \ A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, useful if an Engine has already been configured and should be reused. Will override Credentials if provided. connection_string (string): \ If neither the engines nor the credentials have been provided, a connection string can be used to access the data. This will be overridden by both the engine and credentials if those are provided. url (string): \ If neither the engines, the credentials, nor the connection_string have been provided, a url can be used to access the data. This will be overridden by all other configuration options if any are provided.
Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the desired database. Also initializes the dialect to be used and configures usage statistics.
def __init__( self, name=None, credentials=None, data_context=None, engine=None, connection_string=None, url=None, batch_data_dict=None, create_temp_table=True, **kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine ): """Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the desired database. Also initializes the dialect to be used and configures usage statistics. Args: name (str): \ The name of the SqlAlchemyExecutionEngine credentials: \ If the Execution Engine is not provided, the credentials can be used to build the Execution Engine. If the Engine is provided, it will be used instead data_context (DataContext): \ An object representing a Great Expectations project that can be used to access Expectation Suites and the Project Data itself engine (Engine): \ A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, useful if an Engine has already been configured and should be reused. Will override Credentials if provided. connection_string (string): \ If neither the engines nor the credentials have been provided, a connection string can be used to access the data. This will be overridden by both the engine and credentials if those are provided. url (string): \ If neither the engines, the credentials, nor the connection_string have been provided, a url can be used to access the data. This will be overridden by all other configuration options if any are provided. """ super().__init__(name=name, batch_data_dict=batch_data_dict) self._name = name self._credentials = credentials self._connection_string = connection_string self._url = url self._create_temp_table = create_temp_table if engine is not None: if credentials is not None: logger.warning( "Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. " "Ignoring credentials." ) self.engine = engine elif credentials is not None: self.engine = self._build_engine(credentials=credentials, **kwargs) elif connection_string is not None: self.engine = sa.create_engine(connection_string, **kwargs) elif url is not None: self.drivername = urlparse(url).scheme self.engine = sa.create_engine(url, **kwargs) else: raise InvalidConfigError( "Credentials or an engine are required for a SqlAlchemyExecutionEngine." ) # Get the dialect **for purposes of identifying types** if self.engine.dialect.name.lower() in [ "postgresql", "mysql", "sqlite", "oracle", "mssql", ]: # These are the officially included and supported dialects by sqlalchemy self.dialect_module = import_library_module( module_name="sqlalchemy.dialects." + self.engine.dialect.name ) elif self.engine.dialect.name.lower() == "snowflake": self.dialect_module = import_library_module( module_name="snowflake.sqlalchemy.snowdialect" ) elif self.engine.dialect.name.lower() == "redshift": self.dialect_module = import_library_module( module_name="sqlalchemy_redshift.dialect" ) elif self.engine.dialect.name.lower() == "bigquery": self.dialect_module = import_library_module( module_name="pybigquery.sqlalchemy_bigquery" ) else: self.dialect_module = None if self.engine and self.engine.dialect.name.lower() in [ "sqlite", "mssql", "snowflake", "mysql", ]: # sqlite/mssql temp tables only persist within a connection so override the engine self.engine = self.engine.connect() # Send a connect event to provide dialect type if data_context is not None and getattr( data_context, "_usage_statistics_handler", None ): handler = data_context._usage_statistics_handler handler.send_usage_message( event="execution_engine.sqlalchemy.connect", event_payload={ "anonymized_name": handler._execution_engine_anonymizer.anonymize( self.name ), "sqlalchemy_dialect": self.engine.name, }, success=True, ) # Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values, # and set the instance "_config" variable equal to the resulting dictionary. self._config = { "name": name, "credentials": credentials, "data_context": data_context, "engine": engine, "connection_string": connection_string, "url": url, "batch_data_dict": batch_data_dict, "module_name": self.__class__.__module__, "class_name": self.__class__.__name__, } self._config.update(kwargs) filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
[ "def", "__init__", "(", "self", ",", "name", "=", "None", ",", "credentials", "=", "None", ",", "data_context", "=", "None", ",", "engine", "=", "None", ",", "connection_string", "=", "None", ",", "url", "=", "None", ",", "batch_data_dict", "=", "None", ",", "create_temp_table", "=", "True", ",", "*", "*", "kwargs", ",", "# These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine", ")", ":", "super", "(", ")", ".", "__init__", "(", "name", "=", "name", ",", "batch_data_dict", "=", "batch_data_dict", ")", "self", ".", "_name", "=", "name", "self", ".", "_credentials", "=", "credentials", "self", ".", "_connection_string", "=", "connection_string", "self", ".", "_url", "=", "url", "self", ".", "_create_temp_table", "=", "create_temp_table", "if", "engine", "is", "not", "None", ":", "if", "credentials", "is", "not", "None", ":", "logger", ".", "warning", "(", "\"Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. \"", "\"Ignoring credentials.\"", ")", "self", ".", "engine", "=", "engine", "elif", "credentials", "is", "not", "None", ":", "self", ".", "engine", "=", "self", ".", "_build_engine", "(", "credentials", "=", "credentials", ",", "*", "*", "kwargs", ")", "elif", "connection_string", "is", "not", "None", ":", "self", ".", "engine", "=", "sa", ".", "create_engine", "(", "connection_string", ",", "*", "*", "kwargs", ")", "elif", "url", "is", "not", "None", ":", "self", ".", "drivername", "=", "urlparse", "(", "url", ")", ".", "scheme", "self", ".", "engine", "=", "sa", ".", "create_engine", "(", "url", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "InvalidConfigError", "(", "\"Credentials or an engine are required for a SqlAlchemyExecutionEngine.\"", ")", "# Get the dialect **for purposes of identifying types**", "if", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "in", "[", "\"postgresql\"", ",", "\"mysql\"", ",", "\"sqlite\"", ",", "\"oracle\"", ",", "\"mssql\"", ",", "]", ":", "# These are the officially included and supported dialects by sqlalchemy", "self", ".", "dialect_module", "=", "import_library_module", "(", "module_name", "=", "\"sqlalchemy.dialects.\"", "+", "self", ".", "engine", ".", "dialect", ".", "name", ")", "elif", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"snowflake\"", ":", "self", ".", "dialect_module", "=", "import_library_module", "(", "module_name", "=", "\"snowflake.sqlalchemy.snowdialect\"", ")", "elif", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"redshift\"", ":", "self", ".", "dialect_module", "=", "import_library_module", "(", "module_name", "=", "\"sqlalchemy_redshift.dialect\"", ")", "elif", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "==", "\"bigquery\"", ":", "self", ".", "dialect_module", "=", "import_library_module", "(", "module_name", "=", "\"pybigquery.sqlalchemy_bigquery\"", ")", "else", ":", "self", ".", "dialect_module", "=", "None", "if", "self", ".", "engine", "and", "self", ".", "engine", ".", "dialect", ".", "name", ".", "lower", "(", ")", "in", "[", "\"sqlite\"", ",", "\"mssql\"", ",", "\"snowflake\"", ",", "\"mysql\"", ",", "]", ":", "# sqlite/mssql temp tables only persist within a connection so override the engine", "self", ".", "engine", "=", "self", ".", "engine", ".", "connect", "(", ")", "# Send a connect event to provide dialect type", "if", "data_context", "is", "not", "None", "and", "getattr", "(", 
"data_context", ",", "\"_usage_statistics_handler\"", ",", "None", ")", ":", "handler", "=", "data_context", ".", "_usage_statistics_handler", "handler", ".", "send_usage_message", "(", "event", "=", "\"execution_engine.sqlalchemy.connect\"", ",", "event_payload", "=", "{", "\"anonymized_name\"", ":", "handler", ".", "_execution_engine_anonymizer", ".", "anonymize", "(", "self", ".", "name", ")", ",", "\"sqlalchemy_dialect\"", ":", "self", ".", "engine", ".", "name", ",", "}", ",", "success", "=", "True", ",", ")", "# Gather the call arguments of the present function (and add the \"class_name\"), filter out the Falsy values,", "# and set the instance \"_config\" variable equal to the resulting dictionary.", "self", ".", "_config", "=", "{", "\"name\"", ":", "name", ",", "\"credentials\"", ":", "credentials", ",", "\"data_context\"", ":", "data_context", ",", "\"engine\"", ":", "engine", ",", "\"connection_string\"", ":", "connection_string", ",", "\"url\"", ":", "url", ",", "\"batch_data_dict\"", ":", "batch_data_dict", ",", "\"module_name\"", ":", "self", ".", "__class__", ".", "__module__", ",", "\"class_name\"", ":", "self", ".", "__class__", ".", "__name__", ",", "}", "self", ".", "_config", ".", "update", "(", "kwargs", ")", "filter_properties_dict", "(", "properties", "=", "self", ".", "_config", ",", "clean_falsy", "=", "True", ",", "inplace", "=", "True", ")" ]
[ 143, 4 ]
[ 274, 87 ]
python
en
['en', 'en', 'en']
True
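A minimal construction sketch for the record above; the engine name and connection string are placeholders, and the import path assumes a 0.13-era Great Expectations layout:

    from great_expectations.execution_engine import SqlAlchemyExecutionEngine

    # Only one of engine / credentials / connection_string / url is required;
    # remaining keyword arguments are forwarded to sqlalchemy.create_engine().
    engine = SqlAlchemyExecutionEngine(
        name="my_sqlite_engine",
        connection_string="sqlite:///my_database.db",
    )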
SqlAlchemyExecutionEngine._build_engine
(self, credentials, **kwargs)
Using a set of given credentials, constructs an Execution Engine, connecting to a database using a URL or a private key path.
Using a set of given credentials, constructs an Execution Engine, connecting to a database using a URL or a private key path.
def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine": """ Using a set of given credentials, constructs an Execution Engine , connecting to a database using a URL or a private key path. """ # Update credentials with anything passed during connection time drivername = credentials.pop("drivername") schema_name = credentials.pop("schema_name", None) if schema_name is not None: logger.warning( "schema_name specified creating a URL with schema is not supported. Set a default " "schema on the user connecting to your database." ) create_engine_kwargs = kwargs connect_args = credentials.pop("connect_args", None) if connect_args: create_engine_kwargs["connect_args"] = connect_args if "private_key_path" in credentials: options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url( drivername, credentials ) else: options = sa.engine.url.URL(drivername, **credentials) self.drivername = drivername engine = sa.create_engine(options, **create_engine_kwargs) return engine
[ "def", "_build_engine", "(", "self", ",", "credentials", ",", "*", "*", "kwargs", ")", "->", "\"sa.engine.Engine\"", ":", "# Update credentials with anything passed during connection time", "drivername", "=", "credentials", ".", "pop", "(", "\"drivername\"", ")", "schema_name", "=", "credentials", ".", "pop", "(", "\"schema_name\"", ",", "None", ")", "if", "schema_name", "is", "not", "None", ":", "logger", ".", "warning", "(", "\"schema_name specified creating a URL with schema is not supported. Set a default \"", "\"schema on the user connecting to your database.\"", ")", "create_engine_kwargs", "=", "kwargs", "connect_args", "=", "credentials", ".", "pop", "(", "\"connect_args\"", ",", "None", ")", "if", "connect_args", ":", "create_engine_kwargs", "[", "\"connect_args\"", "]", "=", "connect_args", "if", "\"private_key_path\"", "in", "credentials", ":", "options", ",", "create_engine_kwargs", "=", "self", ".", "_get_sqlalchemy_key_pair_auth_url", "(", "drivername", ",", "credentials", ")", "else", ":", "options", "=", "sa", ".", "engine", ".", "url", ".", "URL", "(", "drivername", ",", "*", "*", "credentials", ")", "self", ".", "drivername", "=", "drivername", "engine", "=", "sa", ".", "create_engine", "(", "options", ",", "*", "*", "create_engine_kwargs", ")", "return", "engine" ]
[ 288, 4 ]
[ 316, 21 ]
python
en
['en', 'error', 'th']
False
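A sketch of the credentials dictionary _build_engine expects. Apart from drivername, schema_name, connect_args and the private-key entries that the code above pops off explicitly, the remaining keys are passed straight to sqlalchemy.engine.url.URL, so they must be valid URL fields; all values below are placeholders:

    credentials = {
        "drivername": "postgresql+psycopg2",
        "host": "localhost",
        "port": "5432",
        "username": "ge_user",
        "password": "secret",
        "database": "taxi",
    }
    engine = SqlAlchemyExecutionEngine(credentials=credentials)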
SqlAlchemyExecutionEngine._get_sqlalchemy_key_pair_auth_url
( self, drivername: str, credentials: dict )
Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided values into a private key. If passphrase is incorrect, this will fail and an exception is raised. Args: drivername(str) - The name of the driver class credentials(dict) - A dictionary of database credentials used to access the database Returns: a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.
Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided values into a private key. If passphrase is incorrect, this will fail and an exception is raised.
def _get_sqlalchemy_key_pair_auth_url( self, drivername: str, credentials: dict ) -> Tuple["sa.engine.url.URL", Dict]: """ Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided values into a private key. If passphrase is incorrect, this will fail and an exception is raised. Args: drivername(str) - The name of the driver class credentials(dict) - A dictionary of database credentials used to access the database Returns: a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs. """ from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization private_key_path = credentials.pop("private_key_path") private_key_passphrase = credentials.pop("private_key_passphrase") with Path(private_key_path).expanduser().resolve().open(mode="rb") as key: try: p_key = serialization.load_pem_private_key( key.read(), password=private_key_passphrase.encode() if private_key_passphrase else None, backend=default_backend(), ) except ValueError as e: if "incorrect password" in str(e).lower(): raise DatasourceKeyPairAuthBadPassphraseError( datasource_name="SqlAlchemyDatasource", message="Decryption of key failed, was the passphrase incorrect?", ) from e else: raise e pkb = p_key.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ) credentials_driver_name = credentials.pop("drivername", None) create_engine_kwargs = {"connect_args": {"private_key": pkb}} return ( sa.engine.url.URL(drivername or credentials_driver_name, **credentials), create_engine_kwargs, )
[ "def", "_get_sqlalchemy_key_pair_auth_url", "(", "self", ",", "drivername", ":", "str", ",", "credentials", ":", "dict", ")", "->", "Tuple", "[", "\"sa.engine.url.URL\"", ",", "Dict", "]", ":", "from", "cryptography", ".", "hazmat", ".", "backends", "import", "default_backend", "from", "cryptography", ".", "hazmat", ".", "primitives", "import", "serialization", "private_key_path", "=", "credentials", ".", "pop", "(", "\"private_key_path\"", ")", "private_key_passphrase", "=", "credentials", ".", "pop", "(", "\"private_key_passphrase\"", ")", "with", "Path", "(", "private_key_path", ")", ".", "expanduser", "(", ")", ".", "resolve", "(", ")", ".", "open", "(", "mode", "=", "\"rb\"", ")", "as", "key", ":", "try", ":", "p_key", "=", "serialization", ".", "load_pem_private_key", "(", "key", ".", "read", "(", ")", ",", "password", "=", "private_key_passphrase", ".", "encode", "(", ")", "if", "private_key_passphrase", "else", "None", ",", "backend", "=", "default_backend", "(", ")", ",", ")", "except", "ValueError", "as", "e", ":", "if", "\"incorrect password\"", "in", "str", "(", "e", ")", ".", "lower", "(", ")", ":", "raise", "DatasourceKeyPairAuthBadPassphraseError", "(", "datasource_name", "=", "\"SqlAlchemyDatasource\"", ",", "message", "=", "\"Decryption of key failed, was the passphrase incorrect?\"", ",", ")", "from", "e", "else", ":", "raise", "e", "pkb", "=", "p_key", ".", "private_bytes", "(", "encoding", "=", "serialization", ".", "Encoding", ".", "DER", ",", "format", "=", "serialization", ".", "PrivateFormat", ".", "PKCS8", ",", "encryption_algorithm", "=", "serialization", ".", "NoEncryption", "(", ")", ",", ")", "credentials_driver_name", "=", "credentials", ".", "pop", "(", "\"drivername\"", ",", "None", ")", "create_engine_kwargs", "=", "{", "\"connect_args\"", ":", "{", "\"private_key\"", ":", "pkb", "}", "}", "return", "(", "sa", ".", "engine", ".", "url", ".", "URL", "(", "drivername", "or", "credentials_driver_name", ",", "*", "*", "credentials", ")", ",", "create_engine_kwargs", ",", ")" ]
[ 318, 4 ]
[ 366, 9 ]
python
en
['en', 'error', 'th']
False
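When the credentials contain private_key_path, _build_engine routes through the method above instead of building a plain URL. A hedged example of such a credentials block for a Snowflake-style key-pair login; the account, paths and passphrase are invented:

    credentials = {
        "drivername": "snowflake",
        "username": "ge_user",
        "host": "my_account.eu-west-1",
        "database": "analytics",
        "private_key_path": "~/.ssh/snowflake_key.p8",
        "private_key_passphrase": "secret-passphrase",
    }
    # Internally this becomes a URL plus create_engine kwargs of the form
    # {"connect_args": {"private_key": <DER-encoded key bytes>}}.
    engine = SqlAlchemyExecutionEngine(credentials=credentials)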
SqlAlchemyExecutionEngine.get_compute_domain
( self, domain_kwargs: Dict, domain_type: Union[str, MetricDomainTypes], accessor_keys: Optional[Iterable[str]] = None, )
Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object. Args: domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would like to be using, or a corresponding string value representing it. String types include "identity", "column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the class MetricDomainTypes. accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when describing the domain and simply transferred with their associated values into accessor_domain_kwargs. Returns: SqlAlchemy column
Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object.
def get_compute_domain( self, domain_kwargs: Dict, domain_type: Union[str, MetricDomainTypes], accessor_keys: Optional[Iterable[str]] = None, ) -> Tuple[Select, dict, dict]: """Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object. Args: domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would like to be using, or a corresponding string value representing it. String types include "identity", "column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the class MetricDomainTypes. accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when describing the domain and simply transferred with their associated values into accessor_domain_kwargs. Returns: SqlAlchemy column """ # Extracting value from enum if it is given for future computation domain_type = MetricDomainTypes(domain_type) batch_id = domain_kwargs.get("batch_id") if batch_id is None: # We allow no batch id specified if there is only one batch if self.active_batch_data: data_object = self.active_batch_data else: raise GreatExpectationsError( "No batch is specified, but could not identify a loaded batch." ) else: if batch_id in self.loaded_batch_data_dict: data_object = self.loaded_batch_data_dict[batch_id] else: raise GreatExpectationsError( f"Unable to find batch with batch_id {batch_id}" ) compute_domain_kwargs = copy.deepcopy(domain_kwargs) accessor_domain_kwargs = dict() if "table" in domain_kwargs and domain_kwargs["table"] is not None: # TODO: Add logic to handle record_set_name once implemented # (i.e. multiple record sets (tables) in one batch if domain_kwargs["table"] != data_object.selectable.name: selectable = sa.Table( domain_kwargs["table"], sa.MetaData(), schema_name=data_object._schema_name, ) else: selectable = data_object.selectable elif "query" in domain_kwargs: raise ValueError( "query is not currently supported by SqlAlchemyExecutionEngine" ) else: selectable = data_object.selectable if ( "row_condition" in domain_kwargs and domain_kwargs["row_condition"] is not None ): condition_parser = domain_kwargs["condition_parser"] if condition_parser == "great_expectations__experimental__": parsed_condition = parse_condition_to_sqlalchemy( domain_kwargs["row_condition"] ) selectable = sa.select( "*", from_obj=selectable, whereclause=parsed_condition ) else: raise GreatExpectationsError( "SqlAlchemyExecutionEngine only supports the great_expectations condition_parser." ) # Warning user if accessor keys are in any domain that is not of type table, will be ignored if ( domain_type != MetricDomainTypes.TABLE and accessor_keys is not None and len(list(accessor_keys)) > 0 ): logger.warning( "Accessor keys ignored since Metric Domain Type is not 'table'" ) if domain_type == MetricDomainTypes.TABLE: if accessor_keys is not None and len(list(accessor_keys)) > 0: for key in accessor_keys: accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key) if len(domain_kwargs.keys()) > 0: # Warn user if kwarg not "normal". unexpected_keys: set = set(compute_domain_kwargs.keys()).difference( { "batch_id", "table", "row_condition", "condition_parser", } ) if len(unexpected_keys) > 0: unexpected_keys_str: str = ", ".join( map(lambda element: f'"{element}"', unexpected_keys) ) logger.warning( f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".' ) return selectable, compute_domain_kwargs, accessor_domain_kwargs # If user has stated they want a column, checking if one is provided, and elif domain_type == MetricDomainTypes.COLUMN: if "column" in compute_domain_kwargs: # Checking if case- sensitive and using appropriate name if self.active_batch_data.use_quoted_name: accessor_domain_kwargs["column"] = quoted_name( compute_domain_kwargs.pop("column") ) else: accessor_domain_kwargs["column"] = compute_domain_kwargs.pop( "column" ) else: # If column not given raise GreatExpectationsError( "Column not provided in compute_domain_kwargs" ) # Else, if column pair values requested elif domain_type == MetricDomainTypes.COLUMN_PAIR: # Ensuring column_A and column_B parameters provided if ( "column_A" in compute_domain_kwargs and "column_B" in compute_domain_kwargs ): if self.active_batch_data.use_quoted_name: # If case matters... accessor_domain_kwargs["column_A"] = quoted_name( compute_domain_kwargs.pop("column_A") ) accessor_domain_kwargs["column_B"] = quoted_name( compute_domain_kwargs.pop("column_B") ) else: accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop( "column_A" ) accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop( "column_B" ) else: raise GreatExpectationsError( "column_A or column_B not found within compute_domain_kwargs" ) # Checking if table or identity or other provided, column is not specified. If it is, warning the user elif domain_type == MetricDomainTypes.MULTICOLUMN: if "column_list" in compute_domain_kwargs: # If column_list exists accessor_domain_kwargs["column_list"] = compute_domain_kwargs.pop( "column_list" ) # Filtering if identity elif domain_type == MetricDomainTypes.IDENTITY: # If we would like our data to become a single column if "column" in compute_domain_kwargs: if self.active_batch_data.use_quoted_name: selectable = sa.select( [sa.column(quoted_name(compute_domain_kwargs["column"]))] ).select_from(selectable) else: selectable = sa.select( [sa.column(compute_domain_kwargs["column"])] ).select_from(selectable) # If we would like our data to now become a column pair elif ("column_A" in compute_domain_kwargs) and ( "column_B" in compute_domain_kwargs ): if self.active_batch_data.use_quoted_name: selectable = sa.select( [ sa.column(quoted_name(compute_domain_kwargs["column_A"])), sa.column(quoted_name(compute_domain_kwargs["column_B"])), ] ).select_from(selectable) else: selectable = sa.select( [ sa.column(compute_domain_kwargs["column_A"]), sa.column(compute_domain_kwargs["column_B"]), ] ).select_from(selectable) else: # If we would like our data to become a multicolumn if "column_list" in compute_domain_kwargs: if self.active_batch_data.use_quoted_name: # Building a list of column objects used for sql alchemy selection to_select = [ sa.column(quoted_name(col)) for col in compute_domain_kwargs["column_list"] ] selectable = sa.select(to_select).select_from(selectable) else: to_select = [ sa.column(col) for col in compute_domain_kwargs["column_list"] ] selectable = sa.select(to_select).select_from(selectable) # Letting selectable fall through return selectable, compute_domain_kwargs, accessor_domain_kwargs
[ "def", "get_compute_domain", "(", "self", ",", "domain_kwargs", ":", "Dict", ",", "domain_type", ":", "Union", "[", "str", ",", "MetricDomainTypes", "]", ",", "accessor_keys", ":", "Optional", "[", "Iterable", "[", "str", "]", "]", "=", "None", ",", ")", "->", "Tuple", "[", "Select", ",", "dict", ",", "dict", "]", ":", "# Extracting value from enum if it is given for future computation", "domain_type", "=", "MetricDomainTypes", "(", "domain_type", ")", "batch_id", "=", "domain_kwargs", ".", "get", "(", "\"batch_id\"", ")", "if", "batch_id", "is", "None", ":", "# We allow no batch id specified if there is only one batch", "if", "self", ".", "active_batch_data", ":", "data_object", "=", "self", ".", "active_batch_data", "else", ":", "raise", "GreatExpectationsError", "(", "\"No batch is specified, but could not identify a loaded batch.\"", ")", "else", ":", "if", "batch_id", "in", "self", ".", "loaded_batch_data_dict", ":", "data_object", "=", "self", ".", "loaded_batch_data_dict", "[", "batch_id", "]", "else", ":", "raise", "GreatExpectationsError", "(", "f\"Unable to find batch with batch_id {batch_id}\"", ")", "compute_domain_kwargs", "=", "copy", ".", "deepcopy", "(", "domain_kwargs", ")", "accessor_domain_kwargs", "=", "dict", "(", ")", "if", "\"table\"", "in", "domain_kwargs", "and", "domain_kwargs", "[", "\"table\"", "]", "is", "not", "None", ":", "# TODO: Add logic to handle record_set_name once implemented", "# (i.e. multiple record sets (tables) in one batch", "if", "domain_kwargs", "[", "\"table\"", "]", "!=", "data_object", ".", "selectable", ".", "name", ":", "selectable", "=", "sa", ".", "Table", "(", "domain_kwargs", "[", "\"table\"", "]", ",", "sa", ".", "MetaData", "(", ")", ",", "schema_name", "=", "data_object", ".", "_schema_name", ",", ")", "else", ":", "selectable", "=", "data_object", ".", "selectable", "elif", "\"query\"", "in", "domain_kwargs", ":", "raise", "ValueError", "(", "\"query is not currently supported by SqlAlchemyExecutionEngine\"", ")", "else", ":", "selectable", "=", "data_object", ".", "selectable", "if", "(", "\"row_condition\"", "in", "domain_kwargs", "and", "domain_kwargs", "[", "\"row_condition\"", "]", "is", "not", "None", ")", ":", "condition_parser", "=", "domain_kwargs", "[", "\"condition_parser\"", "]", "if", "condition_parser", "==", "\"great_expectations__experimental__\"", ":", "parsed_condition", "=", "parse_condition_to_sqlalchemy", "(", "domain_kwargs", "[", "\"row_condition\"", "]", ")", "selectable", "=", "sa", ".", "select", "(", "\"*\"", ",", "from_obj", "=", "selectable", ",", "whereclause", "=", "parsed_condition", ")", "else", ":", "raise", "GreatExpectationsError", "(", "\"SqlAlchemyExecutionEngine only supports the great_expectations condition_parser.\"", ")", "# Warning user if accessor keys are in any domain that is not of type table, will be ignored", "if", "(", "domain_type", "!=", "MetricDomainTypes", ".", "TABLE", "and", "accessor_keys", "is", "not", "None", "and", "len", "(", "list", "(", "accessor_keys", ")", ")", ">", "0", ")", ":", "logger", ".", "warning", "(", "\"Accessor keys ignored since Metric Domain Type is not 'table'\"", ")", "if", "domain_type", "==", "MetricDomainTypes", ".", "TABLE", ":", "if", "accessor_keys", "is", "not", "None", "and", "len", "(", "list", "(", "accessor_keys", ")", ")", ">", "0", ":", "for", "key", "in", "accessor_keys", ":", "accessor_domain_kwargs", "[", "key", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "key", ")", "if", "len", "(", "domain_kwargs", ".", "keys", "(", ")", 
")", ">", "0", ":", "# Warn user if kwarg not \"normal\".", "unexpected_keys", ":", "set", "=", "set", "(", "compute_domain_kwargs", ".", "keys", "(", ")", ")", ".", "difference", "(", "{", "\"batch_id\"", ",", "\"table\"", ",", "\"row_condition\"", ",", "\"condition_parser\"", ",", "}", ")", "if", "len", "(", "unexpected_keys", ")", ">", "0", ":", "unexpected_keys_str", ":", "str", "=", "\", \"", ".", "join", "(", "map", "(", "lambda", "element", ":", "f'\"{element}\"'", ",", "unexpected_keys", ")", ")", "logger", ".", "warning", "(", "f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type \"{domain_type.value}\".'", ")", "return", "selectable", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "# If user has stated they want a column, checking if one is provided, and", "elif", "domain_type", "==", "MetricDomainTypes", ".", "COLUMN", ":", "if", "\"column\"", "in", "compute_domain_kwargs", ":", "# Checking if case- sensitive and using appropriate name", "if", "self", ".", "active_batch_data", ".", "use_quoted_name", ":", "accessor_domain_kwargs", "[", "\"column\"", "]", "=", "quoted_name", "(", "compute_domain_kwargs", ".", "pop", "(", "\"column\"", ")", ")", "else", ":", "accessor_domain_kwargs", "[", "\"column\"", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "\"column\"", ")", "else", ":", "# If column not given", "raise", "GreatExpectationsError", "(", "\"Column not provided in compute_domain_kwargs\"", ")", "# Else, if column pair values requested", "elif", "domain_type", "==", "MetricDomainTypes", ".", "COLUMN_PAIR", ":", "# Ensuring column_A and column_B parameters provided", "if", "(", "\"column_A\"", "in", "compute_domain_kwargs", "and", "\"column_B\"", "in", "compute_domain_kwargs", ")", ":", "if", "self", ".", "active_batch_data", ".", "use_quoted_name", ":", "# If case matters...", "accessor_domain_kwargs", "[", "\"column_A\"", "]", "=", "quoted_name", "(", "compute_domain_kwargs", ".", "pop", "(", "\"column_A\"", ")", ")", "accessor_domain_kwargs", "[", "\"column_B\"", "]", "=", "quoted_name", "(", "compute_domain_kwargs", ".", "pop", "(", "\"column_B\"", ")", ")", "else", ":", "accessor_domain_kwargs", "[", "\"column_A\"", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "\"column_A\"", ")", "accessor_domain_kwargs", "[", "\"column_B\"", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "\"column_B\"", ")", "else", ":", "raise", "GreatExpectationsError", "(", "\"column_A or column_B not found within compute_domain_kwargs\"", ")", "# Checking if table or identity or other provided, column is not specified. 
If it is, warning the user", "elif", "domain_type", "==", "MetricDomainTypes", ".", "MULTICOLUMN", ":", "if", "\"column_list\"", "in", "compute_domain_kwargs", ":", "# If column_list exists", "accessor_domain_kwargs", "[", "\"column_list\"", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "\"column_list\"", ")", "# Filtering if identity", "elif", "domain_type", "==", "MetricDomainTypes", ".", "IDENTITY", ":", "# If we would like our data to become a single column", "if", "\"column\"", "in", "compute_domain_kwargs", ":", "if", "self", ".", "active_batch_data", ".", "use_quoted_name", ":", "selectable", "=", "sa", ".", "select", "(", "[", "sa", ".", "column", "(", "quoted_name", "(", "compute_domain_kwargs", "[", "\"column\"", "]", ")", ")", "]", ")", ".", "select_from", "(", "selectable", ")", "else", ":", "selectable", "=", "sa", ".", "select", "(", "[", "sa", ".", "column", "(", "compute_domain_kwargs", "[", "\"column\"", "]", ")", "]", ")", ".", "select_from", "(", "selectable", ")", "# If we would like our data to now become a column pair", "elif", "(", "\"column_A\"", "in", "compute_domain_kwargs", ")", "and", "(", "\"column_B\"", "in", "compute_domain_kwargs", ")", ":", "if", "self", ".", "active_batch_data", ".", "use_quoted_name", ":", "selectable", "=", "sa", ".", "select", "(", "[", "sa", ".", "column", "(", "quoted_name", "(", "compute_domain_kwargs", "[", "\"column_A\"", "]", ")", ")", ",", "sa", ".", "column", "(", "quoted_name", "(", "compute_domain_kwargs", "[", "\"column_B\"", "]", ")", ")", ",", "]", ")", ".", "select_from", "(", "selectable", ")", "else", ":", "selectable", "=", "sa", ".", "select", "(", "[", "sa", ".", "column", "(", "compute_domain_kwargs", "[", "\"column_A\"", "]", ")", ",", "sa", ".", "column", "(", "compute_domain_kwargs", "[", "\"column_B\"", "]", ")", ",", "]", ")", ".", "select_from", "(", "selectable", ")", "else", ":", "# If we would like our data to become a multicolumn", "if", "\"column_list\"", "in", "compute_domain_kwargs", ":", "if", "self", ".", "active_batch_data", ".", "use_quoted_name", ":", "# Building a list of column objects used for sql alchemy selection", "to_select", "=", "[", "sa", ".", "column", "(", "quoted_name", "(", "col", ")", ")", "for", "col", "in", "compute_domain_kwargs", "[", "\"column_list\"", "]", "]", "selectable", "=", "sa", ".", "select", "(", "to_select", ")", ".", "select_from", "(", "selectable", ")", "else", ":", "to_select", "=", "[", "sa", ".", "column", "(", "col", ")", "for", "col", "in", "compute_domain_kwargs", "[", "\"column_list\"", "]", "]", "selectable", "=", "sa", ".", "select", "(", "to_select", ")", ".", "select_from", "(", "selectable", ")", "# Letting selectable fall through", "return", "selectable", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs" ]
[ 368, 4 ]
[ 580, 72 ]
python
en
['en', 'en', 'en']
True
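A usage sketch for the record above, resolving a column domain against the active batch; the column name is illustrative, and the MetricDomainTypes import path is assumed to match this release's execution_engine module:

    from great_expectations.execution_engine.execution_engine import MetricDomainTypes

    selectable, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
        domain_kwargs={"column": "fare_amount"},
        domain_type=MetricDomainTypes.COLUMN,
    )
    # selectable          -> the batch's table (or a filtered SELECT if a row_condition was given)
    # accessor_kwargs     -> {"column": "fare_amount"}; the key is popped from compute_kwargs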
SqlAlchemyExecutionEngine.resolve_metric_bundle
( self, metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]], )
For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail if bundling the metrics together is not possible. Args: metric_fn_bundle (Iterable[Tuple[MetricConfiguration, Callable, dict]): \ A Dictionary containing a MetricProvider's MetricConfiguration (its unique identifier), its metric provider function (the function that actually executes the metric), and the arguments to pass to the metric provider function. metrics (Dict[Tuple, Any]): \ A dictionary of metrics defined in the registry and corresponding arguments Returns: A dictionary of metric names and their corresponding now-queried values.
For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail if bundling the metrics together is not possible.
def resolve_metric_bundle( self, metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]], ) -> dict: """For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail if bundling the metrics together is not possible. Args: metric_fn_bundle (Iterable[Tuple[MetricConfiguration, Callable, dict]): \ A Dictionary containing a MetricProvider's MetricConfiguration (its unique identifier), its metric provider function (the function that actually executes the metric), and the arguments to pass to the metric provider function. metrics (Dict[Tuple, Any]): \ A dictionary of metrics defined in the registry and corresponding arguments Returns: A dictionary of metric names and their corresponding now-queried values. """ resolved_metrics = dict() # We need a different query for each domain (where clause). queries: Dict[Tuple, dict] = dict() for ( metric_to_resolve, engine_fn, compute_domain_kwargs, accessor_domain_kwargs, metric_provider_kwargs, ) in metric_fn_bundle: if not isinstance(compute_domain_kwargs, IDDict): compute_domain_kwargs = IDDict(compute_domain_kwargs) domain_id = compute_domain_kwargs.to_id() if domain_id not in queries: queries[domain_id] = { "select": [], "ids": [], "domain_kwargs": compute_domain_kwargs, } queries[domain_id]["select"].append( engine_fn.label(metric_to_resolve.metric_name) ) queries[domain_id]["ids"].append(metric_to_resolve.id) for query in queries.values(): selectable, compute_domain_kwargs, _ = self.get_compute_domain( query["domain_kwargs"], domain_type=MetricDomainTypes.IDENTITY.value ) assert len(query["select"]) == len(query["ids"]) try: res = self.engine.execute( sa.select(query["select"]).select_from(selectable) ).fetchall() logger.debug( f"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(compute_domain_kwargs).to_id()}" ) except OperationalError as oe: exception_message: str = "An SQL execution Exception occurred. " exception_traceback: str = traceback.format_exc() exception_message += f'{type(oe).__name__}: "{str(oe)}". Traceback: "{exception_traceback}".' logger.error(exception_message) raise ExecutionEngineError(message=exception_message) assert ( len(res) == 1 ), "all bundle-computed metrics must be single-value statistics" assert len(query["ids"]) == len( res[0] ), "unexpected number of metrics returned" for idx, id in enumerate(query["ids"]): resolved_metrics[id] = convert_to_json_serializable(res[0][idx]) return resolved_metrics
[ "def", "resolve_metric_bundle", "(", "self", ",", "metric_fn_bundle", ":", "Iterable", "[", "Tuple", "[", "MetricConfiguration", ",", "Any", ",", "dict", ",", "dict", "]", "]", ",", ")", "->", "dict", ":", "resolved_metrics", "=", "dict", "(", ")", "# We need a different query for each domain (where clause).", "queries", ":", "Dict", "[", "Tuple", ",", "dict", "]", "=", "dict", "(", ")", "for", "(", "metric_to_resolve", ",", "engine_fn", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", ",", "metric_provider_kwargs", ",", ")", "in", "metric_fn_bundle", ":", "if", "not", "isinstance", "(", "compute_domain_kwargs", ",", "IDDict", ")", ":", "compute_domain_kwargs", "=", "IDDict", "(", "compute_domain_kwargs", ")", "domain_id", "=", "compute_domain_kwargs", ".", "to_id", "(", ")", "if", "domain_id", "not", "in", "queries", ":", "queries", "[", "domain_id", "]", "=", "{", "\"select\"", ":", "[", "]", ",", "\"ids\"", ":", "[", "]", ",", "\"domain_kwargs\"", ":", "compute_domain_kwargs", ",", "}", "queries", "[", "domain_id", "]", "[", "\"select\"", "]", ".", "append", "(", "engine_fn", ".", "label", "(", "metric_to_resolve", ".", "metric_name", ")", ")", "queries", "[", "domain_id", "]", "[", "\"ids\"", "]", ".", "append", "(", "metric_to_resolve", ".", "id", ")", "for", "query", "in", "queries", ".", "values", "(", ")", ":", "selectable", ",", "compute_domain_kwargs", ",", "_", "=", "self", ".", "get_compute_domain", "(", "query", "[", "\"domain_kwargs\"", "]", ",", "domain_type", "=", "MetricDomainTypes", ".", "IDENTITY", ".", "value", ")", "assert", "len", "(", "query", "[", "\"select\"", "]", ")", "==", "len", "(", "query", "[", "\"ids\"", "]", ")", "try", ":", "res", "=", "self", ".", "engine", ".", "execute", "(", "sa", ".", "select", "(", "query", "[", "\"select\"", "]", ")", ".", "select_from", "(", "selectable", ")", ")", ".", "fetchall", "(", ")", "logger", ".", "debug", "(", "f\"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(compute_domain_kwargs).to_id()}\"", ")", "except", "OperationalError", "as", "oe", ":", "exception_message", ":", "str", "=", "\"An SQL execution Exception occurred. \"", "exception_traceback", ":", "str", "=", "traceback", ".", "format_exc", "(", ")", "exception_message", "+=", "f'{type(oe).__name__}: \"{str(oe)}\". Traceback: \"{exception_traceback}\".'", "logger", ".", "error", "(", "exception_message", ")", "raise", "ExecutionEngineError", "(", "message", "=", "exception_message", ")", "assert", "(", "len", "(", "res", ")", "==", "1", ")", ",", "\"all bundle-computed metrics must be single-value statistics\"", "assert", "len", "(", "query", "[", "\"ids\"", "]", ")", "==", "len", "(", "res", "[", "0", "]", ")", ",", "\"unexpected number of metrics returned\"", "for", "idx", ",", "id", "in", "enumerate", "(", "query", "[", "\"ids\"", "]", ")", ":", "resolved_metrics", "[", "id", "]", "=", "convert_to_json_serializable", "(", "res", "[", "0", "]", "[", "idx", "]", ")", "return", "resolved_metrics" ]
[ 582, 4 ]
[ 651, 31 ]
python
en
['en', 'en', 'en']
True
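The bundle consumed by the method above is an iterable of five-element tuples: (metric configuration, a SQLAlchemy expression that can be labeled, compute domain kwargs, accessor domain kwargs, provider kwargs). A rough, hedged sketch of a single-metric bundle, not an exact reproduction of how the metric registry builds one:

    bundle = [
        (
            MetricConfiguration("column.max", {"column": "fare_amount"}),  # metric id source
            sa.func.max(sa.column("fare_amount")),                         # engine_fn; .label() is applied internally
            {},                                                            # compute_domain_kwargs
            {},                                                            # accessor_domain_kwargs
            {},                                                            # metric_provider_kwargs
        )
    ]
    resolved = engine.resolve_metric_bundle(bundle)   # {metric_id: max value}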
SqlAlchemyExecutionEngine._split_on_whole_table
(self, table_name: str, batch_identifiers: dict)
'Split' by returning the whole table
'Split' by returning the whole table
def _split_on_whole_table(self, table_name: str, batch_identifiers: dict): """'Split' by returning the whole table""" # return sa.column(column_name) == batch_identifiers[column_name] return 1 == 1
[ "def", "_split_on_whole_table", "(", "self", ",", "table_name", ":", "str", ",", "batch_identifiers", ":", "dict", ")", ":", "# return sa.column(column_name) == batch_identifiers[column_name]", "return", "1", "==", "1" ]
[ 655, 4 ]
[ 659, 21 ]
python
en
['en', 'en', 'en']
True
SqlAlchemyExecutionEngine._split_on_column_value
( self, table_name: str, column_name: str, batch_identifiers: dict )
Split using the values in the named column
Split using the values in the named column
def _split_on_column_value( self, table_name: str, column_name: str, batch_identifiers: dict ): """Split using the values in the named column""" return sa.column(column_name) == batch_identifiers[column_name]
[ "def", "_split_on_column_value", "(", "self", ",", "table_name", ":", "str", ",", "column_name", ":", "str", ",", "batch_identifiers", ":", "dict", ")", ":", "return", "sa", ".", "column", "(", "column_name", ")", "==", "batch_identifiers", "[", "column_name", "]" ]
[ 661, 4 ]
[ 666, 71 ]
python
en
['en', 'no', 'en']
True
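Each _split_on_* method returns a SQLAlchemy boolean clause that the engine uses as a WHERE condition when carving a table into batches. A small sketch for the record above; the table name and identifier value are made up:

    clause = engine._split_on_column_value(
        table_name="yellow_tripdata",
        column_name="passenger_count",
        batch_identifiers={"passenger_count": 2},
    )
    # clause is equivalent to: sa.column("passenger_count") == 2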
SqlAlchemyExecutionEngine._split_on_converted_datetime
( self, table_name: str, column_name: str, batch_identifiers: dict, date_format_string: str = "%Y-%m-%d", )
Convert the values in the named column to the given date_format, and split on that
Convert the values in the named column to the given date_format, and split on that
def _split_on_converted_datetime( self, table_name: str, column_name: str, batch_identifiers: dict, date_format_string: str = "%Y-%m-%d", ): """Convert the values in the named column to the given date_format, and split on that""" return ( sa.func.strftime( date_format_string, sa.column(column_name), ) == batch_identifiers[column_name] )
[ "def", "_split_on_converted_datetime", "(", "self", ",", "table_name", ":", "str", ",", "column_name", ":", "str", ",", "batch_identifiers", ":", "dict", ",", "date_format_string", ":", "str", "=", "\"%Y-%m-%d\"", ",", ")", ":", "return", "(", "sa", ".", "func", ".", "strftime", "(", "date_format_string", ",", "sa", ".", "column", "(", "column_name", ")", ",", ")", "==", "batch_identifiers", "[", "column_name", "]", ")" ]
[ 668, 4 ]
[ 683, 9 ]
python
en
['en', 'en', 'en']
True
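A sketch for the record above; note that sa.func.strftime renders a strftime() SQL function, which SQLite supports natively but many other dialects do not, so this splitter is effectively dialect-dependent (the table and date values are illustrative):

    clause = engine._split_on_converted_datetime(
        table_name="yellow_tripdata",
        column_name="pickup_datetime",
        batch_identifiers={"pickup_datetime": "2019-01-15"},
        date_format_string="%Y-%m-%d",
    )
    # roughly: strftime('%Y-%m-%d', pickup_datetime) == '2019-01-15'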
SqlAlchemyExecutionEngine._split_on_divided_integer
( self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict )
Divide the values in the named column by `divisor`, and split on that
Divide the values in the named column by `divisor`, and split on that
def _split_on_divided_integer( self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict ): """Divide the values in the named column by `divisor`, and split on that""" return ( sa.cast(sa.column(column_name) / divisor, sa.Integer) == batch_identifiers[column_name] )
[ "def", "_split_on_divided_integer", "(", "self", ",", "table_name", ":", "str", ",", "column_name", ":", "str", ",", "divisor", ":", "int", ",", "batch_identifiers", ":", "dict", ")", ":", "return", "(", "sa", ".", "cast", "(", "sa", ".", "column", "(", "column_name", ")", "/", "divisor", ",", "sa", ".", "Integer", ")", "==", "batch_identifiers", "[", "column_name", "]", ")" ]
[ 685, 4 ]
[ 693, 9 ]
python
en
['en', 'en', 'en']
True
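The divided-integer splitter buckets rows by integer division, so with a divisor of 1000 the first thousand values share one batch, the next thousand the next, and so on. A tiny sketch of that arithmetic, with made-up values:

    divisor = 1000
    for id_ in (0, 999, 1000, 2500):
        print(id_, "->", id_ // divisor)
    # 0 -> 0, 999 -> 0, 1000 -> 1, 2500 -> 2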
SqlAlchemyExecutionEngine._split_on_mod_integer
( self, table_name: str, column_name: str, mod: int, batch_identifiers: dict )
Take the mod of the values in the named column, and split on that
Take the mod of the values in the named column, and split on that
def _split_on_mod_integer( self, table_name: str, column_name: str, mod: int, batch_identifiers: dict ): """Take the mod of the values in the named column, and split on that""" return sa.column(column_name) % mod == batch_identifiers[column_name]
[ "def", "_split_on_mod_integer", "(", "self", ",", "table_name", ":", "str", ",", "column_name", ":", "str", ",", "mod", ":", "int", ",", "batch_identifiers", ":", "dict", ")", ":", "return", "sa", ".", "column", "(", "column_name", ")", "%", "mod", "==", "batch_identifiers", "[", "column_name", "]" ]
[ 695, 4 ]
[ 700, 77 ]
python
en
['en', 'en', 'en']
True
SqlAlchemyExecutionEngine._split_on_multi_column_values
( self, table_name: str, column_names: List[str], batch_identifiers: dict )
Split on the joint values in the named columns
Split on the joint values in the named columns
def _split_on_multi_column_values( self, table_name: str, column_names: List[str], batch_identifiers: dict ): """Split on the joint values in the named columns""" return sa.and_( *[ sa.column(column_name) == column_value for column_name, column_value in batch_identifiers.items() ] )
[ "def", "_split_on_multi_column_values", "(", "self", ",", "table_name", ":", "str", ",", "column_names", ":", "List", "[", "str", "]", ",", "batch_identifiers", ":", "dict", ")", ":", "return", "sa", ".", "and_", "(", "*", "[", "sa", ".", "column", "(", "column_name", ")", "==", "column_value", "for", "column_name", ",", "column_value", "in", "batch_identifiers", ".", "items", "(", ")", "]", ")" ]
[ 702, 4 ]
[ 712, 9 ]
python
en
['en', 'en', 'en']
True
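A minimal sketch of the multi-column predicate above: an AND of one equality clause per entry in batch_identifiers. Column names and values are illustrative.

    import sqlalchemy as sa

    batch_identifiers = {"year": 2020, "month": 1}
    predicate = sa.and_(
        *[sa.column(name) == value for name, value in batch_identifiers.items()]
    )
    print(predicate.compile(compile_kwargs={"literal_binds": True}))
    # Roughly: year = 2020 AND month = 1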
SqlAlchemyExecutionEngine._split_on_hashed_column
( self, table_name: str, column_name: str, hash_digits: int, batch_identifiers: dict, )
Split on the hashed value of the named column
Split on the hashed value of the named column
def _split_on_hashed_column( self, table_name: str, column_name: str, hash_digits: int, batch_identifiers: dict, ): """Split on the hashed value of the named column""" return ( sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits) == batch_identifiers[column_name] )
[ "def", "_split_on_hashed_column", "(", "self", ",", "table_name", ":", "str", ",", "column_name", ":", "str", ",", "hash_digits", ":", "int", ",", "batch_identifiers", ":", "dict", ",", ")", ":", "return", "(", "sa", ".", "func", ".", "right", "(", "sa", ".", "func", ".", "md5", "(", "sa", ".", "column", "(", "column_name", ")", ")", ",", "hash_digits", ")", "==", "batch_identifiers", "[", "column_name", "]", ")" ]
[ 714, 4 ]
[ 726, 9 ]
python
en
['en', 'en', 'en']
True
SqlAlchemyExecutionEngine._sample_using_random
( self, p: float = 0.1, )
Take a random sample of rows, retaining proportion p Note: the Random function behaves differently on different dialects of SQL
Take a random sample of rows, retaining proportion p
def _sample_using_random( self, p: float = 0.1, ): """Take a random sample of rows, retaining proportion p Note: the Random function behaves differently on different dialects of SQL """ return sa.func.random() < p
[ "def", "_sample_using_random", "(", "self", ",", "p", ":", "float", "=", "0.1", ",", ")", ":", "return", "sa", ".", "func", ".", "random", "(", ")", "<", "p" ]
[ 736, 4 ]
[ 744, 35 ]
python
en
['en', 'en', 'en']
True
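The predicate above keeps a row when the dialect's random() value falls below p, which only yields roughly proportion p when random() returns a float in [0, 1) as in PostgreSQL; SQLite's RANDOM(), for instance, returns a signed 64-bit integer, which is the kind of dialect difference the docstring warns about. A plain-Python sketch of the intended behaviour:

    import random

    p = 0.1
    rows = range(10000)
    kept = [r for r in rows if random.random() < p]
    print(len(kept))  # roughly 1000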
SqlAlchemyExecutionEngine._sample_using_mod
( self, column_name, mod: int, value: int, )
Take the mod of named column, and only keep rows that match the given value
Take the mod of named column, and only keep rows that match the given value
def _sample_using_mod( self, column_name, mod: int, value: int, ): """Take the mod of named column, and only keep rows that match the given value""" return sa.column(column_name) % mod == value
[ "def", "_sample_using_mod", "(", "self", ",", "column_name", ",", "mod", ":", "int", ",", "value", ":", "int", ",", ")", ":", "return", "sa", ".", "column", "(", "column_name", ")", "%", "mod", "==", "value" ]
[ 746, 4 ]
[ 753, 52 ]
python
en
['en', 'en', 'en']
True
SqlAlchemyExecutionEngine._sample_using_a_list
( self, column_name: str, value_list: list, )
Match the values in the named column against value_list, and only keep the matches
Match the values in the named column against value_list, and only keep the matches
def _sample_using_a_list( self, column_name: str, value_list: list, ): """Match the values in the named column against value_list, and only keep the matches""" return sa.column(column_name).in_(value_list)
[ "def", "_sample_using_a_list", "(", "self", ",", "column_name", ":", "str", ",", "value_list", ":", "list", ",", ")", ":", "return", "sa", ".", "column", "(", "column_name", ")", ".", "in_", "(", "value_list", ")" ]
[ 755, 4 ]
[ 761, 53 ]
python
en
['en', 'en', 'en']
True
SqlAlchemyExecutionEngine._sample_using_md5
( self, column_name: str, hash_digits: int = 1, hash_value: str = "f", )
Hash the values in the named column, and split on that
Hash the values in the named column, and split on that
def _sample_using_md5( self, column_name: str, hash_digits: int = 1, hash_value: str = "f", ): """Hash the values in the named column, and split on that""" return ( sa.func.right( sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits ) == hash_value )
[ "def", "_sample_using_md5", "(", "self", ",", "column_name", ":", "str", ",", "hash_digits", ":", "int", "=", "1", ",", "hash_value", ":", "str", "=", "\"f\"", ",", ")", ":", "return", "(", "sa", ".", "func", ".", "right", "(", "sa", ".", "func", ".", "md5", "(", "sa", ".", "cast", "(", "sa", ".", "column", "(", "column_name", ")", ",", "sa", ".", "Text", ")", ")", ",", "hash_digits", ")", "==", "hash_value", ")" ]
[ 763, 4 ]
[ 775, 9 ]
python
en
['en', 'en', 'en']
True
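Hash-based sampling is deterministic per value: a row is kept when the trailing hex digit(s) of the MD5 of the column value match hash_value, so the defaults keep about 1/16 of rows. A self-contained sketch of the equivalent filter in plain Python:

    import hashlib

    def keep(value, hash_digits=1, hash_value="f"):
        digest = hashlib.md5(str(value).encode()).hexdigest()
        return digest[-hash_digits:] == hash_value

    sample = [v for v in range(1000) if keep(v)]
    print(len(sample))  # roughly 1000 / 16, and the same rows every run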
QuoteForRspFile
(arg)
Quote a command line argument so that it appears as one argument when processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for Windows programs).
Quote a command line argument so that it appears as one argument when processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for Windows programs).
def QuoteForRspFile(arg): """Quote a command line argument so that it appears as one argument when processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for Windows programs).""" # See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment # threads. This is actually the quoting rules for CommandLineToArgvW, not # for the shell, because the shell doesn't do anything in Windows. This # works more or less because most programs (including the compiler, etc.) # use that function to handle command line arguments. # For a literal quote, CommandLineToArgvW requires 2n+1 backslashes # preceding it, and results in n backslashes + the quote. So we substitute # in 2* what we match, +1 more, plus the quote. arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg) # %'s also need to be doubled otherwise they're interpreted as batch # positional arguments. Also make sure to escape the % so that they're # passed literally through escaping so they can be singled to just the # original %. Otherwise, trying to pass the literal representation that # looks like an environment variable to the shell (e.g. %PATH%) would fail. arg = arg.replace('%', '%%') # These commands are used in rsp files, so no escaping for the shell (via ^) # is necessary. # Finally, wrap the whole thing in quotes so that the above quote rule # applies and whitespace isn't a word break. return '"' + arg + '"'
[ "def", "QuoteForRspFile", "(", "arg", ")", ":", "# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment", "# threads. This is actually the quoting rules for CommandLineToArgvW, not", "# for the shell, because the shell doesn't do anything in Windows. This", "# works more or less because most programs (including the compiler, etc.)", "# use that function to handle command line arguments.", "# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes", "# preceding it, and results in n backslashes + the quote. So we substitute", "# in 2* what we match, +1 more, plus the quote.", "arg", "=", "windows_quoter_regex", ".", "sub", "(", "lambda", "mo", ":", "2", "*", "mo", ".", "group", "(", "1", ")", "+", "'\\\\\"'", ",", "arg", ")", "# %'s also need to be doubled otherwise they're interpreted as batch", "# positional arguments. Also make sure to escape the % so that they're", "# passed literally through escaping so they can be singled to just the", "# original %. Otherwise, trying to pass the literal representation that", "# looks like an environment variable to the shell (e.g. %PATH%) would fail.", "arg", "=", "arg", ".", "replace", "(", "'%'", ",", "'%%'", ")", "# These commands are used in rsp files, so no escaping for the shell (via ^)", "# is necessary.", "# Finally, wrap the whole thing in quotes so that the above quote rule", "# applies and whitespace isn't a word break.", "return", "'\"'", "+", "arg", "+", "'\"'" ]
[ 22, 0 ]
[ 49, 24 ]
python
en
['en', 'en', 'en']
True
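A self-contained sketch of the quoting rule the comments describe: double any run of backslashes preceding a literal quote and add one more, double percent signs, then wrap the result in quotes. The regex is an assumption here, since windows_quoter_regex itself is defined outside this record.

    import re

    quoter_regex = re.compile(r'(\\*)"')  # assumed definition of windows_quoter_regex

    def quote_for_rsp(arg):
        arg = quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
        arg = arg.replace('%', '%%')
        return '"' + arg + '"'

    print(quote_for_rsp('say "hi"'))  # "say \"hi\""
    print(quote_for_rsp('%PATH%'))    # "%%PATH%%"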
EncodeRspFileList
(args)
Process a list of arguments using QuoteCmdExeArgument.
Process a list of arguments using QuoteCmdExeArgument.
def EncodeRspFileList(args): """Process a list of arguments using QuoteCmdExeArgument.""" # Note that the first argument is assumed to be the command. Don't add # quotes around it because then built-ins like 'echo', etc. won't work. # Take care to normpath only the path in the case of 'call ../x.bat' because # otherwise the whole thing is incorrectly interpreted as a path and not # normalized correctly. if not args: return '' if args[0].startswith('call '): call, program = args[0].split(' ', 1) program = call + ' ' + os.path.normpath(program) else: program = os.path.normpath(args[0]) return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
[ "def", "EncodeRspFileList", "(", "args", ")", ":", "# Note that the first argument is assumed to be the command. Don't add", "# quotes around it because then built-ins like 'echo', etc. won't work.", "# Take care to normpath only the path in the case of 'call ../x.bat' because", "# otherwise the whole thing is incorrectly interpreted as a path and not", "# normalized correctly.", "if", "not", "args", ":", "return", "''", "if", "args", "[", "0", "]", ".", "startswith", "(", "'call '", ")", ":", "call", ",", "program", "=", "args", "[", "0", "]", ".", "split", "(", "' '", ",", "1", ")", "program", "=", "call", "+", "' '", "+", "os", ".", "path", ".", "normpath", "(", "program", ")", "else", ":", "program", "=", "os", ".", "path", ".", "normpath", "(", "args", "[", "0", "]", ")", "return", "program", "+", "' '", "+", "' '", ".", "join", "(", "QuoteForRspFile", "(", "arg", ")", "for", "arg", "in", "args", "[", "1", ":", "]", ")" ]
[ 52, 0 ]
[ 65, 75 ]
python
en
['en', 'en', 'en']
True
_GenericRetrieve
(root, default, path)
Given a list of dictionary keys |path| and a tree of dicts |root|, find value at path, or return |default| if any of the path doesn't exist.
Given a list of dictionary keys |path| and a tree of dicts |root|, find value at path, or return |default| if any of the path doesn't exist.
def _GenericRetrieve(root, default, path): """Given a list of dictionary keys |path| and a tree of dicts |root|, find value at path, or return |default| if any of the path doesn't exist.""" if not root: return default if not path: return root return _GenericRetrieve(root.get(path[0]), default, path[1:])
[ "def", "_GenericRetrieve", "(", "root", ",", "default", ",", "path", ")", ":", "if", "not", "root", ":", "return", "default", "if", "not", "path", ":", "return", "root", "return", "_GenericRetrieve", "(", "root", ".", "get", "(", "path", "[", "0", "]", ")", ",", "default", ",", "path", "[", "1", ":", "]", ")" ]
[ 68, 0 ]
[ 75, 63 ]
python
en
['en', 'en', 'en']
True
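_GenericRetrieve walks nested dicts one key at a time and falls back to the default as soon as a level is missing. A sketch with the helper restated so it runs on its own; the setting names are illustrative.

    def generic_retrieve(root, default, path):
        if not root:
            return default
        if not path:
            return root
        return generic_retrieve(root.get(path[0]), default, path[1:])

    tree = {'VCCLCompilerTool': {'Optimization': '2'}}
    print(generic_retrieve(tree, 'missing', ['VCCLCompilerTool', 'Optimization']))  # '2'
    print(generic_retrieve(tree, 'missing', ['VCLinkerTool', 'SubSystem']))         # 'missing'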
_AddPrefix
(element, prefix)
Add |prefix| to |element| or each subelement if element is iterable.
Add |prefix| to |element| or each subelement if element is iterable.
def _AddPrefix(element, prefix): """Add |prefix| to |element| or each subelement if element is iterable.""" if element is None: return element # Note, not Iterable because we don't want to handle strings like that. if isinstance(element, list) or isinstance(element, tuple): return [prefix + e for e in element] else: return prefix + element
[ "def", "_AddPrefix", "(", "element", ",", "prefix", ")", ":", "if", "element", "is", "None", ":", "return", "element", "# Note, not Iterable because we don't want to handle strings like that.", "if", "isinstance", "(", "element", ",", "list", ")", "or", "isinstance", "(", "element", ",", "tuple", ")", ":", "return", "[", "prefix", "+", "e", "for", "e", "in", "element", "]", "else", ":", "return", "prefix", "+", "element" ]
[ 78, 0 ]
[ 86, 27 ]
python
en
['en', 'en', 'en']
True
_DoRemapping
(element, map)
If |element| then remap it through |map|. If |element| is iterable then each item will be remapped. Any elements not found will be removed.
If |element| then remap it through |map|. If |element| is iterable then each item will be remapped. Any elements not found will be removed.
def _DoRemapping(element, map): """If |element| then remap it through |map|. If |element| is iterable then each item will be remapped. Any elements not found will be removed.""" if map is not None and element is not None: if not callable(map): map = map.get # Assume it's a dict, otherwise a callable to do the remap. if isinstance(element, list) or isinstance(element, tuple): element = filter(None, [map(elem) for elem in element]) else: element = map(element) return element
[ "def", "_DoRemapping", "(", "element", ",", "map", ")", ":", "if", "map", "is", "not", "None", "and", "element", "is", "not", "None", ":", "if", "not", "callable", "(", "map", ")", ":", "map", "=", "map", ".", "get", "# Assume it's a dict, otherwise a callable to do the remap.", "if", "isinstance", "(", "element", ",", "list", ")", "or", "isinstance", "(", "element", ",", "tuple", ")", ":", "element", "=", "filter", "(", "None", ",", "[", "map", "(", "elem", ")", "for", "elem", "in", "element", "]", ")", "else", ":", "element", "=", "map", "(", "element", ")", "return", "element" ]
[ 89, 0 ]
[ 99, 16 ]
python
en
['en', 'en', 'en']
True
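With a dict map, _DoRemapping drops any element that has no mapping (the Python 2 filter() in the code above returns a list). A simplified sketch of that behaviour with made-up values:

    def do_remapping(element, mapping):
        lookup = mapping.get
        if isinstance(element, (list, tuple)):
            return [mapped for mapped in (lookup(e) for e in element) if mapped]
        return lookup(element)

    print(do_remapping(['0', '1', '2'], {'0': 'NoListing', '2': 'ListingOnly'}))
    # ['NoListing', 'ListingOnly'] -- '1' has no mapping and is dropped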
_AppendOrReturn
(append, element)
If |append| is None, simply return |element|. If |append| is not None, then add |element| to it, adding each item in |element| if it's a list or tuple.
If |append| is None, simply return |element|. If |append| is not None, then add |element| to it, adding each item in |element| if it's a list or tuple.
def _AppendOrReturn(append, element): """If |append| is None, simply return |element|. If |append| is not None, then add |element| to it, adding each item in |element| if it's a list or tuple.""" if append is not None and element is not None: if isinstance(element, list) or isinstance(element, tuple): append.extend(element) else: append.append(element) else: return element
[ "def", "_AppendOrReturn", "(", "append", ",", "element", ")", ":", "if", "append", "is", "not", "None", "and", "element", "is", "not", "None", ":", "if", "isinstance", "(", "element", ",", "list", ")", "or", "isinstance", "(", "element", ",", "tuple", ")", ":", "append", ".", "extend", "(", "element", ")", "else", ":", "append", ".", "append", "(", "element", ")", "else", ":", "return", "element" ]
[ 102, 0 ]
[ 112, 18 ]
python
en
['en', 'en', 'en']
True
_FindDirectXInstallation
()
Try to find an installation location for the DirectX SDK. Check for the standard environment variable, and if that doesn't exist, try to find via the registry. May return None if not found in either location.
Try to find an installation location for the DirectX SDK. Check for the standard environment variable, and if that doesn't exist, try to find via the registry. May return None if not found in either location.
def _FindDirectXInstallation(): """Try to find an installation location for the DirectX SDK. Check for the standard environment variable, and if that doesn't exist, try to find via the registry. May return None if not found in either location.""" # Return previously calculated value, if there is one if hasattr(_FindDirectXInstallation, 'dxsdk_dir'): return _FindDirectXInstallation.dxsdk_dir dxsdk_dir = os.environ.get('DXSDK_DIR') if not dxsdk_dir: # Setup params to pass to and attempt to launch reg.exe. cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) for line in p.communicate()[0].splitlines(): if 'InstallPath' in line: dxsdk_dir = line.split(' ')[3] + "\\" # Cache return value _FindDirectXInstallation.dxsdk_dir = dxsdk_dir return dxsdk_dir
[ "def", "_FindDirectXInstallation", "(", ")", ":", "# Return previously calculated value, if there is one", "if", "hasattr", "(", "_FindDirectXInstallation", ",", "'dxsdk_dir'", ")", ":", "return", "_FindDirectXInstallation", ".", "dxsdk_dir", "dxsdk_dir", "=", "os", ".", "environ", ".", "get", "(", "'DXSDK_DIR'", ")", "if", "not", "dxsdk_dir", ":", "# Setup params to pass to and attempt to launch reg.exe.", "cmd", "=", "[", "'reg.exe'", ",", "'query'", ",", "r'HKLM\\Software\\Microsoft\\DirectX'", ",", "'/s'", "]", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "for", "line", "in", "p", ".", "communicate", "(", ")", "[", "0", "]", ".", "splitlines", "(", ")", ":", "if", "'InstallPath'", "in", "line", ":", "dxsdk_dir", "=", "line", ".", "split", "(", "' '", ")", "[", "3", "]", "+", "\"\\\\\"", "# Cache return value", "_FindDirectXInstallation", ".", "dxsdk_dir", "=", "dxsdk_dir", "return", "dxsdk_dir" ]
[ 115, 0 ]
[ 134, 18 ]
python
en
['en', 'en', 'en']
True
GetGlobalVSMacroEnv
(vs_version)
Get a dict of variables mapping internal VS macro names to their gyp equivalents. Returns all variables that are independent of the target.
Get a dict of variables mapping internal VS macro names to their gyp equivalents. Returns all variables that are independent of the target.
def GetGlobalVSMacroEnv(vs_version): """Get a dict of variables mapping internal VS macro names to their gyp equivalents. Returns all variables that are independent of the target.""" env = {} # '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when # Visual Studio is actually installed. if vs_version.Path(): env['$(VSInstallDir)'] = vs_version.Path() env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\' # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be # set. This happens when the SDK is sync'd via src-internal, rather than # by typical end-user installation of the SDK. If it's not set, we don't # want to leave the unexpanded variable in the path, so simply strip it. dxsdk_dir = _FindDirectXInstallation() env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else '' # Try to find an installation location for the Windows DDK by checking # the WDK_DIR environment variable, may be None. env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '') return env
[ "def", "GetGlobalVSMacroEnv", "(", "vs_version", ")", ":", "env", "=", "{", "}", "# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when", "# Visual Studio is actually installed.", "if", "vs_version", ".", "Path", "(", ")", ":", "env", "[", "'$(VSInstallDir)'", "]", "=", "vs_version", ".", "Path", "(", ")", "env", "[", "'$(VCInstallDir)'", "]", "=", "os", ".", "path", ".", "join", "(", "vs_version", ".", "Path", "(", ")", ",", "'VC'", ")", "+", "'\\\\'", "# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be", "# set. This happens when the SDK is sync'd via src-internal, rather than", "# by typical end-user installation of the SDK. If it's not set, we don't", "# want to leave the unexpanded variable in the path, so simply strip it.", "dxsdk_dir", "=", "_FindDirectXInstallation", "(", ")", "env", "[", "'$(DXSDK_DIR)'", "]", "=", "dxsdk_dir", "if", "dxsdk_dir", "else", "''", "# Try to find an installation location for the Windows DDK by checking", "# the WDK_DIR environment variable, may be None.", "env", "[", "'$(WDK_DIR)'", "]", "=", "os", ".", "environ", ".", "get", "(", "'WDK_DIR'", ",", "''", ")", "return", "env" ]
[ 137, 0 ]
[ 155, 12 ]
python
en
['en', 'en', 'en']
True
ExtractSharedMSVSSystemIncludes
(configs, generator_flags)
Finds msvs_system_include_dirs that are common to all targets, removes them from all targets, and returns an OrderedSet containing them.
Finds msvs_system_include_dirs that are common to all targets, removes them from all targets, and returns an OrderedSet containing them.
def ExtractSharedMSVSSystemIncludes(configs, generator_flags): """Finds msvs_system_include_dirs that are common to all targets, removes them from all targets, and returns an OrderedSet containing them.""" all_system_includes = OrderedSet( configs[0].get('msvs_system_include_dirs', [])) for config in configs[1:]: system_includes = config.get('msvs_system_include_dirs', []) all_system_includes = all_system_includes & OrderedSet(system_includes) if not all_system_includes: return None # Expand macros in all_system_includes. env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags)) expanded_system_includes = OrderedSet([ExpandMacros(include, env) for include in all_system_includes]) if any(['$' in include for include in expanded_system_includes]): # Some path relies on target-specific variables, bail. return None # Remove system includes shared by all targets from the targets. for config in configs: includes = config.get('msvs_system_include_dirs', []) if includes: # Don't insert a msvs_system_include_dirs key if not needed. # This must check the unexpanded includes list: new_includes = [i for i in includes if i not in all_system_includes] config['msvs_system_include_dirs'] = new_includes return expanded_system_includes
[ "def", "ExtractSharedMSVSSystemIncludes", "(", "configs", ",", "generator_flags", ")", ":", "all_system_includes", "=", "OrderedSet", "(", "configs", "[", "0", "]", ".", "get", "(", "'msvs_system_include_dirs'", ",", "[", "]", ")", ")", "for", "config", "in", "configs", "[", "1", ":", "]", ":", "system_includes", "=", "config", ".", "get", "(", "'msvs_system_include_dirs'", ",", "[", "]", ")", "all_system_includes", "=", "all_system_includes", "&", "OrderedSet", "(", "system_includes", ")", "if", "not", "all_system_includes", ":", "return", "None", "# Expand macros in all_system_includes.", "env", "=", "GetGlobalVSMacroEnv", "(", "GetVSVersion", "(", "generator_flags", ")", ")", "expanded_system_includes", "=", "OrderedSet", "(", "[", "ExpandMacros", "(", "include", ",", "env", ")", "for", "include", "in", "all_system_includes", "]", ")", "if", "any", "(", "[", "'$'", "in", "include", "for", "include", "in", "expanded_system_includes", "]", ")", ":", "# Some path relies on target-specific variables, bail.", "return", "None", "# Remove system includes shared by all targets from the targets.", "for", "config", "in", "configs", ":", "includes", "=", "config", ".", "get", "(", "'msvs_system_include_dirs'", ",", "[", "]", ")", "if", "includes", ":", "# Don't insert a msvs_system_include_dirs key if not needed.", "# This must check the unexpanded includes list:", "new_includes", "=", "[", "i", "for", "i", "in", "includes", "if", "i", "not", "in", "all_system_includes", "]", "config", "[", "'msvs_system_include_dirs'", "]", "=", "new_includes", "return", "expanded_system_includes" ]
[ 157, 0 ]
[ 182, 33 ]
python
en
['en', 'en', 'en']
True
ExpandMacros
(string, expansions)
Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv for the canonical way to retrieve a suitable dict.
Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv for the canonical way to retrieve a suitable dict.
def ExpandMacros(string, expansions): """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv for the canonical way to retrieve a suitable dict.""" if '$' in string: for old, new in expansions.iteritems(): assert '$(' not in new, new string = string.replace(old, new) return string
[ "def", "ExpandMacros", "(", "string", ",", "expansions", ")", ":", "if", "'$'", "in", "string", ":", "for", "old", ",", "new", "in", "expansions", ".", "iteritems", "(", ")", ":", "assert", "'$('", "not", "in", "new", ",", "new", "string", "=", "string", ".", "replace", "(", "old", ",", "new", ")", "return", "string" ]
[ 939, 0 ]
[ 946, 15 ]
python
en
['en', 'en', 'it']
True
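ExpandMacros is a straight string replacement of each macro key, so a macro only expands if it appears verbatim in the input. A sketch with illustrative expansions:

    def expand_macros(string, expansions):
        if '$' in string:
            for old, new in expansions.items():
                string = string.replace(old, new)
        return string

    env = {'$(OutDir)\\': 'out\\Release\\', '$(TargetName)': 'base_unittests'}
    print(expand_macros('$(OutDir)\\$(TargetName).exe', env))
    # out\Release\base_unittests.exe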
_ExtractImportantEnvironment
(output_of_set)
Extracts environment variables required for the toolchain to run from a textual dump output by the cmd.exe 'set' command.
Extracts environment variables required for the toolchain to run from a textual dump output by the cmd.exe 'set' command.
def _ExtractImportantEnvironment(output_of_set): """Extracts environment variables required for the toolchain to run from a textual dump output by the cmd.exe 'set' command.""" envvars_to_save = ( 'goma_.*', # TODO(scottmg): This is ugly, but needed for goma. 'include', 'lib', 'libpath', 'path', 'pathext', 'systemroot', 'temp', 'tmp', ) env = {} for line in output_of_set.splitlines(): for envvar in envvars_to_save: if re.match(envvar + '=', line.lower()): var, setting = line.split('=', 1) if envvar == 'path': # Our own rules (for running gyp-win-tool) and other actions in # Chromium rely on python being in the path. Add the path to this # python here so that if it's not in the path when ninja is run # later, python will still be found. setting = os.path.dirname(sys.executable) + os.pathsep + setting env[var.upper()] = setting break for required in ('SYSTEMROOT', 'TEMP', 'TMP'): if required not in env: raise Exception('Environment variable "%s" ' 'required to be set to valid path' % required) return env
[ "def", "_ExtractImportantEnvironment", "(", "output_of_set", ")", ":", "envvars_to_save", "=", "(", "'goma_.*'", ",", "# TODO(scottmg): This is ugly, but needed for goma.", "'include'", ",", "'lib'", ",", "'libpath'", ",", "'path'", ",", "'pathext'", ",", "'systemroot'", ",", "'temp'", ",", "'tmp'", ",", ")", "env", "=", "{", "}", "for", "line", "in", "output_of_set", ".", "splitlines", "(", ")", ":", "for", "envvar", "in", "envvars_to_save", ":", "if", "re", ".", "match", "(", "envvar", "+", "'='", ",", "line", ".", "lower", "(", ")", ")", ":", "var", ",", "setting", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "if", "envvar", "==", "'path'", ":", "# Our own rules (for running gyp-win-tool) and other actions in", "# Chromium rely on python being in the path. Add the path to this", "# python here so that if it's not in the path when ninja is run", "# later, python will still be found.", "setting", "=", "os", ".", "path", ".", "dirname", "(", "sys", ".", "executable", ")", "+", "os", ".", "pathsep", "+", "setting", "env", "[", "var", ".", "upper", "(", ")", "]", "=", "setting", "break", "for", "required", "in", "(", "'SYSTEMROOT'", ",", "'TEMP'", ",", "'TMP'", ")", ":", "if", "required", "not", "in", "env", ":", "raise", "Exception", "(", "'Environment variable \"%s\" '", "'required to be set to valid path'", "%", "required", ")", "return", "env" ]
[ 948, 0 ]
[ 979, 12 ]
python
en
['en', 'en', 'en']
True
_FormatAsEnvironmentBlock
(envvar_dict)
Format as an 'environment block' directly suitable for CreateProcess. Briefly this is a list of key=value\0, terminated by an additional \0. See CreateProcess documentation for more details.
Format as an 'environment block' directly suitable for CreateProcess. Briefly this is a list of key=value\0, terminated by an additional \0. See CreateProcess documentation for more details.
def _FormatAsEnvironmentBlock(envvar_dict): """Format as an 'environment block' directly suitable for CreateProcess. Briefly this is a list of key=value\0, terminated by an additional \0. See CreateProcess documentation for more details.""" block = '' nul = '\0' for key, value in envvar_dict.iteritems(): block += key + '=' + value + nul block += nul return block
[ "def", "_FormatAsEnvironmentBlock", "(", "envvar_dict", ")", ":", "block", "=", "''", "nul", "=", "'\\0'", "for", "key", ",", "value", "in", "envvar_dict", ".", "iteritems", "(", ")", ":", "block", "+=", "key", "+", "'='", "+", "value", "+", "nul", "block", "+=", "nul", "return", "block" ]
[ 981, 0 ]
[ 990, 14 ]
python
en
['en', 'en', 'en']
True
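The environment block is a run of KEY=value entries, each terminated by a NUL byte, with an extra NUL at the end. A sketch showing the shape with made-up values:

    def format_block(envvar_dict):
        block = ''
        for key, value in envvar_dict.items():
            block += key + '=' + value + '\0'
        return block + '\0'

    print(repr(format_block({'SYSTEMROOT': 'C:\\Windows', 'TMP': 'C:\\Temp'})))
    # 'SYSTEMROOT=C:\\Windows\x00TMP=C:\\Temp\x00\x00'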
_ExtractCLPath
(output_of_where)
Gets the path to cl.exe based on the output of calling the environment setup batch file, followed by the equivalent of `where`.
Gets the path to cl.exe based on the output of calling the environment setup batch file, followed by the equivalent of `where`.
def _ExtractCLPath(output_of_where): """Gets the path to cl.exe based on the output of calling the environment setup batch file, followed by the equivalent of `where`.""" # Take the first line, as that's the first found in the PATH. for line in output_of_where.strip().splitlines(): if line.startswith('LOC:'): return line[len('LOC:'):].strip()
[ "def", "_ExtractCLPath", "(", "output_of_where", ")", ":", "# Take the first line, as that's the first found in the PATH.", "for", "line", "in", "output_of_where", ".", "strip", "(", ")", ".", "splitlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "'LOC:'", ")", ":", "return", "line", "[", "len", "(", "'LOC:'", ")", ":", "]", ".", "strip", "(", ")" ]
[ 992, 0 ]
[ 998, 39 ]
python
en
['en', 'en', 'en']
True
GenerateEnvironmentFiles
(toplevel_build_dir, generator_flags, system_includes, open_out)
It's not sufficient to have the absolute path to the compiler, linker, etc. on Windows, as those tools rely on .dlls being in the PATH. We also need to support both x86 and x64 compilers within the same build (to support msvs_target_platform hackery). Different architectures require a different compiler binary, and different supporting environment variables (INCLUDE, LIB, LIBPATH). So, we extract the environment here, wrap all invocations of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which sets up the environment, and then we do not prefix the compiler with an absolute path, instead preferring something like "cl.exe" in the rule which will then run whichever the environment setup has put in the path. When the following procedure to generate environment files does not meet your requirement (e.g. for custom toolchains), you can pass "-G ninja_use_custom_environment_files" to the gyp to suppress file generation and use custom environment files prepared by yourself.
It's not sufficient to have the absolute path to the compiler, linker, etc. on Windows, as those tools rely on .dlls being in the PATH. We also need to support both x86 and x64 compilers within the same build (to support msvs_target_platform hackery). Different architectures require a different compiler binary, and different supporting environment variables (INCLUDE, LIB, LIBPATH). So, we extract the environment here, wrap all invocations of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which sets up the environment, and then we do not prefix the compiler with an absolute path, instead preferring something like "cl.exe" in the rule which will then run whichever the environment setup has put in the path. When the following procedure to generate environment files does not meet your requirement (e.g. for custom toolchains), you can pass "-G ninja_use_custom_environment_files" to the gyp to suppress file generation and use custom environment files prepared by yourself.
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, system_includes, open_out): """It's not sufficient to have the absolute path to the compiler, linker, etc. on Windows, as those tools rely on .dlls being in the PATH. We also need to support both x86 and x64 compilers within the same build (to support msvs_target_platform hackery). Different architectures require a different compiler binary, and different supporting environment variables (INCLUDE, LIB, LIBPATH). So, we extract the environment here, wrap all invocations of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which sets up the environment, and then we do not prefix the compiler with an absolute path, instead preferring something like "cl.exe" in the rule which will then run whichever the environment setup has put in the path. When the following procedure to generate environment files does not meet your requirement (e.g. for custom toolchains), you can pass "-G ninja_use_custom_environment_files" to the gyp to suppress file generation and use custom environment files prepared by yourself.""" archs = ('x86', 'x64') if generator_flags.get('ninja_use_custom_environment_files', 0): cl_paths = {} for arch in archs: cl_paths[arch] = 'cl.exe' return cl_paths vs = GetVSVersion(generator_flags) cl_paths = {} for arch in archs: # Extract environment variables for subprocesses. args = vs.SetupScript(arch) args.extend(('&&', 'set')) popen = subprocess.Popen( args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) variables, _ = popen.communicate() env = _ExtractImportantEnvironment(variables) # Inject system includes from gyp files into INCLUDE. if system_includes: system_includes = system_includes | OrderedSet( env.get('INCLUDE', '').split(';')) env['INCLUDE'] = ';'.join(system_includes) env_block = _FormatAsEnvironmentBlock(env) f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb') f.write(env_block) f.close() # Find cl.exe location for this architecture. args = vs.SetupScript(arch) args.extend(('&&', 'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i')) popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE) output, _ = popen.communicate() cl_paths[arch] = _ExtractCLPath(output) return cl_paths
[ "def", "GenerateEnvironmentFiles", "(", "toplevel_build_dir", ",", "generator_flags", ",", "system_includes", ",", "open_out", ")", ":", "archs", "=", "(", "'x86'", ",", "'x64'", ")", "if", "generator_flags", ".", "get", "(", "'ninja_use_custom_environment_files'", ",", "0", ")", ":", "cl_paths", "=", "{", "}", "for", "arch", "in", "archs", ":", "cl_paths", "[", "arch", "]", "=", "'cl.exe'", "return", "cl_paths", "vs", "=", "GetVSVersion", "(", "generator_flags", ")", "cl_paths", "=", "{", "}", "for", "arch", "in", "archs", ":", "# Extract environment variables for subprocesses.", "args", "=", "vs", ".", "SetupScript", "(", "arch", ")", "args", ".", "extend", "(", "(", "'&&'", ",", "'set'", ")", ")", "popen", "=", "subprocess", ".", "Popen", "(", "args", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "variables", ",", "_", "=", "popen", ".", "communicate", "(", ")", "env", "=", "_ExtractImportantEnvironment", "(", "variables", ")", "# Inject system includes from gyp files into INCLUDE.", "if", "system_includes", ":", "system_includes", "=", "system_includes", "|", "OrderedSet", "(", "env", ".", "get", "(", "'INCLUDE'", ",", "''", ")", ".", "split", "(", "';'", ")", ")", "env", "[", "'INCLUDE'", "]", "=", "';'", ".", "join", "(", "system_includes", ")", "env_block", "=", "_FormatAsEnvironmentBlock", "(", "env", ")", "f", "=", "open_out", "(", "os", ".", "path", ".", "join", "(", "toplevel_build_dir", ",", "'environment.'", "+", "arch", ")", ",", "'wb'", ")", "f", ".", "write", "(", "env_block", ")", "f", ".", "close", "(", ")", "# Find cl.exe location for this architecture.", "args", "=", "vs", ".", "SetupScript", "(", "arch", ")", "args", ".", "extend", "(", "(", "'&&'", ",", "'for'", ",", "'%i'", ",", "'in'", ",", "'(cl.exe)'", ",", "'do'", ",", "'@echo'", ",", "'LOC:%~$PATH:i'", ")", ")", "popen", "=", "subprocess", ".", "Popen", "(", "args", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "output", ",", "_", "=", "popen", ".", "communicate", "(", ")", "cl_paths", "[", "arch", "]", "=", "_ExtractCLPath", "(", "output", ")", "return", "cl_paths" ]
[ 1000, 0 ]
[ 1051, 17 ]
python
en
['en', 'en', 'en']
True
VerifyMissingSources
(sources, build_dir, generator_flags, gyp_to_ninja)
Emulate behavior of msvs_error_on_missing_sources present in the msvs generator: Check that all regular source files, i.e. not created at run time, exist on disk. Missing files cause needless recompilation when building via VS, and we want this check to match for people/bots that build using ninja, so they're not surprised when the VS build fails.
Emulate behavior of msvs_error_on_missing_sources present in the msvs generator: Check that all regular source files, i.e. not created at run time, exist on disk. Missing files cause needless recompilation when building via VS, and we want this check to match for people/bots that build using ninja, so they're not surprised when the VS build fails.
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja): """Emulate behavior of msvs_error_on_missing_sources present in the msvs generator: Check that all regular source files, i.e. not created at run time, exist on disk. Missing files cause needless recompilation when building via VS, and we want this check to match for people/bots that build using ninja, so they're not surprised when the VS build fails.""" if int(generator_flags.get('msvs_error_on_missing_sources', 0)): no_specials = filter(lambda x: '$' not in x, sources) relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials] missing = filter(lambda x: not os.path.exists(x), relative) if missing: # They'll look like out\Release\..\..\stuff\things.cc, so normalize the # path for a slightly less crazy looking output. cleaned_up = [os.path.normpath(x) for x in missing] raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
[ "def", "VerifyMissingSources", "(", "sources", ",", "build_dir", ",", "generator_flags", ",", "gyp_to_ninja", ")", ":", "if", "int", "(", "generator_flags", ".", "get", "(", "'msvs_error_on_missing_sources'", ",", "0", ")", ")", ":", "no_specials", "=", "filter", "(", "lambda", "x", ":", "'$'", "not", "in", "x", ",", "sources", ")", "relative", "=", "[", "os", ".", "path", ".", "join", "(", "build_dir", ",", "gyp_to_ninja", "(", "s", ")", ")", "for", "s", "in", "no_specials", "]", "missing", "=", "filter", "(", "lambda", "x", ":", "not", "os", ".", "path", ".", "exists", "(", "x", ")", ",", "relative", ")", "if", "missing", ":", "# They'll look like out\\Release\\..\\..\\stuff\\things.cc, so normalize the", "# path for a slightly less crazy looking output.", "cleaned_up", "=", "[", "os", ".", "path", ".", "normpath", "(", "x", ")", "for", "x", "in", "missing", "]", "raise", "Exception", "(", "'Missing input files:\\n%s'", "%", "'\\n'", ".", "join", "(", "cleaned_up", ")", ")" ]
[ 1053, 0 ]
[ 1067, 73 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetExtension
(self)
Returns the extension for the target, with no leading dot. Uses 'product_extension' if specified, otherwise uses MSVS defaults based on the target type.
Returns the extension for the target, with no leading dot.
def GetExtension(self): """Returns the extension for the target, with no leading dot. Uses 'product_extension' if specified, otherwise uses MSVS defaults based on the target type. """ ext = self.spec.get('product_extension', None) if ext: return ext return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
[ "def", "GetExtension", "(", "self", ")", ":", "ext", "=", "self", ".", "spec", ".", "get", "(", "'product_extension'", ",", "None", ")", "if", "ext", ":", "return", "ext", "return", "gyp", ".", "MSVSUtil", ".", "TARGET_TYPE_EXT", ".", "get", "(", "self", ".", "spec", "[", "'type'", "]", ",", "''", ")" ]
[ 225, 2 ]
[ 234, 66 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetVSMacroEnv
(self, base_to_build=None, config=None)
Get a dict of variables mapping internal VS macro names to their gyp equivalents.
Get a dict of variables mapping internal VS macro names to their gyp equivalents.
def GetVSMacroEnv(self, base_to_build=None, config=None): """Get a dict of variables mapping internal VS macro names to their gyp equivalents.""" target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64' target_name = self.spec.get('product_prefix', '') + \ self.spec.get('product_name', self.spec['target_name']) target_dir = base_to_build + '\\' if base_to_build else '' target_ext = '.' + self.GetExtension() target_file_name = target_name + target_ext replacements = { '$(InputName)': '${root}', '$(InputPath)': '${source}', '$(IntDir)': '$!INTERMEDIATE_DIR', '$(OutDir)\\': target_dir, '$(PlatformName)': target_platform, '$(ProjectDir)\\': '', '$(ProjectName)': self.spec['target_name'], '$(TargetDir)\\': target_dir, '$(TargetExt)': target_ext, '$(TargetFileName)': target_file_name, '$(TargetName)': target_name, '$(TargetPath)': os.path.join(target_dir, target_file_name), } replacements.update(GetGlobalVSMacroEnv(self.vs_version)) return replacements
[ "def", "GetVSMacroEnv", "(", "self", ",", "base_to_build", "=", "None", ",", "config", "=", "None", ")", ":", "target_platform", "=", "'Win32'", "if", "self", ".", "GetArch", "(", "config", ")", "==", "'x86'", "else", "'x64'", "target_name", "=", "self", ".", "spec", ".", "get", "(", "'product_prefix'", ",", "''", ")", "+", "self", ".", "spec", ".", "get", "(", "'product_name'", ",", "self", ".", "spec", "[", "'target_name'", "]", ")", "target_dir", "=", "base_to_build", "+", "'\\\\'", "if", "base_to_build", "else", "''", "target_ext", "=", "'.'", "+", "self", ".", "GetExtension", "(", ")", "target_file_name", "=", "target_name", "+", "target_ext", "replacements", "=", "{", "'$(InputName)'", ":", "'${root}'", ",", "'$(InputPath)'", ":", "'${source}'", ",", "'$(IntDir)'", ":", "'$!INTERMEDIATE_DIR'", ",", "'$(OutDir)\\\\'", ":", "target_dir", ",", "'$(PlatformName)'", ":", "target_platform", ",", "'$(ProjectDir)\\\\'", ":", "''", ",", "'$(ProjectName)'", ":", "self", ".", "spec", "[", "'target_name'", "]", ",", "'$(TargetDir)\\\\'", ":", "target_dir", ",", "'$(TargetExt)'", ":", "target_ext", ",", "'$(TargetFileName)'", ":", "target_file_name", ",", "'$(TargetName)'", ":", "target_name", ",", "'$(TargetPath)'", ":", "os", ".", "path", ".", "join", "(", "target_dir", ",", "target_file_name", ")", ",", "}", "replacements", ".", "update", "(", "GetGlobalVSMacroEnv", "(", "self", ".", "vs_version", ")", ")", "return", "replacements" ]
[ 236, 2 ]
[ 261, 23 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.ConvertVSMacros
(self, s, base_to_build=None, config=None)
Convert from VS macro names to something equivalent.
Convert from VS macro names to something equivalent.
def ConvertVSMacros(self, s, base_to_build=None, config=None): """Convert from VS macro names to something equivalent.""" env = self.GetVSMacroEnv(base_to_build, config=config) return ExpandMacros(s, env)
[ "def", "ConvertVSMacros", "(", "self", ",", "s", ",", "base_to_build", "=", "None", ",", "config", "=", "None", ")", ":", "env", "=", "self", ".", "GetVSMacroEnv", "(", "base_to_build", ",", "config", "=", "config", ")", "return", "ExpandMacros", "(", "s", ",", "env", ")" ]
[ 263, 2 ]
[ 266, 31 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.AdjustLibraries
(self, libraries)
Strip -l from library if it's specified with that.
Strip -l from library if it's specified with that.
def AdjustLibraries(self, libraries): """Strip -l from library if it's specified with that.""" libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries] return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
[ "def", "AdjustLibraries", "(", "self", ",", "libraries", ")", ":", "libs", "=", "[", "lib", "[", "2", ":", "]", "if", "lib", ".", "startswith", "(", "'-l'", ")", "else", "lib", "for", "lib", "in", "libraries", "]", "return", "[", "lib", "+", "'.lib'", "if", "not", "lib", ".", "endswith", "(", "'.lib'", ")", "else", "lib", "for", "lib", "in", "libs", "]" ]
[ 268, 2 ]
[ 271, 78 ]
python
en
['en', 'en', 'en']
True
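AdjustLibraries strips a leading -l and appends .lib where it is missing, leaving names that already end in .lib untouched. A sketch with illustrative inputs:

    libraries = ['-lwinmm', 'kernel32.lib', 'dbghelp']
    libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
    print([lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs])
    # ['winmm.lib', 'kernel32.lib', 'dbghelp.lib']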
MsvsSettings._GetAndMunge
(self, field, path, default, prefix, append, map)
Retrieve a value from |field| at |path| or return |default|. If |append| is specified, and the item is found, it will be appended to that object instead of returned. If |map| is specified, results will be remapped through |map| before being returned or appended.
Retrieve a value from |field| at |path| or return |default|. If |append| is specified, and the item is found, it will be appended to that object instead of returned. If |map| is specified, results will be remapped through |map| before being returned or appended.
def _GetAndMunge(self, field, path, default, prefix, append, map): """Retrieve a value from |field| at |path| or return |default|. If |append| is specified, and the item is found, it will be appended to that object instead of returned. If |map| is specified, results will be remapped through |map| before being returned or appended.""" result = _GenericRetrieve(field, default, path) result = _DoRemapping(result, map) result = _AddPrefix(result, prefix) return _AppendOrReturn(append, result)
[ "def", "_GetAndMunge", "(", "self", ",", "field", ",", "path", ",", "default", ",", "prefix", ",", "append", ",", "map", ")", ":", "result", "=", "_GenericRetrieve", "(", "field", ",", "default", ",", "path", ")", "result", "=", "_DoRemapping", "(", "result", ",", "map", ")", "result", "=", "_AddPrefix", "(", "result", ",", "prefix", ")", "return", "_AppendOrReturn", "(", "append", ",", "result", ")" ]
[ 273, 2 ]
[ 281, 42 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetArch
(self, config)
Get architecture based on msvs_configuration_platform and msvs_target_platform. Returns either 'x86' or 'x64'.
Get architecture based on msvs_configuration_platform and msvs_target_platform. Returns either 'x86' or 'x64'.
def GetArch(self, config): """Get architecture based on msvs_configuration_platform and msvs_target_platform. Returns either 'x86' or 'x64'.""" configuration_platform = self.msvs_configuration_platform.get(config, '') platform = self.msvs_target_platform.get(config, '') if not platform: # If no specific override, use the configuration's. platform = configuration_platform # Map from platform to architecture. return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
[ "def", "GetArch", "(", "self", ",", "config", ")", ":", "configuration_platform", "=", "self", ".", "msvs_configuration_platform", ".", "get", "(", "config", ",", "''", ")", "platform", "=", "self", ".", "msvs_target_platform", ".", "get", "(", "config", ",", "''", ")", "if", "not", "platform", ":", "# If no specific override, use the configuration's.", "platform", "=", "configuration_platform", "# Map from platform to architecture.", "return", "{", "'Win32'", ":", "'x86'", ",", "'x64'", ":", "'x64'", "}", ".", "get", "(", "platform", ",", "'x86'", ")" ]
[ 293, 2 ]
[ 301, 62 ]
python
en
['en', 'en', 'en']
True
MsvsSettings._TargetConfig
(self, config)
Returns the target-specific configuration.
Returns the target-specific configuration.
def _TargetConfig(self, config): """Returns the target-specific configuration.""" # There's two levels of architecture/platform specification in VS. The # first level is globally for the configuration (this is what we consider # "the" config at the gyp level, which will be something like 'Debug' or # 'Release_x64'), and a second target-specific configuration, which is an # override for the global one. |config| is remapped here to take into # account the local target-specific overrides to the global configuration. arch = self.GetArch(config) if arch == 'x64' and not config.endswith('_x64'): config += '_x64' if arch == 'x86' and config.endswith('_x64'): config = config.rsplit('_', 1)[0] return config
[ "def", "_TargetConfig", "(", "self", ",", "config", ")", ":", "# There's two levels of architecture/platform specification in VS. The", "# first level is globally for the configuration (this is what we consider", "# \"the\" config at the gyp level, which will be something like 'Debug' or", "# 'Release_x64'), and a second target-specific configuration, which is an", "# override for the global one. |config| is remapped here to take into", "# account the local target-specific overrides to the global configuration.", "arch", "=", "self", ".", "GetArch", "(", "config", ")", "if", "arch", "==", "'x64'", "and", "not", "config", ".", "endswith", "(", "'_x64'", ")", ":", "config", "+=", "'_x64'", "if", "arch", "==", "'x86'", "and", "config", ".", "endswith", "(", "'_x64'", ")", ":", "config", "=", "config", ".", "rsplit", "(", "'_'", ",", "1", ")", "[", "0", "]", "return", "config" ]
[ 303, 2 ]
[ 316, 17 ]
python
en
['en', 'en', 'en']
True
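_TargetConfig only rewrites the configuration name: an x64 target gets an _x64 suffix added, an x86 target gets a trailing _x64 stripped. A sketch of that remapping on its own:

    def target_config(config, arch):
        if arch == 'x64' and not config.endswith('_x64'):
            config += '_x64'
        if arch == 'x86' and config.endswith('_x64'):
            config = config.rsplit('_', 1)[0]
        return config

    print(target_config('Debug', 'x64'))        # Debug_x64
    print(target_config('Release_x64', 'x86'))  # Release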
MsvsSettings._Setting
(self, path, config, default=None, prefix='', append=None, map=None)
_GetAndMunge for msvs_settings.
_GetAndMunge for msvs_settings.
def _Setting(self, path, config, default=None, prefix='', append=None, map=None): """_GetAndMunge for msvs_settings.""" return self._GetAndMunge( self.msvs_settings[config], path, default, prefix, append, map)
[ "def", "_Setting", "(", "self", ",", "path", ",", "config", ",", "default", "=", "None", ",", "prefix", "=", "''", ",", "append", "=", "None", ",", "map", "=", "None", ")", ":", "return", "self", ".", "_GetAndMunge", "(", "self", ".", "msvs_settings", "[", "config", "]", ",", "path", ",", "default", ",", "prefix", ",", "append", ",", "map", ")" ]
[ 318, 2 ]
[ 322, 71 ]
python
de
['de', 'no', 'en']
False
MsvsSettings._ConfigAttrib
(self, path, config, default=None, prefix='', append=None, map=None)
_GetAndMunge for msvs_configuration_attributes.
_GetAndMunge for msvs_configuration_attributes.
def _ConfigAttrib(self, path, config, default=None, prefix='', append=None, map=None): """_GetAndMunge for msvs_configuration_attributes.""" return self._GetAndMunge( self.msvs_configuration_attributes[config], path, default, prefix, append, map)
[ "def", "_ConfigAttrib", "(", "self", ",", "path", ",", "config", ",", "default", "=", "None", ",", "prefix", "=", "''", ",", "append", "=", "None", ",", "map", "=", "None", ")", ":", "return", "self", ".", "_GetAndMunge", "(", "self", ".", "msvs_configuration_attributes", "[", "config", "]", ",", "path", ",", "default", ",", "prefix", ",", "append", ",", "map", ")" ]
[ 324, 2 ]
[ 329, 43 ]
python
da
['de', 'da', 'en']
False
MsvsSettings.AdjustIncludeDirs
(self, include_dirs, config)
Updates include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.
Updates include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.
def AdjustIncludeDirs(self, include_dirs, config): """Updates include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.""" config = self._TargetConfig(config) includes = include_dirs + self.msvs_system_include_dirs[config] includes.extend(self._Setting( ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[])) return [self.ConvertVSMacros(p, config=config) for p in includes]
[ "def", "AdjustIncludeDirs", "(", "self", ",", "include_dirs", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "includes", "=", "include_dirs", "+", "self", ".", "msvs_system_include_dirs", "[", "config", "]", "includes", ".", "extend", "(", "self", ".", "_Setting", "(", "(", "'VCCLCompilerTool'", ",", "'AdditionalIncludeDirectories'", ")", ",", "config", ",", "default", "=", "[", "]", ")", ")", "return", "[", "self", ".", "ConvertVSMacros", "(", "p", ",", "config", "=", "config", ")", "for", "p", "in", "includes", "]" ]
[ 331, 2 ]
[ 338, 69 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.AdjustMidlIncludeDirs
(self, midl_include_dirs, config)
Updates midl_include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.
Updates midl_include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.
def AdjustMidlIncludeDirs(self, midl_include_dirs, config): """Updates midl_include_dirs to expand VS specific paths, and adds the system include dirs used for platform SDK and similar.""" config = self._TargetConfig(config) includes = midl_include_dirs + self.msvs_system_include_dirs[config] includes.extend(self._Setting( ('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[])) return [self.ConvertVSMacros(p, config=config) for p in includes]
[ "def", "AdjustMidlIncludeDirs", "(", "self", ",", "midl_include_dirs", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "includes", "=", "midl_include_dirs", "+", "self", ".", "msvs_system_include_dirs", "[", "config", "]", "includes", ".", "extend", "(", "self", ".", "_Setting", "(", "(", "'VCMIDLTool'", ",", "'AdditionalIncludeDirectories'", ")", ",", "config", ",", "default", "=", "[", "]", ")", ")", "return", "[", "self", ".", "ConvertVSMacros", "(", "p", ",", "config", "=", "config", ")", "for", "p", "in", "includes", "]" ]
[ 340, 2 ]
[ 347, 69 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetComputedDefines
(self, config)
Returns the set of defines that are injected to the defines list based on other VS settings.
Returns the set of defines that are injected to the defines list based on other VS settings.
def GetComputedDefines(self, config): """Returns the set of defines that are injected to the defines list based on other VS settings.""" config = self._TargetConfig(config) defines = [] if self._ConfigAttrib(['CharacterSet'], config) == '1': defines.extend(('_UNICODE', 'UNICODE')) if self._ConfigAttrib(['CharacterSet'], config) == '2': defines.append('_MBCS') defines.extend(self._Setting( ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[])) return defines
[ "def", "GetComputedDefines", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "defines", "=", "[", "]", "if", "self", ".", "_ConfigAttrib", "(", "[", "'CharacterSet'", "]", ",", "config", ")", "==", "'1'", ":", "defines", ".", "extend", "(", "(", "'_UNICODE'", ",", "'UNICODE'", ")", ")", "if", "self", ".", "_ConfigAttrib", "(", "[", "'CharacterSet'", "]", ",", "config", ")", "==", "'2'", ":", "defines", ".", "append", "(", "'_MBCS'", ")", "defines", ".", "extend", "(", "self", ".", "_Setting", "(", "(", "'VCCLCompilerTool'", ",", "'PreprocessorDefinitions'", ")", ",", "config", ",", "default", "=", "[", "]", ")", ")", "return", "defines" ]
[ 349, 2 ]
[ 360, 18 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetCompilerPdbName
(self, config, expand_special)
Get the pdb file name that should be used for compiler invocations, or None if there's no explicit name specified.
Get the pdb file name that should be used for compiler invocations, or None if there's no explicit name specified.
def GetCompilerPdbName(self, config, expand_special): """Get the pdb file name that should be used for compiler invocations, or None if there's no explicit name specified.""" config = self._TargetConfig(config) pdbname = self._Setting( ('VCCLCompilerTool', 'ProgramDataBaseFileName'), config) if pdbname: pdbname = expand_special(self.ConvertVSMacros(pdbname)) return pdbname
[ "def", "GetCompilerPdbName", "(", "self", ",", "config", ",", "expand_special", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "pdbname", "=", "self", ".", "_Setting", "(", "(", "'VCCLCompilerTool'", ",", "'ProgramDataBaseFileName'", ")", ",", "config", ")", "if", "pdbname", ":", "pdbname", "=", "expand_special", "(", "self", ".", "ConvertVSMacros", "(", "pdbname", ")", ")", "return", "pdbname" ]
[ 362, 2 ]
[ 370, 18 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetMapFileName
(self, config, expand_special)
Gets the explicitly overridden map file name for a target or returns None if it's not set.
Gets the explicitly overridden map file name for a target or returns None if it's not set.
def GetMapFileName(self, config, expand_special): """Gets the explicitly overridden map file name for a target or returns None if it's not set.""" config = self._TargetConfig(config) map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config) if map_file: map_file = expand_special(self.ConvertVSMacros(map_file, config=config)) return map_file
[ "def", "GetMapFileName", "(", "self", ",", "config", ",", "expand_special", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "map_file", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'MapFileName'", ")", ",", "config", ")", "if", "map_file", ":", "map_file", "=", "expand_special", "(", "self", ".", "ConvertVSMacros", "(", "map_file", ",", "config", "=", "config", ")", ")", "return", "map_file" ]
[ 372, 2 ]
[ 379, 19 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetOutputName
(self, config, expand_special)
Gets the explicitly overridden output name for a target or returns None if it's not overridden.
Gets the explicitly overridden output name for a target or returns None if it's not overridden.
def GetOutputName(self, config, expand_special): """Gets the explicitly overridden output name for a target or returns None if it's not overridden.""" config = self._TargetConfig(config) type = self.spec['type'] root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool' # TODO(scottmg): Handle OutputDirectory without OutputFile. output_file = self._Setting((root, 'OutputFile'), config) if output_file: output_file = expand_special(self.ConvertVSMacros( output_file, config=config)) return output_file
[ "def", "GetOutputName", "(", "self", ",", "config", ",", "expand_special", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "type", "=", "self", ".", "spec", "[", "'type'", "]", "root", "=", "'VCLibrarianTool'", "if", "type", "==", "'static_library'", "else", "'VCLinkerTool'", "# TODO(scottmg): Handle OutputDirectory without OutputFile.", "output_file", "=", "self", ".", "_Setting", "(", "(", "root", ",", "'OutputFile'", ")", ",", "config", ")", "if", "output_file", ":", "output_file", "=", "expand_special", "(", "self", ".", "ConvertVSMacros", "(", "output_file", ",", "config", "=", "config", ")", ")", "return", "output_file" ]
[ 381, 2 ]
[ 392, 22 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetPDBName
(self, config, expand_special, default)
Gets the explicitly overridden pdb name for a target or returns default if it's not overridden, or if no pdb will be generated.
Gets the explicitly overridden pdb name for a target or returns default if it's not overridden, or if no pdb will be generated.
def GetPDBName(self, config, expand_special, default): """Gets the explicitly overridden pdb name for a target or returns default if it's not overridden, or if no pdb will be generated.""" config = self._TargetConfig(config) output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config) generate_debug_info = self._Setting( ('VCLinkerTool', 'GenerateDebugInformation'), config) if generate_debug_info == 'true': if output_file: return expand_special(self.ConvertVSMacros(output_file, config=config)) else: return default else: return None
[ "def", "GetPDBName", "(", "self", ",", "config", ",", "expand_special", ",", "default", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "output_file", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'ProgramDatabaseFile'", ")", ",", "config", ")", "generate_debug_info", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'GenerateDebugInformation'", ")", ",", "config", ")", "if", "generate_debug_info", "==", "'true'", ":", "if", "output_file", ":", "return", "expand_special", "(", "self", ".", "ConvertVSMacros", "(", "output_file", ",", "config", "=", "config", ")", ")", "else", ":", "return", "default", "else", ":", "return", "None" ]
[ 394, 2 ]
[ 407, 17 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetNoImportLibrary
(self, config)
If NoImportLibrary: true, ninja will not expect the output to include an import library.
If NoImportLibrary: true, ninja will not expect the output to include an import library.
def GetNoImportLibrary(self, config): """If NoImportLibrary: true, ninja will not expect the output to include an import library.""" config = self._TargetConfig(config) noimplib = self._Setting(('NoImportLibrary',), config) return noimplib == 'true'
[ "def", "GetNoImportLibrary", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "noimplib", "=", "self", ".", "_Setting", "(", "(", "'NoImportLibrary'", ",", ")", ",", "config", ")", "return", "noimplib", "==", "'true'" ]
[ 409, 2 ]
[ 414, 29 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetAsmflags
(self, config)
Returns the flags that need to be added to ml invocations.
Returns the flags that need to be added to ml invocations.
def GetAsmflags(self, config): """Returns the flags that need to be added to ml invocations.""" config = self._TargetConfig(config) asmflags = [] safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config) if safeseh == 'true': asmflags.append('/safeseh') return asmflags
[ "def", "GetAsmflags", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "asmflags", "=", "[", "]", "safeseh", "=", "self", ".", "_Setting", "(", "(", "'MASM'", ",", "'UseSafeExceptionHandlers'", ")", ",", "config", ")", "if", "safeseh", "==", "'true'", ":", "asmflags", ".", "append", "(", "'/safeseh'", ")", "return", "asmflags" ]
[ 416, 2 ]
[ 423, 19 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetCflags
(self, config)
Returns the flags that need to be added to .c and .cc compilations.
Returns the flags that need to be added to .c and .cc compilations.
def GetCflags(self, config): """Returns the flags that need to be added to .c and .cc compilations.""" config = self._TargetConfig(config) cflags = [] cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]]) cl = self._GetWrapper(self, self.msvs_settings[config], 'VCCLCompilerTool', append=cflags) cl('Optimization', map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2') cl('InlineFunctionExpansion', prefix='/Ob') cl('DisableSpecificWarnings', prefix='/wd') cl('StringPooling', map={'true': '/GF'}) cl('EnableFiberSafeOptimizations', map={'true': '/GT'}) cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy') cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi') cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O') cl('FloatingPointModel', map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:', default='0') cl('CompileAsManaged', map={'false': '', 'true': '/clr'}) cl('WholeProgramOptimization', map={'true': '/GL'}) cl('WarningLevel', prefix='/W') cl('WarnAsError', map={'true': '/WX'}) cl('CallingConvention', map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G') cl('DebugInformationFormat', map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z') cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'}) cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'}) cl('MinimalRebuild', map={'true': '/Gm'}) cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'}) cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC') cl('RuntimeLibrary', map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M') cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH') cl('DefaultCharIsUnsigned', map={'true': '/J'}) cl('TreatWChar_tAsBuiltInType', map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t') cl('EnablePREfast', map={'true': '/analyze'}) cl('AdditionalOptions', prefix='') cl('EnableEnhancedInstructionSet', map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'}, prefix='/arch:') cflags.extend(['/FI' + f for f in self._Setting( ('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])]) if self.vs_version.short_name in ('2013', '2013e', '2015'): # New flag required in 2013 to maintain previous PDB behavior. cflags.append('/FS') # ninja handles parallelism by itself, don't have the compiler do it too. cflags = filter(lambda x: not x.startswith('/MP'), cflags) return cflags
[ "def", "GetCflags", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "cflags", "=", "[", "]", "cflags", ".", "extend", "(", "[", "'/wd'", "+", "w", "for", "w", "in", "self", ".", "msvs_disabled_warnings", "[", "config", "]", "]", ")", "cl", "=", "self", ".", "_GetWrapper", "(", "self", ",", "self", ".", "msvs_settings", "[", "config", "]", ",", "'VCCLCompilerTool'", ",", "append", "=", "cflags", ")", "cl", "(", "'Optimization'", ",", "map", "=", "{", "'0'", ":", "'d'", ",", "'1'", ":", "'1'", ",", "'2'", ":", "'2'", ",", "'3'", ":", "'x'", "}", ",", "prefix", "=", "'/O'", ",", "default", "=", "'2'", ")", "cl", "(", "'InlineFunctionExpansion'", ",", "prefix", "=", "'/Ob'", ")", "cl", "(", "'DisableSpecificWarnings'", ",", "prefix", "=", "'/wd'", ")", "cl", "(", "'StringPooling'", ",", "map", "=", "{", "'true'", ":", "'/GF'", "}", ")", "cl", "(", "'EnableFiberSafeOptimizations'", ",", "map", "=", "{", "'true'", ":", "'/GT'", "}", ")", "cl", "(", "'OmitFramePointers'", ",", "map", "=", "{", "'false'", ":", "'-'", ",", "'true'", ":", "''", "}", ",", "prefix", "=", "'/Oy'", ")", "cl", "(", "'EnableIntrinsicFunctions'", ",", "map", "=", "{", "'false'", ":", "'-'", ",", "'true'", ":", "''", "}", ",", "prefix", "=", "'/Oi'", ")", "cl", "(", "'FavorSizeOrSpeed'", ",", "map", "=", "{", "'1'", ":", "'t'", ",", "'2'", ":", "'s'", "}", ",", "prefix", "=", "'/O'", ")", "cl", "(", "'FloatingPointModel'", ",", "map", "=", "{", "'0'", ":", "'precise'", ",", "'1'", ":", "'strict'", ",", "'2'", ":", "'fast'", "}", ",", "prefix", "=", "'/fp:'", ",", "default", "=", "'0'", ")", "cl", "(", "'CompileAsManaged'", ",", "map", "=", "{", "'false'", ":", "''", ",", "'true'", ":", "'/clr'", "}", ")", "cl", "(", "'WholeProgramOptimization'", ",", "map", "=", "{", "'true'", ":", "'/GL'", "}", ")", "cl", "(", "'WarningLevel'", ",", "prefix", "=", "'/W'", ")", "cl", "(", "'WarnAsError'", ",", "map", "=", "{", "'true'", ":", "'/WX'", "}", ")", "cl", "(", "'CallingConvention'", ",", "map", "=", "{", "'0'", ":", "'d'", ",", "'1'", ":", "'r'", ",", "'2'", ":", "'z'", ",", "'3'", ":", "'v'", "}", ",", "prefix", "=", "'/G'", ")", "cl", "(", "'DebugInformationFormat'", ",", "map", "=", "{", "'1'", ":", "'7'", ",", "'3'", ":", "'i'", ",", "'4'", ":", "'I'", "}", ",", "prefix", "=", "'/Z'", ")", "cl", "(", "'RuntimeTypeInfo'", ",", "map", "=", "{", "'true'", ":", "'/GR'", ",", "'false'", ":", "'/GR-'", "}", ")", "cl", "(", "'EnableFunctionLevelLinking'", ",", "map", "=", "{", "'true'", ":", "'/Gy'", ",", "'false'", ":", "'/Gy-'", "}", ")", "cl", "(", "'MinimalRebuild'", ",", "map", "=", "{", "'true'", ":", "'/Gm'", "}", ")", "cl", "(", "'BufferSecurityCheck'", ",", "map", "=", "{", "'true'", ":", "'/GS'", ",", "'false'", ":", "'/GS-'", "}", ")", "cl", "(", "'BasicRuntimeChecks'", ",", "map", "=", "{", "'1'", ":", "'s'", ",", "'2'", ":", "'u'", ",", "'3'", ":", "'1'", "}", ",", "prefix", "=", "'/RTC'", ")", "cl", "(", "'RuntimeLibrary'", ",", "map", "=", "{", "'0'", ":", "'T'", ",", "'1'", ":", "'Td'", ",", "'2'", ":", "'D'", ",", "'3'", ":", "'Dd'", "}", ",", "prefix", "=", "'/M'", ")", "cl", "(", "'ExceptionHandling'", ",", "map", "=", "{", "'1'", ":", "'sc'", ",", "'2'", ":", "'a'", "}", ",", "prefix", "=", "'/EH'", ")", "cl", "(", "'DefaultCharIsUnsigned'", ",", "map", "=", "{", "'true'", ":", "'/J'", "}", ")", "cl", "(", "'TreatWChar_tAsBuiltInType'", ",", "map", "=", "{", "'false'", ":", "'-'", ",", "'true'", ":", "''", "}", ",", "prefix", "=", "'/Zc:wchar_t'", ")", 
"cl", "(", "'EnablePREfast'", ",", "map", "=", "{", "'true'", ":", "'/analyze'", "}", ")", "cl", "(", "'AdditionalOptions'", ",", "prefix", "=", "''", ")", "cl", "(", "'EnableEnhancedInstructionSet'", ",", "map", "=", "{", "'1'", ":", "'SSE'", ",", "'2'", ":", "'SSE2'", ",", "'3'", ":", "'AVX'", ",", "'4'", ":", "'IA32'", ",", "'5'", ":", "'AVX2'", "}", ",", "prefix", "=", "'/arch:'", ")", "cflags", ".", "extend", "(", "[", "'/FI'", "+", "f", "for", "f", "in", "self", ".", "_Setting", "(", "(", "'VCCLCompilerTool'", ",", "'ForcedIncludeFiles'", ")", ",", "config", ",", "default", "=", "[", "]", ")", "]", ")", "if", "self", ".", "vs_version", ".", "short_name", "in", "(", "'2013'", ",", "'2013e'", ",", "'2015'", ")", ":", "# New flag required in 2013 to maintain previous PDB behavior.", "cflags", ".", "append", "(", "'/FS'", ")", "# ninja handles parallelism by itself, don't have the compiler do it too.", "cflags", "=", "filter", "(", "lambda", "x", ":", "not", "x", ".", "startswith", "(", "'/MP'", ")", ",", "cflags", ")", "return", "cflags" ]
[ 425, 2 ]
[ 475, 17 ]
python
en
['en', 'en', 'en']
True
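As a hedged illustration of the setting-to-flag translation that GetCflags performs in the record above, the following standalone sketch re-creates a few of the map=/prefix= rules. The dictionaries and the cflags_for name are hypothetical, and only a handful of the settings are covered.
# Simplified sketch of the MSVS-setting -> cl flag mapping (not the gyp API).
_OPTIMIZATION = {'0': '/Od', '1': '/O1', '2': '/O2', '3': '/Ox'}
_RUNTIME_LIBRARY = {'0': '/MT', '1': '/MTd', '2': '/MD', '3': '/MDd'}

def cflags_for(settings):
    flags = []
    flags.append(_OPTIMIZATION.get(settings.get('Optimization', '2'), '/O2'))
    if 'WarningLevel' in settings:
        flags.append('/W' + settings['WarningLevel'])
    if settings.get('WarnAsError') == 'true':
        flags.append('/WX')
    if 'RuntimeLibrary' in settings:
        flags.append(_RUNTIME_LIBRARY[settings['RuntimeLibrary']])
    flags.extend(settings.get('AdditionalOptions', []))
    # ninja manages parallelism itself, so any /MP the project adds is dropped,
    # just as the real GetCflags does.
    return [f for f in flags if not f.startswith('/MP')]

print(cflags_for({'Optimization': '2', 'WarningLevel': '4', 'WarnAsError': 'true',
                  'RuntimeLibrary': '2', 'AdditionalOptions': ['/MP', '/utf-8']}))
# ['/O2', '/W4', '/WX', '/MD', '/utf-8']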
MsvsSettings._GetPchFlags
(self, config, extension)
Get the flags to be added to the cflags for precompiled header support.
Get the flags to be added to the cflags for precompiled header support.
def _GetPchFlags(self, config, extension): """Get the flags to be added to the cflags for precompiled header support. """ config = self._TargetConfig(config) # The PCH is only built once by a particular source file. Usage of PCH must # only be for the same language (i.e. C vs. C++), so only include the pch # flags when the language matches. if self.msvs_precompiled_header[config]: source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1] if _LanguageMatchesForPch(source_ext, extension): pch = os.path.split(self.msvs_precompiled_header[config])[1] return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch'] return []
[ "def", "_GetPchFlags", "(", "self", ",", "config", ",", "extension", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "# The PCH is only built once by a particular source file. Usage of PCH must", "# only be for the same language (i.e. C vs. C++), so only include the pch", "# flags when the language matches.", "if", "self", ".", "msvs_precompiled_header", "[", "config", "]", ":", "source_ext", "=", "os", ".", "path", ".", "splitext", "(", "self", ".", "msvs_precompiled_source", "[", "config", "]", ")", "[", "1", "]", "if", "_LanguageMatchesForPch", "(", "source_ext", ",", "extension", ")", ":", "pch", "=", "os", ".", "path", ".", "split", "(", "self", ".", "msvs_precompiled_header", "[", "config", "]", ")", "[", "1", "]", "return", "[", "'/Yu'", "+", "pch", ",", "'/FI'", "+", "pch", ",", "'/Fp${pchprefix}.'", "+", "pch", "+", "'.pch'", "]", "return", "[", "]" ]
[ 477, 2 ]
[ 489, 14 ]
python
en
['en', 'en', 'en']
True
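A minimal sketch of the precompiled-header logic in the record above, assuming hypothetical helper and file names; it mirrors the language-match check and the /Yu, /FI, /Fp flag triple but is not the gyp implementation.
import os

# Sketch only: the extension lists stand in for gyp's language-match helper.
def pch_flags(precompiled_header, precompiled_source, extension):
    c_exts = ('.c',)
    cc_exts = ('.cc', '.cxx', '.cpp')
    source_ext = os.path.splitext(precompiled_source)[1]
    same_language = ((source_ext in c_exts and extension in c_exts) or
                     (source_ext in cc_exts and extension in cc_exts))
    if precompiled_header and same_language:
        pch = os.path.split(precompiled_header)[1]
        return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
    return []

print(pch_flags('src/precompile.h', 'src/precompile.cc', '.cc'))
# ['/Yuprecompile.h', '/FIprecompile.h', '/Fp${pchprefix}.precompile.h.pch']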
MsvsSettings.GetCflagsC
(self, config)
Returns the flags that need to be added to .c compilations.
Returns the flags that need to be added to .c compilations.
def GetCflagsC(self, config): """Returns the flags that need to be added to .c compilations.""" config = self._TargetConfig(config) return self._GetPchFlags(config, '.c')
[ "def", "GetCflagsC", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "return", "self", ".", "_GetPchFlags", "(", "config", ",", "'.c'", ")" ]
[ 491, 2 ]
[ 494, 42 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetCflagsCC
(self, config)
Returns the flags that need to be added to .cc compilations.
Returns the flags that need to be added to .cc compilations.
def GetCflagsCC(self, config): """Returns the flags that need to be added to .cc compilations.""" config = self._TargetConfig(config) return ['/TP'] + self._GetPchFlags(config, '.cc')
[ "def", "GetCflagsCC", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "return", "[", "'/TP'", "]", "+", "self", ".", "_GetPchFlags", "(", "config", ",", "'.cc'", ")" ]
[ 496, 2 ]
[ 499, 53 ]
python
en
['en', 'en', 'en']
True
MsvsSettings._GetAdditionalLibraryDirectories
(self, root, config, gyp_to_build_path)
Get and normalize the list of paths in AdditionalLibraryDirectories setting.
Get and normalize the list of paths in AdditionalLibraryDirectories setting.
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path): """Get and normalize the list of paths in AdditionalLibraryDirectories setting.""" config = self._TargetConfig(config) libpaths = self._Setting((root, 'AdditionalLibraryDirectories'), config, default=[]) libpaths = [os.path.normpath( gyp_to_build_path(self.ConvertVSMacros(p, config=config))) for p in libpaths] return ['/LIBPATH:"' + p + '"' for p in libpaths]
[ "def", "_GetAdditionalLibraryDirectories", "(", "self", ",", "root", ",", "config", ",", "gyp_to_build_path", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "libpaths", "=", "self", ".", "_Setting", "(", "(", "root", ",", "'AdditionalLibraryDirectories'", ")", ",", "config", ",", "default", "=", "[", "]", ")", "libpaths", "=", "[", "os", ".", "path", ".", "normpath", "(", "gyp_to_build_path", "(", "self", ".", "ConvertVSMacros", "(", "p", ",", "config", "=", "config", ")", ")", ")", "for", "p", "in", "libpaths", "]", "return", "[", "'/LIBPATH:\"'", "+", "p", "+", "'\"'", "for", "p", "in", "libpaths", "]" ]
[ 501, 2 ]
[ 510, 53 ]
python
en
['en', 'en', 'en']
True
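The /LIBPATH construction documented above can be illustrated with a short standalone snippet; the function name is hypothetical, and macro expansion (ConvertVSMacros, gyp_to_build_path) is deliberately omitted.
import os

# Hypothetical helper (not the gyp API): shows how AdditionalLibraryDirectories
# entries become quoted /LIBPATH: switches after path normalization.
def libpath_flags(library_dirs):
    normalized = [os.path.normpath(p) for p in library_dirs]
    return ['/LIBPATH:"' + p + '"' for p in normalized]

# On a POSIX host this prints:
# ['/LIBPATH:"third_party/lib"', '/LIBPATH:"out/Release/lib"']
print(libpath_flags(['third_party/lib/', 'out/Release/./lib']))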
MsvsSettings.GetLibFlags
(self, config, gyp_to_build_path)
Returns the flags that need to be added to lib commands.
Returns the flags that need to be added to lib commands.
def GetLibFlags(self, config, gyp_to_build_path): """Returns the flags that need to be added to lib commands.""" config = self._TargetConfig(config) libflags = [] lib = self._GetWrapper(self, self.msvs_settings[config], 'VCLibrarianTool', append=libflags) libflags.extend(self._GetAdditionalLibraryDirectories( 'VCLibrarianTool', config, gyp_to_build_path)) lib('LinkTimeCodeGeneration', map={'true': '/LTCG'}) lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'}, prefix='/MACHINE:') lib('AdditionalOptions') return libflags
[ "def", "GetLibFlags", "(", "self", ",", "config", ",", "gyp_to_build_path", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "libflags", "=", "[", "]", "lib", "=", "self", ".", "_GetWrapper", "(", "self", ",", "self", ".", "msvs_settings", "[", "config", "]", ",", "'VCLibrarianTool'", ",", "append", "=", "libflags", ")", "libflags", ".", "extend", "(", "self", ".", "_GetAdditionalLibraryDirectories", "(", "'VCLibrarianTool'", ",", "config", ",", "gyp_to_build_path", ")", ")", "lib", "(", "'LinkTimeCodeGeneration'", ",", "map", "=", "{", "'true'", ":", "'/LTCG'", "}", ")", "lib", "(", "'TargetMachine'", ",", "map", "=", "{", "'1'", ":", "'X86'", ",", "'17'", ":", "'X64'", ",", "'3'", ":", "'ARM'", "}", ",", "prefix", "=", "'/MACHINE:'", ")", "lib", "(", "'AdditionalOptions'", ")", "return", "libflags" ]
[ 512, 2 ]
[ 524, 19 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetDefFile
(self, gyp_to_build_path)
Returns the .def file from sources, if any. Otherwise returns None.
Returns the .def file from sources, if any. Otherwise returns None.
def GetDefFile(self, gyp_to_build_path): """Returns the .def file from sources, if any. Otherwise returns None.""" spec = self.spec if spec['type'] in ('shared_library', 'loadable_module', 'executable'): def_files = [s for s in spec.get('sources', []) if s.endswith('.def')] if len(def_files) == 1: return gyp_to_build_path(def_files[0]) elif len(def_files) > 1: raise Exception("Multiple .def files") return None
[ "def", "GetDefFile", "(", "self", ",", "gyp_to_build_path", ")", ":", "spec", "=", "self", ".", "spec", "if", "spec", "[", "'type'", "]", "in", "(", "'shared_library'", ",", "'loadable_module'", ",", "'executable'", ")", ":", "def_files", "=", "[", "s", "for", "s", "in", "spec", ".", "get", "(", "'sources'", ",", "[", "]", ")", "if", "s", ".", "endswith", "(", "'.def'", ")", "]", "if", "len", "(", "def_files", ")", "==", "1", ":", "return", "gyp_to_build_path", "(", "def_files", "[", "0", "]", ")", "elif", "len", "(", "def_files", ")", ">", "1", ":", "raise", "Exception", "(", "\"Multiple .def files\"", ")", "return", "None" ]
[ 526, 2 ]
[ 535, 15 ]
python
en
['en', 'en', 'en']
True
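A hedged, standalone restatement of the .def lookup rule in the record above (exactly one .def source is allowed for linkable target types); the names are illustrative only.
# Sketch of the .def-file selection rule, not the gyp method itself.
def find_def_file(target_type, sources):
    if target_type in ('shared_library', 'loadable_module', 'executable'):
        def_files = [s for s in sources if s.endswith('.def')]
        if len(def_files) == 1:
            return def_files[0]
        elif len(def_files) > 1:
            raise Exception('Multiple .def files')
    return None

print(find_def_file('shared_library', ['a.cc', 'exports.def']))  # exports.def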
MsvsSettings._GetDefFileAsLdflags
(self, ldflags, gyp_to_build_path)
.def files get implicitly converted to a ModuleDefinitionFile for the linker in the VS generator. Emulate that behaviour here.
.def files get implicitly converted to a ModuleDefinitionFile for the linker in the VS generator. Emulate that behaviour here.
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path): """.def files get implicitly converted to a ModuleDefinitionFile for the linker in the VS generator. Emulate that behaviour here.""" def_file = self.GetDefFile(gyp_to_build_path) if def_file: ldflags.append('/DEF:"%s"' % def_file)
[ "def", "_GetDefFileAsLdflags", "(", "self", ",", "ldflags", ",", "gyp_to_build_path", ")", ":", "def_file", "=", "self", ".", "GetDefFile", "(", "gyp_to_build_path", ")", "if", "def_file", ":", "ldflags", ".", "append", "(", "'/DEF:\"%s\"'", "%", "def_file", ")" ]
[ 537, 2 ]
[ 542, 44 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetPGDName
(self, config, expand_special)
Gets the explicitly overridden pgd name for a target or returns None if it's not overridden.
Gets the explicitly overridden pgd name for a target or returns None if it's not overridden.
def GetPGDName(self, config, expand_special): """Gets the explicitly overridden pgd name for a target or returns None if it's not overridden.""" config = self._TargetConfig(config) output_file = self._Setting( ('VCLinkerTool', 'ProfileGuidedDatabase'), config) if output_file: output_file = expand_special(self.ConvertVSMacros( output_file, config=config)) return output_file
[ "def", "GetPGDName", "(", "self", ",", "config", ",", "expand_special", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "output_file", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'ProfileGuidedDatabase'", ")", ",", "config", ")", "if", "output_file", ":", "output_file", "=", "expand_special", "(", "self", ".", "ConvertVSMacros", "(", "output_file", ",", "config", "=", "config", ")", ")", "return", "output_file" ]
[ 544, 2 ]
[ 553, 22 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetLdflags
(self, config, gyp_to_build_path, expand_special, manifest_base_name, output_name, is_executable, build_dir)
Returns the flags that need to be added to link commands, and the manifest files.
Returns the flags that need to be added to link commands, and the manifest files.
def GetLdflags(self, config, gyp_to_build_path, expand_special, manifest_base_name, output_name, is_executable, build_dir): """Returns the flags that need to be added to link commands, and the manifest files.""" config = self._TargetConfig(config) ldflags = [] ld = self._GetWrapper(self, self.msvs_settings[config], 'VCLinkerTool', append=ldflags) self._GetDefFileAsLdflags(ldflags, gyp_to_build_path) ld('GenerateDebugInformation', map={'true': '/DEBUG'}) ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'}, prefix='/MACHINE:') ldflags.extend(self._GetAdditionalLibraryDirectories( 'VCLinkerTool', config, gyp_to_build_path)) ld('DelayLoadDLLs', prefix='/DELAYLOAD:') ld('TreatLinkerWarningAsErrors', prefix='/WX', map={'true': '', 'false': ':NO'}) out = self.GetOutputName(config, expand_special) if out: ldflags.append('/OUT:' + out) pdb = self.GetPDBName(config, expand_special, output_name + '.pdb') if pdb: ldflags.append('/PDB:' + pdb) pgd = self.GetPGDName(config, expand_special) if pgd: ldflags.append('/PGD:' + pgd) map_file = self.GetMapFileName(config, expand_special) ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file else '/MAP'}) ld('MapExports', map={'true': '/MAPINFO:EXPORTS'}) ld('AdditionalOptions', prefix='') minimum_required_version = self._Setting( ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='') if minimum_required_version: minimum_required_version = ',' + minimum_required_version ld('SubSystem', map={'1': 'CONSOLE%s' % minimum_required_version, '2': 'WINDOWS%s' % minimum_required_version}, prefix='/SUBSYSTEM:') stack_reserve_size = self._Setting( ('VCLinkerTool', 'StackReserveSize'), config, default='') if stack_reserve_size: stack_commit_size = self._Setting( ('VCLinkerTool', 'StackCommitSize'), config, default='') if stack_commit_size: stack_commit_size = ',' + stack_commit_size ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size)) ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE') ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL') ld('BaseAddress', prefix='/BASE:') ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED') ld('RandomizedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE') ld('DataExecutionPrevention', map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT') ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:') ld('ForceSymbolReferences', prefix='/INCLUDE:') ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:') ld('LinkTimeCodeGeneration', map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE', '4': ':PGUPDATE'}, prefix='/LTCG') ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:') ld('ResourceOnlyDLL', map={'true': '/NOENTRY'}) ld('EntryPointSymbol', prefix='/ENTRY:') ld('Profile', map={'true': '/PROFILE'}) ld('LargeAddressAware', map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE') # TODO(scottmg): This should sort of be somewhere else (not really a flag). ld('AdditionalDependencies', prefix='') if self.GetArch(config) == 'x86': safeseh_default = 'true' else: safeseh_default = None ld('ImageHasSafeExceptionHandlers', map={'false': ':NO', 'true': ''}, prefix='/SAFESEH', default=safeseh_default) # If the base address is not specifically controlled, DYNAMICBASE should # be on by default. base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED', ldflags) if not base_flags: ldflags.append('/DYNAMICBASE') # If the NXCOMPAT flag has not been specified, default to on. 
Despite the # documentation that says this only defaults to on when the subsystem is # Vista or greater (which applies to the linker), the IDE defaults it on # unless it's explicitly off. if not filter(lambda x: 'NXCOMPAT' in x, ldflags): ldflags.append('/NXCOMPAT') have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags) manifest_flags, intermediate_manifest, manifest_files = \ self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path, is_executable and not have_def_file, build_dir) ldflags.extend(manifest_flags) return ldflags, intermediate_manifest, manifest_files
[ "def", "GetLdflags", "(", "self", ",", "config", ",", "gyp_to_build_path", ",", "expand_special", ",", "manifest_base_name", ",", "output_name", ",", "is_executable", ",", "build_dir", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "ldflags", "=", "[", "]", "ld", "=", "self", ".", "_GetWrapper", "(", "self", ",", "self", ".", "msvs_settings", "[", "config", "]", ",", "'VCLinkerTool'", ",", "append", "=", "ldflags", ")", "self", ".", "_GetDefFileAsLdflags", "(", "ldflags", ",", "gyp_to_build_path", ")", "ld", "(", "'GenerateDebugInformation'", ",", "map", "=", "{", "'true'", ":", "'/DEBUG'", "}", ")", "ld", "(", "'TargetMachine'", ",", "map", "=", "{", "'1'", ":", "'X86'", ",", "'17'", ":", "'X64'", ",", "'3'", ":", "'ARM'", "}", ",", "prefix", "=", "'/MACHINE:'", ")", "ldflags", ".", "extend", "(", "self", ".", "_GetAdditionalLibraryDirectories", "(", "'VCLinkerTool'", ",", "config", ",", "gyp_to_build_path", ")", ")", "ld", "(", "'DelayLoadDLLs'", ",", "prefix", "=", "'/DELAYLOAD:'", ")", "ld", "(", "'TreatLinkerWarningAsErrors'", ",", "prefix", "=", "'/WX'", ",", "map", "=", "{", "'true'", ":", "''", ",", "'false'", ":", "':NO'", "}", ")", "out", "=", "self", ".", "GetOutputName", "(", "config", ",", "expand_special", ")", "if", "out", ":", "ldflags", ".", "append", "(", "'/OUT:'", "+", "out", ")", "pdb", "=", "self", ".", "GetPDBName", "(", "config", ",", "expand_special", ",", "output_name", "+", "'.pdb'", ")", "if", "pdb", ":", "ldflags", ".", "append", "(", "'/PDB:'", "+", "pdb", ")", "pgd", "=", "self", ".", "GetPGDName", "(", "config", ",", "expand_special", ")", "if", "pgd", ":", "ldflags", ".", "append", "(", "'/PGD:'", "+", "pgd", ")", "map_file", "=", "self", ".", "GetMapFileName", "(", "config", ",", "expand_special", ")", "ld", "(", "'GenerateMapFile'", ",", "map", "=", "{", "'true'", ":", "'/MAP:'", "+", "map_file", "if", "map_file", "else", "'/MAP'", "}", ")", "ld", "(", "'MapExports'", ",", "map", "=", "{", "'true'", ":", "'/MAPINFO:EXPORTS'", "}", ")", "ld", "(", "'AdditionalOptions'", ",", "prefix", "=", "''", ")", "minimum_required_version", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'MinimumRequiredVersion'", ")", ",", "config", ",", "default", "=", "''", ")", "if", "minimum_required_version", ":", "minimum_required_version", "=", "','", "+", "minimum_required_version", "ld", "(", "'SubSystem'", ",", "map", "=", "{", "'1'", ":", "'CONSOLE%s'", "%", "minimum_required_version", ",", "'2'", ":", "'WINDOWS%s'", "%", "minimum_required_version", "}", ",", "prefix", "=", "'/SUBSYSTEM:'", ")", "stack_reserve_size", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'StackReserveSize'", ")", ",", "config", ",", "default", "=", "''", ")", "if", "stack_reserve_size", ":", "stack_commit_size", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'StackCommitSize'", ")", ",", "config", ",", "default", "=", "''", ")", "if", "stack_commit_size", ":", "stack_commit_size", "=", "','", "+", "stack_commit_size", "ldflags", ".", "append", "(", "'/STACK:%s%s'", "%", "(", "stack_reserve_size", ",", "stack_commit_size", ")", ")", "ld", "(", "'TerminalServerAware'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/TSAWARE'", ")", "ld", "(", "'LinkIncremental'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/INCREMENTAL'", ")", "ld", "(", "'BaseAddress'", ",", "prefix", "=", "'/BASE:'", ")", "ld", "(", "'FixedBaseAddress'", ",", 
"map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/FIXED'", ")", "ld", "(", "'RandomizedBaseAddress'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/DYNAMICBASE'", ")", "ld", "(", "'DataExecutionPrevention'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/NXCOMPAT'", ")", "ld", "(", "'OptimizeReferences'", ",", "map", "=", "{", "'1'", ":", "'NOREF'", ",", "'2'", ":", "'REF'", "}", ",", "prefix", "=", "'/OPT:'", ")", "ld", "(", "'ForceSymbolReferences'", ",", "prefix", "=", "'/INCLUDE:'", ")", "ld", "(", "'EnableCOMDATFolding'", ",", "map", "=", "{", "'1'", ":", "'NOICF'", ",", "'2'", ":", "'ICF'", "}", ",", "prefix", "=", "'/OPT:'", ")", "ld", "(", "'LinkTimeCodeGeneration'", ",", "map", "=", "{", "'1'", ":", "''", ",", "'2'", ":", "':PGINSTRUMENT'", ",", "'3'", ":", "':PGOPTIMIZE'", ",", "'4'", ":", "':PGUPDATE'", "}", ",", "prefix", "=", "'/LTCG'", ")", "ld", "(", "'IgnoreDefaultLibraryNames'", ",", "prefix", "=", "'/NODEFAULTLIB:'", ")", "ld", "(", "'ResourceOnlyDLL'", ",", "map", "=", "{", "'true'", ":", "'/NOENTRY'", "}", ")", "ld", "(", "'EntryPointSymbol'", ",", "prefix", "=", "'/ENTRY:'", ")", "ld", "(", "'Profile'", ",", "map", "=", "{", "'true'", ":", "'/PROFILE'", "}", ")", "ld", "(", "'LargeAddressAware'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/LARGEADDRESSAWARE'", ")", "# TODO(scottmg): This should sort of be somewhere else (not really a flag).", "ld", "(", "'AdditionalDependencies'", ",", "prefix", "=", "''", ")", "if", "self", ".", "GetArch", "(", "config", ")", "==", "'x86'", ":", "safeseh_default", "=", "'true'", "else", ":", "safeseh_default", "=", "None", "ld", "(", "'ImageHasSafeExceptionHandlers'", ",", "map", "=", "{", "'false'", ":", "':NO'", ",", "'true'", ":", "''", "}", ",", "prefix", "=", "'/SAFESEH'", ",", "default", "=", "safeseh_default", ")", "# If the base address is not specifically controlled, DYNAMICBASE should", "# be on by default.", "base_flags", "=", "filter", "(", "lambda", "x", ":", "'DYNAMICBASE'", "in", "x", "or", "x", "==", "'/FIXED'", ",", "ldflags", ")", "if", "not", "base_flags", ":", "ldflags", ".", "append", "(", "'/DYNAMICBASE'", ")", "# If the NXCOMPAT flag has not been specified, default to on. Despite the", "# documentation that says this only defaults to on when the subsystem is", "# Vista or greater (which applies to the linker), the IDE defaults it on", "# unless it's explicitly off.", "if", "not", "filter", "(", "lambda", "x", ":", "'NXCOMPAT'", "in", "x", ",", "ldflags", ")", ":", "ldflags", ".", "append", "(", "'/NXCOMPAT'", ")", "have_def_file", "=", "filter", "(", "lambda", "x", ":", "x", ".", "startswith", "(", "'/DEF:'", ")", ",", "ldflags", ")", "manifest_flags", ",", "intermediate_manifest", ",", "manifest_files", "=", "self", ".", "_GetLdManifestFlags", "(", "config", ",", "manifest_base_name", ",", "gyp_to_build_path", ",", "is_executable", "and", "not", "have_def_file", ",", "build_dir", ")", "ldflags", ".", "extend", "(", "manifest_flags", ")", "return", "ldflags", ",", "intermediate_manifest", ",", "manifest_files" ]
[ 555, 2 ]
[ 656, 57 ]
python
en
['en', 'en', 'en']
True
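To close the section, a simplified sketch of two GetLdflags behaviours documented in the record above: the SUBSYSTEM value picking up MinimumRequiredVersion, and the /DYNAMICBASE and /NXCOMPAT defaults that are appended when nothing else controls them. The ldflags_for name and the reduced settings dict are assumptions; the real method handles many more options.
# Assumption-laden sketch, not the gyp implementation.
def ldflags_for(settings):
    flags = []
    subsystem = {'1': 'CONSOLE', '2': 'WINDOWS'}.get(settings.get('SubSystem'))
    if subsystem:
        version = settings.get('MinimumRequiredVersion', '')
        flags.append('/SUBSYSTEM:' + subsystem + (',' + version if version else ''))
    # If nothing set a base-address policy, DYNAMICBASE defaults to on.
    if not any('DYNAMICBASE' in f or f == '/FIXED' for f in flags):
        flags.append('/DYNAMICBASE')
    # Likewise NXCOMPAT defaults to on unless explicitly controlled.
    if not any('NXCOMPAT' in f for f in flags):
        flags.append('/NXCOMPAT')
    return flags

print(ldflags_for({'SubSystem': '1', 'MinimumRequiredVersion': '5.02'}))
# ['/SUBSYSTEM:CONSOLE,5.02', '/DYNAMICBASE', '/NXCOMPAT']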