Dataset schema (one function record per row):

| column | type |
|---|---|
| identifier | string, length 1–155 |
| parameters | string, length 2–6.09k |
| docstring | string, length 11–63.4k |
| docstring_summary | string, length 0–63.4k |
| function | string, length 29–99.8k |
| function_tokens | sequence — the function text split into individual tokens |
| start_point | sequence |
| end_point | sequence |
| language | string, 1 class |
| docstring_language | string, length 2–7 |
| docstring_language_predictions | string, length 18–23 |
| is_langid_reliable | string, 2 classes |
identifier: RemovePrefix
parameters: (a, prefix)
docstring_summary: Returns 'a' without 'prefix' if it starts with 'prefix'.
function:

```python
def RemovePrefix(a, prefix):
  """Returns 'a' without 'prefix' if it starts with 'prefix'."""
  return a[len(prefix):] if a.startswith(prefix) else a
```

start_point: [72, 0] | end_point: [74, 55] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
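A quick usage sketch (assuming RemovePrefix from the record above is in scope; the inputs are made up for illustration):

```python
# Assumes RemovePrefix from the record above is in scope.
print(RemovePrefix('${obj}/foo.o', '${obj}/'))  # -> 'foo.o'  (prefix stripped)
print(RemovePrefix('foo.o', '${obj}/'))         # -> 'foo.o'  (no prefix, returned unchanged)
```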
identifier: CalculateVariables
parameters: (default_variables, params)
docstring_summary: Calculate additional variables for use in the build (called by gyp).
function:

```python
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))
```

start_point: [77, 0] | end_point: [79, 66] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: Compilable
parameters: (filename)
docstring_summary: Return true if the file is compilable (should be in OBJS).
function:

```python
def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS)."""
  return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
```

start_point: [82, 0] | end_point: [84, 65] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
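Compilable depends on a module-level COMPILABLE_EXTENSIONS constant that is not part of this record; the sketch below assumes a plausible value for it:

```python
# Assumption: COMPILABLE_EXTENSIONS is defined elsewhere in the module; the
# value here is illustrative only.
COMPILABLE_EXTENSIONS = ['.c', '.cc', '.cpp', '.cxx', '.s', '.S']

def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS)."""
  return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)

print(Compilable('foo.cc'))  # True
print(Compilable('foo.h'))   # False (headers are not compiled directly)
```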
identifier: Linkable
parameters: (filename)
docstring_summary: Return true if the file is linkable (should be on the link line).
function:

```python
def Linkable(filename):
  """Return true if the file is linkable (should be on the link line)."""
  return filename.endswith('.o')
```

start_point: [87, 0] | end_point: [89, 32] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: NormjoinPathForceCMakeSource
parameters: (base_path, rel_path)
docstring_summary: Resolves rel_path against base_path and returns the result.
function:

```python
def NormjoinPathForceCMakeSource(base_path, rel_path):
  """Resolves rel_path against base_path and returns the result.
  If rel_path is an absolute path it is returned unchanged.
  Otherwise it is resolved against base_path and normalized.
  If the result is a relative path, it is forced to be relative to the
  CMakeLists.txt.
  """
  if os.path.isabs(rel_path):
    return rel_path
  if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
    return rel_path
  # TODO: do we need to check base_path for absolute variables as well?
  return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
                      os.path.normpath(os.path.join(base_path, rel_path)))
```

start_point: [92, 0] | end_point: [106, 74] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
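Behavior sketch (assumes the function above plus the module-level FULL_PATH_VARS constant are in scope; the paths are hypothetical):

```python
# Assumes NormjoinPathForceCMakeSource and FULL_PATH_VARS are in scope.
print(NormjoinPathForceCMakeSource('out/gyp', '../../src/a.cc'))
# -> '${CMAKE_CURRENT_LIST_DIR}/src/a.cc'  (relative path re-rooted at the CMakeLists.txt)
print(NormjoinPathForceCMakeSource('out/gyp', '/usr/include/z.h'))
# -> '/usr/include/z.h'                    (absolute paths pass through unchanged)
```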
identifier: NormjoinPath
parameters: (base_path, rel_path)
docstring_summary: Resolves rel_path against base_path and returns the result.
function:

```python
def NormjoinPath(base_path, rel_path):
  """Resolves rel_path against base_path and returns the result.
  TODO: what is this really used for?
  If rel_path begins with '$' it is returned unchanged.
  Otherwise it is resolved against base_path if relative, then normalized.
  """
  if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
    return rel_path
  return os.path.normpath(os.path.join(base_path, rel_path))
```

start_point: [109, 0] | end_point: [117, 60] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: CMakeStringEscape
parameters: (a)
docstring_summary: Escapes the string 'a' for use inside a CMake string.
function:

```python
def CMakeStringEscape(a):
  """Escapes the string 'a' for use inside a CMake string.
  This means escaping
  '\' otherwise it may be seen as modifying the next character
  '"' otherwise it will end the string
  ';' otherwise the string becomes a list
  The following do not need to be escaped
  '#' when the lexer is in string state, this does not start a comment
  The following are yet unknown
  '$' generator variables (like ${obj}) must not be escaped,
      but text $ should be escaped
      what is wanted is to know which $ come from generator variables
  """
  return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
```

start_point: [120, 0] | end_point: [136, 72] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
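The escaping can be checked directly (assumes CMakeStringEscape from the record above is in scope):

```python
# Assumes CMakeStringEscape from the record above is in scope.
print(CMakeStringEscape(r'C:\path;with "quotes"'))
# -> C:\\path\;with \"quotes\"   (backslash, semicolon, and double quote escaped)
```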
identifier: SetFileProperty
parameters: (output, source_name, property_name, values, sep)
docstring_summary: Given a set of source files, sets the given property on them.
function:

```python
def SetFileProperty(output, source_name, property_name, values, sep):
  """Given a set of source files, sets the given property on them."""
  output.write('set_source_files_properties(')
  output.write(source_name)
  output.write(' PROPERTIES ')
  output.write(property_name)
  output.write(' "')
  for value in values:
    output.write(CMakeStringEscape(value))
    output.write(sep)
  output.write('")\n')
```

start_point: [139, 0] | end_point: [149, 22] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
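A minimal driver sketch showing the emitted CMake (assumes SetFileProperty and CMakeStringEscape from the records above are in scope; note the trailing separator before the closing quote):

```python
import io

# Assumes SetFileProperty and CMakeStringEscape are in scope.
buf = io.StringIO()
SetFileProperty(buf, 'foo.cc', 'COMPILE_FLAGS', ['-Wall', '-Wextra'], ' ')
print(buf.getvalue())
# set_source_files_properties(foo.cc PROPERTIES COMPILE_FLAGS "-Wall -Wextra ")
```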
identifier: SetFilesProperty
parameters: (output, variable, property_name, values, sep)
docstring_summary: Given a set of source files, sets the given property on them.
function:

```python
def SetFilesProperty(output, variable, property_name, values, sep):
  """Given a set of source files, sets the given property on them."""
  output.write('set_source_files_properties(')
  WriteVariable(output, variable)
  output.write(' PROPERTIES ')
  output.write(property_name)
  output.write(' "')
  for value in values:
    output.write(CMakeStringEscape(value))
    output.write(sep)
  output.write('")\n')
```

start_point: [152, 0] | end_point: [162, 22] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: SetTargetProperty
parameters: (output, target_name, property_name, values, sep='')
docstring_summary: Given a target, sets the given property.
function:

```python
def SetTargetProperty(output, target_name, property_name, values, sep=''):
  """Given a target, sets the given property."""
  output.write('set_target_properties(')
  output.write(target_name)
  output.write(' PROPERTIES ')
  output.write(property_name)
  output.write(' "')
  for value in values:
    output.write(CMakeStringEscape(value))
    output.write(sep)
  output.write('")\n')
```

start_point: [165, 0] | end_point: [175, 22] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: SetVariable
parameters: (output, variable_name, value)
docstring_summary: Sets a CMake variable.
function:

```python
def SetVariable(output, variable_name, value):
  """Sets a CMake variable."""
  output.write('set(')
  output.write(variable_name)
  output.write(' "')
  output.write(CMakeStringEscape(value))
  output.write('")\n')
```

start_point: [178, 0] | end_point: [184, 22] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'fil', 'en'] | is_langid_reliable: True
identifier: SetVariableList
parameters: (output, variable_name, values)
docstring_summary: Sets a CMake variable to a list.
function:

```python
def SetVariableList(output, variable_name, values):
  """Sets a CMake variable to a list."""
  if not values:
    return SetVariable(output, variable_name, "")
  if len(values) == 1:
    return SetVariable(output, variable_name, values[0])
  output.write('list(APPEND ')
  output.write(variable_name)
  output.write('\n "')
  output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
  output.write('")\n')
```

start_point: [187, 0] | end_point: [197, 22] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
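A sketch of the three output shapes (assumes SetVariable, SetVariableList, and CMakeStringEscape from the records above are in scope):

```python
import io

# Assumes SetVariable, SetVariableList, and CMakeStringEscape are in scope.
buf = io.StringIO()
SetVariableList(buf, 'empty', [])               # empty list falls back to set()
SetVariableList(buf, 'single', ['a.cc'])        # single value also uses set()
SetVariableList(buf, 'srcs', ['a.cc', 'b.cc'])  # two or more use list(APPEND ...)
print(buf.getvalue())
# set(empty "")
# set(single "a.cc")
# list(APPEND srcs
#  "a.cc"
#  "b.cc")
```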
identifier: UnsetVariable
parameters: (output, variable_name)
docstring_summary: Unsets a CMake variable.
function:

```python
def UnsetVariable(output, variable_name):
  """Unsets a CMake variable."""
  output.write('unset(')
  output.write(variable_name)
  output.write(')\n')
```

start_point: [200, 0] | end_point: [204, 21] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: StringToCMakeTargetName
parameters: (a)
docstring_summary: Converts the given string 'a' to a valid CMake target name.
function:

```python
def StringToCMakeTargetName(a):
  """Converts the given string 'a' to a valid CMake target name.
  All invalid characters are replaced by '_'.
  Invalid for cmake: ' ', '/', '(', ')', '"'
  Invalid for make: ':'
  Invalid for unknown reasons but cause failures: '.'
  """
  return a.translate(string.maketrans(' /():."', '_______'))
```

start_point: [231, 0] | end_point: [239, 60] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
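Note that string.maketrans exists only in Python 2; a Python 3 rendering of the same mapping uses str.maketrans. A hedged port plus an illustrative call:

```python
# Python 3 sketch of the same character mapping (string.maketrans is Python 2 only).
def StringToCMakeTargetName(a):
  """Converts 'a' to a valid CMake target name, replacing invalid characters by '_'."""
  return a.translate(str.maketrans(' /():."', '_______'))

print(StringToCMakeTargetName('third_party/foo.gyp:foo (host)'))
# -> 'third_party_foo_gyp_foo__host_'
```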
identifier: WriteActions
parameters: (target_name, actions, extra_sources, extra_deps, path_to_gyp, output)
docstring_summary: Write CMake for the 'actions' in the target.
function:

```python
def WriteActions(target_name, actions, extra_sources, extra_deps,
                 path_to_gyp, output):
  """Write CMake for the 'actions' in the target.
  Args:
    target_name: the name of the CMake target being generated.
    actions: the Gyp 'actions' dict for this target.
    extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
    path_to_gyp: relative path from CMakeLists.txt being generated to
      the Gyp file in which the target being generated is defined.
  """
  for action in actions:
    action_name = StringToCMakeTargetName(action['action_name'])
    action_target_name = '%s__%s' % (target_name, action_name)

    inputs = action['inputs']
    inputs_name = action_target_name + '__input'
    SetVariableList(output, inputs_name,
        [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])

    outputs = action['outputs']
    cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
                     for out in outputs]
    outputs_name = action_target_name + '__output'
    SetVariableList(output, outputs_name, cmake_outputs)

    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)

    if int(action.get('process_outputs_as_sources', False)):
      extra_sources.extend(zip(cmake_outputs, outputs))

    # add_custom_command
    output.write('add_custom_command(OUTPUT ')
    WriteVariable(output, outputs_name)
    output.write('\n')

    if len(dirs) > 0:
      for directory in dirs:
        output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
        output.write(directory)
        output.write('\n')

    output.write(' COMMAND ')
    output.write(gyp.common.EncodePOSIXShellList(action['action']))
    output.write('\n')

    output.write(' DEPENDS ')
    WriteVariable(output, inputs_name)
    output.write('\n')

    output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
    output.write(path_to_gyp)
    output.write('\n')

    output.write(' COMMENT ')
    if 'message' in action:
      output.write(action['message'])
    else:
      output.write(action_target_name)
    output.write('\n')

    output.write(' VERBATIM\n')
    output.write(')\n')

    # add_custom_target
    output.write('add_custom_target(')
    output.write(action_target_name)
    output.write('\n DEPENDS ')
    WriteVariable(output, outputs_name)
    output.write('\n SOURCES ')
    WriteVariable(output, inputs_name)
    output.write('\n)\n')

    extra_deps.append(action_target_name)
```

start_point: [242, 0] | end_point: [318, 41] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
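To see what WriteActions emits end to end, a hypothetical driver is sketched below. It assumes the rest of the generator module (WriteVariable, gyp.common, and the helpers above) is importable and that it runs in an environment where these Python 2-era helpers work; the action dict is made up:

```python
import io

# Hypothetical action; all values are illustrative.
action = {
    'action_name': 'gen cc',
    'inputs': ['gen.py', 'input.txt'],
    'outputs': ['generated/out.cc'],
    'action': ['python', 'gen.py', 'input.txt'],
}
extra_sources, extra_deps = [], []
buf = io.StringIO()
WriteActions('mylib', [action], extra_sources, extra_deps, '../..', buf)
print(buf.getvalue())  # set(...) input/output lists, add_custom_command(...), add_custom_target(...)
print(extra_deps)      # ['mylib__gen_cc']
```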
identifier: WriteRules
parameters: (target_name, rules, extra_sources, extra_deps, path_to_gyp, output)
docstring_summary: Write CMake for the 'rules' in the target.
function:

```python
def WriteRules(target_name, rules, extra_sources, extra_deps,
               path_to_gyp, output):
  """Write CMake for the 'rules' in the target.
  Args:
    target_name: the name of the CMake target being generated.
    rules: the Gyp 'rules' dict for this target.
    extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
    path_to_gyp: relative path from CMakeLists.txt being generated to
      the Gyp file in which the target being generated is defined.
  """
  for rule in rules:
    rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])

    inputs = rule.get('inputs', [])
    inputs_name = rule_name + '__input'
    SetVariableList(output, inputs_name,
        [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
    outputs = rule['outputs']
    var_outputs = []

    for count, rule_source in enumerate(rule.get('rule_sources', [])):
      action_name = rule_name + '_' + str(count)

      rule_source_dirname, rule_source_basename = os.path.split(rule_source)
      rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)

      SetVariable(output, 'RULE_INPUT_PATH', rule_source)
      SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
      SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
      SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
      SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)

      # Create variables for the output, as 'local' variable will be unset.
      these_outputs = []
      for output_index, out in enumerate(outputs):
        output_name = action_name + '_' + str(output_index)
        SetVariable(output, output_name,
                    NormjoinRulePathForceCMakeSource(path_to_gyp, out,
                                                     rule_source))
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.append(('${' + output_name + '}', out))
        these_outputs.append('${' + output_name + '}')
        var_outputs.append('${' + output_name + '}')

      # add_custom_command
      output.write('add_custom_command(OUTPUT\n')
      for out in these_outputs:
        output.write(' ')
        output.write(out)
        output.write('\n')

      for directory in dirs:
        output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
        output.write(directory)
        output.write('\n')

      output.write(' COMMAND ')
      output.write(gyp.common.EncodePOSIXShellList(rule['action']))
      output.write('\n')

      output.write(' DEPENDS ')
      WriteVariable(output, inputs_name)
      output.write(' ')
      output.write(NormjoinPath(path_to_gyp, rule_source))
      output.write('\n')

      # CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
      # The cwd is the current build directory.
      output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
      output.write(path_to_gyp)
      output.write('\n')

      output.write(' COMMENT ')
      if 'message' in rule:
        output.write(rule['message'])
      else:
        output.write(action_name)
      output.write('\n')

      output.write(' VERBATIM\n')
      output.write(')\n')

      UnsetVariable(output, 'RULE_INPUT_PATH')
      UnsetVariable(output, 'RULE_INPUT_DIRNAME')
      UnsetVariable(output, 'RULE_INPUT_NAME')
      UnsetVariable(output, 'RULE_INPUT_ROOT')
      UnsetVariable(output, 'RULE_INPUT_EXT')

    # add_custom_target
    output.write('add_custom_target(')
    output.write(rule_name)
    output.write(' DEPENDS\n')
    for out in var_outputs:
      output.write(' ')
      output.write(out)
      output.write('\n')
    output.write('SOURCES ')
    WriteVariable(output, inputs_name)
    output.write('\n')
    for rule_source in rule.get('rule_sources', []):
      output.write(' ')
      output.write(NormjoinPath(path_to_gyp, rule_source))
      output.write('\n')
    output.write(')\n')
    extra_deps.append(rule_name)
```

start_point: [328, 0] | end_point: [439, 32] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: WriteCopies
parameters: (target_name, copies, extra_deps, path_to_gyp, output)
docstring_summary: Write CMake for the 'copies' in the target.
function:

```python
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
  """Write CMake for the 'copies' in the target.
  Args:
    target_name: the name of the CMake target being generated.
    copies: the Gyp 'copies' dict for this target.
    extra_deps: [<cmake_target>] to append with generated targets.
    path_to_gyp: relative path from CMakeLists.txt being generated to
      the Gyp file in which the target being generated is defined.
  """
  copy_name = target_name + '__copies'

  # CMake gets upset with custom targets with OUTPUT which specify no output.
  have_copies = any(copy['files'] for copy in copies)
  if not have_copies:
    output.write('add_custom_target(')
    output.write(copy_name)
    output.write(')\n')
    extra_deps.append(copy_name)
    return

  class Copy(object):
    def __init__(self, ext, command):
      self.cmake_inputs = []
      self.cmake_outputs = []
      self.gyp_inputs = []
      self.gyp_outputs = []
      self.ext = ext
      self.inputs_name = None
      self.outputs_name = None
      self.command = command

  file_copy = Copy('', 'copy')
  dir_copy = Copy('_dirs', 'copy_directory')

  for copy in copies:
    files = copy['files']
    destination = copy['destination']
    for src in files:
      path = os.path.normpath(src)
      basename = os.path.split(path)[1]
      dst = os.path.join(destination, basename)

      copy = file_copy if os.path.basename(src) else dir_copy
      copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
      copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
      copy.gyp_inputs.append(src)
      copy.gyp_outputs.append(dst)

  for copy in (file_copy, dir_copy):
    if copy.cmake_inputs:
      copy.inputs_name = copy_name + '__input' + copy.ext
      SetVariableList(output, copy.inputs_name, copy.cmake_inputs)

      copy.outputs_name = copy_name + '__output' + copy.ext
      SetVariableList(output, copy.outputs_name, copy.cmake_outputs)

  # add_custom_command
  output.write('add_custom_command(\n')

  output.write('OUTPUT')
  for copy in (file_copy, dir_copy):
    if copy.outputs_name:
      WriteVariable(output, copy.outputs_name, ' ')
  output.write('\n')

  for copy in (file_copy, dir_copy):
    for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
      # 'cmake -E copy src dst' will create the 'dst' directory if needed.
      output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
      output.write(src)
      output.write(' ')
      output.write(dst)
      output.write("\n")

  output.write('DEPENDS')
  for copy in (file_copy, dir_copy):
    if copy.inputs_name:
      WriteVariable(output, copy.inputs_name, ' ')
  output.write('\n')

  output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
  output.write(path_to_gyp)
  output.write('\n')

  output.write('COMMENT Copying for ')
  output.write(target_name)
  output.write('\n')

  output.write('VERBATIM\n')
  output.write(')\n')

  # add_custom_target
  output.write('add_custom_target(')
  output.write(copy_name)
  output.write('\n DEPENDS')
  for copy in (file_copy, dir_copy):
    if copy.outputs_name:
      WriteVariable(output, copy.outputs_name, ' ')
  output.write('\n SOURCES')
  if file_copy.inputs_name:
    WriteVariable(output, file_copy.inputs_name, ' ')
  output.write('\n)\n')
  extra_deps.append(copy_name)
```

start_point: [442, 0] | end_point: [547, 30] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
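One detail of WriteCopies worth spelling out: the file-versus-directory dispatch rests on os.path.basename returning an empty string for paths with a trailing separator:

```python
import os

# A trailing slash yields an empty basename, routing the entry to the
# 'copy_directory' branch; anything else is treated as a single-file 'copy'.
print(bool(os.path.basename('assets/icon.png')))  # True  -> file_copy ('copy')
print(bool(os.path.basename('assets/')))          # False -> dir_copy ('copy_directory')
```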
identifier: CreateCMakeTargetBaseName
parameters: (qualified_target)
docstring_summary: This is the name we would like the target to have.
function:

```python
def CreateCMakeTargetBaseName(qualified_target):
  """This is the name we would like the target to have."""
  _, gyp_target_name, gyp_target_toolset = (
      gyp.common.ParseQualifiedTarget(qualified_target))
  cmake_target_base_name = gyp_target_name
  if gyp_target_toolset and gyp_target_toolset != 'target':
    cmake_target_base_name += '_' + gyp_target_toolset
  return StringToCMakeTargetName(cmake_target_base_name)
```

start_point: [550, 0] | end_point: [557, 56] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: CreateCMakeTargetFullName
parameters: (qualified_target)
docstring_summary: An unambiguous name for the target.
function:

```python
def CreateCMakeTargetFullName(qualified_target):
  """An unambiguous name for the target."""
  gyp_file, gyp_target_name, gyp_target_toolset = (
      gyp.common.ParseQualifiedTarget(qualified_target))
  cmake_target_full_name = gyp_file + ':' + gyp_target_name
  if gyp_target_toolset and gyp_target_toolset != 'target':
    cmake_target_full_name += '_' + gyp_target_toolset
  return StringToCMakeTargetName(cmake_target_full_name)
```

start_point: [560, 0] | end_point: [567, 56] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
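The two naming helpers differ only in whether the .gyp path is folded in. The sketch below stubs the qualified-target parser; the 'file.gyp:target#toolset' shape is an assumption for illustration, and the real parsing lives in gyp.common.ParseQualifiedTarget:

```python
# Stub parser for illustration only; the real one is gyp.common.ParseQualifiedTarget.
def ParseQualifiedTarget(qualified_target):
    build_file, rest = qualified_target.split(':')
    target, _, toolset = rest.partition('#')
    return build_file, target, toolset

# With the two helpers above in scope (and the stub standing in for gyp.common):
# CreateCMakeTargetBaseName('a/b.gyp:bar#host') -> 'bar_host'
# CreateCMakeTargetFullName('a/b.gyp:bar#host') -> 'a_b_gyp_bar_host'
```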
identifier: ExpectTableColumnsToMatchOrderedList.validate_configuration
parameters: (self, configuration: Optional[ExpectationConfiguration])
docstring_summary: Validates that a configuration has been set, and sets a configuration if it has yet to be set.
function:

```python
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
    """
    Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
    necessary configuration arguments have been provided for the validation of the expectation.
    Args:
        configuration (OPTIONAL[ExpectationConfiguration]): \
            An optional Expectation Configuration entry that will be used to configure the expectation
    Returns:
        True if the configuration has been validated successfully. Otherwise, raises an exception
    """
    # Setting up a configuration
    super().validate_configuration(configuration)

    # Ensuring that a proper value has been provided
    try:
        assert "column_list" in configuration.kwargs, "column_list is required"
        assert (
            isinstance(configuration.kwargs["column_list"], (list, set, dict))
            or configuration.kwargs["column_list"] is None
        ), "column_list must be a list, set, or None"
        if isinstance(configuration.kwargs["column_list"], dict):
            assert (
                "$PARAMETER" in configuration.kwargs["column_list"]
            ), 'Evaluation Parameter dict for column_list kwarg must have "$PARAMETER" key.'
    except AssertionError as e:
        raise InvalidExpectationConfigurationError(str(e))
    return True
```

start_point: [81, 4] | end_point: [110, 19] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
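The asserts above translate into concrete accept/reject cases; the kwargs below are hypothetical:

```python
# Hypothetical kwargs and how the validation above treats them:
ok_1 = {"column_list": ["user_id", "email", "created_at"]}  # passes: list (a set works too)
ok_2 = {"column_list": None}                                # passes: None is allowed
ok_3 = {"column_list": {"$PARAMETER": "my_columns_param"}}  # passes: dict with "$PARAMETER"
bad_1 = {}                                                  # fails: column_list is required
bad_2 = {"column_list": "user_id"}                          # fails: a bare string is rejected
bad_3 = {"column_list": {"columns": []}}                    # fails: dict without "$PARAMETER"
```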
identifier: MetaFileDataAsset.file_lines_map_expectation
parameters: (cls, func)
docstring_summary: Constructs an expectation using file lines map semantics.
function:

```python
def file_lines_map_expectation(cls, func):
    """Constructs an expectation using file lines map semantics.
    The file_lines_map_expectation decorator handles boilerplate issues
    surrounding the common pattern of evaluating truthiness of some
    condition on a line-by-line basis in a file.
    Args:
        func (function): \
            The function implementing an expectation that will be applied
            line by line across a file. The function should take a file
            and return information about how many lines met expectations.
    Notes:
        Users can specify a skip value k that will cause the expectation
        function to disregard the first k lines of the file.
        file_lines_map_expectation will add a kwarg _lines to the called function with the nonnull lines \
        to process.
        null_lines_regex defines a regex used to skip lines, but can be overridden
    See also:
        :func:`expect_file_line_regex_match_count_to_be_between
        <great_expectations.data_asset.base.DataAsset.expect_file_line_regex_match_count_to_be_between>` \
        for an example of a file_lines_map_expectation
    """
    argspec = inspect.getfullargspec(func)[0][1:]

    @cls.expectation(argspec)
    @wraps(func)
    def inner_wrapper(
        self,
        skip=None,
        mostly=None,
        null_lines_regex=r"^\s*$",
        result_format=None,
        *args,
        **kwargs
    ):
        try:
            f = open(self._path)
        except OSError:
            raise

        if result_format is None:
            result_format = self.default_expectation_args["result_format"]
        result_format = parse_result_format(result_format)

        lines = f.readlines()  # Read in file lines

        # Skip k initial lines designated by the user
        if skip is not None and skip <= len(lines):
            try:
                assert float(skip).is_integer()
                assert float(skip) >= 0
            except (AssertionError, ValueError):
                raise ValueError("skip must be a positive integer")

            for i in range(1, skip + 1):
                lines.pop(0)

        if lines:
            if null_lines_regex is not None:
                null_lines = re.compile(
                    null_lines_regex
                )  # Ignore lines that are empty or have only white space ("null values" in the line-map context)
                boolean_mapped_null_lines = np.array(
                    [bool(null_lines.match(line)) for line in lines]
                )
            else:
                boolean_mapped_null_lines = np.zeros(len(lines), dtype=bool)
            element_count = int(len(lines))
            if element_count > sum(boolean_mapped_null_lines):
                nonnull_lines = list(
                    compress(lines, np.invert(boolean_mapped_null_lines))
                )
                nonnull_count = int((boolean_mapped_null_lines == False).sum())
                boolean_mapped_success_lines = np.array(
                    func(self, _lines=nonnull_lines, *args, **kwargs)
                )
                success_count = np.count_nonzero(boolean_mapped_success_lines)
                unexpected_list = list(
                    compress(nonnull_lines, np.invert(boolean_mapped_success_lines))
                )
                nonnull_lines_index = range(0, len(nonnull_lines) + 1)
                unexpected_index_list = list(
                    compress(
                        nonnull_lines_index, np.invert(boolean_mapped_success_lines)
                    )
                )
                success, percent_success = self._calc_map_expectation_success(
                    success_count, nonnull_count, mostly
                )
                return_obj = self._format_map_output(
                    result_format,
                    success,
                    element_count,
                    nonnull_count,
                    len(unexpected_list),
                    unexpected_list,
                    unexpected_index_list,
                )
            else:
                return_obj = self._format_map_output(
                    result_format=result_format,
                    success=None,
                    element_count=element_count,
                    nonnull_count=0,
                    unexpected_count=0,
                    unexpected_list=[],
                    unexpected_index_list=[],
                )
        else:
            return_obj = self._format_map_output(
                result_format=result_format,
                success=None,
                element_count=0,
                nonnull_count=0,
                unexpected_count=0,
                unexpected_list=[],
                unexpected_index_list=[],
            )
        f.close()
        return return_obj

    inner_wrapper.__name__ = func.__name__
    inner_wrapper.__doc__ = func.__doc__
    return inner_wrapper
```

start_point: [27, 4] | end_point: [155, 28] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
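The decorator's line handling (skip, then null-line filtering) can be distilled into a standalone sketch; the helper name and simplifications below are illustrative, not the real API:

```python
import re

# Standalone distillation of the skip + null-line filtering performed above.
def nonnull_lines(lines, skip=None, null_lines_regex=r"^\s*$"):
    if skip is not None and skip <= len(lines):
        lines = lines[skip:]
    null_re = re.compile(null_lines_regex)
    return [line for line in lines if not null_re.match(line)]

print(nonnull_lines(["# header\n", "\n", "data 1\n", "   \n", "data 2\n"], skip=1))
# -> ['data 1\n', 'data 2\n']
```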
identifier: FileDataAsset.expect_file_line_regex_match_count_to_be_between
parameters: (self, regex, expected_min_count=0, expected_max_count=None, skip=None, mostly=None, null_lines_regex=r"^\s*$", result_format=None, include_config=True, catch_exceptions=None, meta=None, _lines=None)
docstring_summary: Expect the number of times a regular expression appears on each line of a file to be between a maximum and minimum value.
function:

```python
def expect_file_line_regex_match_count_to_be_between(
    self,
    regex,
    expected_min_count=0,
    expected_max_count=None,
    skip=None,
    mostly=None,
    null_lines_regex=r"^\s*$",
    result_format=None,
    include_config=True,
    catch_exceptions=None,
    meta=None,
    _lines=None,
):
    """
    Expect the number of times a regular expression appears on each line of
    a file to be between a maximum and minimum value.
    Args:
        regex: \
            A string that can be compiled as valid regular expression to match
        expected_min_count (None or nonnegative integer): \
            Specifies the minimum number of times regex is expected to appear
            on each line of the file
        expected_max_count (None or nonnegative integer): \
            Specifies the maximum number of times regex is expected to appear
            on each line of the file
    Keyword Args:
        skip (None or nonnegative integer): \
            Integer specifying the first lines in the file the method should
            skip before assessing expectations
        mostly (None or number between 0 and 1): \
            Specifies an acceptable error for expectations. If the percentage
            of unexpected lines is less than mostly, the method still returns
            true even if all lines don't match the expectation criteria.
        null_lines_regex (valid regular expression or None): \
            If not none, a regex to skip lines as null. Defaults to empty or whitespace-only lines.
    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`,
            or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the
            result object. For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the
            result object. For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be
            included in the output without modification. For more detail,
            see :ref:`meta`.
        _lines (list): \
            The lines over which to operate (provided by the file_lines_map_expectation decorator)
    Returns:
        A JSON-serializable expectation result object.
        Exact fields vary depending on the values passed to
        :ref:`result_format <result_format>` and :ref:`include_config`,
        :ref:`catch_exceptions`, and :ref:`meta`.
    """
    try:
        comp_regex = re.compile(regex)
    except (ValueError, TypeError):
        raise ValueError("Must enter valid regular expression for regex")

    if expected_min_count is not None:
        try:
            assert float(expected_min_count).is_integer()
            assert float(expected_min_count) >= 0
        except (AssertionError, ValueError):
            raise ValueError(
                "expected_min_count must be a non-negative \
                integer or None"
            )

    if expected_max_count is not None:
        try:
            assert float(expected_max_count).is_integer()
            assert float(expected_max_count) >= 0
        except (AssertionError, ValueError):
            raise ValueError(
                "expected_max_count must be a non-negative \
                integer or None"
            )

    if expected_max_count is not None and expected_min_count is not None:
        try:
            assert expected_max_count >= expected_min_count
        except (AssertionError, ValueError):
            raise ValueError(
                "expected_max_count must be greater than or \
                equal to expected_min_count"
            )

    if expected_max_count is not None and expected_min_count is not None:
        truth_list = [
            expected_min_count
            <= len(comp_regex.findall(line))
            <= expected_max_count
            for line in _lines
        ]
    elif expected_max_count is not None:
        truth_list = [
            len(comp_regex.findall(line)) <= expected_max_count for line in _lines
        ]
    elif expected_min_count is not None:
        truth_list = [
            len(comp_regex.findall(line)) >= expected_min_count for line in _lines
        ]
    else:
        truth_list = [True for _ in _lines]

    return truth_list
```
">=",
"expected_min_count",
"except",
"(",
"AssertionError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"\"expected_max_count must be greater than or \\\n equal to expected_min_count\"",
")",
"if",
"expected_max_count",
"is",
"not",
"None",
"and",
"expected_min_count",
"is",
"not",
"None",
":",
"truth_list",
"=",
"[",
"expected_min_count",
"<=",
"len",
"(",
"comp_regex",
".",
"findall",
"(",
"line",
")",
")",
"<=",
"expected_max_count",
"for",
"line",
"in",
"_lines",
"]",
"elif",
"expected_max_count",
"is",
"not",
"None",
":",
"truth_list",
"=",
"[",
"len",
"(",
"comp_regex",
".",
"findall",
"(",
"line",
")",
")",
"<=",
"expected_max_count",
"for",
"line",
"in",
"_lines",
"]",
"elif",
"expected_min_count",
"is",
"not",
"None",
":",
"truth_list",
"=",
"[",
"len",
"(",
"comp_regex",
".",
"findall",
"(",
"line",
")",
")",
">=",
"expected_min_count",
"for",
"line",
"in",
"_lines",
"]",
"else",
":",
"truth_list",
"=",
"[",
"True",
"for",
"_",
"in",
"_lines",
"]",
"return",
"truth_list"
] | [
172,
4
] | [
292,
25
] | python | en | ['en', 'error', 'th'] | False |
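A minimal usage sketch for the expectation above. The import path, the log-file path, and the pattern are assumptions for illustration, not values taken from this row:

# Sketch only: import path and file path are assumptions.
from great_expectations.data_asset.file_data_asset import FileDataAsset

asset = FileDataAsset("/var/log/app.log")  # hypothetical log file
# Expect 1-3 timestamps like "12:34:56" on each line, tolerating 5% outliers.
result = asset.expect_file_line_regex_match_count_to_be_between(
    regex=r"\d{2}:\d{2}:\d{2}",
    expected_min_count=1,
    expected_max_count=3,
    mostly=0.95,
)
print(result)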
FileDataAsset.expect_file_line_regex_match_count_to_equal | (
self,
regex,
expected_count=0,
skip=None,
mostly=None,
nonnull_lines_regex=r"^\s*$",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
_lines=None,
) |
Expect the number of times a regular expression appears on each line of
a file to equal a given value.
Args:
regex: \
A string that can be compiled as a valid regular expression to match
expected_count (None or nonnegative integer): \
Specifies the number of times regex is expected to appear on each
line of the file
Keyword Args:
skip (None or nonnegative integer): \
Integer specifying the first lines in the file the method should
skip before assessing expectations
mostly (None or number between 0 and 1): \
Specifies an acceptable error rate for the expectation. If the
fraction of lines matching the expectation is at least mostly, the
method still returns true even though some lines fail the
expectation criteria.
nonnull_lines_regex (valid regular expression or None): \
If not None, a regex identifying lines to skip as null. Defaults to
matching empty or whitespace-only lines.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`,
or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the
result object. For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the
result object. For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
_lines (list): \
The lines over which to operate (provided by the file_lines_map_expectation decorator)
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to
:ref:`result_format <result_format>` and :ref:`include_config`,
:ref:`catch_exceptions`, and :ref:`meta`.
|
Expect the number of times a regular expression appears on each line of
a file to equal a given value. | def expect_file_line_regex_match_count_to_equal(
self,
regex,
expected_count=0,
skip=None,
mostly=None,
nonnull_lines_regex=r"^\s*$",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
_lines=None,
):
"""
Expect the number of times a regular expression appears on each line of
a file to equal a given value.
Args:
regex: \
A string that can be compiled as a valid regular expression to match
expected_count (None or nonnegative integer): \
Specifies the number of times regex is expected to appear on each
line of the file
Keyword Args:
skip (None or nonnegative integer): \
Integer specifying the first lines in the file the method should
skip before assessing expectations
mostly (None or number between 0 and 1): \
Specifies an acceptable error rate for the expectation. If the
fraction of lines matching the expectation is at least mostly, the
method still returns true even though some lines fail the
expectation criteria.
nonnull_lines_regex (valid regular expression or None): \
If not None, a regex identifying lines to skip as null. Defaults to
matching empty or whitespace-only lines.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`,
or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the
result object. For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the
result object. For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
_lines (list): \
The lines over which to operate (provided by the file_lines_map_expectation decorator)
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to
:ref:`result_format <result_format>` and :ref:`include_config`,
:ref:`catch_exceptions`, and :ref:`meta`.
"""
try:
comp_regex = re.compile(regex)
except (ValueError, TypeError):
raise ValueError("Must enter valid regular expression for regex")
try:
assert float(expected_count).is_integer()
assert float(expected_count) >= 0
except (AssertionError, ValueError):
raise ValueError("expected_count must be a non-negative integer")
return [len(comp_regex.findall(line)) == expected_count for line in _lines] | [
"def",
"expect_file_line_regex_match_count_to_equal",
"(",
"self",
",",
"regex",
",",
"expected_count",
"=",
"0",
",",
"skip",
"=",
"None",
",",
"mostly",
"=",
"None",
",",
"nonnull_lines_regex",
"=",
"r\"^\\s*$\"",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
"_lines",
"=",
"None",
",",
")",
":",
"try",
":",
"comp_regex",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"raise",
"ValueError",
"(",
"\"Must enter valid regular expression for regex\"",
")",
"try",
":",
"assert",
"float",
"(",
"expected_count",
")",
".",
"is_integer",
"(",
")",
"assert",
"float",
"(",
"expected_count",
")",
">=",
"0",
"except",
"(",
"AssertionError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"\"expected_count must be a non-negative integer\"",
")",
"return",
"[",
"len",
"(",
"comp_regex",
".",
"findall",
"(",
"line",
")",
")",
"==",
"expected_count",
"for",
"line",
"in",
"_lines",
"]"
] | [
295,
4
] | [
370,
83
] | python | en | ['en', 'error', 'th'] | False |
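A companion sketch for the equality variant; the import path and CSV path are placeholders:

# Sketch only: the CSV path is hypothetical.
from great_expectations.data_asset.file_data_asset import FileDataAsset

asset = FileDataAsset("data.csv")  # hypothetical four-column CSV
# Exactly three commas per line means exactly four columns.
result = asset.expect_file_line_regex_match_count_to_equal(regex=r",", expected_count=3)
print(result)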
FileDataAsset.expect_file_hash_to_equal | (
self,
value,
hash_alg="md5",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) |
Expect computed file hash to equal some given value.
Args:
value: A string to compare with the computed hash value
Keyword Args:
hash_alg (string): Indicates the hash algorithm to use
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`,
or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the
result object. For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format
<result_format>` and :ref:`include_config`, :ref:`catch_exceptions`,
and :ref:`meta`.
|
Expect computed file hash to equal some given value. | def expect_file_hash_to_equal(
self,
value,
hash_alg="md5",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect computed file hash to equal some given value.
Args:
value: A string to compare with the computed hash value
Keyword Args:
hash_alg (string): Indicates the hash algorithm to use
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`,
or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the
result object. For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format
<result_format>` and :ref:`include_config`, :ref:`catch_exceptions`,
and :ref:`meta`.
"""
success = False
try:
hash = hashlib.new(hash_alg)
# Limit file reads to 64 KB chunks at a time
BLOCK_SIZE = 65536
try:
with open(self._path, "rb") as file:
file_buffer = file.read(BLOCK_SIZE)
while file_buffer:
hash.update(file_buffer)
file_buffer = file.read(BLOCK_SIZE)
success = hash.hexdigest() == value
except OSError:
raise
except ValueError:
raise
return {"success": success} | [
"def",
"expect_file_hash_to_equal",
"(",
"self",
",",
"value",
",",
"hash_alg",
"=",
"\"md5\"",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"success",
"=",
"False",
"try",
":",
"hash",
"=",
"hashlib",
".",
"new",
"(",
"hash_alg",
")",
"# Limit file reads to 64 KB chunks at a time",
"BLOCK_SIZE",
"=",
"65536",
"try",
":",
"with",
"open",
"(",
"self",
".",
"_path",
",",
"\"rb\"",
")",
"as",
"file",
":",
"file_buffer",
"=",
"file",
".",
"read",
"(",
"BLOCK_SIZE",
")",
"while",
"file_buffer",
":",
"hash",
".",
"update",
"(",
"file_buffer",
")",
"file_buffer",
"=",
"file",
".",
"read",
"(",
"BLOCK_SIZE",
")",
"success",
"=",
"hash",
".",
"hexdigest",
"(",
")",
"==",
"value",
"except",
"OSError",
":",
"raise",
"except",
"ValueError",
":",
"raise",
"return",
"{",
"\"success\"",
":",
"success",
"}"
] | [
373,
4
] | [
430,
35
] | python | en | ['en', 'error', 'th'] | False |
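The chunked-read pattern above generalizes to any hashlib algorithm. A standalone sketch, with the file path and expected digest as placeholders:

import hashlib

def file_digest(path, hash_alg="md5", block_size=65536):
    # Same strategy as the method above: hash 64 KB at a time so
    # large files never have to fit in memory.
    h = hashlib.new(hash_alg)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(block_size), b""):
            h.update(chunk)
    return h.hexdigest()

# success = file_digest("release.tar.gz", "sha256") == expected_digest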
FileDataAsset.expect_file_size_to_be_between | (
self,
minsize=0,
maxsize=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) |
Expect file size to be between a user specified maxsize and minsize.
Args:
minsize(integer): minimum expected file size
maxsize(integer): maximum expected file size
Keyword Args:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
|
Expect file size to be between a user specified maxsize and minsize. | def expect_file_size_to_be_between(
self,
minsize=0,
maxsize=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect file size to be between a user specified maxsize and minsize.
Args:
minsize(integer): minimum expected file size
maxsize(integer): maximum expected file size
Keyword Args:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
try:
size = os.path.getsize(self._path)
except OSError:
raise
# We want string or float or int versions of numbers, but
# they must be representable as clean integers.
try:
if not float(minsize).is_integer():
raise ValueError("minsize must be an integer")
minsize = int(float(minsize))
if maxsize is not None and not float(maxsize).is_integer():
raise ValueError("maxsize must be an integer")
elif maxsize is not None:
maxsize = int(float(maxsize))
except TypeError:
raise
if minsize < 0:
raise ValueError("minsize must be greater than or equal to 0")
if maxsize is not None and maxsize < 0:
raise ValueError("maxsize must be greater than or equal to 0")
if maxsize is not None and minsize > maxsize:
raise ValueError("maxsize must be greater than or equal to minsize")
if maxsize is None and size >= minsize:
success = True
elif (size >= minsize) and (size <= maxsize):
success = True
else:
success = False
return {"success": success, "result": {"observed_value": size}} | [
"def",
"expect_file_size_to_be_between",
"(",
"self",
",",
"minsize",
"=",
"0",
",",
"maxsize",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"try",
":",
"size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"_path",
")",
"except",
"OSError",
":",
"raise",
"# We want string or float or int versions of numbers, but",
"# they must be representable as clean integers.",
"try",
":",
"if",
"not",
"float",
"(",
"minsize",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"minsize must be an integer\"",
")",
"minsize",
"=",
"int",
"(",
"float",
"(",
"minsize",
")",
")",
"if",
"maxsize",
"is",
"not",
"None",
"and",
"not",
"float",
"(",
"maxsize",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"maxsize must be an integer\"",
")",
"elif",
"maxsize",
"is",
"not",
"None",
":",
"maxsize",
"=",
"int",
"(",
"float",
"(",
"maxsize",
")",
")",
"except",
"TypeError",
":",
"raise",
"if",
"minsize",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"minsize must be greater than or equal to 0\"",
")",
"if",
"maxsize",
"is",
"not",
"None",
"and",
"maxsize",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"maxsize must be greater than or equal to 0\"",
")",
"if",
"maxsize",
"is",
"not",
"None",
"and",
"minsize",
">",
"maxsize",
":",
"raise",
"ValueError",
"(",
"\"maxsize must be greater than or equal to minsize\"",
")",
"if",
"maxsize",
"is",
"None",
"and",
"size",
">=",
"minsize",
":",
"success",
"=",
"True",
"elif",
"(",
"size",
">=",
"minsize",
")",
"and",
"(",
"size",
"<=",
"maxsize",
")",
":",
"success",
"=",
"True",
"else",
":",
"success",
"=",
"False",
"return",
"{",
"\"success\"",
":",
"success",
",",
"\"result\"",
":",
"{",
"\"observed_value\"",
":",
"size",
"}",
"}"
] | [
433,
4
] | [
508,
71
] | python | en | ['en', 'error', 'th'] | False |
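Once the bounds are validated, the core comparison reduces to the standalone check below (path and bounds are placeholders):

import os

size = os.path.getsize("backup.tar")  # hypothetical file
minsize, maxsize = 1_000, 50_000_000
success = size >= minsize and (maxsize is None or size <= maxsize)
print({"success": success, "result": {"observed_value": size}})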
FileDataAsset.expect_file_to_exist | (
self,
filepath=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) |
Checks to see if a file specified by the user actually exists
Args:
filepath (str or None): \
The filepath to evaluate. If None, the currently-configured path object
of this FileDataAsset is checked.
Keyword Args:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
|
Checks to see if a file specified by the user actually exists | def expect_file_to_exist(
self,
filepath=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Checks to see if a file specified by the user actually exists
Args:
filepath (str or None): \
The filepath to evaluate. If None, the currently-configured path object
of this FileDataAsset is checked.
Keyword Args:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
if filepath is not None and os.path.isfile(filepath):
success = True
elif self._path is not None and os.path.isfile(self._path):
success = True
else:
success = False
return {"success": success} | [
"def",
"expect_file_to_exist",
"(",
"self",
",",
"filepath",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"if",
"filepath",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"success",
"=",
"True",
"elif",
"self",
".",
"_path",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"_path",
")",
":",
"success",
"=",
"True",
"else",
":",
"success",
"=",
"False",
"return",
"{",
"\"success\"",
":",
"success",
"}"
] | [
511,
4
] | [
561,
35
] | python | en | ['en', 'error', 'th'] | False |
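Usage is a one-liner; the import path and file paths below are placeholders:

from great_expectations.data_asset.file_data_asset import FileDataAsset

asset = FileDataAsset("report.csv")              # hypothetical path
print(asset.expect_file_to_exist())              # checks the asset's own path
print(asset.expect_file_to_exist("/etc/hosts"))  # or any explicit filepath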
FileDataAsset.expect_file_to_have_valid_table_header | (
self,
regex,
skip=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) |
Checks to see if a file has a line with unique delimited values;
such a line may be used as a table header.
Keyword Args:
skip (nonnegative integer): \
Integer specifying the first lines in the file the method
should skip before assessing expectations
regex (string):
A string that can be compiled as a valid regular expression.
Used to specify the elements of the table header (the column headers)
result_format (str or None):
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
|
Checks to see if a file has a line with unique delimited values;
such a line may be used as a table header. | def expect_file_to_have_valid_table_header(
self,
regex,
skip=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Checks to see if a file has a line with unique delimited values;
such a line may be used as a table header.
Keyword Args:
skip (nonnegative integer): \
Integer specifying the first lines in the file the method
should skip before assessing expectations
regex (string):
A string that can be compiled as a valid regular expression.
Used to specify the elements of the table header (the column headers)
result_format (str or None):
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be
included in the output without modification. For more detail,
see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
try:
comp_regex = re.compile(regex)
except re.error:
raise ValueError("Must enter valid regular expression for regex")
success = False
try:
with open(self._path) as f:
lines = f.readlines() # Read in file lines
except OSError:
raise
# Skip k initial lines designated by the user
if skip is not None and skip <= len(lines):
try:
assert float(skip).is_integer()
assert float(skip) >= 0
except (AssertionError, ValueError):
raise ValueError("skip must be a positive integer")
lines = lines[skip:]
header_line = lines[0].strip()
header_names = comp_regex.split(header_line)
if len(set(header_names)) == len(header_names):
success = True
return {"success": success} | [
"def",
"expect_file_to_have_valid_table_header",
"(",
"self",
",",
"regex",
",",
"skip",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"try",
":",
"comp_regex",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"except",
"re",
".",
"error",
":",
"raise",
"ValueError",
"(",
"\"Must enter valid regular expression for regex\"",
")",
"success",
"=",
"False",
"try",
":",
"with",
"open",
"(",
"self",
".",
"_path",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"# Read in file lines",
"except",
"OSError",
":",
"raise",
"# Skip k initial lines designated by the user",
"if",
"skip",
"is",
"not",
"None",
"and",
"skip",
"<=",
"len",
"(",
"lines",
")",
":",
"try",
":",
"assert",
"float",
"(",
"skip",
")",
".",
"is_integer",
"(",
")",
"assert",
"float",
"(",
"skip",
")",
">=",
"0",
"except",
"(",
"AssertionError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"\"skip must be a positive integer\"",
")",
"lines",
"=",
"lines",
"[",
"skip",
":",
"]",
"header_line",
"=",
"lines",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"header_names",
"=",
"comp_regex",
".",
"split",
"(",
"header_line",
")",
"if",
"len",
"(",
"set",
"(",
"header_names",
")",
")",
"==",
"len",
"(",
"header_names",
")",
":",
"success",
"=",
"True",
"return",
"{",
"\"success\"",
":",
"success",
"}"
] | [
564,
4
] | [
639,
35
] | python | en | ['en', 'error', 'th'] | False |
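The uniqueness test at the heart of this expectation is easy to reproduce standalone (the CSV path is a placeholder):

import re

comp_regex = re.compile(r",")                # header delimiter
with open("table.csv") as f:                 # hypothetical file
    header_line = f.readline().strip()
header_names = comp_regex.split(header_line)
# Valid only if splitting yields no duplicate column names.
print({"success": len(set(header_names)) == len(header_names)})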
FileDataAsset.expect_file_to_be_valid_json | (
self,
schema=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) |
Args:
schema : string
optional JSON schema file against which the JSON data file is validated
result_format (str or None):
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean):
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None):
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None):
A JSON-serializable dictionary (nesting allowed) that will \
be included in the output without modification.
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and \
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
|
Args:
schema : string
optional JSON schema file against which the JSON data file is validated | def expect_file_to_be_valid_json(
self,
schema=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Args:
schema : string
optional JSON schema file against which the JSON data file is validated
result_format (str or None):
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean):
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None):
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None):
A JSON-serializable dictionary (nesting allowed) that will \
be included in the output without modification.
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and \
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
if schema is None:
try:
with open(self._path) as f:
json.load(f)
success = True
except ValueError:
success = False
else:
try:
with open(schema) as s:
schema_data = s.read()
sdata = json.loads(schema_data)
with open(self._path) as f:
json_data = f.read()
jdata = json.loads(json_data)
jsonschema.validate(jdata, sdata)
success = True
except jsonschema.ValidationError:
success = False
except jsonschema.SchemaError:
raise
except:
raise
return {"success": success} | [
"def",
"expect_file_to_be_valid_json",
"(",
"self",
",",
"schema",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"if",
"schema",
"is",
"None",
":",
"try",
":",
"with",
"open",
"(",
"self",
".",
"_path",
")",
"as",
"f",
":",
"json",
".",
"load",
"(",
"f",
")",
"success",
"=",
"True",
"except",
"ValueError",
":",
"success",
"=",
"False",
"else",
":",
"try",
":",
"with",
"open",
"(",
"schema",
")",
"as",
"s",
":",
"schema_data",
"=",
"s",
".",
"read",
"(",
")",
"sdata",
"=",
"json",
".",
"loads",
"(",
"schema_data",
")",
"with",
"open",
"(",
"self",
".",
"_path",
")",
"as",
"f",
":",
"json_data",
"=",
"f",
".",
"read",
"(",
")",
"jdata",
"=",
"json",
".",
"loads",
"(",
"json_data",
")",
"jsonschema",
".",
"validate",
"(",
"jdata",
",",
"sdata",
")",
"success",
"=",
"True",
"except",
"jsonschema",
".",
"ValidationError",
":",
"success",
"=",
"False",
"except",
"jsonschema",
".",
"SchemaError",
":",
"raise",
"except",
":",
"raise",
"return",
"{",
"\"success\"",
":",
"success",
"}"
] | [
642,
4
] | [
704,
35
] | python | en | ['en', 'error', 'th'] | False |
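The schema branch reduces to a jsonschema.validate call. A standalone sketch with placeholder file names:

import json
import jsonschema

with open("schema.json") as s:       # hypothetical schema file
    sdata = json.load(s)
with open("payload.json") as f:      # hypothetical data file
    jdata = json.load(f)
try:
    jsonschema.validate(jdata, sdata)  # raises ValidationError on failure
    success = True
except jsonschema.ValidationError:
    success = False
print({"success": success})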
parse_bool | (s: Optional[Union[str, bool]]) |
Parse a boolean value from a string. T, True, Y, y, 1 return True;
other things return False.
|
Parse a boolean value from a string. T, True, Y, y, 1 return True;
other things return False.
| def parse_bool(s: Optional[Union[str, bool]]) -> bool:
"""
Parse a boolean value from a string. T, True, Y, y, 1 return True;
other things return False.
"""
# If `s` is already a bool, return its value.
#
# This allows a caller to not know or care whether their value is already
# a boolean, or if it is a string that needs to be parsed below.
if isinstance(s, bool):
return s
# If we didn't get anything at all, return False.
if not s:
return False
# OK, we got _something_, so try strtobool.
try:
return strtobool(s)
except ValueError:
return False | [
"def",
"parse_bool",
"(",
"s",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"bool",
"]",
"]",
")",
"->",
"bool",
":",
"# If `s` is already a bool, return its value.",
"#",
"# This allows a caller to not know or care whether their value is already",
"# a boolean, or if it is a string that needs to be parsed below.",
"if",
"isinstance",
"(",
"s",
",",
"bool",
")",
":",
"return",
"s",
"# If we didn't get anything at all, return False.",
"if",
"not",
"s",
":",
"return",
"False",
"# OK, we got _something_, so try strtobool.",
"try",
":",
"return",
"strtobool",
"(",
"s",
")",
"except",
"ValueError",
":",
"return",
"False"
] | [
152,
0
] | [
173,
20
] | python | en | ['en', 'ja', 'th'] | False |
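A behavior sketch, assuming parse_bool as defined above is in scope. Note that strtobool returns 0/1 ints, so the raw return values are truthy/falsy rather than strict bools:

for raw in (True, "True", "y", "1", "no", "", None, "garbage"):
    print(repr(raw), "->", bool(parse_bool(raw)))
# True, "True", "y", and "1" are truthy; everything else is falsy.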
Timer.__init__ | (self, name: str, prom_metrics_registry: Optional[Any]=None) |
Create a Timer, given a name. The Timer is initially stopped.
|
Create a Timer, given a name. The Timer is initially stopped.
| def __init__(self, name: str, prom_metrics_registry: Optional[Any]=None) -> None:
"""
Create a Timer, given a name. The Timer is initially stopped.
"""
self.name = name
if prom_metrics_registry:
metric_prefix = re.sub(r'\s+', '_', name).lower()
self._gauge = Gauge(f'{metric_prefix}_time_seconds', f'Elapsed time on {name} operations',
namespace='ambassador', registry=prom_metrics_registry)
self.reset() | [
"def",
"__init__",
"(",
"self",
",",
"name",
":",
"str",
",",
"prom_metrics_registry",
":",
"Optional",
"[",
"Any",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"name",
"=",
"name",
"if",
"prom_metrics_registry",
":",
"metric_prefix",
"=",
"re",
".",
"sub",
"(",
"r'\\s+'",
",",
"'_'",
",",
"name",
")",
".",
"lower",
"(",
")",
"self",
".",
"_gauge",
"=",
"Gauge",
"(",
"f'{metric_prefix}_time_seconds'",
",",
"f'Elapsed time on {name} operations'",
",",
"namespace",
"=",
"'ambassador'",
",",
"registry",
"=",
"prom_metrics_registry",
")",
"self",
".",
"reset",
"(",
")"
] | [
273,
4
] | [
285,
20
] | python | en | ['en', 'error', 'th'] | False |
Timer.__bool__ | (self) |
Timers test True in a boolean context if they have timed at least one
cycle.
|
Timers test True in a boolean context if they have timed at least one
cycle.
| def __bool__(self) -> bool:
"""
Timers test True in a boolean context if they have timed at least one
cycle.
"""
return self._cycles > 0 | [
"def",
"__bool__",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"self",
".",
"_cycles",
">",
"0"
] | [
303,
4
] | [
308,
31
] | python | en | ['en', 'error', 'th'] | False |
Timer.start | (self, when: Optional[float]=None) |
Start a Timer running.
:param when: Optional start time. If not supplied,
the current time is used.
|
Start a Timer running. | def start(self, when: Optional[float]=None) -> None:
"""
Start a Timer running.
:param when: Optional start time. If not supplied,
the current time is used.
"""
# If we're already running, this method silently discards the
# currently-running cycle. Why? Because otherwise, it's a little
# too easy to forget to stop a Timer, cause an Exception, and
# crash the world.
#
# Not that I ever got bitten by this. Of course. [ :P ]
self._starttime = when or time.perf_counter()
self._running = True | [
"def",
"start",
"(",
"self",
",",
"when",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
")",
"->",
"None",
":",
"# If we're already running, this method silently discards the",
"# currently-running cycle. Why? Because otherwise, it's a little",
"# too easy to forget to stop a Timer, cause an Exception, and",
"# crash the world.",
"#",
"# Not that I ever got bitten by this. Of course. [ :P ]",
"self",
".",
"_starttime",
"=",
"when",
"or",
"time",
".",
"perf_counter",
"(",
")",
"self",
".",
"_running",
"=",
"True"
] | [
310,
4
] | [
326,
28
] | python | en | ['en', 'error', 'th'] | False |
Timer.stop | (self, when: Optional[float]=None) |
Stop a Timer, increment the cycle count, and update the
accumulated time with the amount of time since the Timer
was started.
:param when: Optional stop time. If not supplied,
the current time is used.
:return: The amount of time the Timer has accumulated
|
Stop a Timer, increment the cycle count, and update the
accumulated time with the amount of time since the Timer
was started. | def stop(self, when: Optional[float]=None) -> float:
"""
Stop a Timer, increment the cycle count, and update the
accumulated time with the amount of time since the Timer
was started.
:param when: Optional stop time. If not supplied,
the current time is used.
:return: The amount of time the Timer has accumulated
"""
# If we're already stopped, just return the same thing as the
# previous call to stop. See comments in start() for why this
# isn't an Exception...
if self._running:
if not when:
when = time.perf_counter()
self._running = False
self._cycles += 1
this_cycle = (when - self._starttime) + self._faketime
if self._gauge:
self._gauge.set(this_cycle)
self._faketime = 0
self._accumulated += this_cycle
if this_cycle < self._minimum:
self._minimum = this_cycle
if this_cycle > self._maximum:
self._maximum = this_cycle
return self._accumulated | [
"def",
"stop",
"(",
"self",
",",
"when",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
")",
"->",
"float",
":",
"# If we're already stopped, just return the same thing as the",
"# previous call to stop. See comments in start() for why this",
"# isn't an Exception...",
"if",
"self",
".",
"_running",
":",
"if",
"not",
"when",
":",
"when",
"=",
"time",
".",
"perf_counter",
"(",
")",
"self",
".",
"_running",
"=",
"False",
"self",
".",
"_cycles",
"+=",
"1",
"this_cycle",
"=",
"(",
"when",
"-",
"self",
".",
"_starttime",
")",
"+",
"self",
".",
"_faketime",
"if",
"self",
".",
"_gauge",
":",
"self",
".",
"_gauge",
".",
"set",
"(",
"this_cycle",
")",
"self",
".",
"_faketime",
"=",
"0",
"self",
".",
"_accumulated",
"+=",
"this_cycle",
"if",
"this_cycle",
"<",
"self",
".",
"_minimum",
":",
"self",
".",
"_minimum",
"=",
"this_cycle",
"if",
"this_cycle",
">",
"self",
".",
"_maximum",
":",
"self",
".",
"_maximum",
"=",
"this_cycle",
"return",
"self",
".",
"_accumulated"
] | [
328,
4
] | [
364,
32
] | python | en | ['en', 'error', 'th'] | False |
Timer.faketime | (self, faketime: float) |
Add fake time to a Timer. This is intended solely for
testing.
|
Add fake time to a Timer. This is intended solely for
testing.
| def faketime(self, faketime: float) -> None:
"""
Add fake time to a Timer. This is intended solely for
testing.
"""
if not self._running:
raise Exception(f"Timer {self.name}.faketime: not running")
self._faketime = faketime | [
"def",
"faketime",
"(",
"self",
",",
"faketime",
":",
"float",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"_running",
":",
"raise",
"Exception",
"(",
"f\"Timer {self.name}.faketime: not running\"",
")",
"self",
".",
"_faketime",
"=",
"faketime"
] | [
366,
4
] | [
375,
33
] | python | en | ['en', 'error', 'th'] | False |
Timer.cycles | (self) |
The number of timing cycles this Timer has recorded.
|
The number of timing cycles this Timer has recorded.
| def cycles(self):
"""
The number of timing cycles this Timer has recorded.
"""
return self._cycles | [
"def",
"cycles",
"(",
"self",
")",
":",
"return",
"self",
".",
"_cycles"
] | [
378,
4
] | [
382,
27
] | python | en | ['en', 'error', 'th'] | False |
Timer.starttime | (self) |
The time this Timer was last started, or 0 if it has
never been started.
|
The time this Timer was last started, or 0 if it has
never been started.
| def starttime(self):
"""
The time this Timer was last started, or 0 if it has
never been started.
"""
return self._starttime | [
"def",
"starttime",
"(",
"self",
")",
":",
"return",
"self",
".",
"_starttime"
] | [
385,
4
] | [
390,
30
] | python | en | ['en', 'error', 'th'] | False |
Timer.accumulated | (self) |
The amount of time this Timer has accumulated.
|
The amount of time this Timer has accumulated.
| def accumulated(self):
"""
The amount of time this Timer has accumulated.
"""
return self._accumulated | [
"def",
"accumulated",
"(",
"self",
")",
":",
"return",
"self",
".",
"_accumulated"
] | [
393,
4
] | [
397,
32
] | python | en | ['en', 'error', 'th'] | False |
Timer.minimum | (self) |
The minimum single-cycle time this Timer has recorded.
|
The minimum single-cycle time this Timer has recorded.
| def minimum(self):
"""
The minimum single-cycle time this Timer has recorded.
"""
return self._minimum | [
"def",
"minimum",
"(",
"self",
")",
":",
"return",
"self",
".",
"_minimum"
] | [
400,
4
] | [
404,
28
] | python | en | ['en', 'error', 'th'] | False |
Timer.maximum | (self) |
The maximum single-cycle time this Timer has recorded.
|
The maximum single-cycle time this Timer has recorded.
| def maximum(self):
"""
The maximum single-cycle time this Timer has recorded.
"""
return self._maximum | [
"def",
"maximum",
"(",
"self",
")",
":",
"return",
"self",
".",
"_maximum"
] | [
407,
4
] | [
411,
28
] | python | en | ['en', 'error', 'th'] | False |
Timer.average | (self) |
The average cycle time for this Timer.
|
The average cycle time for this Timer.
| def average(self):
"""
The average cycle time for this Timer.
"""
if self._cycles > 0:
return self._accumulated / self._cycles
raise Exception(f"Timer {self.name}.average: no cycles to average") | [
"def",
"average",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cycles",
">",
"0",
":",
"return",
"self",
".",
"_accumulated",
"/",
"self",
".",
"_cycles",
"raise",
"Exception",
"(",
"f\"Timer {self.name}.average: no cycles to average\"",
")"
] | [
414,
4
] | [
421,
75
] | python | en | ['en', 'error', 'th'] | False |
Timer.running | (self) |
Whether or not this Timer is running.
|
Whether or not this Timer is running.
| def running(self):
"""
Whether or not this Timer is running.
"""
return self._running | [
"def",
"running",
"(",
"self",
")",
":",
"return",
"self",
".",
"_running"
] | [
424,
4
] | [
428,
28
] | python | en | ['en', 'error', 'th'] | False |
Timer.summary | (self) |
Return a summary of this Timer.
|
Return a summary of this Timer.
| def summary(self) -> str:
"""
Return a summary of this Timer.
"""
return "TIMER %s: %d, %.3f/%.3f/%.3f" % (
self.name, self.cycles, self.minimum, self.average, self.maximum
) | [
"def",
"summary",
"(",
"self",
")",
"->",
"str",
":",
"return",
"\"TIMER %s: %d, %.3f/%.3f/%.3f\"",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"cycles",
",",
"self",
".",
"minimum",
",",
"self",
".",
"average",
",",
"self",
".",
"maximum",
")"
] | [
440,
4
] | [
447,
16
] | python | en | ['en', 'error', 'th'] | False |
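A small end-to-end sketch of the Timer API documented in the rows above, assuming the class is in scope:

import time

t = Timer("demo")
for _ in range(3):
    t.start()
    time.sleep(0.01)     # stand-in for real work
    t.stop()
print(t.cycles)          # 3
print(t.summary())       # e.g. TIMER demo: 3, 0.010/0.010/0.011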
SecretInfo.decode | (b64_pem: str) |
Do base64 decoding of a cryptographic element.
:param b64_pem: Base64-encoded PEM element
:return: Decoded PEM element
|
Do base64 decoding of a cryptographic element. | def decode(b64_pem: str) -> Optional[str]:
"""
Do base64 decoding of a cryptographic element.
:param b64_pem: Base64-encoded PEM element
:return: Decoded PEM element
"""
utf8_pem = None
pem = None
try:
utf8_pem = binascii.a2b_base64(b64_pem)
except binascii.Error:
return None
try:
pem = utf8_pem.decode('utf-8')
except UnicodeDecodeError:
return None
return pem | [
"def",
"decode",
"(",
"b64_pem",
":",
"str",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"utf8_pem",
"=",
"None",
"pem",
"=",
"None",
"try",
":",
"utf8_pem",
"=",
"binascii",
".",
"a2b_base64",
"(",
"b64_pem",
")",
"except",
"binascii",
".",
"Error",
":",
"return",
"None",
"try",
":",
"pem",
"=",
"utf8_pem",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
":",
"return",
"None",
"return",
"pem"
] | [
548,
4
] | [
568,
18
] | python | en | ['en', 'error', 'th'] | False |
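A round-trip sketch, assuming decode is exposed as a static helper on the class; the PEM text is a placeholder:

import binascii

pem_text = "-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----"
b64_pem = binascii.b2a_base64(pem_text.encode("utf-8")).decode("ascii")
assert SecretInfo.decode(b64_pem) == pem_text
# Malformed base64 or non-UTF-8 payloads come back as None instead of raising.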
SecretInfo.fingerprint | (pem: Optional[str]) |
Generate and return a cryptographic fingerprint of a PEM element.
The fingerprint is a keytype tag ('PEM' or 'RAW') followed by the first
16 uppercase hex characters of the SHA-1 digest of the element's UTF-8
representation.
:param pem: PEM element
:return: fingerprint string
|
Generate and return a cryptographic fingerprint of a PEM element. | def fingerprint(pem: Optional[str]) -> str:
"""
Generate and return a cryptographic fingerprint of a PEM element.
The fingerprint is a keytype tag ('PEM' or 'RAW') followed by the first
16 uppercase hex characters of the SHA-1 digest of the element's UTF-8
representation.
:param pem: PEM element
:return: fingerprint string
"""
if not pem:
return '<none>'
h = hashlib.new('sha1')
h.update(pem.encode('utf-8'))
hd = h.hexdigest()[0:16].upper()
keytype = 'PEM' if pem.startswith('-----BEGIN') else 'RAW'
return f'{keytype}: {hd}' | [
"def",
"fingerprint",
"(",
"pem",
":",
"Optional",
"[",
"str",
"]",
")",
"->",
"str",
":",
"if",
"not",
"pem",
":",
"return",
"'<none>'",
"h",
"=",
"hashlib",
".",
"new",
"(",
"'sha1'",
")",
"h",
".",
"update",
"(",
"pem",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"hd",
"=",
"h",
".",
"hexdigest",
"(",
")",
"[",
"0",
":",
"16",
"]",
".",
"upper",
"(",
")",
"keytype",
"=",
"'PEM'",
"if",
"pem",
".",
"startswith",
"(",
"'-----BEGIN'",
")",
"else",
"'RAW'",
"return",
"f'{keytype}: {hd}'"
] | [
571,
4
] | [
590,
33
] | python | en | ['en', 'error', 'th'] | False |
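An output-shape sketch, assuming fingerprint is callable on the class:

print(SecretInfo.fingerprint(None))                 # '<none>'
print(SecretInfo.fingerprint("-----BEGIN CERT"))    # 'PEM: ' + 16 hex chars
print(SecretInfo.fingerprint("raw-key-material"))   # 'RAW: ' + 16 hex chars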
SecretInfo.to_dict | (self) |
Return the dictionary representation of this SecretInfo.
:return: dict
|
Return the dictionary representation of this SecretInfo. | def to_dict(self) -> Dict[str, Any]:
"""
Return the dictionary representation of this SecretInfo.
:return: dict
"""
return {
'name': self.name,
'namespace': self.namespace,
'secret_type': self.secret_type,
'tls_crt': self.fingerprint(self.tls_crt),
'tls_key': self.fingerprint(self.tls_key),
'user_key': self.fingerprint(self.user_key),
'root_crt': self.fingerprint(self.root_crt)
} | [
"def",
"to_dict",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"return",
"{",
"'name'",
":",
"self",
".",
"name",
",",
"'namespace'",
":",
"self",
".",
"namespace",
",",
"'secret_type'",
":",
"self",
".",
"secret_type",
",",
"'tls_crt'",
":",
"self",
".",
"fingerprint",
"(",
"self",
".",
"tls_crt",
")",
",",
"'tls_key'",
":",
"self",
".",
"fingerprint",
"(",
"self",
".",
"tls_key",
")",
",",
"'user_key'",
":",
"self",
".",
"fingerprint",
"(",
"self",
".",
"user_key",
")",
",",
"'root_crt'",
":",
"self",
".",
"fingerprint",
"(",
"self",
".",
"root_crt",
")",
"}"
] | [
592,
4
] | [
606,
9
] | python | en | ['en', 'error', 'th'] | False |
SecretInfo.from_aconf_secret | (cls, aconf_object: 'ACResource') |
Convert an ACResource containing a secret into a SecretInfo. This is used by the IR.save_secret_info()
to convert saved secrets into SecretInfos.
:param aconf_object: a ACResource containing a secret
:return: SecretInfo
|
Convert an ACResource containing a secret into a SecretInfo. This is used by the IR.save_secret_info()
to convert saved secrets into SecretInfos. | def from_aconf_secret(cls, aconf_object: 'ACResource') -> 'SecretInfo':
"""
Convert an ACResource containing a secret into a SecretInfo. This is used by the IR.save_secret_info()
to convert saved secrets into SecretInfos.
:param aconf_object: a ACResource containing a secret
:return: SecretInfo
"""
tls_crt = aconf_object.get('tls_crt', None)
if not tls_crt:
tls_crt = aconf_object.get('cert-chain_pem')
tls_key = aconf_object.get('tls_key', None)
if not tls_key:
tls_key = aconf_object.get('key_pem')
return SecretInfo(
aconf_object.name,
aconf_object.namespace,
aconf_object.secret_type,
tls_crt,
tls_key,
aconf_object.get('user_key', None),
aconf_object.get('root-cert_pem', None)
) | [
"def",
"from_aconf_secret",
"(",
"cls",
",",
"aconf_object",
":",
"'ACResource'",
")",
"->",
"'SecretInfo'",
":",
"tls_crt",
"=",
"aconf_object",
".",
"get",
"(",
"'tls_crt'",
",",
"None",
")",
"if",
"not",
"tls_crt",
":",
"tls_crt",
"=",
"aconf_object",
".",
"get",
"(",
"'cert-chain_pem'",
")",
"tls_key",
"=",
"aconf_object",
".",
"get",
"(",
"'tls_key'",
",",
"None",
")",
"if",
"not",
"tls_key",
":",
"tls_key",
"=",
"aconf_object",
".",
"get",
"(",
"'key_pem'",
")",
"return",
"SecretInfo",
"(",
"aconf_object",
".",
"name",
",",
"aconf_object",
".",
"namespace",
",",
"aconf_object",
".",
"secret_type",
",",
"tls_crt",
",",
"tls_key",
",",
"aconf_object",
".",
"get",
"(",
"'user_key'",
",",
"None",
")",
",",
"aconf_object",
".",
"get",
"(",
"'root-cert_pem'",
",",
"None",
")",
")"
] | [
609,
4
] | [
634,
9
] | python | en | ['en', 'error', 'th'] | False |
SecretInfo.from_dict | (cls, resource: 'IRResource',
secret_name: str, namespace: str, source: str,
cert_data: Optional[Dict[str, Any]], secret_type="kubernetes.io/tls") |
Given a secret's name and namespace, and a dictionary of configuration elements, return
a SecretInfo for the secret.
The "source" parameter needs some explanation. When working with secrets in most environments
where Ambassador runs, secrets will be loaded from some external system (e.g. Kubernetes),
and serialized to disk, and the disk serialization is the thing we can actually read the
dictionary of secret data from. The "source" parameter is the thing we read to get the actual
dictionary -- in our example above, "source" would be the pathname of the serialization on
disk, rather than the Kubernetes resource name.
:param resource: owning IRResource
:param secret_name: name of secret
:param namespace: namespace of secret
:param source: source of data
:param cert_data: dictionary of secret info (public and private key, etc.)
:param secret_type: Kubernetes-style secret type
:return:
|
Given a secret's name and namespace, and a dictionary of configuration elements, return
a SecretInfo for the secret. | def from_dict(cls, resource: 'IRResource',
secret_name: str, namespace: str, source: str,
cert_data: Optional[Dict[str, Any]], secret_type="kubernetes.io/tls") -> Optional['SecretInfo']:
"""
Given a secret's name and namespace, and a dictionary of configuration elements, return
a SecretInfo for the secret.
The "source" parameter needs some explanation. When working with secrets in most environments
where Ambassador runs, secrets will be loaded from some external system (e.g. Kubernetes),
and serialized to disk, and the disk serialization is the thing we can actually read the
dictionary of secret data from. The "source" parameter is the thing we read to get the actual
dictionary -- in our example above, "source" would be the pathname of the serialization on
disk, rather than the Kubernetes resource name.
:param resource: owning IRResource
:param secret_name: name of secret
:param namespace: namespace of secret
:param source: source of data
:param cert_data: dictionary of secret info (public and private key, etc.)
:param secret_type: Kubernetes-style secret type
:return:
"""
tls_crt = None
tls_key = None
user_key = None
if not cert_data:
resource.ir.logger.error(f"{resource.kind} {resource.name}: found no certificate in {source}?")
return None
if secret_type == 'kubernetes.io/tls':
# OK, we have something to work with. Hopefully.
tls_crt = cert_data.get('tls.crt', None)
if not tls_crt:
# Having no public half is definitely an error. Having no private half given a public half
# might be OK, though -- that's up to our caller to decide.
resource.ir.logger.error(f"{resource.kind} {resource.name}: found data but no certificate in {source}?")
return None
tls_key = cert_data.get('tls.key', None)
elif secret_type == 'Opaque':
user_key = cert_data.get('user.key', None)
if not user_key:
# The opaque keys we support must have user.key, but will likely have nothing else.
resource.ir.logger.error(f"{resource.kind} {resource.name}: found data but no user.key in {source}?")
return None
cert = None
elif secret_type == 'istio.io/key-and-cert':
resource.ir.logger.error(f"{resource.kind} {resource.name}: found data but handler for istio key not finished yet")
return SecretInfo(secret_name, namespace, secret_type, tls_crt=tls_crt, tls_key=tls_key, user_key=user_key) | [
"def",
"from_dict",
"(",
"cls",
",",
"resource",
":",
"'IRResource'",
",",
"secret_name",
":",
"str",
",",
"namespace",
":",
"str",
",",
"source",
":",
"str",
",",
"cert_data",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
",",
"secret_type",
"=",
"\"kubernetes.io/tls\"",
")",
"->",
"Optional",
"[",
"'SecretInfo'",
"]",
":",
"tls_crt",
"=",
"None",
"tls_key",
"=",
"None",
"user_key",
"=",
"None",
"if",
"not",
"cert_data",
":",
"resource",
".",
"ir",
".",
"logger",
".",
"error",
"(",
"f\"{resource.kind} {resource.name}: found no certificate in {source}?\"",
")",
"return",
"None",
"if",
"secret_type",
"==",
"'kubernetes.io/tls'",
":",
"# OK, we have something to work with. Hopefully.",
"tls_crt",
"=",
"cert_data",
".",
"get",
"(",
"'tls.crt'",
",",
"None",
")",
"if",
"not",
"tls_crt",
":",
"# Having no public half is definitely an error. Having no private half given a public half",
"# might be OK, though -- that's up to our caller to decide.",
"resource",
".",
"ir",
".",
"logger",
".",
"error",
"(",
"f\"{resource.kind} {resource.name}: found data but no certificate in {source}?\"",
")",
"return",
"None",
"tls_key",
"=",
"cert_data",
".",
"get",
"(",
"'tls.key'",
",",
"None",
")",
"elif",
"secret_type",
"==",
"'Opaque'",
":",
"user_key",
"=",
"cert_data",
".",
"get",
"(",
"'user.key'",
",",
"None",
")",
"if",
"not",
"user_key",
":",
"# The opaque keys we support must have user.key, but will likely have nothing else.",
"resource",
".",
"ir",
".",
"logger",
".",
"error",
"(",
"f\"{resource.kind} {resource.name}: found data but no user.key in {source}?\"",
")",
"return",
"None",
"cert",
"=",
"None",
"elif",
"secret_type",
"==",
"'istio.io/key-and-cert'",
":",
"resource",
".",
"ir",
".",
"logger",
".",
"error",
"(",
"f\"{resource.kind} {resource.name}: found data but handler for istio key not finished yet\"",
")",
"return",
"SecretInfo",
"(",
"secret_name",
",",
"namespace",
",",
"secret_type",
",",
"tls_crt",
"=",
"tls_crt",
",",
"tls_key",
"=",
"tls_key",
",",
"user_key",
"=",
"user_key",
")"
] | [
637,
4
] | [
690,
115
] | python | en | ['en', 'error', 'th'] | False |
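A calling sketch; the resource, paths, and cert data are all placeholders for whatever the serialized secret actually contains:

# Sketch only: `resource` is an owning IRResource assumed to be in scope.
cert_data = {"tls.crt": "<PEM cert>", "tls.key": "<PEM key>"}
info = SecretInfo.from_dict(
    resource,
    secret_name="fallback-secret",
    namespace="ambassador",
    source="/ambassador/snapshots/fallback-secret.json",  # hypothetical path
    cert_data=cert_data,
)
if info:
    print(info.to_dict())   # fingerprints only, never raw key material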
SecretHandler.load_secret | (self, resource: 'IRResource', secret_name: str, namespace: str) |
load_secret: given a secret’s name and namespace, pull it from wherever it really lives,
write it to disk, and return a SecretInfo telling the rest of Ambassador where it got written.
This is the fallback load_secret implementation, which doesn't do anything: it is written
assuming that ir.save_secret_info has already filled ir.saved_secrets with any secrets handed in
from watt, so that load_secret will never be called for those secrets. Therefore, if load_secret
gets called at all, it's for a secret that wasn't found, and it should just return None.
:param resource: referencing resource (so that we can correctly default the namespace)
:param secret_name: name of the secret
:param namespace: namespace, if any specific namespace was given
:return: Optional[SecretInfo]
|
load_secret: given a secret’s name and namespace, pull it from wherever it really lives,
write it to disk, and return a SecretInfo telling the rest of Ambassador where it got written. | def load_secret(self, resource: 'IRResource', secret_name: str, namespace: str) -> Optional[SecretInfo]:
"""
load_secret: given a secret’s name and namespace, pull it from wherever it really lives,
write it to disk, and return a SecretInfo telling the rest of Ambassador where it got written.
This is the fallback load_secret implementation, which doesn't do anything: it is written
assuming that ir.save_secret_info has already filled ir.saved_secrets with any secrets handed in
from watt, so that load_secret will never be called for those secrets. Therefore, if load_secret
gets called at all, it's for a secret that wasn't found, and it should just return None.
:param resource: referencing resource (so that we can correctly default the namespace)
:param secret_name: name of the secret
:param namespace: namespace, if any specific namespace was given
:return: Optional[SecretInfo]
"""
self.logger.debug("SecretHandler (%s %s): load secret %s in namespace %s" %
(resource.kind, resource.name, secret_name, namespace))
return None | [
"def",
"load_secret",
"(",
"self",
",",
"resource",
":",
"'IRResource'",
",",
"secret_name",
":",
"str",
",",
"namespace",
":",
"str",
")",
"->",
"Optional",
"[",
"SecretInfo",
"]",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"SecretHandler (%s %s): load secret %s in namespace %s\"",
"%",
"(",
"resource",
".",
"kind",
",",
"resource",
".",
"name",
",",
"secret_name",
",",
"namespace",
")",
")",
"return",
"None"
] | [
763,
4
] | [
782,
19
] | python | en | ['en', 'error', 'th'] | False |
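A hedged sketch of a concrete override, since the record above only shows the do-nothing fallback. The FileSecretHandler class, its on-disk layout, and the use of self.source_root are illustrative assumptions, not part of the source:

import os

class FileSecretHandler(SecretHandler):
    def load_secret(self, resource, secret_name, namespace):
        # Assumed layout: <source_root>/<namespace>/<secret_name>/tls.{crt,key}
        base = os.path.join(self.source_root, namespace, secret_name)
        try:
            with open(os.path.join(base, "tls.crt")) as crt_file:
                tls_crt = crt_file.read()
            with open(os.path.join(base, "tls.key")) as key_file:
                tls_key = key_file.read()
        except OSError:
            return None  # mirror the fallback: unknown secret -> None
        return SecretInfo(secret_name, namespace, "kubernetes.io/tls",
                          tls_crt=tls_crt, tls_key=tls_key)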
SecretHandler.still_needed | (self, resource: 'IRResource', secret_name: str, namespace: str) |
still_needed: remember that a given secret is still needed, so that we can tell watt to
keep paying attention to it.
The default implementation doesn't do much of anything, because it assumes that we're
not running in the watch_hook, so watt has already been told everything it needs to be
told. This should be OK for everything that's not the watch_hook.
:param resource: referencing resource
:param secret_name: name of the secret
:param namespace: namespace of the secret
:return: None
|
still_needed: remember that a given secret is still needed, so that we can tell watt to
keep paying attention to it. | def still_needed(self, resource: 'IRResource', secret_name: str, namespace: str) -> None:
"""
still_needed: remember that a given secret is still needed, so that we can tell watt to
keep paying attention to it.
The default implementation doesn't do much of anything, because it assumes that we're
not running in the watch_hook, so watt has already been told everything it needs to be
told. This should be OK for everything that's not the watch_hook.
:param resource: referencing resource
:param secret_name: name of the secret
:param namespace: namespace of the secret
:return: None
"""
self.logger.debug("SecretHandler (%s %s): secret %s in namespace %s is still needed" %
(resource.kind, resource.name, secret_name, namespace)) | [
"def",
"still_needed",
"(",
"self",
",",
"resource",
":",
"'IRResource'",
",",
"secret_name",
":",
"str",
",",
"namespace",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"SecretHandler (%s %s): secret %s in namespace %s is still needed\"",
"%",
"(",
"resource",
".",
"kind",
",",
"resource",
".",
"name",
",",
"secret_name",
",",
"namespace",
")",
")"
] | [
784,
4
] | [
800,
81
] | python | en | ['en', 'error', 'th'] | False |
SecretHandler.cache_secret | (self, resource: 'IRResource', secret_info: SecretInfo) |
cache_secret: stash the SecretInfo from load_secret into Ambassador’s internal cache,
so that we don’t have to call load_secret again if we need it again.
The default implementation should be usable by everything that's not the watch_hook.
:param resource: referencing resource
:param secret_info: SecretInfo returned from load_secret
:return: SavedSecret
|
cache_secret: stash the SecretInfo from load_secret into Ambassador’s internal cache,
so that we don’t have to call load_secret again if we need it again. | def cache_secret(self, resource: 'IRResource', secret_info: SecretInfo) -> SavedSecret:
"""
cache_secret: stash the SecretInfo from load_secret into Ambassador’s internal cache,
so that we don’t have to call load_secret again if we need it again.
The default implementation should be usable by everything that's not the watch_hook.
:param resource: referencing resource
:param secret_info: SecretInfo returned from load_secret
:return: SavedSecret
"""
name = secret_info.name
namespace = secret_info.namespace
tls_crt = secret_info.tls_crt
tls_key = secret_info.tls_key
user_key = secret_info.user_key
root_crt = secret_info.root_crt
return self.cache_internal(name, namespace, tls_crt, tls_key, user_key, root_crt) | [
"def",
"cache_secret",
"(",
"self",
",",
"resource",
":",
"'IRResource'",
",",
"secret_info",
":",
"SecretInfo",
")",
"->",
"SavedSecret",
":",
"name",
"=",
"secret_info",
".",
"name",
"namespace",
"=",
"secret_info",
".",
"namespace",
"tls_crt",
"=",
"secret_info",
".",
"tls_crt",
"tls_key",
"=",
"secret_info",
".",
"tls_key",
"user_key",
"=",
"secret_info",
".",
"user_key",
"root_crt",
"=",
"secret_info",
".",
"root_crt",
"return",
"self",
".",
"cache_internal",
"(",
"name",
",",
"namespace",
",",
"tls_crt",
",",
"tls_key",
",",
"user_key",
",",
"root_crt",
")"
] | [
802,
4
] | [
821,
89
] | python | en | ['en', 'error', 'th'] | False |
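The load/cache pair above is typically used in sequence; a minimal sketch, assuming handler is a SecretHandler subclass and resource an IRResource:

secret_info = handler.load_secret(resource, "tls-cert", "ambassador")
if secret_info is not None:
    saved = handler.cache_secret(resource, secret_info)  # -> SavedSecret
    # cache_secret simply unpacks the SecretInfo fields and delegates
    # to cache_internal, so later lookups avoid another load.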
SecretHandler.secret_info_from_k8s | (self, resource: 'IRResource',
secret_name: str, namespace: str, source: str,
serialization: Optional[str]) |
secret_info_from_k8s is NO LONGER USED.
|
secret_info_from_k8s is NO LONGER USED.
| def secret_info_from_k8s(self, resource: 'IRResource',
secret_name: str, namespace: str, source: str,
serialization: Optional[str]) -> Optional[SecretInfo]:
"""
secret_info_from_k8s is NO LONGER USED.
"""
objects: Optional[List[Any]] = None
self.logger.debug(f"getting secret info for secret {secret_name} from k8s")
# If serialization is None or empty, we'll just return None.
if serialization:
try:
objects = parse_yaml(serialization)
except yaml.error.YAMLError as e:
self.logger.error(f"{resource.kind} {resource.name}: could not parse {source}: {e}")
if not objects:
# Nothing in the serialization, we're done.
return None
secret_type = None
cert_data = None
ocount = 0
errors = 0
for obj in objects:
ocount += 1
kind = obj.get('kind', None)
if kind != "Secret":
self.logger.error("%s %s: found K8s %s at %s.%d?" %
(resource.kind, resource.name, kind, source, ocount))
errors += 1
continue
metadata = obj.get('metadata', None)
if not metadata:
self.logger.error("%s %s: found K8s Secret with no metadata at %s.%d?" %
(resource.kind, resource.name, source, ocount))
errors += 1
continue
secret_type = metadata.get('type', 'kubernetes.io/tls')
if 'data' in obj:
if cert_data:
self.logger.error("%s %s: found multiple Secrets in %s?" %
(resource.kind, resource.name, source))
errors += 1
continue
cert_data = obj['data']
if errors:
# Bzzt.
return None
return SecretInfo.from_dict(resource, secret_name, namespace, source,
cert_data=cert_data, secret_type=secret_type) | [
"def",
"secret_info_from_k8s",
"(",
"self",
",",
"resource",
":",
"'IRResource'",
",",
"secret_name",
":",
"str",
",",
"namespace",
":",
"str",
",",
"source",
":",
"str",
",",
"serialization",
":",
"Optional",
"[",
"str",
"]",
")",
"->",
"Optional",
"[",
"SecretInfo",
"]",
":",
"objects",
":",
"Optional",
"[",
"List",
"[",
"Any",
"]",
"]",
"=",
"None",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"getting secret info for secret {secret_name} from k8s\"",
")",
"# If serialization is None or empty, we'll just return None.",
"if",
"serialization",
":",
"try",
":",
"objects",
"=",
"parse_yaml",
"(",
"serialization",
")",
"except",
"yaml",
".",
"error",
".",
"YAMLError",
"as",
"e",
":",
"self",
".",
"logger",
".",
"error",
"(",
"f\"{resource.kind} {resource.name}: could not parse {source}: {e}\"",
")",
"if",
"not",
"objects",
":",
"# Nothing in the serialization, we're done.",
"return",
"None",
"secret_type",
"=",
"None",
"cert_data",
"=",
"None",
"ocount",
"=",
"0",
"errors",
"=",
"0",
"for",
"obj",
"in",
"objects",
":",
"ocount",
"+=",
"1",
"kind",
"=",
"obj",
".",
"get",
"(",
"'kind'",
",",
"None",
")",
"if",
"kind",
"!=",
"\"Secret\"",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"%s %s: found K8s %s at %s.%d?\"",
"%",
"(",
"resource",
".",
"kind",
",",
"resource",
".",
"name",
",",
"kind",
",",
"source",
",",
"ocount",
")",
")",
"errors",
"+=",
"1",
"continue",
"metadata",
"=",
"obj",
".",
"get",
"(",
"'metadata'",
",",
"None",
")",
"if",
"not",
"metadata",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"%s %s: found K8s Secret with no metadata at %s.%d?\"",
"%",
"(",
"resource",
".",
"kind",
",",
"resource",
".",
"name",
",",
"source",
",",
"ocount",
")",
")",
"errors",
"+=",
"1",
"continue",
"secret_type",
"=",
"metadata",
".",
"get",
"(",
"'type'",
",",
"'kubernetes.io/tls'",
")",
"if",
"'data'",
"in",
"obj",
":",
"if",
"cert_data",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"%s %s: found multiple Secrets in %s?\"",
"%",
"(",
"resource",
".",
"kind",
",",
"resource",
".",
"name",
",",
"source",
")",
")",
"errors",
"+=",
"1",
"continue",
"cert_data",
"=",
"obj",
"[",
"'data'",
"]",
"if",
"errors",
":",
"# Bzzt.",
"return",
"None",
"return",
"SecretInfo",
".",
"from_dict",
"(",
"resource",
",",
"secret_name",
",",
"namespace",
",",
"source",
",",
"cert_data",
"=",
"cert_data",
",",
"secret_type",
"=",
"secret_type",
")"
] | [
876,
4
] | [
938,
81
] | python | en | ['en', 'error', 'th'] | False |
NullSecretHandler.__init__ | (self, logger: logging.Logger, source_root: Optional[str], cache_dir: Optional[str], version: str) |
Returns a valid SecretInfo (with fake keys) for any requested secret. Also, you can pass
None for source_root and cache_dir to use random temporary directories for them.
|
Returns a valid SecretInfo (with fake keys) for any requested secret. Also, you can pass
None for source_root and cache_dir to use random temporary directories for them.
| def __init__(self, logger: logging.Logger, source_root: Optional[str], cache_dir: Optional[str], version: str) -> None:
"""
Returns a valid SecretInfo (with fake keys) for any requested secret. Also, you can pass
None for source_root and cache_dir to use random temporary directories for them.
"""
if not source_root:
self.tempdir_source = tempfile.TemporaryDirectory(prefix="null-secret-", suffix="-source")
source_root = self.tempdir_source.name
if not cache_dir:
self.tempdir_cache = tempfile.TemporaryDirectory(prefix="null-secret-", suffix="-cache")
cache_dir = self.tempdir_cache.name
logger.info(f'NullSecretHandler using source_root {source_root}, cache_dir {cache_dir}')
super().__init__(logger, source_root, cache_dir, version) | [
"def",
"__init__",
"(",
"self",
",",
"logger",
":",
"logging",
".",
"Logger",
",",
"source_root",
":",
"Optional",
"[",
"str",
"]",
",",
"cache_dir",
":",
"Optional",
"[",
"str",
"]",
",",
"version",
":",
"str",
")",
"->",
"None",
":",
"if",
"not",
"source_root",
":",
"self",
".",
"tempdir_source",
"=",
"tempfile",
".",
"TemporaryDirectory",
"(",
"prefix",
"=",
"\"null-secret-\"",
",",
"suffix",
"=",
"\"-source\"",
")",
"source_root",
"=",
"self",
".",
"tempdir_source",
".",
"name",
"if",
"not",
"cache_dir",
":",
"self",
".",
"tempdir_cache",
"=",
"tempfile",
".",
"TemporaryDirectory",
"(",
"prefix",
"=",
"\"null-secret-\"",
",",
"suffix",
"=",
"\"-cache\"",
")",
"cache_dir",
"=",
"self",
".",
"tempdir_cache",
".",
"name",
"logger",
".",
"info",
"(",
"f'NullSecretHandler using source_root {source_root}, cache_dir {cache_dir}'",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"logger",
",",
"source_root",
",",
"cache_dir",
",",
"version",
")"
] | [
942,
4
] | [
958,
65
] | python | en | ['en', 'error', 'th'] | False |
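A usage sketch matching the signature above; the version string "0" is arbitrary:

import logging

logger = logging.getLogger("ambassador")
handler = NullSecretHandler(logger, None, None, "0")
# Passing None for source_root and cache_dir triggers the random temporary
# directories, and every lookup yields a valid SecretInfo with fake keys.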
_format_map_output | (
result_format,
success,
element_count,
nonnull_count,
unexpected_count,
unexpected_list,
unexpected_index_list,
) | Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation).
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result_format` for more information.
This function handles the logic for mapping those fields for column_map_expectations.
| Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation). | def _format_map_output(
result_format,
success,
element_count,
nonnull_count,
unexpected_count,
unexpected_list,
unexpected_index_list,
):
"""Helper function to construct expectation result objects for map_expectations (such as column_map_expectation
and file_lines_map_expectation).
Expectations support four result_formats: BOOLEAN_ONLY, BASIC, SUMMARY, and COMPLETE.
In each case, the object returned has a different set of populated fields.
See :ref:`result_format` for more information.
This function handles the logic for mapping those fields for column_map_expectations.
"""
# NB: unexpected_count parameter is explicit because some implementing classes may limit the length of unexpected_list
# Incrementally add to result and return when all values for the specified level are present
return_obj = {"success": success}
if result_format["result_format"] == "BOOLEAN_ONLY":
return return_obj
skip_missing = False
if nonnull_count is None:
missing_count = None
skip_missing: bool = True
else:
missing_count = element_count - nonnull_count
if element_count > 0:
unexpected_percent_total = unexpected_count / element_count * 100
if not skip_missing:
missing_percent = missing_count / element_count * 100
if nonnull_count > 0:
unexpected_percent_nonmissing = unexpected_count / nonnull_count * 100
else:
unexpected_percent_nonmissing = None
else:
unexpected_percent_nonmissing = unexpected_percent_total
else:
missing_percent = None
unexpected_percent_total = None
unexpected_percent_nonmissing = None
return_obj["result"] = {
"element_count": element_count,
"unexpected_count": unexpected_count,
"unexpected_percent": unexpected_percent_nonmissing,
"partial_unexpected_list": unexpected_list[
: result_format["partial_unexpected_count"]
],
}
if not skip_missing:
return_obj["result"]["missing_count"] = missing_count
return_obj["result"]["missing_percent"] = missing_percent
return_obj["result"]["unexpected_percent_total"] = unexpected_percent_total
return_obj["result"][
"unexpected_percent_nonmissing"
] = unexpected_percent_nonmissing
if result_format["result_format"] == "BASIC":
return return_obj
# Try to return the most common values, if possible.
if 0 < result_format.get("partial_unexpected_count"):
try:
partial_unexpected_counts = [
{"value": key, "count": value}
for key, value in sorted(
Counter(unexpected_list).most_common(
result_format["partial_unexpected_count"]
),
key=lambda x: (-x[1], x[0]),
)
]
except TypeError:
partial_unexpected_counts = [
"partial_exception_counts requires a hashable type"
]
finally:
return_obj["result"].update(
{
"partial_unexpected_index_list": unexpected_index_list[
: result_format["partial_unexpected_count"]
]
if unexpected_index_list is not None
else None,
"partial_unexpected_counts": partial_unexpected_counts,
}
)
if result_format["result_format"] == "SUMMARY":
return return_obj
return_obj["result"].update(
{
"unexpected_list": unexpected_list,
"unexpected_index_list": unexpected_index_list,
}
)
if result_format["result_format"] == "COMPLETE":
return return_obj
raise ValueError("Unknown result_format {}.".format(result_format["result_format"])) | [
"def",
"_format_map_output",
"(",
"result_format",
",",
"success",
",",
"element_count",
",",
"nonnull_count",
",",
"unexpected_count",
",",
"unexpected_list",
",",
"unexpected_index_list",
",",
")",
":",
"# NB: unexpected_count parameter is explicit some implementing classes may limit the length of unexpected_list",
"# Incrementally add to result and return when all values for the specified level are present",
"return_obj",
"=",
"{",
"\"success\"",
":",
"success",
"}",
"if",
"result_format",
"[",
"\"result_format\"",
"]",
"==",
"\"BOOLEAN_ONLY\"",
":",
"return",
"return_obj",
"skip_missing",
"=",
"False",
"if",
"nonnull_count",
"is",
"None",
":",
"missing_count",
"=",
"None",
"skip_missing",
":",
"bool",
"=",
"True",
"else",
":",
"missing_count",
"=",
"element_count",
"-",
"nonnull_count",
"if",
"element_count",
">",
"0",
":",
"unexpected_percent_total",
"=",
"unexpected_count",
"/",
"element_count",
"*",
"100",
"if",
"not",
"skip_missing",
":",
"missing_percent",
"=",
"missing_count",
"/",
"element_count",
"*",
"100",
"if",
"nonnull_count",
">",
"0",
":",
"unexpected_percent_nonmissing",
"=",
"unexpected_count",
"/",
"nonnull_count",
"*",
"100",
"else",
":",
"unexpected_percent_nonmissing",
"=",
"None",
"else",
":",
"unexpected_percent_nonmissing",
"=",
"unexpected_percent_total",
"else",
":",
"missing_percent",
"=",
"None",
"unexpected_percent_total",
"=",
"None",
"unexpected_percent_nonmissing",
"=",
"None",
"return_obj",
"[",
"\"result\"",
"]",
"=",
"{",
"\"element_count\"",
":",
"element_count",
",",
"\"unexpected_count\"",
":",
"unexpected_count",
",",
"\"unexpected_percent\"",
":",
"unexpected_percent_nonmissing",
",",
"\"partial_unexpected_list\"",
":",
"unexpected_list",
"[",
":",
"result_format",
"[",
"\"partial_unexpected_count\"",
"]",
"]",
",",
"}",
"if",
"not",
"skip_missing",
":",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"missing_count\"",
"]",
"=",
"missing_count",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"missing_percent\"",
"]",
"=",
"missing_percent",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"unexpected_percent_total\"",
"]",
"=",
"unexpected_percent_total",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"unexpected_percent_nonmissing\"",
"]",
"=",
"unexpected_percent_nonmissing",
"if",
"result_format",
"[",
"\"result_format\"",
"]",
"==",
"\"BASIC\"",
":",
"return",
"return_obj",
"# Try to return the most common values, if possible.",
"if",
"0",
"<",
"result_format",
".",
"get",
"(",
"\"partial_unexpected_count\"",
")",
":",
"try",
":",
"partial_unexpected_counts",
"=",
"[",
"{",
"\"value\"",
":",
"key",
",",
"\"count\"",
":",
"value",
"}",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"Counter",
"(",
"unexpected_list",
")",
".",
"most_common",
"(",
"result_format",
"[",
"\"partial_unexpected_count\"",
"]",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"-",
"x",
"[",
"1",
"]",
",",
"x",
"[",
"0",
"]",
")",
",",
")",
"]",
"except",
"TypeError",
":",
"partial_unexpected_counts",
"=",
"[",
"\"partial_exception_counts requires a hashable type\"",
"]",
"finally",
":",
"return_obj",
"[",
"\"result\"",
"]",
".",
"update",
"(",
"{",
"\"partial_unexpected_index_list\"",
":",
"unexpected_index_list",
"[",
":",
"result_format",
"[",
"\"partial_unexpected_count\"",
"]",
"]",
"if",
"unexpected_index_list",
"is",
"not",
"None",
"else",
"None",
",",
"\"partial_unexpected_counts\"",
":",
"partial_unexpected_counts",
",",
"}",
")",
"if",
"result_format",
"[",
"\"result_format\"",
"]",
"==",
"\"SUMMARY\"",
":",
"return",
"return_obj",
"return_obj",
"[",
"\"result\"",
"]",
".",
"update",
"(",
"{",
"\"unexpected_list\"",
":",
"unexpected_list",
",",
"\"unexpected_index_list\"",
":",
"unexpected_index_list",
",",
"}",
")",
"if",
"result_format",
"[",
"\"result_format\"",
"]",
"==",
"\"COMPLETE\"",
":",
"return",
"return_obj",
"raise",
"ValueError",
"(",
"\"Unknown result_format {}.\"",
".",
"format",
"(",
"result_format",
"[",
"\"result_format\"",
"]",
")",
")"
] | [
1576,
0
] | [
1687,
88
] | python | en | ['en', 'en', 'en'] | True |
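A worked call showing how the levels nest; the inputs are made up, and result_format is assumed to have already passed through parse_result_format:

result = _format_map_output(
    result_format={"result_format": "SUMMARY", "partial_unexpected_count": 3},
    success=False,
    element_count=10,
    nonnull_count=8,
    unexpected_count=4,
    unexpected_list=[5, 5, 7, 9],
    unexpected_index_list=[1, 3, 4, 8],
)
# BASIC-level fields:
#   result["result"]["unexpected_percent"] == 50.0   # 4 of 8 non-null values
#   result["result"]["missing_percent"] == 20.0      # 2 of 10 rows missing
# SUMMARY adds the counts, most common value first:
#   result["result"]["partial_unexpected_counts"][0] == {"value": 5, "count": 2}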
Expectation._build_evr | (self, raw_response, configuration) | _build_evr is a lightweight convenience wrapper handling cases where an Expectation implementor
fails to return an EVR but returns the necessary components in a dictionary. | _build_evr is a lightweight convenience wrapper handling cases where an Expectation implementor
fails to return an EVR but returns the necessary components in a dictionary. | def _build_evr(self, raw_response, configuration):
"""_build_evr is a lightweight convenience wrapper handling cases where an Expectation implementor
fails to return an EVR but returns the necessary components in a dictionary."""
if not isinstance(raw_response, ExpectationValidationResult):
if isinstance(raw_response, dict):
evr = ExpectationValidationResult(**raw_response)
evr.expectation_config = configuration
else:
raise GreatExpectationsError("Unable to build EVR")
else:
evr = raw_response
evr.expectation_config = configuration
return evr | [
"def",
"_build_evr",
"(",
"self",
",",
"raw_response",
",",
"configuration",
")",
":",
"if",
"not",
"isinstance",
"(",
"raw_response",
",",
"ExpectationValidationResult",
")",
":",
"if",
"isinstance",
"(",
"raw_response",
",",
"dict",
")",
":",
"evr",
"=",
"ExpectationValidationResult",
"(",
"*",
"*",
"raw_response",
")",
"evr",
".",
"expectation_config",
"=",
"configuration",
"else",
":",
"raise",
"GreatExpectationsError",
"(",
"\"Unable to build EVR\"",
")",
"else",
":",
"evr",
"=",
"raw_response",
"evr",
".",
"expectation_config",
"=",
"configuration",
"return",
"evr"
] | [
518,
4
] | [
530,
18
] | python | en | ['en', 'en', 'en'] | True |
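A sketch of the dict path the wrapper handles; expectation and configuration are assumed to be in scope:

raw = {"success": True, "result": {"observed_value": 3}}
evr = expectation._build_evr(raw, configuration)
assert isinstance(evr, ExpectationValidationResult)
assert evr.expectation_config is configuration
# A raw_response that is neither an EVR nor a dict raises
# GreatExpectationsError("Unable to build EVR").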
Expectation.get_validation_dependencies | (
self,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
) | Returns the result format and metrics required to validate this Expectation using the provided result format. | Returns the result format and metrics required to validate this Expectation using the provided result format. | def get_validation_dependencies(
self,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""Returns the result format and metrics required to validate this Expectation using the provided result format."""
return {
"result_format": parse_result_format(
self.get_runtime_kwargs(
configuration=configuration,
runtime_configuration=runtime_configuration,
).get("result_format")
),
"metrics": dict(),
} | [
"def",
"get_validation_dependencies",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
"=",
"None",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"runtime_configuration",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
")",
":",
"return",
"{",
"\"result_format\"",
":",
"parse_result_format",
"(",
"self",
".",
"get_runtime_kwargs",
"(",
"configuration",
"=",
"configuration",
",",
"runtime_configuration",
"=",
"runtime_configuration",
",",
")",
".",
"get",
"(",
"\"result_format\"",
")",
")",
",",
"\"metrics\"",
":",
"dict",
"(",
")",
",",
"}"
] | [
532,
4
] | [
547,
9
] | python | en | ['en', 'en', 'en'] | True |
Expectation.run_diagnostics | (self, pretty_print=True) |
Produce a diagnostic report about this expectation.
The current uses for this method's output are
using the JSON structure to populate the Public Expectation Gallery
and enabling a fast dev loop for developing new expectations where the
contributors can quickly check the completeness of their expectations.
The content of the report:
* name and description
* "library metadata", such as the GitHub usernames of the expectation's authors
* the execution engines the expectation is implemented for
* the implemented renderers
* tests in "examples" member variable
* the tests are executed against the execution engines for which the expectation
is implemented and the output of the test runs is included in the report.
At least one test case with include_in_gallery=True must be present in the examples to
produce the metrics, renderers and execution engines parts of the report. This is due to
get_validation_dependencies requiring expectation_config as an argument.
If errors are encountered in the process of running the diagnostics, they are assumed to be due to
incompleteness of the Expectation's implementation (e.g., declaring a dependency on Metrics
that do not exist). These errors are added under the "errors" key in the report.
:param pretty_print: TODO: this argument is not currently used. The intent is to return
well-formatted, easily readable text instead of the dictionary when the argument is set
to True
:return: a dictionary view of the report
|
Produce a diagnostic report about this expectation.
The current uses for this method's output are
using the JSON structure to populate the Public Expectation Gallery
and enabling a fast dev loop for developing new expectations where the
contributors can quickly check the completeness of their expectations. | def run_diagnostics(self, pretty_print=True):
"""
Produce a diagnostic report about this expectation.
The current uses for this method's output are
using the JSON structure to populate the Public Expectation Gallery
and enabling a fast dev loop for developing new expectations where the
contributors can quickly check the completeness of their expectations.
The content of the report:
* name and description
* "library metadata", such as the GitHub usernames of the expectation's authors
* the execution engines the expectation is implemented for
* the implemented renderers
* tests in "examples" member variable
* the tests are executed against the execution engines for which the expectation
is implemented and the output of the test runs is included in the report.
At least one test case with include_in_gallery=True must be present in the examples to
produce the metrics, renderers and execution engines parts of the report. This is due to
get_validation_dependencies requiring expectation_config as an argument.
If errors are encountered in the process of running the diagnostics, they are assumed to be due to
incompleteness of the Expectation's implementation (e.g., declaring a dependency on Metrics
that do not exist). These errors are added under the "errors" key in the report.
:param pretty_print: TODO: this argument is not currently used. The intent is to return
well-formatted, easily readable text instead of the dictionary when the argument is set
to True
:return: a dictionary view of the report
"""
camel_name = self.__class__.__name__
snake_name = camel_to_snake(self.__class__.__name__)
docstring, short_description = self._get_docstring_and_short_description()
library_metadata = self._get_library_metadata()
report_obj = {
"description": {
"camel_name": camel_name,
"snake_name": snake_name,
"short_description": short_description,
"docstring": docstring,
},
"library_metadata": library_metadata,
"renderers": {},
"examples": [],
"metrics": [],
"execution_engines": {},
"test_report": [],
"diagnostics_report": [],
}
# Generate artifacts from an example case
gallery_examples = self._get_examples()
report_obj.update({"examples": gallery_examples})
if gallery_examples != []:
example_data, example_test = self._choose_example(gallery_examples)
# TODO: this should be creating a Batch using an engine
test_batch = Batch(data=pd.DataFrame(example_data))
expectation_config = ExpectationConfiguration(
**{"expectation_type": snake_name, "kwargs": example_test}
)
validation_result = None
try:
validation_results = self._instantiate_example_validation_results(
test_batch=test_batch,
expectation_config=expectation_config,
)
validation_result = validation_results[0]
except (
GreatExpectationsError,
AttributeError,
ImportError,
LookupError,
ValueError,
SyntaxError,
) as e:
report_obj = self._add_error_to_diagnostics_report(
report_obj, e, traceback.format_exc()
)
if validation_result is not None:
renderers = self._get_renderer_dict(
expectation_name=snake_name,
expectation_config=expectation_config,
validation_result=validation_result,
)
report_obj.update({"renderers": renderers})
upstream_metrics = None
try:
upstream_metrics = self._get_upstream_metrics(expectation_config)
report_obj.update({"metrics": upstream_metrics})
except GreatExpectationsError as e:
report_obj = self._add_error_to_diagnostics_report(
report_obj, e, traceback.format_exc()
)
execution_engines = None
if upstream_metrics is not None:
execution_engines = self._get_execution_engine_dict(
upstream_metrics=upstream_metrics,
)
report_obj.update({"execution_engines": execution_engines})
try:
tests = self._get_examples(return_only_gallery_examples=False)
if len(tests) > 0:
if execution_engines is not None:
test_results = self._get_test_results(
snake_name,
tests,
execution_engines,
)
report_obj.update({"test_report": test_results})
except Exception as e:
report_obj = self._add_error_to_diagnostics_report(
report_obj, e, traceback.format_exc()
)
return report_obj | [
"def",
"run_diagnostics",
"(",
"self",
",",
"pretty_print",
"=",
"True",
")",
":",
"camel_name",
"=",
"self",
".",
"__class__",
".",
"__name__",
"snake_name",
"=",
"camel_to_snake",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
"docstring",
",",
"short_description",
"=",
"self",
".",
"_get_docstring_and_short_description",
"(",
")",
"library_metadata",
"=",
"self",
".",
"_get_library_metadata",
"(",
")",
"report_obj",
"=",
"{",
"\"description\"",
":",
"{",
"\"camel_name\"",
":",
"camel_name",
",",
"\"snake_name\"",
":",
"snake_name",
",",
"\"short_description\"",
":",
"short_description",
",",
"\"docstring\"",
":",
"docstring",
",",
"}",
",",
"\"library_metadata\"",
":",
"library_metadata",
",",
"\"renderers\"",
":",
"{",
"}",
",",
"\"examples\"",
":",
"[",
"]",
",",
"\"metrics\"",
":",
"[",
"]",
",",
"\"execution_engines\"",
":",
"{",
"}",
",",
"\"test_report\"",
":",
"[",
"]",
",",
"\"diagnostics_report\"",
":",
"[",
"]",
",",
"}",
"# Generate artifacts from an example case",
"gallery_examples",
"=",
"self",
".",
"_get_examples",
"(",
")",
"report_obj",
".",
"update",
"(",
"{",
"\"examples\"",
":",
"gallery_examples",
"}",
")",
"if",
"gallery_examples",
"!=",
"[",
"]",
":",
"example_data",
",",
"example_test",
"=",
"self",
".",
"_choose_example",
"(",
"gallery_examples",
")",
"# TODO: this should be creating a Batch using an engine",
"test_batch",
"=",
"Batch",
"(",
"data",
"=",
"pd",
".",
"DataFrame",
"(",
"example_data",
")",
")",
"expectation_config",
"=",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"snake_name",
",",
"\"kwargs\"",
":",
"example_test",
"}",
")",
"validation_result",
"=",
"None",
"try",
":",
"validation_results",
"=",
"self",
".",
"_instantiate_example_validation_results",
"(",
"test_batch",
"=",
"test_batch",
",",
"expectation_config",
"=",
"expectation_config",
",",
")",
"validation_result",
"=",
"validation_results",
"[",
"0",
"]",
"except",
"(",
"GreatExpectationsError",
",",
"AttributeError",
",",
"ImportError",
",",
"LookupError",
",",
"ValueError",
",",
"SyntaxError",
",",
")",
"as",
"e",
":",
"report_obj",
"=",
"self",
".",
"_add_error_to_diagnostics_report",
"(",
"report_obj",
",",
"e",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"if",
"validation_result",
"is",
"not",
"None",
":",
"renderers",
"=",
"self",
".",
"_get_renderer_dict",
"(",
"expectation_name",
"=",
"snake_name",
",",
"expectation_config",
"=",
"expectation_config",
",",
"validation_result",
"=",
"validation_result",
",",
")",
"report_obj",
".",
"update",
"(",
"{",
"\"renderers\"",
":",
"renderers",
"}",
")",
"upstream_metrics",
"=",
"None",
"try",
":",
"upstream_metrics",
"=",
"self",
".",
"_get_upstream_metrics",
"(",
"expectation_config",
")",
"report_obj",
".",
"update",
"(",
"{",
"\"metrics\"",
":",
"upstream_metrics",
"}",
")",
"except",
"GreatExpectationsError",
"as",
"e",
":",
"report_obj",
"=",
"self",
".",
"_add_error_to_diagnostics_report",
"(",
"report_obj",
",",
"e",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"execution_engines",
"=",
"None",
"if",
"upstream_metrics",
"is",
"not",
"None",
":",
"execution_engines",
"=",
"self",
".",
"_get_execution_engine_dict",
"(",
"upstream_metrics",
"=",
"upstream_metrics",
",",
")",
"report_obj",
".",
"update",
"(",
"{",
"\"execution_engines\"",
":",
"execution_engines",
"}",
")",
"try",
":",
"tests",
"=",
"self",
".",
"_get_examples",
"(",
"return_only_gallery_examples",
"=",
"False",
")",
"if",
"len",
"(",
"tests",
")",
">",
"0",
":",
"if",
"execution_engines",
"is",
"not",
"None",
":",
"test_results",
"=",
"self",
".",
"_get_test_results",
"(",
"snake_name",
",",
"tests",
",",
"execution_engines",
",",
")",
"report_obj",
".",
"update",
"(",
"{",
"\"test_report\"",
":",
"test_results",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"report_obj",
"=",
"self",
".",
"_add_error_to_diagnostics_report",
"(",
"report_obj",
",",
"e",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"report_obj"
] | [
687,
4
] | [
811,
25
] | python | en | ['en', 'error', 'th'] | False |
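A hedged dev-loop sketch; MyCustomExpectation stands in for any Expectation subclass under development:

diagnostics = MyCustomExpectation().run_diagnostics()
sorted(diagnostics.keys())
# ['description', 'diagnostics_report', 'examples', 'execution_engines',
#  'library_metadata', 'metrics', 'renderers', 'test_report']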
Expectation._get_examples | (self, return_only_gallery_examples=True) |
Get a list of examples from the object's `examples` member variable.
:param return_only_gallery_examples: if True, include only test examples where `include_in_gallery` is true
:return: list of examples or [], if no examples exist
|
Get a list of examples from the object's `examples` member variable. | def _get_examples(self, return_only_gallery_examples=True) -> List[Dict]:
"""
Get a list of examples from the object's `examples` member variable.
:param return_only_gallery_examples: if True, include only test examples where `include_in_gallery` is true
:return: list of examples or [], if no examples exist
"""
try:
all_examples = self.examples
except AttributeError:
return []
included_examples = []
for example in all_examples:
# print(example)
included_tests = []
for test in example["tests"]:
if (
test.get("include_in_gallery") == True
or return_only_gallery_examples == False
):
included_tests.append(test)
if len(included_tests) > 0:
copied_example = deepcopy(example)
copied_example["tests"] = included_tests
included_examples.append(copied_example)
return included_examples | [
"def",
"_get_examples",
"(",
"self",
",",
"return_only_gallery_examples",
"=",
"True",
")",
"->",
"List",
"[",
"Dict",
"]",
":",
"try",
":",
"all_examples",
"=",
"self",
".",
"examples",
"except",
"AttributeError",
":",
"return",
"[",
"]",
"included_examples",
"=",
"[",
"]",
"for",
"example",
"in",
"all_examples",
":",
"# print(example)",
"included_tests",
"=",
"[",
"]",
"for",
"test",
"in",
"example",
"[",
"\"tests\"",
"]",
":",
"if",
"(",
"test",
".",
"get",
"(",
"\"include_in_gallery\"",
")",
"==",
"True",
"or",
"return_only_gallery_examples",
"==",
"False",
")",
":",
"included_tests",
".",
"append",
"(",
"test",
")",
"if",
"len",
"(",
"included_tests",
")",
">",
"0",
":",
"copied_example",
"=",
"deepcopy",
"(",
"example",
")",
"copied_example",
"[",
"\"tests\"",
"]",
"=",
"included_tests",
"included_examples",
".",
"append",
"(",
"copied_example",
")",
"return",
"included_examples"
] | [
830,
4
] | [
859,
32
] | python | en | ['en', 'error', 'th'] | False |
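A sketch of the examples shape the filter expects; only "tests" and "include_in_gallery" are referenced by the code above, so the remaining keys are typical fixture fields and are assumptions:

examples = [{
    "data": {"x": [1, 2, 3]},
    "tests": [
        {"title": "gallery_case", "include_in_gallery": True,
         "in": {"column": "x"}, "out": {"success": True}},
        {"title": "dev_only_case", "include_in_gallery": False,
         "in": {"column": "x"}, "out": {"success": True}},
    ],
}]
# _get_examples() returns a deep copy keeping only gallery_case;
# _get_examples(return_only_gallery_examples=False) keeps both tests.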
ExpectColumnToExist.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
# Ensuring that a proper value has been provided
try:
assert "column" in configuration.kwargs, "A column name must be provided"
assert isinstance(
configuration.kwargs["column"], str
), "Column name must be a string"
assert (
isinstance(configuration.kwargs.get("column_index"), (int, dict))
or configuration.kwargs.get("column_index") is None
), "column_index must be an integer or None"
if isinstance(configuration.kwargs.get("column_index"), dict):
assert "$PARAMETER" in configuration.kwargs.get(
"column_index"
), 'Evaluation Parameter dict for column_index kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"# Setting up a configuration",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"# Ensuring that a proper value has been provided",
"try",
":",
"assert",
"\"column\"",
"in",
"configuration",
".",
"kwargs",
",",
"\"A column name must be provided\"",
"assert",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"column\"",
"]",
",",
"str",
")",
",",
"\"Column name must be a string\"",
"assert",
"(",
"isinstance",
"(",
"configuration",
".",
"kwargs",
".",
"get",
"(",
"\"column_index\"",
")",
",",
"(",
"int",
",",
"dict",
")",
")",
"or",
"configuration",
".",
"kwargs",
".",
"get",
"(",
"\"column_index\"",
")",
"is",
"None",
")",
",",
"\"column_index must be an integer or None\"",
"if",
"isinstance",
"(",
"configuration",
".",
"kwargs",
".",
"get",
"(",
"\"column_index\"",
")",
",",
"dict",
")",
":",
"assert",
"\"$PARAMETER\"",
"in",
"configuration",
".",
"kwargs",
".",
"get",
"(",
"\"column_index\"",
")",
",",
"'Evaluation Parameter dict for column_index kwarg must have \"$PARAMETER\" key.'",
"except",
"AssertionError",
"as",
"e",
":",
"raise",
"InvalidExpectationConfigurationError",
"(",
"str",
"(",
"e",
")",
")",
"return",
"True"
] | [
71,
4
] | [
102,
19
] | python | en | ['en', 'error', 'th'] | False |
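A sketch of the failure path; expectation is assumed to be an ExpectColumnToExist instance:

config = ExpectationConfiguration(
    expectation_type="expect_column_to_exist",
    kwargs={"column": 42},  # fails the "must be a string" assertion
)
expectation.validate_configuration(config)
# raises InvalidExpectationConfigurationError: Column name must be a string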
Explainer.__init__ | (self, method, control_name, X, tau, classes, model_tau=None,
features=None, normalize=True, test_size=0.3, random_state=None, override_checks=False,
r_learners=None) |
The Explainer class handles all feature explanation/interpretation functions, including plotting
feature importances, shapley value distributions, and shapley value dependency plots.
Currently supported methods are:
- auto (calculates importance based on estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if none provided, it uses lightgbm's LGBMRegressor as estimator, and "gain" as
importance type
- permutation (calculates importance based on mean decrease in accuracy when a feature column is permuted;
estimator can be any form)
- shapley (calculates shapley values; estimator must be tree-based)
Hint: for permutation, downsample data for better performance especially if X.shape[1] is large
Args:
method (str): auto, permutation, shapley
control_name (str/int/float): name of control group
X (np.matrix): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
classes (dict): a mapping of treatment names to indices (used for indexing tau array)
model_tau (sklearn/lightgbm/xgboost model object): a model object
features (np.array): list/array of feature names. If None, an enumerated list will be used.
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
override_checks (bool): overrides self.check_conditions (e.g. if importance/shapley values are pre-computed)
r_learners (dict): a mapping of treatment group to fitted R Learners
|
The Explainer class handles all feature explanation/interpretation functions, including plotting
feature importances, shapley value distributions, and shapley value dependency plots. | def __init__(self, method, control_name, X, tau, classes, model_tau=None,
features=None, normalize=True, test_size=0.3, random_state=None, override_checks=False,
r_learners=None):
"""
The Explainer class handles all feature explanation/interpretation functions, including plotting
feature importances, shapley value distributions, and shapley value dependency plots.
Currently supported methods are:
- auto (calculates importance based on estimator's default implementation of feature importance;
estimator must be tree-based)
Note: if none provided, it uses lightgbm's LGBMRegressor as estimator, and "gain" as
importance type
- permutation (calculates importance based on mean decrease in accuracy when a feature column is permuted;
estimator can be any form)
- shapley (calculates shapley values; estimator must be tree-based)
Hint: for permutation, downsample data for better performance especially if X.shape[1] is large
Args:
method (str): auto, permutation, shapley
control_name (str/int/float): name of control group
X (np.matrix): a feature matrix
tau (np.array): a treatment effect vector (estimated/actual)
classes (dict): a mapping of treatment names to indices (used for indexing tau array)
model_tau (sklearn/lightgbm/xgboost model object): a model object
features (np.array): list/array of feature names. If None, an enumerated list will be used.
normalize (bool): normalize by sum of importances if method=auto (defaults to True)
test_size (float/int): if float, represents the proportion of the dataset to include in the test split.
If int, represents the absolute number of test samples (used for estimating
permutation importance)
random_state (int/RandomState instance/None): random state used in permutation importance estimation
override_checks (bool): overrides self.check_conditions (e.g. if importance/shapley values are pre-computed)
r_learners (dict): a mapping of treatment group to fitted R Learners
"""
self.method = method
self.control_name = control_name
self.X = convert_pd_to_np(X)
self.tau = convert_pd_to_np(tau)
if self.tau is not None and self.tau.ndim == 1:
self.tau = self.tau.reshape(-1, 1)
self.classes = classes
self.model_tau = LGBMRegressor(importance_type='gain') if model_tau is None else model_tau
self.features = features
self.normalize = normalize
self.test_size = test_size
self.random_state = random_state
self.override_checks = override_checks
self.r_learners = r_learners
if not self.override_checks:
self.check_conditions()
self.create_feature_names()
self.build_new_tau_models() | [
"def",
"__init__",
"(",
"self",
",",
"method",
",",
"control_name",
",",
"X",
",",
"tau",
",",
"classes",
",",
"model_tau",
"=",
"None",
",",
"features",
"=",
"None",
",",
"normalize",
"=",
"True",
",",
"test_size",
"=",
"0.3",
",",
"random_state",
"=",
"None",
",",
"override_checks",
"=",
"False",
",",
"r_learners",
"=",
"None",
")",
":",
"self",
".",
"method",
"=",
"method",
"self",
".",
"control_name",
"=",
"control_name",
"self",
".",
"X",
"=",
"convert_pd_to_np",
"(",
"X",
")",
"self",
".",
"tau",
"=",
"convert_pd_to_np",
"(",
"tau",
")",
"if",
"self",
".",
"tau",
"is",
"not",
"None",
"and",
"self",
".",
"tau",
".",
"ndim",
"==",
"1",
":",
"self",
".",
"tau",
"=",
"self",
".",
"tau",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"self",
".",
"classes",
"=",
"classes",
"self",
".",
"model_tau",
"=",
"LGBMRegressor",
"(",
"importance_type",
"=",
"'gain'",
")",
"if",
"model_tau",
"is",
"None",
"else",
"model_tau",
"self",
".",
"features",
"=",
"features",
"self",
".",
"normalize",
"=",
"normalize",
"self",
".",
"test_size",
"=",
"test_size",
"self",
".",
"random_state",
"=",
"random_state",
"self",
".",
"override_checks",
"=",
"override_checks",
"self",
".",
"r_learners",
"=",
"r_learners",
"if",
"not",
"self",
".",
"override_checks",
":",
"self",
".",
"check_conditions",
"(",
")",
"self",
".",
"create_feature_names",
"(",
")",
"self",
".",
"build_new_tau_models",
"(",
")"
] | [
14,
4
] | [
65,
39
] | python | en | ['en', 'error', 'th'] | False |
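A construction sketch matching the signature above; the data is synthetic:

import numpy as np

X = np.random.normal(size=(1000, 5))
tau = np.random.normal(size=(1000, 2))          # one column per treatment
classes = {"treatment_a": 0, "treatment_b": 1}  # group name -> tau column

explainer = Explainer(method="auto", control_name="control",
                      X=X, tau=tau, classes=classes)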
Explainer.check_conditions | (self) |
Checks for multiple conditions:
- method is valid
- X, tau, and classes are specified
- model_tau has feature_importances_ attribute after fitting
|
Checks for multiple conditions:
- method is valid
- X, tau, and classes are specified
- model_tau has feature_importances_ attribute after fitting
| def check_conditions(self):
"""
Checks for multiple conditions:
- method is valid
- X, tau, and classes are specified
- model_tau has feature_importances_ attribute after fitting
"""
assert self.method in VALID_METHODS, 'Current supported methods: {}'.format(', '.join(VALID_METHODS))
assert all(obj is not None for obj in (self.X, self.tau, self.classes)), \
"X, tau, and classes must be provided."
model_test = deepcopy(self.model_tau)
model_test.fit([[0], [1]], [0, 1]) # Fit w/ dummy data to check for feature_importances_ below
assert hasattr(model_test, "feature_importances_"), \
"model_tau must have the feature_importances_ method (after fitting)" | [
"def",
"check_conditions",
"(",
"self",
")",
":",
"assert",
"self",
".",
"method",
"in",
"VALID_METHODS",
",",
"'Current supported methods: {}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"VALID_METHODS",
")",
")",
"assert",
"all",
"(",
"obj",
"is",
"not",
"None",
"for",
"obj",
"in",
"(",
"self",
".",
"X",
",",
"self",
".",
"tau",
",",
"self",
".",
"classes",
")",
")",
",",
"\"X, tau, and classes must be provided.\"",
"model_test",
"=",
"deepcopy",
"(",
"self",
".",
"model_tau",
")",
"model_test",
".",
"fit",
"(",
"[",
"[",
"0",
"]",
",",
"[",
"1",
"]",
"]",
",",
"[",
"0",
",",
"1",
"]",
")",
"# Fit w/ dummy data to check for feature_importances_ below",
"assert",
"hasattr",
"(",
"model_test",
",",
"\"feature_importances_\"",
")",
",",
"\"model_tau must have the feature_importances_ method (after fitting)\""
] | [
67,
4
] | [
82,
81
] | python | en | ['en', 'error', 'th'] | False |
Explainer.create_feature_names | (self) |
Creates feature names (simple enumerated list) if not provided in __init__.
|
Creates feature names (simple enumerated list) if not provided in __init__.
| def create_feature_names(self):
"""
Creates feature names (simple enumerated list) if not provided in __init__.
"""
if self.features is None:
num_features = self.X.shape[1]
self.features = ['Feature_{:03d}'.format(i) for i in range(num_features)] | [
"def",
"create_feature_names",
"(",
"self",
")",
":",
"if",
"self",
".",
"features",
"is",
"None",
":",
"num_features",
"=",
"self",
".",
"X",
".",
"shape",
"[",
"1",
"]",
"self",
".",
"features",
"=",
"[",
"'Feature_{:03d}'",
".",
"format",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"num_features",
")",
"]"
] | [
84,
4
] | [
90,
85
] | python | en | ['en', 'error', 'th'] | False |
Explainer.build_new_tau_models | (self) |
Builds tau models (using X to predict estimated/actual tau) for each treatment group.
|
Builds tau models (using X to predict estimated/actual tau) for each treatment group.
| def build_new_tau_models(self):
"""
Builds tau models (using X to predict estimated/actual tau) for each treatment group.
"""
if self.method == 'permutation':
self.X_train, self.X_test, self.tau_train, self.tau_test = train_test_split(self.X,
self.tau,
test_size=self.test_size,
random_state=self.random_state)
else:
self.X_train, self.tau_train = self.X, self.tau
if self.r_learners is not None:
self.models_tau = deepcopy(self.r_learners)
else:
self.models_tau = {group: deepcopy(self.model_tau) for group in self.classes}
for group, idx in self.classes.items():
self.models_tau[group].fit(self.X_train, self.tau_train[:, idx]) | [
"def",
"build_new_tau_models",
"(",
"self",
")",
":",
"if",
"self",
".",
"method",
"in",
"(",
"'permutation'",
")",
":",
"self",
".",
"X_train",
",",
"self",
".",
"X_test",
",",
"self",
".",
"tau_train",
",",
"self",
".",
"tau_test",
"=",
"train_test_split",
"(",
"self",
".",
"X",
",",
"self",
".",
"tau",
",",
"test_size",
"=",
"self",
".",
"test_size",
",",
"random_state",
"=",
"self",
".",
"random_state",
")",
"else",
":",
"self",
".",
"X_train",
",",
"self",
".",
"tau_train",
"=",
"self",
".",
"X",
",",
"self",
".",
"tau",
"if",
"self",
".",
"r_learners",
"is",
"not",
"None",
":",
"self",
".",
"models_tau",
"=",
"deepcopy",
"(",
"self",
".",
"r_learners",
")",
"else",
":",
"self",
".",
"models_tau",
"=",
"{",
"group",
":",
"deepcopy",
"(",
"self",
".",
"model_tau",
")",
"for",
"group",
"in",
"self",
".",
"classes",
"}",
"for",
"group",
",",
"idx",
"in",
"self",
".",
"classes",
".",
"items",
"(",
")",
":",
"self",
".",
"models_tau",
"[",
"group",
"]",
".",
"fit",
"(",
"self",
".",
"X_train",
",",
"self",
".",
"tau_train",
"[",
":",
",",
"idx",
"]",
")"
] | [
92,
4
] | [
109,
80
] | python | en | ['en', 'error', 'th'] | False |
Explainer.get_importance | (self) |
Calculates feature importances for each treatment group, based on specified method in __init__.
|
Calculates feature importances for each treatment group, based on specified method in __init__.
| def get_importance(self):
"""
Calculates feature importances for each treatment group, based on specified method in __init__.
"""
importance_catalog = {'auto': self.default_importance, 'permutation': self.perm_importance}
importance_dict = importance_catalog[self.method]()
importance_dict = {group: pd.Series(array, index=self.features).sort_values(ascending=False)
for group, array in importance_dict.items()}
return importance_dict | [
"def",
"get_importance",
"(",
"self",
")",
":",
"importance_catalog",
"=",
"{",
"'auto'",
":",
"self",
".",
"default_importance",
",",
"'permutation'",
":",
"self",
".",
"perm_importance",
"}",
"importance_dict",
"=",
"importance_catalog",
"[",
"self",
".",
"method",
"]",
"(",
")",
"importance_dict",
"=",
"{",
"group",
":",
"pd",
".",
"Series",
"(",
"array",
",",
"index",
"=",
"self",
".",
"features",
")",
".",
"sort_values",
"(",
"ascending",
"=",
"False",
")",
"for",
"group",
",",
"array",
"in",
"importance_dict",
".",
"items",
"(",
")",
"}",
"return",
"importance_dict"
] | [
111,
4
] | [
120,
30
] | python | en | ['en', 'error', 'th'] | False |
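The return value is one descending-sorted pandas Series per treatment group; a short usage sketch, reusing the explainer built earlier:

importance_dict = explainer.get_importance()
for group, series in importance_dict.items():
    print(group, series.head(3))  # top-3 features per group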
Explainer.default_importance | (self) |
Calculates feature importances for each treatment group, based on the model_tau's default implementation.
|
Calculates feature importances for each treatment group, based on the model_tau's default implementation.
| def default_importance(self):
"""
Calculates feature importances for each treatment group, based on the model_tau's default implementation.
"""
importance_dict = {}
if self.r_learners is not None:
self.models_tau = deepcopy(self.r_learners)
for group, idx in self.classes.items():
importance_dict[group] = self.models_tau[group].feature_importances_
if self.normalize:
importance_dict[group] = importance_dict[group] / importance_dict[group].sum()
return importance_dict | [
"def",
"default_importance",
"(",
"self",
")",
":",
"importance_dict",
"=",
"{",
"}",
"if",
"self",
".",
"r_learners",
"is",
"not",
"None",
":",
"self",
".",
"models_tau",
"=",
"deepcopy",
"(",
"self",
".",
"r_learners",
")",
"for",
"group",
",",
"idx",
"in",
"self",
".",
"classes",
".",
"items",
"(",
")",
":",
"importance_dict",
"[",
"group",
"]",
"=",
"self",
".",
"models_tau",
"[",
"group",
"]",
".",
"feature_importances_",
"if",
"self",
".",
"normalize",
":",
"importance_dict",
"[",
"group",
"]",
"=",
"importance_dict",
"[",
"group",
"]",
"/",
"importance_dict",
"[",
"group",
"]",
".",
"sum",
"(",
")",
"return",
"importance_dict"
] | [
122,
4
] | [
134,
30
] | python | en | ['en', 'error', 'th'] | False |
Explainer.perm_importance | (self) |
Calculates feature importances for each treatment group, based on the permutation method.
|
Calculates feature importances for each treatment group, based on the permutation method.
| def perm_importance(self):
"""
Calculates feature importances for each treatment group, based on the permutation method.
"""
importance_dict = {}
if self.r_learners is not None:
self.models_tau = deepcopy(self.r_learners)
self.X_test, self.tau_test = self.X, self.tau
for group, idx in self.classes.items():
perm_estimator = self.models_tau[group]
importance_dict[group] = permutation_importance(estimator=perm_estimator,
X=self.X_test,
y=self.tau_test[:, idx],
random_state=self.random_state).importances_mean
return importance_dict | [
"def",
"perm_importance",
"(",
"self",
")",
":",
"importance_dict",
"=",
"{",
"}",
"if",
"self",
".",
"r_learners",
"is",
"not",
"None",
":",
"self",
".",
"models_tau",
"=",
"deepcopy",
"(",
"self",
".",
"r_learners",
")",
"self",
".",
"X_test",
",",
"self",
".",
"tau_test",
"=",
"self",
".",
"X",
",",
"self",
".",
"tau",
"for",
"group",
",",
"idx",
"in",
"self",
".",
"classes",
".",
"items",
"(",
")",
":",
"perm_estimator",
"=",
"self",
".",
"models_tau",
"[",
"group",
"]",
"importance_dict",
"[",
"group",
"]",
"=",
"permutation_importance",
"(",
"estimator",
"=",
"perm_estimator",
",",
"X",
"=",
"self",
".",
"X_test",
",",
"y",
"=",
"self",
".",
"tau_test",
"[",
":",
",",
"idx",
"]",
",",
"random_state",
"=",
"self",
".",
"random_state",
")",
".",
"importances_mean",
"return",
"importance_dict"
] | [
136,
4
] | [
151,
30
] | python | en | ['en', 'error', 'th'] | False |
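Per group, the method above is equivalent to a direct sklearn call; a hedged sketch for one group:

from sklearn.inspection import permutation_importance

scores = permutation_importance(
    estimator=explainer.models_tau["treatment_a"],
    X=explainer.X_test,
    y=explainer.tau_test[:, explainer.classes["treatment_a"]],
    random_state=42,
).importances_mean  # what perm_importance stores for the group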
Explainer.get_shap_values | (self) |
Calculates shapley values for each treatment group.
|
Calculates shapley values for each treatment group.
| def get_shap_values(self):
"""
Calculates shapley values for each treatment group.
"""
shap_dict = {}
for group, mod in self.models_tau.items():
explainer = shap.TreeExplainer(mod)
if self.r_learners is not None:
explainer.model.original_model.params['objective'] = None # hacky way of running shap without error
shap_values = explainer.shap_values(self.X)
shap_dict[group] = shap_values
return shap_dict | [
"def",
"get_shap_values",
"(",
"self",
")",
":",
"shap_dict",
"=",
"{",
"}",
"for",
"group",
",",
"mod",
"in",
"self",
".",
"models_tau",
".",
"items",
"(",
")",
":",
"explainer",
"=",
"shap",
".",
"TreeExplainer",
"(",
"mod",
")",
"if",
"self",
".",
"r_learners",
"is",
"not",
"None",
":",
"explainer",
".",
"model",
".",
"original_model",
".",
"params",
"[",
"'objective'",
"]",
"=",
"None",
"# hacky way of running shap without error",
"shap_values",
"=",
"explainer",
".",
"shap_values",
"(",
"self",
".",
"X",
")",
"shap_dict",
"[",
"group",
"]",
"=",
"shap_values",
"return",
"shap_dict"
] | [
153,
4
] | [
165,
24
] | python | en | ['en', 'error', 'th'] | False |
Explainer.plot_importance | (self, importance_dict=None, title_prefix='') |
Calculates and plots feature importances for each treatment group, based on specified method in __init__.
Skips the calculation part if importance_dict is given.
|
Calculates and plots feature importances for each treatment group, based on specified method in __init__.
Skips the calculation part if importance_dict is given.
| def plot_importance(self, importance_dict=None, title_prefix=''):
"""
Calculates and plots feature importances for each treatment group, based on specified method in __init__.
Skips the calculation part if importance_dict is given.
"""
if importance_dict is None:
importance_dict = self.get_importance()
for group, series in importance_dict.items():
plt.figure()
series.sort_values().plot(kind='barh', figsize=(12, 8))
title = group
if title_prefix != '':
title = '{} - {}'.format(title_prefix, title)
plt.title(title) | [
"def",
"plot_importance",
"(",
"self",
",",
"importance_dict",
"=",
"None",
",",
"title_prefix",
"=",
"''",
")",
":",
"if",
"importance_dict",
"is",
"None",
":",
"importance_dict",
"=",
"self",
".",
"get_importance",
"(",
")",
"for",
"group",
",",
"series",
"in",
"importance_dict",
".",
"items",
"(",
")",
":",
"plt",
".",
"figure",
"(",
")",
"series",
".",
"sort_values",
"(",
")",
".",
"plot",
"(",
"kind",
"=",
"'barh'",
",",
"figsize",
"=",
"(",
"12",
",",
"8",
")",
")",
"title",
"=",
"group",
"if",
"title_prefix",
"!=",
"''",
":",
"title",
"=",
"'{} - {}'",
".",
"format",
"(",
"title_prefix",
",",
"title",
")",
"plt",
".",
"title",
"(",
"title",
")"
] | [
167,
4
] | [
180,
28
] | python | en | ['en', 'error', 'th'] | False |
Explainer.plot_shap_values | (self, shap_dict=None) |
Calculates and plots the distribution of shapley values of each feature, for each treatment group.
Skips the calculation part if shap_dict is given.
|
Calculates and plots the distribution of shapley values of each feature, for each treatment group.
Skips the calculation part if shap_dict is given.
| def plot_shap_values(self, shap_dict=None):
"""
Calculates and plots the distribution of shapley values of each feature, for each treatment group.
Skips the calculation part if shap_dict is given.
"""
if shap_dict is None:
shap_dict = self.get_shap_values()
for group, values in shap_dict.items():
plt.title(group)
shap.summary_plot(values, features=self.X, feature_names=self.features) | [
"def",
"plot_shap_values",
"(",
"self",
",",
"shap_dict",
"=",
"None",
")",
":",
"if",
"shap_dict",
"is",
"None",
":",
"shap_dict",
"=",
"self",
".",
"get_shap_values",
"(",
")",
"for",
"group",
",",
"values",
"in",
"shap_dict",
".",
"items",
"(",
")",
":",
"plt",
".",
"title",
"(",
"group",
")",
"shap",
".",
"summary_plot",
"(",
"values",
",",
"features",
"=",
"self",
".",
"X",
",",
"feature_names",
"=",
"self",
".",
"features",
")"
] | [
182,
4
] | [
192,
83
] | python | en | ['en', 'error', 'th'] | False |
Explainer.plot_shap_dependence | (self, treatment_group, feature_idx, shap_dict=None, interaction_idx='auto', **kwargs) |
Plots dependency of shapley values for a specified feature, colored by an interaction feature.
Skips the calculation part if shap_dict is given.
This plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extension of the classical partial dependence plots. Vertical dispersion of the
data points represents interaction effects.
Args:
treatment_group (str or int): name of treatment group to create dependency plot on
feature_idx (str or int): feature index / name to create dependency plot on
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
interaction_idx (optional, str or int): feature index / name used in coloring scheme as interaction feature.
If "auto" then shap.common.approximate_interactions is used to pick what seems to be the
strongest interaction (note that to find the true strongest interaction you need to compute
the SHAP interaction values).
|
Plots the dependency of Shapley values on a specified feature, colored by an interaction feature.
Skips the calculation part if shap_dict is given. | def plot_shap_dependence(self, treatment_group, feature_idx, shap_dict=None, interaction_idx='auto', **kwargs):
"""
Plots the dependency of Shapley values on a specified feature, colored by an interaction feature.
Skips the calculation part if shap_dict is given.
This plots the value of the feature on the x-axis and the SHAP value of the same feature
on the y-axis. This shows how the model depends on the given feature, and is like a
richer extension of the classical partial dependence plots. Vertical dispersion of the
data points represents interaction effects.
Args:
treatment_group (str or int): name of treatment group to create dependency plot on
feature_idx (str or int): feature index / name to create dependency plot on
shap_dict (optional, dict): a dict of shapley value matrices. If None, shap_dict will be computed.
interaction_idx (optional, str or int): feature index / name used in coloring scheme as interaction feature.
If "auto" then shap.common.approximate_interactions is used to pick what seems to be the
strongest interaction (note that to find the true strongest interaction you need to compute
the SHAP interaction values).
"""
if shap_dict is None:
shap_dict = self.get_shap_values()
shap_values = shap_dict[treatment_group]
shap.dependence_plot(feature_idx, shap_values, self.X, interaction_index=interaction_idx,
feature_names=self.features, **kwargs) | [
"def",
"plot_shap_dependence",
"(",
"self",
",",
"treatment_group",
",",
"feature_idx",
",",
"shap_dict",
"=",
"None",
",",
"interaction_idx",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"shap_dict",
"is",
"None",
":",
"shap_dict",
"=",
"self",
".",
"get_shap_values",
"(",
")",
"shap_values",
"=",
"shap_dict",
"[",
"treatment_group",
"]",
"shap",
".",
"dependence_plot",
"(",
"feature_idx",
",",
"shap_values",
",",
"self",
".",
"X",
",",
"interaction_index",
"=",
"interaction_idx",
",",
"feature_names",
"=",
"self",
".",
"features",
",",
"*",
"*",
"kwargs",
")"
] | [
194,
4
] | [
219,
67
] | python | en | ['en', 'error', 'th'] | False |
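An illustrative, self-contained call to the underlying shap.dependence_plot API used above, assuming the shap and scikit-learn packages are available; the model and data are synthetic.

import numpy as np
import shap
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X = rng.random((200, 3))
y = 2 * X[:, 0] + rng.normal(scale=0.1, size=200)
model = RandomForestRegressor(n_estimators=20, random_state=0).fit(X, y)
shap_values = shap.TreeExplainer(model).shap_values(X)
# Feature 0 on the x-axis, its SHAP value on the y-axis, colored by an
# automatically chosen interaction feature (interaction_index='auto').
shap.dependence_plot(0, shap_values, X, interaction_index='auto',
                     feature_names=['f0', 'f1', 'f2'])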
create_connection | (db_file) | create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
| create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
| def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
try:
conn = sqlite3.connect(db_file)
return conn
except sqlite3.Error as e:
logging.warning('Sqlite3 connection Error: {}'.format(e))
print(e)
return None | [
"def",
"create_connection",
"(",
"db_file",
")",
":",
"try",
":",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"db_file",
")",
"return",
"conn",
"except",
"sqlite3",
".",
"Error",
"as",
"e",
":",
"logging",
".",
"warn",
"(",
"'Sqlite3 connection Error: {}'",
".",
"format",
"(",
"e",
")",
")",
"print",
"(",
"e",
")",
"return",
"None"
] | [
27,
0
] | [
40,
15
] | python | en | ['en', 'en', 'en'] | True |
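A minimal usage sketch for create_connection above; ':memory:' is used so the example never touches the filesystem, and sqlite3 is assumed to be imported as in the function body.

conn = create_connection(':memory:')
if conn is not None:
    try:
        cur = conn.cursor()
        cur.execute('SELECT sqlite_version()')  # trivial query to prove the connection works
        print(cur.fetchone())
    finally:
        conn.close()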
select_dly_flows_by_station_ID | (conn, station) |
Query daily flows by station ID
:param conn: the Connection object
:param station: station number (ID) according to WSC convention
:return: dataframe object of daily flows
|
Query daily flows by station ID
:param conn: the Connection object
:param station: station number (ID) according to WSC convention
:return: dataframe object of daily flows
| def select_dly_flows_by_station_ID(conn, station):
"""
Query daily flows by station ID
:param conn: the Connection object
:param station: station number (ID) according to WSC convention
:return: dataframe object of daily flows
"""
time0 = time.time()
cur = conn.cursor()
cur.execute("SELECT * FROM DLY_FLOWS WHERE STATION_NUMBER=?", (station,))
rows = cur.fetchall()
column_headers = [description[0] for description in cur.description]
id_var_headers = column_headers[:11]
df = pd.DataFrame(rows, columns=column_headers)
df.drop(['MONTHLY_MEAN', 'MONTHLY_TOTAL', 'FIRST_DAY_MIN',
'MIN', 'FIRST_DAY_MAX', 'MAX'], axis=1, inplace=True)
timex = time.time()
all_val_vars = [e for e in column_headers if 'FLOW' in e]
flag_val_vars = [e for e in all_val_vars if 'FLOW_SYMBOL' in e]
flow_val_vars = [e for e in all_val_vars if '_' not in e]
df_flows = pd.melt(df,
id_vars=id_var_headers,
value_vars=flow_val_vars,
value_name='DAILY_FLOW',
var_name='DAY').sort_values(by=['YEAR', 'MONTH'])
df_flows['DAY'] = df_flows['DAY'].apply(
map_day_to_var_name)
df_flags = pd.melt(df,
id_vars=id_var_headers,
value_vars=flag_val_vars,
value_name='FLAG',
var_name='DAY').sort_values(by=['YEAR', 'MONTH'])
# print('time to melt = ', time.time() - timex)
df_flows['FLAG'] = df_flags['FLAG']
# filter out day row if it's not a day that exists in given month
df_flows = df_flows[df_flows['DAY'].astype(
int) <= df_flows['NO_DAYS'].astype(int)].dropna(subset=['DAILY_FLOW'])
dates = df_flows['YEAR'].astype(
str) + '-' + df_flows['MONTH'].astype(str) + '-' + df_flows['DAY'].astype(str)
df_flows['DATE'] = pd.to_datetime(dates, format='%Y-%m-%d')
out = pd.DataFrame()
out['DATE'] = df_flows['DATE']
if IDS_AND_DAS[station] > 0:
out['DAILY_UR_{}'.format(
station)] = df_flows['DAILY_FLOW'] / IDS_AND_DAS[station] * 1000
out['FLAG_{}'.format(station)] = df_flows['FLAG']
out.set_index('DATE', inplace=True)
if len(out) > 0:
return out
else:
return None | [
"def",
"select_dly_flows_by_station_ID",
"(",
"conn",
",",
"station",
")",
":",
"time0",
"=",
"time",
".",
"time",
"(",
")",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"\"SELECT * FROM DLY_FLOWS WHERE STATION_NUMBER=?\"",
",",
"(",
"station",
",",
")",
")",
"rows",
"=",
"cur",
".",
"fetchall",
"(",
")",
"column_headers",
"=",
"[",
"description",
"[",
"0",
"]",
"for",
"description",
"in",
"cur",
".",
"description",
"]",
"id_var_headers",
"=",
"column_headers",
"[",
":",
"11",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"rows",
",",
"columns",
"=",
"column_headers",
")",
"df",
".",
"drop",
"(",
"[",
"'MONTHLY_MEAN'",
",",
"'MONTHLY_TOTAL'",
",",
"'FIRST_DAY_MIN'",
",",
"'MIN'",
",",
"'FIRST_DAY_MAX'",
",",
"'MAX'",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"timex",
"=",
"time",
".",
"time",
"(",
")",
"all_val_vars",
"=",
"[",
"e",
"for",
"e",
"in",
"column_headers",
"if",
"'FLOW'",
"in",
"e",
"]",
"flag_val_vars",
"=",
"[",
"e",
"for",
"e",
"in",
"all_val_vars",
"if",
"'FLOW_SYMBOL'",
"in",
"e",
"]",
"flow_val_vars",
"=",
"[",
"e",
"for",
"e",
"in",
"all_val_vars",
"if",
"'_'",
"not",
"in",
"e",
"]",
"df_flows",
"=",
"pd",
".",
"melt",
"(",
"df",
",",
"id_vars",
"=",
"id_var_headers",
",",
"value_vars",
"=",
"flow_val_vars",
",",
"value_name",
"=",
"'DAILY_FLOW'",
",",
"var_name",
"=",
"'DAY'",
")",
".",
"sort_values",
"(",
"by",
"=",
"[",
"'YEAR'",
",",
"'MONTH'",
"]",
")",
"df_flows",
"[",
"'DAY'",
"]",
"=",
"df_flows",
"[",
"'DAY'",
"]",
".",
"apply",
"(",
"map_day_to_var_name",
")",
"df_flags",
"=",
"pd",
".",
"melt",
"(",
"df",
",",
"id_vars",
"=",
"id_var_headers",
",",
"value_vars",
"=",
"flag_val_vars",
",",
"value_name",
"=",
"'FLAG'",
",",
"var_name",
"=",
"'DAY'",
")",
".",
"sort_values",
"(",
"by",
"=",
"[",
"'YEAR'",
",",
"'MONTH'",
"]",
")",
"# print('time to melt = ', time.time() - timex)",
"df_flows",
"[",
"'FLAG'",
"]",
"=",
"df_flags",
"[",
"'FLAG'",
"]",
"# filter out day row if it's not a day that exists in given month",
"df_flows",
"=",
"df_flows",
"[",
"df_flows",
"[",
"'DAY'",
"]",
".",
"astype",
"(",
"int",
")",
"<=",
"df_flows",
"[",
"'NO_DAYS'",
"]",
".",
"astype",
"(",
"int",
")",
"]",
".",
"dropna",
"(",
"subset",
"=",
"[",
"'DAILY_FLOW'",
"]",
")",
"dates",
"=",
"df_flows",
"[",
"'YEAR'",
"]",
".",
"astype",
"(",
"str",
")",
"+",
"'-'",
"+",
"df_flows",
"[",
"'MONTH'",
"]",
".",
"astype",
"(",
"str",
")",
"+",
"'-'",
"+",
"df_flows",
"[",
"'DAY'",
"]",
".",
"astype",
"(",
"str",
")",
"df_flows",
"[",
"'DATE'",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"dates",
",",
"format",
"=",
"'%Y-%m-%d'",
")",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"out",
"[",
"'DATE'",
"]",
"=",
"df_flows",
"[",
"'DATE'",
"]",
"if",
"IDS_AND_DAS",
"[",
"station",
"]",
">",
"0",
":",
"out",
"[",
"'DAILY_UR_{}'",
".",
"format",
"(",
"station",
")",
"]",
"=",
"df_flows",
"[",
"'DAILY_FLOW'",
"]",
"/",
"IDS_AND_DAS",
"[",
"station",
"]",
"*",
"1000",
"out",
"[",
"'FLAG_{}'",
".",
"format",
"(",
"station",
")",
"]",
"=",
"df_flows",
"[",
"'FLAG'",
"]",
"out",
".",
"set_index",
"(",
"'DATE'",
",",
"inplace",
"=",
"True",
")",
"if",
"len",
"(",
"out",
")",
">",
"0",
":",
"return",
"out",
"else",
":",
"return",
"None"
] | [
71,
0
] | [
131,
19
] | python | en | ['en', 'error', 'th'] | False |
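A self-contained sketch of the wide-to-long reshape at the heart of the function above: one flow column per day of the month becomes one row per (year, month, day). The column names here are shortened stand-ins for the real HYDAT schema.

import pandas as pd

df = pd.DataFrame({'YEAR': [1990], 'MONTH': [1], 'NO_DAYS': [31],
                   'FLOW1': [1.2], 'FLOW2': [1.4]})
long_df = pd.melt(df, id_vars=['YEAR', 'MONTH', 'NO_DAYS'],
                  value_vars=['FLOW1', 'FLOW2'],
                  value_name='DAILY_FLOW', var_name='DAY')
long_df['DAY'] = long_df['DAY'].str.replace('FLOW', '')  # keep only the day number
print(long_df)  # two rows: day 1 and day 2 of 1990-01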
get_xyz_distance | (lat, lon, target) |
Converts lat/lon to x, y, z (Earth-centered Cartesian coordinates).
Does not account for each station's own elevation;
it assumes every station sits at the target's elevation.
|
Converts lat/lon to x, y, z (Earth-centered Cartesian coordinates).
Does not account for each station's own elevation;
it assumes every station sits at the target's elevation.
| def get_xyz_distance(lat, lon, target):
"""
Converts lat/lon to x, y, z (Earth-centered Cartesian coordinates).
Does not account for each station's own elevation;
it assumes every station sits at the target's elevation.
"""
r = 6378137 + target.Elevation
x = r * np.cos(deg2rad(lat)) * np.cos(deg2rad(lon))
y = r * np.cos(deg2rad(lat)) * np.sin(deg2rad(lon))
z = r * np.sin(deg2rad(lat)) * (1 - 1 / 298.257223563)
return scipy.spatial.distance.euclidean(target.xyz_coords, [x, y, z]) | [
"def",
"get_xyz_distance",
"(",
"lat",
",",
"lon",
",",
"target",
")",
":",
"r",
"=",
"6378137",
"+",
"target",
".",
"Elevation",
"x",
"=",
"r",
"*",
"np",
".",
"cos",
"(",
"deg2rad",
"(",
"lat",
")",
")",
"*",
"np",
".",
"cos",
"(",
"deg2rad",
"(",
"lon",
")",
")",
"y",
"=",
"r",
"*",
"np",
".",
"cos",
"(",
"deg2rad",
"(",
"lat",
")",
")",
"*",
"np",
".",
"sin",
"(",
"deg2rad",
"(",
"lon",
")",
")",
"z",
"=",
"r",
"*",
"np",
".",
"sin",
"(",
"deg2rad",
"(",
"lat",
")",
")",
"*",
"(",
"1",
"-",
"1",
"/",
"298.257223563",
")",
"return",
"scipy",
".",
"spatial",
".",
"distance",
".",
"euclidean",
"(",
"target",
".",
"xyz_coords",
",",
"[",
"x",
",",
"y",
",",
"z",
"]",
")"
] | [
139,
0
] | [
150,
73
] | python | en | ['en', 'error', 'th'] | False |
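A self-contained restatement of the lat/lon-to-Cartesian conversion above, using the same WGS84 semi-major axis (6378137 m) and inverse flattening (298.257223563); the coordinates in the usage line are arbitrary.

import numpy as np
from numpy import deg2rad

def latlon_to_xyz(lat, lon, elevation=0.0):
    r = 6378137 + elevation  # WGS84 semi-major axis plus height above the ellipsoid
    x = r * np.cos(deg2rad(lat)) * np.cos(deg2rad(lon))
    y = r * np.cos(deg2rad(lat)) * np.sin(deg2rad(lon))
    z = r * np.sin(deg2rad(lat)) * (1 - 1 / 298.257223563)  # flattening correction on z
    return np.array([x, y, z])

# Straight-line (chord) distance between two nearby points, in metres.
print(np.linalg.norm(latlon_to_xyz(49.0, -123.0) - latlon_to_xyz(49.1, -123.2)))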
export_slice_to_geotiff | (ds, path) |
Exports a single slice of an xarray.Dataset as a GeoTIFF.
ds: xarray.Dataset
The Dataset to export.
path: str
The path to store the exported GeoTIFF.
|
Exports a single slice of an xarray.Dataset as a GeoTIFF.
ds: xarray.Dataset
The Dataset to export.
path: str
The path to store the exported GeoTIFF.
| def export_slice_to_geotiff(ds, path):
"""
Exports a single slice of an xarray.Dataset as a GeoTIFF.
ds: xarray.Dataset
The Dataset to export.
path: str
The path to store the exported GeoTIFF.
"""
kwargs = dict(tif_path=path, dataset=ds.astype(np.float32), bands=list(ds.data_vars.keys()))
if 'crs' in ds.attrs:
kwargs['crs'] = str(ds.attrs['crs'])
dc_utilities.write_geotiff_from_xr(**kwargs) | [
"def",
"export_slice_to_geotiff",
"(",
"ds",
",",
"path",
")",
":",
"kwargs",
"=",
"dict",
"(",
"tif_path",
"=",
"path",
",",
"dataset",
"=",
"ds",
".",
"astype",
"(",
"np",
".",
"float32",
")",
",",
"bands",
"=",
"list",
"(",
"ds",
".",
"data_vars",
".",
"keys",
"(",
")",
")",
")",
"if",
"'crs'",
"in",
"ds",
".",
"attrs",
":",
"kwargs",
"[",
"'crs'",
"]",
"=",
"str",
"(",
"ds",
".",
"attrs",
"[",
"'crs'",
"]",
")",
"dc_utilities",
".",
"write_geotiff_from_xr",
"(",
"*",
"*",
"kwargs",
")"
] | [
10,
0
] | [
22,
48
] | python | en | ['en', 'error', 'th'] | False |
export_xarray_to_geotiff | (ds, path) |
Exports an xarray.Dataset as individual GeoTIFFs, one per time slice.
Parameters
----------
ds: xarray.Dataset
The Dataset to export.
path: str
The path prefix to store the exported GeoTIFFs. For example, 'geotiffs/mydata' would result in files named like
'mydata_2016_12_05_12_31_36.tif' within the 'geotiffs' folder.
|
Exports an xarray.Dataset as individual GeoTIFFs, one per time slice.
Parameters
----------
ds: xarray.Dataset
The Dataset to export.
path: str
The path prefix to store the exported GeoTIFFs. For example, 'geotiffs/mydata' would result in files named like
'mydata_2016_12_05_12_31_36.tif' within the 'geotiffs' folder.
| def export_xarray_to_geotiff(ds, path):
"""
Exports an xarray.Dataset as individual GeoTIFFs, one per time slice.
Parameters
----------
ds: xarray.Dataset
The Dataset to export.
path: str
The path prefix to store the exported GeoTIFFs. For example, 'geotiffs/mydata' would result in files named like
'mydata_2016_12_05_12_31_36.tif' within the 'geotiffs' folder.
"""
def time_to_string(t):
return time.strftime("%Y_%m_%d_%H_%M_%S", time.gmtime(t.astype(int)/1000000000))
for t in ds.time:
time_slice_xarray = ds.sel(time = t)
export_slice_to_geotiff(time_slice_xarray,
path + "_" + time_to_string(t) + ".tif") | [
"def",
"export_xarray_to_geotiff",
"(",
"ds",
",",
"path",
")",
":",
"def",
"time_to_string",
"(",
"t",
")",
":",
"return",
"time",
".",
"strftime",
"(",
"\"%Y_%m_%d_%H_%M_%S\"",
",",
"time",
".",
"gmtime",
"(",
"t",
".",
"astype",
"(",
"int",
")",
"/",
"1000000000",
")",
")",
"for",
"t",
"in",
"ds",
".",
"time",
":",
"time_slice_xarray",
"=",
"ds",
".",
"sel",
"(",
"time",
"=",
"t",
")",
"export_slice_to_geotiff",
"(",
"time_slice_xarray",
",",
"path",
"+",
"\"_\"",
"+",
"time_to_string",
"(",
"t",
")",
"+",
"\".tif\"",
")"
] | [
24,
0
] | [
42,
72
] | python | en | ['en', 'error', 'th'] | False |
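A self-contained check of the timestamp formatting used for the output file names above; only numpy and the standard library are needed, and the example timestamp matches the docstring's file-name example.

import time
import numpy as np

t = np.datetime64('2016-12-05T12:31:36').astype('datetime64[ns]')
# Nanoseconds since the epoch -> seconds -> formatted UTC string.
print(time.strftime("%Y_%m_%d_%H_%M_%S", time.gmtime(t.astype(int) / 1000000000)))
# -> 2016_12_05_12_31_36, matching the 'mydata_2016_12_05_12_31_36.tif' example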
prepare_virtualenv | (packages=()) |
Prepares a virtual environment.
:rtype: VirtualEnvDescription
|
Prepares a virtual environment.
:rtype: VirtualEnvDescription
| def prepare_virtualenv(packages=()):
"""
Prepares a virtual environment.
:rtype: VirtualEnvDescription
"""
vroot = get_vroot()
env_key = get_env_key(packages)
vdir = os.path.join(vroot, env_key)
vbin = os.path.join(vdir, ('bin', 'Scripts')[_windows])
vpython = os.path.join(vbin, 'python' + get_exe_suffix())
vpip = os.path.join(vbin, 'pip' + get_exe_suffix())
vpip_install = [vpip, "install"]
if (2, 5) <= sys.version_info < (2, 6):
vpip_install.append("--insecure")
venv_description = VirtualEnvDescription(home_dir=vdir, bin_dir=vbin, python=vpython, pip=vpip, packages=packages)
print("Will install now")
print(str(venv_description))
env = get_clean_system_environment()
env['PIP_DOWNLOAD_CACHE'] = os.path.abspath(os.path.join(vroot, "pip-download-cache"))
# Cache environment
done_flag_file = os.path.join(vdir, "done")
if not os.path.exists(done_flag_file):
if os.path.exists(vdir):
shutil.rmtree(vdir)
virtualenv.create_environment(vdir)
# Update for newly created environment
if sys.version_info >= (2, 7):
_call([vpython, "-m", "pip", "install", "--upgrade", "pip", "setuptools"], env=env, cwd=get_teamcity_messages_root())
for package_spec in packages:
_call(vpip_install + [package_spec], env=env)
open(done_flag_file, 'a').close()
# Update for env. that already exists: does not take long, but may save old envs.
if sys.version_info >= (2, 7):
_call([vpython, "-m", "pip", "install", "--upgrade", "pip", "setuptools"], env=env, cwd=get_teamcity_messages_root())
_call([vpython, "setup.py", "install"], env=env, cwd=get_teamcity_messages_root())
return venv_description | [
"def",
"prepare_virtualenv",
"(",
"packages",
"=",
"(",
")",
")",
":",
"vroot",
"=",
"get_vroot",
"(",
")",
"env_key",
"=",
"get_env_key",
"(",
"packages",
")",
"vdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vroot",
",",
"env_key",
")",
"vbin",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vdir",
",",
"(",
"'bin'",
",",
"'Scripts'",
")",
"[",
"_windows",
"]",
")",
"vpython",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vbin",
",",
"'python'",
"+",
"get_exe_suffix",
"(",
")",
")",
"vpip",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vbin",
",",
"'pip'",
"+",
"get_exe_suffix",
"(",
")",
")",
"vpip_install",
"=",
"[",
"vpip",
",",
"\"install\"",
"]",
"if",
"(",
"2",
",",
"5",
")",
"<=",
"sys",
".",
"version_info",
"<",
"(",
"2",
",",
"6",
")",
":",
"vpip_install",
".",
"append",
"(",
"\"--insecure\"",
")",
"venv_description",
"=",
"VirtualEnvDescription",
"(",
"home_dir",
"=",
"vdir",
",",
"bin_dir",
"=",
"vbin",
",",
"python",
"=",
"vpython",
",",
"pip",
"=",
"vpip",
",",
"packages",
"=",
"packages",
")",
"print",
"(",
"\"Will install now\"",
")",
"print",
"(",
"str",
"(",
"venv_description",
")",
")",
"env",
"=",
"get_clean_system_environment",
"(",
")",
"env",
"[",
"'PIP_DOWNLOAD_CACHE'",
"]",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"vroot",
",",
"\"pip-download-cache\"",
")",
")",
"# Cache environment",
"done_flag_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vdir",
",",
"\"done\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"done_flag_file",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"vdir",
")",
":",
"shutil",
".",
"rmtree",
"(",
"vdir",
")",
"virtualenv",
".",
"create_environment",
"(",
"vdir",
")",
"# Update for newly created environment",
"if",
"sys",
".",
"version_info",
">=",
"(",
"2",
",",
"7",
")",
":",
"_call",
"(",
"[",
"vpython",
",",
"\"-m\"",
",",
"\"pip\"",
",",
"\"install\"",
",",
"\"--upgrade\"",
",",
"\"pip\"",
",",
"\"setuptools\"",
"]",
",",
"env",
"=",
"env",
",",
"cwd",
"=",
"get_teamcity_messages_root",
"(",
")",
")",
"for",
"package_spec",
"in",
"packages",
":",
"_call",
"(",
"vpip_install",
"+",
"[",
"package_spec",
"]",
",",
"env",
"=",
"env",
")",
"open",
"(",
"done_flag_file",
",",
"'a'",
")",
".",
"close",
"(",
")",
"# Update for env. that already exists: does not take long, but may save old envs.",
"if",
"sys",
".",
"version_info",
">=",
"(",
"2",
",",
"7",
")",
":",
"_call",
"(",
"[",
"vpython",
",",
"\"-m\"",
",",
"\"pip\"",
",",
"\"install\"",
",",
"\"--upgrade\"",
",",
"\"pip\"",
",",
"\"setuptools\"",
"]",
",",
"env",
"=",
"env",
",",
"cwd",
"=",
"get_teamcity_messages_root",
"(",
")",
")",
"_call",
"(",
"[",
"vpython",
",",
"\"setup.py\"",
",",
"\"install\"",
"]",
",",
"env",
"=",
"env",
",",
"cwd",
"=",
"get_teamcity_messages_root",
"(",
")",
")",
"return",
"venv_description"
] | [
44,
0
] | [
88,
27
] | python | en | ['en', 'error', 'th'] | False |
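A minimal sketch of the "done flag" caching idiom used above, stripped of the virtualenv specifics: expensive setup runs once, and a marker file short-circuits later calls. The build callable is a placeholder supplied by the caller.

import os
import shutil

def ensure_built(vdir, build):
    done_flag_file = os.path.join(vdir, 'done')
    if not os.path.exists(done_flag_file):
        if os.path.exists(vdir):
            shutil.rmtree(vdir)  # discard any half-finished previous attempt
        os.makedirs(vdir)
        build(vdir)
        open(done_flag_file, 'a').close()  # mark success only after build completes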
ColumnDistributionMatchesBenfordsLaw._pandas | (cls, column, **kwargs) |
listdata: length 9 (observed first-digit frequencies for digits 1-9)
matchvalues: length 9 (Benford's expected frequencies, log10(1 + 1/d))
chi-square them with 90 percent confidence
|
listdata: length 9 (observed first-digit frequencies for digits 1-9)
matchvalues: length 9 (Benford's expected frequencies, log10(1 + 1/d))
chi-square them with 90 percent confidence
| def _pandas(cls, column, **kwargs):
totalVals = (column.apply(lambda x: 1.0 if x is not None else 0.0)).sum()
num1 = (
column.apply(lambda x: matchFirstDigit(x, 1) if x is not None else 0.0)
).sum()
num2 = (
column.apply(lambda x: matchFirstDigit(x, 2) if x is not None else 0.0)
).sum()
num3 = (
column.apply(lambda x: matchFirstDigit(x, 3) if x is not None else 0.0)
).sum()
num4 = (
column.apply(lambda x: matchFirstDigit(x, 4) if x is not None else 0.0)
).sum()
num5 = (
column.apply(lambda x: matchFirstDigit(x, 5) if x is not None else 0.0)
).sum()
num6 = (
column.apply(lambda x: matchFirstDigit(x, 6) if x is not None else 0.0)
).sum()
num7 = (
column.apply(lambda x: matchFirstDigit(x, 7) if x is not None else 0.0)
).sum()
num8 = (
column.apply(lambda x: matchFirstDigit(x, 8) if x is not None else 0.0)
).sum()
num9 = (
column.apply(lambda x: matchFirstDigit(x, 9) if x is not None else 0.0)
).sum()
listdata = [
num1 / totalVals,
num2 / totalVals,
num3 / totalVals,
num4 / totalVals,
num5 / totalVals,
num6 / totalVals,
num7 / totalVals,
num8 / totalVals,
num9 / totalVals,
]
matchvalues = []
for x in range(1, 10):
matchvalues.append(math.log(1.0 + 1.0 / x) / math.log(10))
"""
listdata: length 9 (observed first-digit frequencies for digits 1-9)
matchvalues: length 9 (Benford's expected frequencies, log10(1 + 1/d))
chi-square them with 90 percent confidence
"""
stat = 0
for i in range(9):
stat += ((listdata[i] - matchvalues[i]) ** 2) / (matchvalues[i])
if stat >= 5.071:
return False
else:
return True | [
"def",
"_pandas",
"(",
"cls",
",",
"column",
",",
"*",
"*",
"kwargs",
")",
":",
"totalVals",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"1.0",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num1",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"1",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num2",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"2",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num3",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"3",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num4",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"4",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num5",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"5",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num6",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"6",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num7",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"7",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num8",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"8",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"num9",
"=",
"(",
"column",
".",
"apply",
"(",
"lambda",
"x",
":",
"matchFirstDigit",
"(",
"x",
",",
"9",
")",
"if",
"x",
"is",
"not",
"None",
"else",
"0.0",
")",
")",
".",
"sum",
"(",
")",
"listdata",
"=",
"[",
"num1",
"/",
"totalVals",
",",
"num2",
"/",
"totalVals",
",",
"num3",
"/",
"totalVals",
",",
"num4",
"/",
"totalVals",
",",
"num5",
"/",
"totalVals",
",",
"num6",
"/",
"totalVals",
",",
"num7",
"/",
"totalVals",
",",
"num8",
"/",
"totalVals",
",",
"num9",
"/",
"totalVals",
",",
"]",
"matchvalues",
"=",
"[",
"]",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"10",
")",
":",
"matchvalues",
".",
"append",
"(",
"math",
".",
"log",
"(",
"1.0",
"+",
"1.0",
"/",
"x",
")",
"/",
"math",
".",
"log",
"(",
"10",
")",
")",
"stat",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"9",
")",
":",
"stat",
"+=",
"(",
"(",
"listdata",
"[",
"i",
"]",
"-",
"matchvalues",
"[",
"i",
"]",
")",
"**",
"2",
")",
"/",
"(",
"matchvalues",
"[",
"i",
"]",
")",
"if",
"stat",
">=",
"5.071",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | [
80,
4
] | [
137,
23
] | python | en | ['en', 'error', 'th'] | False |
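A rough vectorized equivalent of the check above (matchFirstDigit itself is defined outside this excerpt): observed first-digit frequencies are compared against log10(1 + 1/d) using the same chi-square-style statistic and the same 5.071 cutoff.

import math
import pandas as pd

def benford_stat(column):
    # First significant digit of each value, ignoring sign, leading zeros, and the decimal point.
    digits = column.dropna().abs().astype(str).str.lstrip('0.').str[0]
    observed = digits.value_counts(normalize=True).reindex(
        [str(d) for d in range(1, 10)], fill_value=0.0)
    expected = pd.Series([math.log10(1 + 1 / d) for d in range(1, 10)],
                         index=observed.index)
    return (((observed - expected) ** 2) / expected).sum()

print(benford_stat(pd.Series([123, 18, 190, 234, 31, 111, 15, 120])) < 5.071)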
ColumnDistributionMatchesBenfordsLaw._get_evaluation_dependencies | (
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
) | This should return a dictionary:
{
"dependency_name": MetricConfiguration,
...
}
| This should return a dictionary: | def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""This should return a dictionary:
{
"dependency_name": MetricConfiguration,
...
}
"""
dependencies = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
table_domain_kwargs = {
k: v for k, v in metric.metric_domain_kwargs.items() if k != "column"
}
dependencies.update(
{
"table.row_count": MetricConfiguration(
"table.row_count", table_domain_kwargs
)
}
)
if isinstance(execution_engine, SqlAlchemyExecutionEngine):
dependencies["column_values.nonnull.count"] = MetricConfiguration(
"column_values.nonnull.count", metric.metric_domain_kwargs
)
return dependencies | [
"def",
"_get_evaluation_dependencies",
"(",
"cls",
",",
"metric",
":",
"MetricConfiguration",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
"=",
"None",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"runtime_configuration",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
")",
":",
"dependencies",
"=",
"super",
"(",
")",
".",
"_get_evaluation_dependencies",
"(",
"metric",
"=",
"metric",
",",
"configuration",
"=",
"configuration",
",",
"execution_engine",
"=",
"execution_engine",
",",
"runtime_configuration",
"=",
"runtime_configuration",
",",
")",
"table_domain_kwargs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"metric",
".",
"metric_domain_kwargs",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"\"column\"",
"}",
"dependencies",
".",
"update",
"(",
"{",
"\"table.row_count\"",
":",
"MetricConfiguration",
"(",
"\"table.row_count\"",
",",
"table_domain_kwargs",
")",
"}",
")",
"if",
"isinstance",
"(",
"execution_engine",
",",
"SqlAlchemyExecutionEngine",
")",
":",
"dependencies",
"[",
"\"column_values.nonnull.count\"",
"]",
"=",
"MetricConfiguration",
"(",
"\"column_values.nonnull.count\"",
",",
"metric",
".",
"metric_domain_kwargs",
")",
"return",
"dependencies"
] | [
191,
4
] | [
230,
27
] | python | en | ['en', 'en', 'en'] | True |
ColumnMostCommonValue._get_evaluation_dependencies | (
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[Dict] = None,
) | Returns a dictionary of given metric names and their corresponding configuration,
specifying the metric types and their respective domains | Returns a dictionary of given metric names and their corresponding configuration,
specifying the metric types and their respective domains | def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[Dict] = None,
):
"""Returns a dictionary of given metric names and their corresponding configuration,
specifying the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if isinstance(
execution_engine, (SparkDFExecutionEngine, SqlAlchemyExecutionEngine)
):
dependencies["column.value_counts"] = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs={
"sort": "value",
"collate": None,
},
)
return dependencies | [
"def",
"_get_evaluation_dependencies",
"(",
"cls",
",",
"metric",
":",
"MetricConfiguration",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
"=",
"None",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"runtime_configuration",
":",
"Optional",
"[",
"Dict",
"]",
"=",
"None",
",",
")",
":",
"dependencies",
":",
"dict",
"=",
"super",
"(",
")",
".",
"_get_evaluation_dependencies",
"(",
"metric",
"=",
"metric",
",",
"configuration",
"=",
"configuration",
",",
"execution_engine",
"=",
"execution_engine",
",",
"runtime_configuration",
"=",
"runtime_configuration",
",",
")",
"if",
"isinstance",
"(",
"execution_engine",
",",
"(",
"SparkDFExecutionEngine",
",",
"SqlAlchemyExecutionEngine",
")",
")",
":",
"dependencies",
"[",
"\"column.value_counts\"",
"]",
"=",
"MetricConfiguration",
"(",
"metric_name",
"=",
"\"column.value_counts\"",
",",
"metric_domain_kwargs",
"=",
"metric",
".",
"metric_domain_kwargs",
",",
"metric_value_kwargs",
"=",
"{",
"\"sort\"",
":",
"\"value\"",
",",
"\"collate\"",
":",
"None",
",",
"}",
",",
")",
"return",
"dependencies"
] | [
59,
4
] | [
87,
27
] | python | en | ['en', 'en', 'en'] | True |
MasterQA.auto_close_results | (self) | If this method is called, the results page will automatically close
at the end of the test run, rather than waiting on the user to close
the results page manually.
| If this method is called, the results page will automatically close
at the end of the test run, rather than waiting on the user to close
the results page manually.
| def auto_close_results(self):
''' If this method is called, the results page will automatically close
at the end of the test run, rather than waiting on the user to close
the results page manually.
'''
self.auto_close_results_page = True | [
"def",
"auto_close_results",
"(",
"self",
")",
":",
"self",
".",
"auto_close_results_page",
"=",
"True"
] | [
49,
4
] | [
54,
43
] | python | en | ['en', 'en', 'en'] | True |
ColumnValuesZScore._get_evaluation_dependencies | (
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
) | Returns a dictionary of given metric names and their corresponding configuration, specifying the metric
types and their respective domains | Returns a dictionary of given metric names and their corresponding configuration, specifying the metric
types and their respective domains | def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""Returns a dictionary of given metric names and their corresponding configuration, specifying the metric
types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if metric.metric_name == "column_values.z_score.under_threshold.condition":
dependencies["column_values.z_score.map"] = MetricConfiguration(
metric_name="column_values.z_score.map",
metric_domain_kwargs=metric.metric_domain_kwargs,
)
if metric.metric_name == "column_values.z_score.map":
dependencies["column.mean"] = MetricConfiguration(
metric_name="column.mean",
metric_domain_kwargs=metric.metric_domain_kwargs,
)
dependencies["column.standard_deviation"] = MetricConfiguration(
metric_name="column.standard_deviation",
metric_domain_kwargs=metric.metric_domain_kwargs,
)
return dependencies | [
"def",
"_get_evaluation_dependencies",
"(",
"cls",
",",
"metric",
":",
"MetricConfiguration",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
"=",
"None",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"runtime_configuration",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
")",
":",
"dependencies",
":",
"dict",
"=",
"super",
"(",
")",
".",
"_get_evaluation_dependencies",
"(",
"metric",
"=",
"metric",
",",
"configuration",
"=",
"configuration",
",",
"execution_engine",
"=",
"execution_engine",
",",
"runtime_configuration",
"=",
"runtime_configuration",
",",
")",
"if",
"metric",
".",
"metric_name",
"==",
"\"column_values.z_score.under_threshold.condition\"",
":",
"dependencies",
"[",
"\"column_values.z_score.map\"",
"]",
"=",
"MetricConfiguration",
"(",
"metric_name",
"=",
"\"column_values.z_score.map\"",
",",
"metric_domain_kwargs",
"=",
"metric",
".",
"metric_domain_kwargs",
",",
")",
"if",
"metric",
".",
"metric_name",
"==",
"\"column_values.z_score.map\"",
":",
"dependencies",
"[",
"\"column.mean\"",
"]",
"=",
"MetricConfiguration",
"(",
"metric_name",
"=",
"\"column.mean\"",
",",
"metric_domain_kwargs",
"=",
"metric",
".",
"metric_domain_kwargs",
",",
")",
"dependencies",
"[",
"\"column.standard_deviation\"",
"]",
"=",
"MetricConfiguration",
"(",
"metric_name",
"=",
"\"column.standard_deviation\"",
",",
"metric_domain_kwargs",
"=",
"metric",
".",
"metric_domain_kwargs",
",",
")",
"return",
"dependencies"
] | [
99,
4
] | [
131,
27
] | python | en | ['en', 'en', 'en'] | True |
SparkDFDatasource.build_configuration | (
cls,
data_asset_type=None,
batch_kwargs_generators=None,
spark_config=None,
force_reuse_spark_context=False,
**kwargs
) |
Build a full configuration object for a datasource, potentially including generators with defaults.
Args:
data_asset_type: A ClassConfig dictionary
batch_kwargs_generators: Generator configuration dictionary
spark_config: dictionary of key-value pairs to pass to the spark builder
force_reuse_spark_context: whether to reuse an existing Spark context instead of creating a new one
**kwargs: Additional kwargs to be part of the datasource constructor's initialization
Returns:
A complete datasource configuration.
|
Build a full configuration object for a datasource, potentially including generators with defaults. | def build_configuration(
cls,
data_asset_type=None,
batch_kwargs_generators=None,
spark_config=None,
force_reuse_spark_context=False,
**kwargs
):
"""
Build a full configuration object for a datasource, potentially including generators with defaults.
Args:
data_asset_type: A ClassConfig dictionary
batch_kwargs_generators: Generator configuration dictionary
spark_config: dictionary of key-value pairs to pass to the spark builder
force_reuse_spark_context: whether to reuse an existing Spark context instead of creating a new one
**kwargs: Additional kwargs to be part of the datasource constructor's initialization
Returns:
A complete datasource configuration.
"""
if data_asset_type is None:
data_asset_type = {
"class_name": "SparkDFDataset",
"module_name": "great_expectations.dataset",
}
else:
data_asset_type = classConfigSchema.dump(ClassConfig(**data_asset_type))
if spark_config is None:
spark_config = {}
configuration = kwargs
configuration.update(
{
"data_asset_type": data_asset_type,
"spark_config": spark_config,
"force_reuse_spark_context": force_reuse_spark_context,
}
)
if batch_kwargs_generators:
configuration["batch_kwargs_generators"] = batch_kwargs_generators
return configuration | [
"def",
"build_configuration",
"(",
"cls",
",",
"data_asset_type",
"=",
"None",
",",
"batch_kwargs_generators",
"=",
"None",
",",
"spark_config",
"=",
"None",
",",
"force_reuse_spark_context",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"data_asset_type",
"is",
"None",
":",
"data_asset_type",
"=",
"{",
"\"class_name\"",
":",
"\"SparkDFDataset\"",
",",
"\"module_name\"",
":",
"\"great_expectations.dataset\"",
",",
"}",
"else",
":",
"data_asset_type",
"=",
"classConfigSchema",
".",
"dump",
"(",
"ClassConfig",
"(",
"*",
"*",
"data_asset_type",
")",
")",
"if",
"spark_config",
"is",
"None",
":",
"spark_config",
"=",
"{",
"}",
"configuration",
"=",
"kwargs",
"configuration",
".",
"update",
"(",
"{",
"\"data_asset_type\"",
":",
"data_asset_type",
",",
"\"spark_config\"",
":",
"spark_config",
",",
"\"force_reuse_spark_context\"",
":",
"force_reuse_spark_context",
",",
"}",
")",
"if",
"batch_kwargs_generators",
":",
"configuration",
"[",
"\"batch_kwargs_generators\"",
"]",
"=",
"batch_kwargs_generators",
"return",
"configuration"
] | [
64,
4
] | [
109,
28
] | python | en | ['en', 'error', 'th'] | False |
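A usage sketch for build_configuration above; the generator class name and base_directory are illustrative values only, not taken from this excerpt.

config = SparkDFDatasource.build_configuration(
    spark_config={'spark.master': 'local[*]'},
    batch_kwargs_generators={
        'subdir_reader': {
            'class_name': 'SubdirReaderBatchKwargsGenerator',  # assumed generator name
            'base_directory': '/data',                         # hypothetical path
        }
    },
)
print(config['data_asset_type'])  # defaults to SparkDFDataset when not supplied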
SparkDFDatasource.__init__ | (
self,
name="default",
data_context=None,
data_asset_type=None,
batch_kwargs_generators=None,
spark_config=None,
force_reuse_spark_context=False,
**kwargs
) | Build a new SparkDFDatasource instance.
Args:
name: the name of this datasource
data_context: the DataContext to which this datasource is connected
data_asset_type: ClassConfig describing the data_asset type to be constructed by this datasource
batch_kwargs_generators: generator configuration
spark_config: dictionary of key-value pairs to be set on the spark session builder
force_reuse_spark_context: whether to reuse an existing Spark context instead of creating a new one
**kwargs: Additional keyword arguments to include in the datasource configuration
| Build a new SparkDFDatasource instance. | def __init__(
self,
name="default",
data_context=None,
data_asset_type=None,
batch_kwargs_generators=None,
spark_config=None,
force_reuse_spark_context=False,
**kwargs
):
"""Build a new SparkDFDatasource instance.
Args:
name: the name of this datasource
data_context: the DataContext to which this datasource is connected
data_asset_type: ClassConfig describing the data_asset type to be constructed by this datasource
batch_kwargs_generators: generator configuration
spark_config: dictionary of key-value pairs to be set on the spark session builder
force_reuse_spark_context: whether to reuse an existing Spark context instead of creating a new one
**kwargs: Additional keyword arguments to include in the datasource configuration
"""
configuration_with_defaults = SparkDFDatasource.build_configuration(
data_asset_type,
batch_kwargs_generators,
spark_config,
force_reuse_spark_context,
**kwargs
)
data_asset_type = configuration_with_defaults.pop("data_asset_type")
batch_kwargs_generators = configuration_with_defaults.pop(
"batch_kwargs_generators", None
)
super().__init__(
name,
data_context=data_context,
data_asset_type=data_asset_type,
batch_kwargs_generators=batch_kwargs_generators,
**configuration_with_defaults
)
if spark_config is None:
spark_config = {}
spark = get_or_create_spark_application(
spark_config=spark_config,
force_reuse_spark_context=force_reuse_spark_context,
)
self.spark = spark
self._build_generators() | [
"def",
"__init__",
"(",
"self",
",",
"name",
"=",
"\"default\"",
",",
"data_context",
"=",
"None",
",",
"data_asset_type",
"=",
"None",
",",
"batch_kwargs_generators",
"=",
"None",
",",
"spark_config",
"=",
"None",
",",
"force_reuse_spark_context",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"configuration_with_defaults",
"=",
"SparkDFDatasource",
".",
"build_configuration",
"(",
"data_asset_type",
",",
"batch_kwargs_generators",
",",
"spark_config",
",",
"force_reuse_spark_context",
",",
"*",
"*",
"kwargs",
")",
"data_asset_type",
"=",
"configuration_with_defaults",
".",
"pop",
"(",
"\"data_asset_type\"",
")",
"batch_kwargs_generators",
"=",
"configuration_with_defaults",
".",
"pop",
"(",
"\"batch_kwargs_generators\"",
",",
"None",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"name",
",",
"data_context",
"=",
"data_context",
",",
"data_asset_type",
"=",
"data_asset_type",
",",
"batch_kwargs_generators",
"=",
"batch_kwargs_generators",
",",
"*",
"*",
"configuration_with_defaults",
")",
"if",
"spark_config",
"is",
"None",
":",
"spark_config",
"=",
"{",
"}",
"spark",
"=",
"get_or_create_spark_application",
"(",
"spark_config",
"=",
"spark_config",
",",
"force_reuse_spark_context",
"=",
"force_reuse_spark_context",
",",
")",
"self",
".",
"spark",
"=",
"spark",
"self",
".",
"_build_generators",
"(",
")"
] | [
111,
4
] | [
158,
32
] | python | en | ['en', 'lb', 'en'] | True |
SparkDFDatasource.get_batch | (self, batch_kwargs, batch_parameters=None) | class-private implementation of get_data_asset | class-private implementation of get_data_asset | def get_batch(self, batch_kwargs, batch_parameters=None):
"""class-private implementation of get_data_asset"""
if self.spark is None:
logger.error("No spark session available")
return None
reader_options = batch_kwargs.get("reader_options", {})
# We need to build batch_markers to be used with the DataFrame
batch_markers = BatchMarkers(
{
"ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
}
)
if "path" in batch_kwargs or "s3" in batch_kwargs:
if "s3" in batch_kwargs:
warnings.warn(
"Direct GE Support for the s3 BatchKwarg will be removed in a future release. Please use a path "
"including the s3a:// protocol instead.",
DeprecationWarning,
)
# If both are present, let s3 override
path = batch_kwargs.get("path")
path = batch_kwargs.get("s3", path)
reader_method = batch_kwargs.get("reader_method")
reader = self.spark.read
for option in reader_options.items():
reader = reader.option(*option)
reader_fn = self._get_reader_fn(reader, reader_method, path)
df = reader_fn(path)
elif "query" in batch_kwargs:
df = self.spark.sql(batch_kwargs["query"])
elif "dataset" in batch_kwargs and isinstance(
batch_kwargs["dataset"], (DataFrame, SparkDFDataset)
):
df = batch_kwargs.get("dataset")
# We don't want to store the actual dataframe in kwargs; copy the remaining batch_kwargs
batch_kwargs = {k: batch_kwargs[k] for k in batch_kwargs if k != "dataset"}
if isinstance(df, SparkDFDataset):
# Grab just the spark_df reference, since we want to override everything else
df = df.spark_df
# Record this in the kwargs *and* the id
batch_kwargs["SparkDFRef"] = True
batch_kwargs["ge_batch_id"] = str(uuid.uuid1())
else:
raise BatchKwargsError(
"Unrecognized batch_kwargs for spark_source", batch_kwargs
)
if "limit" in batch_kwargs:
df = df.limit(batch_kwargs["limit"])
return Batch(
datasource_name=self.name,
batch_kwargs=batch_kwargs,
data=df,
batch_parameters=batch_parameters,
batch_markers=batch_markers,
data_context=self._data_context,
) | [
"def",
"get_batch",
"(",
"self",
",",
"batch_kwargs",
",",
"batch_parameters",
"=",
"None",
")",
":",
"if",
"self",
".",
"spark",
"is",
"None",
":",
"logger",
".",
"error",
"(",
"\"No spark session available\"",
")",
"return",
"None",
"reader_options",
"=",
"batch_kwargs",
".",
"get",
"(",
"\"reader_options\"",
",",
"{",
"}",
")",
"# We need to build batch_markers to be used with the DataFrame",
"batch_markers",
"=",
"BatchMarkers",
"(",
"{",
"\"ge_load_time\"",
":",
"datetime",
".",
"datetime",
".",
"now",
"(",
"datetime",
".",
"timezone",
".",
"utc",
")",
".",
"strftime",
"(",
"\"%Y%m%dT%H%M%S.%fZ\"",
")",
"}",
")",
"if",
"\"path\"",
"in",
"batch_kwargs",
"or",
"\"s3\"",
"in",
"batch_kwargs",
":",
"if",
"\"s3\"",
"in",
"batch_kwargs",
":",
"warnings",
".",
"warn",
"(",
"\"Direct GE Support for the s3 BatchKwarg will be removed in a future release. Please use a path \"",
"\"including the s3a:// protocol instead.\"",
",",
"DeprecationWarning",
",",
")",
"# If both are present, let s3 override",
"path",
"=",
"batch_kwargs",
".",
"get",
"(",
"\"path\"",
")",
"path",
"=",
"batch_kwargs",
".",
"get",
"(",
"\"s3\"",
",",
"path",
")",
"reader_method",
"=",
"batch_kwargs",
".",
"get",
"(",
"\"reader_method\"",
")",
"reader",
"=",
"self",
".",
"spark",
".",
"read",
"for",
"option",
"in",
"reader_options",
".",
"items",
"(",
")",
":",
"reader",
"=",
"reader",
".",
"option",
"(",
"*",
"option",
")",
"reader_fn",
"=",
"self",
".",
"_get_reader_fn",
"(",
"reader",
",",
"reader_method",
",",
"path",
")",
"df",
"=",
"reader_fn",
"(",
"path",
")",
"elif",
"\"query\"",
"in",
"batch_kwargs",
":",
"df",
"=",
"self",
".",
"spark",
".",
"sql",
"(",
"batch_kwargs",
"[",
"\"query\"",
"]",
")",
"elif",
"\"dataset\"",
"in",
"batch_kwargs",
"and",
"isinstance",
"(",
"batch_kwargs",
"[",
"\"dataset\"",
"]",
",",
"(",
"DataFrame",
",",
"SparkDFDataset",
")",
")",
":",
"df",
"=",
"batch_kwargs",
".",
"get",
"(",
"\"dataset\"",
")",
"# We don't want to store the actual dataframe in kwargs; copy the remaining batch_kwargs",
"batch_kwargs",
"=",
"{",
"k",
":",
"batch_kwargs",
"[",
"k",
"]",
"for",
"k",
"in",
"batch_kwargs",
"if",
"k",
"!=",
"\"dataset\"",
"}",
"if",
"isinstance",
"(",
"df",
",",
"SparkDFDataset",
")",
":",
"# Grab just the spark_df reference, since we want to override everything else",
"df",
"=",
"df",
".",
"spark_df",
"# Record this in the kwargs *and* the id",
"batch_kwargs",
"[",
"\"SparkDFRef\"",
"]",
"=",
"True",
"batch_kwargs",
"[",
"\"ge_batch_id\"",
"]",
"=",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
"else",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unrecognized batch_kwargs for spark_source\"",
",",
"batch_kwargs",
")",
"if",
"\"limit\"",
"in",
"batch_kwargs",
":",
"df",
"=",
"df",
".",
"limit",
"(",
"batch_kwargs",
"[",
"\"limit\"",
"]",
")",
"return",
"Batch",
"(",
"datasource_name",
"=",
"self",
".",
"name",
",",
"batch_kwargs",
"=",
"batch_kwargs",
",",
"data",
"=",
"df",
",",
"batch_parameters",
"=",
"batch_parameters",
",",
"batch_markers",
"=",
"batch_markers",
",",
"data_context",
"=",
"self",
".",
"_data_context",
",",
")"
] | [
180,
4
] | [
247,
9
] | python | en | ['en', 'en', 'en'] | True |
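A usage sketch showing the three batch_kwargs shapes accepted by get_batch above; the datasource construction and the paths are assumptions for illustration and require a working Spark installation.

datasource = SparkDFDatasource(name='spark_src')
# 1) File-backed batch, with an explicit reader_method and a row limit.
batch = datasource.get_batch({'path': '/data/sales.parquet',  # hypothetical path
                              'reader_method': 'parquet',
                              'limit': 1000})
# 2) {'query': 'SELECT * FROM sales'} runs Spark SQL against a registered view.
# 3) {'dataset': existing_spark_df} wraps an in-memory DataFrame directly.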
SparkDFDatasource._get_reader_fn | (self, reader, reader_method=None, path=None) | Static helper for providing reader_fn
Args:
reader: the base spark reader to use; this should have had reader_options applied already
reader_method: the name of the reader_method to use, if specified
path (str): the path to use to guess reader_method if it was not specified
Returns:
ReaderMethod to use for the filepath
| Static helper for providing reader_fn | def _get_reader_fn(self, reader, reader_method=None, path=None):
"""Static helper for providing reader_fn
Args:
reader: the base spark reader to use; this should have had reader_options applied already
reader_method: the name of the reader_method to use, if specified
path (str): the path to use to guess reader_method if it was not specified
Returns:
ReaderMethod to use for the filepath
"""
if reader_method is None and path is None:
raise BatchKwargsError(
"Unable to determine spark reader function without reader_method or path.",
{"reader_method": reader_method},
)
if reader_method is None:
reader_method = self.guess_reader_method_from_path(path=path)[
"reader_method"
]
try:
if reader_method.lower() in ["delta", "avro"]:
return reader.format(reader_method.lower()).load
return getattr(reader, reader_method)
except AttributeError:
raise BatchKwargsError(
"Unable to find reader_method %s in spark." % reader_method,
{"reader_method": reader_method},
) | [
"def",
"_get_reader_fn",
"(",
"self",
",",
"reader",
",",
"reader_method",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"if",
"reader_method",
"is",
"None",
"and",
"path",
"is",
"None",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to determine spark reader function without reader_method or path.\"",
",",
"{",
"\"reader_method\"",
":",
"reader_method",
"}",
",",
")",
"if",
"reader_method",
"is",
"None",
":",
"reader_method",
"=",
"self",
".",
"guess_reader_method_from_path",
"(",
"path",
"=",
"path",
")",
"[",
"\"reader_method\"",
"]",
"try",
":",
"if",
"reader_method",
".",
"lower",
"(",
")",
"in",
"[",
"\"delta\"",
",",
"\"avro\"",
"]",
":",
"return",
"reader",
".",
"format",
"(",
"reader_method",
".",
"lower",
"(",
")",
")",
".",
"load",
"return",
"getattr",
"(",
"reader",
",",
"reader_method",
")",
"except",
"AttributeError",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to find reader_method %s in spark.\"",
"%",
"reader_method",
",",
"{",
"\"reader_method\"",
":",
"reader_method",
"}",
",",
")"
] | [
260,
4
] | [
292,
13
] | python | en | ['en', 'no', 'en'] | True |
Logs.get_local_logger | (self, name, log_file) | Returns a local logger with a file handler. | Returns a local logger with a file handler. | def get_local_logger(self, name, log_file):
"""Returns a local logger with a file handler."""
handler = RotatingFileHandler(log_file, maxBytes=MAX_LOG_BYTES)
handler.setFormatter(Formatter(LOGS_FORMAT))
handler.setLevel(DEBUG)
logger = getLogger(name)
logger.setLevel(DEBUG)
logger.handlers = [handler]
return (logger, handler) | [
"def",
"get_local_logger",
"(",
"self",
",",
"name",
",",
"log_file",
")",
":",
"handler",
"=",
"RotatingFileHandler",
"(",
"log_file",
",",
"maxBytes",
"=",
"MAX_LOG_BYTES",
")",
"handler",
".",
"setFormatter",
"(",
"Formatter",
"(",
"LOGS_FORMAT",
")",
")",
"handler",
".",
"setLevel",
"(",
"DEBUG",
")",
"logger",
"=",
"getLogger",
"(",
"name",
")",
"logger",
".",
"setLevel",
"(",
"DEBUG",
")",
"logger",
".",
"handlers",
"=",
"[",
"handler",
"]",
"return",
"(",
"logger",
",",
"handler",
")"
] | [
52,
4
] | [
63,
32
] | python | en | ['en', 'en', 'en'] | True |
Logs.debug | (self, text) | Logs at the DEBUG level. | Logs at the DEBUG level. | def debug(self, text):
"""Logs at the DEBUG level."""
if self.to_cloud:
self.safe_cloud_log_text(text, severity='DEBUG')
else:
self.local_logger.debug(text) | [
"def",
"debug",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"to_cloud",
":",
"self",
".",
"safe_cloud_log_text",
"(",
"text",
",",
"severity",
"=",
"'DEBUG'",
")",
"else",
":",
"self",
".",
"local_logger",
".",
"debug",
"(",
"text",
")"
] | [
65,
4
] | [
71,
41
] | python | en | ['en', 'en', 'en'] | True |
Logs.info | (self, text) | Logs at the INFO level. | Logs at the INFO level. | def info(self, text):
"""Logs at the INFO level."""
if self.to_cloud:
self.safe_cloud_log_text(text, severity='INFO')
else:
self.local_logger.info(text) | [
"def",
"info",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"to_cloud",
":",
"self",
".",
"safe_cloud_log_text",
"(",
"text",
",",
"severity",
"=",
"'INFO'",
")",
"else",
":",
"self",
".",
"local_logger",
".",
"info",
"(",
"text",
")"
] | [
73,
4
] | [
79,
40
] | python | en | ['en', 'en', 'en'] | True |
Logs.warn | (self, text) | Logs at the WARNING level. | Logs at the WARNING level. | def warn(self, text):
"""Logs at the WARNING level."""
if self.to_cloud:
self.safe_cloud_log_text(text, severity='WARNING')
else:
self.local_logger.warning(text) | [
"def",
"warn",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"to_cloud",
":",
"self",
".",
"safe_cloud_log_text",
"(",
"text",
",",
"severity",
"=",
"'WARNING'",
")",
"else",
":",
"self",
".",
"local_logger",
".",
"warning",
"(",
"text",
")"
] | [
81,
4
] | [
87,
43
] | python | en | ['en', 'en', 'en'] | True |
Logs.error | (self, text) | Logs at the ERROR level. | Logs at the ERROR level. | def error(self, text):
"""Logs at the ERROR level."""
if self.to_cloud:
self.safe_cloud_log_text(text, severity='ERROR')
else:
self.local_logger.error(text) | [
"def",
"error",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"to_cloud",
":",
"self",
".",
"safe_cloud_log_text",
"(",
"text",
",",
"severity",
"=",
"'ERROR'",
")",
"else",
":",
"self",
".",
"local_logger",
".",
"error",
"(",
"text",
")"
] | [
89,
4
] | [
95,
41
] | python | en | ['en', 'en', 'en'] | True |
Logs.catch | (self) | Logs the latest exception. | Logs the latest exception. | def catch(self):
"""Logs the latest exception."""
exception_str = self.format_exception()
if self.to_cloud:
self.safe_report_exception(exception_str)
self.safe_cloud_log_text(exception_str, severity='CRITICAL')
else:
self.local_logger.critical(exception_str) | [
"def",
"catch",
"(",
"self",
")",
":",
"exception_str",
"=",
"self",
".",
"format_exception",
"(",
")",
"if",
"self",
".",
"to_cloud",
":",
"self",
".",
"safe_report_exception",
"(",
"exception_str",
")",
"self",
".",
"safe_cloud_log_text",
"(",
"exception_str",
",",
"severity",
"=",
"'CRITICAL'",
")",
"else",
":",
"self",
".",
"local_logger",
".",
"critical",
"(",
"exception_str",
")"
] | [
97,
4
] | [
106,
53
] | python | en | ['en', 'en', 'en'] | True |
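A usage sketch for the catch() helper above; the Logs constructor signature is assumed, since it is not shown in this excerpt.

logs = Logs()  # assumed no-argument construction; the real signature is not shown here
try:
    1 / 0  # stand-in for a risky operation
except Exception:
    logs.catch()  # formats sys.exc_info() and logs it at the CRITICAL level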
Logs.safe_cloud_log_text | (self, text, severity) | Logs to the cloud, retries if necessary, and eventually fails over
to local logs.
| Logs to the cloud, retries if necessary, and eventually fails over
to local logs.
| def safe_cloud_log_text(self, text, severity):
"""Logs to the cloud, retries if necessary, and eventually fails over
to local logs.
"""
try:
self.retry_cloud_log_text(text, severity)
except Exception:
exception_str = self.format_exception()
self.fallback_logger.error('Failed to log to cloud: %s %s\n%s' %
(severity, text, exception_str)) | [
"def",
"safe_cloud_log_text",
"(",
"self",
",",
"text",
",",
"severity",
")",
":",
"try",
":",
"self",
".",
"retry_cloud_log_text",
"(",
"text",
",",
"severity",
")",
"except",
"Exception",
":",
"exception_str",
"=",
"self",
".",
"format_exception",
"(",
")",
"self",
".",
"fallback_logger",
".",
"error",
"(",
"'Failed to log to cloud: %s %s\\n%s'",
"%",
"(",
"severity",
",",
"text",
",",
"exception_str",
")",
")"
] | [
108,
4
] | [
118,
71
] | python | en | ['en', 'en', 'en'] | True |
Logs.retry_cloud_log_text | (self, text, severity) | Logs to the cloud and retries up to 10 times with exponential
backoff (51.2 seconds max total) if the upload fails.
| Logs to the cloud and retries up to 10 times with exponential
backoff (51.2 seconds max total) if the upload fails.
| def retry_cloud_log_text(self, text, severity):
"""Logs to the cloud and retries up to 10 times with exponential
backoff (51.2 seconds max total) if the upload fails.
"""
self.cloud_logger.log_text(text, severity=severity) | [
"def",
"retry_cloud_log_text",
"(",
"self",
",",
"text",
",",
"severity",
")",
":",
"self",
".",
"cloud_logger",
".",
"log_text",
"(",
"text",
",",
"severity",
"=",
"severity",
")"
] | [
121,
4
] | [
126,
59
] | python | en | ['en', 'en', 'en'] | True |
Logs.safe_report_exception | (self, exception_str) | Reports the exception, retries if necessary, and eventually fails
over to local logs.
| Reports the exception, retries if necessary, and eventually fails
over to local logs.
| def safe_report_exception(self, exception_str):
"""Reports the exception, retries if necessary, and eventually fails
over to local logs.
"""
try:
self.retry_report_exception(exception_str)
except Exception:
meta_exception_str = self.format_exception()
self.fallback_logger.error('Failed to report exception: %s\n%s' %
(exception_str, meta_exception_str)) | [
"def",
"safe_report_exception",
"(",
"self",
",",
"exception_str",
")",
":",
"try",
":",
"self",
".",
"retry_report_exception",
"(",
"exception_str",
")",
"except",
"Exception",
":",
"meta_exception_str",
"=",
"self",
".",
"format_exception",
"(",
")",
"self",
".",
"fallback_logger",
".",
"error",
"(",
"'Failed to report exception: %s\\n%s'",
"%",
"(",
"exception_str",
",",
"meta_exception_str",
")",
")"
] | [
128,
4
] | [
138,
75
] | python | en | ['en', 'en', 'en'] | True |
Logs.retry_report_exception | (self, exception_str) | Reports the exception and retries up to 10 times with exponential
backoff (51.2 seconds max total) if the upload fails.
| Reports the exception and retries up to 10 times with exponential
backoff (51.2 seconds max total) if the upload fails.
| def retry_report_exception(self, exception_str):
"""Reports the exception and retries up to 10 times with exponential
backoff (51.2 seconds max total) if the upload fails.
"""
self.error_client.report(exception_str) | [
"def",
"retry_report_exception",
"(",
"self",
",",
"exception_str",
")",
":",
"self",
".",
"error_client",
".",
"report",
"(",
"exception_str",
")"
] | [
141,
4
] | [
146,
47
] | python | en | ['en', 'en', 'en'] | True |
Logs.format_exception | (self) | Grabs the latest exception and formats it. | Grabs the latest exception and formats it. | def format_exception(self):
"""Grabs the latest exception and formats it."""
exc_type, exc_value, exc_traceback = exc_info()
exc_format = format_exception(exc_type, exc_value, exc_traceback)
return ''.join(exc_format).strip() | [
"def",
"format_exception",
"(",
"self",
")",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"exc_info",
"(",
")",
"exc_format",
"=",
"format_exception",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
"return",
"''",
".",
"join",
"(",
"exc_format",
")",
".",
"strip",
"(",
")"
] | [
148,
4
] | [
153,
42
] | python | en | ['en', 'en', 'en'] | True |
wrap | (func, *args, unsqueeze=False) |
Wrap a torch function so it can be called with NumPy arrays.
Input and return types are seamlessly converted.
|
Wrap a torch function so it can be called with NumPy arrays.
Input and return types are seamlessly converted.
| def wrap(func, *args, unsqueeze=False):
"""
Wrap a torch function so it can be called with NumPy arrays.
Input and return types are seamlessly converted.
"""
# Convert input types where applicable
args = list(args)
for i, arg in enumerate(args):
if type(arg) == np.ndarray:
args[i] = torch.from_numpy(arg)
if unsqueeze:
args[i] = args[i].unsqueeze(0)
result = func(*args)
# Convert output types where applicable
if isinstance(result, tuple):
result = list(result)
for i, res in enumerate(result):
if type(res) == torch.Tensor:
if unsqueeze:
res = res.squeeze(0)
result[i] = res.numpy()
return tuple(result)
elif type(result) == torch.Tensor:
if unsqueeze:
result = result.squeeze(0)
return result.numpy()
else:
return result | [
"def",
"wrap",
"(",
"func",
",",
"*",
"args",
",",
"unsqueeze",
"=",
"False",
")",
":",
"# Convert input types where applicable",
"args",
"=",
"list",
"(",
"args",
")",
"for",
"i",
",",
"arg",
"in",
"enumerate",
"(",
"args",
")",
":",
"if",
"type",
"(",
"arg",
")",
"==",
"np",
".",
"ndarray",
":",
"args",
"[",
"i",
"]",
"=",
"torch",
".",
"from_numpy",
"(",
"arg",
")",
"if",
"unsqueeze",
":",
"args",
"[",
"i",
"]",
"=",
"args",
"[",
"i",
"]",
".",
"unsqueeze",
"(",
"0",
")",
"result",
"=",
"func",
"(",
"*",
"args",
")",
"# Convert output types where applicable",
"if",
"isinstance",
"(",
"result",
",",
"tuple",
")",
":",
"result",
"=",
"list",
"(",
"result",
")",
"for",
"i",
",",
"res",
"in",
"enumerate",
"(",
"result",
")",
":",
"if",
"type",
"(",
"res",
")",
"==",
"torch",
".",
"Tensor",
":",
"if",
"unsqueeze",
":",
"res",
"=",
"res",
".",
"squeeze",
"(",
"0",
")",
"result",
"[",
"i",
"]",
"=",
"res",
".",
"numpy",
"(",
")",
"return",
"tuple",
"(",
"result",
")",
"elif",
"type",
"(",
"result",
")",
"==",
"torch",
".",
"Tensor",
":",
"if",
"unsqueeze",
":",
"result",
"=",
"result",
".",
"squeeze",
"(",
"0",
")",
"return",
"result",
".",
"numpy",
"(",
")",
"else",
":",
"return",
"result"
] | [
4,
0
] | [
34,
21
] | python | en | ['en', 'error', 'th'] | False |
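A hedged usage sketch for `wrap`; the arrays and torch functions below are illustrative, not taken from this row:

import numpy as np
import torch

a = np.arange(6, dtype=np.float32).reshape(2, 3)

# torch.sum runs on the converted tensor; the 0-d tensor result comes back as NumPy.
total = wrap(torch.sum, a)

# unsqueeze=True adds a leading batch dimension before the call and strips
# it from any tensor results afterwards.
activated = wrap(torch.nn.functional.relu, a - 2.0, unsqueeze=True)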
convert_to_dtype | (data, dtype) |
A utility function converting xarray, pandas, or NumPy data to a given dtype.
Parameters
----------
data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
or numpy.ndarray
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
|
A utility function converting xarray, pandas, or NumPy data to a given dtype. | def convert_to_dtype(data, dtype):
"""
A utility function converting xarray, pandas, or NumPy data to a given dtype.
Parameters
----------
data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
or numpy.ndarray
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
"""
if dtype is None: # Don't convert the data type.
return data
return data.astype(dtype) | [
"def",
"convert_to_dtype",
"(",
"data",
",",
"dtype",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"# Don't convert the data type.",
"return",
"data",
"return",
"data",
".",
"astype",
"(",
"dtype",
")"
] | [
34,
0
] | [
48,
29
] | python | en | ['en', 'error', 'th'] | False |
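A short usage sketch; the array values are illustrative:

import numpy as np
import xarray as xr

arr = xr.DataArray(np.array([1.5, 2.5], dtype=np.float64))

as_int16 = convert_to_dtype(arr, np.int16)  # astype truncates toward zero: [1, 2]
unchanged = convert_to_dtype(arr, None)     # dtype=None skips conversion entirely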
create_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs) |
Creates a most-recent-to-oldest mosaic of the input dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
intermediate_product: xarray.Dataset
A 2D dataset used to store intermediate results.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Creates a most-recent-to-oldest mosaic of the input dataset. | def create_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):
"""
Creates a most-recent-to-oldest mosaic of the input dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
intermediate_product: xarray.Dataset
A 2D dataset used to store intermediate results.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
dataset_in_dtypes, band_list = [None]*2
if dtype is None:
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
time_slices = range(len(dataset_in.time))
if 'reverse_time' in kwargs:
time_slices = reversed(time_slices)
for timeslice in time_slices:
dataset_slice = dataset_in.isel(time=timeslice).drop('time')
clean_mask_slice = clean_mask[timeslice]
dataset_slice = dataset_slice.where((dataset_slice != no_data) & (clean_mask_slice))
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
utilities.clear_attrs(dataset_out)
else:
for key in list(dataset_slice.data_vars):
data_var_is_no_data = dataset_out[key].values == no_data
dataset_out[key].values[data_var_is_no_data] = dataset_slice[key].values[data_var_is_no_data]
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"intermediate_product",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"dataset_in_dtypes",
",",
"band_list",
"=",
"[",
"None",
"]",
"*",
"2",
"if",
"dtype",
"is",
"None",
":",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"if",
"intermediate_product",
"is",
"not",
"None",
":",
"dataset_out",
"=",
"intermediate_product",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"else",
":",
"dataset_out",
"=",
"None",
"time_slices",
"=",
"range",
"(",
"len",
"(",
"dataset_in",
".",
"time",
")",
")",
"if",
"'reverse_time'",
"in",
"kwargs",
":",
"time_slices",
"=",
"reversed",
"(",
"time_slices",
")",
"for",
"timeslice",
"in",
"time_slices",
":",
"dataset_slice",
"=",
"dataset_in",
".",
"isel",
"(",
"time",
"=",
"timeslice",
")",
".",
"drop",
"(",
"'time'",
")",
"clean_mask_slice",
"=",
"clean_mask",
"[",
"timeslice",
"]",
"dataset_slice",
"=",
"dataset_slice",
".",
"where",
"(",
"(",
"dataset_slice",
"!=",
"no_data",
")",
"&",
"(",
"clean_mask_slice",
")",
")",
"if",
"dataset_out",
"is",
"None",
":",
"dataset_out",
"=",
"dataset_slice",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"utilities",
".",
"clear_attrs",
"(",
"dataset_out",
")",
"else",
":",
"for",
"key",
"in",
"list",
"(",
"dataset_slice",
".",
"data_vars",
")",
":",
"data_var_is_no_data",
"=",
"dataset_out",
"[",
"key",
"]",
".",
"values",
"==",
"no_data",
"dataset_out",
"[",
"key",
"]",
".",
"values",
"[",
"data_var_is_no_data",
"]",
"=",
"dataset_slice",
"[",
"key",
"]",
".",
"values",
"[",
"data_var_is_no_data",
"]",
"# Handle datatype conversions.",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
55,
0
] | [
117,
22
] | python | en | ['en', 'error', 'th'] | False |
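A hedged usage sketch; `dataset` and `cloud_free` are placeholders, and the helpers the function calls (`create_default_clean_mask`, `utilities.clear_attrs`, `restore_or_convert_dtypes`) are defined elsewhere in the module:

# cloud_free: boolean ndarray shaped (time, latitude, longitude); True keeps a pixel.
mosaic = create_mosaic(dataset, clean_mask=cloud_free, no_data=-9999)

# Passing any reverse_time keyword flips the iteration to oldest-first compositing.
oldest_first = create_mosaic(dataset, clean_mask=cloud_free, reverse_time=True)

# An earlier composite can be passed back in so only its remaining no-data
# pixels are filled from the new scenes (iterative compositing).
mosaic = create_mosaic(dataset, clean_mask=cloud_free, intermediate_product=mosaic)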
create_mean_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs) |
Method for calculating the mean pixel value for a given dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Method for calculating the mean pixel value for a given dataset. | def create_mean_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs):
"""
Method for calculating the mean pixel value for a given dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
dataset_in_dtypes = band_list = None  # band_list stays None unless dtypes must be restored.
if dtype is None:
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and scan lines.
dataset_in = dataset_in.where((dataset_in != no_data) & (clean_mask))
dataset_out = dataset_in.mean(dim='time', skipna=True, keep_attrs=False)
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_mean_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"dataset_in_dtypes",
"=",
"None",
"if",
"dtype",
"is",
"None",
":",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"# Mask out clouds and scan lines.",
"dataset_in",
"=",
"dataset_in",
".",
"where",
"(",
"(",
"dataset_in",
"!=",
"no_data",
")",
"&",
"(",
"clean_mask",
")",
")",
"dataset_out",
"=",
"dataset_in",
".",
"mean",
"(",
"dim",
"=",
"'time'",
",",
"skipna",
"=",
"True",
",",
"keep_attrs",
"=",
"False",
")",
"# Handle datatype conversions.",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
119,
0
] | [
163,
22
] | python | en | ['en', 'error', 'th'] | False |
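Usage follows the same pattern as `create_mosaic`; the inputs below are placeholders:

import numpy as np

# Per-pixel mean over time, ignoring masked and no-data values (skipna=True).
mean_composite = create_mean_mosaic(dataset, clean_mask=cloud_free,
                                    no_data=-9999, dtype=np.float32)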
create_median_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs) |
Method for calculating the median pixel value for a given dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Method for calculating the median pixel value for a given dataset. | def create_median_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs):
"""
Method for calculating the median pixel value for a given dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
dataset_in_dtypes = band_list = None  # band_list stays None unless dtypes must be restored.
if dtype is None:
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and Landsat 7 scan lines.
dataset_in = dataset_in.where((dataset_in != no_data) & (clean_mask))
dataset_out = dataset_in.median(dim='time', skipna=True, keep_attrs=False)
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_median_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"dataset_in_dtypes",
"=",
"None",
"if",
"dtype",
"is",
"None",
":",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"# Mask out clouds and Landsat 7 scan lines.",
"dataset_in",
"=",
"dataset_in",
".",
"where",
"(",
"(",
"dataset_in",
"!=",
"no_data",
")",
"&",
"(",
"clean_mask",
")",
")",
"dataset_out",
"=",
"dataset_in",
".",
"median",
"(",
"dim",
"=",
"'time'",
",",
"skipna",
"=",
"True",
",",
"keep_attrs",
"=",
"False",
")",
"# Handle datatype conversions.",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
166,
0
] | [
210,
22
] | python | en | ['en', 'error', 'th'] | False |
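Same calling convention; a per-pixel median over time is less sensitive than the mean to residual outliers such as undetected cloud. The inputs are placeholders:

# dtype restores integer bands after the float64 conversion that masking
# with Dataset.where() forces.
median_composite = create_median_mosaic(dataset, clean_mask=cloud_free,
                                        no_data=-9999, dtype='int16')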