identifier (string, 1–155 chars) | parameters (string, 2–6.09k chars) | docstring (string, 11–63.4k chars) | docstring_summary (string, 0–63.4k chars) | function (string, 29–99.8k chars) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (1 class) | docstring_language (string, 2–7 chars) | docstring_language_predictions (string, 18–23 chars) | is_langid_reliable (2 classes)
---|---|---|---|---|---|---|---|---|---|---|---
as_market_time | (year, month, day, hour=0, minute=0, second=0) | Creates a timestamp in market time. | Creates a timestamp in market time. | def as_market_time(year, month, day, hour=0, minute=0, second=0):
"""Creates a timestamp in market time."""
market_time = datetime(year, month, day, hour, minute, second)
return MARKET_TIMEZONE.localize(market_time) | [
"def",
"as_market_time",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
")",
":",
"market_time",
"=",
"datetime",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"second",
")",
"return",
"MARKET_TIMEZONE",
".",
"localize",
"(",
"market_time",
")"
] | [
19,
0
] | [
23,
48
] | python | en | ['en', 'en', 'en'] | True |
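The row above ships only the function body; MARKET_TIMEZONE is defined elsewhere in the source project. A minimal usage sketch, assuming it is a pytz timezone (US/Eastern is only a plausible stand-in):

```python
from datetime import datetime

import pytz

# Assumption: the source project defines MARKET_TIMEZONE elsewhere; US/Eastern
# is a guess used only for this sketch.
MARKET_TIMEZONE = pytz.timezone('US/Eastern')

def as_market_time(year, month, day, hour=0, minute=0, second=0):
    """Creates a timestamp in market time."""
    market_time = datetime(year, month, day, hour, minute, second)
    return MARKET_TIMEZONE.localize(market_time)

print(as_market_time(2024, 1, 2, 9, 30))  # 2024-01-02 09:30:00-05:00
```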
GetIncludedBuildFiles | (build_file_path, aux_data, included=None) | Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
| Return a list of all build files included into build_file_path. | def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included | [
"def",
"GetIncludedBuildFiles",
"(",
"build_file_path",
",",
"aux_data",
",",
"included",
"=",
"None",
")",
":",
"if",
"included",
"==",
"None",
":",
"included",
"=",
"[",
"]",
"if",
"build_file_path",
"in",
"included",
":",
"return",
"included",
"included",
".",
"append",
"(",
"build_file_path",
")",
"for",
"included_build_file",
"in",
"aux_data",
"[",
"build_file_path",
"]",
".",
"get",
"(",
"'included'",
",",
"[",
"]",
")",
":",
"GetIncludedBuildFiles",
"(",
"included_build_file",
",",
"aux_data",
",",
"included",
")",
"return",
"included"
] | [
142,
0
] | [
172,
17
] | python | en | ['en', 'en', 'en'] | True |
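A small self-contained check of the traversal above (a trimmed copy of the function, with `is None` in place of `== None`), driven by a toy aux_data dict:

```python
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
    """Trimmed copy of the function above, for illustration only."""
    if included is None:
        included = []
    if build_file_path in included:
        return included
    included.append(build_file_path)
    for included_build_file in aux_data[build_file_path].get('included', []):
        GetIncludedBuildFiles(included_build_file, aux_data, included)
    return included

# Toy data: app.gyp includes common.gypi, which includes nothing further.
aux_data = {
    'app.gyp': {'included': ['common.gypi']},
    'common.gypi': {},
}
assert GetIncludedBuildFiles('app.gyp', aux_data) == ['app.gyp', 'common.gypi']
```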
CheckedEval | (file_contents) | Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
| Return the eval of a gyp file. | def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], []) | [
"def",
"CheckedEval",
"(",
"file_contents",
")",
":",
"ast",
"=",
"compiler",
".",
"parse",
"(",
"file_contents",
")",
"assert",
"isinstance",
"(",
"ast",
",",
"Module",
")",
"c1",
"=",
"ast",
".",
"getChildren",
"(",
")",
"assert",
"c1",
"[",
"0",
"]",
"is",
"None",
"assert",
"isinstance",
"(",
"c1",
"[",
"1",
"]",
",",
"Stmt",
")",
"c2",
"=",
"c1",
"[",
"1",
"]",
".",
"getChildren",
"(",
")",
"assert",
"isinstance",
"(",
"c2",
"[",
"0",
"]",
",",
"Discard",
")",
"c3",
"=",
"c2",
"[",
"0",
"]",
".",
"getChildren",
"(",
")",
"assert",
"len",
"(",
"c3",
")",
"==",
"1",
"return",
"CheckNode",
"(",
"c3",
"[",
"0",
"]",
",",
"[",
"]",
")"
] | [
175,
0
] | [
193,
29
] | python | en | ['en', 'en', 'en'] | True |
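CheckedEval depends on the `compiler` module, which exists only in Python 2. As a rough modern analogue (not the function above), `ast.literal_eval` gives a similar data-only guarantee, though it does not reject repeated dictionary keys the way CheckNode does:

```python
import ast

# A .gyp file is a Python literal restricted to dicts, lists, strings and ints.
file_contents = "{'targets': [{'target_name': 'foo', 'type': 'none'}]}"
data = ast.literal_eval(file_contents)
assert data['targets'][0]['target_name'] == 'foo'
```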
CallLoadTargetBuildFile | (global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info) | Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
| Wrapper around LoadTargetBuildFile for parallel processing. | def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None | [
"def",
"CallLoadTargetBuildFile",
"(",
"global_flags",
",",
"build_file_path",
",",
"variables",
",",
"includes",
",",
"depth",
",",
"check",
",",
"generator_input_info",
")",
":",
"try",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIG_IGN",
")",
"# Apply globals so that the worker process behaves the same.",
"for",
"key",
",",
"value",
"in",
"global_flags",
".",
"iteritems",
"(",
")",
":",
"globals",
"(",
")",
"[",
"key",
"]",
"=",
"value",
"SetGeneratorGlobals",
"(",
"generator_input_info",
")",
"result",
"=",
"LoadTargetBuildFile",
"(",
"build_file_path",
",",
"per_process_data",
",",
"per_process_aux_data",
",",
"variables",
",",
"includes",
",",
"depth",
",",
"check",
",",
"False",
")",
"if",
"not",
"result",
":",
"return",
"result",
"(",
"build_file_path",
",",
"dependencies",
")",
"=",
"result",
"# We can safely pop the build_file_data from per_process_data because it",
"# will never be referenced by this process again, so we don't need to keep",
"# it in the cache.",
"build_file_data",
"=",
"per_process_data",
".",
"pop",
"(",
"build_file_path",
")",
"# This gets serialized and sent back to the main process via a pipe.",
"# It's handled in LoadTargetBuildFileCallback.",
"return",
"(",
"build_file_path",
",",
"build_file_data",
",",
"dependencies",
")",
"except",
"GypError",
",",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"gyp: %s\\n\"",
"%",
"e",
")",
"return",
"None",
"except",
"Exception",
",",
"e",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"'Exception:'",
",",
"e",
"print",
">>",
"sys",
".",
"stderr",
",",
"traceback",
".",
"format_exc",
"(",
")",
"return",
"None"
] | [
483,
0
] | [
525,
15
] | python | en | ['en', 'en', 'en'] | True |
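A generic sketch of the worker pattern this wrapper implements (not GYP's own code): each worker ignores SIGINT so that Ctrl+C is handled only by the parent, and failures are turned into a None result the parent can detect.

```python
import signal
from multiprocessing import Pool

def load_one(path):
    # Ignore SIGINT so only the parent process reacts to Ctrl+C.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        return (path, len(path))  # stand-in for the real LoadTargetBuildFile
    except Exception:
        return None  # the parent treats None as "this build file failed"

if __name__ == '__main__':
    with Pool(processes=2) as pool:
        print(pool.map(load_one, ['a.gyp', 'b.gyp']))
```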
IsStrCanonicalInt | (string) | Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
| Returns True if |string| is in its canonical integer form. | def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False | [
"def",
"IsStrCanonicalInt",
"(",
"string",
")",
":",
"if",
"type",
"(",
"string",
")",
"is",
"str",
":",
"# This function is called a lot so for maximum performance, avoid",
"# involving regexps which would otherwise make the code much",
"# shorter. Regexps would need twice the time of this function.",
"if",
"string",
":",
"if",
"string",
"==",
"\"0\"",
":",
"return",
"True",
"if",
"string",
"[",
"0",
"]",
"==",
"\"-\"",
":",
"string",
"=",
"string",
"[",
"1",
":",
"]",
"if",
"not",
"string",
":",
"return",
"False",
"if",
"'1'",
"<=",
"string",
"[",
"0",
"]",
"<=",
"'9'",
":",
"return",
"string",
".",
"isdigit",
"(",
")",
"return",
"False"
] | [
652,
0
] | [
671,
14
] | python | en | ['en', 'en', 'en'] | True |
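A few illustrative checks of the definition above, using a trimmed copy of the function; canonical means exactly what str(int(s)) would print, so leading zeros, "-0", and non-strings all fail:

```python
def IsStrCanonicalInt(string):
    """Trimmed copy of the function above, for illustration only."""
    if type(string) is str:
        if string:
            if string == "0":
                return True
            if string[0] == "-":
                string = string[1:]
                if not string:
                    return False
            if '1' <= string[0] <= '9':
                return string.isdigit()
    return False

assert IsStrCanonicalInt("0")
assert IsStrCanonicalInt("-42")
assert not IsStrCanonicalInt("042")  # leading zero: str(int("042")) != "042"
assert not IsStrCanonicalInt("-0")   # str(int("-0")) == "0", not "-0"
assert not IsStrCanonicalInt(42)     # not a str at all
```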
EvalCondition | (condition, conditions_key, phase, variables, build_file) | Returns the dict that should be used or None if the result was
that nothing should be used. | Returns the dict that should be used or None if the result was
that nothing should be used. | def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result | [
"def",
"EvalCondition",
"(",
"condition",
",",
"conditions_key",
",",
"phase",
",",
"variables",
",",
"build_file",
")",
":",
"if",
"type",
"(",
"condition",
")",
"is",
"not",
"list",
":",
"raise",
"GypError",
"(",
"conditions_key",
"+",
"' must be a list'",
")",
"if",
"len",
"(",
"condition",
")",
"<",
"2",
":",
"# It's possible that condition[0] won't work in which case this",
"# attempt will raise its own IndexError. That's probably fine.",
"raise",
"GypError",
"(",
"conditions_key",
"+",
"' '",
"+",
"condition",
"[",
"0",
"]",
"+",
"' must be at least length 2, not '",
"+",
"str",
"(",
"len",
"(",
"condition",
")",
")",
")",
"i",
"=",
"0",
"result",
"=",
"None",
"while",
"i",
"<",
"len",
"(",
"condition",
")",
":",
"cond_expr",
"=",
"condition",
"[",
"i",
"]",
"true_dict",
"=",
"condition",
"[",
"i",
"+",
"1",
"]",
"if",
"type",
"(",
"true_dict",
")",
"is",
"not",
"dict",
":",
"raise",
"GypError",
"(",
"'{} {} must be followed by a dictionary, not {}'",
".",
"format",
"(",
"conditions_key",
",",
"cond_expr",
",",
"type",
"(",
"true_dict",
")",
")",
")",
"if",
"len",
"(",
"condition",
")",
">",
"i",
"+",
"2",
"and",
"type",
"(",
"condition",
"[",
"i",
"+",
"2",
"]",
")",
"is",
"dict",
":",
"false_dict",
"=",
"condition",
"[",
"i",
"+",
"2",
"]",
"i",
"=",
"i",
"+",
"3",
"if",
"i",
"!=",
"len",
"(",
"condition",
")",
":",
"raise",
"GypError",
"(",
"'{} {} has {} unexpected trailing items'",
".",
"format",
"(",
"conditions_key",
",",
"cond_expr",
",",
"len",
"(",
"condition",
")",
"-",
"i",
")",
")",
"else",
":",
"false_dict",
"=",
"None",
"i",
"=",
"i",
"+",
"2",
"if",
"result",
"==",
"None",
":",
"result",
"=",
"EvalSingleCondition",
"(",
"cond_expr",
",",
"true_dict",
",",
"false_dict",
",",
"phase",
",",
"variables",
",",
"build_file",
")",
"return",
"result"
] | [
1040,
0
] | [
1072,
15
] | python | en | ['en', 'en', 'en'] | True |
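A data-shape sketch of what this function consumes (a literal only, not executable against GYP here): a conditions entry is a flat list of expression/dict pairs, optionally ending with a single false-branch dict, and the first pair whose expression holds wins:

```python
# cond_expr, true_dict, cond_expr, true_dict, ..., optional trailing false_dict
condition = [
    'OS=="mac"', {'defines': ['IS_MAC']},
    'OS=="win"', {'defines': ['IS_WIN']},
                 {'defines': ['IS_POSIX']},  # used only if no expression matched
]
```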
EvalSingleCondition | (
cond_expr, true_dict, false_dict, phase, variables, build_file) | Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise. | Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise. | def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
# Do expansions on the condition itself. Since the conditon can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e) | [
"def",
"EvalSingleCondition",
"(",
"cond_expr",
",",
"true_dict",
",",
"false_dict",
",",
"phase",
",",
"variables",
",",
"build_file",
")",
":",
"# Do expansions on the condition itself. Since the conditon can naturally",
"# contain variable references without needing to resort to GYP expansion",
"# syntax, this is of dubious value for variables, but someone might want to",
"# use a command expansion directly inside a condition.",
"cond_expr_expanded",
"=",
"ExpandVariables",
"(",
"cond_expr",
",",
"phase",
",",
"variables",
",",
"build_file",
")",
"if",
"type",
"(",
"cond_expr_expanded",
")",
"not",
"in",
"(",
"str",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"'Variable expansion in this context permits str and int '",
"+",
"'only, found '",
"+",
"cond_expr_expanded",
".",
"__class__",
".",
"__name__",
")",
"try",
":",
"if",
"cond_expr_expanded",
"in",
"cached_conditions_asts",
":",
"ast_code",
"=",
"cached_conditions_asts",
"[",
"cond_expr_expanded",
"]",
"else",
":",
"ast_code",
"=",
"compile",
"(",
"cond_expr_expanded",
",",
"'<string>'",
",",
"'eval'",
")",
"cached_conditions_asts",
"[",
"cond_expr_expanded",
"]",
"=",
"ast_code",
"if",
"eval",
"(",
"ast_code",
",",
"{",
"'__builtins__'",
":",
"None",
"}",
",",
"variables",
")",
":",
"return",
"true_dict",
"return",
"false_dict",
"except",
"SyntaxError",
",",
"e",
":",
"syntax_error",
"=",
"SyntaxError",
"(",
"'%s while evaluating condition \\'%s\\' in %s '",
"'at character %d.'",
"%",
"(",
"str",
"(",
"e",
".",
"args",
"[",
"0",
"]",
")",
",",
"e",
".",
"text",
",",
"build_file",
",",
"e",
".",
"offset",
")",
",",
"e",
".",
"filename",
",",
"e",
".",
"lineno",
",",
"e",
".",
"offset",
",",
"e",
".",
"text",
")",
"raise",
"syntax_error",
"except",
"NameError",
",",
"e",
":",
"gyp",
".",
"common",
".",
"ExceptionAppend",
"(",
"e",
",",
"'while evaluating condition \\'%s\\' in %s'",
"%",
"(",
"cond_expr_expanded",
",",
"build_file",
")",
")",
"raise",
"GypError",
"(",
"e",
")"
] | [
1075,
0
] | [
1108,
21
] | python | en | ['en', 'pt', 'en'] | True |
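The core evaluation step can be shown standalone (a minimal sketch, not the full function): the expanded condition string is compiled once and then evaluated with the GYP variables as locals and builtins disabled:

```python
variables = {'OS': 'mac', 'chromeos': 0}
cond_expr_expanded = 'OS=="mac" and chromeos==0'

ast_code = compile(cond_expr_expanded, '<string>', 'eval')
assert eval(ast_code, {'__builtins__': None}, variables) is True
```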
ProcessVariablesAndConditionsInDict | (the_dict, phase, variables_in,
build_file, the_dict_key=None) | Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
| Handle all variable and command expansion and conditional evaluation. | def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key) | [
"def",
"ProcessVariablesAndConditionsInDict",
"(",
"the_dict",
",",
"phase",
",",
"variables_in",
",",
"build_file",
",",
"the_dict_key",
"=",
"None",
")",
":",
"# Make a copy of the variables_in dict that can be modified during the",
"# loading of automatics and the loading of the variables dict.",
"variables",
"=",
"variables_in",
".",
"copy",
"(",
")",
"LoadAutomaticVariablesFromDict",
"(",
"variables",
",",
"the_dict",
")",
"if",
"'variables'",
"in",
"the_dict",
":",
"# Make sure all the local variables are added to the variables",
"# list before we process them so that you can reference one",
"# variable from another. They will be fully expanded by recursion",
"# in ExpandVariables.",
"for",
"key",
",",
"value",
"in",
"the_dict",
"[",
"'variables'",
"]",
".",
"iteritems",
"(",
")",
":",
"variables",
"[",
"key",
"]",
"=",
"value",
"# Handle the associated variables dict first, so that any variable",
"# references within can be resolved prior to using them as variables.",
"# Pass a copy of the variables dict to avoid having it be tainted.",
"# Otherwise, it would have extra automatics added for everything that",
"# should just be an ordinary variable in this scope.",
"ProcessVariablesAndConditionsInDict",
"(",
"the_dict",
"[",
"'variables'",
"]",
",",
"phase",
",",
"variables",
",",
"build_file",
",",
"'variables'",
")",
"LoadVariablesFromVariablesDict",
"(",
"variables",
",",
"the_dict",
",",
"the_dict_key",
")",
"for",
"key",
",",
"value",
"in",
"the_dict",
".",
"iteritems",
"(",
")",
":",
"# Skip \"variables\", which was already processed if present.",
"if",
"key",
"!=",
"'variables'",
"and",
"type",
"(",
"value",
")",
"is",
"str",
":",
"expanded",
"=",
"ExpandVariables",
"(",
"value",
",",
"phase",
",",
"variables",
",",
"build_file",
")",
"if",
"type",
"(",
"expanded",
")",
"not",
"in",
"(",
"str",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"'Variable expansion in this context permits str and int '",
"+",
"'only, found '",
"+",
"expanded",
".",
"__class__",
".",
"__name__",
"+",
"' for '",
"+",
"key",
")",
"the_dict",
"[",
"key",
"]",
"=",
"expanded",
"# Variable expansion may have resulted in changes to automatics. Reload.",
"# TODO(mark): Optimization: only reload if no changes were made.",
"variables",
"=",
"variables_in",
".",
"copy",
"(",
")",
"LoadAutomaticVariablesFromDict",
"(",
"variables",
",",
"the_dict",
")",
"LoadVariablesFromVariablesDict",
"(",
"variables",
",",
"the_dict",
",",
"the_dict_key",
")",
"# Process conditions in this dict. This is done after variable expansion",
"# so that conditions may take advantage of expanded variables. For example,",
"# if the_dict contains:",
"# {'type': '<(library_type)',",
"# 'conditions': [['_type==\"static_library\"', { ... }]]},",
"# _type, as used in the condition, will only be set to the value of",
"# library_type if variable expansion is performed before condition",
"# processing. However, condition processing should occur prior to recursion",
"# so that variables (both automatic and \"variables\" dict type) may be",
"# adjusted by conditions sections, merged into the_dict, and have the",
"# intended impact on contained dicts.",
"#",
"# This arrangement means that a \"conditions\" section containing a \"variables\"",
"# section will only have those variables effective in subdicts, not in",
"# the_dict. The workaround is to put a \"conditions\" section within a",
"# \"variables\" section. For example:",
"# {'conditions': [['os==\"mac\"', {'variables': {'define': 'IS_MAC'}}]],",
"# 'defines': ['<(define)'],",
"# 'my_subdict': {'defines': ['<(define)']}},",
"# will not result in \"IS_MAC\" being appended to the \"defines\" list in the",
"# current scope but would result in it being appended to the \"defines\" list",
"# within \"my_subdict\". By comparison:",
"# {'variables': {'conditions': [['os==\"mac\"', {'define': 'IS_MAC'}]]},",
"# 'defines': ['<(define)'],",
"# 'my_subdict': {'defines': ['<(define)']}},",
"# will append \"IS_MAC\" to both \"defines\" lists.",
"# Evaluate conditions sections, allowing variable expansions within them",
"# as well as nested conditionals. This will process a 'conditions' or",
"# 'target_conditions' section, perform appropriate merging and recursive",
"# conditional and variable processing, and then remove the conditions section",
"# from the_dict if it is present.",
"ProcessConditionsInDict",
"(",
"the_dict",
",",
"phase",
",",
"variables",
",",
"build_file",
")",
"# Conditional processing may have resulted in changes to automatics or the",
"# variables dict. Reload.",
"variables",
"=",
"variables_in",
".",
"copy",
"(",
")",
"LoadAutomaticVariablesFromDict",
"(",
"variables",
",",
"the_dict",
")",
"LoadVariablesFromVariablesDict",
"(",
"variables",
",",
"the_dict",
",",
"the_dict_key",
")",
"# Recurse into child dicts, or process child lists which may result in",
"# further recursion into descendant dicts.",
"for",
"key",
",",
"value",
"in",
"the_dict",
".",
"iteritems",
"(",
")",
":",
"# Skip \"variables\" and string values, which were already processed if",
"# present.",
"if",
"key",
"==",
"'variables'",
"or",
"type",
"(",
"value",
")",
"is",
"str",
":",
"continue",
"if",
"type",
"(",
"value",
")",
"is",
"dict",
":",
"# Pass a copy of the variables dict so that subdicts can't influence",
"# parents.",
"ProcessVariablesAndConditionsInDict",
"(",
"value",
",",
"phase",
",",
"variables",
",",
"build_file",
",",
"key",
")",
"elif",
"type",
"(",
"value",
")",
"is",
"list",
":",
"# The list itself can't influence the variables dict, and",
"# ProcessVariablesAndConditionsInList will make copies of the variables",
"# dict if it needs to pass it to something that can influence it. No",
"# copy is necessary here.",
"ProcessVariablesAndConditionsInList",
"(",
"value",
",",
"phase",
",",
"variables",
",",
"build_file",
")",
"elif",
"type",
"(",
"value",
")",
"is",
"not",
"int",
":",
"raise",
"TypeError",
"(",
"'Unknown type '",
"+",
"value",
".",
"__class__",
".",
"__name__",
"+",
"' for '",
"+",
"key",
")"
] | [
1193,
0
] | [
1302,
36
] | python | en | ['en', 'en', 'en'] | True |
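The long comment above about ordering is easiest to see as a data-shape sketch (a literal only, not executable against GYP here): nesting a 'conditions' section inside 'variables' is the documented way to make a conditional variable visible in the current scope as well as in subdicts:

```python
target = {
    'variables': {'conditions': [['OS=="mac"', {'define': 'IS_MAC'}]]},
    'defines': ['<(define)'],                  # sees IS_MAC on mac
    'my_subdict': {'defines': ['<(define)']},  # also sees IS_MAC on mac
}
```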
BuildTargetsDict | (data) | Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
| Builds a dict mapping fully-qualified target names to their target dicts. | def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets | [
"def",
"BuildTargetsDict",
"(",
"data",
")",
":",
"targets",
"=",
"{",
"}",
"for",
"build_file",
"in",
"data",
"[",
"'target_build_files'",
"]",
":",
"for",
"target",
"in",
"data",
"[",
"build_file",
"]",
".",
"get",
"(",
"'targets'",
",",
"[",
"]",
")",
":",
"target_name",
"=",
"gyp",
".",
"common",
".",
"QualifiedTarget",
"(",
"build_file",
",",
"target",
"[",
"'target_name'",
"]",
",",
"target",
"[",
"'toolset'",
"]",
")",
"if",
"target_name",
"in",
"targets",
":",
"raise",
"GypError",
"(",
"'Duplicate target definitions for '",
"+",
"target_name",
")",
"targets",
"[",
"target_name",
"]",
"=",
"target",
"return",
"targets"
] | [
1339,
0
] | [
1362,
16
] | python | en | ['en', 'en', 'en'] | True |
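A small sketch of the mapping this builds, using a simplified stand-in for gyp.common.QualifiedTarget (the 'build_file:target_name#toolset' format is an assumption here):

```python
def qualified_target(build_file, target_name, toolset):
    # Simplified stand-in for gyp.common.QualifiedTarget.
    return '%s:%s#%s' % (build_file, target_name, toolset)

data = {
    'target_build_files': ['app.gyp'],
    'app.gyp': {'targets': [{'target_name': 'app', 'toolset': 'target'}]},
}

targets = {}
for build_file in data['target_build_files']:
    for target in data[build_file].get('targets', []):
        name = qualified_target(build_file, target['target_name'],
                                target['toolset'])
        targets[name] = target

assert list(targets) == ['app.gyp:app#target']
```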
QualifyDependencies | (targets) | Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
| Make dependency links fully-qualified relative to the current directory. | def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies') | [
"def",
"QualifyDependencies",
"(",
"targets",
")",
":",
"all_dependency_sections",
"=",
"[",
"dep",
"+",
"op",
"for",
"dep",
"in",
"dependency_sections",
"for",
"op",
"in",
"(",
"''",
",",
"'!'",
",",
"'/'",
")",
"]",
"for",
"target",
",",
"target_dict",
"in",
"targets",
".",
"iteritems",
"(",
")",
":",
"target_build_file",
"=",
"gyp",
".",
"common",
".",
"BuildFile",
"(",
"target",
")",
"toolset",
"=",
"target_dict",
"[",
"'toolset'",
"]",
"for",
"dependency_key",
"in",
"all_dependency_sections",
":",
"dependencies",
"=",
"target_dict",
".",
"get",
"(",
"dependency_key",
",",
"[",
"]",
")",
"for",
"index",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"dependencies",
")",
")",
":",
"dep_file",
",",
"dep_target",
",",
"dep_toolset",
"=",
"gyp",
".",
"common",
".",
"ResolveTarget",
"(",
"target_build_file",
",",
"dependencies",
"[",
"index",
"]",
",",
"toolset",
")",
"if",
"not",
"multiple_toolsets",
":",
"# Ignore toolset specification in the dependency if it is specified.",
"dep_toolset",
"=",
"toolset",
"dependency",
"=",
"gyp",
".",
"common",
".",
"QualifiedTarget",
"(",
"dep_file",
",",
"dep_target",
",",
"dep_toolset",
")",
"dependencies",
"[",
"index",
"]",
"=",
"dependency",
"# Make sure anything appearing in a list other than \"dependencies\" also",
"# appears in the \"dependencies\" list.",
"if",
"dependency_key",
"!=",
"'dependencies'",
"and",
"dependency",
"not",
"in",
"target_dict",
"[",
"'dependencies'",
"]",
":",
"raise",
"GypError",
"(",
"'Found '",
"+",
"dependency",
"+",
"' in '",
"+",
"dependency_key",
"+",
"' of '",
"+",
"target",
"+",
"', but not in dependencies'",
")"
] | [
1365,
0
] | [
1401,
71
] | python | en | ['en', 'en', 'en'] | True |
ExpandWildcardDependencies | (targets, data) | Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
| Expands dependencies specified as build_file:*. | def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1 | [
"def",
"ExpandWildcardDependencies",
"(",
"targets",
",",
"data",
")",
":",
"for",
"target",
",",
"target_dict",
"in",
"targets",
".",
"iteritems",
"(",
")",
":",
"toolset",
"=",
"target_dict",
"[",
"'toolset'",
"]",
"target_build_file",
"=",
"gyp",
".",
"common",
".",
"BuildFile",
"(",
"target",
")",
"for",
"dependency_key",
"in",
"dependency_sections",
":",
"dependencies",
"=",
"target_dict",
".",
"get",
"(",
"dependency_key",
",",
"[",
"]",
")",
"# Loop this way instead of \"for dependency in\" or \"for index in xrange\"",
"# because the dependencies list will be modified within the loop body.",
"index",
"=",
"0",
"while",
"index",
"<",
"len",
"(",
"dependencies",
")",
":",
"(",
"dependency_build_file",
",",
"dependency_target",
",",
"dependency_toolset",
")",
"=",
"gyp",
".",
"common",
".",
"ParseQualifiedTarget",
"(",
"dependencies",
"[",
"index",
"]",
")",
"if",
"dependency_target",
"!=",
"'*'",
"and",
"dependency_toolset",
"!=",
"'*'",
":",
"# Not a wildcard. Keep it moving.",
"index",
"=",
"index",
"+",
"1",
"continue",
"if",
"dependency_build_file",
"==",
"target_build_file",
":",
"# It's an error for a target to depend on all other targets in",
"# the same file, because a target cannot depend on itself.",
"raise",
"GypError",
"(",
"'Found wildcard in '",
"+",
"dependency_key",
"+",
"' of '",
"+",
"target",
"+",
"' referring to same build file'",
")",
"# Take the wildcard out and adjust the index so that the next",
"# dependency in the list will be processed the next time through the",
"# loop.",
"del",
"dependencies",
"[",
"index",
"]",
"index",
"=",
"index",
"-",
"1",
"# Loop through the targets in the other build file, adding them to",
"# this target's list of dependencies in place of the removed",
"# wildcard.",
"dependency_target_dicts",
"=",
"data",
"[",
"dependency_build_file",
"]",
"[",
"'targets'",
"]",
"for",
"dependency_target_dict",
"in",
"dependency_target_dicts",
":",
"if",
"int",
"(",
"dependency_target_dict",
".",
"get",
"(",
"'suppress_wildcard'",
",",
"False",
")",
")",
":",
"continue",
"dependency_target_name",
"=",
"dependency_target_dict",
"[",
"'target_name'",
"]",
"if",
"(",
"dependency_target",
"!=",
"'*'",
"and",
"dependency_target",
"!=",
"dependency_target_name",
")",
":",
"continue",
"dependency_target_toolset",
"=",
"dependency_target_dict",
"[",
"'toolset'",
"]",
"if",
"(",
"dependency_toolset",
"!=",
"'*'",
"and",
"dependency_toolset",
"!=",
"dependency_target_toolset",
")",
":",
"continue",
"dependency",
"=",
"gyp",
".",
"common",
".",
"QualifiedTarget",
"(",
"dependency_build_file",
",",
"dependency_target_name",
",",
"dependency_target_toolset",
")",
"index",
"=",
"index",
"+",
"1",
"dependencies",
".",
"insert",
"(",
"index",
",",
"dependency",
")",
"index",
"=",
"index",
"+",
"1"
] | [
1404,
0
] | [
1470,
25
] | python | en | ['en', 'en', 'en'] | True |
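A data-shape sketch of the wildcard form this function expands (a literal only, not executable against GYP here); a target can opt out of expansion by setting 'suppress_wildcard' in its own dict:

```python
aggregate_target = {
    'target_name': 'all_tests',
    'type': 'none',
    # Depends on every target defined in tests.gyp (except targets that set
    # 'suppress_wildcard': 1 in their own target dict).
    'dependencies': ['tests.gyp:*'],
}
```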
Unify | (l) | Removes duplicate elements from l, keeping the first element. | Removes duplicate elements from l, keeping the first element. | def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen] | [
"def",
"Unify",
"(",
"l",
")",
":",
"seen",
"=",
"{",
"}",
"return",
"[",
"seen",
".",
"setdefault",
"(",
"e",
",",
"e",
")",
"for",
"e",
"in",
"l",
"if",
"e",
"not",
"in",
"seen",
"]"
] | [
1473,
0
] | [
1476,
60
] | python | en | ['en', 'en', 'en'] | True |
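An illustrative check using a trimmed copy of the one-liner above: duplicates are dropped and the first occurrence keeps its position.

```python
def Unify(l):
    seen = {}
    return [seen.setdefault(e, e) for e in l if e not in seen]

assert Unify(['a', 'b', 'a', 'c', 'b']) == ['a', 'b', 'c']
```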
RemoveDuplicateDependencies | (targets) | Makes sure every dependency appears only once in all targets's dependency
lists. | Makes sure every dependency appears only once in all targets's dependency
lists. | def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies) | [
"def",
"RemoveDuplicateDependencies",
"(",
"targets",
")",
":",
"for",
"target_name",
",",
"target_dict",
"in",
"targets",
".",
"iteritems",
"(",
")",
":",
"for",
"dependency_key",
"in",
"dependency_sections",
":",
"dependencies",
"=",
"target_dict",
".",
"get",
"(",
"dependency_key",
",",
"[",
"]",
")",
"if",
"dependencies",
":",
"target_dict",
"[",
"dependency_key",
"]",
"=",
"Unify",
"(",
"dependencies",
")"
] | [
1479,
0
] | [
1486,
57
] | python | en | ['en', 'en', 'en'] | True |
Filter | (l, item) | Removes item from l. | Removes item from l. | def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item] | [
"def",
"Filter",
"(",
"l",
",",
"item",
")",
":",
"res",
"=",
"{",
"}",
"return",
"[",
"res",
".",
"setdefault",
"(",
"e",
",",
"e",
")",
"for",
"e",
"in",
"l",
"if",
"e",
"!=",
"item",
"]"
] | [
1489,
0
] | [
1492,
55
] | python | en | ['en', 'en', 'en'] | True |
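An illustrative check using a trimmed copy of the one-liner above: every occurrence of the item is removed and the remaining order is preserved.

```python
def Filter(l, item):
    res = {}
    return [res.setdefault(e, e) for e in l if e != item]

assert Filter(['a', 'b', 'a', 'c'], 'a') == ['b', 'c']
```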
RemoveSelfDependencies | (targets) | Remove self dependencies from targets that have the prune_self_dependency
variable set. | Remove self dependencies from targets that have the prune_self_dependency
variable set. | def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name) | [
"def",
"RemoveSelfDependencies",
"(",
"targets",
")",
":",
"for",
"target_name",
",",
"target_dict",
"in",
"targets",
".",
"iteritems",
"(",
")",
":",
"for",
"dependency_key",
"in",
"dependency_sections",
":",
"dependencies",
"=",
"target_dict",
".",
"get",
"(",
"dependency_key",
",",
"[",
"]",
")",
"if",
"dependencies",
":",
"for",
"t",
"in",
"dependencies",
":",
"if",
"t",
"==",
"target_name",
":",
"if",
"targets",
"[",
"t",
"]",
".",
"get",
"(",
"'variables'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'prune_self_dependency'",
",",
"0",
")",
":",
"target_dict",
"[",
"dependency_key",
"]",
"=",
"Filter",
"(",
"dependencies",
",",
"target_name",
")"
] | [
1495,
0
] | [
1505,
77
] | python | en | ['en', 'en', 'en'] | True |
RemoveLinkDependenciesFromNoneTargets | (targets) | Remove dependencies having the 'link_dependency' attribute from the 'none'
targets. | Remove dependencies having the 'link_dependency' attribute from the 'none'
targets. | def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t) | [
"def",
"RemoveLinkDependenciesFromNoneTargets",
"(",
"targets",
")",
":",
"for",
"target_name",
",",
"target_dict",
"in",
"targets",
".",
"iteritems",
"(",
")",
":",
"for",
"dependency_key",
"in",
"dependency_sections",
":",
"dependencies",
"=",
"target_dict",
".",
"get",
"(",
"dependency_key",
",",
"[",
"]",
")",
"if",
"dependencies",
":",
"for",
"t",
"in",
"dependencies",
":",
"if",
"target_dict",
".",
"get",
"(",
"'type'",
",",
"None",
")",
"==",
"'none'",
":",
"if",
"targets",
"[",
"t",
"]",
".",
"get",
"(",
"'variables'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'link_dependency'",
",",
"0",
")",
":",
"target_dict",
"[",
"dependency_key",
"]",
"=",
"Filter",
"(",
"target_dict",
"[",
"dependency_key",
"]",
",",
"t",
")"
] | [
1508,
0
] | [
1519,
56
] | python | en | ['en', 'en', 'en'] | True |
ProcessListFiltersInDict | (name, the_dict) | Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
| Process regular expression and exclusion-based filters on lists. | def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
# then into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
' to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value) | [
"def",
"ProcessListFiltersInDict",
"(",
"name",
",",
"the_dict",
")",
":",
"# Look through the dictionary for any lists whose keys end in \"!\" or \"/\".",
"# These are lists that will be treated as exclude lists and regular",
"# expression-based exclude/include lists. Collect the lists that are",
"# needed first, looking for the lists that they operate on, and assemble",
"# then into |lists|. This is done in a separate loop up front, because",
"# the _included and _excluded keys need to be added to the_dict, and that",
"# can't be done while iterating through it.",
"lists",
"=",
"[",
"]",
"del_lists",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"the_dict",
".",
"iteritems",
"(",
")",
":",
"operation",
"=",
"key",
"[",
"-",
"1",
"]",
"if",
"operation",
"!=",
"'!'",
"and",
"operation",
"!=",
"'/'",
":",
"continue",
"if",
"type",
"(",
"value",
")",
"is",
"not",
"list",
":",
"raise",
"ValueError",
"(",
"name",
"+",
"' key '",
"+",
"key",
"+",
"' must be list, not '",
"+",
"value",
".",
"__class__",
".",
"__name__",
")",
"list_key",
"=",
"key",
"[",
":",
"-",
"1",
"]",
"if",
"list_key",
"not",
"in",
"the_dict",
":",
"# This happens when there's a list like \"sources!\" but no corresponding",
"# \"sources\" list. Since there's nothing for it to operate on, queue up",
"# the \"sources!\" list for deletion now.",
"del_lists",
".",
"append",
"(",
"key",
")",
"continue",
"if",
"type",
"(",
"the_dict",
"[",
"list_key",
"]",
")",
"is",
"not",
"list",
":",
"value",
"=",
"the_dict",
"[",
"list_key",
"]",
"raise",
"ValueError",
"(",
"name",
"+",
"' key '",
"+",
"list_key",
"+",
"' must be list, not '",
"+",
"value",
".",
"__class__",
".",
"__name__",
"+",
"' when applying '",
"+",
"{",
"'!'",
":",
"'exclusion'",
",",
"'/'",
":",
"'regex'",
"}",
"[",
"operation",
"]",
")",
"if",
"not",
"list_key",
"in",
"lists",
":",
"lists",
".",
"append",
"(",
"list_key",
")",
"# Delete the lists that are known to be unneeded at this point.",
"for",
"del_list",
"in",
"del_lists",
":",
"del",
"the_dict",
"[",
"del_list",
"]",
"for",
"list_key",
"in",
"lists",
":",
"the_list",
"=",
"the_dict",
"[",
"list_key",
"]",
"# Initialize the list_actions list, which is parallel to the_list. Each",
"# item in list_actions identifies whether the corresponding item in",
"# the_list should be excluded, unconditionally preserved (included), or",
"# whether no exclusion or inclusion has been applied. Items for which",
"# no exclusion or inclusion has been applied (yet) have value -1, items",
"# excluded have value 0, and items included have value 1. Includes and",
"# excludes override previous actions. All items in list_actions are",
"# initialized to -1 because no excludes or includes have been processed",
"# yet.",
"list_actions",
"=",
"list",
"(",
"(",
"-",
"1",
",",
")",
"*",
"len",
"(",
"the_list",
")",
")",
"exclude_key",
"=",
"list_key",
"+",
"'!'",
"if",
"exclude_key",
"in",
"the_dict",
":",
"for",
"exclude_item",
"in",
"the_dict",
"[",
"exclude_key",
"]",
":",
"for",
"index",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"the_list",
")",
")",
":",
"if",
"exclude_item",
"==",
"the_list",
"[",
"index",
"]",
":",
"# This item matches the exclude_item, so set its action to 0",
"# (exclude).",
"list_actions",
"[",
"index",
"]",
"=",
"0",
"# The \"whatever!\" list is no longer needed, dump it.",
"del",
"the_dict",
"[",
"exclude_key",
"]",
"regex_key",
"=",
"list_key",
"+",
"'/'",
"if",
"regex_key",
"in",
"the_dict",
":",
"for",
"regex_item",
"in",
"the_dict",
"[",
"regex_key",
"]",
":",
"[",
"action",
",",
"pattern",
"]",
"=",
"regex_item",
"pattern_re",
"=",
"re",
".",
"compile",
"(",
"pattern",
")",
"if",
"action",
"==",
"'exclude'",
":",
"# This item matches an exclude regex, so set its value to 0 (exclude).",
"action_value",
"=",
"0",
"elif",
"action",
"==",
"'include'",
":",
"# This item matches an include regex, so set its value to 1 (include).",
"action_value",
"=",
"1",
"else",
":",
"# This is an action that doesn't make any sense.",
"raise",
"ValueError",
"(",
"'Unrecognized action '",
"+",
"action",
"+",
"' in '",
"+",
"name",
"+",
"' key '",
"+",
"regex_key",
")",
"for",
"index",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"the_list",
")",
")",
":",
"list_item",
"=",
"the_list",
"[",
"index",
"]",
"if",
"list_actions",
"[",
"index",
"]",
"==",
"action_value",
":",
"# Even if the regex matches, nothing will change so continue (regex",
"# searches are expensive).",
"continue",
"if",
"pattern_re",
".",
"search",
"(",
"list_item",
")",
":",
"# Regular expression match.",
"list_actions",
"[",
"index",
"]",
"=",
"action_value",
"# The \"whatever/\" list is no longer needed, dump it.",
"del",
"the_dict",
"[",
"regex_key",
"]",
"# Add excluded items to the excluded list.",
"#",
"# Note that exclude_key (\"sources!\") is different from excluded_key",
"# (\"sources_excluded\"). The exclude_key list is input and it was already",
"# processed and deleted; the excluded_key list is output and it's about",
"# to be created.",
"excluded_key",
"=",
"list_key",
"+",
"'_excluded'",
"if",
"excluded_key",
"in",
"the_dict",
":",
"raise",
"GypError",
"(",
"name",
"+",
"' key '",
"+",
"excluded_key",
"+",
"' must not be present prior '",
"' to applying exclusion/regex filters for '",
"+",
"list_key",
")",
"excluded_list",
"=",
"[",
"]",
"# Go backwards through the list_actions list so that as items are deleted,",
"# the indices of items that haven't been seen yet don't shift. That means",
"# that things need to be prepended to excluded_list to maintain them in the",
"# same order that they existed in the_list.",
"for",
"index",
"in",
"xrange",
"(",
"len",
"(",
"list_actions",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"list_actions",
"[",
"index",
"]",
"==",
"0",
":",
"# Dump anything with action 0 (exclude). Keep anything with action 1",
"# (include) or -1 (no include or exclude seen for the item).",
"excluded_list",
".",
"insert",
"(",
"0",
",",
"the_list",
"[",
"index",
"]",
")",
"del",
"the_list",
"[",
"index",
"]",
"# If anything was excluded, put the excluded list into the_dict at",
"# excluded_key.",
"if",
"len",
"(",
"excluded_list",
")",
">",
"0",
":",
"the_dict",
"[",
"excluded_key",
"]",
"=",
"excluded_list",
"# Now recurse into subdicts and lists that may contain dicts.",
"for",
"key",
",",
"value",
"in",
"the_dict",
".",
"iteritems",
"(",
")",
":",
"if",
"type",
"(",
"value",
")",
"is",
"dict",
":",
"ProcessListFiltersInDict",
"(",
"key",
",",
"value",
")",
"elif",
"type",
"(",
"value",
")",
"is",
"list",
":",
"ProcessListFiltersInList",
"(",
"key",
",",
"value",
")"
] | [
2318,
0
] | [
2473,
42
] | python | en | ['en', 'en', 'en'] | True |
ParallelState.LoadTargetBuildFileCallback | (self, result) | Handle the results of running LoadTargetBuildFile in another process.
| Handle the results of running LoadTargetBuildFile in another process.
| def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release() | [
"def",
"LoadTargetBuildFileCallback",
"(",
"self",
",",
"result",
")",
":",
"self",
".",
"condition",
".",
"acquire",
"(",
")",
"if",
"not",
"result",
":",
"self",
".",
"error",
"=",
"True",
"self",
".",
"condition",
".",
"notify",
"(",
")",
"self",
".",
"condition",
".",
"release",
"(",
")",
"return",
"(",
"build_file_path0",
",",
"build_file_data0",
",",
"dependencies0",
")",
"=",
"result",
"self",
".",
"data",
"[",
"build_file_path0",
"]",
"=",
"build_file_data0",
"self",
".",
"data",
"[",
"'target_build_files'",
"]",
".",
"add",
"(",
"build_file_path0",
")",
"for",
"new_dependency",
"in",
"dependencies0",
":",
"if",
"new_dependency",
"not",
"in",
"self",
".",
"scheduled",
":",
"self",
".",
"scheduled",
".",
"add",
"(",
"new_dependency",
")",
"self",
".",
"dependencies",
".",
"append",
"(",
"new_dependency",
")",
"self",
".",
"pending",
"-=",
"1",
"self",
".",
"condition",
".",
"notify",
"(",
")",
"self",
".",
"condition",
".",
"release",
"(",
")"
] | [
559,
2
] | [
577,
28
] | python | en | ['en', 'en', 'en'] | True |
DependencyGraphNode.FindCycles | (self) |
Returns a list of cycles in the graph, where each cycle is its own list.
|
Returns a list of cycles in the graph, where each cycle is its own list.
| def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results | [
"def",
"FindCycles",
"(",
"self",
")",
":",
"results",
"=",
"[",
"]",
"visited",
"=",
"set",
"(",
")",
"def",
"Visit",
"(",
"node",
",",
"path",
")",
":",
"for",
"child",
"in",
"node",
".",
"dependents",
":",
"if",
"child",
"in",
"path",
":",
"results",
".",
"append",
"(",
"[",
"child",
"]",
"+",
"path",
"[",
":",
"path",
".",
"index",
"(",
"child",
")",
"+",
"1",
"]",
")",
"elif",
"not",
"child",
"in",
"visited",
":",
"visited",
".",
"add",
"(",
"child",
")",
"Visit",
"(",
"child",
",",
"[",
"child",
"]",
"+",
"path",
")",
"visited",
".",
"add",
"(",
"self",
")",
"Visit",
"(",
"self",
",",
"[",
"self",
"]",
")",
"return",
"results"
] | [
1587,
2
] | [
1605,
18
] | python | en | ['en', 'error', 'th'] | False |
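To make the cycle search above easier to follow in isolation, here is a minimal standalone sketch of the same idea over a plain dict-of-lists graph; the graph, node names, and the find_cycles helper are hypothetical stand-ins, not part of the gyp code recorded above.

def find_cycles(dependents, start):
    # Mirrors FindCycles: walk the dependents depth-first and record a cycle
    # whenever a child already appears on the current path.
    results = []
    visited = {start}

    def visit(node, path):
        for child in dependents.get(node, []):
            if child in path:
                results.append([child] + path[:path.index(child) + 1])
            elif child not in visited:
                visited.add(child)
                visit(child, [child] + path)

    visit(start, [start])
    return results

graph = {"a": ["b"], "b": ["c"], "c": ["a"]}   # a -> b -> c -> a
print(find_cycles(graph, "a"))                 # [['a', 'c', 'b', 'a']]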
DependencyGraphNode.DirectDependencies | (self, dependencies=None) | Returns a list of just direct dependencies. | Returns a list of just direct dependencies. | def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies | [
"def",
"DirectDependencies",
"(",
"self",
",",
"dependencies",
"=",
"None",
")",
":",
"if",
"dependencies",
"==",
"None",
":",
"dependencies",
"=",
"[",
"]",
"for",
"dependency",
"in",
"self",
".",
"dependencies",
":",
"# Check for None, corresponding to the root node.",
"if",
"dependency",
".",
"ref",
"!=",
"None",
"and",
"dependency",
".",
"ref",
"not",
"in",
"dependencies",
":",
"dependencies",
".",
"append",
"(",
"dependency",
".",
"ref",
")",
"return",
"dependencies"
] | [
1607,
2
] | [
1617,
23
] | python | en | ['en', 'en', 'en'] | True |
DependencyGraphNode._AddImportedDependencies | (self, targets, dependencies=None) | Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
| Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings. | def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies | [
"def",
"_AddImportedDependencies",
"(",
"self",
",",
"targets",
",",
"dependencies",
"=",
"None",
")",
":",
"if",
"dependencies",
"==",
"None",
":",
"dependencies",
"=",
"[",
"]",
"index",
"=",
"0",
"while",
"index",
"<",
"len",
"(",
"dependencies",
")",
":",
"dependency",
"=",
"dependencies",
"[",
"index",
"]",
"dependency_dict",
"=",
"targets",
"[",
"dependency",
"]",
"# Add any dependencies whose settings should be imported to the list",
"# if not already present. Newly-added items will be checked for",
"# their own imports when the list iteration reaches them.",
"# Rather than simply appending new items, insert them after the",
"# dependency that exported them. This is done to more closely match",
"# the depth-first method used by DeepDependencies.",
"add_index",
"=",
"1",
"for",
"imported_dependency",
"in",
"dependency_dict",
".",
"get",
"(",
"'export_dependent_settings'",
",",
"[",
"]",
")",
":",
"if",
"imported_dependency",
"not",
"in",
"dependencies",
":",
"dependencies",
".",
"insert",
"(",
"index",
"+",
"add_index",
",",
"imported_dependency",
")",
"add_index",
"=",
"add_index",
"+",
"1",
"index",
"=",
"index",
"+",
"1",
"return",
"dependencies"
] | [
1619,
2
] | [
1658,
23
] | python | en | ['en', 'en', 'en'] | True |
DependencyGraphNode.DirectAndImportedDependencies | (self, targets, dependencies=None) | Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
| Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
| def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies) | [
"def",
"DirectAndImportedDependencies",
"(",
"self",
",",
"targets",
",",
"dependencies",
"=",
"None",
")",
":",
"dependencies",
"=",
"self",
".",
"DirectDependencies",
"(",
"dependencies",
")",
"return",
"self",
".",
"_AddImportedDependencies",
"(",
"targets",
",",
"dependencies",
")"
] | [
1660,
2
] | [
1667,
63
] | python | en | ['en', 'en', 'en'] | True |
DependencyGraphNode.DeepDependencies | (self, dependencies=None) | Returns an OrderedSet of all of a target's dependencies, recursively. | Returns an OrderedSet of all of a target's dependencies, recursively. | def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies | [
"def",
"DeepDependencies",
"(",
"self",
",",
"dependencies",
"=",
"None",
")",
":",
"if",
"dependencies",
"is",
"None",
":",
"# Using a list to get ordered output and a set to do fast \"is it",
"# already added\" checks.",
"dependencies",
"=",
"OrderedSet",
"(",
")",
"for",
"dependency",
"in",
"self",
".",
"dependencies",
":",
"# Check for None, corresponding to the root node.",
"if",
"dependency",
".",
"ref",
"is",
"None",
":",
"continue",
"if",
"dependency",
".",
"ref",
"not",
"in",
"dependencies",
":",
"dependency",
".",
"DeepDependencies",
"(",
"dependencies",
")",
"dependencies",
".",
"add",
"(",
"dependency",
".",
"ref",
")",
"return",
"dependencies"
] | [
1669,
2
] | [
1684,
23
] | python | en | ['en', 'en', 'en'] | True |
DependencyGraphNode._LinkDependenciesInternal | (self, targets, include_shared_libraries,
dependencies=None, initial=True) | Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
| Returns an OrderedSet of dependency targets that are linked
into this target. | def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
# and finally linked. Nothing else can be a link dependency of them, there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
# this target linkable. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies | [
"def",
"_LinkDependenciesInternal",
"(",
"self",
",",
"targets",
",",
"include_shared_libraries",
",",
"dependencies",
"=",
"None",
",",
"initial",
"=",
"True",
")",
":",
"if",
"dependencies",
"is",
"None",
":",
"# Using a list to get ordered output and a set to do fast \"is it",
"# already added\" checks.",
"dependencies",
"=",
"OrderedSet",
"(",
")",
"# Check for None, corresponding to the root node.",
"if",
"self",
".",
"ref",
"is",
"None",
":",
"return",
"dependencies",
"# It's kind of sucky that |targets| has to be passed into this function,",
"# but that's presently the easiest way to access the target dicts so that",
"# this function can find target types.",
"if",
"'target_name'",
"not",
"in",
"targets",
"[",
"self",
".",
"ref",
"]",
":",
"raise",
"GypError",
"(",
"\"Missing 'target_name' field in target.\"",
")",
"if",
"'type'",
"not",
"in",
"targets",
"[",
"self",
".",
"ref",
"]",
":",
"raise",
"GypError",
"(",
"\"Missing 'type' field in target %s\"",
"%",
"targets",
"[",
"self",
".",
"ref",
"]",
"[",
"'target_name'",
"]",
")",
"target_type",
"=",
"targets",
"[",
"self",
".",
"ref",
"]",
"[",
"'type'",
"]",
"is_linkable",
"=",
"target_type",
"in",
"linkable_types",
"if",
"initial",
"and",
"not",
"is_linkable",
":",
"# If this is the first target being examined and it's not linkable,",
"# return an empty list of link dependencies, because the link",
"# dependencies are intended to apply to the target itself (initial is",
"# True) and this target won't be linked.",
"return",
"dependencies",
"# Don't traverse 'none' targets if explicitly excluded.",
"if",
"(",
"target_type",
"==",
"'none'",
"and",
"not",
"targets",
"[",
"self",
".",
"ref",
"]",
".",
"get",
"(",
"'dependencies_traverse'",
",",
"True",
")",
")",
":",
"dependencies",
".",
"add",
"(",
"self",
".",
"ref",
")",
"return",
"dependencies",
"# Executables, mac kernel extensions and loadable modules are already fully",
"# and finally linked. Nothing else can be a link dependency of them, there",
"# can only be dependencies in the sense that a dependent target might run",
"# an executable or load the loadable_module.",
"if",
"not",
"initial",
"and",
"target_type",
"in",
"(",
"'executable'",
",",
"'loadable_module'",
",",
"'mac_kernel_extension'",
")",
":",
"return",
"dependencies",
"# Shared libraries are already fully linked. They should only be included",
"# in |dependencies| when adjusting static library dependencies (in order to",
"# link against the shared_library's import lib), but should not be included",
"# in |dependencies| when propagating link_settings.",
"# The |include_shared_libraries| flag controls which of these two cases we",
"# are handling.",
"if",
"(",
"not",
"initial",
"and",
"target_type",
"==",
"'shared_library'",
"and",
"not",
"include_shared_libraries",
")",
":",
"return",
"dependencies",
"# The target is linkable, add it to the list of link dependencies.",
"if",
"self",
".",
"ref",
"not",
"in",
"dependencies",
":",
"dependencies",
".",
"add",
"(",
"self",
".",
"ref",
")",
"if",
"initial",
"or",
"not",
"is_linkable",
":",
"# If this is a subsequent target and it's linkable, don't look any",
"# further for linkable dependencies, as they'll already be linked into",
"# this target linkable. Always look at dependencies of the initial",
"# target, and always look at dependencies of non-linkables.",
"for",
"dependency",
"in",
"self",
".",
"dependencies",
":",
"dependency",
".",
"_LinkDependenciesInternal",
"(",
"targets",
",",
"include_shared_libraries",
",",
"dependencies",
",",
"False",
")",
"return",
"dependencies"
] | [
1686,
2
] | [
1770,
23
] | python | en | ['en', 'en', 'en'] | True |
DependencyGraphNode.DependenciesForLinkSettings | (self, targets) |
Returns a list of dependency targets whose link_settings should be merged
into this target.
|
Returns a list of dependency targets whose link_settings should be merged
into this target.
| def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries) | [
"def",
"DependenciesForLinkSettings",
"(",
"self",
",",
"targets",
")",
":",
"# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'",
"# link_settings are propagated. So for now, we will allow it, unless the",
"# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to",
"# False. Once chrome is fixed, we can remove this flag.",
"include_shared_libraries",
"=",
"targets",
"[",
"self",
".",
"ref",
"]",
".",
"get",
"(",
"'allow_sharedlib_linksettings_propagation'",
",",
"True",
")",
"return",
"self",
".",
"_LinkDependenciesInternal",
"(",
"targets",
",",
"include_shared_libraries",
")"
] | [
1772,
2
] | [
1784,
76
] | python | en | ['en', 'error', 'th'] | False |
DependencyGraphNode.DependenciesToLinkAgainst | (self, targets) |
Returns a list of dependency targets that are linked into this target.
|
Returns a list of dependency targets that are linked into this target.
| def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True) | [
"def",
"DependenciesToLinkAgainst",
"(",
"self",
",",
"targets",
")",
":",
"return",
"self",
".",
"_LinkDependenciesInternal",
"(",
"targets",
",",
"True",
")"
] | [
1786,
2
] | [
1790,
56
] | python | en | ['en', 'error', 'th'] | False |
get_bin_intervals | (data, num_bins) |
Returns bin intervals for 1D data.
Parameters
----------
data: np.ndarray
A 1D NumPy array of values to get bin intervals for.
num_bins: int
The number of bins to create.
Returns
-------
bin_intervals: np.ndarray of shape (num_bins, 2)
A 2D NumPy array of bin intervals, with each row being one bin,
with the first value being the lower bound for the bin and
the second being the upper bound for the bin.
|
Returns bin intervals for 1D data. | def get_bin_intervals(data, num_bins):
"""
Returns bin intervals for 1D data.
Parameters
----------
data: np.ndarray
A 1D NumPy array of values to get bin intervals for.
num_bins: int
The number of bins to create.
Returns
-------
bin_intervals: np.ndarray of shape (num_bins, 2)
A 2D NumPy array of bin intervals, with each row being one bin,
with the first value being the lower bound for the bin and
the second being the upper bound for the bin.
"""
# Transition points between bins.
bin_trans = np.linspace(data[0], data[-1], num_bins+1, endpoint=True)
bin_intervals = np.empty((num_bins, 2), dtype=data.dtype)
for i in range(num_bins):
bin_intervals[i, :] = [bin_trans[i], bin_trans[i+1]]
return bin_intervals | [
"def",
"get_bin_intervals",
"(",
"data",
",",
"num_bins",
")",
":",
"# Transition points between bins.",
"bin_trans",
"=",
"np",
".",
"linspace",
"(",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"-",
"1",
"]",
",",
"num_bins",
"+",
"1",
",",
"endpoint",
"=",
"True",
")",
"bin_intervals",
"=",
"np",
".",
"empty",
"(",
"(",
"num_bins",
",",
"2",
")",
",",
"dtype",
"=",
"data",
".",
"dtype",
")",
"for",
"i",
"in",
"range",
"(",
"num_bins",
")",
":",
"bin_intervals",
"[",
"i",
",",
":",
"]",
"=",
"[",
"bin_trans",
"[",
"i",
"]",
",",
"bin_trans",
"[",
"i",
"+",
"1",
"]",
"]",
"return",
"bin_intervals"
] | [
6,
0
] | [
29,
24
] | python | en | ['en', 'error', 'th'] | False |
xr_scale_res | (dataset, x_coord='longitude', y_coord='latitude',
frac_res=None, abs_res=None) |
Scales the resolution of an `xarray.Dataset` or `xarray.DataArray`
to a fraction of its original resolution or an absolute resolution.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to reduce the resolution of.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset` to scale.
frac_res: float
The fraction of the original resolution to scale to. Must be positive.
Note that this can be greater than 1.0, in which case the resolution
is upsampled.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
Overrides `frac_res` if specified.
Returns
-------
dataset_scaled: xarray.Dataset or xarray.DataArray
The result of scaling the resolution of `dataset`.
Raises
------
AssertionError: If neither `frac_res` nor `abs_res` is specified.
|
Scales the resolution of an `xarray.Dataset` or `xarray.DataArray`
to a fraction of its original resolution or an absolute resolution. | def xr_scale_res(dataset, x_coord='longitude', y_coord='latitude',
frac_res=None, abs_res=None):
"""
Scales the resolution of an `xarray.Dataset` or `xarray.DataArray`
to a fraction of its original resolution or an absolute resolution.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to reduce the resolution of.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset` to scale.
frac_res: float
The fraction of the original resolution to scale to. Must be positive.
Note that this can be greater than 1.0, in which case the resolution
is upsampled.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
Overrides `frac_res` if specified.
Returns
-------
dataset_scaled: xarray.Dataset or xarray.DataArray
The result of scaling the resolution of `dataset`.
Raises
------
AssertionError: If neither `frac_res` nor `abs_res` is specified.
"""
assert frac_res is not None or abs_res is not None, \
"Either frac_res or abs_res must be specified (i.e. not None)."
if frac_res is not None:
x_px = y_px = np.sqrt(frac_res)
interp_param = 'frac'
elif abs_res is not None:
interp_param = 'num'
x_px, y_px = abs_res
return xr_interp(dataset, {x_coord: ('interp', {interp_param: x_px}), \
y_coord: ('interp', {interp_param: y_px})}) | [
"def",
"xr_scale_res",
"(",
"dataset",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"frac_res",
"=",
"None",
",",
"abs_res",
"=",
"None",
")",
":",
"assert",
"frac_res",
"is",
"not",
"None",
"or",
"abs_res",
"is",
"not",
"None",
",",
"\"Either frac_res or abs_res must be specified (i.e. not None).\"",
"if",
"frac_res",
"is",
"not",
"None",
":",
"x_px",
"=",
"y_px",
"=",
"np",
".",
"sqrt",
"(",
"frac_res",
")",
"interp_param",
"=",
"'frac'",
"elif",
"abs_res",
"is",
"not",
"None",
":",
"interp_param",
"=",
"'num'",
"x_px",
",",
"y_px",
"=",
"abs_res",
"return",
"xr_interp",
"(",
"dataset",
",",
"{",
"x_coord",
":",
"(",
"'interp'",
",",
"{",
"interp_param",
":",
"x_px",
"}",
")",
",",
"y_coord",
":",
"(",
"'interp'",
",",
"{",
"interp_param",
":",
"y_px",
"}",
")",
"}",
")"
] | [
32,
0
] | [
70,
74
] | python | en | ['en', 'error', 'th'] | False |
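A usage sketch for xr_scale_res above; the coordinate names match the function's defaults, while the grid size and the ndvi variable are hypothetical.

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"ndvi": (("latitude", "longitude"), np.random.rand(100, 100))},
    coords={"latitude": np.linspace(-1.0, 1.0, 100),
            "longitude": np.linspace(34.0, 36.0, 100)},
)

# frac_res is a fraction of total pixels, so each axis is scaled by sqrt(frac_res):
# 0.25 keeps roughly 50 x 50 of the original 100 x 100 grid.
quarter = xr_scale_res(ds, frac_res=0.25)

# abs_res overrides frac_res and gives exact pixel counts for (x, y):
coarse = xr_scale_res(ds, abs_res=(20, 10))   # 20 longitude x 10 latitude points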
xr_sel_time_by_bin | (dataset, num_bins, time_coord='time') |
Selects time coordinates by nearest neighbors of the means of bins.
This is useful for plotting data with high variance in temporal
spacing between acquisitions.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to aggregate by binning.
Must have a 'time' coordinate of type `datetime64`.
num_bins: int
The number of bins to use.
time_coord: str
The name of the time coordinate to bin.
Returns
-------
result: xarray.Dataset or xarray.DataArray
The result of aggregating within bins for the binned data.
|
Selects time coordinates by nearest neighbors of the means of bins.
This is useful for plotting data with high variance in temporal
spacing between acquisitions. | def xr_sel_time_by_bin(dataset, num_bins, time_coord='time'):
"""
Selects time coordinates by nearest neighbors of the means of bins.
This is useful for plotting data with high variance in temporal
spacing between acquisitions.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to aggregate by binning.
Must have a 'time' coordinate of type `datetime64`.
num_bins: int
The number of bins to use.
time_coord: str
The name of the time coordinate to bin.
Returns
-------
result: xarray.Dataset or xarray.DataArray
The result of aggregating within bins for the binned data.
"""
return xr_interp(dataset, {time_coord: ('bin', {'num': num_bins})}) | [
"def",
"xr_sel_time_by_bin",
"(",
"dataset",
",",
"num_bins",
",",
"time_coord",
"=",
"'time'",
")",
":",
"return",
"xr_interp",
"(",
"dataset",
",",
"{",
"time_coord",
":",
"(",
"'bin'",
",",
"{",
"'num'",
":",
"num_bins",
"}",
")",
"}",
")"
] | [
73,
0
] | [
94,
71
] | python | en | ['en', 'error', 'th'] | False |
xr_interp | (dataset, interp_config) |
Interpolates an `xarray.Dataset` or `xarray.DataArray`.
This is often done to match dimensions between xarray objects or
downsample to reduce memory consumption.
First, coordinates are interpolated according to `interp_config`.
Then the data values for those interpolated coordinates are obtained
through nearest neighbors interpolation.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to interpolate.
interp_config: dict
Mapping of names of coordinates to 2-tuples of the interpolation types
to use for those coordinates and the parameters for those interpolation types.
The supported coordinate interpolation types are 'interp' for
linear interpolation and 'bin' for binning.
The parameters, with supported interpolation types annotated to their
left, are as follows:
('interp', 'bin'): 'frac':
The fraction of the original size to use. Exclusive with 'num'.
('interp', 'bin'): 'num':
The number of points in the output. Exclusive with 'frac'.
Either 'frac' or 'num' must be in the interpolation parameters.
The following is an example value:
`{'latitude':('interp',{'frac':0.5}),'longitude':('interp',{'frac':0.5}),
'time':('bin',{'num':20})}`.
Returns
-------
interp_data: xarray.Dataset or xarray.DataArray
The specified interpolation of `dataset`.
:Authors:
John Rattz ([email protected])
|
Interpolates an `xarray.Dataset` or `xarray.DataArray`.
This is often done to match dimensions between xarray objects or
downsample to reduce memory consumption. | def xr_interp(dataset, interp_config):
"""
Interpolates an `xarray.Dataset` or `xarray.DataArray`.
This is often done to match dimensions between xarray objects or
downsample to reduce memory consumption.
First, coordinates are interpolated according to `interp_config`.
Then the data values for those interpolated coordinates are obtained
through nearest neighbors interpolation.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The Dataset or DataArray to interpolate.
interp_config: dict
Mapping of names of coordinates to 2-tuples of the interpolation types
to use for those coordinates and the parameters for those interpolation types.
The supported coordinate interpolation types are 'interp' for
linear interpolation and 'bin' for binning.
The parameters, with supported interpolation types annotated to their
left, are as follows:
('interp', 'bin'): 'frac':
The fraction of the original size to use. Exclusive with 'num'.
('interp', 'bin'): 'num':
The number of points in the output. Exclusive with 'frac'.
Either 'frac' or 'num' must be in the interpolation parameters.
The following is an example value:
`{'latitude':('interp',{'frac':0.5}),'longitude':('interp',{'frac':0.5}),
'time':('bin',{'num':20})}`.
Returns
-------
interp_data: xarray.Dataset or xarray.DataArray
The specified interpolation of `dataset`.
:Authors:
John Rattz ([email protected])
"""
# Create the new coordinates.
new_coords = {}
for dim, (interp_type, interp_kwargs) in interp_config.items():
# Determine the number of points to use.
num_pts = interp_kwargs.get('num', None)
if num_pts is None:
frac = interp_kwargs.get('frac', None)
num_pts_orig = len(dataset[dim])
num_pts = round(num_pts_orig * frac)
dim_vals = dataset[dim].values
dim_dtype = type(dim_vals[0])
# Convert NumPy datetime64 objects to scalars.
if dim_dtype == np.datetime64:
dim_vals = np.array(list(map(_n64_datetime_to_scalar, dim_vals)))
interp_vals = None
# Interpolate coordinates.
if interp_type == 'bin':
bin_intervals = get_bin_intervals(dim_vals, num_pts)
interp_vals = np.mean(bin_intervals, axis=1)
if interp_type == 'interp':
interp_inds = np.linspace(0, len(dim_vals) - 1, num_pts, dtype=np.int32)
interp_vals = dim_vals[interp_inds]
# Convert scalars to NumPy datetime64 objects.
if dim_dtype == np.datetime64:
interp_vals = np.array(list(map(_scalar_to_n64_datetime, interp_vals)))
new_coords[dim] = interp_vals
# Nearest-neighbor interpolate data values.
interp_data = dataset.interp(coords=new_coords, method='nearest')
# xarray.Dataset.interp() converts to dtype float64, so cast back to the original dtypes.
if isinstance(dataset, xr.DataArray):
interp_data = interp_data.astype(dataset.dtype)
elif isinstance(dataset, xr.Dataset):
for data_var_name in interp_data.data_vars:
interp_data[data_var_name] = interp_data[data_var_name].astype(dataset[data_var_name].dtype)
return interp_data | [
"def",
"xr_interp",
"(",
"dataset",
",",
"interp_config",
")",
":",
"# Create the new coordinates.",
"new_coords",
"=",
"{",
"}",
"for",
"dim",
",",
"(",
"interp_type",
",",
"interp_kwargs",
")",
"in",
"interp_config",
".",
"items",
"(",
")",
":",
"# Determine the number of points to use.",
"num_pts",
"=",
"interp_kwargs",
".",
"get",
"(",
"'num'",
",",
"None",
")",
"if",
"num_pts",
"is",
"None",
":",
"frac",
"=",
"interp_kwargs",
".",
"get",
"(",
"'frac'",
",",
"None",
")",
"num_pts_orig",
"=",
"len",
"(",
"dataset",
"[",
"dim",
"]",
")",
"num_pts",
"=",
"round",
"(",
"num_pts_orig",
"*",
"frac",
")",
"dim_vals",
"=",
"dataset",
"[",
"dim",
"]",
".",
"values",
"dim_dtype",
"=",
"type",
"(",
"dim_vals",
"[",
"0",
"]",
")",
"# Convert NumPy datetime64 objects to scalars.",
"if",
"dim_dtype",
"==",
"np",
".",
"datetime64",
":",
"dim_vals",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"_n64_datetime_to_scalar",
",",
"dim_vals",
")",
")",
")",
"interp_vals",
"=",
"None",
"# Interpolate coordinates.",
"if",
"interp_type",
"==",
"'bin'",
":",
"bin_intervals",
"=",
"get_bin_intervals",
"(",
"dim_vals",
",",
"num_pts",
")",
"interp_vals",
"=",
"np",
".",
"mean",
"(",
"bin_intervals",
",",
"axis",
"=",
"1",
")",
"if",
"interp_type",
"==",
"'interp'",
":",
"interp_inds",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"len",
"(",
"dim_vals",
")",
"-",
"1",
",",
"num_pts",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"interp_vals",
"=",
"dim_vals",
"[",
"interp_inds",
"]",
"# Convert scalars to NumPy datetime64 objects.",
"if",
"dim_dtype",
"==",
"np",
".",
"datetime64",
":",
"interp_vals",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"_scalar_to_n64_datetime",
",",
"interp_vals",
")",
")",
")",
"new_coords",
"[",
"dim",
"]",
"=",
"interp_vals",
"# Nearest-neighbor interpolate data values.",
"interp_data",
"=",
"dataset",
".",
"interp",
"(",
"coords",
"=",
"new_coords",
",",
"method",
"=",
"'nearest'",
")",
"# xarray.Dataset.interp() converts to dtype float64, so cast back to the original dtypes.",
"if",
"isinstance",
"(",
"dataset",
",",
"xr",
".",
"DataArray",
")",
":",
"interp_data",
"=",
"interp_data",
".",
"astype",
"(",
"dataset",
".",
"dtype",
")",
"elif",
"isinstance",
"(",
"dataset",
",",
"xr",
".",
"Dataset",
")",
":",
"for",
"data_var_name",
"in",
"interp_data",
".",
"data_vars",
":",
"interp_data",
"[",
"data_var_name",
"]",
"=",
"interp_data",
"[",
"data_var_name",
"]",
".",
"astype",
"(",
"dataset",
"[",
"data_var_name",
"]",
".",
"dtype",
")",
"return",
"interp_data"
] | [
97,
0
] | [
169,
22
] | python | en | ['en', 'error', 'th'] | False |
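A usage sketch mirroring the interp_config example quoted in the xr_interp docstring above; ds stands for a hypothetical Dataset with latitude, longitude, and time coordinates.

interp_config = {
    "latitude": ("interp", {"frac": 0.5}),    # keep half the latitude points
    "longitude": ("interp", {"frac": 0.5}),   # keep half the longitude points
    "time": ("bin", {"num": 20}),             # 20 time bins, selected by bin means
}
ds_small = xr_interp(ds, interp_config)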
column_reflection_fallback | (
selectable: Select, dialect: Dialect, sqlalchemy_engine: Engine
) | If we can't reflect the table, use a query to at least get column names. | If we can't reflect the table, use a query to at least get column names. | def column_reflection_fallback(
selectable: Select, dialect: Dialect, sqlalchemy_engine: Engine
) -> List[Dict[str, str]]:
"""If we can't reflect the table, use a query to at least get column names."""
col_info_dict_list: List[Dict[str, str]]
if dialect.name.lower() == "mssql":
# Get column names and types from the database
# Reference: https://dataedo.com/kb/query/sql-server/list-table-columns-in-database
columns_query: str = f"""
SELECT
SCHEMA_NAME(tab.schema_id) AS schema_name,
tab.name AS table_name,
col.column_id AS column_id,
col.name AS column_name,
t.name AS column_data_type,
col.max_length AS column_max_length,
col.precision AS column_precision
FROM sys.tables AS tab
INNER JOIN sys.columns AS col
ON tab.object_id = col.object_id
LEFT JOIN sys.types AS t
ON col.user_type_id = t.user_type_id
WHERE tab.name = '{selectable}'
ORDER BY schema_name,
table_name,
column_id
"""
col_info_query: TextClause = sa.text(columns_query)
col_info_tuples_list: List[tuple] = sqlalchemy_engine.execute(
col_info_query
).fetchall()
# type_module = _get_dialect_type_module(dialect=dialect)
col_info_dict_list: List[Dict[str, str]] = [
{
"name": column_name,
# "type": getattr(type_module, column_data_type.upper())(),
"type": column_data_type.upper(),
}
for schema_name, table_name, column_id, column_name, column_data_type, column_max_length, column_precision in col_info_tuples_list
]
else:
query: Select = sa.select([sa.text("*")]).select_from(selectable).limit(1)
result_object = sqlalchemy_engine.execute(query)
# noinspection PyProtectedMember
col_names: List[str] = result_object._metadata.keys
col_info_dict_list = [{"name": col_name} for col_name in col_names]
return col_info_dict_list | [
"def",
"column_reflection_fallback",
"(",
"selectable",
":",
"Select",
",",
"dialect",
":",
"Dialect",
",",
"sqlalchemy_engine",
":",
"Engine",
")",
"->",
"List",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
":",
"col_info_dict_list",
":",
"List",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"if",
"dialect",
".",
"name",
".",
"lower",
"(",
")",
"==",
"\"mssql\"",
":",
"# Get column names and types from the database",
"# Reference: https://dataedo.com/kb/query/sql-server/list-table-columns-in-database",
"columns_query",
":",
"str",
"=",
"f\"\"\"\nSELECT\n SCHEMA_NAME(tab.schema_id) AS schema_name,\n tab.name AS table_name, \n col.column_id AS column_id,\n col.name AS column_name, \n t.name AS column_data_type, \n col.max_length AS column_max_length,\n col.precision AS column_precision\nFROM sys.tables AS tab\n INNER JOIN sys.columns AS col\n ON tab.object_id = col.object_id\n LEFT JOIN sys.types AS t\n ON col.user_type_id = t.user_type_id\nWHERE tab.name = '{selectable}'\nORDER BY schema_name,\n table_name, \n column_id\n\"\"\"",
"col_info_query",
":",
"TextClause",
"=",
"sa",
".",
"text",
"(",
"columns_query",
")",
"col_info_tuples_list",
":",
"List",
"[",
"tuple",
"]",
"=",
"sqlalchemy_engine",
".",
"execute",
"(",
"col_info_query",
")",
".",
"fetchall",
"(",
")",
"# type_module = _get_dialect_type_module(dialect=dialect)",
"col_info_dict_list",
":",
"List",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"[",
"{",
"\"name\"",
":",
"column_name",
",",
"# \"type\": getattr(type_module, column_data_type.upper())(),",
"\"type\"",
":",
"column_data_type",
".",
"upper",
"(",
")",
",",
"}",
"for",
"schema_name",
",",
"table_name",
",",
"column_id",
",",
"column_name",
",",
"column_data_type",
",",
"column_max_length",
",",
"column_precision",
"in",
"col_info_tuples_list",
"]",
"else",
":",
"query",
":",
"Select",
"=",
"sa",
".",
"select",
"(",
"[",
"sa",
".",
"text",
"(",
"\"*\"",
")",
"]",
")",
".",
"select_from",
"(",
"selectable",
")",
".",
"limit",
"(",
"1",
")",
"result_object",
"=",
"sqlalchemy_engine",
".",
"execute",
"(",
"query",
")",
"# noinspection PyProtectedMember",
"col_names",
":",
"List",
"[",
"str",
"]",
"=",
"result_object",
".",
"_metadata",
".",
"keys",
"col_info_dict_list",
"=",
"[",
"{",
"\"name\"",
":",
"col_name",
"}",
"for",
"col_name",
"in",
"col_names",
"]",
"return",
"col_info_dict_list"
] | [
242,
0
] | [
288,
29
] | python | en | ['en', 'en', 'en'] | True |
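A usage sketch for column_reflection_fallback above, written against the SQLAlchemy 1.x call style that the helper itself uses; the in-memory SQLite table and its columns are hypothetical.

import sqlalchemy as sa

engine = sa.create_engine("sqlite:///:memory:")
engine.execute("CREATE TABLE events (id INTEGER, name TEXT)")

# SQLite is not MSSQL, so the generic SELECT * ... LIMIT 1 branch runs.
cols = column_reflection_fallback(
    selectable=sa.table("events"),
    dialect=engine.dialect,
    sqlalchemy_engine=engine,
)
# cols == [{'name': 'id'}, {'name': 'name'}]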
validate_distribution_parameters | (distribution, params) | Ensures that necessary parameters for a distribution are present and that all parameters are sensible.
If parameters necessary to construct a distribution are missing or invalid, this function raises ValueError\
with an informative description. Note that 'loc' and 'scale' are optional arguments, and that 'scale'\
must be positive.
Args:
distribution (string): \
The scipy distribution name, e.g. normal distribution is 'norm'.
params (dict or list): \
The distribution shape parameters in a named dictionary or positional list form following the scipy \
cdf argument scheme.
params={'mean': 40, 'std_dev': 5} or params=[40, 5]
Exceptions:
ValueError: \
With an informative description, usually when necessary parameters are omitted or are invalid.
| Ensures that necessary parameters for a distribution are present and that all parameters are sensible. | def validate_distribution_parameters(distribution, params):
"""Ensures that necessary parameters for a distribution are present and that all parameters are sensical.
If parameters necessary to construct a distribution are missing or invalid, this function raises ValueError\
with an informative description. Note that 'loc' and 'scale' are optional arguments, and that 'scale'\
must be positive.
Args:
distribution (string): \
The scipy distribution name, e.g. normal distribution is 'norm'.
params (dict or list): \
The distribution shape parameters in a named dictionary or positional list form following the scipy \
cdf argument scheme.
params={'mean': 40, 'std_dev': 5} or params=[40, 5]
Exceptions:
ValueError: \
With an informative description, usually when necessary parameters are omitted or are invalid.
"""
norm_msg = (
"norm distributions require 0 parameters and optionally 'mean', 'std_dev'."
)
beta_msg = "beta distributions require 2 positive parameters 'alpha', 'beta' and optionally 'loc', 'scale'."
gamma_msg = "gamma distributions require 1 positive parameter 'alpha' and optionally 'loc','scale'."
# poisson_msg = "poisson distributions require 1 positive parameter 'lambda' and optionally 'loc'."
uniform_msg = (
"uniform distributions require 0 parameters and optionally 'loc', 'scale'."
)
chi2_msg = "chi2 distributions require 1 positive parameter 'df' and optionally 'loc', 'scale'."
expon_msg = (
"expon distributions require 0 parameters and optionally 'loc', 'scale'."
)
if distribution not in [
"norm",
"beta",
"gamma",
"poisson",
"uniform",
"chi2",
"expon",
]:
raise AttributeError("Unsupported distribution provided: %s" % distribution)
if isinstance(params, dict):
# `params` is a dictionary
if params.get("std_dev", 1) <= 0 or params.get("scale", 1) <= 0:
raise ValueError("std_dev and scale must be positive.")
# alpha and beta are required and positive
if distribution == "beta" and (
params.get("alpha", -1) <= 0 or params.get("beta", -1) <= 0
):
raise ValueError("Invalid parameters: %s" % beta_msg)
# alpha is required and positive
elif distribution == "gamma" and params.get("alpha", -1) <= 0:
raise ValueError("Invalid parameters: %s" % gamma_msg)
# lambda is a required and positive
# elif distribution == 'poisson' and params.get('lambda', -1) <= 0:
# raise ValueError("Invalid parameters: %s" %poisson_msg)
# df is necessary and required to be positive
elif distribution == "chi2" and params.get("df", -1) <= 0:
raise ValueError("Invalid parameters: %s:" % chi2_msg)
elif isinstance(params, tuple) or isinstance(params, list):
scale = None
# `params` is a tuple or a list
if distribution == "beta":
if len(params) < 2:
raise ValueError("Missing required parameters: %s" % beta_msg)
if params[0] <= 0 or params[1] <= 0:
raise ValueError("Invalid parameters: %s" % beta_msg)
if len(params) == 4:
scale = params[3]
elif len(params) > 4:
raise ValueError("Too many parameters provided: %s" % beta_msg)
elif distribution == "norm":
if len(params) > 2:
raise ValueError("Too many parameters provided: %s" % norm_msg)
if len(params) == 2:
scale = params[1]
elif distribution == "gamma":
if len(params) < 1:
raise ValueError("Missing required parameters: %s" % gamma_msg)
if len(params) == 3:
scale = params[2]
if len(params) > 3:
raise ValueError("Too many parameters provided: %s" % gamma_msg)
elif params[0] <= 0:
raise ValueError("Invalid parameters: %s" % gamma_msg)
# elif distribution == 'poisson':
# if len(params) < 1:
# raise ValueError("Missing required parameters: %s" %poisson_msg)
# if len(params) > 2:
# raise ValueError("Too many parameters provided: %s" %poisson_msg)
# elif params[0] <= 0:
# raise ValueError("Invalid parameters: %s" %poisson_msg)
elif distribution == "uniform":
if len(params) == 2:
scale = params[1]
if len(params) > 2:
raise ValueError("Too many arguments provided: %s" % uniform_msg)
elif distribution == "chi2":
if len(params) < 1:
raise ValueError("Missing required parameters: %s" % chi2_msg)
elif len(params) == 3:
scale = params[2]
elif len(params) > 3:
raise ValueError("Too many arguments provided: %s" % chi2_msg)
if params[0] <= 0:
raise ValueError("Invalid parameters: %s" % chi2_msg)
elif distribution == "expon":
if len(params) == 2:
scale = params[1]
if len(params) > 2:
raise ValueError("Too many arguments provided: %s" % expon_msg)
if scale is not None and scale <= 0:
raise ValueError("std_dev and scale must be positive.")
else:
raise ValueError(
"params must be a dict or list, or use ge.dataset.util.infer_distribution_parameters(data, distribution)"
)
return | [
"def",
"validate_distribution_parameters",
"(",
"distribution",
",",
"params",
")",
":",
"norm_msg",
"=",
"(",
"\"norm distributions require 0 parameters and optionally 'mean', 'std_dev'.\"",
")",
"beta_msg",
"=",
"\"beta distributions require 2 positive parameters 'alpha', 'beta' and optionally 'loc', 'scale'.\"",
"gamma_msg",
"=",
"\"gamma distributions require 1 positive parameter 'alpha' and optionally 'loc','scale'.\"",
"# poisson_msg = \"poisson distributions require 1 positive parameter 'lambda' and optionally 'loc'.\"",
"uniform_msg",
"=",
"(",
"\"uniform distributions require 0 parameters and optionally 'loc', 'scale'.\"",
")",
"chi2_msg",
"=",
"\"chi2 distributions require 1 positive parameter 'df' and optionally 'loc', 'scale'.\"",
"expon_msg",
"=",
"(",
"\"expon distributions require 0 parameters and optionally 'loc', 'scale'.\"",
")",
"if",
"distribution",
"not",
"in",
"[",
"\"norm\"",
",",
"\"beta\"",
",",
"\"gamma\"",
",",
"\"poisson\"",
",",
"\"uniform\"",
",",
"\"chi2\"",
",",
"\"expon\"",
",",
"]",
":",
"raise",
"AttributeError",
"(",
"\"Unsupported distribution provided: %s\"",
"%",
"distribution",
")",
"if",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"# `params` is a dictionary",
"if",
"params",
".",
"get",
"(",
"\"std_dev\"",
",",
"1",
")",
"<=",
"0",
"or",
"params",
".",
"get",
"(",
"\"scale\"",
",",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"std_dev and scale must be positive.\"",
")",
"# alpha and beta are required and positive",
"if",
"distribution",
"==",
"\"beta\"",
"and",
"(",
"params",
".",
"get",
"(",
"\"alpha\"",
",",
"-",
"1",
")",
"<=",
"0",
"or",
"params",
".",
"get",
"(",
"\"beta\"",
",",
"-",
"1",
")",
"<=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"beta_msg",
")",
"# alpha is required and positive",
"elif",
"distribution",
"==",
"\"gamma\"",
"and",
"params",
".",
"get",
"(",
"\"alpha\"",
",",
"-",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"gamma_msg",
")",
"# lambda is a required and positive",
"# elif distribution == 'poisson' and params.get('lambda', -1) <= 0:",
"# raise ValueError(\"Invalid parameters: %s\" %poisson_msg)",
"# df is necessary and required to be positive",
"elif",
"distribution",
"==",
"\"chi2\"",
"and",
"params",
".",
"get",
"(",
"\"df\"",
",",
"-",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s:\"",
"%",
"chi2_msg",
")",
"elif",
"isinstance",
"(",
"params",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"params",
",",
"list",
")",
":",
"scale",
"=",
"None",
"# `params` is a tuple or a list",
"if",
"distribution",
"==",
"\"beta\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"beta_msg",
")",
"if",
"params",
"[",
"0",
"]",
"<=",
"0",
"or",
"params",
"[",
"1",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"beta_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"4",
":",
"scale",
"=",
"params",
"[",
"3",
"]",
"elif",
"len",
"(",
"params",
")",
">",
"4",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"beta_msg",
")",
"elif",
"distribution",
"==",
"\"norm\"",
":",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"norm_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"elif",
"distribution",
"==",
"\"gamma\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"gamma_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"3",
":",
"scale",
"=",
"params",
"[",
"2",
"]",
"if",
"len",
"(",
"params",
")",
">",
"3",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"gamma_msg",
")",
"elif",
"params",
"[",
"0",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"gamma_msg",
")",
"# elif distribution == 'poisson':",
"# if len(params) < 1:",
"# raise ValueError(\"Missing required parameters: %s\" %poisson_msg)",
"# if len(params) > 2:",
"# raise ValueError(\"Too many parameters provided: %s\" %poisson_msg)",
"# elif params[0] <= 0:",
"# raise ValueError(\"Invalid parameters: %s\" %poisson_msg)",
"elif",
"distribution",
"==",
"\"uniform\"",
":",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"uniform_msg",
")",
"elif",
"distribution",
"==",
"\"chi2\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"chi2_msg",
")",
"elif",
"len",
"(",
"params",
")",
"==",
"3",
":",
"scale",
"=",
"params",
"[",
"2",
"]",
"elif",
"len",
"(",
"params",
")",
">",
"3",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"chi2_msg",
")",
"if",
"params",
"[",
"0",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"chi2_msg",
")",
"elif",
"distribution",
"==",
"\"expon\"",
":",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"expon_msg",
")",
"if",
"scale",
"is",
"not",
"None",
"and",
"scale",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"std_dev and scale must be positive.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"params must be a dict or list, or use ge.dataset.util.infer_distribution_parameters(data, distribution)\"",
")",
"return"
] | [
358,
0
] | [
497,
10
] | python | en | ['en', 'en', 'en'] | True |
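A usage sketch for validate_distribution_parameters above, assuming the validator is importable; the parameter values are illustrative.

# Valid parameter sets pass silently (the function simply returns).
validate_distribution_parameters("norm", {"mean": 40, "std_dev": 5})
validate_distribution_parameters("beta", [0.5, 2.0, 0.0, 1.0])   # alpha, beta, loc, scale

# Invalid parameters raise ValueError with the matching message.
try:
    validate_distribution_parameters("gamma", {"alpha": -1})
except ValueError as err:
    print(err)   # gamma distributions require 1 positive parameter 'alpha' ...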
_scipy_distribution_positional_args_from_dict | (distribution, params) | Helper function that returns positional arguments for a scipy distribution using a dict of parameters.
See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\
to see an example of scipy's positional arguments. This function returns the arguments specified by the \
scipy.stat.distribution.cdf() for that distribution.
Args:
distribution (string): \
The scipy distribution name.
params (dict): \
A dict of named parameters.
Raises:
AttributeError: \
If an unsupported distribution is provided.
| Helper function that returns positional arguments for a scipy distribution using a dict of parameters. | def _scipy_distribution_positional_args_from_dict(distribution, params):
"""Helper function that returns positional arguments for a scipy distribution using a dict of parameters.
See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\
to see an example of scipy's positional arguments. This function returns the arguments specified by the \
scipy.stat.distribution.cdf() for that distribution.
Args:
distribution (string): \
The scipy distribution name.
params (dict): \
A dict of named parameters.
Raises:
AttributeError: \
If an unsupported distribution is provided.
"""
params["loc"] = params.get("loc", 0)
if "scale" not in params:
params["scale"] = 1
if distribution == "norm":
return params["mean"], params["std_dev"]
elif distribution == "beta":
return params["alpha"], params["beta"], params["loc"], params["scale"]
elif distribution == "gamma":
return params["alpha"], params["loc"], params["scale"]
# elif distribution == 'poisson':
# return params['lambda'], params['loc']
elif distribution == "uniform":
return params["min"], params["max"]
elif distribution == "chi2":
return params["df"], params["loc"], params["scale"]
elif distribution == "expon":
return params["loc"], params["scale"] | [
"def",
"_scipy_distribution_positional_args_from_dict",
"(",
"distribution",
",",
"params",
")",
":",
"params",
"[",
"\"loc\"",
"]",
"=",
"params",
".",
"get",
"(",
"\"loc\"",
",",
"0",
")",
"if",
"\"scale\"",
"not",
"in",
"params",
":",
"params",
"[",
"\"scale\"",
"]",
"=",
"1",
"if",
"distribution",
"==",
"\"norm\"",
":",
"return",
"params",
"[",
"\"mean\"",
"]",
",",
"params",
"[",
"\"std_dev\"",
"]",
"elif",
"distribution",
"==",
"\"beta\"",
":",
"return",
"params",
"[",
"\"alpha\"",
"]",
",",
"params",
"[",
"\"beta\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"elif",
"distribution",
"==",
"\"gamma\"",
":",
"return",
"params",
"[",
"\"alpha\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"# elif distribution == 'poisson':",
"# return params['lambda'], params['loc']",
"elif",
"distribution",
"==",
"\"uniform\"",
":",
"return",
"params",
"[",
"\"min\"",
"]",
",",
"params",
"[",
"\"max\"",
"]",
"elif",
"distribution",
"==",
"\"chi2\"",
":",
"return",
"params",
"[",
"\"df\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"elif",
"distribution",
"==",
"\"expon\"",
":",
"return",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]"
] | [
500,
0
] | [
535,
45
] | python | en | ['en', 'en', 'en'] | True |
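A usage sketch showing how the positional arguments returned by the helper above plug into scipy.stats; the distribution parameters are illustrative.

from scipy import stats

args = _scipy_distribution_positional_args_from_dict("norm", {"mean": 40, "std_dev": 5})
print(stats.norm.cdf(40, *args))    # 0.5, since 40 is the mean

args = _scipy_distribution_positional_args_from_dict("chi2", {"df": 3})
print(stats.chi2.cdf(1.0, *args))   # chi2 takes df, loc, scale positionally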
is_valid_continuous_partition_object | (partition_object) | Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`.
:param partition_object: The partition_object to evaluate
:return: Boolean
| Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`. | def is_valid_continuous_partition_object(partition_object):
"""Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`.
:param partition_object: The partition_object to evaluate
:return: Boolean
"""
if (
(partition_object is None)
or ("weights" not in partition_object)
or ("bins" not in partition_object)
):
return False
if "tail_weights" in partition_object:
if len(partition_object["tail_weights"]) != 2:
return False
comb_weights = partition_object["tail_weights"] + partition_object["weights"]
else:
comb_weights = partition_object["weights"]
## TODO: Consider adding this check to migrate to the tail_weights structure of partition objects
# if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):
# return False
# Expect one more bin edge than weight; all bin edges should be monotonically increasing; weights should sum to one
return (
(len(partition_object["bins"]) == (len(partition_object["weights"]) + 1))
and np.all(np.diff(partition_object["bins"]) > 0)
and np.allclose(np.sum(comb_weights), 1.0)
) | [
"def",
"is_valid_continuous_partition_object",
"(",
"partition_object",
")",
":",
"if",
"(",
"(",
"partition_object",
"is",
"None",
")",
"or",
"(",
"\"weights\"",
"not",
"in",
"partition_object",
")",
"or",
"(",
"\"bins\"",
"not",
"in",
"partition_object",
")",
")",
":",
"return",
"False",
"if",
"\"tail_weights\"",
"in",
"partition_object",
":",
"if",
"len",
"(",
"partition_object",
"[",
"\"tail_weights\"",
"]",
")",
"!=",
"2",
":",
"return",
"False",
"comb_weights",
"=",
"partition_object",
"[",
"\"tail_weights\"",
"]",
"+",
"partition_object",
"[",
"\"weights\"",
"]",
"else",
":",
"comb_weights",
"=",
"partition_object",
"[",
"\"weights\"",
"]",
"## TODO: Consider adding this check to migrate to the tail_weights structure of partition objects",
"# if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):",
"# return False",
"# Expect one more bin edge than weight; all bin edges should be monotonically increasing; weights should sum to one",
"return",
"(",
"(",
"len",
"(",
"partition_object",
"[",
"\"bins\"",
"]",
")",
"==",
"(",
"len",
"(",
"partition_object",
"[",
"\"weights\"",
"]",
")",
"+",
"1",
")",
")",
"and",
"np",
".",
"all",
"(",
"np",
".",
"diff",
"(",
"partition_object",
"[",
"\"bins\"",
"]",
")",
">",
"0",
")",
"and",
"np",
".",
"allclose",
"(",
"np",
".",
"sum",
"(",
"comb_weights",
")",
",",
"1.0",
")",
")"
] | [
538,
0
] | [
567,
5
] | python | en | ['en', 'en', 'en'] | True |
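An illustrative partition object for the validator above; bin edges are one longer than the weights, edges increase monotonically, and the (combined) weights sum to 1.0. The numbers are made up.

partition = {
    "bins": [0.0, 1.0, 2.0, 3.0],   # four edges -> three bins
    "weights": [0.25, 0.5, 0.25],   # sums to 1.0
}
assert is_valid_continuous_partition_object(partition)

# With tail_weights, the tail mass plus the bin weights must still sum to 1.0.
partition_with_tails = {
    "bins": [0.0, 1.0, 2.0, 3.0],
    "weights": [0.2, 0.4, 0.2],
    "tail_weights": [0.1, 0.1],
}
assert is_valid_continuous_partition_object(partition_with_tails)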
test_graceful_failure_with_no_internet | () | Test that having usage statistics enabled does not negatively impact kill signals or cause loss of queued usage statistics. | Test that having usage statistics enabled does not negatively impact kill signals or cause loss of queued usage statistics. | def test_graceful_failure_with_no_internet():
"""Test that having usage statistics enabled does not negatively impact kill signals or cause loss of queued usage statistics."""
# Execute process that initializes data context
# NOTE - JPC - 20200227 - this is crazy long (not because of logging I think, but worth revisiting)
acceptable_startup_time = 6
acceptable_shutdown_time = 1
nap_time = 0
start = datetime.datetime.now()
data_context_id = str(uuid.uuid4())
# Instruct the process to wait for 30 seconds after initializing before completing.
p = subprocess.Popen(
[
"python",
file_relative_path(
__file__, "./instantiate_context_with_usage_statistics.py"
),
data_context_id,
str(nap_time),
"True",
"True",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
outs, errs = p.communicate()
end = datetime.datetime.now()
# We didn't wait or send a signal, so just check that times were reasonable
assert (end - start) < datetime.timedelta(
seconds=acceptable_startup_time + acceptable_shutdown_time
)
outs = str(outs)
errs = str(errs)
assert "INFO" not in outs
assert "Done constructing a DataContext" in outs
assert "Ending a long nap" in outs | [
"def",
"test_graceful_failure_with_no_internet",
"(",
")",
":",
"# Execute process that initializes data context",
"# NOTE - JPC - 20200227 - this is crazy long (not because of logging I think, but worth revisiting)",
"acceptable_startup_time",
"=",
"6",
"acceptable_shutdown_time",
"=",
"1",
"nap_time",
"=",
"0",
"start",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"data_context_id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"# Instruct the process to wait for 30 seconds after initializing before completing.",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"python\"",
",",
"file_relative_path",
"(",
"__file__",
",",
"\"./instantiate_context_with_usage_statistics.py\"",
")",
",",
"data_context_id",
",",
"str",
"(",
"nap_time",
")",
",",
"\"True\"",
",",
"\"True\"",
",",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
")",
"outs",
",",
"errs",
"=",
"p",
".",
"communicate",
"(",
")",
"end",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"# We didn't wait or send a signal, so just check that times were reasonable",
"assert",
"(",
"end",
"-",
"start",
")",
"<",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"acceptable_startup_time",
"+",
"acceptable_shutdown_time",
")",
"outs",
"=",
"str",
"(",
"outs",
")",
"errs",
"=",
"str",
"(",
"errs",
")",
"assert",
"\"INFO\"",
"not",
"in",
"outs",
"assert",
"\"Done constructing a DataContext\"",
"in",
"outs",
"assert",
"\"Ending a long nap\"",
"in",
"outs"
] | [
144,
0
] | [
179,
38
] | python | en | ['en', 'en', 'en'] | True |
get_base_href_html | (full_url) | The base href line tells the html what the base page really is.
This is important when trying to open the page outside its home. | The base href line tells the html what the base page really is.
This is important when trying to open the page outside its home. | def get_base_href_html(full_url):
''' The base href line tells the html what the base page really is.
This is important when trying to open the page outside its home. '''
base_url = get_base_url(full_url)
return '<base href="%s">' % base_url | [
"def",
"get_base_href_html",
"(",
"full_url",
")",
":",
"base_url",
"=",
"get_base_url",
"(",
"full_url",
")",
"return",
"'<base href=\"%s\">'",
"%",
"base_url"
] | [
96,
0
] | [
100,
40
] | python | en | ['en', 'en', 'en'] | True |
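A small sketch of the helper above; the URL is made up and the exact output depends on get_base_url(), which is defined elsewhere in the same module.

# Hypothetical call; get_base_url() is assumed to strip everything after the domain.
html_tag = get_base_href_html("https://example.com/docs/page.html?x=1")
print(html_tag)  # e.g. '<base href="https://example.com">'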
get_html_source_with_base_href | (driver, page_source) | Combines the domain base href with the html source.
This is needed for the page html to render correctly. | Combines the domain base href with the html source.
This is needed for the page html to render correctly. | def get_html_source_with_base_href(driver, page_source):
''' Combines the domain base href with the html source.
This is needed for the page html to render correctly. '''
last_page = get_last_page(driver)
if '://' in last_page:
base_href_html = get_base_href_html(last_page)
return '%s\n%s' % (base_href_html, page_source)
return '' | [
"def",
"get_html_source_with_base_href",
"(",
"driver",
",",
"page_source",
")",
":",
"last_page",
"=",
"get_last_page",
"(",
"driver",
")",
"if",
"'://'",
"in",
"last_page",
":",
"base_href_html",
"=",
"get_base_href_html",
"(",
"last_page",
")",
"return",
"'%s\\n%s'",
"%",
"(",
"base_href_html",
",",
"page_source",
")",
"return",
"''"
] | [
103,
0
] | [
110,
13
] | python | en | ['en', 'en', 'en'] | True |
archive_logs_if_set | (log_path, archive_logs=False) | Handle Logging | Handle Logging | def archive_logs_if_set(log_path, archive_logs=False):
""" Handle Logging """
arg_join = " ".join(sys.argv)
if ("-n" in sys.argv) or ("-n=" in arg_join) or (arg_join == "-c"):
return # Skip if multithreaded
if log_path.endswith("/"):
log_path = log_path[:-1]
if not os.path.exists(log_path):
try:
os.makedirs(log_path)
except Exception:
pass # Only reachable during multi-threaded runs
else:
if settings.ARCHIVE_EXISTING_LOGS or archive_logs:
if len(os.listdir(log_path)) > 0:
archived_folder = "%s/../archived_logs/" % log_path
archived_folder = os.path.realpath(archived_folder) + '/'
log_path = os.path.realpath(log_path) + '/'
if not os.path.exists(archived_folder):
try:
os.makedirs(archived_folder)
except Exception:
pass # Only reachable during multi-threaded runs
time_id = str(int(time.time()))
archived_logs = "%slogs_%s" % (archived_folder, time_id)
copytree(log_path, archived_logs) | [
"def",
"archive_logs_if_set",
"(",
"log_path",
",",
"archive_logs",
"=",
"False",
")",
":",
"arg_join",
"=",
"\" \"",
".",
"join",
"(",
"sys",
".",
"argv",
")",
"if",
"(",
"\"-n\"",
"in",
"sys",
".",
"argv",
")",
"or",
"(",
"\"-n=\"",
"in",
"arg_join",
")",
"or",
"(",
"arg_join",
"==",
"\"-c\"",
")",
":",
"return",
"# Skip if multithreaded",
"if",
"log_path",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"log_path",
"=",
"log_path",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"log_path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"log_path",
")",
"except",
"Exception",
":",
"pass",
"# Only reachable during multi-threaded runs",
"else",
":",
"if",
"settings",
".",
"ARCHIVE_EXISTING_LOGS",
"or",
"archive_logs",
":",
"if",
"len",
"(",
"os",
".",
"listdir",
"(",
"log_path",
")",
")",
">",
"0",
":",
"archived_folder",
"=",
"\"%s/../archived_logs/\"",
"%",
"log_path",
"archived_folder",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"archived_folder",
")",
"+",
"'/'",
"log_path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"log_path",
")",
"+",
"'/'",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"archived_folder",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"archived_folder",
")",
"except",
"Exception",
":",
"pass",
"# Only reachable during multi-threaded runs",
"time_id",
"=",
"str",
"(",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"archived_logs",
"=",
"\"%slogs_%s\"",
"%",
"(",
"archived_folder",
",",
"time_id",
")",
"copytree",
"(",
"log_path",
",",
"archived_logs",
")"
] | [
127,
0
] | [
152,
49
] | python | en | ['it', 'ja', 'en'] | False |
log_folder_setup | (log_path, archive_logs=False) | Handle Logging | Handle Logging | def log_folder_setup(log_path, archive_logs=False):
""" Handle Logging """
if log_path.endswith("/"):
log_path = log_path[:-1]
if not os.path.exists(log_path):
try:
os.makedirs(log_path)
except Exception:
pass # Should only be reachable during multi-threaded runs
else:
archived_folder = "%s/../archived_logs/" % log_path
archived_folder = os.path.realpath(archived_folder) + '/'
if not os.path.exists(archived_folder):
try:
os.makedirs(archived_folder)
except Exception:
pass # Should only be reachable during multi-threaded runs
archived_logs = "%slogs_%s" % (
archived_folder, int(time.time()))
if len(os.listdir(log_path)) > 0:
try:
shutil.move(log_path, archived_logs)
os.makedirs(log_path)
except Exception:
pass # A file was probably open at the time
if not settings.ARCHIVE_EXISTING_LOGS and not archive_logs:
shutil.rmtree(archived_logs)
else:
a_join = " ".join(sys.argv)
if ("-n" in sys.argv) or ("-n=" in a_join) or (a_join == "-c"):
# Logs are saved/archived now if tests are multithreaded
pass
else:
shutil.rmtree(archived_logs) | [
"def",
"log_folder_setup",
"(",
"log_path",
",",
"archive_logs",
"=",
"False",
")",
":",
"if",
"log_path",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"log_path",
"=",
"log_path",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"log_path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"log_path",
")",
"except",
"Exception",
":",
"pass",
"# Should only be reachable during multi-threaded runs",
"else",
":",
"archived_folder",
"=",
"\"%s/../archived_logs/\"",
"%",
"log_path",
"archived_folder",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"archived_folder",
")",
"+",
"'/'",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"archived_folder",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"archived_folder",
")",
"except",
"Exception",
":",
"pass",
"# Should only be reachable during multi-threaded runs",
"archived_logs",
"=",
"\"%slogs_%s\"",
"%",
"(",
"archived_folder",
",",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"if",
"len",
"(",
"os",
".",
"listdir",
"(",
"log_path",
")",
")",
">",
"0",
":",
"try",
":",
"shutil",
".",
"move",
"(",
"log_path",
",",
"archived_logs",
")",
"os",
".",
"makedirs",
"(",
"log_path",
")",
"except",
"Exception",
":",
"pass",
"# A file was probably open at the time",
"if",
"not",
"settings",
".",
"ARCHIVE_EXISTING_LOGS",
"and",
"not",
"archive_logs",
":",
"shutil",
".",
"rmtree",
"(",
"archived_logs",
")",
"else",
":",
"a_join",
"=",
"\" \"",
".",
"join",
"(",
"sys",
".",
"argv",
")",
"if",
"(",
"\"-n\"",
"in",
"sys",
".",
"argv",
")",
"or",
"(",
"\"-n=\"",
"in",
"a_join",
")",
"or",
"(",
"a_join",
"==",
"\"-c\"",
")",
":",
"# Logs are saved/archived now if tests are multithreaded",
"pass",
"else",
":",
"shutil",
".",
"rmtree",
"(",
"archived_logs",
")"
] | [
155,
0
] | [
189,
48
] | python | en | ['it', 'ja', 'en'] | False |
Benchmark.hash | (self, val: str) |
Used only for testing purposes.
|
Used only for testing purposes.
| def hash(self, val: str):
"""
Used only for testing purposes.
"""
self._hash_value = val | [
"def",
"hash",
"(",
"self",
",",
"val",
":",
"str",
")",
":",
"self",
".",
"_hash_value",
"=",
"val"
] | [
136,
4
] | [
140,
30
] | python | en | ['en', 'error', 'th'] | False |
IR.agent_init | (self, aconf: Config) |
Initialize as the Intercept Agent, if we're doing that.
THIS WHOLE METHOD NEEDS TO GO AWAY: instead, just configure the agent with CRDs as usual.
However, that's just too painful to contemplate without `edgectl inject-agent`.
:param aconf: Config to work with
:return: None
|
Initialize as the Intercept Agent, if we're doing that. | def agent_init(self, aconf: Config) -> None:
"""
Initialize as the Intercept Agent, if we're doing that.
THIS WHOLE METHOD NEEDS TO GO AWAY: instead, just configure the agent with CRDs as usual.
However, that's just too painful to contemplate without `edgectl inject-agent`.
:param aconf: Config to work with
:return: None
"""
# Intercept stuff is an Edge Stack thing.
if not (self.edge_stack_allowed and self.agent_active):
self.logger.debug("Intercept agent not active, skipping initialization")
return
self.agent_service = os.environ.get("AGENT_SERVICE", None)
if self.agent_service is None:
# This is technically impossible, but whatever.
self.logger.info("Intercept agent active but no AGENT_SERVICE? skipping initialization")
self.agent_active = False
return
self.logger.debug(f"Intercept agent active for {self.agent_service}, initializing")
# We're going to either create a Host to terminate TLS, or to do cleartext. In neither
# case will we do ACME. Set additionalPort to -1 so we don't grab 8080 in the TLS case.
host_args: Dict[str, Any] = {
"hostname": "*",
"selector": {
"matchLabels": {
"intercept": self.agent_service
}
},
"acmeProvider": {
"authority": "none"
},
"requestPolicy": {
"insecure": {
"additionalPort": -1,
},
},
}
# Have they asked us to do TLS?
agent_termination_secret = os.environ.get("AGENT_TLS_TERM_SECRET", None)
if agent_termination_secret:
# Yup.
host_args["tlsSecret"] = { "name": agent_termination_secret }
else:
# No termination secret, so do cleartext.
host_args["requestPolicy"]["insecure"]["action"] = "Route"
host = IRHost(self, aconf, rkey=self.ambassador_module.rkey, location=self.ambassador_module.location,
name="agent-host",
**host_args)
if host.is_active():
host.referenced_by(self.ambassador_module)
host.sourced_by(self.ambassador_module)
self.logger.debug(f"Intercept agent: saving host {host.pretty()}")
# self.logger.debug(host.as_json())
self.save_host(host)
else:
self.logger.debug(f"Intercept agent: not saving inactive host {host.pretty()}")
# How about originating TLS?
agent_origination_secret = os.environ.get("AGENT_TLS_ORIG_SECRET", None)
if agent_origination_secret:
# Uhhhh. Synthesize a TLSContext for this, I guess.
#
# XXX What if they already have a context with this name?
ctx = IRTLSContext(self, aconf, rkey=self.ambassador_module.rkey, location=self.ambassador_module.location,
name="agent-origination-context",
secret=agent_origination_secret)
ctx.referenced_by(self.ambassador_module)
self.save_tls_context(ctx)
self.logger.debug(f"Intercept agent: saving origination TLSContext {ctx.name}")
# self.logger.debug(ctx.as_json())
self.agent_origination_ctx = ctx | [
"def",
"agent_init",
"(",
"self",
",",
"aconf",
":",
"Config",
")",
"->",
"None",
":",
"# Intercept stuff is an Edge Stack thing.",
"if",
"not",
"(",
"self",
".",
"edge_stack_allowed",
"and",
"self",
".",
"agent_active",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Intercept agent not active, skipping initialization\"",
")",
"return",
"self",
".",
"agent_service",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"AGENT_SERVICE\"",
",",
"None",
")",
"if",
"self",
".",
"agent_service",
"is",
"None",
":",
"# This is technically impossible, but whatever.",
"self",
".",
"logger",
".",
"info",
"(",
"\"Intercept agent active but no AGENT_SERVICE? skipping initialization\"",
")",
"self",
".",
"agent_active",
"=",
"False",
"return",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"Intercept agent active for {self.agent_service}, initializing\"",
")",
"# We're going to either create a Host to terminate TLS, or to do cleartext. In neither",
"# case will we do ACME. Set additionalPort to -1 so we don't grab 8080 in the TLS case.",
"host_args",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"\"hostname\"",
":",
"\"*\"",
",",
"\"selector\"",
":",
"{",
"\"matchLabels\"",
":",
"{",
"\"intercept\"",
":",
"self",
".",
"agent_service",
"}",
"}",
",",
"\"acmeProvider\"",
":",
"{",
"\"authority\"",
":",
"\"none\"",
"}",
",",
"\"requestPolicy\"",
":",
"{",
"\"insecure\"",
":",
"{",
"\"additionalPort\"",
":",
"-",
"1",
",",
"}",
",",
"}",
",",
"}",
"# Have they asked us to do TLS?",
"agent_termination_secret",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"AGENT_TLS_TERM_SECRET\"",
",",
"None",
")",
"if",
"agent_termination_secret",
":",
"# Yup.",
"host_args",
"[",
"\"tlsSecret\"",
"]",
"=",
"{",
"\"name\"",
":",
"agent_termination_secret",
"}",
"else",
":",
"# No termination secret, so do cleartext.",
"host_args",
"[",
"\"requestPolicy\"",
"]",
"[",
"\"insecure\"",
"]",
"[",
"\"action\"",
"]",
"=",
"\"Route\"",
"host",
"=",
"IRHost",
"(",
"self",
",",
"aconf",
",",
"rkey",
"=",
"self",
".",
"ambassador_module",
".",
"rkey",
",",
"location",
"=",
"self",
".",
"ambassador_module",
".",
"location",
",",
"name",
"=",
"\"agent-host\"",
",",
"*",
"*",
"host_args",
")",
"if",
"host",
".",
"is_active",
"(",
")",
":",
"host",
".",
"referenced_by",
"(",
"self",
".",
"ambassador_module",
")",
"host",
".",
"sourced_by",
"(",
"self",
".",
"ambassador_module",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"Intercept agent: saving host {host.pretty()}\"",
")",
"# self.logger.debug(host.as_json())",
"self",
".",
"save_host",
"(",
"host",
")",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"Intercept agent: not saving inactive host {host.pretty()}\"",
")",
"# How about originating TLS?",
"agent_origination_secret",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"AGENT_TLS_ORIG_SECRET\"",
",",
"None",
")",
"if",
"agent_origination_secret",
":",
"# Uhhhh. Synthesize a TLSContext for this, I guess.",
"#",
"# XXX What if they already have a context with this name?",
"ctx",
"=",
"IRTLSContext",
"(",
"self",
",",
"aconf",
",",
"rkey",
"=",
"self",
".",
"ambassador_module",
".",
"rkey",
",",
"location",
"=",
"self",
".",
"ambassador_module",
".",
"location",
",",
"name",
"=",
"\"agent-origination-context\"",
",",
"secret",
"=",
"agent_origination_secret",
")",
"ctx",
".",
"referenced_by",
"(",
"self",
".",
"ambassador_module",
")",
"self",
".",
"save_tls_context",
"(",
"ctx",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"Intercept agent: saving origination TLSContext {ctx.name}\"",
")",
"# self.logger.debug(ctx.as_json())",
"self",
".",
"agent_origination_ctx",
"=",
"ctx"
] | [
392,
4
] | [
478,
44
] | python | en | ['en', 'error', 'th'] | False |
IR.cache_fetch | (self, key: str) |
Fetch a key from our cache. If we get anything, make sure that its
IR pointer is set back to us -- since the cache can easily outlive
the IR, chances are pretty high that the object might've originally
been part of a different IR.
Yes, this implies that trying to use the cache for multiple IRs at
the same time is a Very Bad Idea.
|
Fetch a key from our cache. If we get anything, make sure that its
IR pointer is set back to us -- since the cache can easily outlive
the IR, chances are pretty high that the object might've originally
been part of a different IR. | def cache_fetch(self, key: str) -> Optional[IRResource]:
"""
Fetch a key from our cache. If we get anything, make sure that its
IR pointer is set back to us -- since the cache can easily outlive
the IR, chances are pretty high that the object might've originally
been part of a different IR.
Yes, this implies that trying to use the cache for multiple IRs at
the same time is a Very Bad Idea.
"""
rsrc = self.cache[key]
# Did we get anything?
if rsrc is not None:
# By definition, anything the IR layer pulls from the cache must be
# an IRResource.
assert(isinstance(rsrc, IRResource))
# Since it's an IRResource, it has a pointer to the IR. Reset that.
rsrc.ir = self
return rsrc | [
"def",
"cache_fetch",
"(",
"self",
",",
"key",
":",
"str",
")",
"->",
"Optional",
"[",
"IRResource",
"]",
":",
"rsrc",
"=",
"self",
".",
"cache",
"[",
"key",
"]",
"# Did we get anything?",
"if",
"rsrc",
"is",
"not",
"None",
":",
"# By definition, anything the IR layer pulls from the cache must be",
"# an IRResource.",
"assert",
"(",
"isinstance",
"(",
"rsrc",
",",
"IRResource",
")",
")",
"# Since it's an IRResource, it has a pointer to the IR. Reset that.",
"rsrc",
".",
"ir",
"=",
"self",
"return",
"rsrc"
] | [
548,
4
] | [
570,
19
] | python | en | ['en', 'error', 'th'] | False |
IR.cache_add | (self, rsrc: IRResource) |
Add an IRResource to our cache. Mostly this is here to let mypy check
that everything cached by the IR layer is an IRResource.
|
Add an IRResource to our cache. Mostly this is here to let mypy check
that everything cached by the IR layer is an IRResource.
| def cache_add(self, rsrc: IRResource) -> None:
"""
Add an IRResource to our cache. Mostly this is here to let mypy check
that everything cached by the IR layer is an IRResource.
"""
self.cache.add(rsrc) | [
"def",
"cache_add",
"(",
"self",
",",
"rsrc",
":",
"IRResource",
")",
"->",
"None",
":",
"self",
".",
"cache",
".",
"add",
"(",
"rsrc",
")"
] | [
572,
4
] | [
577,
28
] | python | en | ['en', 'error', 'th'] | False |
IR.cache_link | (self, owner: IRResource, owned: IRResource) |
Link two IRResources in our cache. Mostly this is here to let mypy check
that everything linked by the IR layer is an IRResource.
|
Link two IRResources in our cache. Mostly this is here to let mypy check
that everything linked by the IR layer is an IRResource.
| def cache_link(self, owner: IRResource, owned: IRResource) -> None:
"""
Link two IRResources in our cache. Mostly this is here to let mypy check
that everything linked by the IR layer is an IRResource.
"""
self.cache.link(owner, owned) | [
"def",
"cache_link",
"(",
"self",
",",
"owner",
":",
"IRResource",
",",
"owned",
":",
"IRResource",
")",
"->",
"None",
":",
"self",
".",
"cache",
".",
"link",
"(",
"owner",
",",
"owned",
")"
] | [
579,
4
] | [
584,
37
] | python | en | ['en', 'error', 'th'] | False |
_check_valid_s3_path | (
path: str,
) | Performs a basic check for validity of the S3 path | Performs a basic check for validity of the S3 path | def _check_valid_s3_path(
path: str,
) -> None:
"""Performs a basic check for validity of the S3 path"""
bad_chars = [c for c in INVALID_S3_CHARS if c in path]
if len(bad_chars) > 0:
msg = (
f"The parsed S3 path={path} contains the invalid characters {bad_chars}."
"Please make sure your regex is correct and characters are escaped."
)
if "*" in bad_chars:
msg += "Note: `*` is internally used to replace the regex for `.`."
raise ParserError(msg) | [
"def",
"_check_valid_s3_path",
"(",
"path",
":",
"str",
",",
")",
"->",
"None",
":",
"bad_chars",
"=",
"[",
"c",
"for",
"c",
"in",
"INVALID_S3_CHARS",
"if",
"c",
"in",
"path",
"]",
"if",
"len",
"(",
"bad_chars",
")",
">",
"0",
":",
"msg",
"=",
"(",
"f\"The parsed S3 path={path} contains the invalid characters {bad_chars}.\"",
"\"Please make sure your regex is correct and characters are escaped.\"",
")",
"if",
"\"*\"",
"in",
"bad_chars",
":",
"msg",
"+=",
"\"Note: `*` is internally used to replace the regex for `.`.\"",
"raise",
"ParserError",
"(",
"msg",
")"
] | [
145,
0
] | [
157,
30
] | python | en | ['en', 'en', 'en'] | True |
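A quick sketch of the validator's behaviour; the paths are invented and it is assumed that '*' is among INVALID_S3_CHARS, as the error message above suggests.

_check_valid_s3_path("my-bucket/data/2020/file.csv")   # passes silently

try:
    _check_valid_s3_path("my-bucket/data/*.csv")        # '*' is rejected
except ParserError as err:
    print(err)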
InferredAssetS3DataConnector.__init__ | (
self,
name: str,
datasource_name: str,
bucket: str,
execution_engine: Optional[ExecutionEngine] = None,
default_regex: Optional[dict] = None,
sorters: Optional[list] = None,
prefix: Optional[str] = "",
delimiter: Optional[str] = "/",
max_keys: Optional[int] = 1000,
boto3_options: Optional[dict] = None,
batch_spec_passthrough: Optional[dict] = None,
) |
InferredAssetS3DataConnector for connecting to S3.
Args:
name (str): required name for data_connector
datasource_name (str): required name for datasource
bucket (str): bucket for S3
execution_engine (ExecutionEngine): optional reference to ExecutionEngine
default_regex (dict): optional regex configuration for filtering data_references
sorters (list): optional list of sorters for sorting data_references
prefix (str): S3 prefix
delimiter (str): S3 delimiter
max_keys (int): S3 max_keys (default is 1000)
boto3_options (dict): optional boto3 options
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
|
InferredAssetS3DataConnector for connecting to S3. | def __init__(
self,
name: str,
datasource_name: str,
bucket: str,
execution_engine: Optional[ExecutionEngine] = None,
default_regex: Optional[dict] = None,
sorters: Optional[list] = None,
prefix: Optional[str] = "",
delimiter: Optional[str] = "/",
max_keys: Optional[int] = 1000,
boto3_options: Optional[dict] = None,
batch_spec_passthrough: Optional[dict] = None,
):
"""
InferredAssetS3DataConnector for connecting to S3.
Args:
name (str): required name for data_connector
datasource_name (str): required name for datasource
bucket (str): bucket for S3
execution_engine (ExecutionEngine): optional reference to ExecutionEngine
default_regex (dict): optional regex configuration for filtering data_references
sorters (list): optional list of sorters for sorting data_references
prefix (str): S3 prefix
delimiter (str): S3 delimiter
max_keys (int): S3 max_keys (default is 1000)
boto3_options (dict): optional boto3 options
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
"""
logger.debug(f'Constructing InferredAssetS3DataConnector "{name}".')
super().__init__(
name=name,
datasource_name=datasource_name,
execution_engine=execution_engine,
default_regex=default_regex,
sorters=sorters,
batch_spec_passthrough=batch_spec_passthrough,
)
self._bucket = bucket
self._prefix = os.path.join(prefix, "")
self._delimiter = delimiter
self._max_keys = max_keys
if boto3_options is None:
boto3_options = {}
try:
self._s3 = boto3.client("s3", **boto3_options)
except (TypeError, AttributeError):
raise ImportError(
"Unable to load boto3 (it is required for InferredAssetS3DataConnector)."
) | [
"def",
"__init__",
"(",
"self",
",",
"name",
":",
"str",
",",
"datasource_name",
":",
"str",
",",
"bucket",
":",
"str",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"default_regex",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"sorters",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"prefix",
":",
"Optional",
"[",
"str",
"]",
"=",
"\"\"",
",",
"delimiter",
":",
"Optional",
"[",
"str",
"]",
"=",
"\"/\"",
",",
"max_keys",
":",
"Optional",
"[",
"int",
"]",
"=",
"1000",
",",
"boto3_options",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"batch_spec_passthrough",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
")",
":",
"logger",
".",
"debug",
"(",
"f'Constructing InferredAssetS3DataConnector \"{name}\".'",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"name",
"=",
"name",
",",
"datasource_name",
"=",
"datasource_name",
",",
"execution_engine",
"=",
"execution_engine",
",",
"default_regex",
"=",
"default_regex",
",",
"sorters",
"=",
"sorters",
",",
"batch_spec_passthrough",
"=",
"batch_spec_passthrough",
",",
")",
"self",
".",
"_bucket",
"=",
"bucket",
"self",
".",
"_prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"\"",
")",
"self",
".",
"_delimiter",
"=",
"delimiter",
"self",
".",
"_max_keys",
"=",
"max_keys",
"if",
"boto3_options",
"is",
"None",
":",
"boto3_options",
"=",
"{",
"}",
"try",
":",
"self",
".",
"_s3",
"=",
"boto3",
".",
"client",
"(",
"\"s3\"",
",",
"*",
"*",
"boto3_options",
")",
"except",
"(",
"TypeError",
",",
"AttributeError",
")",
":",
"raise",
"ImportError",
"(",
"\"Unable to load boto3 (it is required for InferredAssetS3DataConnector).\"",
")"
] | [
37,
4
] | [
91,
13
] | python | en | ['en', 'error', 'th'] | False |
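An illustrative construction matching the signature documented above, assuming boto3 is installed; the bucket, prefix, and regex values are placeholders.

connector = InferredAssetS3DataConnector(
    name="my_s3_connector",              # placeholder names throughout
    datasource_name="my_datasource",
    bucket="my-example-bucket",
    prefix="data/",
    default_regex={
        "pattern": r"data/(.*)\.csv",
        "group_names": ["data_asset_name"],
    },
)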
InferredAssetS3DataConnector.build_batch_spec | (self, batch_definition: BatchDefinition) |
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
Args:
batch_definition (BatchDefinition): to be used to build batch_spec
Returns:
BatchSpec built from batch_definition
|
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function. | def build_batch_spec(self, batch_definition: BatchDefinition) -> S3BatchSpec:
"""
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
Args:
batch_definition (BatchDefinition): to be used to build batch_spec
Returns:
BatchSpec built from batch_definition
"""
batch_spec: PathBatchSpec = super().build_batch_spec(
batch_definition=batch_definition
)
return S3BatchSpec(batch_spec) | [
"def",
"build_batch_spec",
"(",
"self",
",",
"batch_definition",
":",
"BatchDefinition",
")",
"->",
"S3BatchSpec",
":",
"batch_spec",
":",
"PathBatchSpec",
"=",
"super",
"(",
")",
".",
"build_batch_spec",
"(",
"batch_definition",
"=",
"batch_definition",
")",
"return",
"S3BatchSpec",
"(",
"batch_spec",
")"
] | [
93,
4
] | [
106,
38
] | python | en | ['en', 'error', 'th'] | False |
InferredAssetS3DataConnector._get_data_reference_list | (
self, data_asset_name: Optional[str] = None
) |
List objects in the underlying data store to create a list of data_references.
This method is used to refresh the cache.
|
List objects in the underlying data store to create a list of data_references. | def _get_data_reference_list(
self, data_asset_name: Optional[str] = None
) -> List[str]:
"""
List objects in the underlying data store to create a list of data_references.
This method is used to refresh the cache.
"""
query_options: dict = {
"Bucket": self._bucket,
"Prefix": self._prefix,
"Delimiter": self._delimiter,
"MaxKeys": self._max_keys,
}
path_list: List[str] = [
key
for key in list_s3_keys(
s3=self._s3,
query_options=query_options,
iterator_dict={},
recursive=True,
)
]
return path_list | [
"def",
"_get_data_reference_list",
"(",
"self",
",",
"data_asset_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"str",
"]",
":",
"query_options",
":",
"dict",
"=",
"{",
"\"Bucket\"",
":",
"self",
".",
"_bucket",
",",
"\"Prefix\"",
":",
"self",
".",
"_prefix",
",",
"\"Delimiter\"",
":",
"self",
".",
"_delimiter",
",",
"\"MaxKeys\"",
":",
"self",
".",
"_max_keys",
",",
"}",
"path_list",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"key",
"for",
"key",
"in",
"list_s3_keys",
"(",
"s3",
"=",
"self",
".",
"_s3",
",",
"query_options",
"=",
"query_options",
",",
"iterator_dict",
"=",
"{",
"}",
",",
"recursive",
"=",
"True",
",",
")",
"]",
"return",
"path_list"
] | [
108,
4
] | [
132,
24
] | python | en | ['en', 'error', 'th'] | False |
initialized_project | (mock_webbrowser, monkeypatch, tmp_path_factory) | This is an initialized project through the CLI. | This is an initialized project through the CLI. | def initialized_project(mock_webbrowser, monkeypatch, tmp_path_factory):
"""This is an initialized project through the CLI."""
project_dir = str(tmp_path_factory.mktemp("my_rad_project"))
os.makedirs(os.path.join(project_dir, "data"))
data_folder_path = os.path.join(project_dir, "data")
data_path = os.path.join(project_dir, "data/Titanic.csv")
fixture_path = file_relative_path(__file__, "../test_sets/Titanic.csv")
shutil.copy(fixture_path, data_path)
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(project_dir)
_ = runner.invoke(
cli,
["--v3-api", "init"],
input=f"\n\n1\n1\n{data_folder_path}\n\n\n\n2\n{data_path}\n\n\n\n",
catch_exceptions=False,
)
assert mock_webbrowser.call_count == 1
assert (
"{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/".format(
project_dir
)
in mock_webbrowser.call_args[0][0]
)
context = DataContext(os.path.join(project_dir, DataContext.GE_DIR))
assert isinstance(context, DataContext)
assert len(context.list_datasources()) == 1
return project_dir | [
"def",
"initialized_project",
"(",
"mock_webbrowser",
",",
"monkeypatch",
",",
"tmp_path_factory",
")",
":",
"project_dir",
"=",
"str",
"(",
"tmp_path_factory",
".",
"mktemp",
"(",
"\"my_rad_project\"",
")",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"data\"",
")",
")",
"data_folder_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"data\"",
")",
"data_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\"data/Titanic.csv\"",
")",
"fixture_path",
"=",
"file_relative_path",
"(",
"__file__",
",",
"\"../test_sets/Titanic.csv\"",
")",
"shutil",
".",
"copy",
"(",
"fixture_path",
",",
"data_path",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"monkeypatch",
".",
"chdir",
"(",
"project_dir",
")",
"_",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"--v3-api\"",
",",
"\"init\"",
"]",
",",
"input",
"=",
"f\"\\n\\n1\\n1\\n{data_folder_path}\\n\\n\\n\\n2\\n{data_path}\\n\\n\\n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"mock_webbrowser",
".",
"call_count",
"==",
"1",
"assert",
"(",
"\"{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/\"",
".",
"format",
"(",
"project_dir",
")",
"in",
"mock_webbrowser",
".",
"call_args",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"context",
"=",
"DataContext",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"DataContext",
".",
"GE_DIR",
")",
")",
"assert",
"isinstance",
"(",
"context",
",",
"DataContext",
")",
"assert",
"len",
"(",
"context",
".",
"list_datasources",
"(",
")",
")",
"==",
"1",
"return",
"project_dir"
] | [
281,
0
] | [
308,
22
] | python | en | ['en', 'en', 'en'] | True |
load_data | (data, features, transformations={}) | Load data and set the feature matrix and label vector.
Args:
data (pandas.DataFrame): total input data
features (list of str): column names to be used in the inference model
transformations (dict of (str, func)): transformations to be applied to features
Returns:
X (numpy.matrix): a feature matrix
| Load data and set the feature matrix and label vector. | def load_data(data, features, transformations={}):
"""Load data and set the feature matrix and label vector.
Args:
data (pandas.DataFrame): total input data
features (list of str): column names to be used in the inference model
transformations (dict of (str, func)): transformations to be applied to features
Returns:
X (numpy.matrix): a feature matrix
"""
df = data[features].copy()
bool_cols = [col for col in df.columns if df[col].dtype == bool]
df.loc[:, bool_cols] = df[bool_cols].astype(np.int8)
for col, transformation in transformations.items():
logger.info('Applying {} to {}'.format(transformation.__name__, col))
df[col] = df[col].apply(transformation)
cat_cols = [col for col in features if df[col].dtype == np.object]
num_cols = [col for col in features if col not in cat_cols]
logger.info('Applying one-hot-encoding to {}'.format(cat_cols))
ohe = OneHotEncoder(min_obs=df.shape[0] * 0.001)
X_cat = ohe.fit_transform(df[cat_cols]).todense()
X = np.hstack([df[num_cols].values, X_cat])
return X | [
"def",
"load_data",
"(",
"data",
",",
"features",
",",
"transformations",
"=",
"{",
"}",
")",
":",
"df",
"=",
"data",
"[",
"features",
"]",
".",
"copy",
"(",
")",
"bool_cols",
"=",
"[",
"col",
"for",
"col",
"in",
"df",
".",
"columns",
"if",
"df",
"[",
"col",
"]",
".",
"dtype",
"==",
"bool",
"]",
"df",
".",
"loc",
"[",
":",
",",
"bool_cols",
"]",
"=",
"df",
"[",
"bool_cols",
"]",
".",
"astype",
"(",
"np",
".",
"int8",
")",
"for",
"col",
",",
"transformation",
"in",
"transformations",
".",
"items",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'Applying {} to {}'",
".",
"format",
"(",
"transformation",
".",
"__name__",
",",
"col",
")",
")",
"df",
"[",
"col",
"]",
"=",
"df",
"[",
"col",
"]",
".",
"apply",
"(",
"transformation",
")",
"cat_cols",
"=",
"[",
"col",
"for",
"col",
"in",
"features",
"if",
"df",
"[",
"col",
"]",
".",
"dtype",
"==",
"np",
".",
"object",
"]",
"num_cols",
"=",
"[",
"col",
"for",
"col",
"in",
"features",
"if",
"col",
"not",
"in",
"cat_cols",
"]",
"logger",
".",
"info",
"(",
"'Applying one-hot-encoding to {}'",
".",
"format",
"(",
"cat_cols",
")",
")",
"ohe",
"=",
"OneHotEncoder",
"(",
"min_obs",
"=",
"df",
".",
"shape",
"[",
"0",
"]",
"*",
"0.001",
")",
"X_cat",
"=",
"ohe",
".",
"fit_transform",
"(",
"df",
"[",
"cat_cols",
"]",
")",
".",
"todense",
"(",
")",
"X",
"=",
"np",
".",
"hstack",
"(",
"[",
"df",
"[",
"num_cols",
"]",
".",
"values",
",",
"X_cat",
"]",
")",
"return",
"X"
] | [
220,
0
] | [
250,
12
] | python | en | ['en', 'en', 'en'] | True |
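A hedged usage sketch for load_data; the DataFrame, column names, and the log1p transformation are invented, and the module-level logger and encoder classes are assumed to be importable alongside it.

import numpy as np
import pandas as pd

# Invented frame: one numeric, one boolean, and one categorical column.
df = pd.DataFrame({
    "income": [40000, 52000, 61000],
    "is_member": [True, False, True],
    "region": ["north", "south", "north"],
})

# Log-transform the skewed numeric column while building the feature matrix.
X = load_data(df, ["income", "is_member", "region"],
              transformations={"income": np.log1p})
print(X.shape)  # numeric + boolean columns plus dummy columns for "region"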
LabelEncoder.__init__ | (self, min_obs=10) | Initialize the LabelEncoder class object.
Args:
min_obs (int): minimum number of observations to assign a label.
| Initialize the LabelEncoder class object. | def __init__(self, min_obs=10):
"""Initialize the LabelEncoder class object.
Args:
min_obs (int): minimum number of observations to assign a label.
"""
self.min_obs = min_obs | [
"def",
"__init__",
"(",
"self",
",",
"min_obs",
"=",
"10",
")",
":",
"self",
".",
"min_obs",
"=",
"min_obs"
] | [
24,
4
] | [
31,
30
] | python | en | ['en', 'en', 'en'] | True |
LabelEncoder._get_label_encoder_and_max | (self, x) | Return a mapping from values and its maximum of a column to integer labels.
Args:
x (pandas.Series): a categorical column to encode.
Returns:
label_encoder (dict): mapping from values of features to integers
max_label (int): maximum label
| Return a mapping from values and its maximum of a column to integer labels. | def _get_label_encoder_and_max(self, x):
"""Return a mapping from values and its maximum of a column to integer labels.
Args:
x (pandas.Series): a categorical column to encode.
Returns:
label_encoder (dict): mapping from values of features to integers
max_label (int): maximum label
"""
# NaN cannot be used as a key for dict. So replace it with a random integer.
label_count = x.fillna(NAN_INT).value_counts()
n_uniq = label_count.shape[0]
label_count = label_count[label_count >= self.min_obs]
n_uniq_new = label_count.shape[0]
# If every label appears more than min_obs, new label starts from 0.
# Otherwise, new label starts from 1 and 0 is used for all old labels
# that appear less than min_obs.
offset = 0 if n_uniq == n_uniq_new else 1
label_encoder = pd.Series(np.arange(n_uniq_new) + offset, index=label_count.index)
max_label = label_encoder.max()
label_encoder = label_encoder.to_dict()
return label_encoder, max_label | [
"def",
"_get_label_encoder_and_max",
"(",
"self",
",",
"x",
")",
":",
"# NaN cannot be used as a key for dict. So replace it with a random integer.",
"label_count",
"=",
"x",
".",
"fillna",
"(",
"NAN_INT",
")",
".",
"value_counts",
"(",
")",
"n_uniq",
"=",
"label_count",
".",
"shape",
"[",
"0",
"]",
"label_count",
"=",
"label_count",
"[",
"label_count",
">=",
"self",
".",
"min_obs",
"]",
"n_uniq_new",
"=",
"label_count",
".",
"shape",
"[",
"0",
"]",
"# If every label appears more than min_obs, new label starts from 0.",
"# Otherwise, new label starts from 1 and 0 is used for all old labels",
"# that appear less than min_obs.",
"offset",
"=",
"0",
"if",
"n_uniq",
"==",
"n_uniq_new",
"else",
"1",
"label_encoder",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"arange",
"(",
"n_uniq_new",
")",
"+",
"offset",
",",
"index",
"=",
"label_count",
".",
"index",
")",
"max_label",
"=",
"label_encoder",
".",
"max",
"(",
")",
"label_encoder",
"=",
"label_encoder",
".",
"to_dict",
"(",
")",
"return",
"label_encoder",
",",
"max_label"
] | [
36,
4
] | [
63,
39
] | python | en | ['en', 'en', 'en'] | True |
LabelEncoder._transform_col | (self, x, i) | Encode one categorical column into labels.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
x (pandas.Series): a column with labels.
| Encode one categorical column into labels. | def _transform_col(self, x, i):
"""Encode one categorical column into labels.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
x (pandas.Series): a column with labels.
"""
return x.fillna(NAN_INT).map(self.label_encoders[i]).fillna(0) | [
"def",
"_transform_col",
"(",
"self",
",",
"x",
",",
"i",
")",
":",
"return",
"x",
".",
"fillna",
"(",
"NAN_INT",
")",
".",
"map",
"(",
"self",
".",
"label_encoders",
"[",
"i",
"]",
")",
".",
"fillna",
"(",
"0",
")"
] | [
65,
4
] | [
75,
70
] | python | en | ['it', 'en', 'en'] | True |
LabelEncoder.transform | (self, X) | Encode categorical columns into label encoded columns
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X (pandas.DataFrame): label encoded columns
| Encode categorical columns into label encoded columns | def transform(self, X):
"""Encode categorical columns into label encoded columns
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X (pandas.DataFrame): label encoded columns
"""
for i, col in enumerate(X.columns):
X.loc[:, col] = self._transform_col(X[col], i)
return X | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"X",
".",
"columns",
")",
":",
"X",
".",
"loc",
"[",
":",
",",
"col",
"]",
"=",
"self",
".",
"_transform_col",
"(",
"X",
"[",
"col",
"]",
",",
"i",
")",
"return",
"X"
] | [
87,
4
] | [
100,
16
] | python | en | ['en', 'en', 'en'] | True |
LabelEncoder.fit_transform | (self, X, y=None) | Encode categorical columns into label encoded columns
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X (pandas.DataFrame): label encoded columns
| Encode categorical columns into label encoded columns | def fit_transform(self, X, y=None):
"""Encode categorical columns into label encoded columns
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X (pandas.DataFrame): label encoded columns
"""
self.label_encoders = [None] * X.shape[1]
self.label_maxes = [None] * X.shape[1]
for i, col in enumerate(X.columns):
self.label_encoders[i], self.label_maxes[i] = \
self._get_label_encoder_and_max(X[col])
X.loc[:, col] = X[col].fillna(NAN_INT).map(self.label_encoders[i]).fillna(0)
return X | [
"def",
"fit_transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"self",
".",
"label_encoders",
"=",
"[",
"None",
"]",
"*",
"X",
".",
"shape",
"[",
"1",
"]",
"self",
".",
"label_maxes",
"=",
"[",
"None",
"]",
"*",
"X",
".",
"shape",
"[",
"1",
"]",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"X",
".",
"columns",
")",
":",
"self",
".",
"label_encoders",
"[",
"i",
"]",
",",
"self",
".",
"label_maxes",
"[",
"i",
"]",
"=",
"self",
".",
"_get_label_encoder_and_max",
"(",
"X",
"[",
"col",
"]",
")",
"X",
".",
"loc",
"[",
":",
",",
"col",
"]",
"=",
"X",
"[",
"col",
"]",
".",
"fillna",
"(",
"NAN_INT",
")",
".",
"map",
"(",
"self",
".",
"label_encoders",
"[",
"i",
"]",
")",
".",
"fillna",
"(",
"0",
")",
"return",
"X"
] | [
102,
4
] | [
121,
16
] | python | en | ['en', 'en', 'en'] | True |
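A small illustration of the label encoder; the frame is invented and min_obs=1 keeps even singleton categories.

import pandas as pd

df = pd.DataFrame({
    "city":   ["nyc", "nyc", "sf", "sf", "la"],
    "device": ["ios", "android", "ios", "ios", "web"],
})

le = LabelEncoder(min_obs=1)           # keep every observed category
encoded = le.fit_transform(df.copy())  # integer labels, one column per input
print(encoded.dtypes)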
OneHotEncoder.__init__ | (self, min_obs=10) | Initialize the OneHotEncoder class object.
Args:
min_obs (int): minimum number of observations to create a dummy variable
| Initialize the OneHotEncoder class object. | def __init__(self, min_obs=10):
"""Initialize the OneHotEncoder class object.
Args:
min_obs (int): minimum number of observations to create a dummy variable
"""
self.min_obs = min_obs
self.label_encoder = LabelEncoder(min_obs) | [
"def",
"__init__",
"(",
"self",
",",
"min_obs",
"=",
"10",
")",
":",
"self",
".",
"min_obs",
"=",
"min_obs",
"self",
".",
"label_encoder",
"=",
"LabelEncoder",
"(",
"min_obs",
")"
] | [
135,
4
] | [
143,
50
] | python | en | ['en', 'en', 'en'] | True |
OneHotEncoder._transform_col | (self, x, i) | Encode one categorical column into sparse matrix with one-hot-encoding.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
variable into dummy variables
| Encode one categorical column into sparse matrix with one-hot-encoding. | def _transform_col(self, x, i):
"""Encode one categorical column into sparse matrix with one-hot-encoding.
Args:
x (pandas.Series): a categorical column to encode
i (int): column index
Returns:
X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
variable into dummy variables
"""
labels = self.label_encoder._transform_col(x, i)
label_max = self.label_encoder.label_maxes[i]
# build row and column index for non-zero values of a sparse matrix
index = np.array(range(len(labels)))
i = index[labels > 0]
j = labels[labels > 0] - 1 # column index starts from 0
if len(i) > 0:
return sparse.coo_matrix((np.ones_like(i), (i, j)),
shape=(x.shape[0], label_max))
else:
# if there is no non-zero value, return no matrix
return None | [
"def",
"_transform_col",
"(",
"self",
",",
"x",
",",
"i",
")",
":",
"labels",
"=",
"self",
".",
"label_encoder",
".",
"_transform_col",
"(",
"x",
",",
"i",
")",
"label_max",
"=",
"self",
".",
"label_encoder",
".",
"label_maxes",
"[",
"i",
"]",
"# build row and column index for non-zero values of a sparse matrix",
"index",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"len",
"(",
"labels",
")",
")",
")",
"i",
"=",
"index",
"[",
"labels",
">",
"0",
"]",
"j",
"=",
"labels",
"[",
"labels",
">",
"0",
"]",
"-",
"1",
"# column index starts from 0",
"if",
"len",
"(",
"i",
")",
">",
"0",
":",
"return",
"sparse",
".",
"coo_matrix",
"(",
"(",
"np",
".",
"ones_like",
"(",
"i",
")",
",",
"(",
"i",
",",
"j",
")",
")",
",",
"shape",
"=",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"label_max",
")",
")",
"else",
":",
"# if there is no non-zero value, return no matrix",
"return",
"None"
] | [
148,
4
] | [
173,
23
] | python | en | ['en', 'en', 'en'] | True |
OneHotEncoder.transform | (self, X) | Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
variables into dummy variables
| Encode categorical columns into sparse matrix with one-hot-encoding. | def transform(self, X):
"""Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
variables into dummy variables
"""
for i, col in enumerate(X.columns):
X_col = self._transform_col(X[col], i)
if X_col is not None:
if i == 0:
X_new = X_col
else:
X_new = sparse.hstack((X_new, X_col))
logger.debug('{} --> {} features'.format(
col, self.label_encoder.label_maxes[i])
)
return X_new | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"X",
".",
"columns",
")",
":",
"X_col",
"=",
"self",
".",
"_transform_col",
"(",
"X",
"[",
"col",
"]",
",",
"i",
")",
"if",
"X_col",
"is",
"not",
"None",
":",
"if",
"i",
"==",
"0",
":",
"X_new",
"=",
"X_col",
"else",
":",
"X_new",
"=",
"sparse",
".",
"hstack",
"(",
"(",
"X_new",
",",
"X_col",
")",
")",
"logger",
".",
"debug",
"(",
"'{} --> {} features'",
".",
"format",
"(",
"col",
",",
"self",
".",
"label_encoder",
".",
"label_maxes",
"[",
"i",
"]",
")",
")",
"return",
"X_new"
] | [
180,
4
] | [
203,
20
] | python | en | ['en', 'en', 'en'] | True |
OneHotEncoder.fit_transform | (self, X, y=None) | Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
sparse matrix encoding categorical variables into dummy variables
| Encode categorical columns into sparse matrix with one-hot-encoding. | def fit_transform(self, X, y=None):
"""Encode categorical columns into sparse matrix with one-hot-encoding.
Args:
X (pandas.DataFrame): categorical columns to encode
Returns:
sparse matrix encoding categorical variables into dummy variables
"""
self.label_encoder.fit(X)
return self.transform(X) | [
"def",
"fit_transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"self",
".",
"label_encoder",
".",
"fit",
"(",
"X",
")",
"return",
"self",
".",
"transform",
"(",
"X",
")"
] | [
205,
4
] | [
217,
32
] | python | en | ['en', 'en', 'en'] | True |
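The companion one-hot encoder used the same way; the data is invented, and LabelEncoder.fit (not shown in this excerpt) is assumed to populate the label maps that transform() relies on. The result is a scipy sparse matrix.

import pandas as pd

df = pd.DataFrame({"color": ["red", "blue", "red", "green", "blue", "red"]})

ohe = OneHotEncoder(min_obs=2)         # "green" appears once -> folded into the rare-label bucket
X_sparse = ohe.fit_transform(df.copy())
print(X_sparse.shape)                  # (6, number_of_dummy_columns)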
convert_to_dtype | (data, dtype) |
A utility function converting xarray, pandas, or NumPy data to a given dtype.
Parameters
----------
data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
or numpy.ndarray
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
|
A utility function converting xarray, pandas, or NumPy data to a given dtype. | def convert_to_dtype(data, dtype):
"""
A utility function converting xarray, pandas, or NumPy data to a given dtype.
Parameters
----------
data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
or numpy.ndarray
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
"""
if dtype is None: # Don't convert the data type.
return data
return data.astype(dtype) | [
"def",
"convert_to_dtype",
"(",
"data",
",",
"dtype",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"# Don't convert the data type.",
"return",
"data",
"return",
"data",
".",
"astype",
"(",
"dtype",
")"
] | [
44,
0
] | [
58,
29
] | python | en | ['en', 'error', 'th'] | False |
create_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs) |
Creates a most-recent-to-oldest mosaic of the input dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Creates a most-recent-to-oldest mosaic of the input dataset. | def create_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):
"""
Creates a most-recent-to-oldest mosaic of the input dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
dataset_in = dataset_in.copy(deep=True)
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
# Mask data with clean_mask. All values where clean_mask==False are set to no_data.
for key in list(dataset_in.data_vars):
dataset_in[key].values[np.invert(clean_mask)] = no_data
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
time_slices = reversed(
range(len(dataset_in.time))) if 'reverse_time' in kwargs else range(len(dataset_in.time))
for index in time_slices:
dataset_slice = dataset_in.isel(time=index).drop('time')
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
utilities.clear_attrs(dataset_out)
else:
for key in list(dataset_in.data_vars):
dataset_out[key].values[dataset_out[key].values == -9999] = dataset_slice[key].values[dataset_out[key]
.values == -9999]
dataset_out[key].attrs = OrderedDict()
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"intermediate_product",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"dataset_in",
"=",
"dataset_in",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"# Mask data with clean_mask. All values where clean_mask==False are set to no_data.",
"for",
"key",
"in",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
":",
"dataset_in",
"[",
"key",
"]",
".",
"values",
"[",
"np",
".",
"invert",
"(",
"clean_mask",
")",
"]",
"=",
"no_data",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"if",
"intermediate_product",
"is",
"not",
"None",
":",
"dataset_out",
"=",
"intermediate_product",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"else",
":",
"dataset_out",
"=",
"None",
"time_slices",
"=",
"reversed",
"(",
"range",
"(",
"len",
"(",
"dataset_in",
".",
"time",
")",
")",
")",
"if",
"'reverse_time'",
"in",
"kwargs",
"else",
"range",
"(",
"len",
"(",
"dataset_in",
".",
"time",
")",
")",
"for",
"index",
"in",
"time_slices",
":",
"dataset_slice",
"=",
"dataset_in",
".",
"isel",
"(",
"time",
"=",
"index",
")",
".",
"drop",
"(",
"'time'",
")",
"if",
"dataset_out",
"is",
"None",
":",
"dataset_out",
"=",
"dataset_slice",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"utilities",
".",
"clear_attrs",
"(",
"dataset_out",
")",
"else",
":",
"for",
"key",
"in",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
":",
"dataset_out",
"[",
"key",
"]",
".",
"values",
"[",
"dataset_out",
"[",
"key",
"]",
".",
"values",
"==",
"-",
"9999",
"]",
"=",
"dataset_slice",
"[",
"key",
"]",
".",
"values",
"[",
"dataset_out",
"[",
"key",
"]",
".",
"values",
"==",
"-",
"9999",
"]",
"dataset_out",
"[",
"key",
"]",
".",
"attrs",
"=",
"OrderedDict",
"(",
")",
"# Handle datatype conversions.",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
65,
0
] | [
127,
22
] | python | en | ['en', 'error', 'th'] | False |
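A self-contained sketch of create_mosaic on a tiny synthetic cube, assuming the datacube-style helpers in the same module (utilities, restore_or_convert_dtypes) are importable; the variable names, sizes, and values are invented, and the clean mask keeps every pixel.

import numpy as np
import xarray as xr

# Invented 2-date, 3x3 cube with a single "red" measurement band.
times = np.array(["2020-01-01", "2020-02-01"], dtype="datetime64[ns]")
lats  = np.linspace(0.0, 0.02, 3)
lons  = np.linspace(30.0, 30.02, 3)

red = xr.DataArray(
    np.random.randint(0, 3000, (2, 3, 3)).astype(np.int16),
    dims=("time", "latitude", "longitude"),
    coords={"time": times, "latitude": lats, "longitude": lons},
)
ds = xr.Dataset({"red": red})

clean = np.ones((2, 3, 3), dtype=bool)   # keep every pixel in every slice
mosaic = create_mosaic(ds, clean_mask=clean, dtype=np.int16)
print(mosaic.red.shape)                  # (3, 3) -- the time dimension is collapsed

create_mean_mosaic and create_median_mosaic (the next records) accept the same dataset_in/clean_mask/no_data/dtype arguments and differ in how the time dimension is reduced.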
create_mean_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs) |
Method for calculating the mean pixel value for a given dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Method for calculating the mean pixel value for a given dataset. | def create_mean_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs):
"""
Method for calculating the mean pixel value for a given dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and scan lines.
dataset_in = dataset_in.where((dataset_in != no_data) & (clean_mask))
dataset_out = dataset_in.mean(dim='time', skipna=True, keep_attrs=False)
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_mean_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"# Mask out clouds and scan lines.",
"dataset_in",
"=",
"dataset_in",
".",
"where",
"(",
"(",
"dataset_in",
"!=",
"no_data",
")",
"&",
"(",
"clean_mask",
")",
")",
"dataset_out",
"=",
"dataset_in",
".",
"mean",
"(",
"dim",
"=",
"'time'",
",",
"skipna",
"=",
"True",
",",
"keep_attrs",
"=",
"False",
")",
"# Handle datatype conversions.",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
129,
0
] | [
171,
22
] | python | en | ['en', 'error', 'th'] | False |
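A minimal sketch of the mean-composite pattern used by create_mean_mosaic above. The Dataset `ds` and the boolean `clean_mask` below are invented for illustration and are not part of the row above; the masking-then-reduce steps mirror the function.

import numpy as np
import xarray as xr

# Illustrative inputs: one band, three time steps, a 2x2 spatial grid.
ds = xr.Dataset(
    {"red": (("time", "latitude", "longitude"),
             np.arange(12, dtype="int16").reshape(3, 2, 2))},
    coords={"time": [0, 1, 2], "latitude": [0.0, 0.1], "longitude": [10.0, 10.1]},
)
clean_mask = xr.DataArray(np.ones((3, 2, 2), dtype=bool),
                          dims=("time", "latitude", "longitude"))  # keep every pixel

# Same pattern as above: mask unwanted values to NaN, then take the mean over time.
masked = ds.where((ds != -9999) & clean_mask)
composite = masked.mean(dim="time", skipna=True)
print(composite["red"].values)  # [[4. 5.] [6. 7.]]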
create_median_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs) |
Method for calculating the median pixel value for a given dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Method for calculating the median pixel value for a given dataset. | def create_median_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, **kwargs):
"""
Method for calculating the median pixel value for a given dataset.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and Landsat 7 scan lines.
dataset_in = dataset_in.where((dataset_in != no_data) & (clean_mask))
dataset_out = dataset_in.median(dim='time', skipna=True, keep_attrs=False)
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_median_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"# Mask out clouds and Landsat 7 scan lines.",
"dataset_in",
"=",
"dataset_in",
".",
"where",
"(",
"(",
"dataset_in",
"!=",
"no_data",
")",
"&",
"(",
"clean_mask",
")",
")",
"dataset_out",
"=",
"dataset_in",
".",
"median",
"(",
"dim",
"=",
"'time'",
",",
"skipna",
"=",
"True",
",",
"keep_attrs",
"=",
"False",
")",
"# Handle datatype conversions.",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
174,
0
] | [
216,
22
] | python | en | ['en', 'error', 'th'] | False |
create_max_ndvi_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs) |
Method for calculating the pixel value for the max ndvi value.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Method for calculating the pixel value for the max ndvi value. | def create_max_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):
"""
Method for calculating the pixel value for the max ndvi value.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
dataset_in = dataset_in.copy(deep=True)
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and scan lines.
dataset_in = dataset_in.where((dataset_in != -9999) & clean_mask)
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
time_slices = range(len(dataset_in.time))
for timeslice in time_slices:
dataset_slice = dataset_in.isel(time=timeslice).drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice, ::]] = -1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
utilities.clear_attrs(dataset_out)
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] = \
dataset_slice[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values]
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_max_ndvi_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"intermediate_product",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"dataset_in",
"=",
"dataset_in",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"# Mask out clouds and scan lines.",
"dataset_in",
"=",
"dataset_in",
".",
"where",
"(",
"(",
"dataset_in",
"!=",
"-",
"9999",
")",
"&",
"clean_mask",
")",
"if",
"intermediate_product",
"is",
"not",
"None",
":",
"dataset_out",
"=",
"intermediate_product",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"else",
":",
"dataset_out",
"=",
"None",
"time_slices",
"=",
"range",
"(",
"len",
"(",
"dataset_in",
".",
"time",
")",
")",
"for",
"timeslice",
"in",
"time_slices",
":",
"dataset_slice",
"=",
"dataset_in",
".",
"isel",
"(",
"time",
"=",
"timeslice",
")",
".",
"drop",
"(",
"'time'",
")",
"ndvi",
"=",
"(",
"dataset_slice",
".",
"nir",
"-",
"dataset_slice",
".",
"red",
")",
"/",
"(",
"dataset_slice",
".",
"nir",
"+",
"dataset_slice",
".",
"red",
")",
"ndvi",
".",
"values",
"[",
"np",
".",
"invert",
"(",
"clean_mask",
")",
"[",
"timeslice",
",",
":",
":",
"]",
"]",
"=",
"-",
"1000000000",
"dataset_slice",
"[",
"'ndvi'",
"]",
"=",
"ndvi",
"if",
"dataset_out",
"is",
"None",
":",
"dataset_out",
"=",
"dataset_slice",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"utilities",
".",
"clear_attrs",
"(",
"dataset_out",
")",
"else",
":",
"for",
"key",
"in",
"list",
"(",
"dataset_slice",
".",
"data_vars",
")",
":",
"dataset_out",
"[",
"key",
"]",
".",
"values",
"[",
"dataset_slice",
".",
"ndvi",
".",
"values",
">",
"dataset_out",
".",
"ndvi",
".",
"values",
"]",
"=",
"dataset_slice",
"[",
"key",
"]",
".",
"values",
"[",
"dataset_slice",
".",
"ndvi",
".",
"values",
">",
"dataset_out",
".",
"ndvi",
".",
"values",
"]",
"# Handle datatype conversions.",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
219,
0
] | [
280,
22
] | python | en | ['en', 'error', 'th'] | False |
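A small NumPy-only sketch of the per-pixel selection performed by create_max_ndvi_mosaic above: compute NDVI for every time slice and keep each band's value from the greenest observation. The toy reflectance arrays are invented for illustration.

import numpy as np

# Toy (time, y, x) reflectance stacks; the numbers are made up.
red = np.array([[[0.2, 0.3]],
                [[0.1, 0.4]]])
nir = np.array([[[0.5, 0.4]],
                [[0.6, 0.3]]])

ndvi = (nir - red) / (nir + red)      # NDVI for every time slice
best_t = np.argmax(ndvi, axis=0)      # index of the greenest observation per pixel

rows, cols = np.indices(best_t.shape)
composite_red = red[best_t, rows, cols]   # pick red from the greenest time slice
print(best_t)          # [[1 0]]
print(composite_red)   # [[0.1 0.3]]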
create_min_ndvi_mosaic | (dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs) |
Method for calculating the pixel value for the min ndvi value.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Method for calculating the pixel value for the min ndvi value. | def create_min_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, dtype=None, intermediate_product=None, **kwargs):
"""
Method for calculating the pixel value for the min ndvi value.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
dataset_in = dataset_in.copy(deep=True)
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and scan lines.
dataset_in = dataset_in.where((dataset_in != -9999) & clean_mask)
if intermediate_product is not None:
dataset_out = intermediate_product.copy(deep=True)
else:
dataset_out = None
time_slices = range(len(dataset_in.time))
for timeslice in time_slices:
dataset_slice = dataset_in.isel(time=timeslice).drop('time')
ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
ndvi.values[np.invert(clean_mask)[timeslice, ::]] = 1000000000
dataset_slice['ndvi'] = ndvi
if dataset_out is None:
dataset_out = dataset_slice.copy(deep=True)
utilities.clear_attrs(dataset_out)
else:
for key in list(dataset_slice.data_vars):
dataset_out[key].values[dataset_slice.ndvi.values <
dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values <
dataset_out.ndvi.values]
# Handle datatype conversions.
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_min_ndvi_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"intermediate_product",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"dataset_in",
"=",
"dataset_in",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"# Mask out clouds and scan lines.",
"dataset_in",
"=",
"dataset_in",
".",
"where",
"(",
"(",
"dataset_in",
"!=",
"-",
"9999",
")",
"&",
"clean_mask",
")",
"if",
"intermediate_product",
"is",
"not",
"None",
":",
"dataset_out",
"=",
"intermediate_product",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"else",
":",
"dataset_out",
"=",
"None",
"time_slices",
"=",
"range",
"(",
"len",
"(",
"dataset_in",
".",
"time",
")",
")",
"for",
"timeslice",
"in",
"time_slices",
":",
"dataset_slice",
"=",
"dataset_in",
".",
"isel",
"(",
"time",
"=",
"timeslice",
")",
".",
"drop",
"(",
"'time'",
")",
"ndvi",
"=",
"(",
"dataset_slice",
".",
"nir",
"-",
"dataset_slice",
".",
"red",
")",
"/",
"(",
"dataset_slice",
".",
"nir",
"+",
"dataset_slice",
".",
"red",
")",
"ndvi",
".",
"values",
"[",
"np",
".",
"invert",
"(",
"clean_mask",
")",
"[",
"timeslice",
",",
":",
":",
"]",
"]",
"=",
"1000000000",
"dataset_slice",
"[",
"'ndvi'",
"]",
"=",
"ndvi",
"if",
"dataset_out",
"is",
"None",
":",
"dataset_out",
"=",
"dataset_slice",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"utilities",
".",
"clear_attrs",
"(",
"dataset_out",
")",
"else",
":",
"for",
"key",
"in",
"list",
"(",
"dataset_slice",
".",
"data_vars",
")",
":",
"dataset_out",
"[",
"key",
"]",
".",
"values",
"[",
"dataset_slice",
".",
"ndvi",
".",
"values",
"<",
"dataset_out",
".",
"ndvi",
".",
"values",
"]",
"=",
"dataset_slice",
"[",
"key",
"]",
".",
"values",
"[",
"dataset_slice",
".",
"ndvi",
".",
"values",
"<",
"dataset_out",
".",
"ndvi",
".",
"values",
"]",
"# Handle datatype conversions.",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
283,
0
] | [
345,
22
] | python | en | ['en', 'error', 'th'] | False |
unpack_bits | (land_cover_endcoding, data_array, cover_type) |
Description:
Unpack bits for end of ls7 and ls8 functions
-----
Input:
land_cover_encoding(dict hash table) land cover encoding provided by ls7 or ls8
data_array( xarray DataArray)
cover_type(String) type of cover
Output:
unpacked DataArray
|
Description:
Unpack bits for end of ls7 and ls8 functions
-----
Input:
land_cover_encoding(dict hash table) land cover encoding provided by ls7 or ls8
data_array( xarray DataArray)
cover_type(String) type of cover
Output:
unpacked DataArray
| def unpack_bits(land_cover_endcoding, data_array, cover_type):
"""
Description:
Unpack bits for end of ls7 and ls8 functions
-----
Input:
land_cover_encoding(dict hash table) land cover encoding provided by ls7 or ls8
data_array( xarray DataArray)
cover_type(String) type of cover
Output:
unpacked DataArray
"""
boolean_mask = np.isin(data_array.values, land_cover_endcoding[cover_type])
return xr.DataArray(boolean_mask.astype(bool),
coords = data_array.coords,
dims = data_array.dims,
name = cover_type + "_mask",
attrs = data_array.attrs) | [
"def",
"unpack_bits",
"(",
"land_cover_endcoding",
",",
"data_array",
",",
"cover_type",
")",
":",
"boolean_mask",
"=",
"np",
".",
"isin",
"(",
"data_array",
".",
"values",
",",
"land_cover_endcoding",
"[",
"cover_type",
"]",
")",
"return",
"xr",
".",
"DataArray",
"(",
"boolean_mask",
".",
"astype",
"(",
"bool",
")",
",",
"coords",
"=",
"data_array",
".",
"coords",
",",
"dims",
"=",
"data_array",
".",
"dims",
",",
"name",
"=",
"cover_type",
"+",
"\"_mask\"",
",",
"attrs",
"=",
"data_array",
".",
"attrs",
")"
] | [
347,
0
] | [
364,
49
] | python | en | ['en', 'error', 'th'] | False |
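unpack_bits above is essentially a set-membership test via np.isin wrapped back into an xarray.DataArray. A sketch with a hypothetical encoding table follows; the QA codes and names are placeholders, not values taken from the row above.

import numpy as np
import xarray as xr

# Hypothetical encoding table mapping cover types to QA codes (illustrative only).
land_cover_encoding = {"clear": [322, 386], "water": [324, 388]}

qa = xr.DataArray(np.array([[322, 324], [999, 386]]), dims=("y", "x"), name="pixel_qa")
clear = xr.DataArray(np.isin(qa.values, land_cover_encoding["clear"]),
                     coords=qa.coords, dims=qa.dims, name="clear_mask")
print(clear.values)   # [[ True False] [False  True]]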
create_hdmedians_multiple_band_mosaic | (dataset_in,
clean_mask=None,
no_data=-9999,
dtype=None,
intermediate_product=None,
operation="median",
**kwargs) |
Calculates the geomedian or geomedoid using a multi-band processing method.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude (in that order)
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
operation: str in ['median', 'medoid']
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
|
Calculates the geomedian or geomedoid using a multi-band processing method. | def create_hdmedians_multiple_band_mosaic(dataset_in,
clean_mask=None,
no_data=-9999,
dtype=None,
intermediate_product=None,
operation="median",
**kwargs):
"""
Calculates the geomedian or geomedoid using a multi-band processing method.
Parameters
----------
dataset_in: xarray.Dataset
A dataset retrieved from the Data Cube; should contain:
coordinates: time, latitude, longitude (in that order)
variables: variables to be mosaicked (e.g. red, green, and blue bands)
clean_mask: np.ndarray
An ndarray of the same shape as `dataset_in` - specifying which values to mask out.
If no clean mask is specified, then all values are kept during compositing.
no_data: int or float
The no data value.
dtype: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
operation: str in ['median', 'medoid']
Returns
-------
dataset_out: xarray.Dataset
Composited data with the format:
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Default to masking nothing.
if clean_mask is None:
clean_mask = create_default_clean_mask(dataset_in)
assert operation in ['median', 'medoid'], "Only median and medoid operations are supported."
# Save dtypes because masking with Dataset.where() converts to float64.
band_list = list(dataset_in.data_vars)
dataset_in_dtypes = {}
for band in band_list:
dataset_in_dtypes[band] = dataset_in[band].dtype
# Mask out clouds and scan lines.
dataset_in = dataset_in.where((dataset_in != no_data) & clean_mask)
arrays = [dataset_in[band] for band in band_list]
stacked_data = np.stack(arrays)
bands_shape, time_slices_shape, lat_shape, lon_shape = stacked_data.shape[0], \
stacked_data.shape[1], stacked_data.shape[2], \
stacked_data.shape[3]
# Reshape to remove lat/lon
reshaped_stack = stacked_data.reshape(bands_shape, time_slices_shape,
lat_shape * lon_shape)
# Build zeroes array across time slices.
hdmedians_result = np.zeros((bands_shape, lat_shape * lon_shape))
# For each pixel (lat/lon combination), find the geomedian or geomedoid across time.
for x in range(reshaped_stack.shape[2]):
try:
hdmedians_result[:, x] = hd.nangeomedian(
reshaped_stack[:, :, x], axis=1) if operation == "median" else hd.nanmedoid(
reshaped_stack[:, :, x], axis=1)
except ValueError as e:
# If all bands have nan values across time, the geomedians are nans.
hdmedians_result[:, x] = np.full((bands_shape), np.nan)
output_dict = {
value: (('latitude', 'longitude'), hdmedians_result[index, :].reshape(lat_shape, lon_shape))
for index, value in enumerate(band_list)
}
dataset_out = xr.Dataset(output_dict,
coords={'latitude': dataset_in['latitude'],
'longitude': dataset_in['longitude']})
dataset_out = restore_or_convert_dtypes(dtype, band_list, dataset_in_dtypes, dataset_out, no_data)
return dataset_out | [
"def",
"create_hdmedians_multiple_band_mosaic",
"(",
"dataset_in",
",",
"clean_mask",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"dtype",
"=",
"None",
",",
"intermediate_product",
"=",
"None",
",",
"operation",
"=",
"\"median\"",
",",
"*",
"*",
"kwargs",
")",
":",
"# Default to masking nothing.",
"if",
"clean_mask",
"is",
"None",
":",
"clean_mask",
"=",
"create_default_clean_mask",
"(",
"dataset_in",
")",
"assert",
"operation",
"in",
"[",
"'median'",
",",
"'medoid'",
"]",
",",
"\"Only median and medoid operations are supported.\"",
"# Save dtypes because masking with Dataset.where() converts to float64.",
"band_list",
"=",
"list",
"(",
"dataset_in",
".",
"data_vars",
")",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"band_list",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"dataset_in",
"[",
"band",
"]",
".",
"dtype",
"# Mask out clouds and scan lines.",
"dataset_in",
"=",
"dataset_in",
".",
"where",
"(",
"(",
"dataset_in",
"!=",
"no_data",
")",
"&",
"clean_mask",
")",
"arrays",
"=",
"[",
"dataset_in",
"[",
"band",
"]",
"for",
"band",
"in",
"band_list",
"]",
"stacked_data",
"=",
"np",
".",
"stack",
"(",
"arrays",
")",
"bands_shape",
",",
"time_slices_shape",
",",
"lat_shape",
",",
"lon_shape",
"=",
"stacked_data",
".",
"shape",
"[",
"0",
"]",
",",
"stacked_data",
".",
"shape",
"[",
"1",
"]",
",",
"stacked_data",
".",
"shape",
"[",
"2",
"]",
",",
"stacked_data",
".",
"shape",
"[",
"3",
"]",
"# Reshape to remove lat/lon",
"reshaped_stack",
"=",
"stacked_data",
".",
"reshape",
"(",
"bands_shape",
",",
"time_slices_shape",
",",
"lat_shape",
"*",
"lon_shape",
")",
"# Build zeroes array across time slices.",
"hdmedians_result",
"=",
"np",
".",
"zeros",
"(",
"(",
"bands_shape",
",",
"lat_shape",
"*",
"lon_shape",
")",
")",
"# For each pixel (lat/lon combination), find the geomedian or geomedoid across time.",
"for",
"x",
"in",
"range",
"(",
"reshaped_stack",
".",
"shape",
"[",
"2",
"]",
")",
":",
"try",
":",
"hdmedians_result",
"[",
":",
",",
"x",
"]",
"=",
"hd",
".",
"nangeomedian",
"(",
"reshaped_stack",
"[",
":",
",",
":",
",",
"x",
"]",
",",
"axis",
"=",
"1",
")",
"if",
"operation",
"==",
"\"median\"",
"else",
"hd",
".",
"nanmedoid",
"(",
"reshaped_stack",
"[",
":",
",",
":",
",",
"x",
"]",
",",
"axis",
"=",
"1",
")",
"except",
"ValueError",
"as",
"e",
":",
"# If all bands have nan values across time, the geomedians are nans.",
"hdmedians_result",
"[",
":",
",",
"x",
"]",
"=",
"np",
".",
"full",
"(",
"(",
"bands_shape",
")",
",",
"np",
".",
"nan",
")",
"output_dict",
"=",
"{",
"value",
":",
"(",
"(",
"'latitude'",
",",
"'longitude'",
")",
",",
"hdmedians_result",
"[",
"index",
",",
":",
"]",
".",
"reshape",
"(",
"lat_shape",
",",
"lon_shape",
")",
")",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"band_list",
")",
"}",
"dataset_out",
"=",
"xr",
".",
"Dataset",
"(",
"output_dict",
",",
"coords",
"=",
"{",
"'latitude'",
":",
"dataset_in",
"[",
"'latitude'",
"]",
",",
"'longitude'",
":",
"dataset_in",
"[",
"'longitude'",
"]",
"}",
")",
"dataset_out",
"=",
"restore_or_convert_dtypes",
"(",
"dtype",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
"return",
"dataset_out"
] | [
412,
0
] | [
486,
22
] | python | en | ['en', 'error', 'th'] | False |
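A sketch of the core hdmedians call used by create_hdmedians_multiple_band_mosaic above, assuming the hdmedians package (imported as `hd` in the row) is installed. The per-pixel observation matrix is invented for illustration; rows are bands and columns are time, matching the (bands, time) slices passed to hd.nangeomedian(..., axis=1) above.

import numpy as np
import hdmedians as hd   # assumes the hdmedians package used above is available

obs = np.array([[0.10, 0.12, 0.09, 0.11],    # red over four times
                [0.20, 0.22, 0.19, 0.21],    # green
                [0.30, 0.31, 0.28, 0.29]])   # blue
geomedian = hd.nangeomedian(obs, axis=1)     # one value per band for this pixel
print(geomedian)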
restore_or_convert_dtypes | (dtype_for_all, band_list, dataset_in_dtypes, dataset_out, no_data) |
Restores original datatypes to data variables in Datasets
output by mosaic functions.
Parameters
----------
dtype_for_all: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
band_list: list-like
A list-like of the data variables in the dataset.
dataset_in_dtypes: dict
A dictionary mapping band names to datatypes.
no_data: int or float
The no data value.
Returns
-------
dataset_out: xarray.Dataset
The output Dataset.
|
Restores original datatypes to data variables in Datasets
output by mosaic functions. | def restore_or_convert_dtypes(dtype_for_all, band_list, dataset_in_dtypes, dataset_out, no_data):
"""
Restores original datatypes to data variables in Datasets
output by mosaic functions.
Parameters
----------
dtype_for_all: str or numpy.dtype
A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype (e.g.
np.int16, np.float32) to convert the data to.
band_list: list-like
A list-like of the data variables in the dataset.
dataset_in_dtypes: dict
A dictionary mapping band names to datatypes.
no_data: int or float
The no data value.
Returns
-------
dataset_out: xarray.Dataset
The output Dataset.
"""
if dtype_for_all is not None:
# Integer types can't represent nan.
if np.issubdtype(dtype_for_all, np.integer): # This also works for Python int type.
utilities.nan_to_num(dataset_out, no_data)
convert_to_dtype(dataset_out, dtype_for_all)
else: # Restore dtypes to state before masking.
for band in band_list:
band_dtype = dataset_in_dtypes[band]
if np.issubdtype(band_dtype, np.integer):
utilities.nan_to_num(dataset_out[band], no_data)
dataset_out[band] = dataset_out[band].astype(band_dtype)
return dataset_out | [
"def",
"restore_or_convert_dtypes",
"(",
"dtype_for_all",
",",
"band_list",
",",
"dataset_in_dtypes",
",",
"dataset_out",
",",
"no_data",
")",
":",
"if",
"dtype_for_all",
"is",
"not",
"None",
":",
"# Integer types can't represent nan.",
"if",
"np",
".",
"issubdtype",
"(",
"dtype_for_all",
",",
"np",
".",
"integer",
")",
":",
"# This also works for Python int type.",
"utilities",
".",
"nan_to_num",
"(",
"dataset_out",
",",
"no_data",
")",
"convert_to_dtype",
"(",
"dataset_out",
",",
"dtype_for_all",
")",
"else",
":",
"# Restore dtypes to state before masking.",
"for",
"band",
"in",
"band_list",
":",
"band_dtype",
"=",
"dataset_in_dtypes",
"[",
"band",
"]",
"if",
"np",
".",
"issubdtype",
"(",
"band_dtype",
",",
"np",
".",
"integer",
")",
":",
"utilities",
".",
"nan_to_num",
"(",
"dataset_out",
"[",
"band",
"]",
",",
"no_data",
")",
"dataset_out",
"[",
"band",
"]",
"=",
"dataset_out",
"[",
"band",
"]",
".",
"astype",
"(",
"band_dtype",
")",
"return",
"dataset_out"
] | [
488,
0
] | [
521,
22
] | python | en | ['en', 'error', 'th'] | False |
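The reason restore_or_convert_dtypes exists: Dataset.where() introduces NaN and therefore promotes integer bands to float64. A minimal sketch of the fill-then-cast round trip, with fillna standing in for the utilities.nan_to_num helper that is not shown in this row:

import numpy as np
import xarray as xr

da = xr.DataArray(np.array([[1, 2], [3, -9999]], dtype="int16"), dims=("y", "x"))
masked = da.where(da != -9999)   # NaN forces a float64 result
print(masked.dtype)              # float64

# Fill NaN with the no-data value, then cast back to the saved dtype.
restored = masked.fillna(-9999).astype("int16")
print(restored.dtype, restored.values.tolist())   # int16 [[1, 2], [3, -9999]]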
CalculateGeneratorInputInfo | (params) | Calculate the generator specific info that gets fed to input (called by
gyp). | Calculate the generator specific info that gets fed to input (called by
gyp). | def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
} | [
"def",
"CalculateGeneratorInputInfo",
"(",
"params",
")",
":",
"generator_flags",
"=",
"params",
".",
"get",
"(",
"'generator_flags'",
",",
"{",
"}",
")",
"if",
"generator_flags",
".",
"get",
"(",
"'adjust_static_libraries'",
",",
"False",
")",
":",
"global",
"generator_wants_static_library_dependencies_adjusted",
"generator_wants_static_library_dependencies_adjusted",
"=",
"True",
"toplevel",
"=",
"params",
"[",
"'options'",
"]",
".",
"toplevel_dir",
"generator_dir",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"params",
"[",
"'options'",
"]",
".",
"generator_output",
"or",
"'.'",
")",
"# output_dir: relative path from generator_dir to the build directory.",
"output_dir",
"=",
"generator_flags",
".",
"get",
"(",
"'output_dir'",
",",
"'out'",
")",
"qualified_out_dir",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"toplevel",
",",
"generator_dir",
",",
"output_dir",
",",
"'gypfiles'",
")",
")",
"global",
"generator_filelist_paths",
"generator_filelist_paths",
"=",
"{",
"'toplevel'",
":",
"toplevel",
",",
"'qualified_out_dir'",
":",
"qualified_out_dir",
",",
"}"
] | [
53,
0
] | [
71,
3
] | python | en | ['en', 'en', 'en'] | True |
features | (image, channel, levels=9, start_size=(dim_rows, dim_cols), ) |
Extracts features by down-scaling the image levels times,
transforms the image by applying the function channel to
each scaled version and computing the difference between
the scaled, transformed versions.
image : the image
channel : a function which transforms the image into
another image of the same size
levels : number of scaling levels
start_size : tuple. The size of the biggest image in
the scaling pyramid. The image is first
scaled to that size and then scaled by half
levels times. Therefore, both entries in
start_size must be divisible by 2^levels.
|
Extracts features by down-scaling the image levels times,
transforms the image by applying the function channel to
each scaled version and computing the difference between
the scaled, transformed versions.
image : the image
channel : a function which transforms the image into
another image of the same size
levels : number of scaling levels
start_size : tuple. The size of the biggest image in
the scaling pyramid. The image is first
scaled to that size and then scaled by half
levels times. Therefore, both entries in
start_size must be divisible by 2^levels.
| def features(image, channel, levels=9, start_size=(dim_rows, dim_cols), ):
#def features(image, channel, levels=9, start_size=(1983, 1088), ):
"""
Extracts features by down-scaling the image levels times,
transforms the image by applying the function channel to
each scaled version and computing the difference between
the scaled, transformed versions.
image : the image
channel : a function which transforms the image into
another image of the same size
levels : number of scaling levels
start_size : tuple. The size of the biggest image in
the scaling pyramid. The image is first
scaled to that size and then scaled by half
levels times. Therefore, both entries in
start_size must be divisible by 2^levels.
"""
image = channel(image)
if image.shape != start_size:
image = cv2.resize(image, dsize=start_size)
scales = [image]
for l in xrange(levels - 1):
logger.debug("scaling at level %d", l)
scales.append(cv2.pyrDown(scales[-1]))
features = []
for i in xrange(1, levels - 5):
big = scales[i]
for j in (3,4):
logger.debug("computing features for levels %d and %d", i, i + j)
small = scales[i + j]
srcsize = small.shape[1],small.shape[0]
dstsize = big.shape[1],big.shape[0]
logger.debug("Shape source: %s, Shape target :%s", srcsize, dstsize)
scaled = cv2.resize(src=small, dsize=dstsize)
features.append(((i+1,j+1),cv2.absdiff(big, scaled)))
return features | [
"def",
"features",
"(",
"image",
",",
"channel",
",",
"levels",
"=",
"9",
",",
"start_size",
"=",
"(",
"dim_rows",
",",
"dim_cols",
")",
",",
")",
":",
"#def features(image, channel, levels=9, start_size=(1983, 1088), ):",
"image",
"=",
"channel",
"(",
"image",
")",
"if",
"image",
".",
"shape",
"!=",
"start_size",
":",
"image",
"=",
"cv2",
".",
"resize",
"(",
"image",
",",
"dsize",
"=",
"start_size",
")",
"scales",
"=",
"[",
"image",
"]",
"for",
"l",
"in",
"xrange",
"(",
"levels",
"-",
"1",
")",
":",
"logger",
".",
"debug",
"(",
"\"scaling at level %d\"",
",",
"l",
")",
"scales",
".",
"append",
"(",
"cv2",
".",
"pyrDown",
"(",
"scales",
"[",
"-",
"1",
"]",
")",
")",
"features",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"levels",
"-",
"5",
")",
":",
"big",
"=",
"scales",
"[",
"i",
"]",
"for",
"j",
"in",
"(",
"3",
",",
"4",
")",
":",
"logger",
".",
"debug",
"(",
"\"computing features for levels %d and %d\"",
",",
"i",
",",
"i",
"+",
"j",
")",
"small",
"=",
"scales",
"[",
"i",
"+",
"j",
"]",
"srcsize",
"=",
"small",
".",
"shape",
"[",
"1",
"]",
",",
"small",
".",
"shape",
"[",
"0",
"]",
"dstsize",
"=",
"big",
".",
"shape",
"[",
"1",
"]",
",",
"big",
".",
"shape",
"[",
"0",
"]",
"logger",
".",
"debug",
"(",
"\"Shape source: %s, Shape target :%s\"",
",",
"srcsize",
",",
"dstsize",
")",
"scaled",
"=",
"cv2",
".",
"resize",
"(",
"src",
"=",
"small",
",",
"dsize",
"=",
"dstsize",
")",
"features",
".",
"append",
"(",
"(",
"(",
"i",
"+",
"1",
",",
"j",
"+",
"1",
")",
",",
"cv2",
".",
"absdiff",
"(",
"big",
",",
"scaled",
")",
")",
")",
"return",
"features"
] | [
23,
0
] | [
61,
16
] | python | en | ['en', 'error', 'th'] | False |
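A compact sketch of the center-surround scheme in features() above: build a Gaussian pyramid with cv2.pyrDown, upsample a coarse level back to a finer one, and take the absolute difference. The random image and the chosen levels are illustrative assumptions.

import cv2
import numpy as np

# Illustrative grayscale image; the size stays divisible by 2**levels as the docstring asks.
img = np.random.randint(0, 256, (256, 256), dtype=np.uint8)

levels = 5
scales = [img]
for _ in range(levels - 1):
    scales.append(cv2.pyrDown(scales[-1]))   # Gaussian pyramid, halving each level

# Center-surround difference: upsample the coarse "surround" back to the finer "center".
center, surround = scales[1], scales[3]
surround_up = cv2.resize(surround, (center.shape[1], center.shape[0]))
feature = cv2.absdiff(center, surround_up)
print(feature.shape)   # (128, 128)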
intensity | (image) |
Converts a color image into grayscale.
Used as `channel' argument to function `features'
|
Converts a color image into grayscale.
Used as `channel' argument to function `features'
| def intensity(image):
"""
Converts a color image into grayscale.
Used as `channel' argument to function `features'
"""
return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) | [
"def",
"intensity",
"(",
"image",
")",
":",
"return",
"cv2",
".",
"cvtColor",
"(",
"image",
",",
"cv2",
".",
"COLOR_RGB2GRAY",
")"
] | [
63,
0
] | [
68,
47
] | python | en | ['en', 'error', 'th'] | False |
makeGaborFilter | (dims, lambd, theta, psi, sigma, gamma) |
Creates a Gabor filter (an array) with parameters lambd, theta,
psi, sigma, and gamma of size dims. Returns a function which
can be passed to `features' as `channel' argument.
In some versions of OpenCV, sizes greater than (11,11) will lead
to segfaults (see http://code.opencv.org/issues/2644).
|
Creates a Gabor filter (an array) with parameters lambd, theta,
psi, sigma, and gamma of size dims. Returns a function which
can be passed to `features' as `channel' argument.
In some versions of OpenCV, sizes greater than (11,11) will lead
to segfaults (see http://code.opencv.org/issues/2644).
| def makeGaborFilter(dims, lambd, theta, psi, sigma, gamma):
"""
Creates a Gabor filter (an array) with parameters lambd, theta,
psi, sigma, and gamma of size dims. Returns a function which
can be passed to `features' as `channel' argument.
In some versions of OpenCV, sizes greater than (11,11) will lead
to segfaults (see http://code.opencv.org/issues/2644).
"""
def xpf(i,j):
return i*math.cos(theta) + j*math.sin(theta)
def ypf(i,j):
return -i*math.sin(theta) + j*math.cos(theta)
def gabor(i,j):
xp = xpf(i,j)
yp = ypf(i,j)
return math.exp(-(xp**2 + gamma**2*yp**2)/2*sigma**2) * math.cos(2*math.pi*xp/lambd + psi)
halfwidth = dims[0]/2
halfheight = dims[1]/2
kernel = numpy.array([[gabor(halfwidth - i,halfheight - j) for j in range(dims[1])] for i in range(dims[1])])
def theFilter(image):
return cv2.filter2D(src = image, ddepth = -1, kernel = kernel, )
return theFilter | [
"def",
"makeGaborFilter",
"(",
"dims",
",",
"lambd",
",",
"theta",
",",
"psi",
",",
"sigma",
",",
"gamma",
")",
":",
"def",
"xpf",
"(",
"i",
",",
"j",
")",
":",
"return",
"i",
"*",
"math",
".",
"cos",
"(",
"theta",
")",
"+",
"j",
"*",
"math",
".",
"sin",
"(",
"theta",
")",
"def",
"ypf",
"(",
"i",
",",
"j",
")",
":",
"return",
"-",
"i",
"*",
"math",
".",
"sin",
"(",
"theta",
")",
"+",
"j",
"*",
"math",
".",
"cos",
"(",
"theta",
")",
"def",
"gabor",
"(",
"i",
",",
"j",
")",
":",
"xp",
"=",
"xpf",
"(",
"i",
",",
"j",
")",
"yp",
"=",
"ypf",
"(",
"i",
",",
"j",
")",
"return",
"math",
".",
"exp",
"(",
"-",
"(",
"xp",
"**",
"2",
"+",
"gamma",
"**",
"2",
"*",
"yp",
"**",
"2",
")",
"/",
"2",
"*",
"sigma",
"**",
"2",
")",
"*",
"math",
".",
"cos",
"(",
"2",
"*",
"math",
".",
"pi",
"*",
"xp",
"/",
"lambd",
"+",
"psi",
")",
"halfwidth",
"=",
"dims",
"[",
"0",
"]",
"/",
"2",
"halfheight",
"=",
"dims",
"[",
"1",
"]",
"/",
"2",
"kernel",
"=",
"numpy",
".",
"array",
"(",
"[",
"[",
"gabor",
"(",
"halfwidth",
"-",
"i",
",",
"halfheight",
"-",
"j",
")",
"for",
"j",
"in",
"range",
"(",
"dims",
"[",
"1",
"]",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"dims",
"[",
"1",
"]",
")",
"]",
")",
"def",
"theFilter",
"(",
"image",
")",
":",
"return",
"cv2",
".",
"filter2D",
"(",
"src",
"=",
"image",
",",
"ddepth",
"=",
"-",
"1",
",",
"kernel",
"=",
"kernel",
",",
")",
"return",
"theFilter"
] | [
70,
0
] | [
95,
17
] | python | en | ['en', 'error', 'th'] | False |
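As an aside, OpenCV also ships cv2.getGaborKernel, which can stand in for the hand-rolled kernel above. The sketch below is an alternative under that assumption, not the author's implementation; the parameter values simply mirror those used in gaborConspicuity, and the input image is random.

import math
import cv2
import numpy as np

# ksize, sigma, theta, lambd, gamma, psi (positional, in OpenCV's documented order)
kernel = cv2.getGaborKernel((10, 10), 2.5, math.pi / 4, 2.5, 0.5, math.pi / 2)

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8).astype(np.float32)
filtered = cv2.filter2D(src=img, ddepth=-1, kernel=kernel)
print(kernel.shape, filtered.shape)   # (10, 10) (64, 64)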
intensityConspicuity | (image) |
Creates the conspicuity map for the channel `intensity'.
|
Creates the conspicuity map for the channel `intensity'.
| def intensityConspicuity(image):
"""
Creates the conspicuity map for the channel `intensity'.
"""
fs = features(image = im, channel = intensity)
return sumNormalizedFeatures(fs) | [
"def",
"intensityConspicuity",
"(",
"image",
")",
":",
"fs",
"=",
"features",
"(",
"image",
"=",
"im",
",",
"channel",
"=",
"intensity",
")",
"return",
"sumNormalizedFeatures",
"(",
"fs",
")"
] | [
97,
0
] | [
102,
33
] | python | en | ['en', 'error', 'th'] | False |
gaborConspicuity | (image, steps) |
Creates the conspicuity map for the channel `orientations'.
|
Creates the conspicuity map for the channel `orientations'.
| def gaborConspicuity(image, steps):
"""
Creates the conspicuity map for the channel `orientations'.
"""
# gaborConspicuity_ = numpy.zeros((1088, 1983), numpy.uint8)
gaborConspicuity_ = numpy.zeros((dim_cols, dim_rows), numpy.uint8)
for step in range(steps):
theta = step * (math.pi/steps)
gaborFilter = makeGaborFilter(dims=(10,10), lambd=2.5, theta=theta, psi=math.pi/2, sigma=2.5, gamma=.5)
gaborFeatures = features(image = intensity(im), channel = gaborFilter)
summedFeatures = sumNormalizedFeatures(gaborFeatures)
#gaborConspicuity_ += N(summedFeatures)
np.add(gaborConspicuity_, N(summedFeatures), out=gaborConspicuity_, casting="unsafe")
return gaborConspicuity_ | [
"def",
"gaborConspicuity",
"(",
"image",
",",
"steps",
")",
":",
"# gaborConspicuity_ = numpy.zeros((1088, 1983), numpy.uint8)",
"gaborConspicuity_",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"dim_cols",
",",
"dim_rows",
")",
",",
"numpy",
".",
"uint8",
")",
"for",
"step",
"in",
"range",
"(",
"steps",
")",
":",
"theta",
"=",
"step",
"*",
"(",
"math",
".",
"pi",
"/",
"steps",
")",
"gaborFilter",
"=",
"makeGaborFilter",
"(",
"dims",
"=",
"(",
"10",
",",
"10",
")",
",",
"lambd",
"=",
"2.5",
",",
"theta",
"=",
"theta",
",",
"psi",
"=",
"math",
".",
"pi",
"/",
"2",
",",
"sigma",
"=",
"2.5",
",",
"gamma",
"=",
".5",
")",
"gaborFeatures",
"=",
"features",
"(",
"image",
"=",
"intensity",
"(",
"im",
")",
",",
"channel",
"=",
"gaborFilter",
")",
"summedFeatures",
"=",
"sumNormalizedFeatures",
"(",
"gaborFeatures",
")",
"#gaborConspicuity_ += N(summedFeatures)",
"np",
".",
"add",
"(",
"gaborConspicuity_",
",",
"N",
"(",
"summedFeatures",
")",
",",
"out",
"=",
"gaborConspicuity_",
",",
"casting",
"=",
"\"unsafe\"",
")",
"return",
"gaborConspicuity_"
] | [
104,
0
] | [
117,
25
] | python | en | ['en', 'error', 'th'] | False |
rgConspicuity | (image) |
Creates the conspicuity map for the sub channel `red-green conspicuity'
of the color channel.
|
Creates the conspicuity map for the sub channel `red-green conspicuity'
of the color channel.
| def rgConspicuity(image):
"""
Creates the conspicuity map for the sub channel `red-green conspicuity'
of the color channel.
"""
def rg(image):
r,g,_,__ = cv2.split(image)
return cv2.absdiff(r,g)
fs = features(image = image, channel = rg)
return sumNormalizedFeatures(fs) | [
"def",
"rgConspicuity",
"(",
"image",
")",
":",
"def",
"rg",
"(",
"image",
")",
":",
"r",
",",
"g",
",",
"_",
",",
"__",
"=",
"cv2",
".",
"split",
"(",
"image",
")",
"return",
"cv2",
".",
"absdiff",
"(",
"r",
",",
"g",
")",
"fs",
"=",
"features",
"(",
"image",
"=",
"image",
",",
"channel",
"=",
"rg",
")",
"return",
"sumNormalizedFeatures",
"(",
"fs",
")"
] | [
119,
0
] | [
128,
33
] | python | en | ['en', 'error', 'th'] | False |
byConspicuity | (image) |
Creates the conspicuity map for the sub channel `blue-yellow conspicuity'
of the color channel.
|
Creates the conspicuity map for the sub channel `blue-yellow conspicuity'
of the color channel.
| def byConspicuity(image):
"""
Creates the conspicuity map for the sub channel `blue-yellow conspicuity'
of the color channel.
"""
def by(image):
_,__,b,y = cv2.split(image)
return cv2.absdiff(b,y)
fs = features(image = image, channel = by)
return sumNormalizedFeatures(fs) | [
"def",
"byConspicuity",
"(",
"image",
")",
":",
"def",
"by",
"(",
"image",
")",
":",
"_",
",",
"__",
",",
"b",
",",
"y",
"=",
"cv2",
".",
"split",
"(",
"image",
")",
"return",
"cv2",
".",
"absdiff",
"(",
"b",
",",
"y",
")",
"fs",
"=",
"features",
"(",
"image",
"=",
"image",
",",
"channel",
"=",
"by",
")",
"return",
"sumNormalizedFeatures",
"(",
"fs",
")"
] | [
130,
0
] | [
139,
33
] | python | en | ['en', 'error', 'th'] | False |
sumNormalizedFeatures | (features, levels=9, startSize=(dim_rows*8, dim_cols*8)) |
Normalizes the feature maps in argument features and combines them into one.
Arguments:
features : list of feature maps (images)
levels : the levels of the Gaussian pyramid used to
calculate the feature maps.
startSize : the base size of the Gaussian pyramid used to
calculate the feature maps.
returns:
a combined feature map.
|
Normalizes the feature maps in argument features and combines them into one.
Arguments:
features : list of feature maps (images)
levels : the levels of the Gaussian pyramid used to
calculate the feature maps.
startSize : the base size of the Gaussian pyramid used to
calculate the feature maps.
returns:
a combined feature map.
| def sumNormalizedFeatures(features, levels=9, startSize=(dim_rows*8, dim_cols*8)):
# def sumNormalizedFeatures(features, levels=9, startSize=(1983*8, 1088*8)):
"""
Normalizes the feature maps in argument features and combines them into one.
Arguments:
features : list of feature maps (images)
levels : the levels of the Gaussian pyramid used to
calculate the feature maps.
startSize : the base size of the Gaussian pyramid used to
calculate the feature maps.
returns:
a combined feature map.
"""
commonWidth = startSize[0] / 2**(levels/2 - 1)
commonHeight = startSize[1] / 2**(levels/2 - 1)
commonSize = commonWidth, commonHeight
logger.info("Size of conspicuity map: %s", commonSize)
consp = N(cv2.resize(features[0][1], commonSize))
for f in features[1:]:
resized = N(cv2.resize(f[1], commonSize))
consp = cv2.add(consp, resized)
return consp | [
"def",
"sumNormalizedFeatures",
"(",
"features",
",",
"levels",
"=",
"9",
",",
"startSize",
"=",
"(",
"dim_rows",
"*",
"8",
",",
"dim_cols",
"*",
"8",
")",
")",
":",
"# def sumNormalizedFeatures(features, levels=9, startSize=(1983*8, 1088*8)):",
"commonWidth",
"=",
"startSize",
"[",
"0",
"]",
"/",
"2",
"**",
"(",
"levels",
"/",
"2",
"-",
"1",
")",
"commonHeight",
"=",
"startSize",
"[",
"1",
"]",
"/",
"2",
"**",
"(",
"levels",
"/",
"2",
"-",
"1",
")",
"commonSize",
"=",
"commonWidth",
",",
"commonHeight",
"logger",
".",
"info",
"(",
"\"Size of conspicuity map: %s\"",
",",
"commonSize",
")",
"consp",
"=",
"N",
"(",
"cv2",
".",
"resize",
"(",
"features",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"commonSize",
")",
")",
"for",
"f",
"in",
"features",
"[",
"1",
":",
"]",
":",
"resized",
"=",
"N",
"(",
"cv2",
".",
"resize",
"(",
"f",
"[",
"1",
"]",
",",
"commonSize",
")",
")",
"consp",
"=",
"cv2",
".",
"add",
"(",
"consp",
",",
"resized",
")",
"return",
"consp"
] | [
141,
0
] | [
162,
13
] | python | en | ['en', 'error', 'th'] | False |
N | (image) |
Normalization parameter as per Itti et al. (1998).
returns a normalized feature map image.
|
Normalization parameter as per Itti et al. (1998).
returns a normalized feature map image.
| def N(image):
"""
Normalization parameter as per Itti et al. (1998).
returns a normalized feature map image.
"""
M = 8. # an arbitrary global maximum to which the image is scaled.
# (When saving saliency maps as images, pixel values may become
# too large or too small for the chosen image format depending
# on this constant)
image = cv2.convertScaleAbs(image, alpha=M/image.max(), beta=0.)
w,h = image.shape
maxima = maximum_filter(image, size=(w/10,h/1))
maxima = (image == maxima)
mnum = maxima.sum()
logger.debug("Found %d local maxima.", mnum)
maxima = numpy.multiply(maxima, image)
mbar = float(maxima.sum()) / mnum
logger.debug("Average of local maxima: %f. Global maximum: %f", mbar, M)
return image * (M-mbar)**2 | [
"def",
"N",
"(",
"image",
")",
":",
"M",
"=",
"8.",
"# an arbitrary global maximum to which the image is scaled.",
"# (When saving saliency maps as images, pixel values may become",
"# too large or too small for the chosen image format depending",
"# on this constant)",
"image",
"=",
"cv2",
".",
"convertScaleAbs",
"(",
"image",
",",
"alpha",
"=",
"M",
"/",
"image",
".",
"max",
"(",
")",
",",
"beta",
"=",
"0.",
")",
"w",
",",
"h",
"=",
"image",
".",
"shape",
"maxima",
"=",
"maximum_filter",
"(",
"image",
",",
"size",
"=",
"(",
"w",
"/",
"10",
",",
"h",
"/",
"1",
")",
")",
"maxima",
"=",
"(",
"image",
"==",
"maxima",
")",
"mnum",
"=",
"maxima",
".",
"sum",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Found %d local maxima.\"",
",",
"mnum",
")",
"maxima",
"=",
"numpy",
".",
"multiply",
"(",
"maxima",
",",
"image",
")",
"mbar",
"=",
"float",
"(",
"maxima",
".",
"sum",
"(",
")",
")",
"/",
"mnum",
"logger",
".",
"debug",
"(",
"\"Average of local maxima: %f. Global maximum: %f\"",
",",
"mbar",
",",
"M",
")",
"return",
"image",
"*",
"(",
"M",
"-",
"mbar",
")",
"**",
"2"
] | [
164,
0
] | [
182,
27
] | python | en | ['en', 'error', 'th'] | False |
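A sketch of the normalization idea in N() above: rescale the map so its global maximum equals a fixed M, average the local maxima, and weight the whole map by (M - mbar)^2 so maps with one dominant peak are promoted over maps with many similar peaks. The random feature map and the filter size are illustrative assumptions.

import numpy as np
from scipy.ndimage import maximum_filter   # maximum_filter as used above

fmap = np.random.rand(32, 32)

M = 8.0
fmap = fmap * (M / fmap.max())                     # scale so the global maximum equals M
peaks = (maximum_filter(fmap, size=3) == fmap)     # local maxima
mbar = fmap[peaks].mean()                          # mean height of the local maxima
normalized = fmap * (M - mbar) ** 2                # few strong peaks -> large weight
print(round(float(mbar), 3), normalized.shape)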
makeNormalizedColorChannels | (image, thresholdRatio=10.) |
Creates a version of the (3-channel color) input image in which each of
the (4) channels is normalized. Implements color opponencies as per
Itti et al. (1998).
Arguments:
image : input image (3 color channels)
thresholdRatio : the threshold below which to set all color values
to zero.
Returns:
an output image with four normalized color channels for red, green,
blue and yellow.
|
Creates a version of the (3-channel color) input image in which each of
the (4) channels is normalized. Implements color opponencies as per
Itti et al. (1998).
Arguments:
image : input image (3 color channels)
thresholdRatio : the threshold below which to set all color values
to zero.
Returns:
an output image with four normalized color channels for red, green,
blue and yellow.
| def makeNormalizedColorChannels(image, thresholdRatio=10.):
"""
Creates a version of the (3-channel color) input image in which each of
the (4) channels is normalized. Implements color opponencies as per
Itti et al. (1998).
Arguments:
image : input image (3 color channels)
thresholdRatio : the threshold below which to set all color values
to zero.
Returns:
an output image with four normalized color channels for red, green,
blue and yellow.
"""
intens = intensity(image)
threshold = intens.max() / thresholdRatio
logger.debug("Threshold: %d", threshold)
r,g,b = cv2.split(image)
cv2.threshold(src=r, dst=r, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
cv2.threshold(src=g, dst=g, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
cv2.threshold(src=b, dst=b, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
R = r - (g + b) / 2
G = g - (r + b) / 2
B = b - (g + r) / 2
Y = (r + g) / 2 - cv2.absdiff(r,g) / 2 - b
# Negative values are set to zero.
cv2.threshold(src=R, dst=R, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
cv2.threshold(src=G, dst=G, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
cv2.threshold(src=B, dst=B, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
cv2.threshold(src=Y, dst=Y, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
image = cv2.merge((R,G,B,Y))
return image | [
"def",
"makeNormalizedColorChannels",
"(",
"image",
",",
"thresholdRatio",
"=",
"10.",
")",
":",
"intens",
"=",
"intensity",
"(",
"image",
")",
"threshold",
"=",
"intens",
".",
"max",
"(",
")",
"/",
"thresholdRatio",
"logger",
".",
"debug",
"(",
"\"Threshold: %d\"",
",",
"threshold",
")",
"r",
",",
"g",
",",
"b",
"=",
"cv2",
".",
"split",
"(",
"image",
")",
"cv2",
".",
"threshold",
"(",
"src",
"=",
"r",
",",
"dst",
"=",
"r",
",",
"thresh",
"=",
"threshold",
",",
"maxval",
"=",
"0.0",
",",
"type",
"=",
"cv2",
".",
"THRESH_TOZERO",
")",
"cv2",
".",
"threshold",
"(",
"src",
"=",
"g",
",",
"dst",
"=",
"g",
",",
"thresh",
"=",
"threshold",
",",
"maxval",
"=",
"0.0",
",",
"type",
"=",
"cv2",
".",
"THRESH_TOZERO",
")",
"cv2",
".",
"threshold",
"(",
"src",
"=",
"b",
",",
"dst",
"=",
"b",
",",
"thresh",
"=",
"threshold",
",",
"maxval",
"=",
"0.0",
",",
"type",
"=",
"cv2",
".",
"THRESH_TOZERO",
")",
"R",
"=",
"r",
"-",
"(",
"g",
"+",
"b",
")",
"/",
"2",
"G",
"=",
"g",
"-",
"(",
"r",
"+",
"b",
")",
"/",
"2",
"B",
"=",
"b",
"-",
"(",
"g",
"+",
"r",
")",
"/",
"2",
"Y",
"=",
"(",
"r",
"+",
"g",
")",
"/",
"2",
"-",
"cv2",
".",
"absdiff",
"(",
"r",
",",
"g",
")",
"/",
"2",
"-",
"b",
"# Negative values are set to zero.",
"cv2",
".",
"threshold",
"(",
"src",
"=",
"R",
",",
"dst",
"=",
"R",
",",
"thresh",
"=",
"0.",
",",
"maxval",
"=",
"0.0",
",",
"type",
"=",
"cv2",
".",
"THRESH_TOZERO",
")",
"cv2",
".",
"threshold",
"(",
"src",
"=",
"G",
",",
"dst",
"=",
"G",
",",
"thresh",
"=",
"0.",
",",
"maxval",
"=",
"0.0",
",",
"type",
"=",
"cv2",
".",
"THRESH_TOZERO",
")",
"cv2",
".",
"threshold",
"(",
"src",
"=",
"B",
",",
"dst",
"=",
"B",
",",
"thresh",
"=",
"0.",
",",
"maxval",
"=",
"0.0",
",",
"type",
"=",
"cv2",
".",
"THRESH_TOZERO",
")",
"cv2",
".",
"threshold",
"(",
"src",
"=",
"Y",
",",
"dst",
"=",
"Y",
",",
"thresh",
"=",
"0.",
",",
"maxval",
"=",
"0.0",
",",
"type",
"=",
"cv2",
".",
"THRESH_TOZERO",
")",
"image",
"=",
"cv2",
".",
"merge",
"(",
"(",
"R",
",",
"G",
",",
"B",
",",
"Y",
")",
")",
"return",
"image"
] | [
184,
0
] | [
216,
13
] | python | en | ['en', 'error', 'th'] | False |
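A sketch of the four color-opponency channels computed by makeNormalizedColorChannels above, using np.clip in place of the threshold-to-zero calls. The random image and the channel order are illustrative assumptions (whether channel 0 is red or blue depends on how the image was loaded).

import cv2
import numpy as np

img = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8).astype(np.float64)
r, g, b = cv2.split(img)

R = np.clip(r - (g + b) / 2, 0, None)                          # red vs. green+blue
G = np.clip(g - (r + b) / 2, 0, None)
B = np.clip(b - (r + g) / 2, 0, None)
Y = np.clip((r + g) / 2 - cv2.absdiff(r, g) / 2 - b, 0, None)  # yellow opponent channel
opponent = cv2.merge((R, G, B, Y))
print(opponent.shape)   # (4, 4, 4)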
markMaxima | (saliency) |
Mark the maxima in a saliency map (a gray-scale image).
|
Mark the maxima in a saliency map (a gray-scale image).
| def markMaxima(saliency):
"""
Mark the maxima in a saliency map (a gray-scale image).
"""
maxima = maximum_filter(saliency, size=(5, 5))
maxima = numpy.array(saliency == maxima, dtype=numpy.float64) * 255
g = cv2.max(saliency, maxima)
r = saliency
b = saliency
marked = cv2.merge((b,g,r))
return marked | [
"def",
"markMaxima",
"(",
"saliency",
")",
":",
"maxima",
"=",
"maximum_filter",
"(",
"saliency",
",",
"size",
"=",
"(",
"5",
",",
"5",
")",
")",
"maxima",
"=",
"numpy",
".",
"array",
"(",
"saliency",
"==",
"maxima",
",",
"dtype",
"=",
"numpy",
".",
"float64",
")",
"*",
"255",
"g",
"=",
"cv2",
".",
"max",
"(",
"saliency",
",",
"maxima",
")",
"r",
"=",
"saliency",
"b",
"=",
"saliency",
"marked",
"=",
"cv2",
".",
"merge",
"(",
"(",
"b",
",",
"g",
",",
"r",
")",
")",
"return",
"marked"
] | [
218,
0
] | [
228,
14
] | python | en | ['en', 'error', 'th'] | False |
uplift_tree_string | (decisionTree, x_names) |
Convert the tree to string for print.
Args
----
decisionTree : object
object of DecisionTree class
x_names : list
List of feature names
Returns
-------
A string representation of the tree.
|
Convert the tree to string for print. | def uplift_tree_string(decisionTree, x_names):
'''
Convert the tree to string for print.
Args
----
decisionTree : object
object of DecisionTree class
x_names : list
List of feature names
Returns
-------
A string representation of the tree.
'''
# Column Heading
dcHeadings = {}
for i, szY in enumerate(x_names + ['treatment_group_key']):
szCol = 'Column %d' % i
dcHeadings[szCol] = str(szY)
def toString(decisionTree, indent=''):
if decisionTree.results is not None: # leaf node
return str(decisionTree.results)
else:
szCol = 'Column %s' % decisionTree.col
if szCol in dcHeadings:
szCol = dcHeadings[szCol]
if isinstance(decisionTree.value, int) or isinstance(decisionTree.value, float):
decision = '%s >= %s?' % (szCol, decisionTree.value)
else:
decision = '%s == %s?' % (szCol, decisionTree.value)
trueBranch = indent + 'yes -> ' + toString(decisionTree.trueBranch, indent + '\t\t')
falseBranch = indent + 'no -> ' + toString(decisionTree.falseBranch, indent + '\t\t')
return (decision + '\n' + trueBranch + '\n' + falseBranch)
print(toString(decisionTree)) | [
"def",
"uplift_tree_string",
"(",
"decisionTree",
",",
"x_names",
")",
":",
"# Column Heading",
"dcHeadings",
"=",
"{",
"}",
"for",
"i",
",",
"szY",
"in",
"enumerate",
"(",
"x_names",
"+",
"[",
"'treatment_group_key'",
"]",
")",
":",
"szCol",
"=",
"'Column %d'",
"%",
"i",
"dcHeadings",
"[",
"szCol",
"]",
"=",
"str",
"(",
"szY",
")",
"def",
"toString",
"(",
"decisionTree",
",",
"indent",
"=",
"''",
")",
":",
"if",
"decisionTree",
".",
"results",
"is",
"not",
"None",
":",
"# leaf node",
"return",
"str",
"(",
"decisionTree",
".",
"results",
")",
"else",
":",
"szCol",
"=",
"'Column %s'",
"%",
"decisionTree",
".",
"col",
"if",
"szCol",
"in",
"dcHeadings",
":",
"szCol",
"=",
"dcHeadings",
"[",
"szCol",
"]",
"if",
"isinstance",
"(",
"decisionTree",
".",
"value",
",",
"int",
")",
"or",
"isinstance",
"(",
"decisionTree",
".",
"value",
",",
"float",
")",
":",
"decision",
"=",
"'%s >= %s?'",
"%",
"(",
"szCol",
",",
"decisionTree",
".",
"value",
")",
"else",
":",
"decision",
"=",
"'%s == %s?'",
"%",
"(",
"szCol",
",",
"decisionTree",
".",
"value",
")",
"trueBranch",
"=",
"indent",
"+",
"'yes -> '",
"+",
"toString",
"(",
"decisionTree",
".",
"trueBranch",
",",
"indent",
"+",
"'\\t\\t'",
")",
"falseBranch",
"=",
"indent",
"+",
"'no -> '",
"+",
"toString",
"(",
"decisionTree",
".",
"falseBranch",
",",
"indent",
"+",
"'\\t\\t'",
")",
"return",
"(",
"decision",
"+",
"'\\n'",
"+",
"trueBranch",
"+",
"'\\n'",
"+",
"falseBranch",
")",
"print",
"(",
"toString",
"(",
"decisionTree",
")",
")"
] | [
10,
0
] | [
49,
33
] | python | en | ['en', 'error', 'th'] | False |
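A rough sketch of calling uplift_tree_string, assuming the function above is in scope. A real tree comes from an uplift tree learner; the SimpleNamespace nodes below only mimic the attributes the printer reads (results, col, value, trueBranch, falseBranch) and are not the library's actual node class.

from types import SimpleNamespace

leaf_hi = SimpleNamespace(results={'control': 0.10, 'treatment': 0.18})
leaf_lo = SimpleNamespace(results={'control': 0.12, 'treatment': 0.11})
root = SimpleNamespace(results=None, col=0, value=30.0,
                       trueBranch=leaf_hi, falseBranch=leaf_lo)

uplift_tree_string(root, x_names=['age'])
# Prints something like:
# age >= 30.0?
# yes -> {'control': 0.1, 'treatment': 0.18}
# no -> {'control': 0.12, 'treatment': 0.11}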
uplift_tree_plot | (decisionTree, x_names) |
Convert the tree to dot graph for plots.
Args
----
decisionTree : object
object of DecisionTree class
x_names : list
List of feature names
Returns
-------
Dot class representing the tree graph.
|
Convert the tree to dot graph for plots. | def uplift_tree_plot(decisionTree, x_names):
'''
Convert the tree to dot graph for plots.
Args
----
decisionTree : object
object of DecisionTree class
x_names : list
List of feature names
Returns
-------
Dot class representing the tree graph.
'''
# Column Heading
dcHeadings = {}
for i, szY in enumerate(x_names + ['treatment_group_key']):
szCol = 'Column %d' % i
dcHeadings[szCol] = str(szY)
dcNodes = defaultdict(list)
"""Plots the obtained decision tree. """
def toString(iSplit, decisionTree, bBranch, szParent="null", indent='', indexParent=0, upliftScores=list()):
if decisionTree.results is not None: # leaf node
lsY = []
for szX, n in decisionTree.results.items():
lsY.append('%s:%.2f' % (szX, n))
dcY = {"name": "%s" % ', '.join(lsY), "parent": szParent}
dcSummary = decisionTree.summary
upliftScores += [dcSummary['matchScore']]
dcNodes[iSplit].append(['leaf', dcY['name'], szParent, bBranch,
str(-round(float(decisionTree.summary['impurity']), 3)), dcSummary['samples'],
dcSummary['group_size'], dcSummary['upliftScore'], dcSummary['matchScore'],
indexParent])
else:
szCol = 'Column %s' % decisionTree.col
if szCol in dcHeadings:
szCol = dcHeadings[szCol]
if isinstance(decisionTree.value, int) or isinstance(decisionTree.value, float):
decision = '%s >= %s' % (szCol, decisionTree.value)
else:
decision = '%s == %s' % (szCol, decisionTree.value)
indexOfLevel = len(dcNodes[iSplit])
toString(iSplit + 1, decisionTree.trueBranch, True, decision, indent + '\t\t', indexOfLevel, upliftScores)
toString(iSplit + 1, decisionTree.falseBranch, False, decision, indent + '\t\t', indexOfLevel, upliftScores)
dcSummary = decisionTree.summary
upliftScores += [dcSummary['matchScore']]
dcNodes[iSplit].append([iSplit + 1, decision, szParent, bBranch,
str(-round(float(decisionTree.summary['impurity']), 3)), dcSummary['samples'],
dcSummary['group_size'], dcSummary['upliftScore'], dcSummary['matchScore'],
indexParent])
upliftScores = list()
toString(0, decisionTree, None, upliftScores=upliftScores)
upliftScoreToColor = dict()
try:
# calculate colors for nodes based on uplifts
minUplift = min(upliftScores)
maxUplift = max(upliftScores)
upliftLevels = [(uplift-minUplift)/(maxUplift-minUplift) for uplift in upliftScores] # min max scaler
baseUplift = float(decisionTree.summary.get('matchScore'))
baseUpliftLevel = (baseUplift - minUplift) / (maxUplift - minUplift) # min max scaler normalization
white = np.array([255., 255., 255.])
blue = np.array([31., 119., 180.])
green = np.array([0., 128., 0.])
for i, upliftLevel in enumerate(upliftLevels):
if upliftLevel >= baseUpliftLevel: # go blue
color = upliftLevel * blue + (1 - upliftLevel) * white
else: # go green
color = (1 - upliftLevel) * green + upliftLevel * white
color = [int(c) for c in color]
upliftScoreToColor[upliftScores[i]] = ('#%2x%2x%2x' % tuple(color)).replace(' ', '0') # color code
except Exception as e:
print(e)
lsDot = ['digraph Tree {',
'node [shape=box, style="filled, rounded", color="black", fontname=helvetica] ;',
'edge [fontname=helvetica] ;'
]
i_node = 0
dcParent = {}
totalSample = int(decisionTree.summary.get('samples')) # initialize the value with the total sample size at root
for nSplit in range(len(dcNodes.items())):
lsY = dcNodes[nSplit]
indexOfLevel = 0
for lsX in lsY:
iSplit, decision, szParent, bBranch, szImpurity, szSamples, szGroup, \
upliftScore, matchScore, indexParent = lsX
sampleProportion = round(int(szSamples)*100./totalSample, 1)
if type(iSplit) is int:
szSplit = '%d-%d' % (iSplit, indexOfLevel)
dcParent[szSplit] = i_node
lsDot.append('%d [label=<%s<br/> impurity %s<br/> total_sample %s (%s%)<br/>group_sample %s <br/> '
'uplift score: %s <br/> uplift p_value %s <br/> '
'validation uplift score %s>, fillcolor="%s"] ;' % (
i_node, decision.replace('>=', '≥').replace('?', ''), szImpurity, szSamples,
str(sampleProportion), szGroup, str(upliftScore[0]), str(upliftScore[1]),
str(matchScore), upliftScoreToColor.get(matchScore, '#e5813900')
))
else:
lsDot.append('%d [label=< impurity %s<br/> total_sample %s (%s%)<br/>group_sample %s <br/> '
'uplift score: %s <br/> uplift p_value %s <br/> validation uplift score %s <br/> '
'mean %s>, fillcolor="%s"] ;' % (
i_node, szImpurity, szSamples, str(sampleProportion), szGroup, str(upliftScore[0]),
str(upliftScore[1]), str(matchScore), decision,
upliftScoreToColor.get(matchScore, '#e5813900')
))
if szParent != 'null':
if bBranch:
szAngle = '45'
szHeadLabel = 'True'
else:
szAngle = '-45'
szHeadLabel = 'False'
szSplit = '%d-%d' % (nSplit, indexParent)
p_node = dcParent[szSplit]
if nSplit == 1:
lsDot.append('%d -> %d [labeldistance=2.5, labelangle=%s, headlabel="%s"] ;' % (p_node,
i_node, szAngle,
szHeadLabel))
else:
lsDot.append('%d -> %d ;' % (p_node, i_node))
i_node += 1
indexOfLevel += 1
lsDot.append('}')
dot_data = '\n'.join(lsDot)
graph = pydotplus.graph_from_dot_data(dot_data)
return graph | [
"def",
"uplift_tree_plot",
"(",
"decisionTree",
",",
"x_names",
")",
":",
"# Column Heading",
"dcHeadings",
"=",
"{",
"}",
"for",
"i",
",",
"szY",
"in",
"enumerate",
"(",
"x_names",
"+",
"[",
"'treatment_group_key'",
"]",
")",
":",
"szCol",
"=",
"'Column %d'",
"%",
"i",
"dcHeadings",
"[",
"szCol",
"]",
"=",
"str",
"(",
"szY",
")",
"dcNodes",
"=",
"defaultdict",
"(",
"list",
")",
"\"\"\"Plots the obtained decision tree. \"\"\"",
"def",
"toString",
"(",
"iSplit",
",",
"decisionTree",
",",
"bBranch",
",",
"szParent",
"=",
"\"null\"",
",",
"indent",
"=",
"''",
",",
"indexParent",
"=",
"0",
",",
"upliftScores",
"=",
"list",
"(",
")",
")",
":",
"if",
"decisionTree",
".",
"results",
"is",
"not",
"None",
":",
"# leaf node",
"lsY",
"=",
"[",
"]",
"for",
"szX",
",",
"n",
"in",
"decisionTree",
".",
"results",
".",
"items",
"(",
")",
":",
"lsY",
".",
"append",
"(",
"'%s:%.2f'",
"%",
"(",
"szX",
",",
"n",
")",
")",
"dcY",
"=",
"{",
"\"name\"",
":",
"\"%s\"",
"%",
"', '",
".",
"join",
"(",
"lsY",
")",
",",
"\"parent\"",
":",
"szParent",
"}",
"dcSummary",
"=",
"decisionTree",
".",
"summary",
"upliftScores",
"+=",
"[",
"dcSummary",
"[",
"'matchScore'",
"]",
"]",
"dcNodes",
"[",
"iSplit",
"]",
".",
"append",
"(",
"[",
"'leaf'",
",",
"dcY",
"[",
"'name'",
"]",
",",
"szParent",
",",
"bBranch",
",",
"str",
"(",
"-",
"round",
"(",
"float",
"(",
"decisionTree",
".",
"summary",
"[",
"'impurity'",
"]",
")",
",",
"3",
")",
")",
",",
"dcSummary",
"[",
"'samples'",
"]",
",",
"dcSummary",
"[",
"'group_size'",
"]",
",",
"dcSummary",
"[",
"'upliftScore'",
"]",
",",
"dcSummary",
"[",
"'matchScore'",
"]",
",",
"indexParent",
"]",
")",
"else",
":",
"szCol",
"=",
"'Column %s'",
"%",
"decisionTree",
".",
"col",
"if",
"szCol",
"in",
"dcHeadings",
":",
"szCol",
"=",
"dcHeadings",
"[",
"szCol",
"]",
"if",
"isinstance",
"(",
"decisionTree",
".",
"value",
",",
"int",
")",
"or",
"isinstance",
"(",
"decisionTree",
".",
"value",
",",
"float",
")",
":",
"decision",
"=",
"'%s >= %s'",
"%",
"(",
"szCol",
",",
"decisionTree",
".",
"value",
")",
"else",
":",
"decision",
"=",
"'%s == %s'",
"%",
"(",
"szCol",
",",
"decisionTree",
".",
"value",
")",
"indexOfLevel",
"=",
"len",
"(",
"dcNodes",
"[",
"iSplit",
"]",
")",
"toString",
"(",
"iSplit",
"+",
"1",
",",
"decisionTree",
".",
"trueBranch",
",",
"True",
",",
"decision",
",",
"indent",
"+",
"'\\t\\t'",
",",
"indexOfLevel",
",",
"upliftScores",
")",
"toString",
"(",
"iSplit",
"+",
"1",
",",
"decisionTree",
".",
"falseBranch",
",",
"False",
",",
"decision",
",",
"indent",
"+",
"'\\t\\t'",
",",
"indexOfLevel",
",",
"upliftScores",
")",
"dcSummary",
"=",
"decisionTree",
".",
"summary",
"upliftScores",
"+=",
"[",
"dcSummary",
"[",
"'matchScore'",
"]",
"]",
"dcNodes",
"[",
"iSplit",
"]",
".",
"append",
"(",
"[",
"iSplit",
"+",
"1",
",",
"decision",
",",
"szParent",
",",
"bBranch",
",",
"str",
"(",
"-",
"round",
"(",
"float",
"(",
"decisionTree",
".",
"summary",
"[",
"'impurity'",
"]",
")",
",",
"3",
")",
")",
",",
"dcSummary",
"[",
"'samples'",
"]",
",",
"dcSummary",
"[",
"'group_size'",
"]",
",",
"dcSummary",
"[",
"'upliftScore'",
"]",
",",
"dcSummary",
"[",
"'matchScore'",
"]",
",",
"indexParent",
"]",
")",
"upliftScores",
"=",
"list",
"(",
")",
"toString",
"(",
"0",
",",
"decisionTree",
",",
"None",
",",
"upliftScores",
"=",
"upliftScores",
")",
"upliftScoreToColor",
"=",
"dict",
"(",
")",
"try",
":",
"# calculate colors for nodes based on uplifts",
"minUplift",
"=",
"min",
"(",
"upliftScores",
")",
"maxUplift",
"=",
"max",
"(",
"upliftScores",
")",
"upliftLevels",
"=",
"[",
"(",
"uplift",
"-",
"minUplift",
")",
"/",
"(",
"maxUplift",
"-",
"minUplift",
")",
"for",
"uplift",
"in",
"upliftScores",
"]",
"# min max scaler",
"baseUplift",
"=",
"float",
"(",
"decisionTree",
".",
"summary",
".",
"get",
"(",
"'matchScore'",
")",
")",
"baseUpliftLevel",
"=",
"(",
"baseUplift",
"-",
"minUplift",
")",
"/",
"(",
"maxUplift",
"-",
"minUplift",
")",
"# min max scaler normalization",
"white",
"=",
"np",
".",
"array",
"(",
"[",
"255.",
",",
"255.",
",",
"255.",
"]",
")",
"blue",
"=",
"np",
".",
"array",
"(",
"[",
"31.",
",",
"119.",
",",
"180.",
"]",
")",
"green",
"=",
"np",
".",
"array",
"(",
"[",
"0.",
",",
"128.",
",",
"0.",
"]",
")",
"for",
"i",
",",
"upliftLevel",
"in",
"enumerate",
"(",
"upliftLevels",
")",
":",
"if",
"upliftLevel",
">=",
"baseUpliftLevel",
":",
"# go blue",
"color",
"=",
"upliftLevel",
"*",
"blue",
"+",
"(",
"1",
"-",
"upliftLevel",
")",
"*",
"white",
"else",
":",
"# go green",
"color",
"=",
"(",
"1",
"-",
"upliftLevel",
")",
"*",
"green",
"+",
"upliftLevel",
"*",
"white",
"color",
"=",
"[",
"int",
"(",
"c",
")",
"for",
"c",
"in",
"color",
"]",
"upliftScoreToColor",
"[",
"upliftScores",
"[",
"i",
"]",
"]",
"=",
"(",
"'#%2x%2x%2x'",
"%",
"tuple",
"(",
"color",
")",
")",
".",
"replace",
"(",
"' '",
",",
"'0'",
")",
"# color code",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"lsDot",
"=",
"[",
"'digraph Tree {'",
",",
"'node [shape=box, style=\"filled, rounded\", color=\"black\", fontname=helvetica] ;'",
",",
"'edge [fontname=helvetica] ;'",
"]",
"i_node",
"=",
"0",
"dcParent",
"=",
"{",
"}",
"totalSample",
"=",
"int",
"(",
"decisionTree",
".",
"summary",
".",
"get",
"(",
"'samples'",
")",
")",
"# initialize the value with the total sample size at root",
"for",
"nSplit",
"in",
"range",
"(",
"len",
"(",
"dcNodes",
".",
"items",
"(",
")",
")",
")",
":",
"lsY",
"=",
"dcNodes",
"[",
"nSplit",
"]",
"indexOfLevel",
"=",
"0",
"for",
"lsX",
"in",
"lsY",
":",
"iSplit",
",",
"decision",
",",
"szParent",
",",
"bBranch",
",",
"szImpurity",
",",
"szSamples",
",",
"szGroup",
",",
"upliftScore",
",",
"matchScore",
",",
"indexParent",
"=",
"lsX",
"sampleProportion",
"=",
"round",
"(",
"int",
"(",
"szSamples",
")",
"*",
"100.",
"/",
"totalSample",
",",
"1",
")",
"if",
"type",
"(",
"iSplit",
")",
"is",
"int",
":",
"szSplit",
"=",
"'%d-%d'",
"%",
"(",
"iSplit",
",",
"indexOfLevel",
")",
"dcParent",
"[",
"szSplit",
"]",
"=",
"i_node",
"lsDot",
".",
"append",
"(",
"'%d [label=<%s<br/> impurity %s<br/> total_sample %s (%s%)<br/>group_sample %s <br/> '",
"'uplift score: %s <br/> uplift p_value %s <br/> '",
"'validation uplift score %s>, fillcolor=\"%s\"] ;'",
"%",
"(",
"i_node",
",",
"decision",
".",
"replace",
"(",
"'>='",
",",
"'≥'",
")",
".",
"replace",
"(",
"'?'",
",",
"''",
")",
",",
"szImpurity",
",",
"szSamples",
",",
"str",
"(",
"sampleProportion",
")",
",",
"szGroup",
",",
"str",
"(",
"upliftScore",
"[",
"0",
"]",
")",
",",
"str",
"(",
"upliftScore",
"[",
"1",
"]",
")",
",",
"str",
"(",
"matchScore",
")",
",",
"upliftScoreToColor",
".",
"get",
"(",
"matchScore",
",",
"'#e5813900'",
")",
")",
")",
"else",
":",
"lsDot",
".",
"append",
"(",
"'%d [label=< impurity %s<br/> total_sample %s (%s%)<br/>group_sample %s <br/> '",
"'uplift score: %s <br/> uplift p_value %s <br/> validation uplift score %s <br/> '",
"'mean %s>, fillcolor=\"%s\"] ;'",
"%",
"(",
"i_node",
",",
"szImpurity",
",",
"szSamples",
",",
"str",
"(",
"sampleProportion",
")",
",",
"szGroup",
",",
"str",
"(",
"upliftScore",
"[",
"0",
"]",
")",
",",
"str",
"(",
"upliftScore",
"[",
"1",
"]",
")",
",",
"str",
"(",
"matchScore",
")",
",",
"decision",
",",
"upliftScoreToColor",
".",
"get",
"(",
"matchScore",
",",
"'#e5813900'",
")",
")",
")",
"if",
"szParent",
"!=",
"'null'",
":",
"if",
"bBranch",
":",
"szAngle",
"=",
"'45'",
"szHeadLabel",
"=",
"'True'",
"else",
":",
"szAngle",
"=",
"'-45'",
"szHeadLabel",
"=",
"'False'",
"szSplit",
"=",
"'%d-%d'",
"%",
"(",
"nSplit",
",",
"indexParent",
")",
"p_node",
"=",
"dcParent",
"[",
"szSplit",
"]",
"if",
"nSplit",
"==",
"1",
":",
"lsDot",
".",
"append",
"(",
"'%d -> %d [labeldistance=2.5, labelangle=%s, headlabel=\"%s\"] ;'",
"%",
"(",
"p_node",
",",
"i_node",
",",
"szAngle",
",",
"szHeadLabel",
")",
")",
"else",
":",
"lsDot",
".",
"append",
"(",
"'%d -> %d ;'",
"%",
"(",
"p_node",
",",
"i_node",
")",
")",
"i_node",
"+=",
"1",
"indexOfLevel",
"+=",
"1",
"lsDot",
".",
"append",
"(",
"'}'",
")",
"dot_data",
"=",
"'\\n'",
".",
"join",
"(",
"lsDot",
")",
"graph",
"=",
"pydotplus",
".",
"graph_from_dot_data",
"(",
"dot_data",
")",
"return",
"graph"
] | [
52,
0
] | [
188,
16
] | python | en | ['en', 'error', 'th'] | False |
ColumnRuleFollowers._helper | (x, rule) | Helper function since Python doesn't like multiline functions | Helper function since Python doesn't like multiline functions | def _helper(x, rule):
"""Helper function since Python doesn't like multiline functions"""
strings = {}
ldict = {}
names = ""
if x is None:
x = ""
if not isinstance(x, str):
raise TypeError(
"Column values must be strings in order to use 'expect_column_values_to_follow_rule'"
)
for name, rnge in rule["ranges"].items():
if rnge[0] < rnge[1]:
strings[name] = str(x[rnge[0] : rnge[1]])
names += name + ","
else:
raise ValueError(
"Unexpected range. Ensure that the second number in your range is larger than the first."
)
exec("expr = lambda " + names + ":" + rule["expr"], None, ldict)
func = ldict["expr"]
return func(**strings) | [
"def",
"_helper",
"(",
"x",
",",
"rule",
")",
":",
"strings",
"=",
"{",
"}",
"ldict",
"=",
"{",
"}",
"names",
"=",
"\"\"",
"if",
"x",
"is",
"None",
":",
"x",
"=",
"\"\"",
"if",
"not",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Column values must be strings in order to use 'expect_column_values_to_follow_rule'\"",
")",
"for",
"name",
",",
"rnge",
"in",
"rule",
"[",
"\"ranges\"",
"]",
".",
"items",
"(",
")",
":",
"if",
"rnge",
"[",
"0",
"]",
"<",
"rnge",
"[",
"1",
"]",
":",
"strings",
"[",
"name",
"]",
"=",
"str",
"(",
"x",
"[",
"rnge",
"[",
"0",
"]",
":",
"rnge",
"[",
"1",
"]",
"]",
")",
"names",
"+=",
"name",
"+",
"\",\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected range. Ensure that the second number in your range is larger than the first.\"",
")",
"exec",
"(",
"\"expr = lambda \"",
"+",
"names",
"+",
"\":\"",
"+",
"rule",
"[",
"\"expr\"",
"]",
",",
"None",
",",
"ldict",
")",
"func",
"=",
"ldict",
"[",
"\"expr\"",
"]",
"return",
"func",
"(",
"*",
"*",
"strings",
")"
] | [
62,
4
] | [
84,
30
] | python | en | ['en', 'en', 'en'] | True |
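An illustrative rule for the _helper shown above, assuming it can be reached as a plain callable (in the library it is nested inside an expectation implementation). The 'prefix'/'suffix' names and the expression are invented for this example.

rule = {
    "ranges": {"prefix": [0, 2], "suffix": [2, 4]},        # character slices of the value
    "expr": "prefix == '20' and suffix in ('21', '22')",   # evaluated as a lambda over the slices
}

print(_helper("2021", rule))   # True
print(_helper("1999", rule))   # False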
reverse_array_dict | (dictionary) |
Returns a reversed version of a dictionary of keys to list-like objects. Each value in each list-like
becomes a key in the returned dictionary mapping to its key in the provided dictionary.
|
Returns a reversed version of a dictionary of keys to list-like objects. Each value in each list-like
becomes a key in the returned dictionary mapping to its key in the provided dictionary.
| def reverse_array_dict(dictionary):
"""
Returns a reversed version of a dictionary of keys to list-like objects. Each value in each list-like
becomes a key in the returned dictionary mapping to its key in the provided dictionary.
"""
return_dict = {}
for label, values in dictionary.items():
for value in values:
return_dict[value] = label
return return_dict | [
"def",
"reverse_array_dict",
"(",
"dictionary",
")",
":",
"return_dict",
"=",
"{",
"}",
"for",
"label",
",",
"values",
"in",
"dictionary",
".",
"items",
"(",
")",
":",
"for",
"value",
"in",
"values",
":",
"return_dict",
"[",
"value",
"]",
"=",
"label",
"return",
"return_dict"
] | [
35,
0
] | [
44,
22
] | python | en | ['en', 'error', 'th'] | False |
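A quick illustration of reverse_array_dict, assuming the function above is importable; the labels are arbitrary.

label_to_values = {'water': [0, 1], 'cloud': [2, 4]}
print(reverse_array_dict(label_to_values))
# {0: 'water', 1: 'water', 2: 'cloud', 4: 'cloud'}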
list_prod | (lst) | Takes the product of elements in a list. | Takes the product of elements in a list. | def list_prod(lst):
"""Takes the product of elements in a list."""
return functools.reduce(operator.mul, lst) | [
"def",
"list_prod",
"(",
"lst",
")",
":",
"return",
"functools",
".",
"reduce",
"(",
"operator",
".",
"mul",
",",
"lst",
")"
] | [
46,
0
] | [
48,
46
] | python | en | ['en', 'en', 'en'] | True |
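list_prod reduces with operator.mul, so it returns the single element for a one-item list and raises TypeError on an empty list; a small check, assuming the function is importable:

print(list_prod([2, 3, 4]))   # 24
print(list_prod([7]))         # 7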
check_for_float | (array) |
Check if a NumPy array-like contains floats.
Parameters
----------
array : numpy.ndarray or convertible
The array to check.
|
Check if a NumPy array-like contains floats. | def check_for_float(array):
"""
Check if a NumPy array-like contains floats.
Parameters
----------
array : numpy.ndarray or convertible
The array to check.
"""
try:
return array.dtype.kind == 'f'
except AttributeError:
# in case it's not a numpy array it will probably have no dtype.
return np.asarray(array).dtype.kind in numerical_dtype_kinds | [
"def",
"check_for_float",
"(",
"array",
")",
":",
"try",
":",
"return",
"array",
".",
"dtype",
".",
"kind",
"==",
"'f'",
"except",
"AttributeError",
":",
"# in case it's not a numpy array it will probably have no dtype.",
"return",
"np",
".",
"asarray",
"(",
"array",
")",
".",
"dtype",
".",
"kind",
"in",
"numerical_dtype_kinds"
] | [
50,
0
] | [
63,
68
] | python | en | ['en', 'error', 'th'] | False |
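A short check of check_for_float, assuming the function and the module-level numerical_dtype_kinds constant (defined elsewhere in the module, not shown in this row) are importable; plain ndarrays only exercise the dtype.kind == 'f' branch.

import numpy as np

print(check_for_float(np.array([1.0, 2.5])))   # True
print(check_for_float(np.array([1, 2, 3])))    # False (integer dtype)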
create_cfmask_clean_mask | (cfmask) |
Description:
Create a clean mask for clear land/water pixels,
i.e. mask out shadow, snow, cloud, and no data
-----
Input:
cfmask (xarray) - cf_mask from the ledaps products
Output:
clean_mask (boolean numpy array) - clear land/water mask
|
Description:
Create a clean mask for clear land/water pixels,
i.e. mask out shadow, snow, cloud, and no data
-----
Input:
cfmask (xarray) - cf_mask from the ledaps products
Output:
clean_mask (boolean numpy array) - clear land/water mask
| def create_cfmask_clean_mask(cfmask):
"""
Description:
Create a clean mask for clear land/water pixels,
i.e. mask out shadow, snow, cloud, and no data
-----
Input:
cfmask (xarray) - cf_mask from the ledaps products
Output:
clean_mask (boolean numpy array) - clear land/water mask
"""
#########################
# cfmask values: #
# 0 - clear #
# 1 - water #
# 2 - cloud shadow #
# 3 - snow #
# 4 - cloud #
# 255 - fill #
#########################
clean_mask = (cfmask == 0) | (cfmask == 1)
return clean_mask.values | [
"def",
"create_cfmask_clean_mask",
"(",
"cfmask",
")",
":",
"#########################",
"# cfmask values: #",
"# 0 - clear #",
"# 1 - water #",
"# 2 - cloud shadow #",
"# 3 - snow #",
"# 4 - cloud #",
"# 255 - fill #",
"#########################",
"clean_mask",
"=",
"(",
"cfmask",
"==",
"0",
")",
"|",
"(",
"cfmask",
"==",
"1",
")",
"return",
"clean_mask",
".",
"values"
] | [
65,
0
] | [
88,
28
] | python | en | ['en', 'error', 'th'] | False |
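A sketch of create_cfmask_clean_mask on a hand-built cf_mask DataArray; real inputs come from a Data Cube LEDAPS product, and xarray/numpy plus the function above are assumed.

import numpy as np
import xarray as xr

cf_mask = xr.DataArray(np.array([[0, 1, 2],
                                 [3, 4, 255]]), dims=('latitude', 'longitude'))
print(create_cfmask_clean_mask(cf_mask))
# [[ True  True False]
#  [False False False]]   -- only clear land/water pixels survive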
create_default_clean_mask | (dataset_in) |
Description:
Creates a data mask that masks nothing.
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube.
Throws:
ValueError - if dataset_in is an empty xarray.Dataset.
|
Description:
Creates a data mask that masks nothing.
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube.
Throws:
ValueError - if dataset_in is an empty xarray.Dataset.
| def create_default_clean_mask(dataset_in):
"""
Description:
Creates a data mask that masks nothing.
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube.
Throws:
ValueError - if dataset_in is an empty xarray.Dataset.
"""
data_vars = dataset_in.data_vars
if len(data_vars) != 0:
first_data_var = next(iter(data_vars))
clean_mask = np.ones(dataset_in[first_data_var].shape).astype(np.bool)
return clean_mask
else:
raise ValueError('`dataset_in` has no data!') | [
"def",
"create_default_clean_mask",
"(",
"dataset_in",
")",
":",
"data_vars",
"=",
"dataset_in",
".",
"data_vars",
"if",
"len",
"(",
"data_vars",
")",
"!=",
"0",
":",
"first_data_var",
"=",
"next",
"(",
"iter",
"(",
"data_vars",
")",
")",
"clean_mask",
"=",
"np",
".",
"ones",
"(",
"dataset_in",
"[",
"first_data_var",
"]",
".",
"shape",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"return",
"clean_mask",
"else",
":",
"raise",
"ValueError",
"(",
"'`dataset_in` has no data!'",
")"
] | [
90,
0
] | [
106,
53
] | python | en | ['en', 'error', 'th'] | False |
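An illustration of create_default_clean_mask, assuming the function above; note that np.bool is deprecated in recent NumPy releases, so an older NumPy (or swapping in plain bool) is assumed here.

import numpy as np
import xarray as xr

ds = xr.Dataset({'red': (('latitude', 'longitude'), np.zeros((2, 3)))})
mask = create_default_clean_mask(ds)
print(mask.shape, mask.all())   # (2, 3) True -- nothing is masked out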
get_spatial_ref | (crs) |
Description:
Get the spatial reference of a given crs
-----
Input:
crs (datacube.model.CRS) - Example: CRS('EPSG:4326')
Output:
ref (str) - spatial reference of given crs
|
Description:
Get the spatial reference of a given crs
-----
Input:
crs (datacube.model.CRS) - Example: CRS('EPSG:4326')
Output:
ref (str) - spatial reference of given crs
| def get_spatial_ref(crs):
"""
Description:
Get the spatial reference of a given crs
-----
Input:
crs (datacube.model.CRS) - Example: CRS('EPSG:4326')
Output:
ref (str) - spatial reference of given crs
"""
crs_str = str(crs)
epsg_code = int(crs_str.split(':')[1])
ref = osr.SpatialReference()
ref.ImportFromEPSG(epsg_code)
return str(ref) | [
"def",
"get_spatial_ref",
"(",
"crs",
")",
":",
"crs_str",
"=",
"str",
"(",
"crs",
")",
"epsg_code",
"=",
"int",
"(",
"crs_str",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
")",
"ref",
"=",
"osr",
".",
"SpatialReference",
"(",
")",
"ref",
".",
"ImportFromEPSG",
"(",
"epsg_code",
")",
"return",
"str",
"(",
"ref",
")"
] | [
108,
0
] | [
123,
19
] | python | en | ['en', 'error', 'th'] | False |
get_spatial_ref | (crs) |
Description:
Get the spatial reference of a given crs
-----
Input:
crs (datacube.model.CRS) - Example: CRS('EPSG:4326')
Output:
ref (str) - spatial reference of given crs
|
Description:
Get the spatial reference of a given crs
-----
Input:
crs (datacube.model.CRS) - Example: CRS('EPSG:4326')
Output:
ref (str) - spatial reference of given crs
| def get_spatial_ref(crs):
"""
Description:
Get the spatial reference of a given crs
-----
Input:
crs (datacube.model.CRS) - Example: CRS('EPSG:4326')
Output:
ref (str) - spatial reference of given crs
"""
crs_str = str(crs)
epsg_code = int(crs_str.split(':')[1])
ref = osr.SpatialReference()
ref.ImportFromEPSG(epsg_code)
return str(ref) | [
"def",
"get_spatial_ref",
"(",
"crs",
")",
":",
"crs_str",
"=",
"str",
"(",
"crs",
")",
"epsg_code",
"=",
"int",
"(",
"crs_str",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
")",
"ref",
"=",
"osr",
".",
"SpatialReference",
"(",
")",
"ref",
".",
"ImportFromEPSG",
"(",
"epsg_code",
")",
"return",
"str",
"(",
"ref",
")"
] | [
126,
0
] | [
141,
19
] | python | en | ['en', 'error', 'th'] | False |
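The two rows above are the same helper defined twice in the source module. A usage sketch, assuming GDAL's osgeo.osr bindings are installed and that any object whose str() looks like 'EPSG:4326' (a datacube CRS, or simply that string) is acceptable:

wkt = get_spatial_ref('EPSG:4326')   # a plain string works, since only str(crs) is used
print(wkt.splitlines()[0])           # e.g. GEOGCS["WGS 84", ... -- WKT text for EPSG:4326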
perform_timeseries_analysis | (dataset_in, band_name, intermediate_product=None, no_data=-9999, operation="mean") |
Description:
-----
Input:
dataset_in (xarray.DataSet) - dataset with one variable to perform timeseries on
band_name: name of the band to create stats for.
intermediate_product: result of this function for previous data, to be combined here
Output:
dataset_out (xarray.DataSet) - dataset containing
variables: normalized_data, total_data, total_clean
|
Description: | def perform_timeseries_analysis(dataset_in, band_name, intermediate_product=None, no_data=-9999, operation="mean"):
"""
Description:
-----
Input:
dataset_in (xarray.DataSet) - dataset with one variable to perform timeseries on
band_name: name of the band to create stats for.
intermediate_product: result of this function for previous data, to be combined here
Output:
dataset_out (xarray.DataSet) - dataset containing
variables: normalized_data, total_data, total_clean
"""
assert operation in ['mean', 'max', 'min'], "Please enter a valid operation."
data = dataset_in[band_name]
data = data.where(data != no_data)
processed_data_sum = data.sum('time')
clean_data = data.notnull()
clean_data_sum = clean_data.astype('bool').sum('time')
dataset_out = None
if intermediate_product is None:
processed_data_normalized = processed_data_sum / clean_data_sum
dataset_out = xr.Dataset(
{
'normalized_data': processed_data_normalized,
'min': data.min(dim='time'),
'max': data.max(dim='time'),
'total_data': processed_data_sum,
'total_clean': clean_data_sum
},
coords={'latitude': dataset_in.latitude,
'longitude': dataset_in.longitude})
else:
dataset_out = intermediate_product
dataset_out['total_data'] += processed_data_sum
dataset_out['total_clean'] += clean_data_sum
dataset_out['normalized_data'] = dataset_out['total_data'] / dataset_out['total_clean']
dataset_out['min'] = xr.concat([dataset_out['min'], data.min(dim='time')], dim='time').min(dim='time')
dataset_out['max'] = xr.concat([dataset_out['max'], data.max(dim='time')], dim='time').max(dim='time')
nan_to_num(dataset_out, 0)
return dataset_out | [
"def",
"perform_timeseries_analysis",
"(",
"dataset_in",
",",
"band_name",
",",
"intermediate_product",
"=",
"None",
",",
"no_data",
"=",
"-",
"9999",
",",
"operation",
"=",
"\"mean\"",
")",
":",
"assert",
"operation",
"in",
"[",
"'mean'",
",",
"'max'",
",",
"'min'",
"]",
",",
"\"Please enter a valid operation.\"",
"data",
"=",
"dataset_in",
"[",
"band_name",
"]",
"data",
"=",
"data",
".",
"where",
"(",
"data",
"!=",
"no_data",
")",
"processed_data_sum",
"=",
"data",
".",
"sum",
"(",
"'time'",
")",
"clean_data",
"=",
"data",
".",
"notnull",
"(",
")",
"clean_data_sum",
"=",
"clean_data",
".",
"astype",
"(",
"'bool'",
")",
".",
"sum",
"(",
"'time'",
")",
"dataset_out",
"=",
"None",
"if",
"intermediate_product",
"is",
"None",
":",
"processed_data_normalized",
"=",
"processed_data_sum",
"/",
"clean_data_sum",
"dataset_out",
"=",
"xr",
".",
"Dataset",
"(",
"{",
"'normalized_data'",
":",
"processed_data_normalized",
",",
"'min'",
":",
"data",
".",
"min",
"(",
"dim",
"=",
"'time'",
")",
",",
"'max'",
":",
"data",
".",
"max",
"(",
"dim",
"=",
"'time'",
")",
",",
"'total_data'",
":",
"processed_data_sum",
",",
"'total_clean'",
":",
"clean_data_sum",
"}",
",",
"coords",
"=",
"{",
"'latitude'",
":",
"dataset_in",
".",
"latitude",
",",
"'longitude'",
":",
"dataset_in",
".",
"longitude",
"}",
")",
"else",
":",
"dataset_out",
"=",
"intermediate_product",
"dataset_out",
"[",
"'total_data'",
"]",
"+=",
"processed_data_sum",
"dataset_out",
"[",
"'total_clean'",
"]",
"+=",
"clean_data_sum",
"dataset_out",
"[",
"'normalized_data'",
"]",
"=",
"dataset_out",
"[",
"'total_data'",
"]",
"/",
"dataset_out",
"[",
"'total_clean'",
"]",
"dataset_out",
"[",
"'min'",
"]",
"=",
"xr",
".",
"concat",
"(",
"[",
"dataset_out",
"[",
"'min'",
"]",
",",
"data",
".",
"min",
"(",
"dim",
"=",
"'time'",
")",
"]",
",",
"dim",
"=",
"'time'",
")",
".",
"min",
"(",
"dim",
"=",
"'time'",
")",
"dataset_out",
"[",
"'max'",
"]",
"=",
"xr",
".",
"concat",
"(",
"[",
"dataset_out",
"[",
"'max'",
"]",
",",
"data",
".",
"max",
"(",
"dim",
"=",
"'time'",
")",
"]",
",",
"dim",
"=",
"'time'",
")",
".",
"max",
"(",
"dim",
"=",
"'time'",
")",
"nan_to_num",
"(",
"dataset_out",
",",
"0",
")",
"return",
"dataset_out"
] | [
144,
0
] | [
192,
22
] | python | en | ['en', 'error', 'th'] | False |
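A minimal run of perform_timeseries_analysis on a synthetic dataset, assuming the function and its nan_to_num helper (a later row) are importable; the tiny 2x2 grid, dates, and -9999 nodata values are illustrative only.

import numpy as np
import xarray as xr

data = np.array([[[0.2, -9999.0], [0.4, 0.6]],
                 [[0.4, 0.8], [-9999.0, 0.2]]])
ds = xr.Dataset(
    {'ndvi': (('time', 'latitude', 'longitude'), data)},
    coords={'time': [np.datetime64('2017-01-01'), np.datetime64('2017-02-01')],
            'latitude': [0.0, 0.5], 'longitude': [10.0, 10.5]})

result = perform_timeseries_analysis(ds, 'ndvi')
print(result['total_clean'].values)       # [[2 1]
                                          #  [1 2]]  -- clean acquisitions per pixel
print(result['normalized_data'].values)   # per-pixel mean over the clean acquisitions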
nan_to_num | (data, number) |
Converts all nan values in `data` to `number`.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
|
Converts all nan values in `data` to `number`. | def nan_to_num(data, number):
"""
Converts all nan values in `data` to `number`.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
"""
if isinstance(data, xr.Dataset):
for key in list(data.data_vars):
data[key].values[np.isnan(data[key].values)] = number
elif isinstance(data, xr.DataArray):
data.values[np.isnan(data.values)] = number | [
"def",
"nan_to_num",
"(",
"data",
",",
"number",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"xr",
".",
"Dataset",
")",
":",
"for",
"key",
"in",
"list",
"(",
"data",
".",
"data_vars",
")",
":",
"data",
"[",
"key",
"]",
".",
"values",
"[",
"np",
".",
"isnan",
"(",
"data",
"[",
"key",
"]",
".",
"values",
")",
"]",
"=",
"number",
"elif",
"isinstance",
"(",
"data",
",",
"xr",
".",
"DataArray",
")",
":",
"data",
".",
"values",
"[",
"np",
".",
"isnan",
"(",
"data",
".",
"values",
")",
"]",
"=",
"number"
] | [
195,
0
] | [
207,
51
] | python | en | ['en', 'error', 'th'] | False |
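nan_to_num mutates its argument in place rather than returning a new object; a short demonstration, assuming the function above and xarray/numpy.

import numpy as np
import xarray as xr

da = xr.DataArray(np.array([1.0, np.nan, 3.0]))
nan_to_num(da, 0)
print(da.values)   # [1. 0. 3.]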
clear_attrs | (dataset) | Clear out all attributes on an xarray dataset to write to disk. | Clear out all attributes on an xarray dataset to write to disk. | def clear_attrs(dataset):
"""Clear out all attributes on an xarray dataset to write to disk."""
dataset.attrs = collections.OrderedDict()
for band in dataset.data_vars:
dataset[band].attrs = collections.OrderedDict() | [
"def",
"clear_attrs",
"(",
"dataset",
")",
":",
"dataset",
".",
"attrs",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"band",
"in",
"dataset",
".",
"data_vars",
":",
"dataset",
"[",
"band",
"]",
".",
"attrs",
"=",
"collections",
".",
"OrderedDict",
"(",
")"
] | [
210,
0
] | [
214,
55
] | python | en | ['en', 'en', 'en'] | True |
create_bit_mask | (data_array, valid_bits, no_data=-9999) | Create a boolean bit mask from a list of valid bits
Args:
data_array: xarray data array to extract bit information for.
valid_bits: array of ints representing what bits should be considered valid.
no_data: no_data value for the data array.
Returns:
Boolean mask signifying valid data.
| Create a boolean bit mask from a list of valid bits | def create_bit_mask(data_array, valid_bits, no_data=-9999):
"""Create a boolean bit mask from a list of valid bits
Args:
data_array: xarray data array to extract bit information for.
valid_bits: array of ints representing what bits should be considered valid.
no_data: no_data value for the data array.
Returns:
Boolean mask signifying valid data.
"""
assert isinstance(valid_bits, list) and isinstance(valid_bits[0], int), "Valid bits must be a list of integer bits"
# bitwise AND with the valid mask: zero means none of the requested bits are set (invalid); any nonzero result is truthy (valid)
valid_mask = sum([1 << valid_bit for valid_bit in valid_bits])
clean_mask = (data_array & valid_mask).astype('bool')
return clean_mask.values | [
"def",
"create_bit_mask",
"(",
"data_array",
",",
"valid_bits",
",",
"no_data",
"=",
"-",
"9999",
")",
":",
"assert",
"isinstance",
"(",
"valid_bits",
",",
"list",
")",
"and",
"isinstance",
"(",
"valid_bits",
"[",
"0",
"]",
",",
"int",
")",
",",
"\"Valid bits must be a list of integer bits\"",
"#do bitwise and on valid mask - all zeros means no intersection e.g. invalid else return a truthy value?",
"valid_mask",
"=",
"sum",
"(",
"[",
"1",
"<<",
"valid_bit",
"for",
"valid_bit",
"in",
"valid_bits",
"]",
")",
"clean_mask",
"=",
"(",
"data_array",
"&",
"valid_mask",
")",
".",
"astype",
"(",
"'bool'",
")",
"return",
"clean_mask",
".",
"values"
] | [
217,
0
] | [
233,
28
] | python | en | ['en', 'en', 'en'] | True |
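create_bit_mask flags a pixel as valid when it has any of the requested bits set; a toy example with hand-coded bit flags, assuming xarray/numpy and the function above (a real input would be a QA band such as Landsat pixel_qa).

import numpy as np
import xarray as xr

qa = xr.DataArray(np.array([0b00010, 0b00100, 0b10000]))   # integer bit flags
print(create_bit_mask(qa, valid_bits=[1, 2]))              # [ True  True False]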
add_timestamp_data_to_xr | (dataset) | Add timestamp data to an xarray dataset using the time dimension.
Adds both a timestamp and a human readable date int to a dataset - int32 format required.
modifies the dataset in place.
| Add timestamp data to an xarray dataset using the time dimension. | def add_timestamp_data_to_xr(dataset):
"""Add timestamp data to an xarray dataset using the time dimension.
Adds both a timestamp and a human readable date int to a dataset - int32 format required.
modifies the dataset in place.
"""
dims_data_var = list(dataset.data_vars)[0]
timestamp_data = np.full(dataset[dims_data_var].values.shape, 0, dtype="int32")
date_data = np.full(dataset[dims_data_var].values.shape, 0, dtype="int32")
for index, acq_date in enumerate(dataset.time.values.astype('M8[ms]').tolist()):
timestamp_data[index::] = acq_date.timestamp()
date_data[index::] = int(acq_date.strftime("%Y%m%d"))
dataset['timestamp'] = xr.DataArray(
timestamp_data,
dims=('time', 'latitude', 'longitude'),
coords={'latitude': dataset.latitude,
'longitude': dataset.longitude,
'time': dataset.time})
dataset['date'] = xr.DataArray(
date_data,
dims=('time', 'latitude', 'longitude'),
coords={'latitude': dataset.latitude,
'longitude': dataset.longitude,
'time': dataset.time}) | [
"def",
"add_timestamp_data_to_xr",
"(",
"dataset",
")",
":",
"dims_data_var",
"=",
"list",
"(",
"dataset",
".",
"data_vars",
")",
"[",
"0",
"]",
"timestamp_data",
"=",
"np",
".",
"full",
"(",
"dataset",
"[",
"dims_data_var",
"]",
".",
"values",
".",
"shape",
",",
"0",
",",
"dtype",
"=",
"\"int32\"",
")",
"date_data",
"=",
"np",
".",
"full",
"(",
"dataset",
"[",
"dims_data_var",
"]",
".",
"values",
".",
"shape",
",",
"0",
",",
"dtype",
"=",
"\"int32\"",
")",
"for",
"index",
",",
"acq_date",
"in",
"enumerate",
"(",
"dataset",
".",
"time",
".",
"values",
".",
"astype",
"(",
"'M8[ms]'",
")",
".",
"tolist",
"(",
")",
")",
":",
"timestamp_data",
"[",
"index",
":",
":",
"]",
"=",
"acq_date",
".",
"timestamp",
"(",
")",
"date_data",
"[",
"index",
":",
":",
"]",
"=",
"int",
"(",
"acq_date",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
")",
"dataset",
"[",
"'timestamp'",
"]",
"=",
"xr",
".",
"DataArray",
"(",
"timestamp_data",
",",
"dims",
"=",
"(",
"'time'",
",",
"'latitude'",
",",
"'longitude'",
")",
",",
"coords",
"=",
"{",
"'latitude'",
":",
"dataset",
".",
"latitude",
",",
"'longitude'",
":",
"dataset",
".",
"longitude",
",",
"'time'",
":",
"dataset",
".",
"time",
"}",
")",
"dataset",
"[",
"'date'",
"]",
"=",
"xr",
".",
"DataArray",
"(",
"date_data",
",",
"dims",
"=",
"(",
"'time'",
",",
"'latitude'",
",",
"'longitude'",
")",
",",
"coords",
"=",
"{",
"'latitude'",
":",
"dataset",
".",
"latitude",
",",
"'longitude'",
":",
"dataset",
".",
"longitude",
",",
"'time'",
":",
"dataset",
".",
"time",
"}",
")"
] | [
236,
0
] | [
261,
38
] | python | en | ['en', 'en', 'en'] | True |
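add_timestamp_data_to_xr modifies the dataset in place, adding int32 'timestamp' and 'date' variables shaped like the first data variable; a sketch, assuming the function above.

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {'red': (('time', 'latitude', 'longitude'), np.zeros((1, 2, 2), dtype='int16'))},
    coords={'time': [np.datetime64('2017-06-01')],
            'latitude': [0.0, 0.5], 'longitude': [10.0, 10.5]})

add_timestamp_data_to_xr(ds)
print(int(ds['date'][0, 0, 0]))   # 20170601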
write_geotiff_from_xr | (tif_path, dataset, bands, no_data=-9999, crs="EPSG:4326") | Write a geotiff from an xarray dataset.
Args:
tif_path: path for the tif to be written to.
dataset: xarray dataset
bands: list of strings representing the bands in the order they should be written
no_data: nodata value for the dataset
crs: requested crs.
| Write a geotiff from an xarray dataset. | def write_geotiff_from_xr(tif_path, dataset, bands, no_data=-9999, crs="EPSG:4326"):
"""Write a geotiff from an xarray dataset.
Args:
tif_path: path for the tif to be written to.
dataset: xarray dataset
bands: list of strings representing the bands in the order they should be written
no_data: nodata value for the dataset
crs: requested crs.
"""
assert isinstance(bands, list), "Bands must a list of strings"
assert len(bands) > 0 and isinstance(bands[0], str), "You must supply at least one band."
with rasterio.open(
tif_path,
'w',
driver='GTiff',
height=dataset.dims['latitude'],
width=dataset.dims['longitude'],
count=len(bands),
dtype=dataset[bands[0]].dtype,#str(dataset[bands[0]].dtype),
crs=crs,
transform=_get_transform_from_xr(dataset),
nodata=no_data) as dst:
for index, band in enumerate(bands):
dst.write(dataset[band].values, index + 1)
dst.close() | [
"def",
"write_geotiff_from_xr",
"(",
"tif_path",
",",
"dataset",
",",
"bands",
",",
"no_data",
"=",
"-",
"9999",
",",
"crs",
"=",
"\"EPSG:4326\"",
")",
":",
"assert",
"isinstance",
"(",
"bands",
",",
"list",
")",
",",
"\"Bands must a list of strings\"",
"assert",
"len",
"(",
"bands",
")",
">",
"0",
"and",
"isinstance",
"(",
"bands",
"[",
"0",
"]",
",",
"str",
")",
",",
"\"You must supply at least one band.\"",
"with",
"rasterio",
".",
"open",
"(",
"tif_path",
",",
"'w'",
",",
"driver",
"=",
"'GTiff'",
",",
"height",
"=",
"dataset",
".",
"dims",
"[",
"'latitude'",
"]",
",",
"width",
"=",
"dataset",
".",
"dims",
"[",
"'longitude'",
"]",
",",
"count",
"=",
"len",
"(",
"bands",
")",
",",
"dtype",
"=",
"dataset",
"[",
"bands",
"[",
"0",
"]",
"]",
".",
"dtype",
",",
"#str(dataset[bands[0]].dtype),",
"crs",
"=",
"crs",
",",
"transform",
"=",
"_get_transform_from_xr",
"(",
"dataset",
")",
",",
"nodata",
"=",
"no_data",
")",
"as",
"dst",
":",
"for",
"index",
",",
"band",
"in",
"enumerate",
"(",
"bands",
")",
":",
"dst",
".",
"write",
"(",
"dataset",
"[",
"band",
"]",
".",
"values",
",",
"index",
"+",
"1",
")",
"dst",
".",
"close",
"(",
")"
] | [
264,
0
] | [
290,
19
] | python | en | ['en', 'en', 'en'] | True |
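A hedged sketch of write_geotiff_from_xr, assuming rasterio is installed, the module's _get_transform_from_xr helper (a later row) is importable, and that writing to the hypothetical /tmp path is acceptable.

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {'red': (('latitude', 'longitude'), np.zeros((3, 3), dtype='int16'))},
    coords={'latitude': [0.2, 0.1, 0.0], 'longitude': [10.0, 10.1, 10.2]})

write_geotiff_from_xr('/tmp/example_red.tif', ds, ['red'])   # single-band GeoTIFF in EPSG:4326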
write_png_from_xr | (png_path, dataset, bands, png_filled_path=None, fill_color='red', scale=None, low_res=False,
no_data=-9999, crs="EPSG:4326") | Write a rgb png from an xarray dataset.
Args:
png_path: path for the png to be written to.
dataset: dataset to use for the png creation.
bands: a list of three strings representing the bands and their order
png_filled_path: optional png with no_data values filled
fill_color: color to use as the no_data fill
scale: desired scale - tuple like (0, 4000) for the upper and lower bounds
| Write a rgb png from an xarray dataset. | def write_png_from_xr(png_path, dataset, bands, png_filled_path=None, fill_color='red', scale=None, low_res=False,
no_data=-9999, crs="EPSG:4326"):
"""Write a rgb png from an xarray dataset.
Args:
png_path: path for the png to be written to.
dataset: dataset to use for the png creation.
bands: a list of three strings representing the bands and their order
png_filled_path: optional png with no_data values filled
fill_color: color to use as the no_data fill
scale: desired scale - tuple like (0, 4000) for the upper and lower bounds
"""
assert isinstance(bands, list), "Bands must a list of strings"
assert len(bands) == 3 and isinstance(bands[0], str), "You must supply three string bands for a PNG."
tif_path = os.path.join(os.path.dirname(png_path), str(uuid.uuid4()) + ".png")
write_geotiff_from_xr(tif_path, dataset, bands, no_data=no_data, crs=crs)
scale_string = ""
if scale is not None and len(scale) == 2:
scale_string = "-scale {} {} 0 255".format(scale[0], scale[1])
elif scale is not None and len(scale) == 3:
for index, scale_member in enumerate(scale):
scale_string += " -scale_{} {} {} 0 255".format(index + 1, scale_member[0], scale_member[1])
outsize_string = "-outsize 25% 25%" if low_res else ""
cmd = "gdal_translate -ot Byte " + outsize_string + " " + scale_string + " -of PNG -b 1 -b 2 -b 3 " + tif_path + ' ' + png_path
os.system(cmd)
if png_filled_path is not None and fill_color is not None:
cmd = "convert -transparent \"#000000\" " + png_path + " " + png_path
os.system(cmd)
cmd = "convert " + png_path + " -background " + \
fill_color + " -alpha remove " + png_filled_path
os.system(cmd)
os.remove(tif_path) | [
"def",
"write_png_from_xr",
"(",
"png_path",
",",
"dataset",
",",
"bands",
",",
"png_filled_path",
"=",
"None",
",",
"fill_color",
"=",
"'red'",
",",
"scale",
"=",
"None",
",",
"low_res",
"=",
"False",
",",
"no_data",
"=",
"-",
"9999",
",",
"crs",
"=",
"\"EPSG:4326\"",
")",
":",
"assert",
"isinstance",
"(",
"bands",
",",
"list",
")",
",",
"\"Bands must a list of strings\"",
"assert",
"len",
"(",
"bands",
")",
"==",
"3",
"and",
"isinstance",
"(",
"bands",
"[",
"0",
"]",
",",
"str",
")",
",",
"\"You must supply three string bands for a PNG.\"",
"tif_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"png_path",
")",
",",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"+",
"\".png\"",
")",
"write_geotiff_from_xr",
"(",
"tif_path",
",",
"dataset",
",",
"bands",
",",
"no_data",
"=",
"no_data",
",",
"crs",
"=",
"crs",
")",
"scale_string",
"=",
"\"\"",
"if",
"scale",
"is",
"not",
"None",
"and",
"len",
"(",
"scale",
")",
"==",
"2",
":",
"scale_string",
"=",
"\"-scale {} {} 0 255\"",
".",
"format",
"(",
"scale",
"[",
"0",
"]",
",",
"scale",
"[",
"1",
"]",
")",
"elif",
"scale",
"is",
"not",
"None",
"and",
"len",
"(",
"scale",
")",
"==",
"3",
":",
"for",
"index",
",",
"scale_member",
"in",
"enumerate",
"(",
"scale",
")",
":",
"scale_string",
"+=",
"\" -scale_{} {} {} 0 255\"",
".",
"format",
"(",
"index",
"+",
"1",
",",
"scale_member",
"[",
"0",
"]",
",",
"scale_member",
"[",
"1",
"]",
")",
"outsize_string",
"=",
"\"-outsize 25% 25%\"",
"if",
"low_res",
"else",
"\"\"",
"cmd",
"=",
"\"gdal_translate -ot Byte \"",
"+",
"outsize_string",
"+",
"\" \"",
"+",
"scale_string",
"+",
"\" -of PNG -b 1 -b 2 -b 3 \"",
"+",
"tif_path",
"+",
"' '",
"+",
"png_path",
"os",
".",
"system",
"(",
"cmd",
")",
"if",
"png_filled_path",
"is",
"not",
"None",
"and",
"fill_color",
"is",
"not",
"None",
":",
"cmd",
"=",
"\"convert -transparent \\\"#000000\\\" \"",
"+",
"png_path",
"+",
"\" \"",
"+",
"png_path",
"os",
".",
"system",
"(",
"cmd",
")",
"cmd",
"=",
"\"convert \"",
"+",
"png_path",
"+",
"\" -background \"",
"+",
"fill_color",
"+",
"\" -alpha remove \"",
"+",
"png_filled_path",
"os",
".",
"system",
"(",
"cmd",
")",
"os",
".",
"remove",
"(",
"tif_path",
")"
] | [
293,
0
] | [
330,
23
] | python | en | ['en', 'en', 'en'] | True |
write_single_band_png_from_xr | (png_path, dataset, band, color_scale=None, fill_color=None, interpolate=True,
no_data=-9999, crs="EPSG:4326") | Write a pseudocolor png from an xarray dataset.
Args:
png_path: path for the png to be written to.
dataset: dataset to use for the png creation.
band: The band to write to a png
png_filled_path: optional png with no_data values filled
fill_color: color to use as the no_data fill
color_scale: path to a color scale compatible with gdal.
| Write a pseudocolor png from an xarray dataset. | def write_single_band_png_from_xr(png_path, dataset, band, color_scale=None, fill_color=None, interpolate=True,
no_data=-9999, crs="EPSG:4326"):
"""Write a pseudocolor png from an xarray dataset.
Args:
png_path: path for the png to be written to.
dataset: dataset to use for the png creation.
band: The band to write to a png
png_filled_path: optional png with no_data values filled
fill_color: color to use as the no_data fill
color_scale: path to a color scale compatible with gdal.
"""
assert os.path.exists(color_scale), "Color scale must be a path to a text file containing a gdal compatible scale."
assert isinstance(band, str), "Band must be a string."
tif_path = os.path.join(os.path.dirname(png_path), str(uuid.uuid4()) + ".png")
write_geotiff_from_xr(tif_path, dataset, [band], no_data=no_data, crs=crs)
cmd = "gdaldem color-relief -of PNG -b 1 " + tif_path + " " + \
color_scale + " " + png_path
os.system(cmd)
if fill_color is not None:
cmd = "convert -transparent \"#FFFFFF\" " + \
png_path + " " + png_path
os.system(cmd)
if fill_color is not None and fill_color != "transparent":
cmd = "convert " + png_path + " -background " + \
fill_color + " -alpha remove " + png_path
os.system(cmd)
os.remove(tif_path) | [
"def",
"write_single_band_png_from_xr",
"(",
"png_path",
",",
"dataset",
",",
"band",
",",
"color_scale",
"=",
"None",
",",
"fill_color",
"=",
"None",
",",
"interpolate",
"=",
"True",
",",
"no_data",
"=",
"-",
"9999",
",",
"crs",
"=",
"\"EPSG:4326\"",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"color_scale",
")",
",",
"\"Color scale must be a path to a text file containing a gdal compatible scale.\"",
"assert",
"isinstance",
"(",
"band",
",",
"str",
")",
",",
"\"Band must be a string.\"",
"tif_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"png_path",
")",
",",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"+",
"\".png\"",
")",
"write_geotiff_from_xr",
"(",
"tif_path",
",",
"dataset",
",",
"[",
"band",
"]",
",",
"no_data",
"=",
"no_data",
",",
"crs",
"=",
"crs",
")",
"cmd",
"=",
"\"gdaldem color-relief -of PNG -b 1 \"",
"+",
"tif_path",
"+",
"\" \"",
"+",
"color_scale",
"+",
"\" \"",
"+",
"png_path",
"os",
".",
"system",
"(",
"cmd",
")",
"if",
"fill_color",
"is",
"not",
"None",
":",
"cmd",
"=",
"\"convert -transparent \\\"#FFFFFF\\\" \"",
"+",
"png_path",
"+",
"\" \"",
"+",
"png_path",
"os",
".",
"system",
"(",
"cmd",
")",
"if",
"fill_color",
"is",
"not",
"None",
"and",
"fill_color",
"!=",
"\"transparent\"",
":",
"cmd",
"=",
"\"convert \"",
"+",
"png_path",
"+",
"\" -background \"",
"+",
"fill_color",
"+",
"\" -alpha remove \"",
"+",
"png_path",
"os",
".",
"system",
"(",
"cmd",
")",
"os",
".",
"remove",
"(",
"tif_path",
")"
] | [
333,
0
] | [
365,
23
] | python | en | ['en', 'en', 'en'] | True |
_get_transform_from_xr | (dataset) | Create a geotransform from an xarray dataset.
| Create a geotransform from an xarray dataset.
| def _get_transform_from_xr(dataset):
"""Create a geotransform from an xarray dataset.
"""
from rasterio.transform import from_bounds
geotransform = from_bounds(dataset.longitude[0], dataset.latitude[-1], dataset.longitude[-1], dataset.latitude[0],
len(dataset.longitude), len(dataset.latitude))
return geotransform | [
"def",
"_get_transform_from_xr",
"(",
"dataset",
")",
":",
"from",
"rasterio",
".",
"transform",
"import",
"from_bounds",
"geotransform",
"=",
"from_bounds",
"(",
"dataset",
".",
"longitude",
"[",
"0",
"]",
",",
"dataset",
".",
"latitude",
"[",
"-",
"1",
"]",
",",
"dataset",
".",
"longitude",
"[",
"-",
"1",
"]",
",",
"dataset",
".",
"latitude",
"[",
"0",
"]",
",",
"len",
"(",
"dataset",
".",
"longitude",
")",
",",
"len",
"(",
"dataset",
".",
"latitude",
")",
")",
"return",
"geotransform"
] | [
367,
0
] | [
374,
23
] | python | en | ['en', 'lb', 'en'] | True |
ignore_warnings | (func, *args, **kwargs) | Runs a function while ignoring warnings | Runs a function while ignoring warnings | def ignore_warnings(func, *args, **kwargs):
"""Runs a function while ignoring warnings"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ret = func(*args, **kwargs)
return ret | [
"def",
"ignore_warnings",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"ret",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"ret"
] | [
382,
0
] | [
387,
14
] | python | en | ['en', 'en', 'en'] | True |
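ignore_warnings simply wraps a call in a suppressed-warnings context; for example, assuming numpy and the function above.

import numpy as np

value = ignore_warnings(np.arcsin, 2.0)   # np.arcsin(2.0) would normally emit a RuntimeWarning
print(value)                              # nan, with the warning suppressed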