Dataset columns (each record below lists its fields in this order):

identifier                       string, length 1 to 155
parameters                       string, length 2 to 6.09k
docstring                        string, length 11 to 63.4k
docstring_summary                string, length 0 to 63.4k
function                         string, length 29 to 99.8k
function_tokens                  sequence
start_point                      sequence
end_point                        sequence
language                         string, 1 distinct value
docstring_language               string, length 2 to 7
docstring_language_predictions   string, length 18 to 23
is_langid_reliable               string, 2 distinct values
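For working with records in this schema, the sketch below shows one way to load and filter such a corpus with the Hugging Face datasets library. It is a minimal example under stated assumptions: the identifier "your-org/code-docstring-corpus" is a hypothetical placeholder (the real dataset name is not given here), and only the column names come from the schema above.

# Minimal sketch: load a dataset with the columns listed above and filter it.
# "your-org/code-docstring-corpus" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("your-org/code-docstring-corpus", split="train")

# is_langid_reliable is stored as a string class ("True"/"False"), so compare
# against the string rather than a boolean.
reliable = ds.filter(lambda row: row["is_langid_reliable"] == "True")

# Inspect one record: which function it is and where it sits in its source file.
row = reliable[0]
print(row["identifier"], row["parameters"])
print(row["language"], "span:", row["start_point"], "->", row["end_point"])

Filtering on docstring_language or language works the same way, since both are plain string columns.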
DBReporting.addFailure
(self, test, err, capt=None, tbinfo=None)
After a test failure, we want to record testcase run information.
After a test failure, we want to record testcase run information.
def addFailure(self, test, err, capt=None, tbinfo=None): """ After a test failure, we want to record testcase run information. """ self.__insert_test_result(constants.State.FAILURE, test, err)
[ "def", "addFailure", "(", "self", ",", "test", ",", "err", ",", "capt", "=", "None", ",", "tbinfo", "=", "None", ")", ":", "self", ".", "__insert_test_result", "(", "constants", ".", "State", ".", "FAILURE", ",", "test", ",", "err", ")" ]
[ 128, 4 ]
[ 132, 69 ]
python
en
['en', 'error', 'th']
False
_waitid_system_task
(pid: int, event: Event)
Spawn a thread that waits for ``pid`` to exit, then wake any tasks that were waiting on it.
Spawn a thread that waits for ``pid`` to exit, then wake any tasks that were waiting on it.
async def _waitid_system_task(pid: int, event: Event) -> None: """Spawn a thread that waits for ``pid`` to exit, then wake any tasks that were waiting on it. """ # cancellable=True: if this task is cancelled, then we abandon the # thread to keep running waitpid in the background. Since this is # always run as a system task, this will only happen if the whole # call to trio.run is shutting down. try: await to_thread_run_sync( sync_wait_reapable, pid, cancellable=True, limiter=waitid_limiter ) except OSError: # If waitid fails, waitpid will fail too, so it still makes # sense to wake up the callers of wait_process_exiting(). The # most likely reason for this error in practice is a child # exiting when wait() is not possible because SIGCHLD is # ignored. pass finally: event.set()
[ "async", "def", "_waitid_system_task", "(", "pid", ":", "int", ",", "event", ":", "Event", ")", "->", "None", ":", "# cancellable=True: if this task is cancelled, then we abandon the", "# thread to keep running waitpid in the background. Since this is", "# always run as a system task, this will only happen if the whole", "# call to trio.run is shutting down.", "try", ":", "await", "to_thread_run_sync", "(", "sync_wait_reapable", ",", "pid", ",", "cancellable", "=", "True", ",", "limiter", "=", "waitid_limiter", ")", "except", "OSError", ":", "# If waitid fails, waitpid will fail too, so it still makes", "# sense to wake up the callers of wait_process_exiting(). The", "# most likely reason for this error in practice is a child", "# exiting when wait() is not possible because SIGCHLD is", "# ignored.", "pass", "finally", ":", "event", ".", "set", "(", ")" ]
[ 68, 0 ]
[ 89, 19 ]
python
en
['en', 'en', 'en']
True
TLSContextTest._go_close_connection_error
(url)
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url)
[ "def", "_go_close_connection_error", "(", "url", ")", ":", "return", "\"Get {}: EOF\"", ".", "format", "(", "url", ")" ]
[ 615, 4 ]
[ 620, 40 ]
python
en
['en', 'error', 'th']
False
TLSIngressTest._go_close_connection_error
(url)
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url)
[ "def", "_go_close_connection_error", "(", "url", ")", ":", "return", "\"Get {}: EOF\"", ".", "format", "(", "url", ")" ]
[ 863, 4 ]
[ 868, 40 ]
python
en
['en', 'error', 'th']
False
TLSContextProtocolMaxVersion._go_close_connection_error
(url)
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url)
[ "def", "_go_close_connection_error", "(", "url", ")", ":", "return", "\"Get {}: EOF\"", ".", "format", "(", "url", ")" ]
[ 1032, 4 ]
[ 1037, 40 ]
python
en
['en', 'error', 'th']
False
TLSContextProtocolMinVersion._go_close_connection_error
(url)
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url)
[ "def", "_go_close_connection_error", "(", "url", ")", ":", "return", "\"Get {}: EOF\"", ".", "format", "(", "url", ")" ]
[ 1144, 4 ]
[ 1149, 40 ]
python
en
['en', 'error', 'th']
False
TLSContextCipherSuites._go_close_connection_error
(url)
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url)
[ "def", "_go_close_connection_error", "(", "url", ")", ":", "return", "\"Get {}: EOF\"", ".", "format", "(", "url", ")" ]
[ 1247, 4 ]
[ 1252, 40 ]
python
en
['en', 'error', 'th']
False
TLSCoalescing._go_close_connection_error
(url)
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
:param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection
def _go_close_connection_error(url): """ :param url: url passed to the query :return: error message string that Go's net/http package throws when server closes connection """ return "Get {}: EOF".format(url)
[ "def", "_go_close_connection_error", "(", "url", ")", ":", "return", "\"Get {}: EOF\"", ".", "format", "(", "url", ")" ]
[ 1384, 4 ]
[ 1389, 40 ]
python
en
['en', 'error', 'th']
False
_ToGypPath
(path)
Converts a path to the format used by gyp.
Converts a path to the format used by gyp.
def _ToGypPath(path): """Converts a path to the format used by gyp.""" if os.sep == '\\' and os.altsep == '/': return path.replace('\\', '/') return path
[ "def", "_ToGypPath", "(", "path", ")", ":", "if", "os", ".", "sep", "==", "'\\\\'", "and", "os", ".", "altsep", "==", "'/'", ":", "return", "path", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "return", "path" ]
[ 111, 0 ]
[ 115, 13 ]
python
en
['en', 'en', 'en']
True
_ResolveParent
(path, base_path_components)
Resolves |path|, which starts with at least one '../'. Returns an empty string if the path shouldn't be considered. See _AddSources() for a description of |base_path_components|.
Resolves |path|, which starts with at least one '../'. Returns an empty string if the path shouldn't be considered. See _AddSources() for a description of |base_path_components|.
def _ResolveParent(path, base_path_components): """Resolves |path|, which starts with at least one '../'. Returns an empty string if the path shouldn't be considered. See _AddSources() for a description of |base_path_components|.""" depth = 0 while path.startswith('../'): depth += 1 path = path[3:] # Relative includes may go outside the source tree. For example, an action may # have inputs in /usr/include, which are not in the source tree. if depth > len(base_path_components): return '' if depth == len(base_path_components): return path return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \ '/' + path
[ "def", "_ResolveParent", "(", "path", ",", "base_path_components", ")", ":", "depth", "=", "0", "while", "path", ".", "startswith", "(", "'../'", ")", ":", "depth", "+=", "1", "path", "=", "path", "[", "3", ":", "]", "# Relative includes may go outside the source tree. For example, an action may", "# have inputs in /usr/include, which are not in the source tree.", "if", "depth", ">", "len", "(", "base_path_components", ")", ":", "return", "''", "if", "depth", "==", "len", "(", "base_path_components", ")", ":", "return", "path", "return", "'/'", ".", "join", "(", "base_path_components", "[", "0", ":", "len", "(", "base_path_components", ")", "-", "depth", "]", ")", "+", "'/'", "+", "path" ]
[ 118, 0 ]
[ 133, 16 ]
python
en
['en', 'en', 'en']
True
_AddSources
(sources, base_path, base_path_components, result)
Extracts valid sources from |sources| and adds them to |result|. Each source file is relative to |base_path|, but may contain '..'. To make resolving '..' easier |base_path_components| contains each of the directories in |base_path|. Additionally each source may contain variables. Such sources are ignored as it is assumed dependencies on them are expressed and tracked in some other means.
Extracts valid sources from |sources| and adds them to |result|. Each source file is relative to |base_path|, but may contain '..'. To make resolving '..' easier |base_path_components| contains each of the directories in |base_path|. Additionally each source may contain variables. Such sources are ignored as it is assumed dependencies on them are expressed and tracked in some other means.
def _AddSources(sources, base_path, base_path_components, result): """Extracts valid sources from |sources| and adds them to |result|. Each source file is relative to |base_path|, but may contain '..'. To make resolving '..' easier |base_path_components| contains each of the directories in |base_path|. Additionally each source may contain variables. Such sources are ignored as it is assumed dependencies on them are expressed and tracked in some other means.""" # NOTE: gyp paths are always posix style. for source in sources: if not len(source) or source.startswith('!!!') or source.startswith('$'): continue # variable expansion may lead to //. org_source = source source = source[0] + source[1:].replace('//', '/') if source.startswith('../'): source = _ResolveParent(source, base_path_components) if len(source): result.append(source) continue result.append(base_path + source) if debug: print 'AddSource', org_source, result[len(result) - 1]
[ "def", "_AddSources", "(", "sources", ",", "base_path", ",", "base_path_components", ",", "result", ")", ":", "# NOTE: gyp paths are always posix style.", "for", "source", "in", "sources", ":", "if", "not", "len", "(", "source", ")", "or", "source", ".", "startswith", "(", "'!!!'", ")", "or", "source", ".", "startswith", "(", "'$'", ")", ":", "continue", "# variable expansion may lead to //.", "org_source", "=", "source", "source", "=", "source", "[", "0", "]", "+", "source", "[", "1", ":", "]", ".", "replace", "(", "'//'", ",", "'/'", ")", "if", "source", ".", "startswith", "(", "'../'", ")", ":", "source", "=", "_ResolveParent", "(", "source", ",", "base_path_components", ")", "if", "len", "(", "source", ")", ":", "result", ".", "append", "(", "source", ")", "continue", "result", ".", "append", "(", "base_path", "+", "source", ")", "if", "debug", ":", "print", "'AddSource'", ",", "org_source", ",", "result", "[", "len", "(", "result", ")", "-", "1", "]" ]
[ 136, 0 ]
[ 157, 60 ]
python
en
['en', 'en', 'en']
True
_ToLocalPath
(toplevel_dir, path)
Converts |path| to a path relative to |toplevel_dir|.
Converts |path| to a path relative to |toplevel_dir|.
def _ToLocalPath(toplevel_dir, path): """Converts |path| to a path relative to |toplevel_dir|.""" if path == toplevel_dir: return '' if path.startswith(toplevel_dir + '/'): return path[len(toplevel_dir) + len('/'):] return path
[ "def", "_ToLocalPath", "(", "toplevel_dir", ",", "path", ")", ":", "if", "path", "==", "toplevel_dir", ":", "return", "''", "if", "path", ".", "startswith", "(", "toplevel_dir", "+", "'/'", ")", ":", "return", "path", "[", "len", "(", "toplevel_dir", ")", "+", "len", "(", "'/'", ")", ":", "]", "return", "path" ]
[ 166, 0 ]
[ 172, 13 ]
python
en
['en', 'en', 'en']
True
_WasBuildFileModified
(build_file, data, files, toplevel_dir)
Returns true if the build file |build_file| is either in |files| or one of the files included by |build_file| is in |files|. |toplevel_dir| is the root of the source tree.
Returns true if the build file |build_file| is either in |files| or one of the files included by |build_file| is in |files|. |toplevel_dir| is the root of the source tree.
def _WasBuildFileModified(build_file, data, files, toplevel_dir): """Returns true if the build file |build_file| is either in |files| or one of the files included by |build_file| is in |files|. |toplevel_dir| is the root of the source tree.""" if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files: if debug: print 'gyp file modified', build_file return True # First element of included_files is the file itself. if len(data[build_file]['included_files']) <= 1: return False for include_file in data[build_file]['included_files'][1:]: # |included_files| are relative to the directory of the |build_file|. rel_include_file = \ _ToGypPath(gyp.common.UnrelativePath(include_file, build_file)) if _ToLocalPath(toplevel_dir, rel_include_file) in files: if debug: print 'included gyp file modified, gyp_file=', build_file, \ 'included file=', rel_include_file return True return False
[ "def", "_WasBuildFileModified", "(", "build_file", ",", "data", ",", "files", ",", "toplevel_dir", ")", ":", "if", "_ToLocalPath", "(", "toplevel_dir", ",", "_ToGypPath", "(", "build_file", ")", ")", "in", "files", ":", "if", "debug", ":", "print", "'gyp file modified'", ",", "build_file", "return", "True", "# First element of included_files is the file itself.", "if", "len", "(", "data", "[", "build_file", "]", "[", "'included_files'", "]", ")", "<=", "1", ":", "return", "False", "for", "include_file", "in", "data", "[", "build_file", "]", "[", "'included_files'", "]", "[", "1", ":", "]", ":", "# |included_files| are relative to the directory of the |build_file|.", "rel_include_file", "=", "_ToGypPath", "(", "gyp", ".", "common", ".", "UnrelativePath", "(", "include_file", ",", "build_file", ")", ")", "if", "_ToLocalPath", "(", "toplevel_dir", ",", "rel_include_file", ")", "in", "files", ":", "if", "debug", ":", "print", "'included gyp file modified, gyp_file='", ",", "build_file", ",", "'included file='", ",", "rel_include_file", "return", "True", "return", "False" ]
[ 274, 0 ]
[ 296, 14 ]
python
en
['en', 'en', 'en']
True
_GetOrCreateTargetByName
(targets, target_name)
Creates or returns the Target at targets[target_name]. If there is no Target for |target_name| one is created. Returns a tuple of whether a new Target was created and the Target.
Creates or returns the Target at targets[target_name]. If there is no Target for |target_name| one is created. Returns a tuple of whether a new Target was created and the Target.
def _GetOrCreateTargetByName(targets, target_name): """Creates or returns the Target at targets[target_name]. If there is no Target for |target_name| one is created. Returns a tuple of whether a new Target was created and the Target.""" if target_name in targets: return False, targets[target_name] target = Target(target_name) targets[target_name] = target return True, target
[ "def", "_GetOrCreateTargetByName", "(", "targets", ",", "target_name", ")", ":", "if", "target_name", "in", "targets", ":", "return", "False", ",", "targets", "[", "target_name", "]", "target", "=", "Target", "(", "target_name", ")", "targets", "[", "target_name", "]", "=", "target", "return", "True", ",", "target" ]
[ 299, 0 ]
[ 307, 21 ]
python
en
['en', 'en', 'en']
True
_DoesTargetTypeRequireBuild
(target_dict)
Returns true if the target type is such that it needs to be built.
Returns true if the target type is such that it needs to be built.
def _DoesTargetTypeRequireBuild(target_dict): """Returns true if the target type is such that it needs to be built.""" # If a 'none' target has rules or actions we assume it requires a build. return bool(target_dict['type'] != 'none' or target_dict.get('actions') or target_dict.get('rules'))
[ "def", "_DoesTargetTypeRequireBuild", "(", "target_dict", ")", ":", "# If a 'none' target has rules or actions we assume it requires a build.", "return", "bool", "(", "target_dict", "[", "'type'", "]", "!=", "'none'", "or", "target_dict", ".", "get", "(", "'actions'", ")", "or", "target_dict", ".", "get", "(", "'rules'", ")", ")" ]
[ 310, 0 ]
[ 314, 69 ]
python
en
['en', 'en', 'en']
True
_GenerateTargets
(data, target_list, target_dicts, toplevel_dir, files, build_files)
Returns a tuple of the following: . A dictionary mapping from fully qualified name to Target. . A list of the targets that have a source file in |files|. . Targets that constitute the 'all' target. See description at top of file for details on the 'all' target. This sets the |match_status| of the targets that contain any of the source files in |files| to MATCH_STATUS_MATCHES. |toplevel_dir| is the root of the source tree.
Returns a tuple of the following: . A dictionary mapping from fully qualified name to Target. . A list of the targets that have a source file in |files|. . Targets that constitute the 'all' target. See description at top of file for details on the 'all' target. This sets the |match_status| of the targets that contain any of the source files in |files| to MATCH_STATUS_MATCHES. |toplevel_dir| is the root of the source tree.
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files, build_files): """Returns a tuple of the following: . A dictionary mapping from fully qualified name to Target. . A list of the targets that have a source file in |files|. . Targets that constitute the 'all' target. See description at top of file for details on the 'all' target. This sets the |match_status| of the targets that contain any of the source files in |files| to MATCH_STATUS_MATCHES. |toplevel_dir| is the root of the source tree.""" # Maps from target name to Target. name_to_target = {} # Targets that matched. matching_targets = [] # Queue of targets to visit. targets_to_visit = target_list[:] # Maps from build file to a boolean indicating whether the build file is in # |files|. build_file_in_files = {} # Root targets across all files. roots = set() # Set of Targets in |build_files|. build_file_targets = set() while len(targets_to_visit) > 0: target_name = targets_to_visit.pop() created_target, target = _GetOrCreateTargetByName(name_to_target, target_name) if created_target: roots.add(target) elif target.visited: continue target.visited = True target.requires_build = _DoesTargetTypeRequireBuild( target_dicts[target_name]) target_type = target_dicts[target_name]['type'] target.is_executable = target_type == 'executable' target.is_static_library = target_type == 'static_library' target.is_or_has_linked_ancestor = (target_type == 'executable' or target_type == 'shared_library') build_file = gyp.common.ParseQualifiedTarget(target_name)[0] if not build_file in build_file_in_files: build_file_in_files[build_file] = \ _WasBuildFileModified(build_file, data, files, toplevel_dir) if build_file in build_files: build_file_targets.add(target) # If a build file (or any of its included files) is modified we assume all # targets in the file are modified. if build_file_in_files[build_file]: print 'matching target from modified build file', target_name target.match_status = MATCH_STATUS_MATCHES matching_targets.append(target) else: sources = _ExtractSources(target_name, target_dicts[target_name], toplevel_dir) for source in sources: if _ToGypPath(os.path.normpath(source)) in files: print 'target', target_name, 'matches', source target.match_status = MATCH_STATUS_MATCHES matching_targets.append(target) break # Add dependencies to visit as well as updating back pointers for deps. for dep in target_dicts[target_name].get('dependencies', []): targets_to_visit.append(dep) created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target, dep) if not created_dep_target: roots.discard(dep_target) target.deps.add(dep_target) dep_target.back_deps.add(target) return name_to_target, matching_targets, roots & build_file_targets
[ "def", "_GenerateTargets", "(", "data", ",", "target_list", ",", "target_dicts", ",", "toplevel_dir", ",", "files", ",", "build_files", ")", ":", "# Maps from target name to Target.", "name_to_target", "=", "{", "}", "# Targets that matched.", "matching_targets", "=", "[", "]", "# Queue of targets to visit.", "targets_to_visit", "=", "target_list", "[", ":", "]", "# Maps from build file to a boolean indicating whether the build file is in", "# |files|.", "build_file_in_files", "=", "{", "}", "# Root targets across all files.", "roots", "=", "set", "(", ")", "# Set of Targets in |build_files|.", "build_file_targets", "=", "set", "(", ")", "while", "len", "(", "targets_to_visit", ")", ">", "0", ":", "target_name", "=", "targets_to_visit", ".", "pop", "(", ")", "created_target", ",", "target", "=", "_GetOrCreateTargetByName", "(", "name_to_target", ",", "target_name", ")", "if", "created_target", ":", "roots", ".", "add", "(", "target", ")", "elif", "target", ".", "visited", ":", "continue", "target", ".", "visited", "=", "True", "target", ".", "requires_build", "=", "_DoesTargetTypeRequireBuild", "(", "target_dicts", "[", "target_name", "]", ")", "target_type", "=", "target_dicts", "[", "target_name", "]", "[", "'type'", "]", "target", ".", "is_executable", "=", "target_type", "==", "'executable'", "target", ".", "is_static_library", "=", "target_type", "==", "'static_library'", "target", ".", "is_or_has_linked_ancestor", "=", "(", "target_type", "==", "'executable'", "or", "target_type", "==", "'shared_library'", ")", "build_file", "=", "gyp", ".", "common", ".", "ParseQualifiedTarget", "(", "target_name", ")", "[", "0", "]", "if", "not", "build_file", "in", "build_file_in_files", ":", "build_file_in_files", "[", "build_file", "]", "=", "_WasBuildFileModified", "(", "build_file", ",", "data", ",", "files", ",", "toplevel_dir", ")", "if", "build_file", "in", "build_files", ":", "build_file_targets", ".", "add", "(", "target", ")", "# If a build file (or any of its included files) is modified we assume all", "# targets in the file are modified.", "if", "build_file_in_files", "[", "build_file", "]", ":", "print", "'matching target from modified build file'", ",", "target_name", "target", ".", "match_status", "=", "MATCH_STATUS_MATCHES", "matching_targets", ".", "append", "(", "target", ")", "else", ":", "sources", "=", "_ExtractSources", "(", "target_name", ",", "target_dicts", "[", "target_name", "]", ",", "toplevel_dir", ")", "for", "source", "in", "sources", ":", "if", "_ToGypPath", "(", "os", ".", "path", ".", "normpath", "(", "source", ")", ")", "in", "files", ":", "print", "'target'", ",", "target_name", ",", "'matches'", ",", "source", "target", ".", "match_status", "=", "MATCH_STATUS_MATCHES", "matching_targets", ".", "append", "(", "target", ")", "break", "# Add dependencies to visit as well as updating back pointers for deps.", "for", "dep", "in", "target_dicts", "[", "target_name", "]", ".", "get", "(", "'dependencies'", ",", "[", "]", ")", ":", "targets_to_visit", ".", "append", "(", "dep", ")", "created_dep_target", ",", "dep_target", "=", "_GetOrCreateTargetByName", "(", "name_to_target", ",", "dep", ")", "if", "not", "created_dep_target", ":", "roots", ".", "discard", "(", "dep_target", ")", "target", ".", "deps", ".", "add", "(", "dep_target", ")", "dep_target", ".", "back_deps", ".", "add", "(", "target", ")", "return", "name_to_target", ",", "matching_targets", ",", "roots", "&", "build_file_targets" ]
[ 317, 0 ]
[ 400, 69 ]
python
en
['en', 'en', 'en']
True
_GetUnqualifiedToTargetMapping
(all_targets, to_find)
Returns a tuple of the following: . mapping (dictionary) from unqualified name to Target for all the Targets in |to_find|. . any target names not found. If this is empty all targets were found.
Returns a tuple of the following: . mapping (dictionary) from unqualified name to Target for all the Targets in |to_find|. . any target names not found. If this is empty all targets were found.
def _GetUnqualifiedToTargetMapping(all_targets, to_find): """Returns a tuple of the following: . mapping (dictionary) from unqualified name to Target for all the Targets in |to_find|. . any target names not found. If this is empty all targets were found.""" result = {} if not to_find: return {}, [] to_find = set(to_find) for target_name in all_targets.keys(): extracted = gyp.common.ParseQualifiedTarget(target_name) if len(extracted) > 1 and extracted[1] in to_find: to_find.remove(extracted[1]) result[extracted[1]] = all_targets[target_name] if not to_find: return result, [] return result, [x for x in to_find]
[ "def", "_GetUnqualifiedToTargetMapping", "(", "all_targets", ",", "to_find", ")", ":", "result", "=", "{", "}", "if", "not", "to_find", ":", "return", "{", "}", ",", "[", "]", "to_find", "=", "set", "(", "to_find", ")", "for", "target_name", "in", "all_targets", ".", "keys", "(", ")", ":", "extracted", "=", "gyp", ".", "common", ".", "ParseQualifiedTarget", "(", "target_name", ")", "if", "len", "(", "extracted", ")", ">", "1", "and", "extracted", "[", "1", "]", "in", "to_find", ":", "to_find", ".", "remove", "(", "extracted", "[", "1", "]", ")", "result", "[", "extracted", "[", "1", "]", "]", "=", "all_targets", "[", "target_name", "]", "if", "not", "to_find", ":", "return", "result", ",", "[", "]", "return", "result", ",", "[", "x", "for", "x", "in", "to_find", "]" ]
[ 403, 0 ]
[ 419, 37 ]
python
en
['en', 'en', 'en']
True
_DoesTargetDependOnMatchingTargets
(target)
Returns true if |target| or any of its dependencies is one of the targets containing the files supplied as input to analyzer. This updates |matches| of the Targets as it recurses. target: the Target to look for.
Returns true if |target| or any of its dependencies is one of the targets containing the files supplied as input to analyzer. This updates |matches| of the Targets as it recurses. target: the Target to look for.
def _DoesTargetDependOnMatchingTargets(target): """Returns true if |target| or any of its dependencies is one of the targets containing the files supplied as input to analyzer. This updates |matches| of the Targets as it recurses. target: the Target to look for.""" if target.match_status == MATCH_STATUS_DOESNT_MATCH: return False if target.match_status == MATCH_STATUS_MATCHES or \ target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY: return True for dep in target.deps: if _DoesTargetDependOnMatchingTargets(dep): target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY print '\t', target.name, 'matches by dep', dep.name return True target.match_status = MATCH_STATUS_DOESNT_MATCH return False
[ "def", "_DoesTargetDependOnMatchingTargets", "(", "target", ")", ":", "if", "target", ".", "match_status", "==", "MATCH_STATUS_DOESNT_MATCH", ":", "return", "False", "if", "target", ".", "match_status", "==", "MATCH_STATUS_MATCHES", "or", "target", ".", "match_status", "==", "MATCH_STATUS_MATCHES_BY_DEPENDENCY", ":", "return", "True", "for", "dep", "in", "target", ".", "deps", ":", "if", "_DoesTargetDependOnMatchingTargets", "(", "dep", ")", ":", "target", ".", "match_status", "=", "MATCH_STATUS_MATCHES_BY_DEPENDENCY", "print", "'\\t'", ",", "target", ".", "name", ",", "'matches by dep'", ",", "dep", ".", "name", "return", "True", "target", ".", "match_status", "=", "MATCH_STATUS_DOESNT_MATCH", "return", "False" ]
[ 422, 0 ]
[ 438, 14 ]
python
en
['en', 'en', 'en']
True
_GetTargetsDependingOnMatchingTargets
(possible_targets)
Returns the list of Targets in |possible_targets| that depend (either directly on indirectly) on at least one of the targets containing the files supplied as input to analyzer. possible_targets: targets to search from.
Returns the list of Targets in |possible_targets| that depend (either directly on indirectly) on at least one of the targets containing the files supplied as input to analyzer. possible_targets: targets to search from.
def _GetTargetsDependingOnMatchingTargets(possible_targets): """Returns the list of Targets in |possible_targets| that depend (either directly on indirectly) on at least one of the targets containing the files supplied as input to analyzer. possible_targets: targets to search from.""" found = [] print 'Targets that matched by dependency:' for target in possible_targets: if _DoesTargetDependOnMatchingTargets(target): found.append(target) return found
[ "def", "_GetTargetsDependingOnMatchingTargets", "(", "possible_targets", ")", ":", "found", "=", "[", "]", "print", "'Targets that matched by dependency:'", "for", "target", "in", "possible_targets", ":", "if", "_DoesTargetDependOnMatchingTargets", "(", "target", ")", ":", "found", ".", "append", "(", "target", ")", "return", "found" ]
[ 441, 0 ]
[ 451, 14 ]
python
en
['en', 'en', 'en']
True
_AddCompileTargets
(target, roots, add_if_no_ancestor, result)
Recurses through all targets that depend on |target|, adding all targets that need to be built (and are in |roots|) to |result|. roots: set of root targets. add_if_no_ancestor: If true and there are no ancestors of |target| then add |target| to |result|. |target| must still be in |roots|. result: targets that need to be built are added here.
Recurses through all targets that depend on |target|, adding all targets that need to be built (and are in |roots|) to |result|. roots: set of root targets. add_if_no_ancestor: If true and there are no ancestors of |target| then add |target| to |result|. |target| must still be in |roots|. result: targets that need to be built are added here.
def _AddCompileTargets(target, roots, add_if_no_ancestor, result): """Recurses through all targets that depend on |target|, adding all targets that need to be built (and are in |roots|) to |result|. roots: set of root targets. add_if_no_ancestor: If true and there are no ancestors of |target| then add |target| to |result|. |target| must still be in |roots|. result: targets that need to be built are added here.""" if target.visited: return target.visited = True target.in_roots = target in roots for back_dep_target in target.back_deps: _AddCompileTargets(back_dep_target, roots, False, result) target.added_to_compile_targets |= back_dep_target.added_to_compile_targets target.in_roots |= back_dep_target.in_roots target.is_or_has_linked_ancestor |= ( back_dep_target.is_or_has_linked_ancestor) # Always add 'executable' targets. Even though they may be built by other # targets that depend upon them it makes detection of what is going to be # built easier. # And always add static_libraries that have no dependencies on them from # linkables. This is necessary as the other dependencies on them may be # static libraries themselves, which are not compile time dependencies. if target.in_roots and \ (target.is_executable or (not target.added_to_compile_targets and (add_if_no_ancestor or target.requires_build)) or (target.is_static_library and add_if_no_ancestor and not target.is_or_has_linked_ancestor)): print '\t\tadding to compile targets', target.name, 'executable', \ target.is_executable, 'added_to_compile_targets', \ target.added_to_compile_targets, 'add_if_no_ancestor', \ add_if_no_ancestor, 'requires_build', target.requires_build, \ 'is_static_library', target.is_static_library, \ 'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor result.add(target) target.added_to_compile_targets = True
[ "def", "_AddCompileTargets", "(", "target", ",", "roots", ",", "add_if_no_ancestor", ",", "result", ")", ":", "if", "target", ".", "visited", ":", "return", "target", ".", "visited", "=", "True", "target", ".", "in_roots", "=", "target", "in", "roots", "for", "back_dep_target", "in", "target", ".", "back_deps", ":", "_AddCompileTargets", "(", "back_dep_target", ",", "roots", ",", "False", ",", "result", ")", "target", ".", "added_to_compile_targets", "|=", "back_dep_target", ".", "added_to_compile_targets", "target", ".", "in_roots", "|=", "back_dep_target", ".", "in_roots", "target", ".", "is_or_has_linked_ancestor", "|=", "(", "back_dep_target", ".", "is_or_has_linked_ancestor", ")", "# Always add 'executable' targets. Even though they may be built by other", "# targets that depend upon them it makes detection of what is going to be", "# built easier.", "# And always add static_libraries that have no dependencies on them from", "# linkables. This is necessary as the other dependencies on them may be", "# static libraries themselves, which are not compile time dependencies.", "if", "target", ".", "in_roots", "and", "(", "target", ".", "is_executable", "or", "(", "not", "target", ".", "added_to_compile_targets", "and", "(", "add_if_no_ancestor", "or", "target", ".", "requires_build", ")", ")", "or", "(", "target", ".", "is_static_library", "and", "add_if_no_ancestor", "and", "not", "target", ".", "is_or_has_linked_ancestor", ")", ")", ":", "print", "'\\t\\tadding to compile targets'", ",", "target", ".", "name", ",", "'executable'", ",", "target", ".", "is_executable", ",", "'added_to_compile_targets'", ",", "target", ".", "added_to_compile_targets", ",", "'add_if_no_ancestor'", ",", "add_if_no_ancestor", ",", "'requires_build'", ",", "target", ".", "requires_build", ",", "'is_static_library'", ",", "target", ".", "is_static_library", ",", "'is_or_has_linked_ancestor'", ",", "target", ".", "is_or_has_linked_ancestor", "result", ".", "add", "(", "target", ")", "target", ".", "added_to_compile_targets", "=", "True" ]
[ 454, 0 ]
[ 493, 42 ]
python
en
['en', 'en', 'en']
True
_GetCompileTargets
(matching_targets, supplied_targets)
Returns the set of Targets that require a build. matching_targets: targets that changed and need to be built. supplied_targets: set of targets supplied to analyzer to search from.
Returns the set of Targets that require a build. matching_targets: targets that changed and need to be built. supplied_targets: set of targets supplied to analyzer to search from.
def _GetCompileTargets(matching_targets, supplied_targets): """Returns the set of Targets that require a build. matching_targets: targets that changed and need to be built. supplied_targets: set of targets supplied to analyzer to search from.""" result = set() for target in matching_targets: print 'finding compile targets for match', target.name _AddCompileTargets(target, supplied_targets, True, result) return result
[ "def", "_GetCompileTargets", "(", "matching_targets", ",", "supplied_targets", ")", ":", "result", "=", "set", "(", ")", "for", "target", "in", "matching_targets", ":", "print", "'finding compile targets for match'", ",", "target", ".", "name", "_AddCompileTargets", "(", "target", ",", "supplied_targets", ",", "True", ",", "result", ")", "return", "result" ]
[ 496, 0 ]
[ 504, 15 ]
python
en
['en', 'en', 'en']
True
_WriteOutput
(params, **values)
Writes the output, either to stdout or a file is specified.
Writes the output, either to stdout or a file is specified.
def _WriteOutput(params, **values): """Writes the output, either to stdout or a file is specified.""" if 'error' in values: print 'Error:', values['error'] if 'status' in values: print values['status'] if 'targets' in values: values['targets'].sort() print 'Supplied targets that depend on changed files:' for target in values['targets']: print '\t', target if 'invalid_targets' in values: values['invalid_targets'].sort() print 'The following targets were not found:' for target in values['invalid_targets']: print '\t', target if 'build_targets' in values: values['build_targets'].sort() print 'Targets that require a build:' for target in values['build_targets']: print '\t', target if 'compile_targets' in values: values['compile_targets'].sort() print 'Targets that need to be built:' for target in values['compile_targets']: print '\t', target if 'test_targets' in values: values['test_targets'].sort() print 'Test targets:' for target in values['test_targets']: print '\t', target output_path = params.get('generator_flags', {}).get( 'analyzer_output_path', None) if not output_path: print json.dumps(values) return try: f = open(output_path, 'w') f.write(json.dumps(values) + '\n') f.close() except IOError as e: print 'Error writing to output file', output_path, str(e)
[ "def", "_WriteOutput", "(", "params", ",", "*", "*", "values", ")", ":", "if", "'error'", "in", "values", ":", "print", "'Error:'", ",", "values", "[", "'error'", "]", "if", "'status'", "in", "values", ":", "print", "values", "[", "'status'", "]", "if", "'targets'", "in", "values", ":", "values", "[", "'targets'", "]", ".", "sort", "(", ")", "print", "'Supplied targets that depend on changed files:'", "for", "target", "in", "values", "[", "'targets'", "]", ":", "print", "'\\t'", ",", "target", "if", "'invalid_targets'", "in", "values", ":", "values", "[", "'invalid_targets'", "]", ".", "sort", "(", ")", "print", "'The following targets were not found:'", "for", "target", "in", "values", "[", "'invalid_targets'", "]", ":", "print", "'\\t'", ",", "target", "if", "'build_targets'", "in", "values", ":", "values", "[", "'build_targets'", "]", ".", "sort", "(", ")", "print", "'Targets that require a build:'", "for", "target", "in", "values", "[", "'build_targets'", "]", ":", "print", "'\\t'", ",", "target", "if", "'compile_targets'", "in", "values", ":", "values", "[", "'compile_targets'", "]", ".", "sort", "(", ")", "print", "'Targets that need to be built:'", "for", "target", "in", "values", "[", "'compile_targets'", "]", ":", "print", "'\\t'", ",", "target", "if", "'test_targets'", "in", "values", ":", "values", "[", "'test_targets'", "]", ".", "sort", "(", ")", "print", "'Test targets:'", "for", "target", "in", "values", "[", "'test_targets'", "]", ":", "print", "'\\t'", ",", "target", "output_path", "=", "params", ".", "get", "(", "'generator_flags'", ",", "{", "}", ")", ".", "get", "(", "'analyzer_output_path'", ",", "None", ")", "if", "not", "output_path", ":", "print", "json", ".", "dumps", "(", "values", ")", "return", "try", ":", "f", "=", "open", "(", "output_path", ",", "'w'", ")", "f", ".", "write", "(", "json", ".", "dumps", "(", "values", ")", "+", "'\\n'", ")", "f", ".", "close", "(", ")", "except", "IOError", "as", "e", ":", "print", "'Error writing to output file'", ",", "output_path", ",", "str", "(", "e", ")" ]
[ 507, 0 ]
[ 549, 61 ]
python
en
['en', 'en', 'en']
True
_WasGypIncludeFileModified
(params, files)
Returns true if one of the files in |files| is in the set of included files.
Returns true if one of the files in |files| is in the set of included files.
def _WasGypIncludeFileModified(params, files): """Returns true if one of the files in |files| is in the set of included files.""" if params['options'].includes: for include in params['options'].includes: if _ToGypPath(os.path.normpath(include)) in files: print 'Include file modified, assuming all changed', include return True return False
[ "def", "_WasGypIncludeFileModified", "(", "params", ",", "files", ")", ":", "if", "params", "[", "'options'", "]", ".", "includes", ":", "for", "include", "in", "params", "[", "'options'", "]", ".", "includes", ":", "if", "_ToGypPath", "(", "os", ".", "path", ".", "normpath", "(", "include", ")", ")", "in", "files", ":", "print", "'Include file modified, assuming all changed'", ",", "include", "return", "True", "return", "False" ]
[ 552, 0 ]
[ 560, 14 ]
python
en
['en', 'en', 'en']
True
_NamesNotIn
(names, mapping)
Returns a list of the values in |names| that are not in |mapping|.
Returns a list of the values in |names| that are not in |mapping|.
def _NamesNotIn(names, mapping): """Returns a list of the values in |names| that are not in |mapping|.""" return [name for name in names if name not in mapping]
[ "def", "_NamesNotIn", "(", "names", ",", "mapping", ")", ":", "return", "[", "name", "for", "name", "in", "names", "if", "name", "not", "in", "mapping", "]" ]
[ 563, 0 ]
[ 565, 56 ]
python
en
['en', 'en', 'en']
True
_LookupTargets
(names, mapping)
Returns a list of the mapping[name] for each value in |names| that is in |mapping|.
Returns a list of the mapping[name] for each value in |names| that is in |mapping|.
def _LookupTargets(names, mapping): """Returns a list of the mapping[name] for each value in |names| that is in |mapping|.""" return [mapping[name] for name in names if name in mapping]
[ "def", "_LookupTargets", "(", "names", ",", "mapping", ")", ":", "return", "[", "mapping", "[", "name", "]", "for", "name", "in", "names", "if", "name", "in", "mapping", "]" ]
[ 568, 0 ]
[ 571, 61 ]
python
en
['en', 'en', 'en']
True
CalculateVariables
(default_variables, params)
Calculate additional variables for use in the build (called by gyp).
Calculate additional variables for use in the build (called by gyp).
def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" flavor = gyp.common.GetFlavor(params) if flavor == 'mac': default_variables.setdefault('OS', 'mac') elif flavor == 'win': default_variables.setdefault('OS', 'win') # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) else: operating_system = flavor if flavor == 'android': operating_system = 'linux' # Keep this legacy behavior for now. default_variables.setdefault('OS', operating_system)
[ "def", "CalculateVariables", "(", "default_variables", ",", "params", ")", ":", "flavor", "=", "gyp", ".", "common", ".", "GetFlavor", "(", "params", ")", "if", "flavor", "==", "'mac'", ":", "default_variables", ".", "setdefault", "(", "'OS'", ",", "'mac'", ")", "elif", "flavor", "==", "'win'", ":", "default_variables", ".", "setdefault", "(", "'OS'", ",", "'win'", ")", "# Copy additional generator configuration data from VS, which is shared", "# by the Windows Ninja generator.", "import", "gyp", ".", "generator", ".", "msvs", "as", "msvs_generator", "generator_additional_non_configuration_keys", "=", "getattr", "(", "msvs_generator", ",", "'generator_additional_non_configuration_keys'", ",", "[", "]", ")", "generator_additional_path_sections", "=", "getattr", "(", "msvs_generator", ",", "'generator_additional_path_sections'", ",", "[", "]", ")", "gyp", ".", "msvs_emulation", ".", "CalculateCommonVariables", "(", "default_variables", ",", "params", ")", "else", ":", "operating_system", "=", "flavor", "if", "flavor", "==", "'android'", ":", "operating_system", "=", "'linux'", "# Keep this legacy behavior for now.", "default_variables", ".", "setdefault", "(", "'OS'", ",", "operating_system", ")" ]
[ 574, 0 ]
[ 594, 56 ]
python
en
['en', 'en', 'en']
True
GenerateOutput
(target_list, target_dicts, data, params)
Called by gyp as the final stage. Outputs results.
Called by gyp as the final stage. Outputs results.
def GenerateOutput(target_list, target_dicts, data, params): """Called by gyp as the final stage. Outputs results.""" config = Config() try: config.Init(params) if not config.files: raise Exception('Must specify files to analyze via config_path generator ' 'flag') toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir)) if debug: print 'toplevel_dir', toplevel_dir if _WasGypIncludeFileModified(params, config.files): result_dict = { 'status': all_changed_string, 'test_targets': list(config.test_target_names), 'compile_targets': list( config.additional_compile_target_names | config.test_target_names) } _WriteOutput(params, **result_dict) return calculator = TargetCalculator(config.files, config.additional_compile_target_names, config.test_target_names, data, target_list, target_dicts, toplevel_dir, params['build_files']) if not calculator.is_build_impacted(): result_dict = { 'status': no_dependency_string, 'test_targets': [], 'compile_targets': [] } if calculator.invalid_targets: result_dict['invalid_targets'] = calculator.invalid_targets _WriteOutput(params, **result_dict) return test_target_names = calculator.find_matching_test_target_names() compile_target_names = calculator.find_matching_compile_target_names() found_at_least_one_target = compile_target_names or test_target_names result_dict = { 'test_targets': test_target_names, 'status': found_dependency_string if found_at_least_one_target else no_dependency_string, 'compile_targets': list( set(compile_target_names) | set(test_target_names)) } if calculator.invalid_targets: result_dict['invalid_targets'] = calculator.invalid_targets _WriteOutput(params, **result_dict) except Exception as e: _WriteOutput(params, error=str(e))
[ "def", "GenerateOutput", "(", "target_list", ",", "target_dicts", ",", "data", ",", "params", ")", ":", "config", "=", "Config", "(", ")", "try", ":", "config", ".", "Init", "(", "params", ")", "if", "not", "config", ".", "files", ":", "raise", "Exception", "(", "'Must specify files to analyze via config_path generator '", "'flag'", ")", "toplevel_dir", "=", "_ToGypPath", "(", "os", ".", "path", ".", "abspath", "(", "params", "[", "'options'", "]", ".", "toplevel_dir", ")", ")", "if", "debug", ":", "print", "'toplevel_dir'", ",", "toplevel_dir", "if", "_WasGypIncludeFileModified", "(", "params", ",", "config", ".", "files", ")", ":", "result_dict", "=", "{", "'status'", ":", "all_changed_string", ",", "'test_targets'", ":", "list", "(", "config", ".", "test_target_names", ")", ",", "'compile_targets'", ":", "list", "(", "config", ".", "additional_compile_target_names", "|", "config", ".", "test_target_names", ")", "}", "_WriteOutput", "(", "params", ",", "*", "*", "result_dict", ")", "return", "calculator", "=", "TargetCalculator", "(", "config", ".", "files", ",", "config", ".", "additional_compile_target_names", ",", "config", ".", "test_target_names", ",", "data", ",", "target_list", ",", "target_dicts", ",", "toplevel_dir", ",", "params", "[", "'build_files'", "]", ")", "if", "not", "calculator", ".", "is_build_impacted", "(", ")", ":", "result_dict", "=", "{", "'status'", ":", "no_dependency_string", ",", "'test_targets'", ":", "[", "]", ",", "'compile_targets'", ":", "[", "]", "}", "if", "calculator", ".", "invalid_targets", ":", "result_dict", "[", "'invalid_targets'", "]", "=", "calculator", ".", "invalid_targets", "_WriteOutput", "(", "params", ",", "*", "*", "result_dict", ")", "return", "test_target_names", "=", "calculator", ".", "find_matching_test_target_names", "(", ")", "compile_target_names", "=", "calculator", ".", "find_matching_compile_target_names", "(", ")", "found_at_least_one_target", "=", "compile_target_names", "or", "test_target_names", "result_dict", "=", "{", "'test_targets'", ":", "test_target_names", ",", "'status'", ":", "found_dependency_string", "if", "found_at_least_one_target", "else", "no_dependency_string", ",", "'compile_targets'", ":", "list", "(", "set", "(", "compile_target_names", ")", "|", "set", "(", "test_target_names", ")", ")", "}", "if", "calculator", ".", "invalid_targets", ":", "result_dict", "[", "'invalid_targets'", "]", "=", "calculator", ".", "invalid_targets", "_WriteOutput", "(", "params", ",", "*", "*", "result_dict", ")", "except", "Exception", "as", "e", ":", "_WriteOutput", "(", "params", ",", "error", "=", "str", "(", "e", ")", ")" ]
[ 689, 0 ]
[ 740, 38 ]
python
en
['en', 'en', 'en']
True
Config.Init
(self, params)
Initializes Config. This is a separate method as it raises an exception if there is a parse error.
Initializes Config. This is a separate method as it raises an exception if there is a parse error.
def Init(self, params): """Initializes Config. This is a separate method as it raises an exception if there is a parse error.""" generator_flags = params.get('generator_flags', {}) config_path = generator_flags.get('config_path', None) if not config_path: return try: f = open(config_path, 'r') config = json.load(f) f.close() except IOError: raise Exception('Unable to open file ' + config_path) except ValueError as e: raise Exception('Unable to parse config file ' + config_path + str(e)) if not isinstance(config, dict): raise Exception('config_path must be a JSON file containing a dictionary') self.files = config.get('files', []) self.additional_compile_target_names = set( config.get('additional_compile_targets', [])) self.test_target_names = set(config.get('test_targets', []))
[ "def", "Init", "(", "self", ",", "params", ")", ":", "generator_flags", "=", "params", ".", "get", "(", "'generator_flags'", ",", "{", "}", ")", "config_path", "=", "generator_flags", ".", "get", "(", "'config_path'", ",", "None", ")", "if", "not", "config_path", ":", "return", "try", ":", "f", "=", "open", "(", "config_path", ",", "'r'", ")", "config", "=", "json", ".", "load", "(", "f", ")", "f", ".", "close", "(", ")", "except", "IOError", ":", "raise", "Exception", "(", "'Unable to open file '", "+", "config_path", ")", "except", "ValueError", "as", "e", ":", "raise", "Exception", "(", "'Unable to parse config file '", "+", "config_path", "+", "str", "(", "e", ")", ")", "if", "not", "isinstance", "(", "config", ",", "dict", ")", ":", "raise", "Exception", "(", "'config_path must be a JSON file containing a dictionary'", ")", "self", ".", "files", "=", "config", ".", "get", "(", "'files'", ",", "[", "]", ")", "self", ".", "additional_compile_target_names", "=", "set", "(", "config", ".", "get", "(", "'additional_compile_targets'", ",", "[", "]", ")", ")", "self", ".", "test_target_names", "=", "set", "(", "config", ".", "get", "(", "'test_targets'", ",", "[", "]", ")", ")" ]
[ 251, 2 ]
[ 271, 64 ]
python
en
['en', 'en', 'en']
True
TargetCalculator._supplied_target_names_no_all
(self)
Returns the supplied test targets without 'all'.
Returns the supplied test targets without 'all'.
def _supplied_target_names_no_all(self): """Returns the supplied test targets without 'all'.""" result = self._supplied_target_names(); result.discard('all') return result
[ "def", "_supplied_target_names_no_all", "(", "self", ")", ":", "result", "=", "self", ".", "_supplied_target_names", "(", ")", "result", ".", "discard", "(", "'all'", ")", "return", "result" ]
[ 613, 2 ]
[ 617, 17 ]
python
en
['en', 'en', 'en']
True
TargetCalculator.is_build_impacted
(self)
Returns true if the supplied files impact the build at all.
Returns true if the supplied files impact the build at all.
def is_build_impacted(self): """Returns true if the supplied files impact the build at all.""" return self._changed_targets
[ "def", "is_build_impacted", "(", "self", ")", ":", "return", "self", ".", "_changed_targets" ]
[ 619, 2 ]
[ 621, 32 ]
python
en
['en', 'en', 'en']
True
TargetCalculator.find_matching_test_target_names
(self)
Returns the set of output test targets.
Returns the set of output test targets.
def find_matching_test_target_names(self): """Returns the set of output test targets.""" assert self.is_build_impacted() # Find the test targets first. 'all' is special cased to mean all the # root targets. To deal with all the supplied |test_targets| are expanded # to include the root targets during lookup. If any of the root targets # match, we remove it and replace it with 'all'. test_target_names_no_all = set(self._test_target_names) test_target_names_no_all.discard('all') test_targets_no_all = _LookupTargets(test_target_names_no_all, self._unqualified_mapping) test_target_names_contains_all = 'all' in self._test_target_names if test_target_names_contains_all: test_targets = [x for x in (set(test_targets_no_all) | set(self._root_targets))] else: test_targets = [x for x in test_targets_no_all] print 'supplied test_targets' for target_name in self._test_target_names: print '\t', target_name print 'found test_targets' for target in test_targets: print '\t', target.name print 'searching for matching test targets' matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets) matching_test_targets_contains_all = (test_target_names_contains_all and set(matching_test_targets) & set(self._root_targets)) if matching_test_targets_contains_all: # Remove any of the targets for all that were not explicitly supplied, # 'all' is subsequentely added to the matching names below. matching_test_targets = [x for x in (set(matching_test_targets) & set(test_targets_no_all))] print 'matched test_targets' for target in matching_test_targets: print '\t', target.name matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1] for target in matching_test_targets] if matching_test_targets_contains_all: matching_target_names.append('all') print '\tall' return matching_target_names
[ "def", "find_matching_test_target_names", "(", "self", ")", ":", "assert", "self", ".", "is_build_impacted", "(", ")", "# Find the test targets first. 'all' is special cased to mean all the", "# root targets. To deal with all the supplied |test_targets| are expanded", "# to include the root targets during lookup. If any of the root targets", "# match, we remove it and replace it with 'all'.", "test_target_names_no_all", "=", "set", "(", "self", ".", "_test_target_names", ")", "test_target_names_no_all", ".", "discard", "(", "'all'", ")", "test_targets_no_all", "=", "_LookupTargets", "(", "test_target_names_no_all", ",", "self", ".", "_unqualified_mapping", ")", "test_target_names_contains_all", "=", "'all'", "in", "self", ".", "_test_target_names", "if", "test_target_names_contains_all", ":", "test_targets", "=", "[", "x", "for", "x", "in", "(", "set", "(", "test_targets_no_all", ")", "|", "set", "(", "self", ".", "_root_targets", ")", ")", "]", "else", ":", "test_targets", "=", "[", "x", "for", "x", "in", "test_targets_no_all", "]", "print", "'supplied test_targets'", "for", "target_name", "in", "self", ".", "_test_target_names", ":", "print", "'\\t'", ",", "target_name", "print", "'found test_targets'", "for", "target", "in", "test_targets", ":", "print", "'\\t'", ",", "target", ".", "name", "print", "'searching for matching test targets'", "matching_test_targets", "=", "_GetTargetsDependingOnMatchingTargets", "(", "test_targets", ")", "matching_test_targets_contains_all", "=", "(", "test_target_names_contains_all", "and", "set", "(", "matching_test_targets", ")", "&", "set", "(", "self", ".", "_root_targets", ")", ")", "if", "matching_test_targets_contains_all", ":", "# Remove any of the targets for all that were not explicitly supplied,", "# 'all' is subsequentely added to the matching names below.", "matching_test_targets", "=", "[", "x", "for", "x", "in", "(", "set", "(", "matching_test_targets", ")", "&", "set", "(", "test_targets_no_all", ")", ")", "]", "print", "'matched test_targets'", "for", "target", "in", "matching_test_targets", ":", "print", "'\\t'", ",", "target", ".", "name", "matching_target_names", "=", "[", "gyp", ".", "common", ".", "ParseQualifiedTarget", "(", "target", ".", "name", ")", "[", "1", "]", "for", "target", "in", "matching_test_targets", "]", "if", "matching_test_targets_contains_all", ":", "matching_target_names", ".", "append", "(", "'all'", ")", "print", "'\\tall'", "return", "matching_target_names" ]
[ 623, 2 ]
[ 664, 32 ]
python
en
['en', 'en', 'en']
True
TargetCalculator.find_matching_compile_target_names
(self)
Returns the set of output compile targets.
Returns the set of output compile targets.
def find_matching_compile_target_names(self): """Returns the set of output compile targets.""" assert self.is_build_impacted(); # Compile targets are found by searching up from changed targets. # Reset the visited status for _GetBuildTargets. for target in self._name_to_target.itervalues(): target.visited = False supplied_targets = _LookupTargets(self._supplied_target_names_no_all(), self._unqualified_mapping) if 'all' in self._supplied_target_names(): supplied_targets = [x for x in (set(supplied_targets) | set(self._root_targets))] print 'Supplied test_targets & compile_targets' for target in supplied_targets: print '\t', target.name print 'Finding compile targets' compile_targets = _GetCompileTargets(self._changed_targets, supplied_targets) return [gyp.common.ParseQualifiedTarget(target.name)[1] for target in compile_targets]
[ "def", "find_matching_compile_target_names", "(", "self", ")", ":", "assert", "self", ".", "is_build_impacted", "(", ")", "# Compile targets are found by searching up from changed targets.", "# Reset the visited status for _GetBuildTargets.", "for", "target", "in", "self", ".", "_name_to_target", ".", "itervalues", "(", ")", ":", "target", ".", "visited", "=", "False", "supplied_targets", "=", "_LookupTargets", "(", "self", ".", "_supplied_target_names_no_all", "(", ")", ",", "self", ".", "_unqualified_mapping", ")", "if", "'all'", "in", "self", ".", "_supplied_target_names", "(", ")", ":", "supplied_targets", "=", "[", "x", "for", "x", "in", "(", "set", "(", "supplied_targets", ")", "|", "set", "(", "self", ".", "_root_targets", ")", ")", "]", "print", "'Supplied test_targets & compile_targets'", "for", "target", "in", "supplied_targets", ":", "print", "'\\t'", ",", "target", ".", "name", "print", "'Finding compile targets'", "compile_targets", "=", "_GetCompileTargets", "(", "self", ".", "_changed_targets", ",", "supplied_targets", ")", "return", "[", "gyp", ".", "common", ".", "ParseQualifiedTarget", "(", "target", ".", "name", ")", "[", "1", "]", "for", "target", "in", "compile_targets", "]" ]
[ 666, 2 ]
[ 686, 42 ]
python
en
['en', 'en', 'en']
True
cli
(file)
\b Read Azure IoT Hub Avro files Returns the content of Avro file FILE as a json string. Examples: \b hubavroreader c:\\temp\\05 hubavroreader c:\\temp\\25.avro
\b Read Azure IoT Hub Avro files Returns the content of Avro file FILE as a json string.
def cli(file): """ \b Read Azure IoT Hub Avro files Returns the content of Avro file FILE as a json string. Examples: \b hubavroreader c:\\temp\\05 hubavroreader c:\\temp\\25.avro """ if (isfile(file)): messages = load_avro_file(file) click.echo(json.dumps(messages, indent=4, sort_keys=True)) return click.echo('Could not find file %s' % file)
[ "def", "cli", "(", "file", ")", ":", "if", "(", "isfile", "(", "file", ")", ")", ":", "messages", "=", "load_avro_file", "(", "file", ")", "click", ".", "echo", "(", "json", ".", "dumps", "(", "messages", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", ")", "return", "click", ".", "echo", "(", "'Could not find file %s'", "%", "file", ")" ]
[ 19, 0 ]
[ 36, 47 ]
python
en
['en', 'error', 'th']
False
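As a usage illustration (not part of the record above): the Avro-loading step could be sketched with the fastavro package, assuming load_avro_file simply materializes every record in the container file. The file name and helper name here are hypothetical.

    # Sketch only; the record's load_avro_file may be implemented differently.
    # Assumes the fastavro package is available.
    import json
    from fastavro import reader

    def load_avro_file_sketch(path):
        # Materialize every record in the Avro container file as a list of dicts.
        with open(path, "rb") as fo:
            return list(reader(fo))

    if __name__ == "__main__":
        # "25.avro" is an invented example path.
        print(json.dumps(load_avro_file_sketch("25.avro"), indent=4, sort_keys=True))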
Profiler.__init__
( self, *, profiler_config: Optional[Dict[str, Dict[str, Dict[str, Any]]]] = None, data_context: Optional[DataContext] = None, )
Create a new Profiler using configured rules. For a rule or an item in a rule configuration, instantiates the following if available: a domain builder, a parameter builder, and a configuration builder. These will be used to define profiler computation patterns. Args: profiler_config: Variables and Rules configuration as a dictionary data_context: DataContext object that defines a full runtime environment (data access, etc.)
Create a new Profiler using configured rules. For a rule or an item in a rule configuration, instantiates the following if available: a domain builder, a parameter builder, and a configuration builder. These will be used to define profiler computation patterns.
def __init__( self, *, profiler_config: Optional[Dict[str, Dict[str, Dict[str, Any]]]] = None, data_context: Optional[DataContext] = None, ): """ Create a new Profiler using configured rules. For a rule or an item in a rule configuration, instantiates the following if available: a domain builder, a parameter builder, and a configuration builder. These will be used to define profiler computation patterns. Args: profiler_config: Variables and Rules configuration as a dictionary data_context: DataContext object that defines a full runtime environment (data access, etc.) """ self._profiler_config = profiler_config self._data_context = data_context self._rules = [] rules_configs: Dict[str, Dict[str, Any]] = self._profiler_config.get( "rules", {} ) rule_name: str rule_config: Dict[str, Any] for rule_name, rule_config in rules_configs.items(): domain_builder_config: dict = rule_config.get("domain_builder") if domain_builder_config is None: raise ge_exceptions.ProfilerConfigurationError( message=f'Invalid rule "{rule_name}": no domain_builder found.' ) domain_builder: DomainBuilder = instantiate_class_from_config( config=domain_builder_config, runtime_environment={"data_context": data_context}, config_defaults={ "module_name": "great_expectations.rule_based_profiler.domain_builder" }, ) parameter_builders: List[ParameterBuilder] = [] parameter_builder_configs: dict = rule_config.get("parameter_builders") if parameter_builder_configs: parameter_builder_config: dict for parameter_builder_config in parameter_builder_configs: parameter_builders.append( instantiate_class_from_config( config=parameter_builder_config, runtime_environment={"data_context": data_context}, config_defaults={ "module_name": "great_expectations.rule_based_profiler.parameter_builder" }, ) ) expectation_configuration_builders: List[ ExpectationConfigurationBuilder ] = [] expectation_configuration_builder_configs: dict = rule_config.get( "expectation_configuration_builders" ) if expectation_configuration_builder_configs: expectation_configuration_builder_config: dict for ( expectation_configuration_builder_config ) in expectation_configuration_builder_configs: expectation_configuration_builders.append( instantiate_class_from_config( config=expectation_configuration_builder_config, runtime_environment={}, config_defaults={ "class_name": "DefaultExpectationConfigurationBuilder", "module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder", }, ) ) variables_configs: Dict[str, Dict] = self._profiler_config.get( "variables", {} ) variables: Optional[ParameterContainer] = None if variables_configs: variables = build_parameter_container_for_variables( variables_configs=variables_configs ) self._rules.append( Rule( name=rule_name, domain_builder=domain_builder, parameter_builders=parameter_builders, expectation_configuration_builders=expectation_configuration_builders, variables=variables, ) )
[ "def", "__init__", "(", "self", ",", "*", ",", "profiler_config", ":", "Optional", "[", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "Any", "]", "]", "]", "]", "=", "None", ",", "data_context", ":", "Optional", "[", "DataContext", "]", "=", "None", ",", ")", ":", "self", ".", "_profiler_config", "=", "profiler_config", "self", ".", "_data_context", "=", "data_context", "self", ".", "_rules", "=", "[", "]", "rules_configs", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "Any", "]", "]", "=", "self", ".", "_profiler_config", ".", "get", "(", "\"rules\"", ",", "{", "}", ")", "rule_name", ":", "str", "rule_config", ":", "Dict", "[", "str", ",", "Any", "]", "for", "rule_name", ",", "rule_config", "in", "rules_configs", ".", "items", "(", ")", ":", "domain_builder_config", ":", "dict", "=", "rule_config", ".", "get", "(", "\"domain_builder\"", ")", "if", "domain_builder_config", "is", "None", ":", "raise", "ge_exceptions", ".", "ProfilerConfigurationError", "(", "message", "=", "f'Invalid rule \"{rule_name}\": no domain_builder found.'", ")", "domain_builder", ":", "DomainBuilder", "=", "instantiate_class_from_config", "(", "config", "=", "domain_builder_config", ",", "runtime_environment", "=", "{", "\"data_context\"", ":", "data_context", "}", ",", "config_defaults", "=", "{", "\"module_name\"", ":", "\"great_expectations.rule_based_profiler.domain_builder\"", "}", ",", ")", "parameter_builders", ":", "List", "[", "ParameterBuilder", "]", "=", "[", "]", "parameter_builder_configs", ":", "dict", "=", "rule_config", ".", "get", "(", "\"parameter_builders\"", ")", "if", "parameter_builder_configs", ":", "parameter_builder_config", ":", "dict", "for", "parameter_builder_config", "in", "parameter_builder_configs", ":", "parameter_builders", ".", "append", "(", "instantiate_class_from_config", "(", "config", "=", "parameter_builder_config", ",", "runtime_environment", "=", "{", "\"data_context\"", ":", "data_context", "}", ",", "config_defaults", "=", "{", "\"module_name\"", ":", "\"great_expectations.rule_based_profiler.parameter_builder\"", "}", ",", ")", ")", "expectation_configuration_builders", ":", "List", "[", "ExpectationConfigurationBuilder", "]", "=", "[", "]", "expectation_configuration_builder_configs", ":", "dict", "=", "rule_config", ".", "get", "(", "\"expectation_configuration_builders\"", ")", "if", "expectation_configuration_builder_configs", ":", "expectation_configuration_builder_config", ":", "dict", "for", "(", "expectation_configuration_builder_config", ")", "in", "expectation_configuration_builder_configs", ":", "expectation_configuration_builders", ".", "append", "(", "instantiate_class_from_config", "(", "config", "=", "expectation_configuration_builder_config", ",", "runtime_environment", "=", "{", "}", ",", "config_defaults", "=", "{", "\"class_name\"", ":", "\"DefaultExpectationConfigurationBuilder\"", ",", "\"module_name\"", ":", "\"great_expectations.rule_based_profiler.expectation_configuration_builder\"", ",", "}", ",", ")", ")", "variables_configs", ":", "Dict", "[", "str", ",", "Dict", "]", "=", "self", ".", "_profiler_config", ".", "get", "(", "\"variables\"", ",", "{", "}", ")", "variables", ":", "Optional", "[", "ParameterContainer", "]", "=", "None", "if", "variables_configs", ":", "variables", "=", "build_parameter_container_for_variables", "(", "variables_configs", "=", "variables_configs", ")", "self", ".", "_rules", ".", "append", "(", "Rule", "(", "name", "=", "rule_name", ",", "domain_builder", "=", 
"domain_builder", ",", "parameter_builders", "=", "parameter_builders", ",", "expectation_configuration_builders", "=", "expectation_configuration_builders", ",", "variables", "=", "variables", ",", ")", ")" ]
[ 97, 4 ]
[ 198, 13 ]
python
en
['en', 'error', 'th']
False
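For orientation only, a hypothetical profiler_config dict with the three per-rule sections the constructor above reads (domain_builder, parameter_builders, expectation_configuration_builders). All class names and values here are illustrative assumptions, not taken from the record.

    # Hypothetical config shape; the keys mirror what __init__ looks up, the values are invented.
    profiler_config = {
        "variables": {"false_positive_rate": 0.01},
        "rules": {
            "my_rule": {
                "domain_builder": {"class_name": "ColumnDomainBuilder"},
                "parameter_builders": [
                    {"name": "my_parameter", "class_name": "MetricParameterBuilder"},
                ],
                "expectation_configuration_builders": [
                    {"expectation_type": "expect_column_values_to_not_be_null"},
                ],
            },
        },
    }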
Profiler.profile
( self, *, expectation_suite_name: Optional[str] = None, include_citation: bool = True, )
Args: :param expectation_suite_name: A name for returned Expectation suite. :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler :return: Set of rule evaluation results in the form of an ExpectationSuite
Args: :param expectation_suite_name: A name for returned Expectation suite. :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler :return: Set of rule evaluation results in the form of an ExpectationSuite
def profile( self, *, expectation_suite_name: Optional[str] = None, include_citation: bool = True, ) -> ExpectationSuite: """ Args: :param expectation_suite_name: A name for returned Expectation suite. :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler :return: Set of rule evaluation results in the form of an ExpectationSuite """ if expectation_suite_name is None: expectation_suite_name = ( f"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}" ) expectation_suite: ExpectationSuite = ExpectationSuite( expectation_suite_name=expectation_suite_name ) if include_citation: expectation_suite.add_citation( comment="Suite created by Rule-Based Profiler with the configuration included.", profiler_config=self._profiler_config, ) rule: Rule for rule in self._rules: expectation_configurations: List[ExpectationConfiguration] = rule.generate() expectation_configuration: ExpectationConfiguration for expectation_configuration in expectation_configurations: expectation_suite.add_expectation( expectation_configuration=expectation_configuration ) return expectation_suite
[ "def", "profile", "(", "self", ",", "*", ",", "expectation_suite_name", ":", "Optional", "[", "str", "]", "=", "None", ",", "include_citation", ":", "bool", "=", "True", ",", ")", "->", "ExpectationSuite", ":", "if", "expectation_suite_name", "is", "None", ":", "expectation_suite_name", "=", "(", "f\"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}\"", ")", "expectation_suite", ":", "ExpectationSuite", "=", "ExpectationSuite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "if", "include_citation", ":", "expectation_suite", ".", "add_citation", "(", "comment", "=", "\"Suite created by Rule-Based Profiler with the configuration included.\"", ",", "profiler_config", "=", "self", ".", "_profiler_config", ",", ")", "rule", ":", "Rule", "for", "rule", "in", "self", ".", "_rules", ":", "expectation_configurations", ":", "List", "[", "ExpectationConfiguration", "]", "=", "rule", ".", "generate", "(", ")", "expectation_configuration", ":", "ExpectationConfiguration", "for", "expectation_configuration", "in", "expectation_configurations", ":", "expectation_suite", ".", "add_expectation", "(", "expectation_configuration", "=", "expectation_configuration", ")", "return", "expectation_suite" ]
[ 200, 4 ]
[ 236, 32 ]
python
en
['en', 'error', 'th']
False
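A minimal sketch of driving the two methods above end to end; profiler_config and data_context are assumed to exist (see the hypothetical config sketched earlier) rather than shown.

    # Sketch only: wiring Profiler construction to profile(); inputs are assumed.
    profiler = Profiler(profiler_config=profiler_config, data_context=data_context)
    suite = profiler.profile(expectation_suite_name="profiled_suite", include_citation=True)
    print(suite.expectation_suite_name, len(suite.expectations))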
compute_convolution
(I, T, stride: int = 1, padding: int = 0)
This function takes an image <I> and a template <T> (both numpy arrays) and returns a heatmap where each grid represents the output produced by convolution at each location.
This function takes an image <I> and a template <T> (both numpy arrays) and returns a heatmap where each grid represents the output produced by convolution at each location.
def compute_convolution(I, T, stride: int = 1, padding: int = 0): """ This function takes an image <I> and a template <T> (both numpy arrays) and returns a heatmap where each grid represents the output produced by convolution at each location. """ # Validate input assert np.ndim(I) == np.ndim(T) == 3 (n_rows_i, n_cols_i, n_channels_i) = np.shape(I) (n_rows_t, n_cols_t, n_channels_t) = np.shape(T) assert n_rows_t <= n_rows_i assert n_cols_t <= n_cols_i assert n_channels_t == n_channels_i # We downsize the heatmap slightly so that the template can match # only valid pixels # Calculate shapes along the convolution dimensions (non-channel) shape_i = np.array(I.shape[:-1], dtype=int) shape_t = np.array(T.shape[:-1], dtype=int) shape_h = ((shape_i + (2 * padding) - shape_t) // stride) + 1 heatmap = np.zeros(shape_h) # Iterate over rows and columns of heatmap for row_h in range(shape_h[0]): for col_h in range(shape_h[1]): # Translate strides/padding to image-indexing row_i, col_i = heatmap_idx_to_image_idx( np.array([row_h, col_h]), stride=stride, padding=padding ) # Slice input image to template size sub_image = I[row_i : (row_i + n_rows_t), col_i : (col_i + n_cols_t)] # Store the correlation between this image slice and the template corr = pearsonr(sub_image.flatten(), T.flatten())[0] heatmap[row_h, col_h] = corr return heatmap
[ "def", "compute_convolution", "(", "I", ",", "T", ",", "stride", ":", "int", "=", "1", ",", "padding", ":", "int", "=", "0", ")", ":", "# Validate input", "assert", "np", ".", "ndim", "(", "I", ")", "==", "np", ".", "ndim", "(", "T", ")", "==", "3", "(", "n_rows_i", ",", "n_cols_i", ",", "n_channels_i", ")", "=", "np", ".", "shape", "(", "I", ")", "(", "n_rows_t", ",", "n_cols_t", ",", "n_channels_t", ")", "=", "np", ".", "shape", "(", "T", ")", "assert", "n_rows_t", "<=", "n_rows_i", "assert", "n_cols_t", "<=", "n_cols_i", "assert", "n_channels_t", "==", "n_channels_i", "# We downsize the heatmap slightly so that the template can match", "# only valid pixels", "# Calculate shapes along the convolution dimensions (non-channel)", "shape_i", "=", "np", ".", "array", "(", "I", ".", "shape", "[", ":", "-", "1", "]", ",", "dtype", "=", "int", ")", "shape_t", "=", "np", ".", "array", "(", "T", ".", "shape", "[", ":", "-", "1", "]", ",", "dtype", "=", "int", ")", "shape_h", "=", "(", "(", "shape_i", "+", "(", "2", "*", "padding", ")", "-", "shape_t", ")", "//", "stride", ")", "+", "1", "heatmap", "=", "np", ".", "zeros", "(", "shape_h", ")", "# Iterate over rows and columns of heatmap", "for", "row_h", "in", "range", "(", "shape_h", "[", "0", "]", ")", ":", "for", "col_h", "in", "range", "(", "shape_h", "[", "1", "]", ")", ":", "# Translate strides/padding to image-indexing", "row_i", ",", "col_i", "=", "heatmap_idx_to_image_idx", "(", "np", ".", "array", "(", "[", "row_h", ",", "col_h", "]", ")", ",", "stride", "=", "stride", ",", "padding", "=", "padding", ")", "# Slice input image to template size", "sub_image", "=", "I", "[", "row_i", ":", "(", "row_i", "+", "n_rows_t", ")", ",", "col_i", ":", "(", "col_i", "+", "n_cols_t", ")", "]", "# Store the correlation between this image slice and the template", "corr", "=", "pearsonr", "(", "sub_image", ".", "flatten", "(", ")", ",", "T", ".", "flatten", "(", ")", ")", "[", "0", "]", "heatmap", "[", "row_h", ",", "col_h", "]", "=", "corr", "return", "heatmap" ]
[ 11, 0 ]
[ 48, 18 ]
python
en
['en', 'error', 'th']
False
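A small sanity check of the heatmap-shape formula used in the record, shape_h = (shape_i + 2*padding - shape_t) // stride + 1, assuming compute_convolution is importable as defined above.

    # Usage sketch: with a 10x12 image, a 4x4 template, stride 2 and no padding,
    # the heatmap should be ((10-4)//2+1, (12-4)//2+1) = (4, 5).
    import numpy as np

    I = np.random.rand(10, 12, 3)
    T = np.random.rand(4, 4, 3)
    heatmap = compute_convolution(I, T, stride=2, padding=0)
    assert heatmap.shape == (4, 5)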
predict_boxes
( heatmap: np.ndarray, bbox_shapes: np.ndarray, stride: int = 1, padding: int = 0, threshold: float = 0.7, )
This function takes heatmap and returns the bounding boxes and associated confidence scores. Arguments --------- heatmap: np.array of confidence scores (correlations with template) bbox_shapes: (heatmap_rows, heatmap_cols, 2) map that stores the size of the bounding box that performed best at each pixel stride, padding: same values used to construct the heatmap
This function takes heatmap and returns the bounding boxes and associated confidence scores.
def predict_boxes( heatmap: np.ndarray, bbox_shapes: np.ndarray, stride: int = 1, padding: int = 0, threshold: float = 0.7, ): """ This function takes heatmap and returns the bounding boxes and associated confidence scores. Arguments --------- heatmap: np.array of confidence scores (correlations with template) bbox_shapes: (heatmap_rows, heatmap_cols, 2) map that stores the size of the bounding box that performed best at each pixel stride, padding: same values used to construct the heatmap """ # Threshold heatmap to find objects object_detected_mask = heatmap > threshold object_locs = np.argwhere(object_detected_mask) bbox_list = [] for row_h, col_h in object_locs: # Convert heatmap coordinates back to original coordinates tl_row_i, tl_col_i = heatmap_idx_to_image_idx( np.array([row_h, col_h]), stride=stride, padding=padding ) # Bounding box size is pre-defined br_row_i, br_col_i = np.array([tl_row_i, tl_col_i]) + np.asarray(bbox_shapes[row_h, col_h]) score = heatmap[row_h, col_h] # Convert to native Python integers to fix JSON parsing bbox_list.append( [tl_row_i.item(), tl_col_i.item(), br_row_i.item(), br_col_i.item(), score] ) return bbox_list
[ "def", "predict_boxes", "(", "heatmap", ":", "np", ".", "ndarray", ",", "bbox_shapes", ":", "np", ".", "ndarray", ",", "stride", ":", "int", "=", "1", ",", "padding", ":", "int", "=", "0", ",", "threshold", ":", "float", "=", "0.7", ",", ")", ":", "# Threshold heatmap to find objects", "object_detected_mask", "=", "heatmap", ">", "threshold", "object_locs", "=", "np", ".", "argwhere", "(", "object_detected_mask", ")", "bbox_list", "=", "[", "]", "for", "row_h", ",", "col_h", "in", "object_locs", ":", "# Convert heatmap coordinates back to original coordinates", "tl_row_i", ",", "tl_col_i", "=", "heatmap_idx_to_image_idx", "(", "np", ".", "array", "(", "[", "row_h", ",", "col_h", "]", ")", ",", "stride", "=", "stride", ",", "padding", "=", "padding", ")", "# Bounding box size is pre-defined", "br_row_i", ",", "br_col_i", "=", "np", ".", "array", "(", "[", "tl_row_i", ",", "tl_col_i", "]", ")", "+", "np", ".", "asarray", "(", "bbox_shapes", "[", "row_h", ",", "col_h", "]", ")", "score", "=", "heatmap", "[", "row_h", ",", "col_h", "]", "# Convert to native Python integers to fix JSON parsing", "bbox_list", ".", "append", "(", "[", "tl_row_i", ".", "item", "(", ")", ",", "tl_col_i", ".", "item", "(", ")", ",", "br_row_i", ".", "item", "(", ")", ",", "br_col_i", ".", "item", "(", ")", ",", "score", "]", ")", "return", "bbox_list" ]
[ 51, 0 ]
[ 90, 20 ]
python
en
['en', 'error', 'th']
False
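The thresholding core of predict_boxes in isolation, with made-up heatmap values, showing how heatmap indices map back to image pixels via stride and padding.

    # Illustration only: threshold a tiny heatmap and map hits back to image coordinates.
    import numpy as np

    heatmap = np.array([[0.2, 0.9],
                        [0.8, 0.1]])
    stride, padding, threshold = 2, 0, 0.7

    locs = np.argwhere(heatmap > threshold)    # [[0, 1], [1, 0]]
    top_left = stride * locs - padding         # image-space top-left corners
    print(top_left.tolist())                   # [[0, 2], [2, 0]]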
heatmap_idx_to_image_idx
(idx_h: int, stride: int, padding: int)
Helper function to convert from heatmap coordinates to image coordinates Arguments --------- idx_h : int or 1-D np.array of heatmap indices
Helper function to convert from heatmap coordinates to image coordinates
def heatmap_idx_to_image_idx(idx_h: int, stride: int, padding: int): """ Helper function to convert between heatmap coordinates to image coordinates Arguments --------- idx_h : int or 1-D np.array of heatmap indices """ idx_i = (stride * idx_h) - padding return idx_i
[ "def", "heatmap_idx_to_image_idx", "(", "idx_h", ":", "int", ",", "stride", ":", "int", ",", "padding", ":", "int", ")", ":", "idx_i", "=", "(", "stride", "*", "idx_h", ")", "-", "padding", "return", "idx_i" ]
[ 93, 0 ]
[ 102, 16 ]
python
en
['en', 'error', 'th']
False
detect_red_light_mf
(I, template_list: List)
This function takes a numpy array <I> and returns a list <output>. The length of <output> is the number of bounding boxes predicted for <I>. Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. The first four entries are four integers specifying a bounding box (the row and column index of the top left corner and the row and column index of the bottom right corner). <score> is a confidence score ranging from 0 to 1. Note that PIL loads images in RGB order, so: I[:,:,0] is the red channel I[:,:,1] is the green channel I[:,:,2] is the blue channel template_list: list of numpy arrays corresponding to multiple filters
This function takes a numpy array <I> and returns a list <output>. The length of <output> is the number of bounding boxes predicted for <I>. Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. The first four entries are four integers specifying a bounding box (the row and column index of the top left corner and the row and column index of the bottom right corner). <score> is a confidence score ranging from 0 to 1.
def detect_red_light_mf(I, template_list: List): """ This function takes a numpy array <I> and returns a list <output>. The length of <output> is the number of bounding boxes predicted for <I>. Each entry of <output> is a list <[row_TL,col_TL,row_BR,col_BR,score]>. The first four entries are four integers specifying a bounding box (the row and column index of the top left corner and the row and column index of the bottom right corner). <score> is a confidence score ranging from 0 to 1. Note that PIL loads images in RGB order, so: I[:,:,0] is the red channel I[:,:,1] is the green channel I[:,:,2] is the blue channel template_list: list of numpy arrays corresponding to multiple filters """ # Use a shallow, "wide"-CNN with multiple matched filters # If an image matches any one of the templates, it gets added. stride = 2 # Speed up the computations # The less robust method # # def helper_func(template): # heatmap = compute_convolution(I, template, stride=stride) # bbox_list = predict_boxes(heatmap, template.shape[:-1], stride=stride) # return bbox_list # # Compute template matches in parallel to speed up # bbox_list_list = Parallel(n_jobs=-3)( # delayed(helper_func)(template) for template in template_list # ) # Compute template matches in parallel to speed up heatmap_list = Parallel(n_jobs=-3)( delayed(compute_convolution)(I, template, stride=stride) for template in template_list ) # Pad heatmaps to the same size heatmap_max_size = np.max([h.shape for h in heatmap_list], axis=0) heatmap_list = [endpad_to_size(h, heatmap_max_size) for h in heatmap_list] # Combine heatmaps by taking the average of the top-3 matches heatmap_sorted = np.sort(heatmap_list, axis=0) # heatmap is now in ascending correlation value. # We want the 3 highest correlation values (end of sort) heatmap_avg = heatmap_sorted[-3:].mean(axis=0) # Create map of which bounding box size to use at each location template_sizes = np.array([t.shape[:-1] for t in template_list]) heatmap_argmax = np.argmax(heatmap_list, axis=0) # -> might be able to convert this to an object array for use with argwhere heatmap_template_sizes = template_sizes[heatmap_argmax] # Use this heatmap to grab bboxes bbox_list = predict_boxes(heatmap_avg, heatmap_template_sizes, stride=stride) # Check on output for idx, bbox in enumerate(bbox_list): assert len(bbox) == 5 assert (bbox[4] >= 0.0) and (bbox[4] <= 1.0) return bbox_list
[ "def", "detect_red_light_mf", "(", "I", ",", "template_list", ":", "List", ")", ":", "# Use a shallow, \"wide\"-CNN with multiple matched filters", "# If an image matches any one of the templates, it gets added.", "stride", "=", "2", "# Speed up the computations", "# The less robust method", "#", "# def helper_func(template):", "# heatmap = compute_convolution(I, template, stride=stride)", "# bbox_list = predict_boxes(heatmap, template.shape[:-1], stride=stride)", "# return bbox_list", "# # Compute template matches in parallel to speed up", "# bbox_list_list = Parallel(n_jobs=-3)(", "# delayed(helper_func)(template) for template in template_list", "# )", "# Compute template matches in parallel to speed up", "heatmap_list", "=", "Parallel", "(", "n_jobs", "=", "-", "3", ")", "(", "delayed", "(", "compute_convolution", ")", "(", "I", ",", "template", ",", "stride", "=", "stride", ")", "for", "template", "in", "template_list", ")", "# Pad heatmaps to the same size", "heatmap_max_size", "=", "np", ".", "max", "(", "[", "h", ".", "shape", "for", "h", "in", "heatmap_list", "]", ",", "axis", "=", "0", ")", "heatmap_list", "=", "[", "endpad_to_size", "(", "h", ",", "heatmap_max_size", ")", "for", "h", "in", "heatmap_list", "]", "# Combine heatmaps by taking the average of the top-3 matches", "heatmap_sorted", "=", "np", ".", "sort", "(", "heatmap_list", ",", "axis", "=", "0", ")", "# heatmap is now in ascending correlation value.", "# We want the 3 highest correlation values (end of sort)", "heatmap_avg", "=", "heatmap_sorted", "[", "-", "3", ":", "]", ".", "mean", "(", "axis", "=", "0", ")", "# Create map of which bounding box size to use at each location", "template_sizes", "=", "np", ".", "array", "(", "[", "t", ".", "shape", "[", ":", "-", "1", "]", "for", "t", "in", "template_list", "]", ")", "heatmap_argmax", "=", "np", ".", "argmax", "(", "heatmap_list", ",", "axis", "=", "0", ")", "# -> might be able to convert this to an object array for use with argwhere", "heatmap_template_sizes", "=", "template_sizes", "[", "heatmap_argmax", "]", "# Use this heatmap to grab bboxes", "bbox_list", "=", "predict_boxes", "(", "heatmap_avg", ",", "heatmap_template_sizes", ",", "stride", "=", "stride", ")", "# Check on output", "for", "idx", ",", "bbox", "in", "enumerate", "(", "bbox_list", ")", ":", "assert", "len", "(", "bbox", ")", "==", "5", "assert", "(", "bbox", "[", "4", "]", ">=", "0.0", ")", "and", "(", "bbox", "[", "4", "]", "<=", "1.0", ")", "return", "bbox_list" ]
[ 105, 0 ]
[ 167, 20 ]
python
en
['en', 'error', 'th']
False
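The heatmap-combination step from the record in isolation: stack the per-template heatmaps, sort along the template axis, and average the three strongest correlations at every pixel. Values are invented.

    # Illustration with made-up correlation values.
    import numpy as np

    heatmaps = np.stack([
        [[0.1, 0.9]],
        [[0.4, 0.2]],
        [[0.3, 0.8]],
        [[0.6, 0.1]],
    ])                                            # (n_templates, rows, cols)
    top3_avg = np.sort(heatmaps, axis=0)[-3:].mean(axis=0)
    print(top3_avg)                               # [[0.43333333 0.63333333]]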
endpad_to_size
(array: np.ndarray, output_size: tuple, fill_value=np.nan)
Pad an array with fill_values at the end, up to output_size.
Pad an array with fill_values at the end, up to output_size.
def endpad_to_size(array: np.ndarray, output_size: tuple, fill_value=np.nan) -> np.ndarray: """Pad an array with fill_values at the end, up to output_size.""" pad_amounts = np.array(output_size, dtype=int) - np.array(array.shape, dtype=int) assert (pad_amounts >= 0).all() pad_before_after = [(0, pad) for pad in pad_amounts] output = np.pad(array, pad_before_after, constant_values=fill_value) return output
[ "def", "endpad_to_size", "(", "array", ":", "np", ".", "ndarray", ",", "output_size", ":", "tuple", ",", "fill_value", "=", "np", ".", "nan", ")", "->", "np", ".", "ndarray", ":", "pad_amounts", "=", "np", ".", "array", "(", "output_size", ",", "dtype", "=", "int", ")", "-", "np", ".", "array", "(", "array", ".", "shape", ",", "dtype", "=", "int", ")", "assert", "(", "pad_amounts", ">=", "0", ")", ".", "all", "(", ")", "pad_before_after", "=", "[", "(", "0", ",", "pad", ")", "for", "pad", "in", "pad_amounts", "]", "output", "=", "np", ".", "pad", "(", "array", ",", "pad_before_after", ",", "constant_values", "=", "fill_value", ")", "return", "output" ]
[ 170, 0 ]
[ 177, 17 ]
python
en
['en', 'en', 'en']
True
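Usage sketch for endpad_to_size, mirroring how the record uses it to bring differently sized heatmaps to a common shape before stacking.

    # Illustration: pad two arrays to a shared shape with NaN, then stack them.
    import numpy as np

    a = np.ones((2, 3))
    b = np.ones((3, 2))
    target = np.max([a.shape, b.shape], axis=0)      # [3, 3]
    stacked = np.stack([endpad_to_size(a, target), endpad_to_size(b, target)])
    print(stacked.shape)                              # (2, 3, 3)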
main
()
Make predictions on the training set.
Make predictions on the training set.
def main(): args = parse_args() # Load splits file_names_train = np.load(args.splits_folder.joinpath('file_names_train.npy')) file_names_test = np.load(args.splits_folder.joinpath('file_names_test.npy')) # Potentially sub-sample the file names if args.num_images is not None: file_names_train = np.random.choice(file_names_train, args.num_images) # Create folder for saving predictions, if it doesn't exist args.output_folder.mkdir(exist_ok=True) # Load in templates template_list = [] for template_path in args.template_folder.iterdir(): # Ignore non-jpg though if template_path.suffix != '.jpg': continue template = Image.open(template_path) template_list.append(np.asarray(template)) ''' Make predictions on the training set. ''' if not args.done_tweaking: preds_train = {} for fname in file_names_train: print('Processing train set:', fname) # read image using PIL: I = Image.open(args.data_folder.joinpath(fname)) # convert to numpy array: I = np.asarray(I) preds_train[fname] = detect_red_light_mf(I, template_list) # save preds (overwrites any previous predictions!) output_path = args.output_folder.joinpath('preds_train.json') with output_path.open('w') as f: print('Saving predictions to:', f.name) json.dump(preds_train, f) if args.done_tweaking: """ Make predictions on the test set. """ preds_test = {} for fname_test in file_names_test: print('Processing test set:', fname_test) # read image using PIL: I = Image.open(args.data_folder.joinpath(fname_test)) # convert to numpy array: I = np.asarray(I) preds_test[fname_test] = detect_red_light_mf(I, template_list) # save preds (overwrites any previous predictions!) output_path_test = args.output_folder.joinpath('preds_test.json') with output_path_test.open('w') as f: print('Saving predictions to:', f.name) json.dump(preds_test, f)
[ "def", "main", "(", ")", ":", "args", "=", "parse_args", "(", ")", "# Load splits", "file_names_train", "=", "np", ".", "load", "(", "args", ".", "splits_folder", ".", "joinpath", "(", "'file_names_train.npy'", ")", ")", "file_names_test", "=", "np", ".", "load", "(", "args", ".", "splits_folder", ".", "joinpath", "(", "'file_names_test.npy'", ")", ")", "# Potentially sub-sample the file names", "if", "args", ".", "num_images", "is", "not", "None", ":", "file_names_train", "=", "np", ".", "random", ".", "choice", "(", "file_names_train", ",", "args", ".", "num_images", ")", "# Create folder for saving predictions, if it doesn't exist", "args", ".", "output_folder", ".", "mkdir", "(", "exist_ok", "=", "True", ")", "# Load in templates", "template_list", "=", "[", "]", "for", "template_path", "in", "args", ".", "template_folder", ".", "iterdir", "(", ")", ":", "# Ignore non-jpg though", "if", "template_path", ".", "suffix", "!=", "'.jpg'", ":", "continue", "template", "=", "Image", ".", "open", "(", "template_path", ")", "template_list", ".", "append", "(", "np", ".", "asarray", "(", "template", ")", ")", "if", "not", "args", ".", "done_tweaking", ":", "preds_train", "=", "{", "}", "for", "fname", "in", "file_names_train", ":", "print", "(", "'Processing train set:'", ",", "fname", ")", "# read image using PIL:", "I", "=", "Image", ".", "open", "(", "args", ".", "data_folder", ".", "joinpath", "(", "fname", ")", ")", "# convert to numpy array:", "I", "=", "np", ".", "asarray", "(", "I", ")", "preds_train", "[", "fname", "]", "=", "detect_red_light_mf", "(", "I", ",", "template_list", ")", "# save preds (overwrites any previous predictions!)", "output_path", "=", "args", ".", "output_folder", ".", "joinpath", "(", "'preds_train.json'", ")", "with", "output_path", ".", "open", "(", "'w'", ")", "as", "f", ":", "print", "(", "'Saving predictions to:'", ",", "f", ".", "name", ")", "json", ".", "dump", "(", "preds_train", ",", "f", ")", "if", "args", ".", "done_tweaking", ":", "\"\"\"\n Make predictions on the test set.\n \"\"\"", "preds_test", "=", "{", "}", "for", "fname_test", "in", "file_names_test", ":", "print", "(", "'Processing test set:'", ",", "fname_test", ")", "# read image using PIL:", "I", "=", "Image", ".", "open", "(", "args", ".", "data_folder", ".", "joinpath", "(", "fname_test", ")", ")", "# convert to numpy array:", "I", "=", "np", ".", "asarray", "(", "I", ")", "preds_test", "[", "fname_test", "]", "=", "detect_red_light_mf", "(", "I", ",", "template_list", ")", "# save preds (overwrites any previous predictions!)", "output_path_test", "=", "args", ".", "output_folder", ".", "joinpath", "(", "'preds_test.json'", ")", "with", "output_path_test", ".", "open", "(", "'w'", ")", "as", "f", ":", "print", "(", "'Saving predictions to:'", ",", "f", ".", "name", ")", "json", ".", "dump", "(", "preds_test", ",", "f", ")" ]
[ 228, 0 ]
[ 292, 36 ]
python
en
['en', 'error', 'th']
False
V3Route.host_constraints
(self, prune_unreachable_routes: bool)
Return a set of hostglobs that match (a superset of) all hostnames that this route can apply to. An empty set means that this route cannot possibly apply to any hostnames. This considers SNI information and (if prune_unreachable_routes) HeaderMatchers that `exact_match` on the `:authority` header. There are other things that could narrow the set down more, but that we don't consider (like regex matches on `:authority`), leading to it possibly returning a set that is too broad. That's OK for correctness, it just means that we'll emit an Envoy config that contains extra work for Envoy.
Return a set of hostglobs that match (a superset of) all hostnames that this route can apply to.
def host_constraints(self, prune_unreachable_routes: bool) -> Set[str]: """Return a set of hostglobs that match (a superset of) all hostnames that this route can apply to. An emtpy set means that this route cannot possibly apply to any hostnames. This considers SNI information and (if prune_unreachable_routes) HeaderMatchers that `exact_match` on the `:authority` header. There are other things that could narrow the set down more, but that we don't consider (like regex matches on `:authority`), leading to it possibly returning a set that is too broad. That's OK for correctness, it just means that we'll emit an Envoy config that contains extra work for Envoy. """ # Start by grabbing a list of all the SNI host globs for this route. If there aren't any, # default to "*". hostglobs = set(self.get('_sni', {}).get('hosts', ['*'])) # If we're going to do any aggressive pruning here... if prune_unreachable_routes: # Note: We're *pruning*; the hostglobs set will only ever get *smaller*, it will never # grow. If it gets down to the empty set, then we can safely bail early. # Take all the HeaderMatchers... header_matchers = self.get("match", {}).get("headers", []) for header in header_matchers: # ... and look for ones that exact_match on :authority. if header.get("name") == ":authority" and "exact_match" in header: exact_match = header["exact_match"] if "*" in exact_match: # A real :authority header will never contain a "*", so if this route has an # exact_match looking for one, then this route is unreachable. hostglobs = set() break # hostglobs is empty, no point in doing more work elif any(hostglob_matches(glob, exact_match) for glob in hostglobs): # The exact_match that this route is looking for is matched by one or more # of the hostglobs; so this route is reachable (so far). Set hostglobs to # just match that route. Because we already checked if the exact_match # contains a "*", we don't need to worry about it possibly being interpreted # incorrectly as a glob. hostglobs = set([exact_match]) # Don't "break" here--if somehow this route has multiple disagreeing # HeaderMatchers on :authority, then it's unreachable and we want the next # iteration of the loop to trigger the "else" clause and prune hostglobs # down to the empty set. else: # The exact_match that this route is looking for isn't matched by any of the # hostglobs; so this route is unreachable. hostglobs = set() break # hostglobs is empty, no point in doing more work return hostglobs
[ "def", "host_constraints", "(", "self", ",", "prune_unreachable_routes", ":", "bool", ")", "->", "Set", "[", "str", "]", ":", "# Start by grabbing a list of all the SNI host globs for this route. If there aren't any,", "# default to \"*\".", "hostglobs", "=", "set", "(", "self", ".", "get", "(", "'_sni'", ",", "{", "}", ")", ".", "get", "(", "'hosts'", ",", "[", "'*'", "]", ")", ")", "# If we're going to do any aggressive pruning here...", "if", "prune_unreachable_routes", ":", "# Note: We're *pruning*; the hostglobs set will only ever get *smaller*, it will never", "# grow. If it gets down to the empty set, then we can safely bail early.", "# Take all the HeaderMatchers...", "header_matchers", "=", "self", ".", "get", "(", "\"match\"", ",", "{", "}", ")", ".", "get", "(", "\"headers\"", ",", "[", "]", ")", "for", "header", "in", "header_matchers", ":", "# ... and look for ones that exact_match on :authority.", "if", "header", ".", "get", "(", "\"name\"", ")", "==", "\":authority\"", "and", "\"exact_match\"", "in", "header", ":", "exact_match", "=", "header", "[", "\"exact_match\"", "]", "if", "\"*\"", "in", "exact_match", ":", "# A real :authority header will never contain a \"*\", so if this route has an", "# exact_match looking for one, then this route is unreachable.", "hostglobs", "=", "set", "(", ")", "break", "# hostglobs is empty, no point in doing more work", "elif", "any", "(", "hostglob_matches", "(", "glob", ",", "exact_match", ")", "for", "glob", "in", "hostglobs", ")", ":", "# The exact_match that this route is looking for is matched by one or more", "# of the hostglobs; so this route is reachable (so far). Set hostglobs to", "# just match that route. Because we already checked if the exact_match", "# contains a \"*\", we don't need to worry about it possibly being interpreted", "# incorrectly as a glob.", "hostglobs", "=", "set", "(", "[", "exact_match", "]", ")", "# Don't \"break\" here--if somehow this route has multiple disagreeing", "# HeaderMatchers on :authority, then it's unreachable and we want the next", "# iteration of the loop to trigger the \"else\" clause and prune hostglobs", "# down to the empty set.", "else", ":", "# The exact_match that this route is looking for isn't matched by any of the", "# hostglobs; so this route is unreachable.", "hostglobs", "=", "set", "(", ")", "break", "# hostglobs is empty, no point in doing more work", "return", "hostglobs" ]
[ 411, 4 ]
[ 464, 24 ]
python
en
['en', 'en', 'en']
True
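A hedged illustration of the pruning rule described above, with fnmatch standing in for the hostglob_matches helper the record calls (the real helper's glob semantics may differ); hostnames and globs are invented.

    # Illustration only.
    from fnmatch import fnmatch

    hostglobs = {"*.example.com", "internal.local"}
    exact_match = "api.example.com"     # value of an ':authority' exact_match HeaderMatcher

    if "*" in exact_match:
        hostglobs = set()               # a real :authority never contains '*': unreachable
    elif any(fnmatch(exact_match, glob) for glob in hostglobs):
        hostglobs = {exact_match}       # reachable; narrow to the matched authority
    else:
        hostglobs = set()               # no glob can ever match this authority
    print(hostglobs)                    # {'api.example.com'}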
test_expectation_suite_equality
(baseline_suite, identical_suite, equivalent_suite)
Equality should depend on all defined properties of a configuration object, but not on whether the *instances* are the same.
Equality should depend on all defined properties of a configuration object, but not on whether the *instances* are the same.
def test_expectation_suite_equality(baseline_suite, identical_suite, equivalent_suite): """Equality should depend on all defined properties of a configuration object, but not on whether the *instances* are the same.""" assert baseline_suite is baseline_suite # no difference assert ( baseline_suite is not identical_suite ) # different instances, but same content assert baseline_suite == identical_suite # different instances, but same content assert not (baseline_suite != identical_suite) # ne works properly assert not (baseline_suite == equivalent_suite) # different meta assert baseline_suite != equivalent_suite
[ "def", "test_expectation_suite_equality", "(", "baseline_suite", ",", "identical_suite", ",", "equivalent_suite", ")", ":", "assert", "baseline_suite", "is", "baseline_suite", "# no difference", "assert", "(", "baseline_suite", "is", "not", "identical_suite", ")", "# different instances, but same content", "assert", "baseline_suite", "==", "identical_suite", "# different instances, but same content", "assert", "not", "(", "baseline_suite", "!=", "identical_suite", ")", "# ne works properly", "assert", "not", "(", "baseline_suite", "==", "equivalent_suite", ")", "# different meta", "assert", "baseline_suite", "!=", "equivalent_suite" ]
[ 215, 0 ]
[ 225, 45 ]
python
en
['en', 'en', 'en']
True
test_expectation_suite_equivalence
( baseline_suite, identical_suite, equivalent_suite, different_suite, single_expectation_suite, )
Equivalence should depend only on properties that affect the result of the expectation.
Equivalence should depend only on properties that affect the result of the expectation.
def test_expectation_suite_equivalence( baseline_suite, identical_suite, equivalent_suite, different_suite, single_expectation_suite, ): """Equivalence should depend only on properties that affect the result of the expectation.""" assert baseline_suite.isEquivalentTo(baseline_suite) # no difference assert baseline_suite.isEquivalentTo(identical_suite) assert baseline_suite.isEquivalentTo(equivalent_suite) # different meta assert not baseline_suite.isEquivalentTo( different_suite ) # different value_set in one expectation assert not single_expectation_suite.isEquivalentTo(baseline_suite)
[ "def", "test_expectation_suite_equivalence", "(", "baseline_suite", ",", "identical_suite", ",", "equivalent_suite", ",", "different_suite", ",", "single_expectation_suite", ",", ")", ":", "assert", "baseline_suite", ".", "isEquivalentTo", "(", "baseline_suite", ")", "# no difference", "assert", "baseline_suite", ".", "isEquivalentTo", "(", "identical_suite", ")", "assert", "baseline_suite", ".", "isEquivalentTo", "(", "equivalent_suite", ")", "# different meta", "assert", "not", "baseline_suite", ".", "isEquivalentTo", "(", "different_suite", ")", "# different value_set in one expectation", "assert", "not", "single_expectation_suite", ".", "isEquivalentTo", "(", "baseline_suite", ")" ]
[ 228, 0 ]
[ 242, 70 ]
python
en
['en', 'en', 'en']
True
DefaultJinjaView.render_content_block
( self, jinja_context, content_block, index=None, content_block_id=None, render_to_markdown: bool = False, )
:param jinja_context: :param content_block: :param index: :param content_block_id: :param render_to_markdown: Whether this method should render the markdown version instead of HTML :return:
def render_content_block( self, jinja_context, content_block, index=None, content_block_id=None, render_to_markdown: bool = False, ): """ :param jinja_context: :param content_block: :param index: :param content_block_id: :param render_to_markdown: Whether this method should render the markdown version instead of HTML :return: """ if isinstance(content_block, str): return content_block elif content_block is None: return "" elif isinstance(content_block, list): # If the content_block item here is actually a list of content blocks then we want to recursively render rendered_block = "" for idx, content_block_el in enumerate(content_block): if ( isinstance(content_block_el, RenderedComponentContent) or isinstance(content_block_el, dict) and "content_block_type" in content_block_el ): new_content_block_id = None if content_block_id: new_content_block_id = content_block_id + "-" + str(idx) rendered_block += self.render_content_block( jinja_context, content_block_el, idx, content_block_id=new_content_block_id, ) else: if render_to_markdown: rendered_block += str(content_block_el) else: rendered_block += "<span>" + str(content_block_el) + "</span>" return rendered_block elif not isinstance(content_block, dict): return content_block content_block_type = content_block.get("content_block_type") if content_block_type is None: return content_block if render_to_markdown: template_filename = f"markdown_{content_block_type}.j2" else: template_filename = f"{content_block_type}.j2" template = self._get_template(template=template_filename) if content_block_id: return template.render( jinja_context, content_block=content_block, index=index, content_block_id=content_block_id, ) else: return template.render( jinja_context, content_block=content_block, index=index )
[ "def", "render_content_block", "(", "self", ",", "jinja_context", ",", "content_block", ",", "index", "=", "None", ",", "content_block_id", "=", "None", ",", "render_to_markdown", ":", "bool", "=", "False", ",", ")", ":", "if", "isinstance", "(", "content_block", ",", "str", ")", ":", "return", "content_block", "elif", "content_block", "is", "None", ":", "return", "\"\"", "elif", "isinstance", "(", "content_block", ",", "list", ")", ":", "# If the content_block item here is actually a list of content blocks then we want to recursively render", "rendered_block", "=", "\"\"", "for", "idx", ",", "content_block_el", "in", "enumerate", "(", "content_block", ")", ":", "if", "(", "isinstance", "(", "content_block_el", ",", "RenderedComponentContent", ")", "or", "isinstance", "(", "content_block_el", ",", "dict", ")", "and", "\"content_block_type\"", "in", "content_block_el", ")", ":", "new_content_block_id", "=", "None", "if", "content_block_id", ":", "new_content_block_id", "=", "content_block_id", "+", "\"-\"", "+", "str", "(", "idx", ")", "rendered_block", "+=", "self", ".", "render_content_block", "(", "jinja_context", ",", "content_block_el", ",", "idx", ",", "content_block_id", "=", "new_content_block_id", ",", ")", "else", ":", "if", "render_to_markdown", ":", "rendered_block", "+=", "str", "(", "content_block_el", ")", "else", ":", "rendered_block", "+=", "\"<span>\"", "+", "str", "(", "content_block_el", ")", "+", "\"</span>\"", "return", "rendered_block", "elif", "not", "isinstance", "(", "content_block", ",", "dict", ")", ":", "return", "content_block", "content_block_type", "=", "content_block", ".", "get", "(", "\"content_block_type\"", ")", "if", "content_block_type", "is", "None", ":", "return", "content_block", "if", "render_to_markdown", ":", "template_filename", "=", "f\"markdown_{content_block_type}.j2\"", "else", ":", "template_filename", "=", "f\"{content_block_type}.j2\"", "template", "=", "self", ".", "_get_template", "(", "template", "=", "template_filename", ")", "if", "content_block_id", ":", "return", "template", ".", "render", "(", "jinja_context", ",", "content_block", "=", "content_block", ",", "index", "=", "index", ",", "content_block_id", "=", "content_block_id", ",", ")", "else", ":", "return", "template", ".", "render", "(", "jinja_context", ",", "content_block", "=", "content_block", ",", "index", "=", "index", ")" ]
[ 128, 4 ]
[ 194, 13 ]
python
en
['en', 'error', 'th']
False
DefaultJinjaView.render_styling
(self, styling)
Adds styling information suitable for an html tag. Example styling block:: styling = { "classes": ["alert", "alert-warning"], "attributes": { "role": "alert", "data-toggle": "popover", }, "styles" : { "padding" : "10px", "border-radius" : "2px", } } The above block returns a string similar to:: 'class="alert alert-warning" role="alert" data-toggle="popover" style="padding: 10px; border-radius: 2px"' "classes", "attributes" and "styles" are all optional parameters. If they aren't present, they simply won't be rendered. Other dictionary keys are also allowed and ignored.
Adds styling information suitable for an html tag.
def render_styling(self, styling): """Adds styling information suitable for an html tag. Example styling block:: styling = { "classes": ["alert", "alert-warning"], "attributes": { "role": "alert", "data-toggle": "popover", }, "styles" : { "padding" : "10px", "border-radius" : "2px", } } The above block returns a string similar to:: 'class="alert alert-warning" role="alert" data-toggle="popover" style="padding: 10px; border-radius: 2px"' "classes", "attributes" and "styles" are all optional parameters. If they aren't present, they simply won't be rendered. Other dictionary keys are also allowed and ignored. """ class_list = styling.get("classes", None) if class_list is None: class_str = "" else: if type(class_list) == str: raise TypeError("classes must be a list, not a string.") class_str = 'class="' + " ".join(class_list) + '" ' attribute_dict = styling.get("attributes", None) if attribute_dict is None: attribute_str = "" else: attribute_str = "" for k, v in attribute_dict.items(): attribute_str += k + '="' + v + '" ' style_dict = styling.get("styles", None) if style_dict is None: style_str = "" else: style_str = 'style="' style_str += " ".join([k + ":" + v + ";" for k, v in style_dict.items()]) style_str += '" ' styling_string = pTemplate("$classes$attributes$style").substitute( { "classes": class_str, "attributes": attribute_str, "style": style_str, } ) return styling_string
[ "def", "render_styling", "(", "self", ",", "styling", ")", ":", "class_list", "=", "styling", ".", "get", "(", "\"classes\"", ",", "None", ")", "if", "class_list", "is", "None", ":", "class_str", "=", "\"\"", "else", ":", "if", "type", "(", "class_list", ")", "==", "str", ":", "raise", "TypeError", "(", "\"classes must be a list, not a string.\"", ")", "class_str", "=", "'class=\"'", "+", "\" \"", ".", "join", "(", "class_list", ")", "+", "'\" '", "attribute_dict", "=", "styling", ".", "get", "(", "\"attributes\"", ",", "None", ")", "if", "attribute_dict", "is", "None", ":", "attribute_str", "=", "\"\"", "else", ":", "attribute_str", "=", "\"\"", "for", "k", ",", "v", "in", "attribute_dict", ".", "items", "(", ")", ":", "attribute_str", "+=", "k", "+", "'=\"'", "+", "v", "+", "'\" '", "style_dict", "=", "styling", ".", "get", "(", "\"styles\"", ",", "None", ")", "if", "style_dict", "is", "None", ":", "style_str", "=", "\"\"", "else", ":", "style_str", "=", "'style=\"'", "style_str", "+=", "\" \"", ".", "join", "(", "[", "k", "+", "\":\"", "+", "v", "+", "\";\"", "for", "k", ",", "v", "in", "style_dict", ".", "items", "(", ")", "]", ")", "style_str", "+=", "'\" '", "styling_string", "=", "pTemplate", "(", "\"$classes$attributes$style\"", ")", ".", "substitute", "(", "{", "\"classes\"", ":", "class_str", ",", "\"attributes\"", ":", "attribute_str", ",", "\"style\"", ":", "style_str", ",", "}", ")", "return", "styling_string" ]
[ 223, 4 ]
[ 283, 29 ]
python
en
['en', 'en', 'en']
True
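A self-contained sketch of the same class/attribute/style assembly, assuming pTemplate in the record is the standard-library string.Template.

    # Standalone illustration; mirrors the assembly logic, output keeps trailing spaces.
    from string import Template as pTemplate

    styling = {
        "classes": ["alert", "alert-warning"],
        "attributes": {"role": "alert"},
        "styles": {"padding": "10px"},
    }
    class_str = 'class="' + " ".join(styling["classes"]) + '" '
    attribute_str = "".join(k + '="' + v + '" ' for k, v in styling["attributes"].items())
    style_str = 'style="' + " ".join(k + ":" + v + ";" for k, v in styling["styles"].items()) + '" '
    print(pTemplate("$classes$attributes$style").substitute(
        {"classes": class_str, "attributes": attribute_str, "style": style_str}
    ))
    # class="alert alert-warning" role="alert" style="padding:10px;"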
DefaultJinjaView.render_styling_from_string_template
(self, template)
This method is a thin wrapper used to call `render_styling` from within jinja templates.
This method is a thin wrapper used to call `render_styling` from within jinja templates.
def render_styling_from_string_template(self, template): # NOTE: We should add some kind of type-checking to template """This method is a thin wrapper use to call `render_styling` from within jinja templates.""" if not isinstance(template, (dict, OrderedDict)): return template if "styling" in template: return self.render_styling(template["styling"]) else: return ""
[ "def", "render_styling_from_string_template", "(", "self", ",", "template", ")", ":", "# NOTE: We should add some kind of type-checking to template", "if", "not", "isinstance", "(", "template", ",", "(", "dict", ",", "OrderedDict", ")", ")", ":", "return", "template", "if", "\"styling\"", "in", "template", ":", "return", "self", ".", "render_styling", "(", "template", "[", "\"styling\"", "]", ")", "else", ":", "return", "\"\"" ]
[ 285, 4 ]
[ 295, 21 ]
python
en
['en', 'en', 'en']
True
DefaultMarkdownPageView._validate_document
(self, document: RenderedDocumentContent)
Validate that the document is of the appropriate type at runtime.
Validate that the document is of the appropriate type at runtime.
def _validate_document(self, document: RenderedDocumentContent) -> bool: """ Validate that the document is of the appropriate type at runtime. """ assert isinstance(document, RenderedDocumentContent)
[ "def", "_validate_document", "(", "self", ",", "document", ":", "RenderedDocumentContent", ")", "->", "bool", ":", "assert", "isinstance", "(", "document", ",", "RenderedDocumentContent", ")" ]
[ 470, 4 ]
[ 474, 60 ]
python
en
['en', 'error', 'th']
False
DefaultMarkdownPageView.render
(self, document, template=None, **kwargs)
Handle list as well as single document
Handle list as well as single document
def render(self, document, template=None, **kwargs): """ Handle list as well as single document """ if isinstance(document, list): # We need to keep this as super(DefaultMarkdownPageView, self); otherwise a wrong render will be called. return [ super(DefaultMarkdownPageView, self).render( document=d, template=template, **kwargs ) for d in document ] else: return super().render(document=document, template=template, **kwargs)
[ "def", "render", "(", "self", ",", "document", ",", "template", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "document", ",", "list", ")", ":", "# We need to keep this as super(DefaultMarkdownPageView, self); otherwise a wrong render will be called.", "return", "[", "super", "(", "DefaultMarkdownPageView", ",", "self", ")", ".", "render", "(", "document", "=", "d", ",", "template", "=", "template", ",", "*", "*", "kwargs", ")", "for", "d", "in", "document", "]", "else", ":", "return", "super", "(", ")", ".", "render", "(", "document", "=", "document", ",", "template", "=", "template", ",", "*", "*", "kwargs", ")" ]
[ 478, 4 ]
[ 492, 81 ]
python
en
['en', 'error', 'th']
False
DefaultMarkdownPageView.render_string_template
(self, template: pTemplate)
Render string for markdown rendering. Bold all parameters and perform substitution. Args: template: python Template object Returns: Template with substituted values and all parameters bolded
Render string for markdown rendering. Bold all parameters and perform substitution. Args: template: python Template object
def render_string_template(self, template: pTemplate) -> pTemplate: """ Render string for markdown rendering. Bold all parameters and perform substitution. Args: template: python Template object Returns: Template with substituted values and all parameters bolded """ if not isinstance(template, (dict, OrderedDict)): return template # replace and render any horizontal lines using *** tag = template.get("tag", None) if tag and tag == "hr": template["template"] = "***" # if there are any groupings of two or more $, we need to double the groupings to account # for template string substitution escaping template["template"] = re.sub( r"\${2,}", lambda m: m.group(0) * 2, template.get("template", "") ) # Bold all parameters: base_param_template_string = "**$content**" # Make sure template["params"] is a dict template["params"] = template.get("params", {}) # TODO: Revisit handling of icons in markdown. E.g. inline rendered icons. if "markdown_status_icon" in template["params"]: return template["params"]["markdown_status_icon"] for parameter in template["params"].keys(): if parameter == "html_success_icon": template["params"][parameter] = "" continue # to escape any values that are '*' which, when combined with bold ('**') in markdown, # does not give the output we want. elif template["params"][parameter] == "*": template["params"][parameter] = "\\*" continue template["params"][parameter] = pTemplate( base_param_template_string ).safe_substitute( { "content": template["params"][parameter], } ) template["template"] = template.get("template", "").replace( "$PARAMETER", "$$PARAMETER" ) return pTemplate(template.get("template")).safe_substitute( template.get("params", {}) )
[ "def", "render_string_template", "(", "self", ",", "template", ":", "pTemplate", ")", "->", "pTemplate", ":", "if", "not", "isinstance", "(", "template", ",", "(", "dict", ",", "OrderedDict", ")", ")", ":", "return", "template", "# replace and render any horizontal lines using ***", "tag", "=", "template", ".", "get", "(", "\"tag\"", ",", "None", ")", "if", "tag", "and", "tag", "==", "\"hr\"", ":", "template", "[", "\"template\"", "]", "=", "\"***\"", "# if there are any groupings of two or more $, we need to double the groupings to account", "# for template string substitution escaping", "template", "[", "\"template\"", "]", "=", "re", ".", "sub", "(", "r\"\\${2,}\"", ",", "lambda", "m", ":", "m", ".", "group", "(", "0", ")", "*", "2", ",", "template", ".", "get", "(", "\"template\"", ",", "\"\"", ")", ")", "# Bold all parameters:", "base_param_template_string", "=", "\"**$content**\"", "# Make sure template[\"params\"] is a dict", "template", "[", "\"params\"", "]", "=", "template", ".", "get", "(", "\"params\"", ",", "{", "}", ")", "# TODO: Revisit handling of icons in markdown. E.g. inline rendered icons.", "if", "\"markdown_status_icon\"", "in", "template", "[", "\"params\"", "]", ":", "return", "template", "[", "\"params\"", "]", "[", "\"markdown_status_icon\"", "]", "for", "parameter", "in", "template", "[", "\"params\"", "]", ".", "keys", "(", ")", ":", "if", "parameter", "==", "\"html_success_icon\"", ":", "template", "[", "\"params\"", "]", "[", "parameter", "]", "=", "\"\"", "continue", "# to escape any values that are '*' which, when combined with bold ('**') in markdown,", "# does not give the output we want.", "elif", "template", "[", "\"params\"", "]", "[", "parameter", "]", "==", "\"*\"", ":", "template", "[", "\"params\"", "]", "[", "parameter", "]", "=", "\"\\\\*\"", "continue", "template", "[", "\"params\"", "]", "[", "parameter", "]", "=", "pTemplate", "(", "base_param_template_string", ")", ".", "safe_substitute", "(", "{", "\"content\"", ":", "template", "[", "\"params\"", "]", "[", "parameter", "]", ",", "}", ")", "template", "[", "\"template\"", "]", "=", "template", ".", "get", "(", "\"template\"", ",", "\"\"", ")", ".", "replace", "(", "\"$PARAMETER\"", ",", "\"$$PARAMETER\"", ")", "return", "pTemplate", "(", "template", ".", "get", "(", "\"template\"", ")", ")", ".", "safe_substitute", "(", "template", ".", "get", "(", "\"params\"", ",", "{", "}", ")", ")" ]
[ 494, 4 ]
[ 553, 9 ]
python
en
['en', 'error', 'th']
False
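The parameter-bolding in `render_string_template` above boils down to wrapping each value in a `**$content**` template via Python's `string.Template` and then substituting into the outer template. A minimal standalone sketch of that idea, not using great_expectations itself (the template text and parameter names below are invented for illustration):

```python
from string import Template

# Hypothetical renderer input: a template string plus parameter values.
content = {
    "template": "Column values must be between $min_value and $max_value.",
    "params": {"min_value": 1, "max_value": 10},
}

# Wrap every parameter in markdown bold, mirroring the "**$content**" pattern above.
bolded = {
    name: Template("**$content**").safe_substitute({"content": value})
    for name, value in content["params"].items()
}

print(Template(content["template"]).safe_substitute(bolded))
# Column values must be between **1** and **10**.
```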
DefaultMarkdownPageView.render_content_block
( self, jinja_context, content_block, index=None, content_block_id=None, render_to_markdown: bool = True, )
Render a content block to markdown using jinja templates. Args: jinja_context: content_block: index: content_block_id: render_to_markdown: Default of True here instead of parent class default of False Returns:
Render a content block to markdown using jinja templates. Args: jinja_context: content_block: index: content_block_id: render_to_markdown: Default of True here instead of parent class default of False
def render_content_block( self, jinja_context, content_block, index=None, content_block_id=None, render_to_markdown: bool = True, ): """ Render a content block to markdown using jinja templates. Args: jinja_context: content_block: index: content_block_id: render_to_markdown: Default of True here instead of parent class default of False Returns: """ return super().render_content_block( jinja_context=jinja_context, content_block=content_block, index=index, content_block_id=content_block_id, render_to_markdown=render_to_markdown, )
[ "def", "render_content_block", "(", "self", ",", "jinja_context", ",", "content_block", ",", "index", "=", "None", ",", "content_block_id", "=", "None", ",", "render_to_markdown", ":", "bool", "=", "True", ",", ")", ":", "return", "super", "(", ")", ".", "render_content_block", "(", "jinja_context", "=", "jinja_context", ",", "content_block", "=", "content_block", ",", "index", "=", "index", ",", "content_block_id", "=", "content_block_id", ",", "render_to_markdown", "=", "render_to_markdown", ",", ")" ]
[ 556, 4 ]
[ 583, 9 ]
python
en
['en', 'error', 'th']
False
test_checkpoint_script_happy_path_executable_successful_validation_with_ge_config_v2
( caplog, titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled )
We call the "checkpoint script" command on a project with a Checkpoint. The command should: - create the script (note output is tested in other tests) When run the script should: - execute - return a 0 status code - print a success message
We call the "checkpoint script" command on a project with a Checkpoint.
def test_checkpoint_script_happy_path_executable_successful_validation_with_ge_config_v2( caplog, titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled ): """ We call the "checkpoint script" command on a project with a Checkpoint. The command should: - create the script (note output is tested in other tests) When run the script should: - execute - return a 0 status code - print a success message """ context = titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled root_dir = context.root_directory runner = CliRunner(mix_stderr=False) result = runner.invoke( cli, f"checkpoint script my_checkpoint -d {root_dir}", catch_exceptions=False, ) stdout = result.stdout assert result.exit_code == 0 assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, allowed_deprecation_message=LEGACY_CONFIG_DEFAULT_CHECKPOINT_STORE_MESSAGE, ) script_path = os.path.abspath( os.path.join(root_dir, context.GE_UNCOMMITTED_DIR, "run_my_checkpoint.py") ) assert os.path.isfile(script_path) # In travis on osx, python may not execute from the build dir cmdstring = f"python {script_path}" if os.environ.get("TRAVIS_OS_NAME") == "osx": build_dir = os.environ.get("TRAVIS_BUILD_DIR") print(os.listdir(build_dir)) cmdstring = f"python3 {script_path}" print("about to run: " + cmdstring) print(os.curdir) print(os.listdir(os.curdir)) print(os.listdir(os.path.abspath(os.path.join(root_dir, "..")))) status, output = subprocess.getstatusoutput(cmdstring) print(f"\n\nScript exited with code: {status} and output:\n{output}") assert status == 0 assert "Validation succeeded!" in output
[ "def", "test_checkpoint_script_happy_path_executable_successful_validation_with_ge_config_v2", "(", "caplog", ",", "titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled", ")", ":", "context", "=", "titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled", "root_dir", "=", "context", ".", "root_directory", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"checkpoint script my_checkpoint -d {root_dir}\"", ",", "catch_exceptions", "=", "False", ",", ")", "stdout", "=", "result", ".", "stdout", "assert", "result", ".", "exit_code", "==", "0", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", "allowed_deprecation_message", "=", "LEGACY_CONFIG_DEFAULT_CHECKPOINT_STORE_MESSAGE", ",", ")", "script_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "context", ".", "GE_UNCOMMITTED_DIR", ",", "\"run_my_checkpoint.py\"", ")", ")", "assert", "os", ".", "path", ".", "isfile", "(", "script_path", ")", "# In travis on osx, python may not execute from the build dir", "cmdstring", "=", "f\"python {script_path}\"", "if", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_OS_NAME\"", ")", "==", "\"osx\"", ":", "build_dir", "=", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_BUILD_DIR\"", ")", "print", "(", "os", ".", "listdir", "(", "build_dir", ")", ")", "cmdstring", "=", "f\"python3 {script_path}\"", "print", "(", "\"about to run: \"", "+", "cmdstring", ")", "print", "(", "os", ".", "curdir", ")", "print", "(", "os", ".", "listdir", "(", "os", ".", "curdir", ")", ")", "print", "(", "os", ".", "listdir", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"..\"", ")", ")", ")", ")", "status", ",", "output", "=", "subprocess", ".", "getstatusoutput", "(", "cmdstring", ")", "print", "(", "f\"\\n\\nScript exited with code: {status} and output:\\n{output}\"", ")", "assert", "status", "==", "0", "assert", "\"Validation succeeded!\"", "in", "output" ]
[ 989, 0 ]
[ 1039, 44 ]
python
en
['en', 'error', 'th']
False
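Both checkpoint-script tests drive the CLI in-process through Click's `CliRunner`. The same pattern in isolation, with a toy command standing in for the real great_expectations CLI (the `hello` command here is invented for the example):

```python
import click
from click.testing import CliRunner

@click.command()
@click.argument("name")
def hello(name):
    """Toy command standing in for the real checkpoint CLI."""
    click.echo(f"Hello, {name}!")

runner = CliRunner()
result = runner.invoke(hello, ["world"], catch_exceptions=False)

assert result.exit_code == 0
assert "Hello, world!" in result.output
```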
test_checkpoint_script_happy_path_executable_failed_validation_with_ge_config_v2
( caplog, titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled )
We call the "checkpoint script" command on a project with a Checkpoint. The command should: - create the script (note output is tested in other tests) When run the script should: - execute - return a 1 status code - print a failure message
We call the "checkpoint script" command on a project with a Checkpoint.
def test_checkpoint_script_happy_path_executable_failed_validation_with_ge_config_v2( caplog, titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled ): """ We call the "checkpoint script" command on a project with a Checkpoint. The command should: - create the script (note output is tested in other tests) When run the script should: - execute - return a 1 status code - print a failure message """ context = titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled root_dir = context.root_directory # mangle the csv csv_path = os.path.join(context.root_directory, "..", "data", "Titanic.csv") with open(csv_path, "w") as f: f.write("foo,bar\n1,2\n") runner = CliRunner(mix_stderr=False) result = runner.invoke( cli, f"checkpoint script my_checkpoint -d {root_dir}", catch_exceptions=False, ) assert result.exit_code == 0 assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, allowed_deprecation_message=LEGACY_CONFIG_DEFAULT_CHECKPOINT_STORE_MESSAGE, ) script_path = os.path.abspath( os.path.join(root_dir, context.GE_UNCOMMITTED_DIR, "run_my_checkpoint.py") ) assert os.path.isfile(script_path) # In travis on osx, python may not execute from the build dir cmdstring = f"python {script_path}" if os.environ.get("TRAVIS_OS_NAME") == "osx": build_dir = os.environ.get("TRAVIS_BUILD_DIR") print(os.listdir(build_dir)) cmdstring = f"python3 {script_path}" print("about to run: " + cmdstring) print(os.curdir) print(os.listdir(os.curdir)) print(os.listdir(os.path.abspath(os.path.join(root_dir, "..")))) status, output = subprocess.getstatusoutput(cmdstring) print(f"\n\nScript exited with code: {status} and output:\n{output}") assert status == 1 assert "Validation failed!" in output
[ "def", "test_checkpoint_script_happy_path_executable_failed_validation_with_ge_config_v2", "(", "caplog", ",", "titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled", ")", ":", "context", "=", "titanic_data_context_v2_with_checkpoint_suite_and_stats_enabled", "root_dir", "=", "context", ".", "root_directory", "# mangle the csv", "csv_path", "=", "os", ".", "path", ".", "join", "(", "context", ".", "root_directory", ",", "\"..\"", ",", "\"data\"", ",", "\"Titanic.csv\"", ")", "with", "open", "(", "csv_path", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "\"foo,bar\\n1,2\\n\"", ")", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"checkpoint script my_checkpoint -d {root_dir}\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", "allowed_deprecation_message", "=", "LEGACY_CONFIG_DEFAULT_CHECKPOINT_STORE_MESSAGE", ",", ")", "script_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "context", ".", "GE_UNCOMMITTED_DIR", ",", "\"run_my_checkpoint.py\"", ")", ")", "assert", "os", ".", "path", ".", "isfile", "(", "script_path", ")", "# In travis on osx, python may not execute from the build dir", "cmdstring", "=", "f\"python {script_path}\"", "if", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_OS_NAME\"", ")", "==", "\"osx\"", ":", "build_dir", "=", "os", ".", "environ", ".", "get", "(", "\"TRAVIS_BUILD_DIR\"", ")", "print", "(", "os", ".", "listdir", "(", "build_dir", ")", ")", "cmdstring", "=", "f\"python3 {script_path}\"", "print", "(", "\"about to run: \"", "+", "cmdstring", ")", "print", "(", "os", ".", "curdir", ")", "print", "(", "os", ".", "listdir", "(", "os", ".", "curdir", ")", ")", "print", "(", "os", ".", "listdir", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "\"..\"", ")", ")", ")", ")", "status", ",", "output", "=", "subprocess", ".", "getstatusoutput", "(", "cmdstring", ")", "print", "(", "f\"\\n\\nScript exited with code: {status} and output:\\n{output}\"", ")", "assert", "status", "==", "1", "assert", "\"Validation failed!\"", "in", "output" ]
[ 1042, 0 ]
[ 1095, 41 ]
python
en
['en', 'error', 'th']
False
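After generating the script, both tests shell out with `subprocess.getstatusoutput` and assert on the exit status and combined output. A self-contained illustration of that call (the inline commands are placeholders, not the generated checkpoint script):

```python
import subprocess
import sys

# Succeeding command: exit status 0 and the expected message in the output.
ok_cmd = f'{sys.executable} -c "print(\'Validation succeeded!\')"'
status, output = subprocess.getstatusoutput(ok_cmd)
assert status == 0 and "Validation succeeded!" in output

# Failing command: a non-zero exit status, which the failed-validation test checks for.
fail_cmd = f'{sys.executable} -c "raise SystemExit(1)"'
status, _ = subprocess.getstatusoutput(fail_cmd)
assert status == 1
```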
series_to_supervised
(data, n_in=1, n_out=1, dropnan=True)
Frame a time series as a supervised learning dataset. Arguments: :param data: Sequence of observations as a list or NumPy array. :param n_in: Number of lag observations as input (X). :param n_out: Number of observations as output (y). :param dropnan: Boolean whether or not to drop rows with NaN values. :return: Pandas DataFrame of series framed for supervised learning.
Frame a time series as a supervised learning dataset. Arguments: :param data: Sequence of observations as a list or NumPy array. :param n_in: Number of lag observations as input (X). :param n_out: Number of observations as output (y). :param dropnan: Boolean whether or not to drop rows with NaN values. :return: Pandas DataFrame of series framed for supervised learning.
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True): """ Frame a time series as a supervised learning dataset. Arguments: :param data: Sequence of observations as a list or NumPy array. :param n_in: Number of lag observations as input (X). :param n_out: Number of observations as output (y). :param dropnan: Boolean whether or not to drop rows with NaN values. :return: Pandas DataFrame of series framed for supervised learning. """ n_vars = 1 if type(data) is list else data.shape[1] dataframe = pd.DataFrame(data) cols, names = list(), list() # input sequence (t-n, ... t-1) for i in range(n_in, 0, -1): cols.append(dataframe.shift(i)) names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)] # forecast sequence (t, t+1, ... t+n) for i in range(0, n_out): cols.append(dataframe.shift(-i)) if i == 0: names += [('var%d(t)' % (j+1)) for j in range(n_vars)] else: names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)] # put it all together agg = pd.concat(cols, axis=1) agg.columns = names # drop rows with NaN values if dropnan: agg.dropna(inplace=True) return agg
[ "def", "series_to_supervised", "(", "data", ",", "n_in", "=", "1", ",", "n_out", "=", "1", ",", "dropnan", "=", "True", ")", ":", "n_vars", "=", "1", "if", "type", "(", "data", ")", "is", "list", "else", "data", ".", "shape", "[", "1", "]", "dataframe", "=", "pd", ".", "DataFrame", "(", "data", ")", "cols", ",", "names", "=", "list", "(", ")", ",", "list", "(", ")", "# input sequence (t-n, ... t-1)\r", "for", "i", "in", "range", "(", "n_in", ",", "0", ",", "-", "1", ")", ":", "cols", ".", "append", "(", "dataframe", ".", "shift", "(", "i", ")", ")", "names", "+=", "[", "(", "'var%d(t-%d)'", "%", "(", "j", "+", "1", ",", "i", ")", ")", "for", "j", "in", "range", "(", "n_vars", ")", "]", "# forecast sequence (t, t+1, ... t+n)\r", "for", "i", "in", "range", "(", "0", ",", "n_out", ")", ":", "cols", ".", "append", "(", "dataframe", ".", "shift", "(", "-", "i", ")", ")", "if", "i", "==", "0", ":", "names", "+=", "[", "(", "'var%d(t)'", "%", "(", "j", "+", "1", ")", ")", "for", "j", "in", "range", "(", "n_vars", ")", "]", "else", ":", "names", "+=", "[", "(", "'var%d(t+%d)'", "%", "(", "j", "+", "1", ",", "i", ")", ")", "for", "j", "in", "range", "(", "n_vars", ")", "]", "# put it all together\r", "agg", "=", "pd", ".", "concat", "(", "cols", ",", "axis", "=", "1", ")", "agg", ".", "columns", "=", "names", "# drop rows with NaN values\r", "if", "dropnan", ":", "agg", ".", "dropna", "(", "inplace", "=", "True", ")", "return", "agg" ]
[ 31, 0 ]
[ 62, 14 ]
python
en
['en', 'ja', 'th']
False
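A quick usage sketch for `series_to_supervised`, assuming the function above (and its pandas import) is in scope: a univariate series framed with one lag observation as input and the current value as output. The numbers are arbitrary example data.

```python
values = [10, 20, 30, 40, 50]

framed = series_to_supervised(values, n_in=1, n_out=1)
print(framed)
# Roughly:
#    var1(t-1)  var1(t)
# 1       10.0       20
# 2       20.0       30
# 3       30.0       40
# 4       40.0       50
```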
UserProfileSerializer.create
(self, validated_data)
Create and return a new user.
Create and return a new user.
def create(self, validated_data): """Create and return a new user.""" user = models.UserProfile.objects.create_user( email=validated_data['email'], name=validated_data['name'], password=validated_data['password'] ) return user
[ "def", "create", "(", "self", ",", "validated_data", ")", ":", "user", "=", "models", ".", "UserProfile", ".", "objects", ".", "create_user", "(", "email", "=", "validated_data", "[", "'email'", "]", ",", "name", "=", "validated_data", "[", "'name'", "]", ",", "password", "=", "validated_data", "[", "'password'", "]", ")", "return", "user" ]
[ 23, 4 ]
[ 34, 19 ]
python
en
['en', 'en', 'en']
True
create_session
()
create a db session @return: db session
create a db session
def create_session(): """ create a db session @return: db session """ engine = create_engine(Config.SQLALCHEMY_DATABASE_URI) return sessionmaker(bind=engine)()
[ "def", "create_session", "(", ")", ":", "engine", "=", "create_engine", "(", "Config", ".", "SQLALCHEMY_DATABASE_URI", ")", "return", "sessionmaker", "(", "bind", "=", "engine", ")", "(", ")" ]
[ 24, 0 ]
[ 30, 38 ]
python
en
['en', 'error', 'th']
False
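A hedged sketch of using the session returned by `create_session`, with an in-memory SQLite URL standing in for `Config.SQLALCHEMY_DATABASE_URI` (the URL and query are purely illustrative):

```python
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

# Stand-in for Config.SQLALCHEMY_DATABASE_URI.
engine = create_engine("sqlite:///:memory:")
session = sessionmaker(bind=engine)()

try:
    print(session.execute(text("SELECT 1")).scalar())  # 1
finally:
    # Sessions opened this way should be closed so the connection returns to the pool.
    session.close()
```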
start_timer
()
start timing the current request @return:
start timing the current request
def start_timer(): """ start time count @return: """ request.start_time = time.time()
[ "def", "start_timer", "(", ")", ":", "request", ".", "start_time", "=", "time", ".", "time", "(", ")" ]
[ 33, 0 ]
[ 38, 36 ]
python
en
['en', 'error', 'th']
False
stop_timer
(response)
record request detail @param response: http response @return: response
record request detail
def stop_timer(response): """ record request detail @param response: http response @return: response """ content_type = response.content_type.replace(" ", "") if "static" in request.path or content_type != "application/json": return response endpoint = request.path if endpoint not in Config.LOG_ENTRYPOINTS: return response method = request.method username = request.remote_addr endpoint = request.path http_status = response.status_code start = request.start_time end = time.time() duration_ms = round((end - start) * 1000, 2) start_time = datetime.fromtimestamp(start).strftime(DATETIME_FORMAT) end_time = datetime.fromtimestamp(start).strftime(DATETIME_FORMAT) remote_addr = request.remote_addr url = request.url d = request.form.to_dict() or {} # request parameters can overwrite post body request_params = request.args.to_dict() json_data = {} if request.json is None else dict(request.json) d.update(request_params) d.update(json_data) params = '' try: params = json.dumps(d, ensure_ascii=False) except Exception: pass user_agent = parse(request.user_agent.string) browser = user_agent.browser.family system = user_agent.os.family brand = user_agent.device.brand is_mobile = user_agent.is_mobile content_type = response.content_type.replace(" ", "") device_uuid = "" if "uuid" not in d else d['uuid'] # extra = utils.parse_location(remote_addr) request_log = RequestLog(method=method, url=url, params=params, endpoint=endpoint, content_type=content_type, duration_ms=duration_ms, http_status=http_status, username=username, remote_addr=remote_addr, browser=browser, system=system, brand=brand, device_uuid=device_uuid, is_mobile=is_mobile, start_time=start_time, end_time=end_time, # **extra ) try: ses = create_session() ses.add(request_log) ses.commit() except Exception as e: logging.error("fail to save request log to db %s" % str(e)) return response
[ "def", "stop_timer", "(", "response", ")", ":", "content_type", "=", "response", ".", "content_type", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "if", "\"static\"", "in", "request", ".", "path", "or", "content_type", "!=", "\"application/json\"", ":", "return", "response", "endpoint", "=", "request", ".", "path", "if", "endpoint", "not", "in", "Config", ".", "LOG_ENTRYPOINTS", ":", "return", "response", "method", "=", "request", ".", "method", "username", "=", "request", ".", "remote_addr", "endpoint", "=", "request", ".", "path", "http_status", "=", "response", ".", "status_code", "start", "=", "request", ".", "start_time", "end", "=", "time", ".", "time", "(", ")", "duration_ms", "=", "round", "(", "(", "end", "-", "start", ")", "*", "1000", ",", "2", ")", "start_time", "=", "datetime", ".", "fromtimestamp", "(", "start", ")", ".", "strftime", "(", "DATETIME_FORMAT", ")", "end_time", "=", "datetime", ".", "fromtimestamp", "(", "start", ")", ".", "strftime", "(", "DATETIME_FORMAT", ")", "remote_addr", "=", "request", ".", "remote_addr", "url", "=", "request", ".", "url", "d", "=", "request", ".", "form", ".", "to_dict", "(", ")", "or", "{", "}", "# request parameters can overwrite post body", "request_params", "=", "request", ".", "args", ".", "to_dict", "(", ")", "json_data", "=", "{", "}", "if", "request", ".", "json", "is", "None", "else", "dict", "(", "request", ".", "json", ")", "d", ".", "update", "(", "request_params", ")", "d", ".", "update", "(", "json_data", ")", "params", "=", "''", "try", ":", "params", "=", "json", ".", "dumps", "(", "d", ",", "ensure_ascii", "=", "False", ")", "except", "Exception", ":", "pass", "user_agent", "=", "parse", "(", "request", ".", "user_agent", ".", "string", ")", "browser", "=", "user_agent", ".", "browser", ".", "family", "system", "=", "user_agent", ".", "os", ".", "family", "brand", "=", "user_agent", ".", "device", ".", "brand", "is_mobile", "=", "user_agent", ".", "is_mobile", "content_type", "=", "response", ".", "content_type", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "device_uuid", "=", "\"\"", "if", "\"uuid\"", "not", "in", "d", "else", "d", "[", "'uuid'", "]", "# extra = utils.parse_location(remote_addr)", "request_log", "=", "RequestLog", "(", "method", "=", "method", ",", "url", "=", "url", ",", "params", "=", "params", ",", "endpoint", "=", "endpoint", ",", "content_type", "=", "content_type", ",", "duration_ms", "=", "duration_ms", ",", "http_status", "=", "http_status", ",", "username", "=", "username", ",", "remote_addr", "=", "remote_addr", ",", "browser", "=", "browser", ",", "system", "=", "system", ",", "brand", "=", "brand", ",", "device_uuid", "=", "device_uuid", ",", "is_mobile", "=", "is_mobile", ",", "start_time", "=", "start_time", ",", "end_time", "=", "end_time", ",", "# **extra", ")", "try", ":", "ses", "=", "create_session", "(", ")", "ses", ".", "add", "(", "request_log", ")", "ses", ".", "commit", "(", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "\"fail to save request log to db %s\"", "%", "str", "(", "e", ")", ")", "return", "response" ]
[ 41, 0 ]
[ 108, 19 ]
python
en
['en', 'error', 'th']
False
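The `parse` call above appears to come from the `user_agents` package; a small illustration of the fields the logger pulls from it (the User-Agent string is just an example, and the exact family names depend on the installed ua-parser data):

```python
from user_agents import parse

ua_string = (
    "Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) "
    "AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1"
)
user_agent = parse(ua_string)

print(user_agent.browser.family)  # e.g. "Mobile Safari"
print(user_agent.os.family)       # e.g. "iOS"
print(user_agent.device.brand)    # e.g. "Apple"
print(user_agent.is_mobile)       # True
```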
setup_access_log
(app)
set up access log recording @param app: flask app @return:
set up access log recording
def setup_access_log(app): """ set up access log recording @param app: flask app @return: """ # record start time of each request app.before_request(start_timer) # record request detail app.after_request(stop_timer)
[ "def", "setup_access_log", "(", "app", ")", ":", "# record start time of each request", "app", ".", "before_request", "(", "start_timer", ")", "# record request detail", "app", ".", "after_request", "(", "stop_timer", ")" ]
[ 111, 0 ]
[ 120, 33 ]
python
en
['en', 'error', 'th']
False
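Wiring the hooks into an application is then a single call. A minimal sketch, assuming the helpers above live in a module named `access_log` (the module name and route are assumptions for illustration):

```python
from flask import Flask

from access_log import setup_access_log  # hypothetical import path

app = Flask(__name__)
setup_access_log(app)  # registers start_timer / stop_timer around every request

@app.route("/ping")
def ping():
    return {"status": "ok"}

if __name__ == "__main__":
    app.run()
```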
GCP.download_metrics
( self, function_name: str, start_time: int, end_time: int, requests: dict, metrics: dict )
Use GCP's logging system to find execution time of each function invocation. There shouldn't be a problem waiting for complete results, since logs appear very quickly here.
Use GCP's logging system to find execution time of each function invocation.
def download_metrics( self, function_name: str, start_time: int, end_time: int, requests: dict, metrics: dict ): from google.api_core import exceptions from time import sleep def wrapper(gen): while True: try: yield next(gen) except StopIteration: break except exceptions.ResourceExhausted as e: print("Exhausted") sleep(30) """ Use GCP's logging system to find execution time of each function invocation. There shouldn't be problem of waiting for complete results, since logs appear very quickly here. """ from google.cloud import logging as gcp_logging logging_client = gcp_logging.Client() logger = logging_client.logger("cloudfunctions.googleapis.com%2Fcloud-functions") """ GCP accepts only single date format: 'YYYY-MM-DDTHH:MM:SSZ'. Thus, we first convert timestamp to UTC timezone. Then, we generate correct format. Add 1 second to end time to ensure that removing milliseconds doesn't affect query. """ timestamps = [] for timestamp in [start_time, end_time + 1]: utc_date = datetime.fromtimestamp(timestamp, tz=timezone.utc) timestamps.append(utc_date.strftime("%Y-%m-%dT%H:%M:%SZ")) invocations = logger.list_entries( filter_=( f'resource.labels.function_name = "{function_name}" ' f'timestamp >= "{timestamps[0]}" ' f'timestamp <= "{timestamps[1]}"' ), page_size=1000, ) invocations_processed = 0 pages = list(wrapper(invocations.pages)) entries = 0 for page in pages: # invocations.pages: for invoc in page: entries += 1 if "execution took" in invoc.payload: execution_id = invoc.labels["execution_id"] # might happen that we get invocation from another experiment if execution_id not in requests: continue # find number of miliseconds exec_time = re.search(r"\d+ ms", invoc.payload).group().split()[0] # convert into microseconds requests[execution_id].provider_times.execution = int(exec_time) * 1000 invocations_processed += 1 self.logging.info( f"GCP: Received {entries} entries, found time metrics for {invocations_processed} " f"out of {len(requests.keys())} invocations." ) """ Use metrics to find estimated values for maximum memory used, active instances and network traffic. https://cloud.google.com/monitoring/api/metrics_gcp#gcp-cloudfunctions """ # Set expected metrics here available_metrics = ["execution_times", "user_memory_bytes", "network_egress"] client = monitoring_v3.MetricServiceClient() project_name = client.common_project_path(self.config.project_name) end_time_nanos, end_time_seconds = math.modf(end_time) start_time_nanos, start_time_seconds = math.modf(start_time) interval = monitoring_v3.TimeInterval( { "end_time": {"seconds": int(end_time_seconds) + 60}, "start_time": {"seconds": int(start_time_seconds)}, } ) for metric in available_metrics: metrics[metric] = [] list_request = monitoring_v3.ListTimeSeriesRequest( name=project_name, filter='metric.type = "cloudfunctions.googleapis.com/function/{}"'.format(metric), interval=interval, ) results = client.list_time_series(list_request) for result in results: if result.resource.labels.get("function_name") == function_name: for point in result.points: metrics[metric] += [ { "mean_value": point.value.distribution_value.mean, "executions_count": point.value.distribution_value.count, } ]
[ "def", "download_metrics", "(", "self", ",", "function_name", ":", "str", ",", "start_time", ":", "int", ",", "end_time", ":", "int", ",", "requests", ":", "dict", ",", "metrics", ":", "dict", ")", ":", "from", "google", ".", "api_core", "import", "exceptions", "from", "time", "import", "sleep", "def", "wrapper", "(", "gen", ")", ":", "while", "True", ":", "try", ":", "yield", "next", "(", "gen", ")", "except", "StopIteration", ":", "break", "except", "exceptions", ".", "ResourceExhausted", "as", "e", ":", "print", "(", "\"Exhausted\"", ")", "sleep", "(", "30", ")", "from", "google", ".", "cloud", "import", "logging", "as", "gcp_logging", "logging_client", "=", "gcp_logging", ".", "Client", "(", ")", "logger", "=", "logging_client", ".", "logger", "(", "\"cloudfunctions.googleapis.com%2Fcloud-functions\"", ")", "\"\"\"\n GCP accepts only single date format: 'YYYY-MM-DDTHH:MM:SSZ'.\n Thus, we first convert timestamp to UTC timezone.\n Then, we generate correct format.\n\n Add 1 second to end time to ensure that removing\n milliseconds doesn't affect query.\n \"\"\"", "timestamps", "=", "[", "]", "for", "timestamp", "in", "[", "start_time", ",", "end_time", "+", "1", "]", ":", "utc_date", "=", "datetime", ".", "fromtimestamp", "(", "timestamp", ",", "tz", "=", "timezone", ".", "utc", ")", "timestamps", ".", "append", "(", "utc_date", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", ")", "invocations", "=", "logger", ".", "list_entries", "(", "filter_", "=", "(", "f'resource.labels.function_name = \"{function_name}\" '", "f'timestamp >= \"{timestamps[0]}\" '", "f'timestamp <= \"{timestamps[1]}\"'", ")", ",", "page_size", "=", "1000", ",", ")", "invocations_processed", "=", "0", "pages", "=", "list", "(", "wrapper", "(", "invocations", ".", "pages", ")", ")", "entries", "=", "0", "for", "page", "in", "pages", ":", "# invocations.pages:", "for", "invoc", "in", "page", ":", "entries", "+=", "1", "if", "\"execution took\"", "in", "invoc", ".", "payload", ":", "execution_id", "=", "invoc", ".", "labels", "[", "\"execution_id\"", "]", "# might happen that we get invocation from another experiment", "if", "execution_id", "not", "in", "requests", ":", "continue", "# find number of miliseconds", "exec_time", "=", "re", ".", "search", "(", "r\"\\d+ ms\"", ",", "invoc", ".", "payload", ")", ".", "group", "(", ")", ".", "split", "(", ")", "[", "0", "]", "# convert into microseconds", "requests", "[", "execution_id", "]", ".", "provider_times", ".", "execution", "=", "int", "(", "exec_time", ")", "*", "1000", "invocations_processed", "+=", "1", "self", ".", "logging", ".", "info", "(", "f\"GCP: Received {entries} entries, found time metrics for {invocations_processed} \"", "f\"out of {len(requests.keys())} invocations.\"", ")", "\"\"\"\n Use metrics to find estimated values for maximum memory used, active instances\n and network traffic.\n https://cloud.google.com/monitoring/api/metrics_gcp#gcp-cloudfunctions\n \"\"\"", "# Set expected metrics here", "available_metrics", "=", "[", "\"execution_times\"", ",", "\"user_memory_bytes\"", ",", "\"network_egress\"", "]", "client", "=", "monitoring_v3", ".", "MetricServiceClient", "(", ")", "project_name", "=", "client", ".", "common_project_path", "(", "self", ".", "config", ".", "project_name", ")", "end_time_nanos", ",", "end_time_seconds", "=", "math", ".", "modf", "(", "end_time", ")", "start_time_nanos", ",", "start_time_seconds", "=", "math", ".", "modf", "(", "start_time", ")", "interval", "=", "monitoring_v3", ".", "TimeInterval", "(", "{", 
"\"end_time\"", ":", "{", "\"seconds\"", ":", "int", "(", "end_time_seconds", ")", "+", "60", "}", ",", "\"start_time\"", ":", "{", "\"seconds\"", ":", "int", "(", "start_time_seconds", ")", "}", ",", "}", ")", "for", "metric", "in", "available_metrics", ":", "metrics", "[", "metric", "]", "=", "[", "]", "list_request", "=", "monitoring_v3", ".", "ListTimeSeriesRequest", "(", "name", "=", "project_name", ",", "filter", "=", "'metric.type = \"cloudfunctions.googleapis.com/function/{}\"'", ".", "format", "(", "metric", ")", ",", "interval", "=", "interval", ",", ")", "results", "=", "client", ".", "list_time_series", "(", "list_request", ")", "for", "result", "in", "results", ":", "if", "result", ".", "resource", ".", "labels", ".", "get", "(", "\"function_name\"", ")", "==", "function_name", ":", "for", "point", "in", "result", ".", "points", ":", "metrics", "[", "metric", "]", "+=", "[", "{", "\"mean_value\"", ":", "point", ".", "value", ".", "distribution_value", ".", "mean", ",", "\"executions_count\"", ":", "point", ".", "value", ".", "distribution_value", ".", "count", ",", "}", "]" ]
[ 369, 4 ]
[ 480, 25 ]
python
en
['en', 'error', 'th']
False
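The timestamp handling in `download_metrics` converts Unix timestamps to UTC and renders them in GCP's 'YYYY-MM-DDTHH:MM:SSZ' form before they go into the log filter. That part is standard library only and can be checked in isolation (the timestamps below are arbitrary):

```python
from datetime import datetime, timezone

start_time, end_time = 1609459200.25, 1609462800.75  # example Unix timestamps

timestamps = []
for timestamp in [start_time, end_time + 1]:
    utc_date = datetime.fromtimestamp(timestamp, tz=timezone.utc)
    timestamps.append(utc_date.strftime("%Y-%m-%dT%H:%M:%SZ"))

print(timestamps)
# ['2021-01-01T00:00:00Z', '2021-01-01T01:00:01Z']
```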
location_t.file_name
(self)
Absolute source file name, type string.
Absolute source file name, type string.
def file_name(self): """ Absolute source file name, type string. """ return self._file_name
[ "def", "file_name", "(", "self", ")", ":", "return", "self", ".", "_file_name" ]
[ 35, 4 ]
[ 41, 30 ]
python
en
['en', 'error', 'th']
False
location_t.line
(self)
Line number, type int.
Line number, type int.
def line(self): """ Line number, type int. """ return self._line
[ "def", "line", "(", "self", ")", ":", "return", "self", ".", "_line" ]
[ 48, 4 ]
[ 54, 25 ]
python
en
['en', 'error', 'th']
False
location_t.as_tuple
(self)
Return tuple(self.file_name, self.line)
Return tuple(self.file_name, self.line)
def as_tuple(self): """ Return tuple(self.file_name, self.line) """ return self.file_name, self.line
[ "def", "as_tuple", "(", "self", ")", ":", "return", "self", ".", "file_name", ",", "self", ".", "line" ]
[ 60, 4 ]
[ 66, 40 ]
python
en
['en', 'error', 'th']
False
cancel_shielded_checkpoint
()
Introduce a schedule point, but not a cancel point. This is *not* a :ref:`checkpoint <checkpoints>`, but it is half of a checkpoint, and when combined with :func:`checkpoint_if_cancelled` it can make a full checkpoint. Equivalent to (but potentially more efficient than):: with trio.CancelScope(shield=True): await trio.lowlevel.checkpoint()
Introduce a schedule point, but not a cancel point.
async def cancel_shielded_checkpoint(): """Introduce a schedule point, but not a cancel point. This is *not* a :ref:`checkpoint <checkpoints>`, but it is half of a checkpoint, and when combined with :func:`checkpoint_if_cancelled` it can make a full checkpoint. Equivalent to (but potentially more efficient than):: with trio.CancelScope(shield=True): await trio.lowlevel.checkpoint() """ return (await _async_yield(CancelShieldedCheckpoint)).unwrap()
[ "async", "def", "cancel_shielded_checkpoint", "(", ")", ":", "return", "(", "await", "_async_yield", "(", "CancelShieldedCheckpoint", ")", ")", ".", "unwrap", "(", ")" ]
[ 29, 0 ]
[ 42, 66 ]
python
en
['en', 'en', 'en']
True
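A small runnable illustration of how this schedule point differs from a full checkpoint inside an already-cancelled scope; this is a sketch based on the documented behaviour above:

```python
import trio

async def main():
    with trio.CancelScope() as scope:
        scope.cancel()
        # Not a cancel point: completes even though the scope is already cancelled.
        await trio.lowlevel.cancel_shielded_checkpoint()
        print("survived the shielded checkpoint")
        # A full checkpoint is a cancel point, so Cancelled is raised here and
        # absorbed by the enclosing scope.
        await trio.lowlevel.checkpoint()
        print("never reached")
    print("cancelled_caught =", scope.cancelled_caught)

trio.run(main)
```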
wait_task_rescheduled
(abort_func)
Put the current task to sleep, with cancellation support. This is the lowest-level API for blocking in Trio. Every time a :class:`~trio.lowlevel.Task` blocks, it does so by calling this function (usually indirectly via some higher-level API). This is a tricky interface with no guard rails. If you can use :class:`ParkingLot` or the built-in I/O wait functions instead, then you should. Generally the way it works is that before calling this function, you make arrangements for "someone" to call :func:`reschedule` on the current task at some later point. Then you call :func:`wait_task_rescheduled`, passing in ``abort_func``, an "abort callback". (Terminology: in Trio, "aborting" is the process of attempting to interrupt a blocked task to deliver a cancellation.) There are two possibilities for what happens next: 1. "Someone" calls :func:`reschedule` on the current task, and :func:`wait_task_rescheduled` returns or raises whatever value or error was passed to :func:`reschedule`. 2. The call's context transitions to a cancelled state (e.g. due to a timeout expiring). When this happens, the ``abort_func`` is called. Its interface looks like:: def abort_func(raise_cancel): ... return trio.lowlevel.Abort.SUCCEEDED # or FAILED It should attempt to clean up any state associated with this call, and in particular, arrange that :func:`reschedule` will *not* be called later. If (and only if!) it is successful, then it should return :data:`Abort.SUCCEEDED`, in which case the task will automatically be rescheduled with an appropriate :exc:`~trio.Cancelled` error. Otherwise, it should return :data:`Abort.FAILED`. This means that the task can't be cancelled at this time, and still has to make sure that "someone" eventually calls :func:`reschedule`. At that point there are again two possibilities. You can simply ignore the cancellation altogether: wait for the operation to complete and then reschedule and continue as normal. (For example, this is what :func:`trio.to_thread.run_sync` does if cancellation is disabled.) The other possibility is that the ``abort_func`` does succeed in cancelling the operation, but for some reason isn't able to report that right away. (Example: on Windows, it's possible to request that an async ("overlapped") I/O operation be cancelled, but this request is *also* asynchronous – you don't find out until later whether the operation was actually cancelled or not.) To report a delayed cancellation, then you should reschedule the task yourself, and call the ``raise_cancel`` callback passed to ``abort_func`` to raise a :exc:`~trio.Cancelled` (or possibly :exc:`KeyboardInterrupt`) exception into this task. Either of the approaches sketched below can work:: # Option 1: # Catch the exception from raise_cancel and inject it into the task. # (This is what Trio does automatically for you if you return # Abort.SUCCEEDED.) trio.lowlevel.reschedule(task, outcome.capture(raise_cancel)) # Option 2: # wait to be woken by "someone", and then decide whether to raise # the error from inside the task. outer_raise_cancel = None def abort(inner_raise_cancel): nonlocal outer_raise_cancel outer_raise_cancel = inner_raise_cancel TRY_TO_CANCEL_OPERATION() return trio.lowlevel.Abort.FAILED await wait_task_rescheduled(abort) if OPERATION_WAS_SUCCESSFULLY_CANCELLED: # raises the error outer_raise_cancel() In any case it's guaranteed that we only call the ``abort_func`` at most once per call to :func:`wait_task_rescheduled`. 
Sometimes, it's useful to be able to share some mutable sleep-related data between the sleeping task, the abort function, and the waking task. You can use the sleeping task's :data:`~Task.custom_sleep_data` attribute to store this data, and Trio won't touch it, except to make sure that it gets cleared when the task is rescheduled. .. warning:: If your ``abort_func`` raises an error, or returns any value other than :data:`Abort.SUCCEEDED` or :data:`Abort.FAILED`, then Trio will crash violently. Be careful! Similarly, it is entirely possible to deadlock a Trio program by failing to reschedule a blocked task, or cause havoc by calling :func:`reschedule` too many times. Remember what we said up above about how you should use a higher-level API if at all possible?
Put the current task to sleep, with cancellation support.
async def wait_task_rescheduled(abort_func): """Put the current task to sleep, with cancellation support. This is the lowest-level API for blocking in Trio. Every time a :class:`~trio.lowlevel.Task` blocks, it does so by calling this function (usually indirectly via some higher-level API). This is a tricky interface with no guard rails. If you can use :class:`ParkingLot` or the built-in I/O wait functions instead, then you should. Generally the way it works is that before calling this function, you make arrangements for "someone" to call :func:`reschedule` on the current task at some later point. Then you call :func:`wait_task_rescheduled`, passing in ``abort_func``, an "abort callback". (Terminology: in Trio, "aborting" is the process of attempting to interrupt a blocked task to deliver a cancellation.) There are two possibilities for what happens next: 1. "Someone" calls :func:`reschedule` on the current task, and :func:`wait_task_rescheduled` returns or raises whatever value or error was passed to :func:`reschedule`. 2. The call's context transitions to a cancelled state (e.g. due to a timeout expiring). When this happens, the ``abort_func`` is called. Its interface looks like:: def abort_func(raise_cancel): ... return trio.lowlevel.Abort.SUCCEEDED # or FAILED It should attempt to clean up any state associated with this call, and in particular, arrange that :func:`reschedule` will *not* be called later. If (and only if!) it is successful, then it should return :data:`Abort.SUCCEEDED`, in which case the task will automatically be rescheduled with an appropriate :exc:`~trio.Cancelled` error. Otherwise, it should return :data:`Abort.FAILED`. This means that the task can't be cancelled at this time, and still has to make sure that "someone" eventually calls :func:`reschedule`. At that point there are again two possibilities. You can simply ignore the cancellation altogether: wait for the operation to complete and then reschedule and continue as normal. (For example, this is what :func:`trio.to_thread.run_sync` does if cancellation is disabled.) The other possibility is that the ``abort_func`` does succeed in cancelling the operation, but for some reason isn't able to report that right away. (Example: on Windows, it's possible to request that an async ("overlapped") I/O operation be cancelled, but this request is *also* asynchronous – you don't find out until later whether the operation was actually cancelled or not.) To report a delayed cancellation, then you should reschedule the task yourself, and call the ``raise_cancel`` callback passed to ``abort_func`` to raise a :exc:`~trio.Cancelled` (or possibly :exc:`KeyboardInterrupt`) exception into this task. Either of the approaches sketched below can work:: # Option 1: # Catch the exception from raise_cancel and inject it into the task. # (This is what Trio does automatically for you if you return # Abort.SUCCEEDED.) trio.lowlevel.reschedule(task, outcome.capture(raise_cancel)) # Option 2: # wait to be woken by "someone", and then decide whether to raise # the error from inside the task. outer_raise_cancel = None def abort(inner_raise_cancel): nonlocal outer_raise_cancel outer_raise_cancel = inner_raise_cancel TRY_TO_CANCEL_OPERATION() return trio.lowlevel.Abort.FAILED await wait_task_rescheduled(abort) if OPERATION_WAS_SUCCESSFULLY_CANCELLED: # raises the error outer_raise_cancel() In any case it's guaranteed that we only call the ``abort_func`` at most once per call to :func:`wait_task_rescheduled`. 
Sometimes, it's useful to be able to share some mutable sleep-related data between the sleeping task, the abort function, and the waking task. You can use the sleeping task's :data:`~Task.custom_sleep_data` attribute to store this data, and Trio won't touch it, except to make sure that it gets cleared when the task is rescheduled. .. warning:: If your ``abort_func`` raises an error, or returns any value other than :data:`Abort.SUCCEEDED` or :data:`Abort.FAILED`, then Trio will crash violently. Be careful! Similarly, it is entirely possible to deadlock a Trio program by failing to reschedule a blocked task, or cause havoc by calling :func:`reschedule` too many times. Remember what we said up above about how you should use a higher-level API if at all possible? """ return (await _async_yield(WaitTaskRescheduled(abort_func))).unwrap()
[ "async", "def", "wait_task_rescheduled", "(", "abort_func", ")", ":", "return", "(", "await", "_async_yield", "(", "WaitTaskRescheduled", "(", "abort_func", ")", ")", ")", ".", "unwrap", "(", ")" ]
[ 66, 0 ]
[ 165, 73 ]
python
en
['en', 'en', 'en']
True
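To make the abort-callback protocol concrete, here is a minimal one-shot park/unpark primitive built on `wait_task_rescheduled`, `reschedule`, and `current_task`. It is an illustrative sketch of the contract described above, not Trio's own implementation:

```python
import outcome
import trio

class SimpleParker:
    """One task parks; another task wakes it with a value."""

    def __init__(self):
        self._task = None

    async def park(self):
        self._task = trio.lowlevel.current_task()

        def abort_fn(raise_cancel):
            # Cancellation support: forget the task and let Trio deliver Cancelled.
            self._task = None
            return trio.lowlevel.Abort.SUCCEEDED

        return await trio.lowlevel.wait_task_rescheduled(abort_fn)

    def unpark(self, value=None):
        task, self._task = self._task, None
        if task is not None:
            trio.lowlevel.reschedule(task, outcome.Value(value))

async def main():
    parker = SimpleParker()

    async def waiter():
        print("woken with:", await parker.park())

    async with trio.open_nursery() as nursery:
        nursery.start_soon(waiter)
        await trio.sleep(0.1)  # give the waiter time to park
        parker.unpark("hello")

trio.run(main)
```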
permanently_detach_coroutine_object
(final_outcome)
Permanently detach the current task from the Trio scheduler. Normally, a Trio task doesn't exit until its coroutine object exits. When you call this function, Trio acts like the coroutine object just exited and the task terminates with the given outcome. This is useful if you want to permanently switch the coroutine object over to a different coroutine runner. When the calling coroutine enters this function it's running under Trio, and when the function returns it's running under the foreign coroutine runner. You should make sure that the coroutine object has released any Trio-specific resources it has acquired (e.g. nurseries). Args: final_outcome (outcome.Outcome): Trio acts as if the current task exited with the given return value or exception. Returns or raises whatever value or exception the new coroutine runner uses to resume the coroutine.
Permanently detach the current task from the Trio scheduler.
async def permanently_detach_coroutine_object(final_outcome): """Permanently detach the current task from the Trio scheduler. Normally, a Trio task doesn't exit until its coroutine object exits. When you call this function, Trio acts like the coroutine object just exited and the task terminates with the given outcome. This is useful if you want to permanently switch the coroutine object over to a different coroutine runner. When the calling coroutine enters this function it's running under Trio, and when the function returns it's running under the foreign coroutine runner. You should make sure that the coroutine object has released any Trio-specific resources it has acquired (e.g. nurseries). Args: final_outcome (outcome.Outcome): Trio acts as if the current task exited with the given return value or exception. Returns or raises whatever value or exception the new coroutine runner uses to resume the coroutine. """ if _run.current_task().child_nurseries: raise RuntimeError( "can't permanently detach a coroutine object with open nurseries" ) return await _async_yield(PermanentlyDetachCoroutineObject(final_outcome))
[ "async", "def", "permanently_detach_coroutine_object", "(", "final_outcome", ")", ":", "if", "_run", ".", "current_task", "(", ")", ".", "child_nurseries", ":", "raise", "RuntimeError", "(", "\"can't permanently detach a coroutine object with open nurseries\"", ")", "return", "await", "_async_yield", "(", "PermanentlyDetachCoroutineObject", "(", "final_outcome", ")", ")" ]
[ 174, 0 ]
[ 202, 78 ]
python
en
['en', 'en', 'en']
True
temporarily_detach_coroutine_object
(abort_func)
Temporarily detach the current coroutine object from the Trio scheduler. When the calling coroutine enters this function it's running under Trio, and when the function returns it's running under the foreign coroutine runner. The Trio :class:`Task` will continue to exist, but will be suspended until you use :func:`reattach_detached_coroutine_object` to resume it. In the mean time, you can use another coroutine runner to schedule the coroutine object. In fact, you have to – the function doesn't return until the coroutine is advanced from outside. Note that you'll need to save the current :class:`Task` object to later resume; you can retrieve it with :func:`current_task`. You can also use this :class:`Task` object to retrieve the coroutine object – see :data:`Task.coro`. Args: abort_func: Same as for :func:`wait_task_rescheduled`, except that it must return :data:`Abort.FAILED`. (If it returned :data:`Abort.SUCCEEDED`, then Trio would attempt to reschedule the detached task directly without going through :func:`reattach_detached_coroutine_object`, which would be bad.) Your ``abort_func`` should still arrange for whatever the coroutine object is doing to be cancelled, and then reattach to Trio and call the ``raise_cancel`` callback, if possible. Returns or raises whatever value or exception the new coroutine runner uses to resume the coroutine.
Temporarily detach the current coroutine object from the Trio scheduler.
async def temporarily_detach_coroutine_object(abort_func): """Temporarily detach the current coroutine object from the Trio scheduler. When the calling coroutine enters this function it's running under Trio, and when the function returns it's running under the foreign coroutine runner. The Trio :class:`Task` will continue to exist, but will be suspended until you use :func:`reattach_detached_coroutine_object` to resume it. In the mean time, you can use another coroutine runner to schedule the coroutine object. In fact, you have to – the function doesn't return until the coroutine is advanced from outside. Note that you'll need to save the current :class:`Task` object to later resume; you can retrieve it with :func:`current_task`. You can also use this :class:`Task` object to retrieve the coroutine object – see :data:`Task.coro`. Args: abort_func: Same as for :func:`wait_task_rescheduled`, except that it must return :data:`Abort.FAILED`. (If it returned :data:`Abort.SUCCEEDED`, then Trio would attempt to reschedule the detached task directly without going through :func:`reattach_detached_coroutine_object`, which would be bad.) Your ``abort_func`` should still arrange for whatever the coroutine object is doing to be cancelled, and then reattach to Trio and call the ``raise_cancel`` callback, if possible. Returns or raises whatever value or exception the new coroutine runner uses to resume the coroutine. """ return await _async_yield(WaitTaskRescheduled(abort_func))
[ "async", "def", "temporarily_detach_coroutine_object", "(", "abort_func", ")", ":", "return", "await", "_async_yield", "(", "WaitTaskRescheduled", "(", "abort_func", ")", ")" ]
[ 205, 0 ]
[ 238, 62 ]
python
en
['en', 'en', 'en']
True
reattach_detached_coroutine_object
(task, yield_value)
Reattach a coroutine object that was detached using :func:`temporarily_detach_coroutine_object`. When the calling coroutine enters this function it's running under the foreign coroutine runner, and when the function returns it's running under Trio. This must be called from inside the coroutine being resumed, and yields whatever value you pass in. (Presumably you'll pass a value that will cause the current coroutine runner to stop scheduling this task.) Then the coroutine is resumed by the Trio scheduler at the next opportunity. Args: task (Task): The Trio task object that the current coroutine was detached from. yield_value (object): The object to yield to the current coroutine runner.
Reattach a coroutine object that was detached using :func:`temporarily_detach_coroutine_object`.
async def reattach_detached_coroutine_object(task, yield_value): """Reattach a coroutine object that was detached using :func:`temporarily_detach_coroutine_object`. When the calling coroutine enters this function it's running under the foreign coroutine runner, and when the function returns it's running under Trio. This must be called from inside the coroutine being resumed, and yields whatever value you pass in. (Presumably you'll pass a value that will cause the current coroutine runner to stop scheduling this task.) Then the coroutine is resumed by the Trio scheduler at the next opportunity. Args: task (Task): The Trio task object that the current coroutine was detached from. yield_value (object): The object to yield to the current coroutine runner. """ # This is a kind of crude check – in particular, it can fail if the # passed-in task is where the coroutine *runner* is running. But this is # an experts-only interface, and there's no easy way to do a more accurate # check, so I guess that's OK. if not task.coro.cr_running: raise RuntimeError("given task does not match calling coroutine") _run.reschedule(task, outcome.Value("reattaching")) value = await _async_yield(yield_value) assert value == outcome.Value("reattaching")
[ "async", "def", "reattach_detached_coroutine_object", "(", "task", ",", "yield_value", ")", ":", "# This is a kind of crude check – in particular, it can fail if the", "# passed-in task is where the coroutine *runner* is running. But this is", "# an experts-only interface, and there's no easy way to do a more accurate", "# check, so I guess that's OK.", "if", "not", "task", ".", "coro", ".", "cr_running", ":", "raise", "RuntimeError", "(", "\"given task does not match calling coroutine\"", ")", "_run", ".", "reschedule", "(", "task", ",", "outcome", ".", "Value", "(", "\"reattaching\"", ")", ")", "value", "=", "await", "_async_yield", "(", "yield_value", ")", "assert", "value", "==", "outcome", ".", "Value", "(", "\"reattaching\"", ")" ]
[ 241, 0 ]
[ 269, 48 ]
python
en
['en', 'en', 'en']
True
test_expectations_store_report_store_backend_id_in_memory_store_backend
()
What does this test and why? A Store should be able to report its store_backend_id which is set when the StoreBackend is instantiated.
What does this test and why? A Store should be able to report its store_backend_id which is set when the StoreBackend is instantiated.
def test_expectations_store_report_store_backend_id_in_memory_store_backend(): """ What does this test and why? A Store should be able to report it's store_backend_id which is set when the StoreBackend is instantiated. """ in_memory_expectations_store = ExpectationsStore() # Check that store_backend_id exists can be read assert in_memory_expectations_store.store_backend_id is not None # Check that store_backend_id is a valid UUID assert test_utils.validate_uuid4(in_memory_expectations_store.store_backend_id)
[ "def", "test_expectations_store_report_store_backend_id_in_memory_store_backend", "(", ")", ":", "in_memory_expectations_store", "=", "ExpectationsStore", "(", ")", "# Check that store_backend_id exists can be read", "assert", "in_memory_expectations_store", ".", "store_backend_id", "is", "not", "None", "# Check that store_backend_id is a valid UUID", "assert", "test_utils", ".", "validate_uuid4", "(", "in_memory_expectations_store", ".", "store_backend_id", ")" ]
[ 94, 0 ]
[ 104, 83 ]
python
en
['en', 'error', 'th']
False
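`test_utils.validate_uuid4` is project-specific, but a common standard-library way to implement such a check looks like the sketch below (illustrative only; the project's helper may differ):

```python
import uuid

def validate_uuid4(uuid_string: str) -> bool:
    """Return True if the string is a canonical version-4 UUID."""
    try:
        value = uuid.UUID(uuid_string, version=4)
    except (AttributeError, TypeError, ValueError):
        return False
    # uuid.UUID() is forgiving about formatting, so compare the normalized hex back.
    return value.hex == uuid_string.replace("-", "").lower()

print(validate_uuid4(str(uuid.uuid4())))  # True
print(validate_uuid4("not-a-uuid"))       # False
```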
test_expectations_store_report_same_id_with_same_configuration_TupleFilesystemStoreBackend
( tmp_path_factory, )
What does this test and why? A store with the same config (must be a persistent store) should report the same store_backend_id
What does this test and why? A store with the same config (must be a persistent store) should report the same store_backend_id
def test_expectations_store_report_same_id_with_same_configuration_TupleFilesystemStoreBackend( tmp_path_factory, ): """ What does this test and why? A store with the same config (must be persistent store) should report the same store_backend_id """ path = "dummy_str" project_path = str( tmp_path_factory.mktemp( "test_expectations_store_report_same_id_with_same_configuration__dir" ) ) assert ( gen_directory_tree_str(project_path) == """\ test_expectations_store_report_same_id_with_same_configuration__dir0/ """ ) # Check two stores with the same config persistent_expectations_store = ExpectationsStore( store_backend={ "class_name": "TupleFilesystemStoreBackend", "base_directory": project_path, } ) # Check successful initialization with a store_backend_id initialized_directory_tree_with_store_backend_id = """\ test_expectations_store_report_same_id_with_same_configuration__dir0/ .ge_store_backend_id """ assert ( gen_directory_tree_str(project_path) == initialized_directory_tree_with_store_backend_id ) assert persistent_expectations_store.store_backend_id is not None # Check that a duplicate store reports the same store_backend_id persistent_expectations_store_duplicate = ExpectationsStore( store_backend={ "class_name": "TupleFilesystemStoreBackend", "base_directory": project_path, } ) assert persistent_expectations_store_duplicate.store_backend_id is not None assert ( persistent_expectations_store.store_backend_id == persistent_expectations_store_duplicate.store_backend_id ) # Check no change to filesystem assert ( gen_directory_tree_str(project_path) == initialized_directory_tree_with_store_backend_id )
[ "def", "test_expectations_store_report_same_id_with_same_configuration_TupleFilesystemStoreBackend", "(", "tmp_path_factory", ",", ")", ":", "path", "=", "\"dummy_str\"", "project_path", "=", "str", "(", "tmp_path_factory", ".", "mktemp", "(", "\"test_expectations_store_report_same_id_with_same_configuration__dir\"", ")", ")", "assert", "(", "gen_directory_tree_str", "(", "project_path", ")", "==", "\"\"\"\\\ntest_expectations_store_report_same_id_with_same_configuration__dir0/\n\"\"\"", ")", "# Check two stores with the same config", "persistent_expectations_store", "=", "ExpectationsStore", "(", "store_backend", "=", "{", "\"class_name\"", ":", "\"TupleFilesystemStoreBackend\"", ",", "\"base_directory\"", ":", "project_path", ",", "}", ")", "# Check successful initialization with a store_backend_id", "initialized_directory_tree_with_store_backend_id", "=", "\"\"\"\\\ntest_expectations_store_report_same_id_with_same_configuration__dir0/\n .ge_store_backend_id\n\"\"\"", "assert", "(", "gen_directory_tree_str", "(", "project_path", ")", "==", "initialized_directory_tree_with_store_backend_id", ")", "assert", "persistent_expectations_store", ".", "store_backend_id", "is", "not", "None", "# Check that a duplicate store reports the same store_backend_id", "persistent_expectations_store_duplicate", "=", "ExpectationsStore", "(", "store_backend", "=", "{", "\"class_name\"", ":", "\"TupleFilesystemStoreBackend\"", ",", "\"base_directory\"", ":", "project_path", ",", "}", ")", "assert", "persistent_expectations_store_duplicate", ".", "store_backend_id", "is", "not", "None", "assert", "(", "persistent_expectations_store", ".", "store_backend_id", "==", "persistent_expectations_store_duplicate", ".", "store_backend_id", ")", "# Check no change to filesystem", "assert", "(", "gen_directory_tree_str", "(", "project_path", ")", "==", "initialized_directory_tree_with_store_backend_id", ")" ]
[ 107, 0 ]
[ 162, 5 ]
python
en
['en', 'error', 'th']
False
PandasExecutionEngine.dataframe
(self)
Tests whether a Batch has been loaded. If the loaded batch does not exist, raises a ValueError exception
Tests whether a Batch has been loaded. If the loaded batch does not exist, raises a ValueError exception
def dataframe(self): """Tests whether or not a Batch has been loaded. If the loaded batch does not exist, raises a ValueError Exception """ # Changed to is None because was breaking prior if self.active_batch_data is None: raise ValueError( "Batch has not been loaded - please run load_batch_data() to load a batch." ) return self.active_batch_data.dataframe
[ "def", "dataframe", "(", "self", ")", ":", "# Changed to is None because was breaking prior", "if", "self", ".", "active_batch_data", "is", "None", ":", "raise", "ValueError", "(", "\"Batch has not been loaded - please run load_batch_data() to load a batch.\"", ")", "return", "self", ".", "active_batch_data", ".", "dataframe" ]
[ 197, 4 ]
[ 207, 47 ]
python
en
['en', 'en', 'en']
True
PandasExecutionEngine._get_reader_fn
(self, reader_method=None, path=None)
Static helper for parsing reader types. If reader_method is not provided, path will be used to guess the correct reader_method. Args: reader_method (str): the name of the reader method to use, if available. path (str): the path used to guess Returns: ReaderMethod to use for the filepath
Static helper for parsing reader types. If reader_method is not provided, path will be used to guess the correct reader_method.
def _get_reader_fn(self, reader_method=None, path=None): """Static helper for parsing reader types. If reader_method is not provided, path will be used to guess the correct reader_method. Args: reader_method (str): the name of the reader method to use, if available. path (str): the path used to guess Returns: ReaderMethod to use for the filepath """ if reader_method is None and path is None: raise ge_exceptions.BatchSpecError( "Unable to determine pandas reader function without reader_method or path." ) reader_options = dict() if reader_method is None: path_guess = self.guess_reader_method_from_path(path) reader_method = path_guess["reader_method"] reader_options = path_guess.get( "reader_options" ) # This may not be there; use None in that case try: reader_fn = getattr(pd, reader_method) if reader_options: reader_fn = partial(reader_fn, **reader_options) return reader_fn except AttributeError: raise ge_exceptions.BatchSpecError( f'Unable to find reader_method "{reader_method}" in pandas.' )
[ "def", "_get_reader_fn", "(", "self", ",", "reader_method", "=", "None", ",", "path", "=", "None", ")", ":", "if", "reader_method", "is", "None", "and", "path", "is", "None", ":", "raise", "ge_exceptions", ".", "BatchSpecError", "(", "\"Unable to determine pandas reader function without reader_method or path.\"", ")", "reader_options", "=", "dict", "(", ")", "if", "reader_method", "is", "None", ":", "path_guess", "=", "self", ".", "guess_reader_method_from_path", "(", "path", ")", "reader_method", "=", "path_guess", "[", "\"reader_method\"", "]", "reader_options", "=", "path_guess", ".", "get", "(", "\"reader_options\"", ")", "# This may not be there; use None in that case", "try", ":", "reader_fn", "=", "getattr", "(", "pd", ",", "reader_method", ")", "if", "reader_options", ":", "reader_fn", "=", "partial", "(", "reader_fn", ",", "*", "*", "reader_options", ")", "return", "reader_fn", "except", "AttributeError", ":", "raise", "ge_exceptions", ".", "BatchSpecError", "(", "f'Unable to find reader_method \"{reader_method}\" in pandas.'", ")" ]
[ 209, 4 ]
[ 242, 13 ]
python
en
['en', 'en', 'en']
True
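The reader-resolution pattern in `_get_reader_fn` above (look up a pandas reader by name, then bind reader options with `functools.partial`) can be sketched on its own; the file path in the trailing comment is hypothetical and only pandas is assumed to be installed.

from functools import partial

import pandas as pd

# Resolve the reader by name, as _get_reader_fn does with getattr(pd, reader_method).
reader_fn = getattr(pd, "read_csv")

# Bind reader options up front, mirroring partial(reader_fn, **reader_options) above.
read_gzipped_csv = partial(reader_fn, compression="gzip")

# read_gzipped_csv("data/sample.csv.gz")  # hypothetical path; any gzipped CSV would do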
PandasExecutionEngine.guess_reader_method_from_path
(path)
Helper method for deciding which reader to use to read in a certain path. Args: path (str): the path used to guess Returns: ReaderMethod to use for the filepath
Helper method for deciding which reader to use to read in a certain path.
def guess_reader_method_from_path(path): """Helper method for deciding which reader to use to read in a certain path. Args: path (str): the path used to guess Returns: ReaderMethod to use for the filepath """ if path.endswith(".csv") or path.endswith(".tsv"): return {"reader_method": "read_csv"} elif path.endswith(".parquet"): return {"reader_method": "read_parquet"} elif path.endswith(".xlsx") or path.endswith(".xls"): return {"reader_method": "read_excel"} elif path.endswith(".json"): return {"reader_method": "read_json"} elif path.endswith(".pkl"): return {"reader_method": "read_pickle"} elif path.endswith(".feather"): return {"reader_method": "read_feather"} elif path.endswith(".csv.gz") or path.endswith(".tsv.gz"): return { "reader_method": "read_csv", "reader_options": {"compression": "gzip"}, } raise ge_exceptions.BatchSpecError( f'Unable to determine reader method from path: "{path}".' )
[ "def", "guess_reader_method_from_path", "(", "path", ")", ":", "if", "path", ".", "endswith", "(", "\".csv\"", ")", "or", "path", ".", "endswith", "(", "\".tsv\"", ")", ":", "return", "{", "\"reader_method\"", ":", "\"read_csv\"", "}", "elif", "path", ".", "endswith", "(", "\".parquet\"", ")", ":", "return", "{", "\"reader_method\"", ":", "\"read_parquet\"", "}", "elif", "path", ".", "endswith", "(", "\".xlsx\"", ")", "or", "path", ".", "endswith", "(", "\".xls\"", ")", ":", "return", "{", "\"reader_method\"", ":", "\"read_excel\"", "}", "elif", "path", ".", "endswith", "(", "\".json\"", ")", ":", "return", "{", "\"reader_method\"", ":", "\"read_json\"", "}", "elif", "path", ".", "endswith", "(", "\".pkl\"", ")", ":", "return", "{", "\"reader_method\"", ":", "\"read_pickle\"", "}", "elif", "path", ".", "endswith", "(", "\".feather\"", ")", ":", "return", "{", "\"reader_method\"", ":", "\"read_feather\"", "}", "elif", "path", ".", "endswith", "(", "\".csv.gz\"", ")", "or", "path", ".", "endswith", "(", "\".tsv.gz\"", ")", ":", "return", "{", "\"reader_method\"", ":", "\"read_csv\"", ",", "\"reader_options\"", ":", "{", "\"compression\"", ":", "\"gzip\"", "}", ",", "}", "raise", "ge_exceptions", ".", "BatchSpecError", "(", "f'Unable to determine reader method from path: \"{path}\".'", ")" ]
[ 246, 4 ]
[ 276, 9 ]
python
en
['en', 'en', 'en']
True
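A self-contained sketch of the suffix-to-reader dispatch used by `guess_reader_method_from_path` above, trimmed to a few suffixes and raising a plain ValueError instead of the great_expectations BatchSpecError:

def guess_reader_method(path: str) -> dict:
    # Same suffix-based dispatch as above; compressed CSVs are checked first only for readability.
    if path.endswith(".csv.gz") or path.endswith(".tsv.gz"):
        return {"reader_method": "read_csv", "reader_options": {"compression": "gzip"}}
    if path.endswith(".csv") or path.endswith(".tsv"):
        return {"reader_method": "read_csv"}
    if path.endswith(".parquet"):
        return {"reader_method": "read_parquet"}
    if path.endswith(".xlsx") or path.endswith(".xls"):
        return {"reader_method": "read_excel"}
    raise ValueError(f'Unable to determine reader method from path: "{path}"')

assert guess_reader_method("events.csv.gz") == {"reader_method": "read_csv", "reader_options": {"compression": "gzip"}}
assert guess_reader_method("events.parquet") == {"reader_method": "read_parquet"}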
PandasExecutionEngine.get_compute_domain
( self, domain_kwargs: dict, domain_type: Union[str, MetricDomainTypes], accessor_keys: Optional[Iterable[str]] = None, )
Uses a given batch dictionary and domain kwargs (which include a row condition and a condition parser) to obtain and/or query a batch. Returns in the format of a Pandas DataFrame. If the domain is a single column, this is added to 'accessor domain kwargs' and used for later access Args: domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would like to be using, or a corresponding string value representing it. String types include "identity", "column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the class MetricDomainTypes. accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when describing the domain and simply transferred with their associated values into accessor_domain_kwargs. Returns: A tuple including: - a DataFrame (the data on which to compute) - a dictionary of compute_domain_kwargs, describing the DataFrame - a dictionary of accessor_domain_kwargs, describing any accessors needed to identify the domain within the compute domain
Uses a given batch dictionary and domain kwargs (which include a row condition and a condition parser) to obtain and/or query a batch. Returns in the format of a Pandas DataFrame. If the domain is a single column, this is added to 'accessor domain kwargs' and used for later access
def get_compute_domain( self, domain_kwargs: dict, domain_type: Union[str, MetricDomainTypes], accessor_keys: Optional[Iterable[str]] = None, ) -> Tuple[pd.DataFrame, dict, dict]: """Uses a given batch dictionary and domain kwargs (which include a row condition and a condition parser) to obtain and/or query a batch. Returns in the format of a Pandas DataFrame. If the domain is a single column, this is added to 'accessor domain kwargs' and used for later access Args: domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would like to be using, or a corresponding string value representing it. String types include "identity", "column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the class MetricDomainTypes. accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when describing the domain and simply transferred with their associated values into accessor_domain_kwargs. Returns: A tuple including: - a DataFrame (the data on which to compute) - a dictionary of compute_domain_kwargs, describing the DataFrame - a dictionary of accessor_domain_kwargs, describing any accessors needed to identify the domain within the compute domain """ # Extracting value from enum if it is given for future computation domain_type = MetricDomainTypes(domain_type) batch_id = domain_kwargs.get("batch_id") if batch_id is None: # We allow no batch id specified if there is only one batch if self.active_batch_data_id is not None: data = self.active_batch_data.dataframe else: raise ge_exceptions.ValidationError( "No batch is specified, but could not identify a loaded batch." ) else: if batch_id in self.loaded_batch_data_dict: data = self.loaded_batch_data_dict[batch_id].dataframe else: raise ge_exceptions.ValidationError( f"Unable to find batch with batch_id {batch_id}" ) compute_domain_kwargs = copy.deepcopy(domain_kwargs) accessor_domain_kwargs = dict() table = domain_kwargs.get("table", None) if table: raise ValueError( "PandasExecutionEngine does not currently support multiple named tables." ) # Filtering by row condition row_condition = domain_kwargs.get("row_condition", None) if row_condition: condition_parser = domain_kwargs.get("condition_parser", None) # Ensuring proper condition parser has been provided if condition_parser not in ["python", "pandas"]: raise ValueError( "condition_parser is required when setting a row_condition," " and must be 'python' or 'pandas'" ) else: # Querying row condition data = data.query(row_condition, parser=condition_parser).reset_index( drop=True ) # Warning user if accessor keys are in any domain that is not of type table, will be ignored if ( domain_type != MetricDomainTypes.TABLE and accessor_keys is not None and len(list(accessor_keys)) > 0 ): logger.warning( "Accessor keys ignored since Metric Domain Type is not 'table" ) # If given table (this is default), get all unexpected accessor_keys (an optional parameters allowing us to # modify domain access) if domain_type == MetricDomainTypes.TABLE: if accessor_keys is not None and len(list(accessor_keys)) > 0: for key in accessor_keys: accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key) if len(compute_domain_kwargs.keys()) > 0: # Warn user if kwarg not "normal". 
unexpected_keys: set = set(compute_domain_kwargs.keys()).difference( { "batch_id", "table", "row_condition", "condition_parser", } ) if len(unexpected_keys) > 0: unexpected_keys_str: str = ", ".join( map(lambda element: f'"{element}"', unexpected_keys) ) logger.warning( f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".' ) return data, compute_domain_kwargs, accessor_domain_kwargs # If user has stated they want a column, checking if one is provided, and elif domain_type == MetricDomainTypes.COLUMN: if "column" in compute_domain_kwargs: accessor_domain_kwargs["column"] = compute_domain_kwargs.pop("column") else: # If column not given raise ge_exceptions.GreatExpectationsError( "Column not provided in compute_domain_kwargs" ) # Else, if column pair values requested elif domain_type == MetricDomainTypes.COLUMN_PAIR: # Ensuring column_A and column_B parameters provided if ( "column_A" in compute_domain_kwargs and "column_B" in compute_domain_kwargs ): accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop( "column_A" ) accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop( "column_B" ) else: raise ge_exceptions.GreatExpectationsError( "column_A or column_B not found within compute_domain_kwargs" ) # Checking if table or identity or other provided, column is not specified. If it is, warning the user elif domain_type == MetricDomainTypes.MULTICOLUMN: if "column_list" in compute_domain_kwargs: # If column_list exists accessor_domain_kwargs["column_list"] = compute_domain_kwargs.pop( "column_list" ) # Filtering if identity elif domain_type == MetricDomainTypes.IDENTITY: # If we would like our data to become a single column if "column" in compute_domain_kwargs: data = pd.DataFrame(data[compute_domain_kwargs["column"]]) # If we would like our data to now become a column pair elif ("column_A" in compute_domain_kwargs) and ( "column_B" in compute_domain_kwargs ): # Dropping all not needed columns column_a, column_b = ( compute_domain_kwargs["column_A"], compute_domain_kwargs["column_B"], ) data = pd.DataFrame( {column_a: data[column_a], column_b: data[column_b]} ) else: # If we would like our data to become a multicolumn if "column_list" in compute_domain_kwargs: data = data[compute_domain_kwargs["column_list"]] return data, compute_domain_kwargs, accessor_domain_kwargs
[ "def", "get_compute_domain", "(", "self", ",", "domain_kwargs", ":", "dict", ",", "domain_type", ":", "Union", "[", "str", ",", "MetricDomainTypes", "]", ",", "accessor_keys", ":", "Optional", "[", "Iterable", "[", "str", "]", "]", "=", "None", ",", ")", "->", "Tuple", "[", "pd", ".", "DataFrame", ",", "dict", ",", "dict", "]", ":", "# Extracting value from enum if it is given for future computation", "domain_type", "=", "MetricDomainTypes", "(", "domain_type", ")", "batch_id", "=", "domain_kwargs", ".", "get", "(", "\"batch_id\"", ")", "if", "batch_id", "is", "None", ":", "# We allow no batch id specified if there is only one batch", "if", "self", ".", "active_batch_data_id", "is", "not", "None", ":", "data", "=", "self", ".", "active_batch_data", ".", "dataframe", "else", ":", "raise", "ge_exceptions", ".", "ValidationError", "(", "\"No batch is specified, but could not identify a loaded batch.\"", ")", "else", ":", "if", "batch_id", "in", "self", ".", "loaded_batch_data_dict", ":", "data", "=", "self", ".", "loaded_batch_data_dict", "[", "batch_id", "]", ".", "dataframe", "else", ":", "raise", "ge_exceptions", ".", "ValidationError", "(", "f\"Unable to find batch with batch_id {batch_id}\"", ")", "compute_domain_kwargs", "=", "copy", ".", "deepcopy", "(", "domain_kwargs", ")", "accessor_domain_kwargs", "=", "dict", "(", ")", "table", "=", "domain_kwargs", ".", "get", "(", "\"table\"", ",", "None", ")", "if", "table", ":", "raise", "ValueError", "(", "\"PandasExecutionEngine does not currently support multiple named tables.\"", ")", "# Filtering by row condition", "row_condition", "=", "domain_kwargs", ".", "get", "(", "\"row_condition\"", ",", "None", ")", "if", "row_condition", ":", "condition_parser", "=", "domain_kwargs", ".", "get", "(", "\"condition_parser\"", ",", "None", ")", "# Ensuring proper condition parser has been provided", "if", "condition_parser", "not", "in", "[", "\"python\"", ",", "\"pandas\"", "]", ":", "raise", "ValueError", "(", "\"condition_parser is required when setting a row_condition,\"", "\" and must be 'python' or 'pandas'\"", ")", "else", ":", "# Querying row condition", "data", "=", "data", ".", "query", "(", "row_condition", ",", "parser", "=", "condition_parser", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "# Warning user if accessor keys are in any domain that is not of type table, will be ignored", "if", "(", "domain_type", "!=", "MetricDomainTypes", ".", "TABLE", "and", "accessor_keys", "is", "not", "None", "and", "len", "(", "list", "(", "accessor_keys", ")", ")", ">", "0", ")", ":", "logger", ".", "warning", "(", "\"Accessor keys ignored since Metric Domain Type is not 'table\"", ")", "# If given table (this is default), get all unexpected accessor_keys (an optional parameters allowing us to", "# modify domain access)", "if", "domain_type", "==", "MetricDomainTypes", ".", "TABLE", ":", "if", "accessor_keys", "is", "not", "None", "and", "len", "(", "list", "(", "accessor_keys", ")", ")", ">", "0", ":", "for", "key", "in", "accessor_keys", ":", "accessor_domain_kwargs", "[", "key", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "key", ")", "if", "len", "(", "compute_domain_kwargs", ".", "keys", "(", ")", ")", ">", "0", ":", "# Warn user if kwarg not \"normal\".", "unexpected_keys", ":", "set", "=", "set", "(", "compute_domain_kwargs", ".", "keys", "(", ")", ")", ".", "difference", "(", "{", "\"batch_id\"", ",", "\"table\"", ",", "\"row_condition\"", ",", "\"condition_parser\"", ",", "}", ")", "if", "len", "(", "unexpected_keys", 
")", ">", "0", ":", "unexpected_keys_str", ":", "str", "=", "\", \"", ".", "join", "(", "map", "(", "lambda", "element", ":", "f'\"{element}\"'", ",", "unexpected_keys", ")", ")", "logger", ".", "warning", "(", "f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type \"{domain_type.value}\".'", ")", "return", "data", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs", "# If user has stated they want a column, checking if one is provided, and", "elif", "domain_type", "==", "MetricDomainTypes", ".", "COLUMN", ":", "if", "\"column\"", "in", "compute_domain_kwargs", ":", "accessor_domain_kwargs", "[", "\"column\"", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "\"column\"", ")", "else", ":", "# If column not given", "raise", "ge_exceptions", ".", "GreatExpectationsError", "(", "\"Column not provided in compute_domain_kwargs\"", ")", "# Else, if column pair values requested", "elif", "domain_type", "==", "MetricDomainTypes", ".", "COLUMN_PAIR", ":", "# Ensuring column_A and column_B parameters provided", "if", "(", "\"column_A\"", "in", "compute_domain_kwargs", "and", "\"column_B\"", "in", "compute_domain_kwargs", ")", ":", "accessor_domain_kwargs", "[", "\"column_A\"", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "\"column_A\"", ")", "accessor_domain_kwargs", "[", "\"column_B\"", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "\"column_B\"", ")", "else", ":", "raise", "ge_exceptions", ".", "GreatExpectationsError", "(", "\"column_A or column_B not found within compute_domain_kwargs\"", ")", "# Checking if table or identity or other provided, column is not specified. If it is, warning the user", "elif", "domain_type", "==", "MetricDomainTypes", ".", "MULTICOLUMN", ":", "if", "\"column_list\"", "in", "compute_domain_kwargs", ":", "# If column_list exists", "accessor_domain_kwargs", "[", "\"column_list\"", "]", "=", "compute_domain_kwargs", ".", "pop", "(", "\"column_list\"", ")", "# Filtering if identity", "elif", "domain_type", "==", "MetricDomainTypes", ".", "IDENTITY", ":", "# If we would like our data to become a single column", "if", "\"column\"", "in", "compute_domain_kwargs", ":", "data", "=", "pd", ".", "DataFrame", "(", "data", "[", "compute_domain_kwargs", "[", "\"column\"", "]", "]", ")", "# If we would like our data to now become a column pair", "elif", "(", "\"column_A\"", "in", "compute_domain_kwargs", ")", "and", "(", "\"column_B\"", "in", "compute_domain_kwargs", ")", ":", "# Dropping all not needed columns", "column_a", ",", "column_b", "=", "(", "compute_domain_kwargs", "[", "\"column_A\"", "]", ",", "compute_domain_kwargs", "[", "\"column_B\"", "]", ",", ")", "data", "=", "pd", ".", "DataFrame", "(", "{", "column_a", ":", "data", "[", "column_a", "]", ",", "column_b", ":", "data", "[", "column_b", "]", "}", ")", "else", ":", "# If we would like our data to become a multicolumn", "if", "\"column_list\"", "in", "compute_domain_kwargs", ":", "data", "=", "data", "[", "compute_domain_kwargs", "[", "\"column_list\"", "]", "]", "return", "data", ",", "compute_domain_kwargs", ",", "accessor_domain_kwargs" ]
[ 278, 4 ]
[ 446, 66 ]
python
en
['en', 'en', 'en']
True
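The two data-shaping steps inside get_compute_domain, row-condition filtering via DataFrame.query and moving a requested column into accessor_domain_kwargs, reduce to plain pandas; a rough standalone sketch with a made-up frame and domain kwargs:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["x", "y", "x", "y"]})
domain_kwargs = {"row_condition": "a > 1", "condition_parser": "pandas", "column": "b"}

# Row-condition filtering, as in data.query(row_condition, parser=condition_parser).reset_index(drop=True)
data = df.query(domain_kwargs["row_condition"], parser=domain_kwargs["condition_parser"]).reset_index(drop=True)

# For a COLUMN domain the column name is popped into accessor_domain_kwargs, not used to slice the data yet.
compute_domain_kwargs = dict(domain_kwargs)
accessor_domain_kwargs = {"column": compute_domain_kwargs.pop("column")}

assert len(data) == 3 and accessor_domain_kwargs == {"column": "b"}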
PandasExecutionEngine._split_on_converted_datetime
( df, column_name: str, batch_identifiers: dict, date_format_string: str = "%Y-%m-%d", )
Convert the values in the named column to the given date_format, and split on that
Convert the values in the named column to the given date_format, and split on that
def _split_on_converted_datetime( df, column_name: str, batch_identifiers: dict, date_format_string: str = "%Y-%m-%d", ): """Convert the values in the named column to the given date_format, and split on that""" stringified_datetime_series = df[column_name].map( lambda x: x.strftime(date_format_string) ) matching_string = batch_identifiers[column_name] return df[stringified_datetime_series == matching_string]
[ "def", "_split_on_converted_datetime", "(", "df", ",", "column_name", ":", "str", ",", "batch_identifiers", ":", "dict", ",", "date_format_string", ":", "str", "=", "\"%Y-%m-%d\"", ",", ")", ":", "stringified_datetime_series", "=", "df", "[", "column_name", "]", ".", "map", "(", "lambda", "x", ":", "x", ".", "strftime", "(", "date_format_string", ")", ")", "matching_string", "=", "batch_identifiers", "[", "column_name", "]", "return", "df", "[", "stringified_datetime_series", "==", "matching_string", "]" ]
[ 463, 4 ]
[ 474, 65 ]
python
en
['en', 'en', 'en']
True
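A quick, self-contained illustration of the datetime split above: format each value with strftime and keep the rows whose string matches the batch identifier.

import pandas as pd

df = pd.DataFrame({"ts": pd.to_datetime(["2021-01-01 10:00", "2021-01-02 09:30", "2021-01-01 23:59"])})
batch_identifiers = {"ts": "2021-01-01"}

stringified = df["ts"].map(lambda x: x.strftime("%Y-%m-%d"))
batch = df[stringified == batch_identifiers["ts"]]
assert len(batch) == 2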
PandasExecutionEngine._split_on_divided_integer
( df, column_name: str, divisor: int, batch_identifiers: dict )
Divide the values in the named column by `divisor`, and split on that
Divide the values in the named column by `divisor`, and split on that
def _split_on_divided_integer( df, column_name: str, divisor: int, batch_identifiers: dict ): """Divide the values in the named column by `divisor`, and split on that""" matching_divisor = batch_identifiers[column_name] matching_rows = df[column_name].map( lambda x: int(x / divisor) == matching_divisor ) return df[matching_rows]
[ "def", "_split_on_divided_integer", "(", "df", ",", "column_name", ":", "str", ",", "divisor", ":", "int", ",", "batch_identifiers", ":", "dict", ")", ":", "matching_divisor", "=", "batch_identifiers", "[", "column_name", "]", "matching_rows", "=", "df", "[", "column_name", "]", ".", "map", "(", "lambda", "x", ":", "int", "(", "x", "/", "divisor", ")", "==", "matching_divisor", ")", "return", "df", "[", "matching_rows", "]" ]
[ 477, 4 ]
[ 487, 32 ]
python
en
['en', 'en', 'en']
True
PandasExecutionEngine._split_on_mod_integer
(df, column_name: str, mod: int, batch_identifiers: dict)
Take the values in the named column modulo `mod`, and split on that
Take the values in the named column modulo `mod`, and split on that
def _split_on_mod_integer(df, column_name: str, mod: int, batch_identifiers: dict): """Take the values in the named column modulo `mod`, and split on that""" matching_mod_value = batch_identifiers[column_name] matching_rows = df[column_name].map(lambda x: x % mod == matching_mod_value) return df[matching_rows]
[ "def", "_split_on_mod_integer", "(", "df", ",", "column_name", ":", "str", ",", "mod", ":", "int", ",", "batch_identifiers", ":", "dict", ")", ":", "matching_mod_value", "=", "batch_identifiers", "[", "column_name", "]", "matching_rows", "=", "df", "[", "column_name", "]", ".", "map", "(", "lambda", "x", ":", "x", "%", "mod", "==", "matching_mod_value", ")", "return", "df", "[", "matching_rows", "]" ]
[ 490, 4 ]
[ 496, 32 ]
python
en
['en', 'en', 'en']
True
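A toy check of the modulo split above (the batch identifier stores the remainder to keep):

import pandas as pd

df = pd.DataFrame({"id": [0, 1, 2, 3, 4, 5]})
batch_identifiers = {"id": 2}  # keep rows where id % 3 == 2

matching_rows = df["id"].map(lambda x: x % 3 == batch_identifiers["id"])
assert df[matching_rows]["id"].tolist() == [2, 5]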
PandasExecutionEngine._split_on_multi_column_values
( df, column_names: List[str], batch_identifiers: dict )
Split on the joint values in the named columns
Split on the joint values in the named columns
def _split_on_multi_column_values( df, column_names: List[str], batch_identifiers: dict ): """Split on the joint values in the named columns""" subset_df = df.copy() for column_name in column_names: value = batch_identifiers.get(column_name) if not value: raise ValueError( f"In order for PandasExecution to `_split_on_multi_column_values`, " f"all values in column_names must also exist in batch_identifiers. " f"{column_name} was not found in batch_identifiers." ) subset_df = subset_df[subset_df[column_name] == value] return subset_df
[ "def", "_split_on_multi_column_values", "(", "df", ",", "column_names", ":", "List", "[", "str", "]", ",", "batch_identifiers", ":", "dict", ")", ":", "subset_df", "=", "df", ".", "copy", "(", ")", "for", "column_name", "in", "column_names", ":", "value", "=", "batch_identifiers", ".", "get", "(", "column_name", ")", "if", "not", "value", ":", "raise", "ValueError", "(", "f\"In order for PandasExecution to `_split_on_multi_column_values`, \"", "f\"all values in column_names must also exist in batch_identifiers. \"", "f\"{column_name} was not found in batch_identifiers.\"", ")", "subset_df", "=", "subset_df", "[", "subset_df", "[", "column_name", "]", "==", "value", "]", "return", "subset_df" ]
[ 499, 4 ]
[ 514, 24 ]
python
en
['en', 'en', 'en']
True
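The multi-column split above is an AND over per-column equality filters; a small example with synthetic batch identifiers:

import pandas as pd

df = pd.DataFrame({"year": [2020, 2020, 2021], "month": [1, 2, 1], "value": [10, 20, 30]})
batch_identifiers = {"year": 2020, "month": 1}

subset = df.copy()
for column_name in ["year", "month"]:
    subset = subset[subset[column_name] == batch_identifiers[column_name]]
assert subset["value"].tolist() == [10]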
PandasExecutionEngine._split_on_hashed_column
( df, column_name: str, hash_digits: int, batch_identifiers: dict, hash_function_name: str = "md5", )
Split on the hashed value of the named column
Split on the hashed value of the named column
def _split_on_hashed_column( df, column_name: str, hash_digits: int, batch_identifiers: dict, hash_function_name: str = "md5", ): """Split on the hashed value of the named column""" try: hash_method = getattr(hashlib, hash_function_name) except (TypeError, AttributeError) as e: raise ( ge_exceptions.ExecutionEngineError( f"""The splitting method used with SparkDFExecutionEngine has a reference to an invalid hash_function_name. Reference to {hash_function_name} cannot be found.""" ) ) matching_rows = df[column_name].map( lambda x: hash_method(str(x).encode()).hexdigest()[-1 * hash_digits :] == batch_identifiers["hash_value"] ) return df[matching_rows]
[ "def", "_split_on_hashed_column", "(", "df", ",", "column_name", ":", "str", ",", "hash_digits", ":", "int", ",", "batch_identifiers", ":", "dict", ",", "hash_function_name", ":", "str", "=", "\"md5\"", ",", ")", ":", "try", ":", "hash_method", "=", "getattr", "(", "hashlib", ",", "hash_function_name", ")", "except", "(", "TypeError", ",", "AttributeError", ")", "as", "e", ":", "raise", "(", "ge_exceptions", ".", "ExecutionEngineError", "(", "f\"\"\"The splitting method used with SparkDFExecutionEngine has a reference to an invalid hash_function_name.\n Reference to {hash_function_name} cannot be found.\"\"\"", ")", ")", "matching_rows", "=", "df", "[", "column_name", "]", ".", "map", "(", "lambda", "x", ":", "hash_method", "(", "str", "(", "x", ")", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")", "[", "-", "1", "*", "hash_digits", ":", "]", "==", "batch_identifiers", "[", "\"hash_value\"", "]", ")", "return", "df", "[", "matching_rows", "]" ]
[ 517, 4 ]
[ 538, 32 ]
python
en
['en', 'en', 'en']
True
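The hashed split above keeps rows whose hashed column value ends in the digits stored under the "hash_value" batch identifier; a standalone sketch with hashlib and a tiny frame:

import hashlib

import pandas as pd

df = pd.DataFrame({"user_id": ["alice", "bob", "carol", "dave"]})
hash_digits = 1

# Last hex digit of the md5 of each value, mirroring the map() above.
last_digit = df["user_id"].map(lambda x: hashlib.md5(str(x).encode()).hexdigest()[-hash_digits:])

# Pretend the batch identifier asked for whatever digit the first row hashes to, so at least one row matches.
batch_identifiers = {"hash_value": last_digit.iloc[0]}
print(df[last_digit == batch_identifiers["hash_value"]])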
PandasExecutionEngine._sample_using_random
( df, p: float = 0.1, )
Take a random sample of rows, retaining proportion p Note: the Random function behaves differently on different dialects of SQL
Take a random sample of rows, retaining proportion p
def _sample_using_random( df, p: float = 0.1, ): """Take a random sample of rows, retaining proportion p Note: the Random function behaves differently on different dialects of SQL """ return df[df.index.map(lambda x: random.random() < p)]
[ "def", "_sample_using_random", "(", "df", ",", "p", ":", "float", "=", "0.1", ",", ")", ":", "return", "df", "[", "df", ".", "index", ".", "map", "(", "lambda", "x", ":", "random", ".", "random", "(", ")", "<", "p", ")", "]" ]
[ 543, 4 ]
[ 551, 62 ]
python
en
['en', 'en', 'en']
True
PandasExecutionEngine._sample_using_mod
( df, column_name: str, mod: int, value: int, )
Take the mod of named column, and only keep rows that match the given value
Take the mod of named column, and only keep rows that match the given value
def _sample_using_mod( df, column_name: str, mod: int, value: int, ): """Take the mod of named column, and only keep rows that match the given value""" return df[df[column_name].map(lambda x: x % mod == value)]
[ "def", "_sample_using_mod", "(", "df", ",", "column_name", ":", "str", ",", "mod", ":", "int", ",", "value", ":", "int", ",", ")", ":", "return", "df", "[", "df", "[", "column_name", "]", ".", "map", "(", "lambda", "x", ":", "x", "%", "mod", "==", "value", ")", "]" ]
[ 554, 4 ]
[ 561, 66 ]
python
en
['en', 'en', 'en']
True
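The mod-based sampler above keeps every row whose column value has a fixed remainder; for example, value=0 with mod=5 keeps every fifth id:

import pandas as pd

df = pd.DataFrame({"id": range(10), "amount": range(10)})

sampled = df[df["id"].map(lambda x: x % 5 == 0)]
assert sampled["id"].tolist() == [0, 5]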
PandasExecutionEngine._sample_using_a_list
( df, column_name: str, value_list: list, )
Match the values in the named column against value_list, and only keep the matches
Match the values in the named column against value_list, and only keep the matches
def _sample_using_a_list( df, column_name: str, value_list: list, ): """Match the values in the named column against value_list, and only keep the matches""" return df[df[column_name].isin(value_list)]
[ "def", "_sample_using_a_list", "(", "df", ",", "column_name", ":", "str", ",", "value_list", ":", "list", ",", ")", ":", "return", "df", "[", "df", "[", "column_name", "]", ".", "isin", "(", "value_list", ")", "]" ]
[ 564, 4 ]
[ 570, 51 ]
python
en
['en', 'en', 'en']
True
PandasExecutionEngine._sample_using_hash
( df, column_name: str, hash_digits: int = 1, hash_value: str = "f", hash_function_name: str = "md5", )
Hash the values in the named column, and split on that
Hash the values in the named column, and split on that
def _sample_using_hash( df, column_name: str, hash_digits: int = 1, hash_value: str = "f", hash_function_name: str = "md5", ): """Hash the values in the named column, and split on that""" try: hash_func = getattr(hashlib, hash_function_name) except (TypeError, AttributeError) as e: raise ( ge_exceptions.ExecutionEngineError( f"""The sampling method used with PandasExecutionEngine has a reference to an invalid hash_function_name. Reference to {hash_function_name} cannot be found.""" ) ) matches = df[column_name].map( lambda x: hash_func(str(x).encode()).hexdigest()[-1 * hash_digits :] == hash_value ) return df[matches]
[ "def", "_sample_using_hash", "(", "df", ",", "column_name", ":", "str", ",", "hash_digits", ":", "int", "=", "1", ",", "hash_value", ":", "str", "=", "\"f\"", ",", "hash_function_name", ":", "str", "=", "\"md5\"", ",", ")", ":", "try", ":", "hash_func", "=", "getattr", "(", "hashlib", ",", "hash_function_name", ")", "except", "(", "TypeError", ",", "AttributeError", ")", "as", "e", ":", "raise", "(", "ge_exceptions", ".", "ExecutionEngineError", "(", "f\"\"\"The sampling method used with PandasExecutionEngine has a reference to an invalid hash_function_name.\n Reference to {hash_function_name} cannot be found.\"\"\"", ")", ")", "matches", "=", "df", "[", "column_name", "]", ".", "map", "(", "lambda", "x", ":", "hash_func", "(", "str", "(", "x", ")", ".", "encode", "(", ")", ")", ".", "hexdigest", "(", ")", "[", "-", "1", "*", "hash_digits", ":", "]", "==", "hash_value", ")", "return", "df", "[", "matches", "]" ]
[ 573, 4 ]
[ 595, 26 ]
python
en
['en', 'en', 'en']
True
print_declarations
( decls, detailed=True, recursive=True, writer=lambda x: sys.stdout.write(x + os.linesep), verbose=True)
print declarations tree rooted at each of the included nodes. :param decls: either a single :class:declaration_t object or list of :class:declaration_t objects
print declarations tree rooted at each of the included nodes.
def print_declarations( decls, detailed=True, recursive=True, writer=lambda x: sys.stdout.write(x + os.linesep), verbose=True): """ print declarations tree rooted at each of the included nodes. :param decls: either a single :class:declaration_t object or list of :class:declaration_t objects """ prn = decl_printer_t(0, detailed, recursive, writer, verbose=verbose) if not isinstance(decls, list): decls = [decls] for d in decls: prn.level = 0 prn.instance = d algorithm.apply_visitor(prn, d)
[ "def", "print_declarations", "(", "decls", ",", "detailed", "=", "True", ",", "recursive", "=", "True", ",", "writer", "=", "lambda", "x", ":", "sys", ".", "stdout", ".", "write", "(", "x", "+", "os", ".", "linesep", ")", ",", "verbose", "=", "True", ")", ":", "prn", "=", "decl_printer_t", "(", "0", ",", "detailed", ",", "recursive", ",", "writer", ",", "verbose", "=", "verbose", ")", "if", "not", "isinstance", "(", "decls", ",", "list", ")", ":", "decls", "=", "[", "decls", "]", "for", "d", "in", "decls", ":", "prn", ".", "level", "=", "0", "prn", ".", "instance", "=", "d", "algorithm", ".", "apply_visitor", "(", "prn", ",", "d", ")" ]
[ 465, 0 ]
[ 483, 39 ]
python
en
['en', 'error', 'th']
False
dump_declarations
(declarations, file_path)
Dump declarations tree rooted at each of the included nodes to the file :param declarations: either a single :class:declaration_t object or a list of :class:declaration_t objects :param file_path: path to a file
Dump declarations tree rooted at each of the included nodes to the file
def dump_declarations(declarations, file_path): """ Dump declarations tree rooted at each of the included nodes to the file :param declarations: either a single :class:declaration_t object or a list of :class:declaration_t objects :param file_path: path to a file """ with open(file_path, "w+") as f: print_declarations(declarations, writer=f.write)
[ "def", "dump_declarations", "(", "declarations", ",", "file_path", ")", ":", "with", "open", "(", "file_path", ",", "\"w+\"", ")", "as", "f", ":", "print_declarations", "(", "declarations", ",", "writer", "=", "f", ".", "write", ")" ]
[ 486, 0 ]
[ 497, 56 ]
python
en
['en', 'error', 'th']
False
FlaskRageFormatter.__prepare_error_info
(self, output: Dict[str, Any], record: logging.LogRecord)
Adds some information about a potential exception to the output message.
Adds some information about a potential exception to the output message.
def __prepare_error_info(self, output: Dict[str, Any], record: logging.LogRecord): """ Adds some information about a potential exception to the output message. """ if record.exc_info: backtrace = self.formatException(record.exc_info) if backtrace: output['exception_object'] = backtrace output['exception'] = [ str(record.exc_info[0]), self.__extract_msg(record.exc_info[1]) ]
[ "def", "__prepare_error_info", "(", "self", ",", "output", ":", "Dict", "[", "str", ",", "Any", "]", ",", "record", ":", "logging", ".", "LogRecord", ")", ":", "if", "record", ".", "exc_info", ":", "backtrace", "=", "self", ".", "formatException", "(", "record", ".", "exc_info", ")", "if", "backtrace", ":", "output", "[", "'exception_object'", "]", "=", "backtrace", "output", "[", "'exception'", "]", "=", "[", "str", "(", "record", ".", "exc_info", "[", "0", "]", ")", ",", "self", ".", "__extract_msg", "(", "record", ".", "exc_info", "[", "1", "]", ")", "]" ]
[ 45, 4 ]
[ 56, 13 ]
python
en
['en', 'error', 'th']
False
FlaskRage.init_app
(self, flask_app: flask.Flask)
Initialize logging for Flask application :param flask_app: Flask application
Initialize logging for Flask application
def init_app(self, flask_app: flask.Flask) -> None: """ Initialize logging for Flask application :param flask_app: Flask application """ self.logger = logging.getLogger(getattr(flask_app, "logger_name", "name")) self._setup_db_timer() self._register_handlers(flask_app)
[ "def", "init_app", "(", "self", ",", "flask_app", ":", "flask", ".", "Flask", ")", "->", "None", ":", "self", ".", "logger", "=", "logging", ".", "getLogger", "(", "getattr", "(", "flask_app", ",", "\"logger_name\"", ",", "\"name\"", ")", ")", "self", ".", "_setup_db_timer", "(", ")", "self", ".", "_register_handlers", "(", "flask_app", ")" ]
[ 82, 4 ]
[ 90, 42 ]
python
en
['en', 'error', 'th']
False
FlaskRage.log_request
(self, response: flask.Response)
Log a regular HTTP request in lograge-ish format :param response: flask.Response :return: response
Log a regular HTTP request in lograge-ish format
def log_request(self, response: flask.Response) -> flask.Response: """ Log a regular HTTP request in lograge-ish format :param response: flask.Response :return: response """ if response.status_code >= 500: return response if response.status_code >= 400 and response.status_code != 404: log_fn = self.logger.error else: log_fn = self.logger.info message, extra = self._parse(request, response) log_fn(message, extra=extra) return response
[ "def", "log_request", "(", "self", ",", "response", ":", "flask", ".", "Response", ")", "->", "flask", ".", "Response", ":", "if", "response", ".", "status_code", ">=", "500", ":", "return", "response", "if", "response", ".", "status_code", ">=", "400", "and", "response", ".", "status_code", "!=", "404", ":", "log_fn", "=", "self", ".", "logger", ".", "error", "else", ":", "log_fn", "=", "self", ".", "logger", ".", "info", "message", ",", "extra", "=", "self", ".", "_parse", "(", "request", ",", "response", ")", "log_fn", "(", "message", ",", "extra", "=", "extra", ")", "return", "response" ]
[ 92, 4 ]
[ 108, 23 ]
python
en
['en', 'error', 'th']
False
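The status-code routing in log_request (5xx left to the exception path, 4xx other than 404 logged as errors, everything else as info) can be sketched without Flask; the function and logger names here are made up and the response is reduced to a bare status code:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("flask_rage_sketch")

def log_status(status_code: int, message: str) -> None:
    if status_code >= 500:
        return  # server errors are handled by log_exception instead
    log_fn = logger.error if status_code >= 400 and status_code != 404 else logger.info
    log_fn(message, extra={"status": status_code})

log_status(200, "GET /health")
log_status(422, "POST /orders rejected")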
FlaskRage.log_exception
(self, exception: Exception)
Log an exception in lograge-ish format This can be called e.g. from flask's errorhandlers :param exception: Exception
Log an exception in lograge-ish format
def log_exception(self, exception: Exception) -> None: """ Log an exception in lograge-ish format This can be called e.g. from flask's errorhandlers :param exception: Exception """ message, extra = self._parse(request, exception) self.logger.error(message, extra=extra)
[ "def", "log_exception", "(", "self", ",", "exception", ":", "Exception", ")", "->", "None", ":", "message", ",", "extra", "=", "self", ".", "_parse", "(", "request", ",", "exception", ")", "self", ".", "logger", ".", "error", "(", "message", ",", "extra", "=", "extra", ")" ]
[ 110, 4 ]
[ 119, 47 ]
python
en
['en', 'error', 'th']
False
add_instrument
(instrument: Instrument)
Start instrumenting the current run loop with the given instrument. Args: instrument (trio.abc.Instrument): The instrument to activate. If ``instrument`` is already active, does nothing.
Start instrumenting the current run loop with the given instrument.
def add_instrument(instrument: Instrument) ->None: """Start instrumenting the current run loop with the given instrument. Args: instrument (trio.abc.Instrument): The instrument to activate. If ``instrument`` is already active, does nothing. """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.instruments.add_instrument(instrument) except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "add_instrument", "(", "instrument", ":", "Instrument", ")", "->", "None", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "instruments", ".", "add_instrument", "(", "instrument", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 10, 0 ]
[ 23, 63 ]
python
en
['en', 'en', 'en']
True
remove_instrument
(instrument: Instrument)
Stop instrumenting the current run loop with the given instrument. Args: instrument (trio.abc.Instrument): The instrument to de-activate. Raises: KeyError: if the instrument is not currently active. This could occur either because you never added it, or because you added it and then it raised an unhandled exception and was automatically deactivated.
Stop instrumenting the current run loop with the given instrument.
def remove_instrument(instrument: Instrument) ->None: """Stop instrumenting the current run loop with the given instrument. Args: instrument (trio.abc.Instrument): The instrument to de-activate. Raises: KeyError: if the instrument is not currently active. This could occur either because you never added it, or because you added it and then it raised an unhandled exception and was automatically deactivated. """ locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True try: return GLOBAL_RUN_CONTEXT.runner.instruments.remove_instrument(instrument) except AttributeError: raise RuntimeError("must be called from async context")
[ "def", "remove_instrument", "(", "instrument", ":", "Instrument", ")", "->", "None", ":", "locals", "(", ")", "[", "LOCALS_KEY_KI_PROTECTION_ENABLED", "]", "=", "True", "try", ":", "return", "GLOBAL_RUN_CONTEXT", ".", "runner", ".", "instruments", ".", "remove_instrument", "(", "instrument", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"must be called from async context\"", ")" ]
[ 26, 0 ]
[ 43, 63 ]
python
en
['en', 'en', 'en']
True
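A hedged usage sketch for the two instrument helpers above. It assumes they are exposed as trio.lowlevel.add_instrument / remove_instrument (the case in recent trio releases) and uses a minimal trio.abc.Instrument subclass:

import trio

class TaskCounter(trio.abc.Instrument):
    def __init__(self):
        self.scheduled = 0

    def task_scheduled(self, task):
        self.scheduled += 1

async def main():
    counter = TaskCounter()
    trio.lowlevel.add_instrument(counter)        # no-op if the instrument is already active
    try:
        await trio.sleep(0)                      # give the scheduler something to count
    finally:
        trio.lowlevel.remove_instrument(counter) # raises KeyError if it was never added
    print("tasks scheduled while instrumented:", counter.scheduled)

trio.run(main)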
Orion.fit
(self, data: pd.DataFrame)
Fit the pipeline to the given data. Args: data (DataFrame): Input data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value.
Fit the pipeline to the given data.
def fit(self, data: pd.DataFrame): """Fit the pipeline to the given data. Args: data (DataFrame): Input data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. """ self._mlpipeline = self._get_mlpipeline() self._mlpipeline.fit(data) self._fitted = True
[ "def", "fit", "(", "self", ",", "data", ":", "pd", ".", "DataFrame", ")", ":", "self", ".", "_mlpipeline", "=", "self", ".", "_get_mlpipeline", "(", ")", "self", ".", "_mlpipeline", ".", "fit", "(", "data", ")", "self", ".", "_fitted", "=", "True" ]
[ 74, 4 ]
[ 84, 27 ]
python
en
['en', 'en', 'en']
True
Orion.detect
(self, data: pd.DataFrame, visualization: bool = False)
Detect anomalies in the given data. If ``visualization=True``, also return the visualization outputs from the MLPipeline object. Args: data (DataFrame): Input data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. visualization (bool): If ``True``, also capture the ``visualization`` named output from the ``MLPipeline`` and return it as a second output. Returns: DataFrame or tuple: If visualization is ``False``, it returns the events DataFrame. If visualization is ``True``, it returns a tuple containing the events DataFrame followed by the visualization outputs dict.
Detect anomalies in the given data.
def detect(self, data: pd.DataFrame, visualization: bool = False) -> pd.DataFrame: """Detect anomalies in the given data. If ``visualization=True``, also return the visualization outputs from the MLPipeline object. Args: data (DataFrame): Input data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. visualization (bool): If ``True``, also capture the ``visualization`` named output from the ``MLPipeline`` and return it as a second output. Returns: DataFrame or tuple: If visualization is ``False``, it returns the events DataFrame. If visualization is ``True``, it returns a tuple containing the events DataFrame followed by the visualization outputs dict. """ return self._detect(self._mlpipeline.predict, data, visualization)
[ "def", "detect", "(", "self", ",", "data", ":", "pd", ".", "DataFrame", ",", "visualization", ":", "bool", "=", "False", ")", "->", "pd", ".", "DataFrame", ":", "return", "self", ".", "_detect", "(", "self", ".", "_mlpipeline", ".", "predict", ",", "data", ",", "visualization", ")" ]
[ 125, 4 ]
[ 147, 74 ]
python
en
['en', 'en', 'en']
True
Orion.fit_detect
(self, data: pd.DataFrame, visualization: bool = False)
Fit the pipeline to the data and then detect anomalies. This method is functionally equivalent to calling ``fit(data)`` and later on ``detect(data)`` but with the difference that here the ``MLPipeline`` is called only once, using its ``fit`` method, and the output is directly captured without having to execute the whole pipeline again during the ``predict`` phase. If ``visualization=True``, also return the visualization outputs from the MLPipeline object. Args: data (DataFrame): Input data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. visualization (bool): If ``True``, also capture the ``visualization`` named output from the ``MLPipeline`` and return it as a second output. Returns: DataFrame or tuple: If visualization is ``False``, it returns the events DataFrame. If visualization is ``True``, it returns a tuple containing the events DataFrame followed by the visualization outputs dict.
Fit the pipeline to the data and then detect anomalies.
def fit_detect(self, data: pd.DataFrame, visualization: bool = False) -> pd.DataFrame: """Fit the pipeline to the data and then detect anomalies. This method is functionally equivalent to calling ``fit(data)`` and later on ``detect(data)`` but with the difference that here the ``MLPipeline`` is called only once, using its ``fit`` method, and the output is directly captured without having to execute the whole pipeline again during the ``predict`` phase. If ``visualization=True``, also return the visualization outputs from the MLPipeline object. Args: data (DataFrame): Input data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. visualization (bool): If ``True``, also capture the ``visualization`` named output from the ``MLPipeline`` and return it as a second output. Returns: DataFrame or tuple: If visualization is ``False``, it returns the events DataFrame. If visualization is ``True``, it returns a tuple containing the events DataFrame followed by the visualization outputs dict. """ self._mlpipeline = self._get_mlpipeline() result = self._detect(self._mlpipeline.fit, data, visualization) self._fitted = True return result
[ "def", "fit_detect", "(", "self", ",", "data", ":", "pd", ".", "DataFrame", ",", "visualization", ":", "bool", "=", "False", ")", "->", "pd", ".", "DataFrame", ":", "self", ".", "_mlpipeline", "=", "self", ".", "_get_mlpipeline", "(", ")", "result", "=", "self", ".", "_detect", "(", "self", ".", "_mlpipeline", ".", "fit", ",", "data", ",", "visualization", ")", "self", ".", "_fitted", "=", "True", "return", "result" ]
[ 149, 4 ]
[ 181, 21 ]
python
en
['en', 'en', 'en']
True
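A usage sketch for the fit/detect workflow documented above. The Orion constructor arguments are not shown in these records, so pipeline selection is left out and the relevant calls are kept as comments; only the synthetic input frame actually runs:

import numpy as np
import pandas as pd
# from orion import Orion  # assumed import path

data = pd.DataFrame({
    "timestamp": np.arange(0, 10000, 100),   # 100 evenly spaced timestamps
    "value": np.sin(np.arange(100) / 5.0),   # synthetic signal
})

# orion = Orion(...)                 # pipeline selection omitted; see the Orion docs
# events = orion.fit_detect(data)    # fit the pipeline once and capture anomalies in the same pass
# events would be a DataFrame of detected anomaly intervals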
Orion.save
(self, path: str)
Save this object using pickle. Args: path (str): Path to the file where the serialization of this object will be stored.
Save this object using pickle.
def save(self, path: str): """Save this object using pickle. Args: path (str): Path to the file where the serialization of this object will be stored. """ os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'wb') as pickle_file: pickle.dump(self, pickle_file)
[ "def", "save", "(", "self", ",", "path", ":", "str", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "exist_ok", "=", "True", ")", "with", "open", "(", "path", ",", "'wb'", ")", "as", "pickle_file", ":", "pickle", ".", "dump", "(", "self", ",", "pickle_file", ")" ]
[ 183, 4 ]
[ 193, 42 ]
python
en
['en', 'en', 'en']
True
Orion.load
(cls, path: str)
Load an Orion instance from a pickle file. Args: path (str): Path to the file where the instance has been previously serialized. Returns: Orion Raises: ValueError: If the serialized object is not an Orion instance.
Load an Orion instance from a pickle file.
def load(cls, path: str): """Load an Orion instance from a pickle file. Args: path (str): Path to the file where the instance has been previously serialized. Returns: Orion Raises: ValueError: If the serialized object is not an Orion instance. """ with open(path, 'rb') as pickle_file: orion = pickle.load(pickle_file) if not isinstance(orion, cls): raise ValueError('Serialized object is not an Orion instance') return orion
[ "def", "load", "(", "cls", ",", "path", ":", "str", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "pickle_file", ":", "orion", "=", "pickle", ".", "load", "(", "pickle_file", ")", "if", "not", "isinstance", "(", "orion", ",", "cls", ")", ":", "raise", "ValueError", "(", "'Serialized object is not an Orion instance'", ")", "return", "orion" ]
[ 196, 4 ]
[ 216, 24 ]
python
en
['en', 'en', 'en']
True
Orion.evaluate
(self, data: pd.DataFrame, ground_truth: pd.DataFrame, fit: bool = False, train_data: pd.DataFrame = None, metrics: List[str] = METRICS)
Evaluate the performance against ground truth anomalies. Args: data (DataFrame): Input data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. ground_truth (DataFrame): Ground truth anomalies passed as a ``pandas.DataFrame`` containing two columns: start and stop. fit (bool): Whether to fit the pipeline before evaluating it. Defaults to ``False``. train_data (DataFrame): Training data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. If not given, the pipeline is fitted on ``data``. metrics (list): List of metrics to use, passed as a list of strings. If not given, it defaults to all the Orion metrics. Returns: Series: ``pandas.Series`` containing one element for each metric applied, with the metric name as index.
Evaluate the performance against ground truth anomalies.
def evaluate(self, data: pd.DataFrame, ground_truth: pd.DataFrame, fit: bool = False, train_data: pd.DataFrame = None, metrics: List[str] = METRICS) -> pd.Series: """Evaluate the performance against ground truth anomalies. Args: data (DataFrame): Input data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. ground_truth (DataFrame): Ground truth anomalies passed as a ``pandas.DataFrame`` containing two columns: start and stop. fit (bool): Whether to fit the pipeline before evaluating it. Defaults to ``False``. train_data (DataFrame): Training data, passed as a ``pandas.DataFrame`` containing exactly two columns: timestamp and value. If not given, the pipeline is fitted on ``data``. metrics (list): List of metrics to use, passed as a list of strings. If not given, it defaults to all the Orion metrics. Returns: Series: ``pandas.Series`` containing one element for each metric applied, with the metric name as index. """ if not fit: method = self._mlpipeline.predict else: mlpipeline = self._get_mlpipeline() if train_data is not None: # Fit first and then predict mlpipeline.fit(train_data) method = mlpipeline.predict else: # Fit and predict at once method = mlpipeline.fit events = self._detect(method, data) scores = { metric: METRICS[metric](ground_truth, events, data=data) for metric in metrics } return pd.Series(scores)
[ "def", "evaluate", "(", "self", ",", "data", ":", "pd", ".", "DataFrame", ",", "ground_truth", ":", "pd", ".", "DataFrame", ",", "fit", ":", "bool", "=", "False", ",", "train_data", ":", "pd", ".", "DataFrame", "=", "None", ",", "metrics", ":", "List", "[", "str", "]", "=", "METRICS", ")", "->", "pd", ".", "Series", ":", "if", "not", "fit", ":", "method", "=", "self", ".", "_mlpipeline", ".", "predict", "else", ":", "mlpipeline", "=", "self", ".", "_get_mlpipeline", "(", ")", "if", "train_data", "is", "not", "None", ":", "# Fit first and then predict", "mlpipeline", ".", "fit", "(", "train_data", ")", "method", "=", "mlpipeline", ".", "predict", "else", ":", "# Fit and predict at once", "method", "=", "mlpipeline", ".", "fit", "events", "=", "self", ".", "_detect", "(", "method", ",", "data", ")", "scores", "=", "{", "metric", ":", "METRICS", "[", "metric", "]", "(", "ground_truth", ",", "events", ",", "data", "=", "data", ")", "for", "metric", "in", "metrics", "}", "return", "pd", ".", "Series", "(", "scores", ")" ]
[ 218, 4 ]
[ 264, 32 ]
python
en
['en', 'en', 'en']
True
from_pandas_contextual
(df)
Convert contextual ``pandas.DataFrame`` to list of tuples. Args: df (DataFrame): anomalies, passed as ``pandas.DataFrame`` containing two columns: start and stop. Returns: list: tuple (start, end) timestamp. Raises: KeyError: If the received ``pandas.DataFrame`` does not contain the required columns.
Convert contextual ``pandas.DataFrame`` to list of tuples.
def from_pandas_contextual(df): """ Convert contextual ``pandas.DataFrame`` to list of tuples. Args: df (DataFrame): anomalies, passed as ``pandas.DataFrame`` containing two columns: start and stop. Returns: list: tuple (start, end) timestamp. Raises: KeyError: If the received ``pandas.DataFrame`` does not contain the required columns. """ require = ['start', 'end'] columns = df.columns.tolist() if all(x in columns for x in require): if 'severity' in columns: return list(df[require + ['severity']].itertuples(index=False)) return list(df[require].itertuples(index=False)) raise KeyError('{} not found in columns: {}.'.format(require, columns))
[ "def", "from_pandas_contextual", "(", "df", ")", ":", "require", "=", "[", "'start'", ",", "'end'", "]", "columns", "=", "df", ".", "columns", ".", "tolist", "(", ")", "if", "all", "(", "x", "in", "columns", "for", "x", "in", "require", ")", ":", "if", "'severity'", "in", "columns", ":", "return", "list", "(", "df", "[", "require", "+", "[", "'severity'", "]", "]", ".", "itertuples", "(", "index", "=", "False", ")", ")", "return", "list", "(", "df", "[", "require", "]", ".", "itertuples", "(", "index", "=", "False", ")", ")", "raise", "KeyError", "(", "'{} not found in columns: {}.'", ".", "format", "(", "require", ",", "columns", ")", ")" ]
[ 6, 0 ]
[ 30, 75 ]
python
en
['en', 'lb', 'en']
True
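A small check of the contextual conversion above: with start/end (and an optional severity) column present, the rows become tuples.

import pandas as pd

anomalies = pd.DataFrame({"start": [100, 500], "end": [200, 510], "severity": [0.9, 0.3]})

# Mirrors from_pandas_contextual: severity rides along when the column is present.
rows = list(anomalies[["start", "end", "severity"]].itertuples(index=False))
assert (rows[0].start, rows[0].end, rows[0].severity) == (100, 200, 0.9)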
from_list_points_timestamps
(timestamps, gap=1)
Convert list of timestamps to list of tuples. Convert a list of anomalies identified by timestamps, to a list of tuples marking the start and end interval of anomalies; make it contextually defined. Args: timestamps (list): contains timestamp of anomalies. gap (int): allowed gap between anomalies. Returns: list: tuple (start, end) timestamp.
Convert list of timestamps to list of tuples.
def from_list_points_timestamps(timestamps, gap=1): """ Convert list of timestamps to list of tuples. Convert a list of anomalies identified by timestamps, to a list of tuples marking the start and end interval of anomalies; make it contextually defined. Args: timestamps (list): contains timestamp of anomalies. gap (int): allowed gap between anomalies. Returns: list: tuple (start, end) timestamp. """ timestamps = sorted(timestamps) start_ts = 0 max_ts = len(timestamps) - 1 anomalies = list() break_point = start_ts while break_point < max_ts: if timestamps[break_point + 1] - timestamps[break_point] <= gap: break_point += 1 continue anomalies.append((timestamps[start_ts], timestamps[break_point])) break_point += 1 start_ts = break_point anomalies.append((timestamps[start_ts], timestamps[break_point])) return anomalies
[ "def", "from_list_points_timestamps", "(", "timestamps", ",", "gap", "=", "1", ")", ":", "timestamps", "=", "sorted", "(", "timestamps", ")", "start_ts", "=", "0", "max_ts", "=", "len", "(", "timestamps", ")", "-", "1", "anomalies", "=", "list", "(", ")", "break_point", "=", "start_ts", "while", "break_point", "<", "max_ts", ":", "if", "timestamps", "[", "break_point", "+", "1", "]", "-", "timestamps", "[", "break_point", "]", "<=", "gap", ":", "break_point", "+=", "1", "continue", "anomalies", ".", "append", "(", "(", "timestamps", "[", "start_ts", "]", ",", "timestamps", "[", "break_point", "]", ")", ")", "break_point", "+=", "1", "start_ts", "=", "break_point", "anomalies", ".", "append", "(", "(", "timestamps", "[", "start_ts", "]", ",", "timestamps", "[", "break_point", "]", ")", ")", "return", "anomalies" ]
[ 33, 0 ]
[ 65, 20 ]
python
en
['en', 'en', 'en']
True
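A worked example of the gap-merging loop above, re-run standalone on a short list (a copy of the logic, not an import): consecutive timestamps within `gap` collapse into a single (start, end) interval, and an isolated point becomes a zero-length interval.

timestamps = sorted([1, 2, 3, 10, 11, 20])
gap = 1

anomalies = []
start_ts = 0
break_point = 0
while break_point < len(timestamps) - 1:
    if timestamps[break_point + 1] - timestamps[break_point] <= gap:
        break_point += 1
        continue
    anomalies.append((timestamps[start_ts], timestamps[break_point]))
    break_point += 1
    start_ts = break_point
anomalies.append((timestamps[start_ts], timestamps[break_point]))

assert anomalies == [(1, 3), (10, 11), (20, 20)]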
from_pandas_points
(df)
Convert point ``pandas.DataFrame`` to list of tuples. Convert a ``pandas.DataFrame`` of anomalies identified by one column (timestamp) to a list of tuples marking the start and end interval of anomalies; make it contextually defined. Args: df (DataFrame): anomalies, passed as ``pandas.DataFrame`` containing one column: timestamp. Returns: list: tuple (start, end) timestamp. Raises: KeyError: If the received ``pandas.DataFrame`` does not contain column `timestamp`.
Convert point ``pandas.DataFrame`` to list of tuples.
def from_pandas_points(df): """ Convert point ``pandas.DataFrame`` to list of tuples. Convert a ``pandas.DataFrame`` of anomalies identified by one column (timestamp) to a list of tuples marking the start and end interval of anomalies; make it contextually defined. Args: df (DataFrame): anomalies, passed as ``pandas.DataFrame`` containing one column: timestamp. Returns: list: tuple (start, end) timestamp. Raises: KeyError: If the received ``pandas.DataFrame`` does not contain column `timestamp`. """ time_column = 'timestamp' columns = df.columns.tolist() if time_column not in columns: raise KeyError('{} not found in columns: {}.'.format(time_column, columns)) timestamps = list(df['timestamp']) return from_list_points_timestamps(timestamps)
[ "def", "from_pandas_points", "(", "df", ")", ":", "time_column", "=", "'timestamp'", "columns", "=", "df", ".", "columns", ".", "tolist", "(", ")", "if", "time_column", "not", "in", "columns", ":", "raise", "KeyError", "(", "'{} not found in columns: {}.'", ".", "format", "(", "time_column", ",", "columns", ")", ")", "timestamps", "=", "list", "(", "df", "[", "'timestamp'", "]", ")", "return", "from_list_points_timestamps", "(", "timestamps", ")" ]
[ 68, 0 ]
[ 96, 50 ]
python
en
['en', 'lb', 'en']
True