<SYSTEM_TASK:>
Return True if this worker is not an assistant and only has pending tasks without resource requirements.
<END_TASK>
<USER_TASK:>
Description:
def is_trivial_worker(self, state):
"""
Return True if this worker is not an assistant and all of its pending
tasks have no resource requirements.
We have to pass the state parameter for optimization reasons.
"""
|
if self.assistant:
return False
return all(not task.resources for task in self.get_tasks(state, PENDING))
|
<SYSTEM_TASK:>
Update priority of the given task.
<END_TASK>
<USER_TASK:>
Description:
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
|
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
|
<SYSTEM_TASK:>
Returns the dependency graph rooted at task_id
<END_TASK>
<USER_TASK:>
Description:
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True):
""" Returns the dependency graph rooted at task_id
This does a breadth-first traversal to find the nodes closest to the
root before hitting the scheduler.max_graph_nodes limit.
:param root_task_id: the id of the graph's root
:return: A map of task id to serialized node
"""
|
if seen is None:
seen = set()
elif root_task_id in seen:
return {}
if dep_func is None:
def dep_func(t):
return t.deps
seen.add(root_task_id)
serialized = {}
queue = collections.deque([root_task_id])
while queue:
task_id = queue.popleft()
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.debug('Missing task for id [%s]', task_id)
# NOTE : If a dependency is missing from self._state there is no way to deduce the
# task family and parameters.
family_match = TASK_FAMILY_RE.match(task_id)
family = family_match.group(1) if family_match else UNKNOWN
params = {'task_id': task_id}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'display_name': task_id,
'priority': 0,
}
else:
deps = dep_func(task)
if not include_done:
deps = list(self._filter_done(deps))
serialized[task_id] = self._serialize_task(task_id, deps=deps)
for dep in sorted(deps):
if dep not in seen:
seen.add(dep)
queue.append(dep)
if task_id != root_task_id:
del serialized[task_id]['display_name']
if len(serialized) >= self._config.max_graph_nodes:
break
return serialized
|
<SYSTEM_TASK:>
Query for a subset of tasks by status.
<END_TASK>
<USER_TASK:>
Description:
def task_list(self, status='', upstream_status='', limit=True, search=None, max_shown_tasks=None,
**kwargs):
"""
Query for a subset of tasks by status.
"""
|
if not search:
count_limit = max_shown_tasks or self._config.max_shown_tasks
pre_count = self._state.get_active_task_count_for_status(status)
if limit and pre_count > count_limit:
return {'num_tasks': -1 if upstream_status else pre_count}
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
def filter_func(_):
return True
else:
terms = search.split()
def filter_func(t):
return all(term in t.pretty_id for term in terms)
tasks = self._state.get_active_tasks_by_status(status) if status else self._state.get_active_tasks()
for task in filter(filter_func, tasks):
if task.status != PENDING or not upstream_status or upstream_status == self._upstream_status(task.id, upstream_status_table):
serialized = self._serialize_task(task.id, include_deps=False)
result[task.id] = serialized
if limit and len(result) > (max_shown_tasks or self._config.max_shown_tasks):
return {'num_tasks': len(result)}
return result
|
<SYSTEM_TASK:>
Query for a subset of tasks by task_id.
<END_TASK>
<USER_TASK:>
Description:
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str:
:return:
"""
|
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task_str in task.id:
serialized = self._serialize_task(task.id, include_deps=False)
result[task.status][task.id] = serialized
return result
|
<SYSTEM_TASK:>
Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.
<END_TASK>
<USER_TASK:>
Description:
def exists(self):
"""
Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.
This method is implemented by using :py:attr:`fs`.
"""
|
path = self.path
if '*' in path or '?' in path or '[' in path or '{' in path:
logger.warning("Using wildcards in path %s might lead to processing of an incomplete dataset; "
"override exists() to suppress the warning.", path)
return self.fs.exists(path)
|
<SYSTEM_TASK:>
Generate an id for the indicator document.
<END_TASK>
<USER_TASK:>
Description:
def marker_index_document_id(self):
"""
Generate an id for the indicator document.
"""
|
params = '%s:%s:%s' % (self.index, self.doc_type, self.update_id)
return hashlib.sha1(params.encode('utf-8')).hexdigest()
|
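A minimal standalone sketch of how the indicator id is derived, with hypothetical index / doc_type / update_id values standing in for the target's attributes:

import hashlib

index, doc_type, update_id = 'update_log', 'default', '2016-04-01'  # hypothetical values
params = '%s:%s:%s' % (index, doc_type, update_id)
print(hashlib.sha1(params.encode('utf-8')).hexdigest())  # 40-character hex id for the marker document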
<SYSTEM_TASK:>
Test, if this task has been run.
<END_TASK>
<USER_TASK:>
Description:
def exists(self):
"""
Test, if this task has been run.
"""
|
try:
self.es.get(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id())
return True
except elasticsearch.NotFoundError:
logger.debug('Marker document not found.')
except elasticsearch.ElasticsearchException as err:
logger.warn(err)
return False
|
<SYSTEM_TASK:>
Create the index that will keep track of the tasks if necessary.
<END_TASK>
<USER_TASK:>
Description:
def create_marker_index(self):
"""
Create the index that will keep track of the tasks if necessary.
"""
|
if not self.es.indices.exists(index=self.marker_index):
self.es.indices.create(index=self.marker_index)
|
<SYSTEM_TASK:>
Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`, add those attributes if necessary.
<END_TASK>
<USER_TASK:>
Description:
def _docs(self):
"""
Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`,
add those attributes here, if necessary.
"""
|
iterdocs = iter(self.docs())
first = next(iterdocs)
needs_parsing = False
if isinstance(first, six.string_types):
needs_parsing = True
elif isinstance(first, dict):
pass
else:
raise RuntimeError('Documents must be either JSON strings or dicts.')
for doc in itertools.chain([first], iterdocs):
if needs_parsing:
doc = json.loads(doc)
if '_index' not in doc:
doc['_index'] = self.index
if '_type' not in doc:
doc['_type'] = self.doc_type
yield doc
|
<SYSTEM_TASK:>
Override to provide code for creating the target index.
<END_TASK>
<USER_TASK:>
Description:
def create_index(self):
"""
Override to provide code for creating the target index.
By default it will be created without any special settings or mappings.
"""
|
es = self._init_connection()
if not es.indices.exists(index=self.index):
es.indices.create(index=self.index, body=self.settings)
|
<SYSTEM_TASK:>
Delete the index, if it exists.
<END_TASK>
<USER_TASK:>
Description:
def delete_index(self):
"""
Delete the index, if it exists.
"""
|
es = self._init_connection()
if es.indices.exists(index=self.index):
es.indices.delete(index=self.index)
|
<SYSTEM_TASK:>
Returns an ElasticsearchTarget representing the inserted dataset.
<END_TASK>
<USER_TASK:>
Description:
def output(self):
"""
Returns an ElasticsearchTarget representing the inserted dataset.
Normally you don't override this.
"""
|
return ElasticsearchTarget(
host=self.host,
port=self.port,
http_auth=self.http_auth,
index=self.index,
doc_type=self.doc_type,
update_id=self.update_id(),
marker_index_hist_size=self.marker_index_hist_size,
timeout=self.timeout,
extra_elasticsearch_args=self.extra_elasticsearch_args
)
|
<SYSTEM_TASK:>
Poll job status while active
<END_TASK>
<USER_TASK:>
Description:
def __track_job(self):
"""Poll job status while active"""
|
while not self.__verify_job_has_started():
time.sleep(self.__POLL_TIME)
self.__logger.debug("Waiting for Kubernetes job " + self.uu_name + " to start")
self.__print_kubectl_hints()
status = self.__get_job_status()
while status == "RUNNING":
self.__logger.debug("Kubernetes job " + self.uu_name + " is running")
time.sleep(self.__POLL_TIME)
status = self.__get_job_status()
assert status != "FAILED", "Kubernetes job " + self.uu_name + " failed"
# status == "SUCCEEDED"
self.__logger.info("Kubernetes job " + self.uu_name + " succeeded")
self.signal_complete()
|
<SYSTEM_TASK:>
Asserts that the job has successfully started
<END_TASK>
<USER_TASK:>
Description:
def __verify_job_has_started(self):
"""Asserts that the job has successfully started"""
|
# Verify that the job started
self.__get_job()
# Verify that the pod started
pods = self.__get_pods()
assert len(pods) > 0, "No pod scheduled by " + self.uu_name
for pod in pods:
status = pod.obj['status']
for cont_stats in status.get('containerStatuses', []):
if 'terminated' in cont_stats['state']:
t = cont_stats['state']['terminated']
err_msg = "Pod %s %s (exit code %d). Logs: `kubectl logs pod/%s`" % (
pod.name, t['reason'], t['exitCode'], pod.name)
assert t['exitCode'] == 0, err_msg
if 'waiting' in cont_stats['state']:
wr = cont_stats['state']['waiting']['reason']
assert wr == 'ContainerCreating', "Pod %s %s. Logs: `kubectl logs pod/%s`" % (
pod.name, wr, pod.name)
for cond in status.get('conditions', []):
if 'message' in cond:
if cond['reason'] == 'ContainersNotReady':
return False
assert cond['status'] != 'False', \
"[ERROR] %s - %s" % (cond['reason'], cond['message'])
return True
|
<SYSTEM_TASK:>
Return an engine instance, creating it if it doesn't exist.
<END_TASK>
<USER_TASK:>
Description:
def engine(self):
"""
Return an engine instance, creating it if it doesn't exist.
Recreate the engine connection if it wasn't originally created
by the current process.
"""
|
pid = os.getpid()
conn = SQLAlchemyTarget._engine_dict.get(self.connection_string)
if not conn or conn.pid != pid:
# create and reset connection
engine = sqlalchemy.create_engine(
self.connection_string,
connect_args=self.connect_args,
echo=self.echo
)
SQLAlchemyTarget._engine_dict[self.connection_string] = self.Connection(engine, pid)
return SQLAlchemyTarget._engine_dict[self.connection_string].engine
|
<SYSTEM_TASK:>
Does not change self.path.
<END_TASK>
<USER_TASK:>
Description:
def rename(self, path, raise_if_exists=False):
"""
Does not change self.path.
Unlike ``move_dir()``, ``rename()`` might cause nested directories.
See spotify/luigi#522
"""
|
if isinstance(path, HdfsTarget):
path = path.path
if raise_if_exists and self.fs.exists(path):
raise RuntimeError('Destination exists: %s' % path)
self.fs.rename(self.path, path)
|
<SYSTEM_TASK:>
Currently only works with hadoopcli
<END_TASK>
<USER_TASK:>
Description:
def is_writable(self):
"""
Currently only works with hadoopcli
"""
|
if "/" in self.path:
# example path: /log/ap/2013-01-17/00
parts = self.path.split("/")
# start with the full path and then up the tree until we can check
length = len(parts)
for part in range(length):
path = "/".join(parts[0:length - part]) + "/"
if self.fs.exists(path):
# if the path exists and we can write there, great!
if self._is_writable(path):
return True
# if it exists and we can't =( sad panda
else:
return False
# We went through all parts of the path and we still couldn't find
# one that exists.
return False
|
<SYSTEM_TASK:>
Takes a worker and sorts out tasks based on their status.
<END_TASK>
<USER_TASK:>
Description:
def _partition_tasks(worker):
"""
Takes a worker and sorts out tasks based on their status.
Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
"""
|
task_history = worker._add_task_history
pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'}
set_tasks = {}
set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks}
set_tasks["already_done"] = {task for (task, status, ext) in task_history
if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]}
set_tasks["ever_failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'}
set_tasks["failed"] = set_tasks["ever_failed"] - set_tasks["completed"]
set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'}
set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history
if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and not ext}
set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history
if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and ext}
set_tasks["run_by_other_worker"] = set()
set_tasks["upstream_failure"] = set()
set_tasks["upstream_missing_dependency"] = set()
set_tasks["upstream_run_by_other_worker"] = set()
set_tasks["upstream_scheduling_error"] = set()
set_tasks["not_run"] = set()
return set_tasks
|
<SYSTEM_TASK:>
This dfs checks why tasks are still pending.
<END_TASK>
<USER_TASK:>
Description:
def _depth_first_search(set_tasks, current_task, visited):
"""
This dfs checks why tasks are still pending.
"""
|
visited.add(current_task)
if current_task in set_tasks["still_pending_not_ext"]:
upstream_failure = False
upstream_missing_dependency = False
upstream_run_by_other_worker = False
upstream_scheduling_error = False
for task in current_task._requires():
if task not in visited:
_depth_first_search(set_tasks, task, visited)
if task in set_tasks["ever_failed"] or task in set_tasks["upstream_failure"]:
set_tasks["upstream_failure"].add(current_task)
upstream_failure = True
if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]:
set_tasks["upstream_missing_dependency"].add(current_task)
upstream_missing_dependency = True
if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]:
set_tasks["upstream_run_by_other_worker"].add(current_task)
upstream_run_by_other_worker = True
if task in set_tasks["scheduling_error"]:
set_tasks["upstream_scheduling_error"].add(current_task)
upstream_scheduling_error = True
if not upstream_failure and not upstream_missing_dependency and \
not upstream_run_by_other_worker and not upstream_scheduling_error and \
current_task not in set_tasks["run_by_other_worker"]:
set_tasks["not_run"].add(current_task)
|
<SYSTEM_TASK:>
Checks if there is a continuous range
<END_TASK>
<USER_TASK:>
Description:
def _ranging_attributes(attributes, param_class):
"""
Checks whether the attributes form a contiguous range; if so, returns the
range's first and last elements, otherwise returns (None, None).
"""
|
next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
in_first = attributes.difference(next_attributes)
in_second = next_attributes.difference(attributes)
if len(in_first) == 1 and len(in_second) == 1:
for x in attributes:
if {param_class.next_in_enumeration(x)} == in_second:
return next(iter(in_first)), x
return None, None
|
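A quick sanity check of the range detection, using a hypothetical stand-in for a parameter class that exposes ``next_in_enumeration`` the way luigi's numeric parameters do:

class FakeIntParameter(object):
    """Hypothetical parameter class; next_in_enumeration simply increments."""
    @staticmethod
    def next_in_enumeration(value):
        return value + 1

print(_ranging_attributes({3, 4, 5, 6}, FakeIntParameter))  # (3, 6): contiguous range
print(_ranging_attributes({3, 5, 6}, FakeIntParameter))     # (None, None): gap at 4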
<SYSTEM_TASK:>
Get the human readable comments and quantities for the task types.
<END_TASK>
<USER_TASK:>
Description:
def _get_comments(group_tasks):
"""
Get the human readable comments and quantities for the task types.
"""
|
comments = {}
for status, human in _COMMENTS:
num_tasks = _get_number_of_tasks_for(status, group_tasks)
if num_tasks:
space = " " if status in _PENDING_SUB_STATUSES else ""
comments[status] = '{space}* {num_tasks} {human}:\n'.format(
space=space,
num_tasks=num_tasks,
human=human)
return comments
|
<SYSTEM_TASK:>
This returns a set of the tasks that are being run by other workers
<END_TASK>
<USER_TASK:>
Description:
def _get_run_by_other_worker(worker):
"""
This returns a set of the tasks that are being run by other workers
"""
|
task_sets = _get_external_workers(worker).values()
return functools.reduce(lambda a, b: a | b, task_sets, set())
|
<SYSTEM_TASK:>
This returns a dict with a set of tasks for all of the other workers
<END_TASK>
<USER_TASK:>
Description:
def _get_external_workers(worker):
"""
This returns a dict with a set of tasks for all of the other workers
"""
|
worker_that_blocked_task = collections.defaultdict(set)
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_worker_id == worker._id or not other_task:
continue
worker_that_blocked_task[other_worker_id].add(other_task)
return worker_that_blocked_task
|
<SYSTEM_TASK:>
Takes a collection of tasks and groups them by task family name.
<END_TASK>
<USER_TASK:>
Description:
def _group_tasks_by_name_and_status(task_dict):
"""
Takes a collection of tasks (already grouped by status by the caller) and
returns a dictionary mapping each task family name to the list of tasks
of that family
"""
|
group_status = {}
for task in task_dict:
if task.task_family not in group_status:
group_status[task.task_family] = []
group_status[task.task_family].append(task)
return group_status
|
<SYSTEM_TASK:>
Returns a canonical string used to identify a particular task
<END_TASK>
<USER_TASK:>
Description:
def task_id_str(task_family, params):
"""
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
"""
|
# task_id is a concatenation of the task family, the (truncated) values of the first 3 parameters
# sorted by parameter name, and an md5 hash of the parameters as canonicalised JSON.
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)
return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH])
|
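A self-contained sketch of the id construction above, with assumed values for the module-level constants (the real ones are defined alongside this function in luigi.task):

import hashlib
import json
import re

TASK_ID_TRUNCATE_PARAMS = 16                               # assumed constant values
TASK_ID_TRUNCATE_HASH = 10
TASK_ID_INCLUDE_PARAMS = 3
TASK_ID_INVALID_CHAR_REGEX = re.compile(r'[^A-Za-z0-9_]')

params = {'date': '2016-04-01', 'kind': 'daily'}
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
                         for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)
print('{}_{}_{}'.format('MyTask', param_summary, param_hash[:TASK_ID_TRUNCATE_HASH]))
# e.g. MyTask_2016_04_01_daily_<10-char hash>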
<SYSTEM_TASK:>
If Luigi has forked, we have a different PID, and need to reconnect.
<END_TASK>
<USER_TASK:>
Description:
def get_bite(self):
"""
If Luigi has forked, we have a different PID, and need to reconnect.
"""
|
config = hdfs_config.hdfs()
if self.pid != os.getpid() or not self._bite:
client_kwargs = dict(filter(
lambda k_v: k_v[1] is not None and k_v[1] != '', six.iteritems({
'hadoop_version': config.client_version,
'effective_user': config.effective_user,
})
))
if config.snakebite_autoconfig:
"""
This is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
This client tries to read ``${HADOOP_PATH}/conf/hdfs-site.xml`` to get the address of the namenode.
The behaviour is the same as Client.
"""
from snakebite.client import AutoConfigClient
self._bite = AutoConfigClient(**client_kwargs)
else:
from snakebite.client import Client
self._bite = Client(config.namenode_host, config.namenode_port, **client_kwargs)
return self._bite
|
<SYSTEM_TASK:>
Use snakebite.rename, if available.
<END_TASK>
<USER_TASK:>
Description:
def move(self, path, dest):
"""
Use snakebite.rename, if available.
:param path: source file(s)
:type path: either a string or sequence of strings
:param dest: destination file (single input) or directory (multiple)
:type dest: string
:return: list of renamed items
"""
|
parts = dest.rstrip('/').split('/')
if len(parts) > 1:
dir_path = '/'.join(parts[0:-1])
if not self.exists(dir_path):
self.mkdir(dir_path, parents=True)
return list(self.get_bite().rename(self.list_path(path), dest))
|
<SYSTEM_TASK:>
Use snakebite.rename_dont_move, if available.
<END_TASK>
<USER_TASK:>
Description:
def rename_dont_move(self, path, dest):
"""
Use snakebite.rename_dont_move, if available.
:param path: source path (single input)
:type path: string
:param dest: destination path
:type dest: string
:return: True if succeeded
:raises: snakebite.errors.FileAlreadyExistsException
"""
|
from snakebite.errors import FileAlreadyExistsException
try:
self.get_bite().rename2(path, dest, overwriteDest=False)
except FileAlreadyExistsException:
# Unfortunately python2 doesn't allow exception chaining.
raise luigi.target.FileAlreadyExists()
|
<SYSTEM_TASK:>
Use snakebite.delete, if available.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, path, recursive=True, skip_trash=False):
"""
Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
:param recursive: delete directories trees like \\*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items
"""
|
return list(self.get_bite().delete(self.list_path(path), recurse=recursive))
|
<SYSTEM_TASK:>
Use snakebite.chmod, if available.
<END_TASK>
<USER_TASK:>
Description:
def chmod(self, path, permissions, recursive=False):
"""
Use snakebite.chmod, if available.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param permissions: \\*nix style permission number
:type permissions: octal
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
|
if type(permissions) == str:
permissions = int(permissions, 8)
return list(self.get_bite().chmod(self.list_path(path),
permissions, recursive))
|
<SYSTEM_TASK:>
Use snakebite.count, if available.
<END_TASK>
<USER_TASK:>
Description:
def count(self, path):
"""
Use snakebite.count, if available.
:param path: directory to count the contents of
:type path: string
:return: dictionary with content_size, dir_count and file_count keys
"""
|
try:
res = self.get_bite().count(self.list_path(path)).next()
dir_count = res['directoryCount']
file_count = res['fileCount']
content_size = res['spaceConsumed']
except StopIteration:
dir_count = file_count = content_size = 0
return {'content_size': content_size, 'dir_count': dir_count,
'file_count': file_count}
|
<SYSTEM_TASK:>
Use snakebite.copyToLocal, if available.
<END_TASK>
<USER_TASK:>
Description:
def get(self, path, local_destination):
"""
Use snakebite.copyToLocal, if available.
:param path: HDFS file
:type path: string
:param local_destination: path on the system running Luigi
:type local_destination: string
"""
|
return list(self.get_bite().copyToLocal(self.list_path(path),
local_destination))
|
<SYSTEM_TASK:>
Use snakebite.mkdir, if available.
<END_TASK>
<USER_TASK:>
Description:
def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
"""
Use snakebite.mkdir, if available.
Snakebite's mkdir method allows control over full path creation, so by
default, tell it to build a full path to work like ``hadoop fs -mkdir``.
:param path: HDFS path to create
:type path: string
:param parents: create any missing parent directories
:type parents: boolean, default is True
:param mode: \\*nix style owner/group/other permissions
:type mode: octal, default 0755
"""
|
result = list(self.get_bite().mkdir(self.list_path(path),
create_parent=parents, mode=mode))
if raise_if_exists and "ile exists" in result[0].get('error', ''):
raise luigi.target.FileAlreadyExists("%s exists" % (path, ))
return result
|
<SYSTEM_TASK:>
Use snakebite.ls to get the list of items in a directory.
<END_TASK>
<USER_TASK:>
Description:
def listdir(self, path, ignore_directories=False, ignore_files=False,
include_size=False, include_type=False, include_time=False,
recursive=False):
"""
Use snakebite.ls to get the list of items in a directory.
:param path: the directory to list
:type path: string
:param ignore_directories: if True, do not yield directory entries
:type ignore_directories: boolean, default is False
:param ignore_files: if True, do not yield file entries
:type ignore_files: boolean, default is False
:param include_size: include the size in bytes of the current item
:type include_size: boolean, default is False (do not include)
:param include_type: include the type (d or f) of the current item
:type include_type: boolean, default is False (do not include)
:param include_time: include the last modification time of the current item
:type include_time: boolean, default is False (do not include)
:param recursive: list subdirectory contents
:type recursive: boolean, default is False (do not recurse)
:return: yield with a string, or if any of the include_* settings are
true, a tuple starting with the path, and include_* items in order
"""
|
bite = self.get_bite()
for entry in bite.ls(self.list_path(path), recurse=recursive):
if ignore_directories and entry['file_type'] == 'd':
continue
if ignore_files and entry['file_type'] == 'f':
continue
rval = [entry['path'], ]
if include_size:
rval.append(entry['length'])
if include_type:
rval.append(entry['file_type'])
if include_time:
rval.append(datetime.datetime.fromtimestamp(entry['modification_time'] / 1000))
if len(rval) > 1:
yield tuple(rval)
else:
yield rval[0]
|
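A hedged usage sketch, assuming ``client`` is an instance of the snakebite-backed HDFS client defined above:

# With include_size and include_type set, each entry is a (path, size, type) tuple.
for path, size, kind in client.listdir('/data/logs', include_size=True, include_type=True):
    print(path, size, kind)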
<SYSTEM_TASK:>
Imports task dynamically given a module and a task name.
<END_TASK>
<USER_TASK:>
Description:
def load_task(module, task_name, params_str):
"""
Imports task dynamically given a module and a task name.
"""
|
if module is not None:
__import__(module)
task_cls = Register.get_task_cls(task_name)
return task_cls.from_str_params(params_str)
|
<SYSTEM_TASK:>
Return all of the registered classes.
<END_TASK>
<USER_TASK:>
Description:
def _get_reg(cls):
"""Return all of the registered classes.
:return: a ``dict`` of task_family -> class
"""
|
# We have to do this on-demand in case task names have changed later
reg = dict()
for task_cls in cls._reg:
if not task_cls._visible_in_registry:
continue
name = task_cls.get_task_family()
if name in reg and \
(reg[name] == Register.AMBIGUOUS_CLASS or # Check so issubclass doesn't crash
not issubclass(task_cls, reg[name])):
# Registering two different classes - this means we can't instantiate them by name
# The only exception is if one class is a subclass of the other. In that case, we
# instantiate the most-derived class (this fixes some issues with decorator wrappers).
reg[name] = Register.AMBIGUOUS_CLASS
else:
reg[name] = task_cls
return reg
|
<SYSTEM_TASK:>
Returns an unambiguous class or raises an exception.
<END_TASK>
<USER_TASK:>
Description:
def get_task_cls(cls, name):
"""
Returns an unambiguous class or raises an exception.
"""
|
task_cls = cls._get_reg().get(name)
if not task_cls:
raise TaskClassNotFoundException(cls._missing_task_msg(name))
if task_cls == cls.AMBIGUOUS_CLASS:
raise TaskClassAmbigiousException('Task %r is ambiguous' % name)
return task_cls
|
<SYSTEM_TASK:>
Simple unweighted Levenshtein distance
<END_TASK>
<USER_TASK:>
Description:
def _editdistance(a, b):
""" Simple unweighted Levenshtein distance """
|
r0 = range(0, len(b) + 1)
r1 = [0] * (len(b) + 1)
for i in range(0, len(a)):
r1[0] = i + 1
for j in range(0, len(b)):
c = 0 if a[i] == b[j] else 1  # compare by value, not identity
r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c)
r0 = r1[:]
return r1[len(b)]
|
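A quick sanity check of the distance function (the classic kitten/sitting pair has distance 3):

print(_editdistance('kitten', 'sitting'))  # 3
print(_editdistance('luigi', 'luigi'))     # 0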
<SYSTEM_TASK:>
Override to perform custom queries.
<END_TASK>
<USER_TASK:>
Description:
def init_copy(self, connection):
"""
Override to perform custom queries.
Any code here will be formed in the same transaction as the main copy, just prior to copying data.
Example use cases include truncating the table or removing all data older than X in the database
to keep a rolling window of data available in the table.
"""
|
# TODO: remove this after sufficient time so most people using the
# clear_table attribute will have noticed it doesn't work anymore
if hasattr(self, "clear_table"):
raise Exception("The clear_table attribute has been removed. Override init_copy instead!")
if self.enable_metadata_columns:
self._add_metadata_columns(connection.cursor())
|
<SYSTEM_TASK:>
Grab all the values in task_instance that are found in task_cls.
<END_TASK>
<USER_TASK:>
Description:
def common_params(task_instance, task_cls):
"""
Grab all the values in task_instance that are found in task_cls.
"""
|
if not isinstance(task_cls, task.Register):
raise TypeError("task_cls must be an uninstantiated Task")
task_instance_param_names = dict(task_instance.get_params()).keys()
task_cls_params_dict = dict(task_cls.get_params())
task_cls_param_names = task_cls_params_dict.keys()
common_param_names = set(task_instance_param_names).intersection(set(task_cls_param_names))
common_param_vals = [(key, task_cls_params_dict[key]) for key in common_param_names]
common_kwargs = dict((key, task_instance.param_kwargs[key]) for key in common_param_names)
vals = dict(task_instance.get_param_values(common_param_vals, [], common_kwargs))
return vals
|
<SYSTEM_TASK:>
Return a previous Task of the same family.
<END_TASK>
<USER_TASK:>
Description:
def previous(task):
"""
Return a previous Task of the same family.
By default checks if this task family only has one non-global parameter and if
it is a DateParameter, DateHourParameter or DateIntervalParameter in which case
it returns with the time decremented by 1 (hour, day or interval)
"""
|
params = task.get_params()
previous_params = {}
previous_date_params = {}
for param_name, param_obj in params:
param_value = getattr(task, param_name)
if isinstance(param_obj, parameter.DateParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(days=1)
elif isinstance(param_obj, parameter.DateSecondParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(seconds=1)
elif isinstance(param_obj, parameter.DateMinuteParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(minutes=1)
elif isinstance(param_obj, parameter.DateHourParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(hours=1)
elif isinstance(param_obj, parameter.DateIntervalParameter):
previous_date_params[param_name] = param_value.prev()
else:
previous_params[param_name] = param_value
previous_params.update(previous_date_params)
if len(previous_date_params) == 0:
raise NotImplementedError("No task parameter - can't determine previous task")
elif len(previous_date_params) > 1:
raise NotImplementedError("Too many date-related task parameters - can't determine previous task")
else:
return task.clone(**previous_params)
|
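A minimal usage sketch, assuming a hypothetical task class with a single ``DateParameter``:

import datetime
import luigi

class MyDailyReport(luigi.Task):
    """Hypothetical task with a single date parameter."""
    date = luigi.DateParameter()

task = MyDailyReport(date=datetime.date(2016, 4, 2))
print(previous(task).date)  # 2016-04-01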
<SYSTEM_TASK:>
No explicit -p switch, this version of Hadoop always creates parent directories.
<END_TASK>
<USER_TASK:>
Description:
def mkdir(self, path, parents=True, raise_if_exists=False):
"""
No explicit -p switch, this version of Hadoop always creates parent directories.
"""
|
try:
self.call_check(load_hadoop_cmd() + ['fs', '-mkdir', path])
except hdfs_error.HDFSCliError as ex:
if "File exists" in ex.stderr:
if raise_if_exists:
raise FileAlreadyExists(ex.stderr)
else:
raise
|
<SYSTEM_TASK:>
Runs `hive` from the command line, passing in the given args, and returning stdout.
<END_TASK>
<USER_TASK:>
Description:
def run_hive(args, check_return_code=True):
"""
Runs `hive` from the command line, passing in the given args, and
returning stdout.
With the Apache release of Hive, some of the table existence checks
(which are done using DESCRIBE) do not exit with a return code of 0,
so we need an option to ignore the return code and just return stdout for parsing
"""
|
cmd = load_hive_cmd() + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if check_return_code and p.returncode != 0:
raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode),
stdout, stderr)
return stdout.decode('utf-8')
|
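A hedged usage example, assuming a working ``hive`` executable is on the PATH of the machine running Luigi:

# Run an inline query and parse its stdout.
tables = run_hive(['-e', 'SHOW TABLES;']).splitlines()
print(tables)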
<SYSTEM_TASK:>
Runs the contents of the given script in hive and returns stdout.
<END_TASK>
<USER_TASK:>
Description:
def run_hive_script(script):
"""
Runs the contents of the given script in hive and returns stdout.
"""
|
if not os.path.isfile(script):
raise RuntimeError("Hive script: {0} does not exist.".format(script))
return run_hive(['-f', script])
|
<SYSTEM_TASK:>
Called before job is started.
<END_TASK>
<USER_TASK:>
Description:
def prepare_outputs(self, job):
"""
Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
"""
|
outputs = flatten(job.output())
for o in outputs:
if isinstance(o, FileSystemTarget):
parent_dir = os.path.dirname(o.path)
if parent_dir and not o.fs.exists(parent_dir):
logger.info("Creating parent directory %r", parent_dir)
try:
# there is a possible race condition
# which needs to be handled here
o.fs.mkdir(parent_dir)
except FileAlreadyExists:
pass
|
<SYSTEM_TASK:>
Meant to be used as a context manager.
<END_TASK>
<USER_TASK:>
Description:
def global_instance(cls, cmdline_args, allow_override=False):
"""
Meant to be used as a context manager.
"""
|
orig_value = cls._instance
assert (orig_value is None) or allow_override
new_value = None
try:
new_value = CmdlineParser(cmdline_args)
cls._instance = new_value
yield new_value
finally:
assert cls._instance is new_value
cls._instance = orig_value
|
<SYSTEM_TASK:>
Compute path given current file and relative path.
<END_TASK>
<USER_TASK:>
Description:
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
|
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
|
<SYSTEM_TASK:>
Returns an array of args to pass to the job.
<END_TASK>
<USER_TASK:>
Description:
def args(self):
"""
Returns an array of args to pass to the job.
"""
|
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
|
<SYSTEM_TASK:>
Adds an event to the event file.
<END_TASK>
<USER_TASK:>
Description:
def add_event(self, event):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
"""
|
if not isinstance(event, event_pb2.Event):
raise TypeError("Expected an event_pb2.Event proto, "
" but got %s" % type(event))
self._async_writer.write(event.SerializeToString())
|
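A short usage sketch, assuming ``writer`` is an instance of the writer class above; the Event proto import path is an assumption and may differ between TensorFlow/TensorBoard versions:

import time

from tensorflow.core.util import event_pb2  # assumed import location of the Event proto

event = event_pb2.Event(wall_time=time.time(), step=0, file_version='brain.Event:2')
writer.add_event(event)  # serialized and handed to the async writer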
<SYSTEM_TASK:>
Extract device name from a tf.Event proto carrying tensor value.
<END_TASK>
<USER_TASK:>
Description:
def _extract_device_name_from_event(event):
"""Extract device name from a tf.Event proto carrying tensor value."""
|
plugin_data_content = json.loads(
tf.compat.as_str(event.summary.value[0].metadata.plugin_data.content))
return plugin_data_content['device']
|
<SYSTEM_TASK:>
Add a GraphDef.
<END_TASK>
<USER_TASK:>
Description:
def add_graph(self, run_key, device_name, graph_def, debug=False):
"""Add a GraphDef.
Args:
run_key: A key for the run, containing information about the feeds,
fetches, and targets.
device_name: The name of the device that the `GraphDef` is for.
graph_def: An instance of the `GraphDef` proto.
debug: Whether `graph_def` consists of the debug ops.
"""
|
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
if run_key not in graph_dict:
graph_dict[run_key] = dict() # Mapping device_name to GraphDef.
graph_dict[run_key][tf.compat.as_str(device_name)] = (
debug_graphs_helper.DebugGraphWrapper(graph_def))
|
<SYSTEM_TASK:>
Get the runtime GraphDef protos associated with a run key.
<END_TASK>
<USER_TASK:>
Description:
def get_graphs(self, run_key, debug=False):
"""Get the runtime GraphDef protos associated with a run key.
Args:
run_key: A Session.run key.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `dict` mapping device name to `GraphDef` protos.
"""
|
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
graph_wrappers = graph_dict.get(run_key, {})
graph_defs = dict()
for device_name, wrapper in graph_wrappers.items():
graph_defs[device_name] = wrapper.graph_def
return graph_defs
|
<SYSTEM_TASK:>
Get the runtime GraphDef proto associated with a run key and a device.
<END_TASK>
<USER_TASK:>
Description:
def get_graph(self, run_key, device_name, debug=False):
"""Get the runtime GraphDef proto associated with a run key and a device.
Args:
run_key: A Session.run key.
device_name: Name of the device in question.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `GraphDef` proto.
"""
|
return self.get_graphs(run_key, debug=debug).get(device_name, None)
|
<SYSTEM_TASK:>
Obtain possibly base-expanded node name.
<END_TASK>
<USER_TASK:>
Description:
def get_maybe_base_expanded_node_name(self, node_name, run_key, device_name):
"""Obtain possibly base-expanded node name.
Base-expansion is the transformation of a node name which happens to be the
name scope of other nodes in the same graph. For example, if a graph
contains two nodes named 'a/b' and 'a/b/read', the name of the first node
will be base-expanded to 'a/b/(b)'.
This method uses caching to avoid unnecessary recomputation.
Args:
node_name: Name of the node.
run_key: The run key to which the node belongs.
device_name: Name of the device to which the node belongs.
Raises:
ValueError: If `run_key` and/or `device_name` do not exist in the record.
"""
|
device_name = tf.compat.as_str(device_name)
if run_key not in self._run_key_to_original_graphs:
raise ValueError('Unknown run_key: %s' % run_key)
if device_name not in self._run_key_to_original_graphs[run_key]:
raise ValueError(
'Unknown device for run key "%s": %s' % (run_key, device_name))
return self._run_key_to_original_graphs[
run_key][device_name].maybe_base_expanded_node_name(node_name)
|
<SYSTEM_TASK:>
Implementation of the core metadata-carrying Event proto callback.
<END_TASK>
<USER_TASK:>
Description:
def on_core_metadata_event(self, event):
"""Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details.
"""
|
core_metadata = json.loads(event.log_message.message)
input_names = ','.join(core_metadata['input_names'])
output_names = ','.join(core_metadata['output_names'])
target_nodes = ','.join(core_metadata['target_nodes'])
self._run_key = RunKey(input_names, output_names, target_nodes)
if not self._graph_defs:
self._graph_defs_arrive_first = False
else:
for device_name in self._graph_defs:
self._add_graph_def(device_name, self._graph_defs[device_name])
self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))
# Wait for acknowledgement from client. Blocks until an item is got.
logger.info('on_core_metadata_event() waiting for client ack (meta)...')
self._incoming_channel.get()
logger.info('on_core_metadata_event() client ack received (meta).')
|
<SYSTEM_TASK:>
Implementation of the GraphDef-carrying Event proto callback.
<END_TASK>
<USER_TASK:>
Description:
def on_graph_def(self, graph_def, device_name, wall_time):
"""Implementation of the GraphDef-carrying Event proto callback.
Args:
graph_def: A GraphDef proto. N.B.: The GraphDef is from
the core runtime of a debugged Session::Run() call, after graph
partition. Therefore it may differ from the GraphDef available to
the general TensorBoard. For example, the GraphDef in general
TensorBoard may get partitioned for multiple devices (CPUs and GPUs),
each of which will generate a GraphDef event proto sent to this
method.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
"""
|
# For now, we do nothing with the graph def. However, we must define this
# method to satisfy the handler's interface. Furthermore, we may use the
# graph in the future (for instance to provide a graph if there is no graph
# provided otherwise).
del wall_time
self._graph_defs[device_name] = graph_def
if not self._graph_defs_arrive_first:
self._add_graph_def(device_name, graph_def)
self._incoming_channel.get()
|
<SYSTEM_TASK:>
Get the traceback of an op in the latest version of the TF graph.
<END_TASK>
<USER_TASK:>
Description:
def get_op_traceback(self, op_name):
"""Get the traceback of an op in the latest version of the TF graph.
Args:
op_name: Name of the op.
Returns:
Creation traceback of the op, in the form of a list of 2-tuples:
(file_path, lineno)
Raises:
ValueError: If the op with the given name cannot be found in the latest
version of the graph that this SourceManager instance has received, or
if this SourceManager instance has not received any graph traceback yet.
"""
|
if not self._graph_traceback:
raise ValueError('No graph traceback has been received yet.')
for op_log_entry in self._graph_traceback.log_entries:
if op_log_entry.name == op_name:
return self._code_def_to_traceback_list(op_log_entry.code_def)
raise ValueError(
'No op named "%s" can be found in the graph of the latest version '
' (%d).' % (op_name, self._graph_version))
|
<SYSTEM_TASK:>
Get the lists of ops created at lines of a specified source file.
<END_TASK>
<USER_TASK:>
Description:
def get_file_tracebacks(self, file_path):
"""Get the lists of ops created at lines of a specified source file.
Args:
file_path: Path to the source file.
Returns:
A dict mapping line number to a list of 2-tuples,
`(op_name, stack_position)`
`op_name` is the name of the op whose creation traceback
includes the line.
`stack_position` is the position of the line in the op's creation
traceback, represented as a 0-based integer.
Raises:
ValueError: If `file_path` does not point to a source file that has been
received by this instance of `SourceManager`.
"""
|
if file_path not in self._source_file_content:
raise ValueError(
'Source file of path "%s" has not been received by this instance of '
'SourceManager.' % file_path)
lineno_to_op_names_and_stack_position = dict()
for op_log_entry in self._graph_traceback.log_entries:
for stack_pos, trace in enumerate(op_log_entry.code_def.traces):
if self._graph_traceback.id_to_string[trace.file_id] == file_path:
if trace.lineno not in lineno_to_op_names_and_stack_position:
lineno_to_op_names_and_stack_position[trace.lineno] = []
lineno_to_op_names_and_stack_position[trace.lineno].append(
(op_log_entry.name, stack_pos))
return lineno_to_op_names_and_stack_position
|
<SYSTEM_TASK:>
Query tensor store for a given debugged tensor value.
<END_TASK>
<USER_TASK:>
Description:
def query_tensor_store(self,
watch_key,
time_indices=None,
slicing=None,
mapping=None):
"""Query tensor store for a given debugged tensor value.
Args:
watch_key: The watch key of the debugged tensor being sought. Format:
<node_name>:<output_slot>:<debug_op>
E.g., Dense_1/MatMul:0:DebugIdentity.
time_indices: Optional time indices string. By default, the latest time
index ('-1') is returned.
slicing: Optional slicing string.
mapping: Optional mapping string, e.g., 'image/png'.
Returns:
If mapping is `None`, the possibly sliced values as a nested `list` of
values.
If mapping is not `None`, the format of the return value will depend on
the mapping.
"""
|
return self._tensor_store.query(watch_key,
time_indices=time_indices,
slicing=slicing,
mapping=mapping)
|
<SYSTEM_TASK:>
Construct a werkzeug Response.
<END_TASK>
<USER_TASK:>
Description:
def Respond(request,
content,
content_type,
code=200,
expires=0,
content_encoding=None,
encoding='utf-8'):
"""Construct a werkzeug Response.
Responses are transmitted to the browser with compression if: a) the browser
supports it; b) it's sane to compress the content_type in question; and c)
the content isn't already compressed, as indicated by the content_encoding
parameter.
Browser and proxy caching is completely disabled by default. If the expires
parameter is greater than zero then the response will be able to be cached by
the browser for that many seconds; however, proxies are still forbidden from
caching so that developers can bypass the cache with Ctrl+Shift+R.
For textual content that isn't JSON, the encoding parameter is used as the
transmission charset which is automatically appended to the Content-Type
header. That is unless of course the content_type parameter contains a
charset parameter. If the two disagree, the characters in content will be
transcoded to the latter.
If content_type declares a JSON media type, then content MAY be a dict, list,
tuple, or set, in which case this function has an implicit composition with
json_util.Cleanse and json.dumps. The encoding parameter is used to decode
byte strings within the JSON object; therefore transmitting binary data
within JSON is not permitted. JSON is transmitted as ASCII unless the
content_type parameter explicitly defines a charset parameter, in which case
the serialized JSON bytes will use that instead of escape sequences.
Args:
request: A werkzeug Request object. Used mostly to check the
Accept-Encoding header.
content: Payload data as byte string, unicode string, or maybe JSON.
content_type: Media type and optionally an output charset.
code: Numeric HTTP status code to use.
expires: Second duration for browser caching.
content_encoding: Encoding if content is already encoded, e.g. 'gzip'.
encoding: Input charset if content parameter has byte strings.
Returns:
A werkzeug Response object (a WSGI application).
"""
|
mimetype = _EXTRACT_MIMETYPE_PATTERN.search(content_type).group(0)
charset_match = _EXTRACT_CHARSET_PATTERN.search(content_type)
charset = charset_match.group(1) if charset_match else encoding
textual = charset_match or mimetype in _TEXTUAL_MIMETYPES
if (mimetype in _JSON_MIMETYPES and
isinstance(content, (dict, list, set, tuple))):
content = json.dumps(json_util.Cleanse(content, encoding),
ensure_ascii=not charset_match)
if charset != encoding:
content = tf.compat.as_text(content, encoding)
content = tf.compat.as_bytes(content, charset)
if textual and not charset_match and mimetype not in _JSON_MIMETYPES:
content_type += '; charset=' + charset
gzip_accepted = _ALLOWS_GZIP_PATTERN.search(
request.headers.get('Accept-Encoding', ''))
# Automatically gzip uncompressed text data if accepted.
if textual and not content_encoding and gzip_accepted:
out = six.BytesIO()
# Set mtime to zero to make payload for a given input deterministic.
with gzip.GzipFile(fileobj=out, mode='wb', compresslevel=3, mtime=0) as f:
f.write(content)
content = out.getvalue()
content_encoding = 'gzip'
content_length = len(content)
direct_passthrough = False
# Automatically streamwise-gunzip precompressed data if not accepted.
if content_encoding == 'gzip' and not gzip_accepted:
gzip_file = gzip.GzipFile(fileobj=six.BytesIO(content), mode='rb')
# Last 4 bytes of gzip formatted data (little-endian) store the original
# content length mod 2^32; we just assume it's the content length. That
# means we can't streamwise-gunzip >4 GB precompressed file; this is ok.
content_length = struct.unpack('<I', content[-4:])[0]
content = werkzeug.wsgi.wrap_file(request.environ, gzip_file)
content_encoding = None
direct_passthrough = True
headers = []
headers.append(('Content-Length', str(content_length)))
if content_encoding:
headers.append(('Content-Encoding', content_encoding))
if expires > 0:
e = wsgiref.handlers.format_date_time(time.time() + float(expires))
headers.append(('Expires', e))
headers.append(('Cache-Control', 'private, max-age=%d' % expires))
else:
headers.append(('Expires', '0'))
headers.append(('Cache-Control', 'no-cache, must-revalidate'))
if request.method == 'HEAD':
content = None
return werkzeug.wrappers.Response(
response=content, status=code, headers=headers, content_type=content_type,
direct_passthrough=direct_passthrough)
|
<SYSTEM_TASK:>
Finds the longest "parent-path" of 'path' in 'path_set'.
<END_TASK>
<USER_TASK:>
Description:
def _find_longest_parent_path(path_set, path):
"""Finds the longest "parent-path" of 'path' in 'path_set'.
This function takes and returns "path-like" strings which are strings
made of strings separated by os.sep. No file access is performed here, so
these strings need not correspond to actual files in some file-system.
This function returns the longest ancestor of 'path' present in 'path_set'.
For example, for path_set=["/foo/bar", "/foo", "/bar/foo"] and
path="/foo/bar/sub_dir", returns "/foo/bar".
Args:
path_set: set of path-like strings -- e.g. a list of strings separated by
os.sep. No actual disk-access is performed here, so these need not
correspond to actual files.
path: a path-like string.
Returns:
The element in path_set which is the longest parent directory of 'path'.
"""
|
# This could likely be more efficiently implemented with a trie
# data-structure, but we don't want to add an extra dependency for that.
while path not in path_set:
if not path:
return None
path = os.path.dirname(path)
return path
|
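The docstring's example run directly (assuming a POSIX ``os.sep``):

path_set = {'/foo/bar', '/foo', '/bar/foo'}
print(_find_longest_parent_path(path_set, '/foo/bar/sub_dir'))  # '/foo/bar'
print(_find_longest_parent_path(path_set, '/foo/baz'))          # '/foo'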
<SYSTEM_TASK:>
Returns the type of the google.protobuf.Value message as an api.DataType.
<END_TASK>
<USER_TASK:>
Description:
def _protobuf_value_type(value):
"""Returns the type of the google.protobuf.Value message as an api.DataType.
Returns None if the type of 'value' is not one of the types supported in
api_pb2.DataType.
Args:
value: google.protobuf.Value message.
"""
|
if value.HasField("number_value"):
return api_pb2.DATA_TYPE_FLOAT64
if value.HasField("string_value"):
return api_pb2.DATA_TYPE_STRING
if value.HasField("bool_value"):
return api_pb2.DATA_TYPE_BOOL
return None
|
<SYSTEM_TASK:>
Returns a string representation of given google.protobuf.Value message.
<END_TASK>
<USER_TASK:>
Description:
def _protobuf_value_to_string(value):
"""Returns a string representation of given google.protobuf.Value message.
Args:
value: google.protobuf.Value message. Assumed to be of type 'number',
'string' or 'bool'.
"""
|
value_in_json = json_format.MessageToJson(value)
if value.HasField("string_value"):
# Remove the quotations.
return value_in_json[1:-1]
return value_in_json
|
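A small check of the two Value helpers above, building ``google.protobuf`` ``Value`` messages directly; ``api_pb2`` is the hparams plugin's api_pb2 already imported by this module:

from google.protobuf import struct_pb2

v_num = struct_pb2.Value(number_value=0.25)
v_str = struct_pb2.Value(string_value='adam')
print(_protobuf_value_type(v_num) == api_pb2.DATA_TYPE_FLOAT64)  # True
print(_protobuf_value_to_string(v_str))                          # adam (quotation marks stripped)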
<SYSTEM_TASK:>
Finds the experiment associated with the metadata.EXPERIMENT_TAG tag.
<END_TASK>
<USER_TASK:>
Description:
def _find_experiment_tag(self):
"""Finds the experiment associcated with the metadata.EXPERIMENT_TAG tag.
Caches the experiment if it was found.
Returns:
The experiment or None if no such experiment is found.
"""
|
with self._experiment_from_tag_lock:
if self._experiment_from_tag is None:
mapping = self.multiplexer.PluginRunToTagToContent(
metadata.PLUGIN_NAME)
for tag_to_content in mapping.values():
if metadata.EXPERIMENT_TAG in tag_to_content:
self._experiment_from_tag = metadata.parse_experiment_plugin_data(
tag_to_content[metadata.EXPERIMENT_TAG])
break
return self._experiment_from_tag
|
<SYSTEM_TASK:>
Computes a minimal Experiment protocol buffer by scanning the runs.
<END_TASK>
<USER_TASK:>
Description:
def _compute_experiment_from_runs(self):
"""Computes a minimal Experiment protocol buffer by scanning the runs."""
|
hparam_infos = self._compute_hparam_infos()
if not hparam_infos:
return None
metric_infos = self._compute_metric_infos()
return api_pb2.Experiment(hparam_infos=hparam_infos,
metric_infos=metric_infos)
|
<SYSTEM_TASK:>
Computes a list of api_pb2.HParamInfo from the current run, tag info.
<END_TASK>
<USER_TASK:>
Description:
def _compute_hparam_infos(self):
"""Computes a list of api_pb2.HParamInfo from the current run, tag info.
Finds all the SessionStartInfo messages and collects the hparams values
appearing in each one. For each hparam attempts to deduce a type that fits
all its values. Finally, sets the 'domain' of the resulting HParamInfo
to be discrete if the type is string and the number of distinct values is
small enough.
Returns:
A list of api_pb2.HParamInfo messages.
"""
|
run_to_tag_to_content = self.multiplexer.PluginRunToTagToContent(
metadata.PLUGIN_NAME)
# Construct a dict mapping an hparam name to its list of values.
hparams = collections.defaultdict(list)
for tag_to_content in run_to_tag_to_content.values():
if metadata.SESSION_START_INFO_TAG not in tag_to_content:
continue
start_info = metadata.parse_session_start_info_plugin_data(
tag_to_content[metadata.SESSION_START_INFO_TAG])
for (name, value) in six.iteritems(start_info.hparams):
hparams[name].append(value)
# Try to construct an HParamInfo for each hparam from its name and list
# of values.
result = []
for (name, values) in six.iteritems(hparams):
hparam_info = self._compute_hparam_info_from_values(name, values)
if hparam_info is not None:
result.append(hparam_info)
return result
|
<SYSTEM_TASK:>
Builds an HParamInfo message from the hparam name and list of values.
<END_TASK>
<USER_TASK:>
Description:
def _compute_hparam_info_from_values(self, name, values):
"""Builds an HParamInfo message from the hparam name and list of values.
Args:
name: string. The hparam name.
values: list of google.protobuf.Value messages. The list of values for the
hparam.
Returns:
An api_pb2.HParamInfo message.
"""
|
# Figure out the type from the values.
# Ignore values whose type is not listed in api_pb2.DataType
# If all values have the same type, then that is the type used.
# Otherwise, the returned type is DATA_TYPE_STRING.
result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET)
distinct_values = set(
_protobuf_value_to_string(v) for v in values if _protobuf_value_type(v))
for v in values:
v_type = _protobuf_value_type(v)
if not v_type:
continue
if result.type == api_pb2.DATA_TYPE_UNSET:
result.type = v_type
elif result.type != v_type:
result.type = api_pb2.DATA_TYPE_STRING
if result.type == api_pb2.DATA_TYPE_STRING:
# A string result.type does not change, so we can exit the loop.
break
# If we couldn't figure out a type, then we can't compute the hparam_info.
if result.type == api_pb2.DATA_TYPE_UNSET:
return None
# If the result is a string, set the domain to be the distinct values if
# there aren't too many of them.
if (result.type == api_pb2.DATA_TYPE_STRING
and len(distinct_values) <= self._max_domain_discrete_len):
result.domain_discrete.extend(distinct_values)
return result
|
<SYSTEM_TASK:>
Creates a summary that defines a hyperparameter-tuning experiment.
<END_TASK>
<USER_TASK:>
Description:
def experiment_pb(
hparam_infos,
metric_infos,
user='',
description='',
time_created_secs=None):
"""Creates a summary that defines a hyperparameter-tuning experiment.
Args:
hparam_infos: Array of api_pb2.HParamInfo messages. Describes the
hyperparameters used in the experiment.
metric_infos: Array of api_pb2.MetricInfo messages. Describes the metrics
used in the experiment. See the documentation at the top of this file
for how to populate this.
user: String. An id for the user running the experiment
description: String. A description for the experiment. May contain markdown.
time_created_secs: float. The time the experiment is created in seconds
since the UNIX epoch. If None uses the current time.
Returns:
A summary protobuffer containing the experiment definition.
"""
|
if time_created_secs is None:
time_created_secs = time.time()
experiment = api_pb2.Experiment(
description=description,
user=user,
time_created_secs=time_created_secs,
hparam_infos=hparam_infos,
metric_infos=metric_infos)
return _summary(metadata.EXPERIMENT_TAG,
plugin_data_pb2.HParamsPluginData(experiment=experiment))
|
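A hedged sketch of defining a small experiment; the HParamInfo/MetricInfo/MetricName field names follow the hparams api proto as used elsewhere in this module:

hparam_infos = [
    api_pb2.HParamInfo(name='learning_rate', type=api_pb2.DATA_TYPE_FLOAT64),
    api_pb2.HParamInfo(name='optimizer', type=api_pb2.DATA_TYPE_STRING),
]
metric_infos = [
    api_pb2.MetricInfo(name=api_pb2.MetricName(tag='accuracy')),
]
summary = experiment_pb(hparam_infos, metric_infos, user='alice',
                        description='Sweep over learning rate and optimizer.')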
<SYSTEM_TASK:>
Constructs a SessionStartInfo protobuffer.
<END_TASK>
<USER_TASK:>
Description:
def session_start_pb(hparams,
model_uri='',
monitor_url='',
group_name='',
start_time_secs=None):
"""Constructs a SessionStartInfo protobuffer.
Creates a summary that contains a training session metadata information.
One such summary per training session should be created. Each should have
a different run.
Args:
hparams: A dictionary with string keys. Describes the hyperparameter values
used in the session, mapping each hyperparameter name to its value.
Supported value types are `bool`, `int`, `float`, `str`, `list`,
`tuple`.
The type of value must correspond to the type of hyperparameter
(defined in the corresponding api_pb2.HParamInfo member of the
Experiment protobuf) as follows:
+-----------------+---------------------------------+
|Hyperparameter | Allowed (Python) value types |
|type | |
+-----------------+---------------------------------+
|DATA_TYPE_BOOL | bool |
|DATA_TYPE_FLOAT64| int, float |
|DATA_TYPE_STRING | six.string_types, tuple, list |
+-----------------+---------------------------------+
Tuple and list instances will be converted to their string
representation.
model_uri: See the comment for the field with the same name of
plugin_data_pb2.SessionStartInfo.
monitor_url: See the comment for the field with the same name of
plugin_data_pb2.SessionStartInfo.
group_name: See the comment for the field with the same name of
plugin_data_pb2.SessionStartInfo.
start_time_secs: float. The time to use as the session start time.
Represented as seconds since the UNIX epoch. If None uses
the current time.
Returns:
The summary protobuffer mentioned above.
"""
|
if start_time_secs is None:
start_time_secs = time.time()
session_start_info = plugin_data_pb2.SessionStartInfo(
model_uri=model_uri,
monitor_url=monitor_url,
group_name=group_name,
start_time_secs=start_time_secs)
  for (hp_name, hp_val) in six.iteritems(hparams):
    # Note: check bool before (float, int). In Python, bool is a subclass of
    # int, so checking numbers first would make the bool branch unreachable
    # and booleans would be stored as number_value instead of bool_value,
    # contradicting the type table in the docstring above.
    if isinstance(hp_val, bool):
      session_start_info.hparams[hp_name].bool_value = hp_val
    elif isinstance(hp_val, (float, int)):
      session_start_info.hparams[hp_name].number_value = hp_val
    elif isinstance(hp_val, six.string_types):
      session_start_info.hparams[hp_name].string_value = hp_val
    elif isinstance(hp_val, (list, tuple)):
      session_start_info.hparams[hp_name].string_value = str(hp_val)
    else:
      raise TypeError('hparams[%s]=%s has type: %s which is not supported' %
                      (hp_name, hp_val, type(hp_val)))
return _summary(metadata.SESSION_START_INFO_TAG,
plugin_data_pb2.HParamsPluginData(
session_start_info=session_start_info))
|
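A short usage sketch; the hyperparameter names and values below are illustrative only.
hparams = {
    'learning_rate': 1e-3,     # float -> number_value
    'optimizer': 'adam',       # str -> string_value
    'use_dropout': True,       # bool -> bool_value
    'hidden_units': [64, 32],  # list -> string_value "[64, 32]"
}
# One such summary should be written per training session (per run).
session_summary = session_start_pb(hparams, group_name='trial_1')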
<SYSTEM_TASK:>
Constructs a SessionEndInfo protobuffer.
<END_TASK>
<USER_TASK:>
Description:
def session_end_pb(status, end_time_secs=None):
"""Constructs a SessionEndInfo protobuffer.
Creates a summary that contains status information for a completed
training session. Should be exported after the training session is completed.
One such summary per training session should be created. Each should have
a different run.
Args:
status: A tensorboard.hparams.Status enumeration value denoting the
status of the session.
end_time_secs: float. The time to use as the session end time. Represented
as seconds since the UNIX epoch. If None uses the current time.
Returns:
The summary protobuffer mentioned above.
"""
|
if end_time_secs is None:
end_time_secs = time.time()
session_end_info = plugin_data_pb2.SessionEndInfo(status=status,
end_time_secs=end_time_secs)
return _summary(metadata.SESSION_END_INFO_TAG,
plugin_data_pb2.HParamsPluginData(
session_end_info=session_end_info))
|
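For example, assuming the `Status` enum values defined in `api_pb2`:
# Mark a session that finished successfully.
end_summary = session_end_pb(api_pb2.STATUS_SUCCESS)
# Or record a failure with an explicit end time (timestamp is hypothetical).
failed_summary = session_end_pb(api_pb2.STATUS_FAILURE,
                                end_time_secs=1546300800.0)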
<SYSTEM_TASK:>
List all the plugins that have registered assets in logdir.
<END_TASK>
<USER_TASK:>
Description:
def ListPlugins(logdir):
"""List all the plugins that have registered assets in logdir.
If the plugins_dir does not exist, it returns an empty list. This maintains
compatibility with old directories that have no plugins written.
Args:
logdir: A directory that was created by a TensorFlow events writer.
Returns:
a list of plugin names, as strings
"""
|
plugins_dir = os.path.join(logdir, _PLUGINS_DIR)
try:
entries = tf.io.gfile.listdir(plugins_dir)
except tf.errors.NotFoundError:
return []
# Strip trailing slashes, which listdir() includes for some filesystems
# for subdirectories, after using them to bypass IsDirectory().
return [x.rstrip('/') for x in entries
if x.endswith('/') or _IsDirectory(plugins_dir, x)]
|
<SYSTEM_TASK:>
List all the assets that are available for given plugin in a logdir.
<END_TASK>
<USER_TASK:>
Description:
def ListAssets(logdir, plugin_name):
"""List all the assets that are available for given plugin in a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: A string name of a plugin to list assets for.
Returns:
A string list of available plugin assets. If the plugin subdirectory does
not exist (either because the logdir doesn't exist, or because the plugin
didn't register) an empty list is returned.
"""
|
plugin_dir = PluginDirectory(logdir, plugin_name)
try:
# Strip trailing slashes, which listdir() includes for some filesystems.
return [x.rstrip('/') for x in tf.io.gfile.listdir(plugin_dir)]
except tf.errors.NotFoundError:
return []
|
<SYSTEM_TASK:>
Retrieve a particular plugin asset from a logdir.
<END_TASK>
<USER_TASK:>
Description:
def RetrieveAsset(logdir, plugin_name, asset_name):
"""Retrieve a particular plugin asset from a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: The plugin we want an asset from.
asset_name: The name of the requested asset.
Returns:
string contents of the plugin asset.
Raises:
KeyError: if the asset does not exist.
"""
|
asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
try:
with tf.io.gfile.GFile(asset_path, "r") as f:
return f.read()
except tf.errors.NotFoundError:
raise KeyError("Asset path %s not found" % asset_path)
except tf.errors.OpError as e:
raise KeyError("Couldn't read asset path: %s, OpError %s" % (asset_path, e))
|
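The three asset helpers above are typically used together; a sketch with a hypothetical logdir:
logdir = '/tmp/logs'
for plugin_name in ListPlugins(logdir):
  for asset_name in ListAssets(logdir, plugin_name):
    try:
      contents = RetrieveAsset(logdir, plugin_name, asset_name)
    except KeyError:
      continue  # Asset disappeared between listing and retrieval.
    print('%s/%s: %d bytes' % (plugin_name, asset_name, len(contents)))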
<SYSTEM_TASK:>
Given a tag and single run, return an array of compressed histograms.
<END_TASK>
<USER_TASK:>
Description:
def distributions_route(self, request):
"""Given a tag and single run, return an array of compressed histograms."""
|
tag = request.args.get('tag')
run = request.args.get('run')
try:
(body, mime_type) = self.distributions_impl(tag, run)
code = 200
except ValueError as e:
(body, mime_type) = (str(e), 'text/plain')
code = 400
return http_util.Respond(request, body, mime_type, code=code)
|
<SYSTEM_TASK:>
Loads new values.
<END_TASK>
<USER_TASK:>
Description:
def Load(self):
"""Loads new values.
The watcher will load from one path at a time; as soon as that path stops
yielding events, it will move on to the next path. We assume that old paths
are never modified after a newer path has been written. As a result, Load()
can be called multiple times in a row without losing events that have not
been yielded yet. In other words, we guarantee that every event will be
yielded exactly once.
Yields:
All values that have not been yielded yet.
Raises:
DirectoryDeletedError: If the directory has been permanently deleted
(as opposed to being temporarily unavailable).
"""
|
try:
for event in self._LoadInternal():
yield event
except tf.errors.OpError:
if not tf.io.gfile.exists(self._directory):
raise DirectoryDeletedError(
'Directory %s has been permanently deleted' % self._directory)
|
<SYSTEM_TASK:>
Sets the current path to watch for new events.
<END_TASK>
<USER_TASK:>
Description:
def _SetPath(self, path):
"""Sets the current path to watch for new events.
This also records the size of the old path, if any. If the size can't be
found, an error is logged.
Args:
path: The full path of the file to watch.
"""
|
old_path = self._path
if old_path and not io_wrapper.IsCloudPath(old_path):
try:
# We're done with the path, so store its size.
size = tf.io.gfile.stat(old_path).length
logger.debug('Setting latest size of %s to %d', old_path, size)
self._finalized_sizes[old_path] = size
except tf.errors.OpError as e:
logger.error('Unable to get size of %s: %s', old_path, e)
self._path = path
self._loader = self._loader_factory(path)
|
<SYSTEM_TASK:>
Gets the next path to load from.
<END_TASK>
<USER_TASK:>
Description:
def _GetNextPath(self):
"""Gets the next path to load from.
This function also does the checking for out-of-order writes as it iterates
through the paths.
Returns:
The next path to load events from, or None if there are no more paths.
"""
|
paths = sorted(path
for path in io_wrapper.ListDirectoryAbsolute(self._directory)
if self._path_filter(path))
if not paths:
return None
if self._path is None:
return paths[0]
# Don't bother checking if the paths are GCS (which we can't check) or if
# we've already detected an OOO write.
if not io_wrapper.IsCloudPath(paths[0]) and not self._ooo_writes_detected:
# Check the previous _OOO_WRITE_CHECK_COUNT paths for out of order writes.
current_path_index = bisect.bisect_left(paths, self._path)
ooo_check_start = max(0, current_path_index - self._OOO_WRITE_CHECK_COUNT)
for path in paths[ooo_check_start:current_path_index]:
if self._HasOOOWrite(path):
self._ooo_writes_detected = True
break
next_paths = list(path
for path in paths
if self._path is None or path > self._path)
if next_paths:
return min(next_paths)
else:
return None
|
<SYSTEM_TASK:>
Returns whether the path has had an out-of-order write.
<END_TASK>
<USER_TASK:>
Description:
def _HasOOOWrite(self, path):
"""Returns whether the path has had an out-of-order write."""
|
# Check the sizes of each path before the current one.
size = tf.io.gfile.stat(path).length
old_size = self._finalized_sizes.get(path, None)
if size != old_size:
if old_size is None:
logger.error('File %s created after file %s even though it\'s '
'lexicographically earlier', path, self._path)
else:
logger.error('File %s updated even though the current file is %s',
path, self._path)
return True
else:
return False
|
<SYSTEM_TASK:>
Returns a number of examples from the provided path.
<END_TASK>
<USER_TASK:>
Description:
def example_protos_from_path(path,
num_examples=10,
start_index=0,
parse_examples=True,
sampling_odds=1,
example_class=tf.train.Example):
"""Returns a number of examples from the provided path.
Args:
path: A string path to the examples.
num_examples: The maximum number of examples to return from the path.
parse_examples: If true then parses the serialized proto from the path into
proto objects. Defaults to True.
sampling_odds: Odds of loading an example, used for sampling. When >= 1
(the default), then all examples are loaded.
example_class: tf.train.Example or tf.train.SequenceExample class to load.
Defaults to tf.train.Example.
Returns:
A list of Example protos or serialized proto strings at the path.
Raises:
InvalidUserInputError: If examples cannot be procured from the path.
"""
|
def append_examples_from_iterable(iterable, examples):
for value in iterable:
if sampling_odds >= 1 or random.random() < sampling_odds:
examples.append(
example_class.FromString(value) if parse_examples else value)
if len(examples) >= num_examples:
return
examples = []
if path.endswith('.csv'):
def are_floats(values):
for value in values:
try:
float(value)
except ValueError:
return False
return True
csv.register_dialect('CsvDialect', skipinitialspace=True)
rows = csv.DictReader(open(path), dialect='CsvDialect')
for row in rows:
if sampling_odds < 1 and random.random() > sampling_odds:
continue
example = tf.train.Example()
for col in row.keys():
# Parse out individual values from vertical-bar-delimited lists
values = [val.strip() for val in row[col].split('|')]
if are_floats(values):
example.features.feature[col].float_list.value.extend(
[float(val) for val in values])
else:
example.features.feature[col].bytes_list.value.extend(
[val.encode('utf-8') for val in values])
examples.append(
example if parse_examples else example.SerializeToString())
if len(examples) >= num_examples:
break
return examples
filenames = filepath_to_filepath_list(path)
compression_types = [
'', # no compression (distinct from `None`!)
'GZIP',
'ZLIB',
]
current_compression_idx = 0
current_file_index = 0
while (current_file_index < len(filenames) and
current_compression_idx < len(compression_types)):
try:
record_iterator = tf.compat.v1.python_io.tf_record_iterator(
path=filenames[current_file_index],
options=tf.io.TFRecordOptions(
compression_types[current_compression_idx]))
append_examples_from_iterable(record_iterator, examples)
current_file_index += 1
if len(examples) >= num_examples:
break
except tf.errors.DataLossError:
current_compression_idx += 1
except (IOError, tf.errors.NotFoundError) as e:
raise common_utils.InvalidUserInputError(e)
if examples:
return examples
else:
raise common_utils.InvalidUserInputError(
'No examples found at ' + path +
'. Valid formats are TFRecord files.')
|
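A usage sketch; the file paths are hypothetical.
# Load up to 20 parsed tf.train.Example protos, sampling roughly half of them.
examples = example_protos_from_path('/tmp/data/examples.tfrecord',
                                    num_examples=20,
                                    sampling_odds=0.5)
# CSV files are also accepted; each column becomes a feature.
csv_examples = example_protos_from_path('/tmp/data/examples.csv')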
<SYSTEM_TASK:>
Send an RPC request to the Servomatic prediction service.
<END_TASK>
<USER_TASK:>
Description:
def call_servo(examples, serving_bundle):
"""Send an RPC request to the Servomatic prediction service.
Args:
examples: A list of examples that matches the model spec.
serving_bundle: A `ServingBundle` object that contains the information to
make the serving request.
Returns:
A ClassificationResponse or RegressionResponse proto.
"""
|
parsed_url = urlparse('http://' + serving_bundle.inference_address)
channel = implementations.insecure_channel(parsed_url.hostname,
parsed_url.port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
if serving_bundle.use_predict:
request = predict_pb2.PredictRequest()
elif serving_bundle.model_type == 'classification':
request = classification_pb2.ClassificationRequest()
else:
request = regression_pb2.RegressionRequest()
request.model_spec.name = serving_bundle.model_name
if serving_bundle.model_version is not None:
request.model_spec.version.value = serving_bundle.model_version
if serving_bundle.signature is not None:
request.model_spec.signature_name = serving_bundle.signature
if serving_bundle.use_predict:
# tf.compat.v1 API used here to convert tf.example into proto. This
# utility file is bundled in the witwidget pip package which has a dep
# on TensorFlow.
request.inputs[serving_bundle.predict_input_tensor].CopyFrom(
tf.compat.v1.make_tensor_proto(
values=[ex.SerializeToString() for ex in examples],
dtype=types_pb2.DT_STRING))
else:
request.input.example_list.examples.extend(examples)
if serving_bundle.use_predict:
return common_utils.convert_predict_response(
stub.Predict(request, 30.0), serving_bundle) # 30 secs timeout
elif serving_bundle.model_type == 'classification':
return stub.Classify(request, 30.0) # 30 secs timeout
else:
return stub.Regress(request, 30.0)
|
<SYSTEM_TASK:>
Convert `value` to a new-style value, if necessary and possible.
<END_TASK>
<USER_TASK:>
Description:
def migrate_value(value):
"""Convert `value` to a new-style value, if necessary and possible.
An "old-style" value is a value that uses any `value` field other than
the `tensor` field. A "new-style" value is a value that uses the
`tensor` field. TensorBoard continues to support old-style values on
disk; this method converts them to new-style values so that further
code need only deal with one data format.
Arguments:
value: A `Summary.Value` object. This argument is not modified.
Returns:
If the `value` is an old-style value for which there is a new-style
equivalent, the result is the new-style value. Otherwise---if the
value is already new-style or does not yet have a new-style
equivalent---the value will be returned unchanged.
:type value: Summary.Value
:rtype: Summary.Value
"""
|
handler = {
'histo': _migrate_histogram_value,
'image': _migrate_image_value,
'audio': _migrate_audio_value,
'simple_value': _migrate_scalar_value,
}.get(value.WhichOneof('value'))
return handler(value) if handler else value
|
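A small sketch of the dispatch in action, assuming `tf.compat.v1.Summary` is available in this module:
old_style = tf.compat.v1.Summary.Value(tag='loss', simple_value=0.25)
new_style = migrate_value(old_style)    # converted to use the `tensor` field
unchanged = migrate_value(new_style)    # already new-style, returned as-is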
<SYSTEM_TASK:>
Obtains a mapping between routes and handlers. Stores the logdir.
<END_TASK>
<USER_TASK:>
Description:
def get_plugin_apps(self):
"""Obtains a mapping between routes and handlers. Stores the logdir.
Returns:
A mapping between routes and handlers (functions that respond to
requests).
"""
|
return {
'/infer': self._infer,
'/update_example': self._update_example,
'/examples_from_path': self._examples_from_path_handler,
'/sprite': self._serve_sprite,
'/duplicate_example': self._duplicate_example,
'/delete_example': self._delete_example,
'/infer_mutants': self._infer_mutants_handler,
'/eligible_features': self._eligible_features_from_example_handler,
}
|
<SYSTEM_TASK:>
Returns JSON of the specified examples.
<END_TASK>
<USER_TASK:>
Description:
def _examples_from_path_handler(self, request):
"""Returns JSON of the specified examples.
Args:
request: A request that should contain 'examples_path', 'max_examples',
'sampling_odds', and optionally 'sequence_examples'.
Returns:
JSON of up to max_examples of the examples in the path.
"""
|
examples_count = int(request.args.get('max_examples'))
examples_path = request.args.get('examples_path')
sampling_odds = float(request.args.get('sampling_odds'))
self.example_class = (tf.train.SequenceExample
if request.args.get('sequence_examples') == 'true'
else tf.train.Example)
try:
platform_utils.throw_if_file_access_not_allowed(examples_path,
self._logdir,
self._has_auth_group)
example_strings = platform_utils.example_protos_from_path(
examples_path, examples_count, parse_examples=False,
sampling_odds=sampling_odds, example_class=self.example_class)
self.examples = [
self.example_class.FromString(ex) for ex in example_strings]
self.generate_sprite(example_strings)
json_examples = [
json_format.MessageToJson(example) for example in self.examples
]
self.updated_example_indices = set(range(len(json_examples)))
return http_util.Respond(
request,
{'examples': json_examples,
'sprite': True if self.sprite else False}, 'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, {'error': e.message},
'application/json', code=400)
|
<SYSTEM_TASK:>
Updates the specified example.
<END_TASK>
<USER_TASK:>
Description:
def _update_example(self, request):
"""Updates the specified example.
Args:
request: A request that should contain 'index' and 'example'.
Returns:
An empty response.
"""
|
if request.method != 'POST':
return http_util.Respond(request, {'error': 'invalid non-POST request'},
'application/json', code=405)
example_json = request.form['example']
index = int(request.form['index'])
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
new_example = self.example_class()
json_format.Parse(example_json, new_example)
self.examples[index] = new_example
self.updated_example_indices.add(index)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
|
<SYSTEM_TASK:>
Duplicates the specified example.
<END_TASK>
<USER_TASK:>
Description:
def _duplicate_example(self, request):
"""Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
|
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
new_example = self.example_class()
new_example.CopyFrom(self.examples[index])
self.examples.append(new_example)
self.updated_example_indices.add(len(self.examples) - 1)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
|
<SYSTEM_TASK:>
Deletes the specified example.
<END_TASK>
<USER_TASK:>
Description:
def _delete_example(self, request):
"""Deletes the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
|
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
del self.examples[index]
self.updated_example_indices = set([
i if i < index else i - 1 for i in self.updated_example_indices])
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
|
<SYSTEM_TASK:>
Parses comma-separated request arguments.
<END_TASK>
<USER_TASK:>
Description:
def _parse_request_arguments(self, request):
"""Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters.
"""
|
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError('Every model should have a ' +
'name and address.')
return inference_addresses, model_names, model_versions, model_signatures
|
<SYSTEM_TASK:>
Returns a list of JSON objects for each feature in the example.
<END_TASK>
<USER_TASK:>
Description:
def _eligible_features_from_example_handler(self, request):
"""Returns a list of JSON objects for each feature in the example.
Args:
request: A request for features.
Returns:
A list with a JSON object for each feature.
Numeric features are represented as {name: observedMin: observedMax:}.
Categorical features are represented as {name: samples:[]}.
"""
|
features_list = inference_utils.get_eligible_features(
self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
return http_util.Respond(request, features_list, 'application/json')
|
<SYSTEM_TASK:>
Serves a pre-gzipped static asset from the zip file.
<END_TASK>
<USER_TASK:>
Description:
def _serve_asset(self, path, gzipped_asset_bytes, request):
"""Serves a pre-gzipped static asset from the zip file."""
|
mimetype = mimetypes.guess_type(path)[0] or 'application/octet-stream'
return http_util.Respond(
request, gzipped_asset_bytes, mimetype, content_encoding='gzip')
|
<SYSTEM_TASK:>
Serve a JSON object containing some base properties used by the frontend.
<END_TASK>
<USER_TASK:>
Description:
def _serve_environment(self, request):
"""Serve a JSON object containing some base properties used by the frontend.
* data_location is either a path to a directory or an address to a
database (depending on which mode TensorBoard is running in).
* window_title is the title of the TensorBoard web page.
"""
|
return http_util.Respond(
request,
{
'data_location': self._logdir or self._db_uri,
'mode': 'db' if self._db_uri else 'logdir',
'window_title': self._window_title,
},
'application/json')
|
<SYSTEM_TASK:>
Serve a JSON array of run names, ordered by run started time.
<END_TASK>
<USER_TASK:>
Description:
def _serve_runs(self, request):
"""Serve a JSON array of run names, ordered by run started time.
Sort order is by started time (aka first event time) with empty times sorted
last, and then ties are broken by sorting on the run name.
"""
|
if self._db_connection_provider:
db = self._db_connection_provider()
cursor = db.execute('''
SELECT
run_name,
started_time IS NULL as started_time_nulls_last,
started_time
FROM Runs
ORDER BY started_time_nulls_last, started_time, run_name
''')
run_names = [row[0] for row in cursor]
else:
# Python's list.sort is stable, so to order by started time and
# then by name, we can just do the sorts in the reverse order.
run_names = sorted(self._multiplexer.Runs())
def get_first_event_timestamp(run_name):
try:
return self._multiplexer.FirstEventTimestamp(run_name)
except ValueError as e:
logger.warn(
'Unable to get first event timestamp for run %s: %s', run_name, e)
# Put runs without a timestamp at the end.
return float('inf')
run_names.sort(key=get_first_event_timestamp)
return http_util.Respond(request, run_names, 'application/json')
|
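The stable-sort trick in the fallback branch can be seen in isolation; the run names and timestamps below are made up:
runs = ['b_run', 'a_run', 'c_run']
first_event = {'a_run': 5.0, 'c_run': 5.0}  # 'b_run' has no timestamp
ordered = sorted(runs)                      # secondary key first: run name
ordered.sort(key=lambda r: first_event.get(r, float('inf')))  # primary key
# ordered == ['a_run', 'c_run', 'b_run']: ties broken by name, missing last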
<SYSTEM_TASK:>
Put a message into the outgoing message stack.
<END_TASK>
<USER_TASK:>
Description:
def put(self, message):
"""Put a message into the outgoing message stack.
Outgoing messages are stored indefinitely to support multiple users.
"""
|
with self._outgoing_lock:
self._outgoing.append(message)
self._outgoing_counter += 1
# Check to see if there are pending queues waiting for the item.
if self._outgoing_counter in self._outgoing_pending_queues:
for q in self._outgoing_pending_queues[self._outgoing_counter]:
q.put(message)
del self._outgoing_pending_queues[self._outgoing_counter]
|
<SYSTEM_TASK:>
Run custom scalar demo and generate event files.
<END_TASK>
<USER_TASK:>
Description:
def run():
"""Run custom scalar demo and generate event files."""
|
step = tf.compat.v1.placeholder(tf.float32, shape=[])
with tf.name_scope('loss'):
# Specify 2 different loss values, each tagged differently.
summary_lib.scalar('foo', tf.pow(0.9, step))
summary_lib.scalar('bar', tf.pow(0.85, step + 2))
# Log metric baz as well as upper and lower bounds for a margin chart.
middle_baz_value = step + 4 * tf.random.uniform([]) - 2
summary_lib.scalar('baz', middle_baz_value)
summary_lib.scalar('baz_lower',
middle_baz_value - 6.42 - tf.random.uniform([]))
summary_lib.scalar('baz_upper',
middle_baz_value + 6.42 + tf.random.uniform([]))
with tf.name_scope('trigFunctions'):
summary_lib.scalar('cosine', tf.cos(step))
summary_lib.scalar('sine', tf.sin(step))
summary_lib.scalar('tangent', tf.tan(step))
merged_summary = tf.compat.v1.summary.merge_all()
with tf.compat.v1.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
# We only need to specify the layout once (instead of per step).
layout_summary = summary_lib.custom_scalar_pb(
layout_pb2.Layout(category=[
layout_pb2.Category(
title='losses',
chart=[
layout_pb2.Chart(
title='losses',
multiline=layout_pb2.MultilineChartContent(
tag=[r'loss(?!.*margin.*)'],)),
layout_pb2.Chart(
title='baz',
margin=layout_pb2.MarginChartContent(
series=[
layout_pb2.MarginChartContent.Series(
value='loss/baz/scalar_summary',
lower='loss/baz_lower/scalar_summary',
upper='loss/baz_upper/scalar_summary'
),
],)),
]),
layout_pb2.Category(
title='trig functions',
chart=[
layout_pb2.Chart(
title='wave trig functions',
multiline=layout_pb2.MultilineChartContent(
tag=[
r'trigFunctions/cosine', r'trigFunctions/sine'
],)),
# The range of tangent is different. Give it its own chart.
layout_pb2.Chart(
title='tan',
multiline=layout_pb2.MultilineChartContent(
tag=[r'trigFunctions/tangent'],)),
],
# This category we care less about. Make it initially closed.
closed=True),
]))
writer.add_summary(layout_summary)
for i in xrange(42):
summary = sess.run(merged_summary, feed_dict={step: i})
writer.add_summary(summary, global_step=i)
|
<SYSTEM_TASK:>
Stores a config file used by the embedding projector.
<END_TASK>
<USER_TASK:>
Description:
def visualize_embeddings(summary_writer, config):
"""Stores a config file used by the embedding projector.
Args:
summary_writer: The summary writer used for writing events.
config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
proto that holds the configuration for the projector such as paths to
checkpoint files and metadata files for the embeddings. If
`config.model_checkpoint_path` is none, it defaults to the
`logdir` used by the summary_writer.
Raises:
ValueError: If the summary writer does not have a `logdir`.
"""
|
logdir = summary_writer.get_logdir()
# Sanity checks.
if logdir is None:
raise ValueError('Summary writer must have a logdir')
# Saving the config file in the logdir.
config_pbtxt = _text_format.MessageToString(config)
path = os.path.join(logdir, _projector_plugin.PROJECTOR_FILENAME)
with tf.io.gfile.GFile(path, 'w') as f:
f.write(config_pbtxt)
|
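A hedged usage sketch; the import location, tensor name, and paths are assumptions, not taken from the original source:
from tensorboard.plugins.projector import projector_config_pb2
config = projector_config_pb2.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = 'token_embedding/Variable'  # placeholder tensor name
embedding.metadata_path = 'metadata.tsv'            # placeholder metadata file
writer = tf.compat.v1.summary.FileWriter('/tmp/logs/embeddings')
visualize_embeddings(writer, config)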
<SYSTEM_TASK:>
Returns the last evaluations of the given metric at the given session.
<END_TASK>
<USER_TASK:>
Description:
def last_metric_eval(multiplexer, session_name, metric_name):
"""Returns the last evaluations of the given metric at the given session.
Args:
multiplexer: The EventMultiplexer instance allowing access to
the exported summary data.
session_name: String. The session name for which to get the metric
evaluations.
metric_name: api_pb2.MetricName proto. The name of the metric to use.
Returns:
A 3-tuple of the form [wall-time, step, value], denoting
the last evaluation of the metric, where wall-time denotes the wall time
in seconds since UNIX epoch of the time of the evaluation, step denotes
the training step at which the model is evaluated, and value denotes the
(scalar real) value of the metric.
Raises:
KeyError: If the given session does not have the metric.
"""
|
try:
run, tag = run_tag_from_session_and_metric(session_name, metric_name)
tensor_events = multiplexer.Tensors(run=run, tag=tag)
except KeyError as e:
raise KeyError(
'Can\'t find metric %s for session: %s. Underlying error message: %s'
% (metric_name, session_name, e))
last_event = tensor_events[-1]
# TODO(erez): Raise HParamsError if the tensor is not a 0-D real scalar.
return (last_event.wall_time,
last_event.step,
tf.make_ndarray(last_event.tensor_proto).item())
|
<SYSTEM_TASK:>
Obtains value for scalar event given blob and dtype enum.
<END_TASK>
<USER_TASK:>
Description:
def _get_value(self, scalar_data_blob, dtype_enum):
"""Obtains value for scalar event given blob and dtype enum.
Args:
scalar_data_blob: The blob obtained from the database.
dtype_enum: The enum representing the dtype.
Returns:
The scalar value.
"""
|
tensorflow_dtype = tf.DType(dtype_enum)
buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
return np.asscalar(buf)
|
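The decoding can be exercised in isolation; this is a sketch in which the blob is synthesized rather than read from the database:
dtype_enum = tf.float32.as_datatype_enum
blob = np.float32(3.25).tobytes()  # what the database row would hold
decoded = np.frombuffer(blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)[0]
# decoded == 3.25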
<SYSTEM_TASK:>
Given a tag and single run, return array of ScalarEvents.
<END_TASK>
<USER_TASK:>
Description:
def scalars_route(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
|
# TODO: return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
experiment = request.args.get('experiment')
output_format = request.args.get('format')
(body, mime_type) = self.scalars_impl(tag, run, experiment, output_format)
return http_util.Respond(request, body, mime_type)
|
<SYSTEM_TASK:>
Add a run to the multiplexer.
<END_TASK>
<USER_TASK:>
Description:
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
|
name = name or path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(@dandelionmane) - Make it impossible to overwrite an old path
# with a new path (just give the new path a distinct name)
logger.warn('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logger.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
tensor_size_guidance=self._tensor_size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
|
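Typical usage, assuming the surrounding `EventMultiplexer` class; the log paths are hypothetical:
multiplexer = EventMultiplexer()
multiplexer.AddRun('/tmp/logs/train', name='train')
multiplexer.AddRun('/tmp/logs/eval', name='eval')
multiplexer.Reload()  # Loads events for every run registered so far.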
<SYSTEM_TASK:>
Get index of runs and assets for a given plugin.
<END_TASK>
<USER_TASK:>
Description:
def PluginAssets(self, plugin_name):
"""Get index of runs and assets for a given plugin.
Args:
plugin_name: Name of the plugin we are checking for.
Returns:
A dictionary that maps from run_name to a list of plugin
assets for that run.
"""
|
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run: accum.PluginAssets(plugin_name) for run, accum in items}
|