<SYSTEM_TASK:>
Generate PR curve summaries.
<END_TASK>
<USER_TASK:>
Description:
def run_all(logdir, steps, thresholds, verbose=False):
"""Generate PR curve summaries.
Arguments:
logdir: The directory into which to store all the runs' data.
steps: The number of steps to run for.
thresholds: The number of thresholds to use for PR curves.
verbose: Whether to print the names of runs into stdout during execution.
"""
|
# First, we generate data for a PR curve that assigns even weights for
# predictions of all classes.
run_name = 'colors'
if verbose:
print('--- Running: %s' % run_name)
start_runs(
logdir=logdir,
steps=steps,
run_name=run_name,
thresholds=thresholds)
# Next, we generate data for a PR curve that assigns arbitrary weights to
# predictions.
run_name = 'mask_every_other_prediction'
if verbose:
print('--- Running: %s' % run_name)
start_runs(
logdir=logdir,
steps=steps,
run_name=run_name,
thresholds=thresholds,
mask_every_other_prediction=True)
|
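A minimal driver for the demo above might look like the following sketch; the log directory and parameter values are illustrative, not taken from the original source.

if __name__ == '__main__':
    # 50 thresholds gives reasonably smooth PR curves without much overhead.
    run_all(logdir='/tmp/pr_curve_demo',
            steps=10,
            thresholds=50,
            verbose=True)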
<SYSTEM_TASK:>
Write an image summary.
<END_TASK>
<USER_TASK:>
Description:
def image(name,
data,
step=None,
max_outputs=3,
description=None):
"""Write an image summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
where `k` is the number of images, `h` and `w` are the height and
width of the images, and `c` is the number of channels, which
should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
Any of the dimensions may be statically unknown (i.e., `None`).
Floating point data will be clipped to the range [0,1).
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many images will be emitted at each step. When more than
`max_outputs` many images are provided, the first `max_outputs` many
images will be used and the rest silently discarded.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
|
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
getattr(tf.summary.experimental, 'summary_scope', None) or
tf.summary.summary_scope)
with summary_scope(
name, 'image_summary', values=[data, max_outputs, step]) as (tag, _):
tf.debugging.assert_rank(data, 4)
tf.debugging.assert_non_negative(max_outputs)
images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
limited_images = images[:max_outputs]
encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
dtype=tf.string,
name='encode_each_image')
# Workaround for map_fn returning float dtype for an empty elems input.
encoded_images = tf.cond(
tf.shape(input=encoded_images)[0] > 0,
lambda: encoded_images, lambda: tf.constant([], tf.string))
image_shape = tf.shape(input=images)
dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
tf.as_string(image_shape[1], name='height')],
name='dimensions')
tensor = tf.concat([dimensions, encoded_images], axis=0)
return tf.summary.write(
tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
|
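For reference, a sketch of invoking the op above eagerly; the log directory and random pixel data are illustrative.

import numpy as np
import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/image_demo')
with writer.as_default():
    # Four 8x8 RGB images with float pixel values in [0, 1).
    pixels = np.random.uniform(size=(4, 8, 8, 3)).astype(np.float32)
    # Only the first two images are kept, per max_outputs.
    image('random_pixels', data=pixels, step=0, max_outputs=2)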
<SYSTEM_TASK:>
Sets the examples to be displayed in WIT.
<END_TASK>
<USER_TASK:>
Description:
def set_examples(self, examples):
"""Sets the examples to be displayed in WIT.
Args:
examples: List of example protos.
Returns:
self, in order to enable method chaining.
"""
|
self.store('examples', examples)
if len(examples) > 0:
self.store('are_sequence_examples',
isinstance(examples[0], tf.train.SequenceExample))
return self
|
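Because each setter returns `self`, configuration calls can be chained. A sketch, assuming a builder instance `builder` and a list of `examples` already exist (both names are hypothetical):

# `builder` and `examples` are assumed to exist; this only illustrates
# the chaining enabled by returning `self`.
builder.set_examples(examples).set_model_name('demo_model')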
<SYSTEM_TASK:>
Sets the model for inference as a TF Estimator.
<END_TASK>
<USER_TASK:>
Description:
def set_estimator_and_feature_spec(self, estimator, feature_spec):
"""Sets the model for inference as a TF Estimator.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a TF Estimator object as the model to query. In order to
accomplish this, a feature_spec must also be provided to parse the
example protos for input into the estimator.
Args:
estimator: The TF Estimator which will be used for model inference.
feature_spec: The feature_spec object which will be used for example
parsing.
Returns:
self, in order to enable method chaining.
"""
|
# If custom function is set, remove it before setting estimator
self.delete('custom_predict_fn')
self.store('estimator_and_spec', {
'estimator': estimator, 'feature_spec': feature_spec})
self.set_inference_address('estimator')
# If no model name has been set, give a default
if not self.has_model_name():
self.set_model_name('1')
return self
|
<SYSTEM_TASK:>
Sets a second model for inference as a TF Estimator.
<END_TASK>
<USER_TASK:>
Description:
def set_compare_estimator_and_feature_spec(self, estimator, feature_spec):
"""Sets a second model for inference as a TF Estimator.
If you wish to compare the results of two models in WIT, use this method
to setup the details of the second model.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a TF Estimator object as the model to query. In order to
accomplish this, a feature_spec must also be provided to parse the
example protos for input into the estimator.
Args:
estimator: The TF Estimator which will be used for model inference.
feature_spec: The feature_spec object which will be used for example
parsing.
Returns:
self, in order to enable method chaining.
"""
|
# If custom function is set, remove it before setting estimator
self.delete('compare_custom_predict_fn')
self.store('compare_estimator_and_spec', {
'estimator': estimator, 'feature_spec': feature_spec})
self.set_compare_inference_address('estimator')
# If no model name has been set, give a default
if not self.has_compare_model_name():
self.set_compare_model_name('2')
return self
|
<SYSTEM_TASK:>
Sets a custom function for inference.
<END_TASK>
<USER_TASK:>
Description:
def set_custom_predict_fn(self, predict_fn):
"""Sets a custom function for inference.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a custom function as the model to query. In this case, the
provided function should accept example protos and return:
- For classification: A 2D list of numbers. The first dimension is for
each example being predicted. The second dimension are the probabilities
for each class ID in the prediction.
- For regression: A 1D list of numbers, with a regression score for each
example being predicted.
Args:
predict_fn: The custom python function which will be used for model
inference.
Returns:
self, in order to enable method chaining.
"""
|
# If estimator is set, remove it before setting predict_fn
self.delete('estimator_and_spec')
self.store('custom_predict_fn', predict_fn)
self.set_inference_address('custom_predict_fn')
# If no model name has been set, give a default
if not self.has_model_name():
self.set_model_name('1')
return self
|
<SYSTEM_TASK:>
Sets a second custom function for inference.
<END_TASK>
<USER_TASK:>
Description:
def set_compare_custom_predict_fn(self, predict_fn):
"""Sets a second custom function for inference.
If you wish to compare the results of two models in WIT, use this method
to setup the details of the second model.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a custom function as the model to query. In this case, the
provided function should accept example protos and return:
- For classification: A 2D list of numbers. The first dimension is for
each example being predicted. The second dimension are the probabilities
for each class ID in the prediction.
- For regression: A 1D list of numbers, with a regression score for each
example being predicted.
Args:
predict_fn: The custom python function which will be used for model
inference.
Returns:
self, in order to enable method chaining.
"""
|
# If estimator is set, remove it before setting predict_fn
self.delete('compare_estimator_and_spec')
self.store('compare_custom_predict_fn', predict_fn)
self.set_compare_inference_address('custom_predict_fn')
# If no model name has been set, give a default
if not self.has_compare_model_name():
self.set_compare_model_name('2')
return self
|
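A sketch of a function satisfying the custom-prediction protocol described above: for classification it returns a 2D list with one row of class probabilities per example. The constant score stands in for a real model call.

def my_predict_fn(examples):
    results = []
    for _ in examples:
        score = 0.5  # a real model would compute this from the example proto
        results.append([1.0 - score, score])
    return results

# builder.set_custom_predict_fn(my_predict_fn)  # hypothetical builder object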
<SYSTEM_TASK:>
Decorator to define a function that lazily loads the module 'name'.
<END_TASK>
<USER_TASK:>
Description:
def lazy_load(name):
"""Decorator to define a function that lazily loads the module 'name'.
This can be used to defer importing troublesome dependencies - e.g. ones that
are large and infrequently used, or that cause a dependency cycle -
until they are actually used.
Args:
name: the fully-qualified name of the module; typically the last segment
of 'name' matches the name of the decorated function
Returns:
Decorator function that produces a lazy-loading module 'name' backed by the
underlying decorated function.
"""
|
def wrapper(load_fn):
# Wrap load_fn to call it exactly once and update __dict__ afterwards to
# make future lookups efficient (only failed lookups call __getattr__).
@_memoize
def load_once(self):
if load_once.loading:
raise ImportError("Circular import when resolving LazyModule %r" % name)
load_once.loading = True
try:
module = load_fn()
finally:
load_once.loading = False
self.__dict__.update(module.__dict__)
load_once.loaded = True
return module
load_once.loading = False
load_once.loaded = False
# Define a module that proxies getattr() and dir() to the result of calling
# load_once() the first time it's needed. The class is nested so we can close
# over load_once() and avoid polluting the module's attrs with our own state.
class LazyModule(types.ModuleType):
def __getattr__(self, attr_name):
return getattr(load_once(self), attr_name)
def __dir__(self):
return dir(load_once(self))
def __repr__(self):
if load_once.loaded:
return '<%r via LazyModule (loaded)>' % load_once(self)
return '<module %r via LazyModule (not yet loaded)>' % self.__name__
return LazyModule(name)
return wrapper
|
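The decorator above is used like this; the stdlib `json` module is chosen purely for illustration.

@lazy_load('json')
def json():
    import json  # deferred until the first attribute access on the proxy
    return json

# The real module is only imported when first used, e.g.:
# json.dumps({'a': 1})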
<SYSTEM_TASK:>
Memoizing decorator for f, which must have exactly 1 hashable argument.
<END_TASK>
<USER_TASK:>
Description:
def _memoize(f):
"""Memoizing decorator for f, which must have exactly 1 hashable argument."""
|
nothing = object() # Unique "no value" sentinel object.
cache = {}
# Use a reentrant lock so that if f references the resulting wrapper we die
# with recursion depth exceeded instead of deadlocking.
lock = threading.RLock()
@functools.wraps(f)
def wrapper(arg):
if cache.get(arg, nothing) is nothing:
with lock:
if cache.get(arg, nothing) is nothing:
cache[arg] = f(arg)
return cache[arg]
return wrapper
|
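A minimal illustration of the memoizer above; the function and prints are illustrative.

@_memoize
def square(x):
    print('computing %r' % x)
    return x * x

square(3)  # prints 'computing 3' and returns 9
square(3)  # served from the cache; nothing is printed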
<SYSTEM_TASK:>
Provide the root module of a TF-like API for use within TensorBoard.
<END_TASK>
<USER_TASK:>
Description:
def tf():
"""Provide the root module of a TF-like API for use within TensorBoard.
By default this is equivalent to `import tensorflow as tf`, but it can be used
in combination with //tensorboard/compat:tensorflow (to fall back to a stub TF
API implementation if the real one is not available) or with
//tensorboard/compat:no_tensorflow (to force unconditional use of the stub).
Returns:
The root module of a TF-like API, if available.
Raises:
ImportError: if a TF-like API is not available.
"""
|
try:
from tensorboard.compat import notf # pylint: disable=g-import-not-at-top
except ImportError:
try:
import tensorflow # pylint: disable=g-import-not-at-top
return tensorflow
except ImportError:
pass
from tensorboard.compat import tensorflow_stub # pylint: disable=g-import-not-at-top
return tensorflow_stub
|
<SYSTEM_TASK:>
Provide the root module of a TF-2.0 API for use within TensorBoard.
<END_TASK>
<USER_TASK:>
Description:
def tf2():
"""Provide the root module of a TF-2.0 API for use within TensorBoard.
Returns:
The root module of a TF-2.0 API, if available.
Raises:
ImportError: if a TF-2.0 API is not available.
"""
|
# Import the `tf` compat API from this file and check if it's already TF 2.0.
if tf.__version__.startswith('2.'):
return tf
elif hasattr(tf, 'compat') and hasattr(tf.compat, 'v2'):
# As a fallback, try `tensorflow.compat.v2` if it's defined.
return tf.compat.v2
raise ImportError('cannot import tensorflow 2.0 API')
|
<SYSTEM_TASK:>
Provide pywrap_tensorflow access in TensorBoard.
<END_TASK>
<USER_TASK:>
Description:
def _pywrap_tensorflow():
"""Provide pywrap_tensorflow access in TensorBoard.
pywrap_tensorflow cannot be accessed from tf.python.pywrap_tensorflow
and needs to be imported using
`from tensorflow.python import pywrap_tensorflow`. Therefore, we provide
a separate accessor function for it here.
NOTE: pywrap_tensorflow is not part of TensorFlow API and this
dependency will go away soon.
Returns:
pywrap_tensorflow import, if available.
Raises:
ImportError: if we couldn't import pywrap_tensorflow.
"""
|
try:
from tensorboard.compat import notf # pylint: disable=g-import-not-at-top
except ImportError:
try:
from tensorflow.python import pywrap_tensorflow # pylint: disable=g-import-not-at-top
return pywrap_tensorflow
except ImportError:
pass
from tensorboard.compat.tensorflow_stub import pywrap_tensorflow # pylint: disable=g-import-not-at-top
return pywrap_tensorflow
|
<SYSTEM_TASK:>
Returns a summary proto buffer holding this experiment.
<END_TASK>
<USER_TASK:>
Description:
def create_experiment_summary():
"""Returns a summary proto buffer holding this experiment."""
|
# Convert TEMPERATURE_LIST to google.protobuf.ListValue
temperature_list = struct_pb2.ListValue()
temperature_list.extend(TEMPERATURE_LIST)
materials = struct_pb2.ListValue()
materials.extend(HEAT_COEFFICIENTS.keys())
return summary.experiment_pb(
hparam_infos=[
api_pb2.HParamInfo(name='initial_temperature',
display_name='Initial temperature',
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list),
api_pb2.HParamInfo(name='ambient_temperature',
display_name='Ambient temperature',
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list),
api_pb2.HParamInfo(name='material',
display_name='Material',
type=api_pb2.DATA_TYPE_STRING,
domain_discrete=materials)
],
metric_infos=[
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag='temperature/current/scalar_summary'),
display_name='Current Temp.'),
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag='temperature/difference_to_ambient/scalar_summary'),
display_name='Difference To Ambient Temp.'),
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag='delta/scalar_summary'),
display_name='Delta T')
]
)
|
<SYSTEM_TASK:>
Runs a temperature simulation.
<END_TASK>
<USER_TASK:>
Description:
def run(logdir, session_id, hparams, group_name):
"""Runs a temperature simulation.
This will simulate an object at temperature `initial_temperature`
sitting at rest in a large room at temperature `ambient_temperature`.
The object has some intrinsic `heat_coefficient`, which indicates
how much thermal conductivity it has: for instance, metals have high
thermal conductivity, while the thermal conductivity of water is low.
Over time, the object's temperature will adjust to match the
temperature of its environment. We'll track the object's temperature,
how far it is from the room's temperature, and how much it changes at
each time step.
Arguments:
logdir: the top-level directory into which to write summary data
session_id: an id for the session.
hparams: A dictionary mapping a hyperparameter name to its value.
group_name: an id for the session group this session belongs to.
"""
|
tf.reset_default_graph()
tf.set_random_seed(0)
initial_temperature = hparams['initial_temperature']
ambient_temperature = hparams['ambient_temperature']
heat_coefficient = HEAT_COEFFICIENTS[hparams['material']]
session_dir = os.path.join(logdir, session_id)
writer = tf.summary.FileWriter(session_dir)
writer.add_summary(summary.session_start_pb(hparams=hparams,
group_name=group_name))
writer.flush()
with tf.name_scope('temperature'):
# Create a mutable variable to hold the object's temperature, and
# create a scalar summary to track its value over time. The name of
# the summary will appear as 'temperature/current' due to the
# name-scope above.
temperature = tf.Variable(
tf.constant(initial_temperature),
name='temperature')
scalar_summary.op('current', temperature,
display_name='Temperature',
description='The temperature of the object under '
'simulation, in Kelvins.')
# Compute how much the object's temperature differs from that of its
# environment, and track this, too: likewise, as
# 'temperature/difference_to_ambient'.
ambient_difference = temperature - ambient_temperature
scalar_summary.op('difference_to_ambient', ambient_difference,
display_name='Difference to ambient temperature',
description=('The difference between the ambient '
'temperature and the temperature of the '
'object under simulation, in Kelvins.'))
# Newton suggested that the rate of change of the temperature of an
# object is directly proportional to this `ambient_difference` above,
# where the proportionality constant is what we called the heat
# coefficient. But in real life, not everything is quite so clean, so
# we'll add in some noise. (The value of 50 is arbitrary, chosen to
# make the data look somewhat interesting. :-) )
noise = 50 * tf.random.normal([])
delta = -heat_coefficient * (ambient_difference + noise)
scalar_summary.op('delta', delta,
description='The change in temperature from the previous '
'step, in Kelvins.')
# Collect all the scalars that we want to keep track of.
summ = tf.summary.merge_all()
# Now, augment the current temperature by this delta that we computed,
# blocking the assignment on summary collection to avoid race conditions
# and ensure that the summary always reports the pre-update value.
with tf.control_dependencies([summ]):
update_step = temperature.assign_add(delta)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in xrange(FLAGS.num_steps):
# By asking TensorFlow to compute the update step, we force it to
# change the value of the temperature variable. We don't actually
# care about this value, so we discard it; instead, we grab the
# summary data computed along the way.
(s, _) = sess.run([summ, update_step])
if (step % FLAGS.summary_freq) == 0:
writer.add_summary(s, global_step=step)
writer.add_summary(summary.session_end_pb(api_pb2.STATUS_SUCCESS))
writer.close()
|
<SYSTEM_TASK:>
Return the registered filesystem for the given file.
<END_TASK>
<USER_TASK:>
Description:
def get_filesystem(filename):
"""Return the registered filesystem for the given file."""
|
filename = compat.as_str_any(filename)
prefix = ""
index = filename.find("://")
if index >= 0:
prefix = filename[:index]
fs = _REGISTERED_FILESYSTEMS.get(prefix, None)
if fs is None:
raise ValueError("No recognized filesystem for prefix %s" % prefix)
return fs
|
<SYSTEM_TASK:>
Recursive directory tree generator for directories.
<END_TASK>
<USER_TASK:>
Description:
def walk(top, topdown=True, onerror=None):
"""Recursive directory tree generator for directories.
Args:
top: string, a directory name.
topdown: bool; traverse in pre-order if True, post-order if False.
onerror: optional handler for errors. Should be a function; it will be
called with the error as its argument. Rethrowing the error aborts the walk.
Errors that happen while listing directories are ignored.
Yields:
Each yield is a 3-tuple: the pathname of a directory, followed by lists of
all its subdirectories and leaf files.
(dirname, [subdirname, subdirname, ...], [filename, filename, ...])
as strings
"""
|
top = compat.as_str_any(top)
fs = get_filesystem(top)
try:
listing = listdir(top)
except errors.NotFoundError as err:
if onerror:
onerror(err)
else:
return
files = []
subdirs = []
for item in listing:
full_path = fs.join(top, compat.as_str_any(item))
if isdir(full_path):
subdirs.append(item)
else:
files.append(item)
here = (top, subdirs, files)
if topdown:
yield here
for subdir in subdirs:
joined_subdir = fs.join(top, compat.as_str_any(subdir))
for subitem in walk(joined_subdir, topdown, onerror=onerror):
yield subitem
if not topdown:
yield here
|
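A sketch of consuming the generator above; the directory is illustrative.

for dirname, subdirs, files in walk('/tmp/logs'):
    print('%s: %d subdirs, %d files' % (dirname, len(subdirs), len(files)))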
<SYSTEM_TASK:>
Split an S3-prefixed URL into bucket and path.
<END_TASK>
<USER_TASK:>
Description:
def bucket_and_path(self, url):
"""Split an S3-prefixed URL into bucket and path."""
|
url = compat.as_str_any(url)
if url.startswith("s3://"):
url = url[len("s3://"):]
idx = url.index("/")
bucket = url[:idx]
path = url[(idx + 1):]
return bucket, path
|
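The same parsing, traced standalone on an illustrative URL:

url = 's3://my-bucket/path/to/file'
stripped = url[len('s3://'):]   # 'my-bucket/path/to/file'
idx = stripped.index('/')       # 9
bucket, path = stripped[:idx], stripped[idx + 1:]
assert (bucket, path) == ('my-bucket', 'path/to/file')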
<SYSTEM_TASK:>
Determines whether a path exists or not.
<END_TASK>
<USER_TASK:>
Description:
def exists(self, filename):
"""Determines whether a path exists or not."""
|
client = boto3.client("s3")
bucket, path = self.bucket_and_path(filename)
r = client.list_objects(Bucket=bucket, Prefix=path, Delimiter="/")
if r.get("Contents") or r.get("CommonPrefixes"):
return True
return False
|
<SYSTEM_TASK:>
Returns whether the path is a directory or not.
<END_TASK>
<USER_TASK:>
Description:
def isdir(self, dirname):
"""Returns whether the path is a directory or not."""
|
client = boto3.client("s3")
bucket, path = self.bucket_and_path(dirname)
if not path.endswith("/"):
path += "/" # This will now only retrieve subdir content
r = client.list_objects(Bucket=bucket, Prefix=path, Delimiter="/")
if r.get("Contents") or r.get("CommonPrefixes"):
return True
return False
|
<SYSTEM_TASK:>
Determine the most specific context that we're in.
<END_TASK>
<USER_TASK:>
Description:
def _get_context():
"""Determine the most specific context that we're in.
Returns:
_CONTEXT_COLAB: If in Colab with an IPython notebook context.
_CONTEXT_IPYTHON: If not in Colab, but we are in an IPython notebook
context (e.g., from running `jupyter notebook` at the command
line).
_CONTEXT_NONE: Otherwise (e.g., by running a Python script at the
command-line or using the `ipython` interactive shell).
"""
|
# In Colab, the `google.colab` module is available, but the shell
# returned by `IPython.get_ipython` does not have a `get_trait`
# method.
try:
import google.colab
import IPython
except ImportError:
pass
else:
if IPython.get_ipython() is not None:
# We'll assume that we're in a Colab notebook context.
return _CONTEXT_COLAB
# In an IPython command line shell or Jupyter notebook, we can
# directly query whether we're in a notebook context.
try:
import IPython
except ImportError:
pass
else:
ipython = IPython.get_ipython()
if ipython is not None and ipython.has_trait("kernel"):
return _CONTEXT_IPYTHON
# Otherwise, we're not in a known notebook context.
return _CONTEXT_NONE
|
<SYSTEM_TASK:>
Launch and display a TensorBoard instance as if at the command line.
<END_TASK>
<USER_TASK:>
Description:
def start(args_string):
"""Launch and display a TensorBoard instance as if at the command line.
Args:
args_string: Command-line arguments to TensorBoard, to be
interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0".
Shell metacharacters are not supported: e.g., "--logdir 2>&1" will
point the logdir at the literal directory named "2>&1".
"""
|
context = _get_context()
try:
import IPython
import IPython.display
except ImportError:
IPython = None
if context == _CONTEXT_NONE:
handle = None
print("Launching TensorBoard...")
else:
handle = IPython.display.display(
IPython.display.Pretty("Launching TensorBoard..."),
display_id=True,
)
def print_or_update(message):
if handle is None:
print(message)
else:
handle.update(IPython.display.Pretty(message))
parsed_args = shlex.split(args_string, comments=True, posix=True)
start_result = manager.start(parsed_args)
if isinstance(start_result, manager.StartLaunched):
_display(
port=start_result.info.port,
print_message=False,
display_handle=handle,
)
elif isinstance(start_result, manager.StartReused):
template = (
"Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. "
"(Use '!kill {pid}' to kill it.)"
)
message = template.format(
port=start_result.info.port,
pid=start_result.info.pid,
delta=_time_delta_from_info(start_result.info),
)
print_or_update(message)
_display(
port=start_result.info.port,
print_message=False,
display_handle=None,
)
elif isinstance(start_result, manager.StartFailed):
def format_stream(name, value):
if value == "":
return ""
elif value is None:
return "\n<could not read %s>" % name
else:
return "\nContents of %s:\n%s" % (name, value.strip())
message = (
"ERROR: Failed to launch TensorBoard (exited with %d).%s%s" %
(
start_result.exit_code,
format_stream("stderr", start_result.stderr),
format_stream("stdout", start_result.stdout),
)
)
print_or_update(message)
elif isinstance(start_result, manager.StartTimedOut):
message = (
"ERROR: Timed out waiting for TensorBoard to start. "
"It may still be running as pid %d."
% start_result.pid
)
print_or_update(message)
else:
raise TypeError(
"Unexpected result from `manager.start`: %r.\n"
"This is a TensorBoard bug; please report it."
% start_result
)
|
<SYSTEM_TASK:>
Format the elapsed time for the given TensorBoardInfo.
<END_TASK>
<USER_TASK:>
Description:
def _time_delta_from_info(info):
"""Format the elapsed time for the given TensorBoardInfo.
Args:
info: A TensorBoardInfo value.
Returns:
A human-readable string describing the time since the server
described by `info` started: e.g., "2 days, 0:48:58".
"""
|
delta_seconds = int(time.time()) - info.start_time
return str(datetime.timedelta(seconds=delta_seconds))
|
<SYSTEM_TASK:>
Display a TensorBoard instance already running on this machine.
<END_TASK>
<USER_TASK:>
Description:
def display(port=None, height=None):
"""Display a TensorBoard instance already running on this machine.
Args:
port: The port on which the TensorBoard server is listening, as an
`int`, or `None` to automatically select the most recently
launched TensorBoard.
height: The height of the frame into which to render the TensorBoard
UI, as an `int` number of pixels, or `None` to use a default value
(currently 800).
"""
|
_display(port=port, height=height, print_message=True, display_handle=None)
|
<SYSTEM_TASK:>
Internal version of `display`.
<END_TASK>
<USER_TASK:>
Description:
def _display(port=None, height=None, print_message=False, display_handle=None):
"""Internal version of `display`.
Args:
port: As with `display`.
height: As with `display`.
print_message: True to print which TensorBoard instance was selected
for display (if applicable), or False otherwise.
display_handle: If not None, an IPython display handle into which to
render TensorBoard.
"""
|
if height is None:
height = 800
if port is None:
infos = manager.get_all()
if not infos:
raise ValueError("Can't display TensorBoard: no known instances running.")
else:
info = max(infos, key=lambda x: x.start_time)
port = info.port
else:
infos = [i for i in manager.get_all() if i.port == port]
info = (
max(infos, key=lambda x: x.start_time)
if infos
else None
)
if print_message:
if info is not None:
message = (
"Selecting TensorBoard with {data_source} "
"(started {delta} ago; port {port}, pid {pid})."
).format(
data_source=manager.data_source_from_info(info),
delta=_time_delta_from_info(info),
port=info.port,
pid=info.pid,
)
print(message)
else:
# The user explicitly provided a port, and we don't have any
# additional information. There's nothing useful to say.
pass
fn = {
_CONTEXT_COLAB: _display_colab,
_CONTEXT_IPYTHON: _display_ipython,
_CONTEXT_NONE: _display_cli,
}[_get_context()]
return fn(port=port, height=height, display_handle=display_handle)
|
<SYSTEM_TASK:>
Print a listing of known running TensorBoard instances.
<END_TASK>
<USER_TASK:>
Description:
def list():
"""Print a listing of known running TensorBoard instances.
TensorBoard instances that were killed uncleanly (e.g., with SIGKILL
or SIGQUIT) may appear in this list even if they are no longer
running. Conversely, this list may be missing some entries if your
operating system's temporary directory has been cleared since a
still-running TensorBoard instance started.
"""
|
infos = manager.get_all()
if not infos:
print("No known TensorBoard instances running.")
return
print("Known TensorBoard instances:")
for info in infos:
template = " - port {port}: {data_source} (started {delta} ago; pid {pid})"
print(template.format(
port=info.port,
data_source=manager.data_source_from_info(info),
delta=_time_delta_from_info(info),
pid=info.pid,
))
|
<SYSTEM_TASK:>
Check the path name to see if it is probably a TF Events file.
<END_TASK>
<USER_TASK:>
Description:
def IsTensorFlowEventsFile(path):
"""Check the path name to see if it is probably a TF Events file.
Args:
path: A file path to check if it is an event file.
Raises:
ValueError: If the path is an empty string.
Returns:
True if `path` is formatted like a TensorFlow events file, else False.
"""
|
if not path:
raise ValueError('Path must be a nonempty string')
return 'tfevents' in tf.compat.as_str_any(os.path.basename(path))
|
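Illustrative checks against hypothetical paths:

assert IsTensorFlowEventsFile('/logs/events.out.tfevents.12345.myhost')
assert not IsTensorFlowEventsFile('/logs/checkpoint')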
<SYSTEM_TASK:>
Yields all files in the given directory. The paths are absolute.
<END_TASK>
<USER_TASK:>
Description:
def ListDirectoryAbsolute(directory):
"""Yields all files in the given directory. The paths are absolute."""
|
return (os.path.join(directory, path)
for path in tf.io.gfile.listdir(directory))
|
<SYSTEM_TASK:>
Escapes the glob characters in a path.
<END_TASK>
<USER_TASK:>
Description:
def _EscapeGlobCharacters(path):
"""Escapes the glob characters in a path.
Python 3 has a glob.escape method, but python 2 lacks it, so we manually
implement this method.
Args:
path: The absolute path to escape.
Returns:
The escaped path string.
"""
|
drive, path = os.path.splitdrive(path)
return '%s%s' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'[\1]', path))
|
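An illustrative call, assuming `_ESCAPE_GLOB_CHARACTERS_REGEX` matches the glob metacharacters `*`, `?`, `[`, and `]`:

print(_EscapeGlobCharacters('/logs/run[1]/*'))
# -> /logs/run[[]1[]]/[*]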
<SYSTEM_TASK:>
Recursively lists all files within the directory.
<END_TASK>
<USER_TASK:>
Description:
def ListRecursivelyViaGlobbing(top):
"""Recursively lists all files within the directory.
This method lists subdirectories in addition to regular files, and all of
the paths it yields are absolute. If the directory does not exist, this
yields nothing.
It does so by glob-ing deeper and deeper directories, i.e. foo/*, foo/*/*,
foo/*/*/* and so on, until all files are listed.
For certain file systems, globbing via this method may prove significantly
faster than recursively walking a directory. Specifically, TF file systems
that implement TensorFlow's FileSystem.GetMatchingPaths method could save
costly disk reads by using this method. However, for other file systems, this
method might prove slower because the file system performs a walk per call to
glob (in which case it might as well just perform 1 walk).
Args:
top: A path to a directory.
Yields:
A (dir_path, file_paths) tuple for each directory/subdirectory.
"""
|
current_glob_string = os.path.join(_EscapeGlobCharacters(top), '*')
level = 0
while True:
logger.info('GlobAndListFiles: Starting to glob level %d', level)
glob = tf.io.gfile.glob(current_glob_string)
logger.info(
'GlobAndListFiles: %d files glob-ed at level %d', len(glob), level)
if not glob:
# This subdirectory level lacks files. Terminate.
return
# Map subdirectory to a list of files.
pairs = collections.defaultdict(list)
for file_path in glob:
pairs[os.path.dirname(file_path)].append(file_path)
for dir_name, file_paths in six.iteritems(pairs):
yield (dir_name, tuple(file_paths))
if len(pairs) == 1:
# If at any point the glob returns files that are all in a single
# directory, replace the current globbing path with that directory as the
# literal prefix. This should improve efficiency in cases where a single
subdir is significantly deeper than the rest of the subdirs.
current_glob_string = os.path.join(list(pairs.keys())[0], '*')
# Iterate to the next level of subdirectories.
current_glob_string = os.path.join(current_glob_string, '*')
level += 1
|
<SYSTEM_TASK:>
Obtains all subdirectories with events files.
<END_TASK>
<USER_TASK:>
Description:
def GetLogdirSubdirectories(path):
"""Obtains all subdirectories with events files.
The order of the subdirectories returned is unspecified. The internal logic
that determines order varies by scenario.
Args:
path: The path to a directory under which to find subdirectories.
Returns:
A tuple of absolute paths of all subdirectories each with at least 1 events
file directly within the subdirectory.
Raises:
ValueError: If the path passed to the method exists and is not a directory.
"""
|
if not tf.io.gfile.exists(path):
# No directory to traverse.
return ()
if not tf.io.gfile.isdir(path):
raise ValueError('GetLogdirSubdirectories: path exists and is not a '
'directory, %s' % path)
if IsCloudPath(path):
# Glob-ing for files can be significantly faster than recursively
# walking through directories for some file systems.
logger.info(
'GetLogdirSubdirectories: Starting to list directories via glob-ing.')
traversal_method = ListRecursivelyViaGlobbing
else:
# For other file systems, the glob-ing based method might be slower because
# each call to glob could involve performing a recursive walk.
logger.info(
'GetLogdirSubdirectories: Starting to list directories via walking.')
traversal_method = ListRecursivelyViaWalking
return (
subdir
for (subdir, files) in traversal_method(path)
if any(IsTensorFlowEventsFile(f) for f in files)
)
|
<SYSTEM_TASK:>
Write an audio summary.
<END_TASK>
<USER_TASK:>
Description:
def audio(name,
data,
sample_rate,
step=None,
max_outputs=3,
encoding=None,
description=None):
"""Write an audio summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A `Tensor` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
be statically unknown (i.e., `None`).
sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
sample rate, in Hz. Must be positive.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many audio clips will be emitted at each step. When more than
`max_outputs` many clips are provided, the first `max_outputs`
many clips will be used and the rest silently discarded.
encoding: Optional constant `str` for the desired encoding. Only "wav"
is currently supported, but this is not guaranteed to remain the
default, so if you want "wav" in particular, set this explicitly.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
|
audio_ops = getattr(tf, 'audio', None)
if audio_ops is None:
# Fallback for older versions of TF without tf.audio.
from tensorflow.python.ops import gen_audio_ops as audio_ops
if encoding is None:
encoding = 'wav'
if encoding != 'wav':
raise ValueError('Unknown encoding: %r' % encoding)
summary_metadata = metadata.create_summary_metadata(
display_name=None,
description=description,
encoding=metadata.Encoding.Value('WAV'))
inputs = [data, sample_rate, max_outputs, step]
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
getattr(tf.summary.experimental, 'summary_scope', None) or
tf.summary.summary_scope)
with summary_scope(
name, 'audio_summary', values=inputs) as (tag, _):
tf.debugging.assert_rank(data, 3)
tf.debugging.assert_non_negative(max_outputs)
limited_audio = data[:max_outputs]
encode_fn = functools.partial(audio_ops.encode_wav,
sample_rate=sample_rate)
encoded_audio = tf.map_fn(encode_fn, limited_audio,
dtype=tf.string,
name='encode_each_audio')
# Workaround for map_fn returning float dtype for an empty elems input.
encoded_audio = tf.cond(
tf.shape(input=encoded_audio)[0] > 0,
lambda: encoded_audio, lambda: tf.constant([], tf.string))
limited_labels = tf.tile([''], tf.shape(input=limited_audio)[:1])
tensor = tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
return tf.summary.write(
tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
|
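A sketch of invoking the op above: one second of a 440 Hz sine tone with shape `[k, t, c]` = `[1, 44100, 1]`. The log directory is illustrative.

import math
import numpy as np
import tensorflow as tf

sample_rate = 44100
t = np.linspace(0.0, 1.0, sample_rate, dtype=np.float32)
tone = np.sin(2.0 * math.pi * 440.0 * t).reshape(1, sample_rate, 1)
writer = tf.summary.create_file_writer('/tmp/audio_demo')
with writer.as_default():
    audio('a440', data=tone, sample_rate=sample_rate, step=0)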
<SYSTEM_TASK:>
Determines whether a health pill event contains bad values.
<END_TASK>
<USER_TASK:>
Description:
def extract_numerics_alert(event):
"""Determines whether a health pill event contains bad values.
A bad value is one of NaN, -Inf, or +Inf.
Args:
event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary`
ops.
Returns:
An instance of `NumericsAlert`, if bad values are found.
`None`, if no bad values are found.
Raises:
ValueError: if the event does not have the expected tag prefix or the
debug op name is not the expected debug op name suffix.
"""
|
value = event.summary.value[0]
debugger_plugin_metadata_content = None
if value.HasField("metadata"):
plugin_data = value.metadata.plugin_data
if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:
debugger_plugin_metadata_content = plugin_data.content
if not debugger_plugin_metadata_content:
raise ValueError("Event proto input lacks debugger plugin SummaryMetadata.")
debugger_plugin_metadata_content = tf.compat.as_text(
debugger_plugin_metadata_content)
try:
content_object = json.loads(debugger_plugin_metadata_content)
device_name = content_object["device"]
except (KeyError, ValueError) as e:
raise ValueError("Could not determine device from JSON string %r, %r" %
(debugger_plugin_metadata_content, e))
debug_op_suffix = ":DebugNumericSummary"
if not value.node_name.endswith(debug_op_suffix):
raise ValueError(
"Event proto input does not have the expected debug op suffix %s" %
debug_op_suffix)
tensor_name = value.node_name[:-len(debug_op_suffix)]
elements = tf_debug.load_tensor_from_event(event)
nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX]
neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX]
pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX]
if nan_count > 0 or neg_inf_count > 0 or pos_inf_count > 0:
return NumericsAlert(
device_name, tensor_name, event.wall_time, nan_count, neg_inf_count,
pos_inf_count)
return None
|
<SYSTEM_TASK:>
Obtain the first timestamp.
<END_TASK>
<USER_TASK:>
Description:
def first_timestamp(self, event_key=None):
"""Obtain the first timestamp.
Args:
event_key: the type key of the sought events (e.g., constants.NAN_KEY).
If None, includes all event type keys.
Returns:
First (earliest) timestamp of all the events of the given type (or all
event types if event_key is None).
"""
|
if event_key is None:
timestamps = [self._trackers[key].first_timestamp
for key in self._trackers]
return min(timestamp for timestamp in timestamps if timestamp >= 0)
else:
return self._trackers[event_key].first_timestamp
|
<SYSTEM_TASK:>
Obtain the last timestamp.
<END_TASK>
<USER_TASK:>
Description:
def last_timestamp(self, event_key=None):
"""Obtain the last timestamp.
Args:
event_key: the type key of the sought events (e.g., constants.NAN_KEY). If
None, includes all event type keys.
Returns:
Last (latest) timestamp of all the events of the given type (or all
event types if event_key is None).
"""
|
if event_key is None:
timestamps = [self._trackers[key].last_timestamp
for key in self._trackers]
return max(timestamp for timestamp in timestamps if timestamp >= 0)
else:
return self._trackers[event_key].last_timestamp
|
<SYSTEM_TASK:>
Register an alerting numeric event.
<END_TASK>
<USER_TASK:>
Description:
def register(self, numerics_alert):
"""Register an alerting numeric event.
Args:
numerics_alert: An instance of `NumericsAlert`.
"""
|
key = (numerics_alert.device_name, numerics_alert.tensor_name)
if key in self._data:
self._data[key].add(numerics_alert)
else:
if len(self._data) < self._capacity:
history = NumericsAlertHistory()
history.add(numerics_alert)
self._data[key] = history
|
<SYSTEM_TASK:>
Generate wave data of the given form.
<END_TASK>
<USER_TASK:>
Description:
def run(logdir, run_name, wave_name, wave_constructor):
"""Generate wave data of the given form.
The provided function `wave_constructor` should accept a scalar tensor
of type float32, representing the frequency (in Hz) at which to
construct a wave, and return a tensor of shape [1, _samples(), `n`]
representing audio data (for some number of channels `n`).
Waves will be generated at frequencies ranging from A4 to A5.
Arguments:
logdir: the top-level directory into which to write summary data
run_name: the name of this run; will be created as a subdirectory
under logdir
wave_name: the name of the wave being generated
wave_constructor: see above
"""
|
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(0)
# On each step `i`, we'll set this placeholder to `i`. This allows us
# to know "what time it is" at each step.
step_placeholder = tf.compat.v1.placeholder(tf.float32, shape=[])
# We want to linearly interpolate a frequency between A4 (440 Hz) and
# A5 (880 Hz).
with tf.name_scope('compute_frequency'):
f_min = 440.0
f_max = 880.0
t = step_placeholder / (FLAGS.steps - 1)
frequency = f_min * (1.0 - t) + f_max * t
# Let's log this frequency, just so that we can make sure that it's as
# expected.
tf.compat.v1.summary.scalar('frequency', frequency)
# Now, we pass this to the wave constructor to get our waveform. Doing
# so within a name scope means that any summaries that the wave
# constructor produces will be namespaced.
with tf.name_scope(wave_name):
waveform = wave_constructor(frequency)
# We also have the opportunity to annotate each audio clip with a
# label. This is a good place to include the frequency, because it'll
# be visible immediately next to the audio clip.
with tf.name_scope('compute_labels'):
samples = tf.shape(input=waveform)[0]
wave_types = tf.tile(["*Wave type:* `%s`." % wave_name], [samples])
frequencies = tf.strings.join([
"*Frequency:* ",
tf.tile([tf.as_string(frequency, precision=2)], [samples]),
" Hz.",
])
samples = tf.strings.join([
"*Sample:* ", tf.as_string(tf.range(samples) + 1),
" of ", tf.as_string(samples), ".",
])
labels = tf.strings.join([wave_types, frequencies, samples], separator=" ")
# We can place a description next to the summary in TensorBoard. This
# is a good place to explain what the summary represents, methodology
# for creating it, etc. Let's include the source code of the function
# that generated the wave.
source = '\n'.join(' %s' % line.rstrip()
for line in inspect.getsourcelines(wave_constructor)[0])
description = ("A wave of type `%r`, generated via:\n\n%s"
% (wave_name, source))
# Here's the crucial piece: we interpret this result as audio.
summary.op('waveform', waveform, FLAGS.sample_rate,
labels=labels,
display_name=wave_name,
description=description)
# Now, we can collect up all the summaries and begin the run.
summ = tf.compat.v1.summary.merge_all()
sess = tf.compat.v1.Session()
writer = tf.compat.v1.summary.FileWriter(os.path.join(logdir, run_name))
writer.add_graph(sess.graph)
sess.run(tf.compat.v1.global_variables_initializer())
for step in xrange(FLAGS.steps):
s = sess.run(summ, feed_dict={step_placeholder: float(step)})
writer.add_summary(s, global_step=step)
writer.close()
|
<SYSTEM_TASK:>
Emit a sine wave at the given frequency.
<END_TASK>
<USER_TASK:>
Description:
def sine_wave(frequency):
"""Emit a sine wave at the given frequency."""
|
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
ts = xs / FLAGS.sample_rate
return tf.sin(2 * math.pi * frequency * ts)
|
<SYSTEM_TASK:>
Emit a triangle wave at the given frequency.
<END_TASK>
<USER_TASK:>
Description:
def triangle_wave(frequency):
"""Emit a triangle wave at the given frequency."""
|
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
ts = xs / FLAGS.sample_rate
#
# A triangle wave looks like this:
#
# /\ /\
# / \ / \
# \ / \ /
# \/ \/
#
# If we look at just half a period (the first four slashes in the
# diagram above), we can see that it looks like a transformed absolute
# value function.
#
# Let's start by computing the times relative to the start of each
# half-wave pulse (each individual "mountain" or "valley", of which
# there are four in the above diagram).
half_pulse_index = ts * (frequency * 2)
half_pulse_angle = half_pulse_index % 1.0 # in [0, 1]
#
# Now, we can see that each positive half-pulse ("mountain") has
# amplitude given by A(z) = 0.5 - abs(z - 0.5), and then normalized:
absolute_amplitude = (0.5 - tf.abs(half_pulse_angle - 0.5)) / 0.5
#
# But every other half-pulse is negative, so we should invert these.
half_pulse_parity = tf.sign(1 - (half_pulse_index % 2.0))
amplitude = half_pulse_parity * absolute_amplitude
#
# This is precisely the desired result, so we're done!
return amplitude
|
<SYSTEM_TASK:>
Emit two sine waves, in stereo at different octaves.
<END_TASK>
<USER_TASK:>
Description:
def bisine_wave(frequency):
"""Emit two sine waves, in stereo at different octaves."""
|
#
# We can first use our existing sine generator to produce two different
# waves.
f_hi = frequency
f_lo = frequency / 2.0
with tf.name_scope('hi'):
sine_hi = sine_wave(f_hi)
with tf.name_scope('lo'):
sine_lo = sine_wave(f_lo)
#
# Now, we have two tensors of shape [1, _samples(), 1]. By concatenating
# them along axis 2, we get a tensor of shape [1, _samples(), 2]---a
# stereo waveform.
return tf.concat([sine_lo, sine_hi], axis=2)
|
<SYSTEM_TASK:>
Emit two sine waves with balance oscillating left and right.
<END_TASK>
<USER_TASK:>
Description:
def bisine_wahwah_wave(frequency):
"""Emit two sine waves with balance oscillating left and right."""
|
#
# This is clearly intended to build on the bisine wave defined above,
# so we can start by generating that.
waves_a = bisine_wave(frequency)
#
# Then, by reversing axis 2, we swap the stereo channels. By mixing
# this with `waves_a`, we'll be able to create the desired effect.
waves_b = tf.reverse(waves_a, axis=[2])
#
# Let's have the balance oscillate from left to right four times.
iterations = 4
#
# Now, we compute the balance for each sample: `ts` has values
# in [0, 1] that indicate how much we should use `waves_a`.
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
thetas = xs / _samples() * iterations
ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2
#
# Finally, we can mix the two together, and we're done.
wave = ts * waves_a + (1.0 - ts) * waves_b
#
# Alternately, we can make the effect more pronounced by exaggerating
# the sample data. Let's emit both variations.
exaggerated_wave = wave ** 3.0
return tf.concat([wave, exaggerated_wave], axis=0)
|
<SYSTEM_TASK:>
Generate waves of the shapes defined above.
<END_TASK>
<USER_TASK:>
Description:
def run_all(logdir, verbose=False):
"""Generate waves of the shapes defined above.
Arguments:
logdir: the directory into which to store all the runs' data
verbose: if true, print out each run's name as it begins
"""
|
waves = [sine_wave, square_wave, triangle_wave,
bisine_wave, bisine_wahwah_wave]
for (i, wave_constructor) in enumerate(waves):
wave_name = wave_constructor.__name__
run_name = 'wave:%02d,%s' % (i + 1, wave_name)
if verbose:
print('--- Running: %s' % run_name)
run(logdir, run_name, wave_name, wave_constructor)
|
<SYSTEM_TASK:>
Returns a dict of all runs and tags and their data availabilities.
<END_TASK>
<USER_TASK:>
Description:
def info_impl(self):
"""Returns a dict of all runs and tags and their data availabilities."""
|
result = {}
def add_row_item(run, tag=None):
run_item = result.setdefault(run, {
'run': run,
'tags': {},
# A run-wide GraphDef of ops.
'run_graph': False})
tag_item = None
if tag:
tag_item = run_item.get('tags').setdefault(tag, {
'tag': tag,
'conceptual_graph': False,
# A tagged GraphDef of ops.
'op_graph': False,
'profile': False})
return (run_item, tag_item)
mapping = self._multiplexer.PluginRunToTagToContent(
_PLUGIN_NAME_RUN_METADATA_WITH_GRAPH)
for run_name, tag_to_content in six.iteritems(mapping):
for (tag, content) in six.iteritems(tag_to_content):
# The Summary op is defined in TensorFlow and does not use a stringified proto
# as the content of its plugin data. It contains a single string that denotes a version.
# https://github.com/tensorflow/tensorflow/blob/11f4ecb54708865ec757ca64e4805957b05d7570/tensorflow/python/ops/summary_ops_v2.py#L789-L790
if content != b'1':
logger.warn('Ignoring unrecognizable version of RunMetadata.')
continue
(_, tag_item) = add_row_item(run_name, tag)
tag_item['op_graph'] = True
# Tensors associated with plugin name _PLUGIN_NAME_RUN_METADATA contain
# both op graph and profile information.
mapping = self._multiplexer.PluginRunToTagToContent(
_PLUGIN_NAME_RUN_METADATA)
for run_name, tag_to_content in six.iteritems(mapping):
for (tag, content) in six.iteritems(tag_to_content):
if content != b'1':
logger.warn('Ignoring unrecognizable version of RunMetadata.')
continue
(_, tag_item) = add_row_item(run_name, tag)
tag_item['profile'] = True
tag_item['op_graph'] = True
# Tensors associated with plugin name _PLUGIN_NAME_KERAS_MODEL contain
# serialized Keras model in JSON format.
mapping = self._multiplexer.PluginRunToTagToContent(
_PLUGIN_NAME_KERAS_MODEL)
for run_name, tag_to_content in six.iteritems(mapping):
for (tag, content) in six.iteritems(tag_to_content):
if content != b'1':
logger.warn('Ignoring unrecognizable version of RunMetadata.')
continue
(_, tag_item) = add_row_item(run_name, tag)
tag_item['conceptual_graph'] = True
for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()):
if run_data.get(event_accumulator.GRAPH):
(run_item, _) = add_row_item(run_name, None)
run_item['run_graph'] = True
for (run_name, run_data) in six.iteritems(self._multiplexer.Runs()):
if event_accumulator.RUN_METADATA in run_data:
for tag in run_data[event_accumulator.RUN_METADATA]:
(_, tag_item) = add_row_item(run_name, tag)
tag_item['profile'] = True
return result
|
<SYSTEM_TASK:>
Given a single run, return the graph definition in protobuf format.
<END_TASK>
<USER_TASK:>
Description:
def graph_route(self, request):
"""Given a single run, return the graph definition in protobuf format."""
|
run = request.args.get('run')
tag = request.args.get('tag', '')
conceptual_arg = request.args.get('conceptual', False)
is_conceptual = (conceptual_arg == 'true')
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
limit_attr_size = request.args.get('limit_attr_size', None)
if limit_attr_size is not None:
try:
limit_attr_size = int(limit_attr_size)
except ValueError:
return http_util.Respond(
request, 'query parameter `limit_attr_size` must be an integer',
'text/plain', 400)
large_attrs_key = request.args.get('large_attrs_key', None)
try:
result = self.graph_impl(run, tag, is_conceptual, limit_attr_size, large_attrs_key)
except ValueError as e:
return http_util.Respond(request, str(e), 'text/plain', code=400)
else:
if result is not None:
(body, mime_type) = result # pylint: disable=unpacking-non-sequence
return http_util.Respond(request, body, mime_type)
else:
return http_util.Respond(request, '404 Not Found', 'text/plain',
code=404)
|
<SYSTEM_TASK:>
Create a Keras model with the given hyperparameters.
<END_TASK>
<USER_TASK:>
Description:
def model_fn(hparams, seed):
"""Create a Keras model with the given hyperparameters.
Args:
hparams: A dict mapping hyperparameters in `HPARAMS` to values.
seed: A hashable object to be used as a random seed (e.g., to
construct dropout layers in the model).
Returns:
A compiled Keras model.
"""
|
rng = random.Random(seed)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(INPUT_SHAPE))
model.add(tf.keras.layers.Reshape(INPUT_SHAPE + (1,))) # grayscale channel
# Add convolutional layers.
conv_filters = 8
for _ in xrange(hparams[HP_CONV_LAYERS]):
model.add(tf.keras.layers.Conv2D(
filters=conv_filters,
kernel_size=hparams[HP_CONV_KERNEL_SIZE],
padding="same",
activation="relu",
))
model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding="same"))
conv_filters *= 2
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))
# Add fully connected layers.
dense_neurons = 32
for _ in xrange(hparams[HP_DENSE_LAYERS]):
model.add(tf.keras.layers.Dense(dense_neurons, activation="relu"))
dense_neurons *= 2
# Add the final output layer.
model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=hparams[HP_OPTIMIZER],
metrics=["accuracy"],
)
return model
|
<SYSTEM_TASK:>
Load and normalize data.
<END_TASK>
<USER_TASK:>
Description:
def prepare_data():
"""Load and normalize data."""
|
((x_train, y_train), (x_test, y_test)) = DATASET.load_data()
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255.0
x_test /= 255.0
return ((x_train, y_train), (x_test, y_test))
|
<SYSTEM_TASK:>
Perform random search over the hyperparameter space.
<END_TASK>
<USER_TASK:>
Description:
def run_all(logdir, verbose=False):
"""Perform random search over the hyperparameter space.
Arguments:
logdir: The top-level directory into which to write data. This
directory should be empty or nonexistent.
verbose: If true, print out each run's name as it begins.
"""
|
data = prepare_data()
rng = random.Random(0)
base_writer = tf.summary.create_file_writer(logdir)
with base_writer.as_default():
experiment = hp.Experiment(hparams=HPARAMS, metrics=METRICS)
experiment_string = experiment.summary_pb().SerializeToString()
tf.summary.experimental.write_raw_pb(experiment_string, step=0)
base_writer.flush()
base_writer.close()
sessions_per_group = 2
num_sessions = flags.FLAGS.num_session_groups * sessions_per_group
session_index = 0 # across all session groups
for group_index in xrange(flags.FLAGS.num_session_groups):
hparams = {h: sample_uniform(h.domain, rng) for h in HPARAMS}
hparams_string = str(hparams)
group_id = hashlib.sha256(hparams_string.encode("utf-8")).hexdigest()
for repeat_index in xrange(sessions_per_group):
session_id = str(session_index)
session_index += 1
if verbose:
print(
"--- Running training session %d/%d"
% (session_index, num_sessions)
)
print(hparams_string)
print("--- repeat #: %d" % (repeat_index + 1))
run(
data=data,
base_logdir=logdir,
session_id=session_id,
group_id=group_id,
hparams=hparams,
)
|
<SYSTEM_TASK:>
Sample a value uniformly from a domain.
<END_TASK>
<USER_TASK:>
Description:
def sample_uniform(domain, rng):
"""Sample a value uniformly from a domain.
Args:
domain: An `IntInterval`, `RealInterval`, or `Discrete` domain.
rng: A `random.Random` object to use as the source of randomness.
Returns:
A single value drawn uniformly at random from `domain`.
Raises:
TypeError: If `domain` is not a known kind of domain.
IndexError: If the domain is empty.
"""
|
if isinstance(domain, hp.IntInterval):
return rng.randint(domain.min_value, domain.max_value)
elif isinstance(domain, hp.RealInterval):
return rng.uniform(domain.min_value, domain.max_value)
elif isinstance(domain, hp.Discrete):
return rng.choice(domain.values)
else:
raise TypeError("unknown domain type: %r" % (domain,))
|
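Illustrative draws from each domain kind handled above, assuming `hp` is `tensorboard.plugins.hparams.api`:

import random
from tensorboard.plugins.hparams import api as hp

rng = random.Random(0)
print(sample_uniform(hp.IntInterval(1, 3), rng))
print(sample_uniform(hp.RealInterval(0.0, 1.0), rng))
print(sample_uniform(hp.Discrete(['adam', 'sgd']), rng))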
<SYSTEM_TASK:>
A route that returns a JSON mapping between runs and PR curve data.
<END_TASK>
<USER_TASK:>
Description:
def pr_curves_route(self, request):
"""A route that returns a JSON mapping between runs and PR curve data.
Returns:
Given a tag and a comma-separated list of runs (both stored within GET
parameters), fetches a JSON object that maps between run name and objects
containing data required for PR curves for that run. Runs that either
cannot be found or that lack tags will be excluded from the response.
"""
|
runs = request.args.getlist('run')
if not runs:
return http_util.Respond(
request, 'No runs provided when fetching PR curve data',
'text/plain', 400)
tag = request.args.get('tag')
if not tag:
return http_util.Respond(
request, 'No tag provided when fetching PR curve data',
'text/plain', 400)
try:
response = http_util.Respond(
request, self.pr_curves_impl(runs, tag), 'application/json')
except ValueError as e:
return http_util.Respond(request, str(e), 'text/plain', 400)
return response
|
<SYSTEM_TASK:>
Converts a TensorEvent into a dict that encapsulates information on it.
<END_TASK>
<USER_TASK:>
Description:
def _process_tensor_event(self, event, thresholds):
"""Converts a TensorEvent into a dict that encapsulates information on it.
Args:
event: The TensorEvent to convert.
thresholds: An array of floats that ranges from 0 to 1 (in that
direction and inclusive of 0 and 1).
Returns:
A JSON-able dictionary of PR curve data for 1 step.
"""
|
return self._make_pr_entry(
event.step,
event.wall_time,
tensor_util.make_ndarray(event.tensor_proto),
thresholds)
|
<SYSTEM_TASK:>
Creates an entry for PR curve data. Each entry corresponds to 1 step.
<END_TASK>
<USER_TASK:>
Description:
def _make_pr_entry(self, step, wall_time, data_array, thresholds):
"""Creates an entry for PR curve data. Each entry corresponds to 1 step.
Args:
step: The step.
wall_time: The wall time.
data_array: A numpy array of PR curve data stored in the summary format.
thresholds: An array of floating point thresholds.
Returns:
A JSON-able dict of PR curve data for this step.
"""
|
# Trim entries for which TP + FP = 0 (precision is undefined) at the tail of
# the data.
true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]]
false_positives = [
int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]]
tp_index = metadata.TRUE_POSITIVES_INDEX
fp_index = metadata.FALSE_POSITIVES_INDEX
positives = data_array[[tp_index, fp_index], :].astype(int).sum(axis=0)
end_index_inclusive = len(positives) - 1
while end_index_inclusive > 0 and positives[end_index_inclusive] == 0:
end_index_inclusive -= 1
end_index = end_index_inclusive + 1
return {
'wall_time': wall_time,
'step': step,
'precision': data_array[metadata.PRECISION_INDEX, :end_index].tolist(),
'recall': data_array[metadata.RECALL_INDEX, :end_index].tolist(),
'true_positives': true_positives[:end_index],
'false_positives': false_positives[:end_index],
'true_negatives':
[int(v) for v in
data_array[metadata.TRUE_NEGATIVES_INDEX][:end_index]],
'false_negatives':
[int(v) for v in
data_array[metadata.FALSE_NEGATIVES_INDEX][:end_index]],
'thresholds': thresholds[:end_index],
}
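# Worked example with hypothetical numbers: for thresholds [0.0, 0.5, 1.0]
# with TP = [4, 2, 0] and FP = [3, 0, 0], positives = [7, 2, 0]. The final
# column has TP + FP == 0 (undefined precision), so end_index == 2 and every
# list in the returned entry is trimmed to length 2.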
|
<SYSTEM_TASK:>
Create a top-level experiment summary describing this experiment.
<END_TASK>
<USER_TASK:>
Description:
def summary_pb(self):
"""Create a top-level experiment summary describing this experiment.
The resulting summary should be written to a log directory that
encloses all the individual sessions' log directories.
Analogous to the low-level `experiment_pb` function in the
`hparams.summary` module.
"""
|
hparam_infos = []
for hparam in self._hparams:
info = api_pb2.HParamInfo(
name=hparam.name,
description=hparam.description,
display_name=hparam.display_name,
)
domain = hparam.domain
if domain is not None:
domain.update_hparam_info(info)
hparam_infos.append(info)
metric_infos = [metric.as_proto() for metric in self._metrics]
return summary.experiment_pb(
hparam_infos=hparam_infos,
metric_infos=metric_infos,
user=self._user,
description=self._description,
time_created_secs=self._time_created_secs,
)
|
<SYSTEM_TASK:>
Adds a named column of metadata values.
<END_TASK>
<USER_TASK:>
Description:
def add_column(self, column_name, column_values):
"""Adds a named column of metadata values.
Args:
column_name: Name of the column.
column_values: 1D array/list/iterable holding the column values. Must be
of length `num_points`. The i-th value corresponds to the i-th point.
Raises:
ValueError: If `column_values` is not a 1D array, is not of length
`num_points`, or if the `column_name` is already used.
"""
|
# Sanity checks.
if isinstance(column_values, list) and isinstance(column_values[0], list):
raise ValueError('"column_values" must be a flat list, but we detected '
'that its first entry is a list')
if isinstance(column_values, np.ndarray) and column_values.ndim != 1:
raise ValueError('"column_values" should be of rank 1, '
'but is of rank %d' % column_values.ndim)
if len(column_values) != self.num_points:
raise ValueError('"column_values" should be of length %d, but is of '
'length %d' % (self.num_points, len(column_values)))
if column_name in self.name_to_values:
raise ValueError('The column name "%s" is already used' % column_name)
self.column_names.append(column_name)
self.name_to_values[column_name] = column_values
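# Hedged usage sketch: assumes the enclosing object (called `meta` here, a
# hypothetical name) was constructed for num_points == 3; names and values
# are purely illustrative.
#
#   meta.add_column('label', ['cat', 'dog', 'bird'])
#   meta.add_column('score', np.array([0.9, 0.5, 0.7]))
#   meta.add_column('label', ['x', 'y', 'z'])  # raises ValueError: name reused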
|
<SYSTEM_TASK:>
Retrieve the histogram events associated with a run and tag.
<END_TASK>
<USER_TASK:>
Description:
def Histograms(self, run, tag):
"""Retrieve the histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.HistogramEvents`.
"""
|
accumulator = self.GetAccumulator(run)
return accumulator.Histograms(tag)
|
<SYSTEM_TASK:>
Retrieve the compressed histogram events associated with a run and tag.
<END_TASK>
<USER_TASK:>
Description:
def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
|
accumulator = self.GetAccumulator(run)
return accumulator.CompressedHistograms(tag)
|
<SYSTEM_TASK:>
Retrieve the image events associated with a run and tag.
<END_TASK>
<USER_TASK:>
Description:
def Images(self, run, tag):
"""Retrieve the image events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ImageEvents`.
"""
|
accumulator = self.GetAccumulator(run)
return accumulator.Images(tag)
|
<SYSTEM_TASK:>
Write a histogram summary.
<END_TASK>
<USER_TASK:>
Description:
def histogram(name, data, step=None, buckets=None, description=None):
"""Write a histogram summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A `Tensor` of any shape. Must be castable to `float64`.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
buckets: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
|
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
getattr(tf.summary.experimental, 'summary_scope', None) or
tf.summary.summary_scope)
with summary_scope(
name, 'histogram_summary', values=[data, buckets, step]) as (tag, _):
tensor = _buckets(data, bucket_count=buckets)
return tf.summary.write(
tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
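# Hedged usage sketch: writes one histogram summary using the standard TF2
# summary-writer API; the logdir path is illustrative.
#
#   writer = tf.summary.create_file_writer('/tmp/histogram_demo')
#   with writer.as_default():
#     histogram('activations', tf.random.normal([1000]), step=0, buckets=30)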
|
<SYSTEM_TASK:>
Create a histogram summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def histogram_pb(tag, data, buckets=None, description=None):
"""Create a histogram summary protobuf.
Arguments:
tag: String tag for the summary.
data: A `np.array` or array-like form of any shape. Must have type
castable to `float`.
buckets: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `summary_pb2.Summary` protobuf object.
"""
|
bucket_count = DEFAULT_BUCKET_COUNT if buckets is None else buckets
data = np.array(data).flatten().astype(float)
if data.size == 0:
buckets = np.array([]).reshape((0, 3))
else:
min_ = np.min(data)
max_ = np.max(data)
range_ = max_ - min_
if range_ == 0:
center = min_
buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]])
else:
bucket_width = range_ / bucket_count
offsets = data - min_
bucket_indices = np.floor(offsets / bucket_width).astype(int)
clamped_indices = np.minimum(bucket_indices, bucket_count - 1)
one_hots = (np.array([clamped_indices]).transpose()
== np.arange(0, bucket_count)) # broadcast
assert one_hots.shape == (data.size, bucket_count), (
one_hots.shape, (data.size, bucket_count))
bucket_counts = np.sum(one_hots, axis=0)
edges = np.linspace(min_, max_, bucket_count + 1)
left_edges = edges[:-1]
right_edges = edges[1:]
buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()
tensor = tensor_util.make_tensor_proto(buckets, dtype=np.float64)
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
metadata=summary_metadata,
tensor=tensor)
return summary
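# Hedged usage sketch: builds a 10-bucket histogram proto from numpy data.
# Each row of the resulting tensor is [left_edge, right_edge, count].
#
#   pb = histogram_pb('weights', np.random.normal(size=1000), buckets=10)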
|
<SYSTEM_TASK:>
Makes recommended modifications to the environment.
<END_TASK>
<USER_TASK:>
Description:
def setup_environment():
"""Makes recommended modifications to the environment.
This functions changes global state in the Python process. Calling
this function is a good idea, but it can't appropriately be called
from library routines.
"""
|
absl.logging.set_verbosity(absl.logging.WARNING)
# The default is HTTP/1.0 for some strange reason. If we don't use
# HTTP/1.1 then a new TCP socket and Python thread is created for
# each HTTP request. The tradeoff is we must always specify the
# Content-Length header, or do chunked encoding for streaming.
serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
|
<SYSTEM_TASK:>
Opens stock TensorBoard web assets collection.
<END_TASK>
<USER_TASK:>
Description:
def get_default_assets_zip_provider():
"""Opens stock TensorBoard web assets collection.
Returns:
A function that returns a newly opened file handle to a zip file
containing static assets for stock TensorBoard, or None if webfiles.zip
could not be found. The value the callback returns must be closed. The
paths inside the zip file are considered absolute paths on the web server.
"""
|
path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))),
'webfiles.zip')
if not os.path.exists(path):
logger.warning('webfiles.zip static assets not found: %s', path)
return None
return lambda: open(path, 'rb')
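# Hedged usage sketch: the provider is a zero-argument callable, and each
# handle it returns must be closed by the caller.
#
#   provider = get_default_assets_zip_provider()
#   if provider is not None:
#     with provider() as fp:
#       zip_bytes = fp.read()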
|
<SYSTEM_TASK:>
Create a server factory that performs port scanning.
<END_TASK>
<USER_TASK:>
Description:
def with_port_scanning(cls):
"""Create a server factory that performs port scanning.
This function returns a callable whose signature matches the
specification of `TensorBoardServer.__init__`, using `cls` as an
underlying implementation. It passes through `flags` unchanged except
in the case that `flags.port is None`, in which case it repeatedly
instantiates the underlying server with new port suggestions.
Args:
cls: A valid implementation of `TensorBoardServer`. This class's
initializer should raise a `TensorBoardPortInUseError` upon
failing to bind to a port when it is expected that binding to
another nearby port might succeed.
The initializer for `cls` will only ever be invoked with `flags`
such that `flags.port is not None`.
Returns:
A function that implements the `__init__` contract of
`TensorBoardServer`.
"""
|
def init(wsgi_app, flags):
# base_port: what's the first port to which we should try to bind?
# should_scan: if that fails, shall we try additional ports?
# max_attempts: how many ports shall we try?
should_scan = flags.port is None
base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port
max_attempts = 10 if should_scan else 1
if base_port > 0xFFFF:
raise TensorBoardServerException(
'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF)
)
# Clamp so that every port in the scanned range fits within [0, 0xFFFF].
base_port = min(base_port + max_attempts, 0x10000) - max_attempts
for port in xrange(base_port, base_port + max_attempts):
subflags = argparse.Namespace(**vars(flags))
subflags.port = port
try:
return cls(wsgi_app=wsgi_app, flags=subflags)
except TensorBoardPortInUseError:
if not should_scan:
raise
# All attempts failed to bind.
raise TensorBoardServerException(
'TensorBoard could not bind to any port around %s '
'(tried %d times)'
% (base_port, max_attempts))
return init
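# Hedged usage sketch: `MyServer` is a hypothetical TensorBoardServer
# implementation that raises TensorBoardPortInUseError on a busy port.
#
#   factory = with_port_scanning(MyServer)
#   server = factory(wsgi_app, flags)  # scans ports iff flags.port is None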
|
<SYSTEM_TASK:>
Configures TensorBoard behavior via flags.
<END_TASK>
<USER_TASK:>
Description:
def configure(self, argv=('',), **kwargs):
"""Configures TensorBoard behavior via flags.
This method will populate the "flags" property with an argparse.Namespace
representing flag values parsed from the provided argv list, overridden by
explicit flags from remaining keyword arguments.
Args:
argv: Can be set to CLI args equivalent to sys.argv; the first arg is
taken to be the name of the path being executed.
kwargs: Additional arguments will override what was parsed from
argv. They must be passed as Python data structures, e.g.
`foo=1` rather than `foo="1"`.
Returns:
Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism
for absl.app.run() compatibility.
Raises:
ValueError: If flag values are invalid.
"""
|
parser = argparse_flags.ArgumentParser(
prog='tensorboard',
description=('TensorBoard is a suite of web applications for '
'inspecting and understanding your TensorFlow runs '
'and graphs. https://github.com/tensorflow/tensorboard '))
for loader in self.plugin_loaders:
loader.define_flags(parser)
arg0 = argv[0] if argv else ''
flags = parser.parse_args(argv[1:]) # Strip binary name from argv.
self.cache_key = manager.cache_key(
working_directory=os.getcwd(),
arguments=argv[1:],
configure_kwargs=kwargs,
)
if absl_flags and arg0:
# Only expose main module Abseil flags as TensorBoard native flags.
# This is the same logic Abseil's ArgumentParser uses for determining
# which Abseil flags to include in the short helpstring.
for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)):
if hasattr(flags, flag.name):
raise ValueError('Conflicting Abseil flag: %s' % flag.name)
setattr(flags, flag.name, flag.value)
for k, v in kwargs.items():
if not hasattr(flags, k):
raise ValueError('Unknown TensorBoard flag: %s' % k)
setattr(flags, k, v)
for loader in self.plugin_loaders:
loader.fix_flags(flags)
self.flags = flags
return [arg0]
|
<SYSTEM_TASK:>
Blocking main function for TensorBoard.
<END_TASK>
<USER_TASK:>
Description:
def main(self, ignored_argv=('',)):
"""Blocking main function for TensorBoard.
This method is called by `tensorboard.main.run_main`, which is the
standard entrypoint for the tensorboard command line program. The
configure() method must be called first.
Args:
ignored_argv: Do not pass. Required for Abseil compatibility.
Returns:
Process exit code, i.e. 0 if successful or non-zero on failure. In
practice, an exception will most likely be raised instead of
returning non-zero.
:rtype: int
"""
|
self._install_signal_handler(signal.SIGTERM, "SIGTERM")
if self.flags.inspect:
logger.info('Not bringing up TensorBoard, but inspecting event files.')
event_file = os.path.expanduser(self.flags.event_file)
efi.inspect(self.flags.logdir, event_file, self.flags.tag)
return 0
if self.flags.version_tb:
print(version.VERSION)
return 0
try:
server = self._make_server()
sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' %
(version.VERSION, server.get_url()))
sys.stderr.flush()
self._register_info(server)
server.serve_forever()
return 0
except TensorBoardServerException as e:
logger.error(e.msg)
sys.stderr.write('ERROR: %s\n' % e.msg)
sys.stderr.flush()
return -1
|
<SYSTEM_TASK:>
Python API for launching TensorBoard.
<END_TASK>
<USER_TASK:>
Description:
def launch(self):
"""Python API for launching TensorBoard.
This method is the same as main() except it launches TensorBoard in
a separate permanent thread. The configure() method must be called
first.
Returns:
The URL of the TensorBoard web server.
:rtype: str
"""
|
# Make it easy to run TensorBoard inside other programs, e.g. Colab.
server = self._make_server()
thread = threading.Thread(target=server.serve_forever, name='TensorBoard')
thread.daemon = True
thread.start()
return server.get_url()
|
<SYSTEM_TASK:>
Write a TensorBoardInfo file and arrange for its cleanup.
<END_TASK>
<USER_TASK:>
Description:
def _register_info(self, server):
"""Write a TensorBoardInfo file and arrange for its cleanup.
Args:
server: The result of `self._make_server()`.
"""
|
server_url = urllib.parse.urlparse(server.get_url())
info = manager.TensorBoardInfo(
version=version.VERSION,
start_time=int(time.time()),
port=server_url.port,
pid=os.getpid(),
path_prefix=self.flags.path_prefix,
logdir=self.flags.logdir,
db=self.flags.db,
cache_key=self.cache_key,
)
atexit.register(manager.remove_info_file)
manager.write_info_file(info)
|
<SYSTEM_TASK:>
Set a signal handler to gracefully exit on the given signal.
<END_TASK>
<USER_TASK:>
Description:
def _install_signal_handler(self, signal_number, signal_name):
"""Set a signal handler to gracefully exit on the given signal.
When this process receives the given signal, it will run `atexit`
handlers and then exit with `0`.
Args:
signal_number: The numeric code for the signal to handle, like
`signal.SIGTERM`.
signal_name: The human-readable signal name.
"""
|
old_signal_handler = None # set below
def handler(handled_signal_number, frame):
# In case we catch this signal again while running atexit
# handlers, take the hint and actually die.
signal.signal(signal_number, signal.SIG_DFL)
sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name)
# The main thread is the only non-daemon thread, so it suffices to
# exit from here.
if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):
old_signal_handler(handled_signal_number, frame)
sys.exit(0)
old_signal_handler = signal.signal(signal_number, handler)
|
<SYSTEM_TASK:>
Constructs the TensorBoard WSGI app and instantiates the server.
<END_TASK>
<USER_TASK:>
Description:
def _make_server(self):
"""Constructs the TensorBoard WSGI app and instantiates the server."""
|
app = application.standard_tensorboard_wsgi(self.flags,
self.plugin_loaders,
self.assets_zip_provider)
return self.server_class(app, self.flags)
|
<SYSTEM_TASK:>
Returns a wildcard address for the port in question.
<END_TASK>
<USER_TASK:>
Description:
def _get_wildcard_address(self, port):
"""Returns a wildcard address for the port in question.
This will attempt to follow the best practice of calling getaddrinfo() with
a null host and AI_PASSIVE to request a server-side socket wildcard address.
If that succeeds, this returns the first IPv6 address found, or if none,
then returns the first IPv4 address. If that fails, then this returns the
hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0".
"""
|
fallback_address = '::' if socket.has_ipv6 else '0.0.0.0'
if hasattr(socket, 'AI_PASSIVE'):
try:
addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, socket.IPPROTO_TCP,
socket.AI_PASSIVE)
except socket.gaierror as e:
logger.warn('Failed to auto-detect wildcard address, assuming %s: %s',
fallback_address, str(e))
return fallback_address
addrs_by_family = defaultdict(list)
for family, _, _, _, sockaddr in addrinfos:
# Format of the "sockaddr" socket address varies by address family,
# but [0] is always the IP address portion.
addrs_by_family[family].append(sockaddr[0])
if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]:
return addrs_by_family[socket.AF_INET6][0]
if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]:
return addrs_by_family[socket.AF_INET][0]
logger.warn('Failed to auto-detect wildcard address, assuming %s',
fallback_address)
return fallback_address
|
<SYSTEM_TASK:>
Override to enable IPV4 mapping for IPV6 sockets when desired.
<END_TASK>
<USER_TASK:>
Description:
def server_bind(self):
"""Override to enable IPV4 mapping for IPV6 sockets when desired.
The main use case for this is so that when no host is specified, TensorBoard
can listen on all interfaces for both IPv4 and IPv6 connections, rather than
having to choose v4 or v6 and hope the browser didn't choose the other one.
"""
|
socket_is_v6 = (
hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6)
has_v6only_option = (
hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'))
if self._auto_wildcard and socket_is_v6 and has_v6only_option:
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except socket.error as e:
# Log a warning on failure to dual-bind, except for EAFNOSUPPORT
# since that's expected if IPv4 isn't supported at all (IPv6-only).
if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT:
logger.warn('Failed to dual-bind to IPv4 wildcard: %s', str(e))
super(WerkzeugServer, self).server_bind()
|
<SYSTEM_TASK:>
Override to get rid of noisy EPIPE errors.
<END_TASK>
<USER_TASK:>
Description:
def handle_error(self, request, client_address):
"""Override to get rid of noisy EPIPE errors."""
|
del request # unused
# Kludge to override a SocketServer.py method so we can get rid of noisy
# EPIPE errors. They're kind of a red herring as far as errors go. For
# example, `curl -N http://localhost:6006/ | head` will cause an EPIPE.
exc_info = sys.exc_info()
e = exc_info[1]
if isinstance(e, IOError) and e.errno == errno.EPIPE:
logger.warn('EPIPE caused by %s in HTTP serving' % str(client_address))
else:
logger.error('HTTP serving error', exc_info=exc_info)
|
<SYSTEM_TASK:>
Iterator over all catapult trace events, as python values.
<END_TASK>
<USER_TASK:>
Description:
def _events(self):
"""Iterator over all catapult trace events, as python values."""
|
for did, device in sorted(six.iteritems(self._proto.devices)):
if device.name:
yield dict(
ph=_TYPE_METADATA,
pid=did,
name='process_name',
args=dict(name=device.name))
yield dict(
ph=_TYPE_METADATA,
pid=did,
name='process_sort_index',
args=dict(sort_index=did))
for rid, resource in sorted(six.iteritems(device.resources)):
if resource.name:
yield dict(
ph=_TYPE_METADATA,
pid=did,
tid=rid,
name='thread_name',
args=dict(name=resource.name))
yield dict(
ph=_TYPE_METADATA,
pid=did,
tid=rid,
name='thread_sort_index',
args=dict(sort_index=rid))
# TODO(sammccall): filtering and downsampling?
for event in self._proto.trace_events:
yield self._event(event)
|
<SYSTEM_TASK:>
Converts a TraceEvent proto into a catapult trace event python value.
<END_TASK>
<USER_TASK:>
Description:
def _event(self, event):
"""Converts a TraceEvent proto into a catapult trace event python value."""
|
result = dict(
pid=event.device_id,
tid=event.resource_id,
name=event.name,
ts=event.timestamp_ps / 1000000.0)
if event.duration_ps:
result['ph'] = _TYPE_COMPLETE
result['dur'] = event.duration_ps / 1000000.0
else:
result['ph'] = _TYPE_INSTANT
result['s'] = _SCOPE_THREAD
for key in dict(event.args):
if 'args' not in result:
result['args'] = {}
result['args'][key] = event.args[key]
return result
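# Worked example with hypothetical values: an event on device 0, resource 1,
# with timestamp_ps=5000000 and duration_ps=2000000 becomes
#   {'pid': 0, 'tid': 1, 'name': ..., 'ts': 5.0, 'ph': _TYPE_COMPLETE, 'dur': 2.0}
# since the trace-event format expects microseconds (1 us == 10**6 ps).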
|
<SYSTEM_TASK:>
Create a legacy scalar summary op.
<END_TASK>
<USER_TASK:>
Description:
def op(name,
data,
display_name=None,
description=None,
collections=None):
"""Create a legacy scalar summary op.
Arguments:
name: A unique name for the generated summary node.
data: A real numeric rank-0 `Tensor`. Must have `dtype` castable
to `float32`.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[Graph Keys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
with tf.control_dependencies([tf.assert_scalar(data)]):
return tf.summary.tensor_summary(name='scalar_summary',
tensor=tf.cast(data, tf.float32),
collections=collections,
summary_metadata=summary_metadata)
|
<SYSTEM_TASK:>
Create a legacy scalar summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def pb(name, data, display_name=None, description=None):
"""Create a legacy scalar summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A rank-0 `np.array` or array-like form (so raw `int`s and
`float`s are fine, too).
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
data = np.array(data)
if data.shape != ():
raise ValueError('Expected scalar shape for data, saw shape: %s.'
% data.shape)
if data.dtype.kind not in ('b', 'i', 'u', 'f'): # bool, int, uint, float
raise ValueError('Cast %s to float is not supported' % data.dtype.name)
tensor = tf.make_tensor_proto(data.astype(np.float32))
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/scalar_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary
|
<SYSTEM_TASK:>
Creates temp symlink tree, runs program, and copies back outputs.
<END_TASK>
<USER_TASK:>
Description:
def run(inputs, program, outputs):
"""Creates temp symlink tree, runs program, and copies back outputs.
Args:
inputs: List of (fake path, real path) pairs used to build the symlink tree.
program: List containing real path of program and its arguments. The
execroot directory will be appended as the last argument.
outputs: List of (fake path, real path) pairs; results are copied back
from the fake paths to the real paths.
Returns:
0 if succeeded or nonzero if failed.
"""
|
root = tempfile.mkdtemp()
try:
cwd = os.getcwd()
for fake, real in inputs:
parent = os.path.join(root, os.path.dirname(fake))
if not os.path.exists(parent):
os.makedirs(parent)
# Use symlink if possible and not on Windows, since on Windows 10
# symlinks exist but they require administrator privileges to use.
if hasattr(os, 'symlink') and os.name != 'nt':
os.symlink(os.path.join(cwd, real), os.path.join(root, fake))
else:
shutil.copyfile(os.path.join(cwd, real), os.path.join(root, fake))
if subprocess.call(program + [root]) != 0:
return 1
for fake, real in outputs:
shutil.copyfile(os.path.join(root, fake), real)
return 0
finally:
try:
shutil.rmtree(root)
except EnvironmentError:
# Ignore "file in use" errors on Windows; ok since it's just a tmpdir.
pass
|
<SYSTEM_TASK:>
Invokes run function using a JSON file config.
<END_TASK>
<USER_TASK:>
Description:
def main(args):
"""Invokes run function using a JSON file config.
Args:
args: CLI args: paths to one or more JSON config files, each containing
an object whose attributes are the parameters to the `run` function. If
multiple JSON files are passed, their contents are concatenated.
Returns:
0 if succeeded or nonzero if failed.
Raises:
Exception: If input data is missing.
"""
|
if not args:
raise Exception('Please specify at least one JSON config path')
inputs = []
program = []
outputs = []
for arg in args:
with open(arg) as fd:
config = json.load(fd)
inputs.extend(config.get('inputs', []))
program.extend(config.get('program', []))
outputs.extend(config.get('outputs', []))
if not program:
raise Exception('Please specify a program')
return run(inputs, program, outputs)
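# Hedged example of a JSON config this accepts (all paths illustrative):
#
#   {
#     "inputs": [["assets/app.js", "bazel-out/app.js"]],
#     "program": ["/usr/bin/some-tool", "--flag"],
#     "outputs": [["out/bundle.zip", "bazel-out/bundle.zip"]]
#   }
#
# The execroot directory is appended to "program" as its final argument.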
|
<SYSTEM_TASK:>
Initializes the TensorBoard sqlite schema using the given connection.
<END_TASK>
<USER_TASK:>
Description:
def initialize_schema(connection):
"""Initializes the TensorBoard sqlite schema using the given connection.
Args:
connection: A sqlite DB connection.
"""
|
cursor = connection.cursor()
cursor.execute("PRAGMA application_id={}".format(_TENSORBOARD_APPLICATION_ID))
cursor.execute("PRAGMA user_version={}".format(_TENSORBOARD_USER_VERSION))
with connection:
for statement in _SCHEMA_STATEMENTS:
lines = statement.strip('\n').split('\n')
message = lines[0] + ('...' if len(lines) > 1 else '')
logger.debug('Running DB init statement: %s', message)
cursor.execute(statement)
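# Hedged usage sketch: any sqlite DB connection works, e.g. the standard
# library's sqlite3 (the path is illustrative).
#
#   import sqlite3
#   connection = sqlite3.connect('/tmp/tensorboard.sqlite')
#   initialize_schema(connection)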
|
<SYSTEM_TASK:>
Returns a freshly created DB-wide unique ID.
<END_TASK>
<USER_TASK:>
Description:
def _create_id(self):
"""Returns a freshly created DB-wide unique ID."""
|
cursor = self._db.cursor()
cursor.execute('INSERT INTO Ids DEFAULT VALUES')
return cursor.lastrowid
|
<SYSTEM_TASK:>
Get the raw encoded image data, downloading it if necessary.
<END_TASK>
<USER_TASK:>
Description:
def image_data(verbose=False):
"""Get the raw encoded image data, downloading it if necessary."""
|
# This is a principled use of the `global` statement; don't lint me.
global _IMAGE_DATA # pylint: disable=global-statement
if _IMAGE_DATA is None:
if verbose:
logger.info("--- Downloading image.")
with contextlib.closing(urllib.request.urlopen(IMAGE_URL)) as infile:
_IMAGE_DATA = infile.read()
return _IMAGE_DATA
|
<SYSTEM_TASK:>
Perform a 2D pixel convolution on the given image.
<END_TASK>
<USER_TASK:>
Description:
def convolve(image, pixel_filter, channels=3, name=None):
"""Perform a 2D pixel convolution on the given image.
Arguments:
image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
where `channels` is the third argument to this function and the
first two dimensions are arbitrary.
pixel_filter: A 2D `Tensor`, representing pixel weightings for the
kernel. This will be used to create a 4D kernel---the extra two
dimensions are for channels (see `tf.nn.conv2d` documentation),
and the kernel will be constructed so that the channels are
independent: each channel only observes the data from neighboring
pixels of the same channel.
channels: An integer representing the number of channels in the
image (e.g., 3 for RGB).
name: An optional name for the enclosing TensorFlow name scope.
Returns:
A 3D `float32` `Tensor` of the same shape as the input.
"""
|
with tf.name_scope(name, 'convolve'):
tf.compat.v1.assert_type(image, tf.float32)
channel_filter = tf.eye(channels)
filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) *
tf.expand_dims(tf.expand_dims(channel_filter, 0), 0))
result_batch = tf.nn.conv2d(tf.stack([image]), # batch
filter=filter_,
strides=[1, 1, 1, 1],
padding='SAME')
return result_batch[0]
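# Hedged usage sketch: applies a 3x3 box-blur kernel to a float32 RGB image;
# the zero image is just a stand-in for real pixel data.
#
#   blur = tf.ones([3, 3]) / 9.0
#   image = tf.zeros([64, 64, 3], tf.float32)
#   blurred = convolve(image, blur)  # same shape as `image`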
|
<SYSTEM_TASK:>
Get the image as a TensorFlow variable.
<END_TASK>
<USER_TASK:>
Description:
def get_image(verbose=False):
"""Get the image as a TensorFlow variable.
Returns:
A `tf.Variable`, which must be initialized prior to use:
invoke `sess.run(result.initializer)`."""
|
base_data = tf.constant(image_data(verbose=verbose))
base_image = tf.image.decode_image(base_data, channels=3)
base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3))
parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8)
return parsed_image
|
<SYSTEM_TASK:>
Get the value of a feature from Example regardless of feature type.
<END_TASK>
<USER_TASK:>
Description:
def proto_value_for_feature(example, feature_name):
"""Get the value of a feature from Example regardless of feature type."""
|
feature = get_example_features(example)[feature_name]
if feature is None:
raise ValueError('Feature {} is not on example proto.'.format(feature_name))
feature_type = feature.WhichOneof('kind')
if feature_type is None:
raise ValueError('Feature {} on example proto has no declared type.'.format(
feature_name))
return getattr(feature, feature_type).value
|
<SYSTEM_TASK:>
Returns an `OriginalFeatureList` for the specified feature_name.
<END_TASK>
<USER_TASK:>
Description:
def parse_original_feature_from_example(example, feature_name):
"""Returns an `OriginalFeatureList` for the specified feature_name.
Args:
example: An example.
feature_name: A string feature name.
Returns:
A filled in `OriginalFeatureList` object representing the feature.
"""
|
feature = get_example_features(example)[feature_name]
feature_type = feature.WhichOneof('kind')
original_value = proto_value_for_feature(example, feature_name)
return OriginalFeatureList(feature_name, original_value, feature_type)
|
<SYSTEM_TASK:>
Returns packaged inference results from the provided proto.
<END_TASK>
<USER_TASK:>
Description:
def wrap_inference_results(inference_result_proto):
"""Returns packaged inference results from the provided proto.
Args:
inference_result_proto: The classification or regression response proto.
Returns:
An InferenceResult proto with the result from the response.
"""
|
inference_proto = inference_pb2.InferenceResult()
if isinstance(inference_result_proto,
classification_pb2.ClassificationResponse):
inference_proto.classification_result.CopyFrom(
inference_result_proto.result)
elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
inference_proto.regression_result.CopyFrom(inference_result_proto.result)
return inference_proto
|
<SYSTEM_TASK:>
Returns a list of feature names for float and int64 type features.
<END_TASK>
<USER_TASK:>
Description:
def get_numeric_feature_names(example):
"""Returns a list of feature names for float and int64 type features.
Args:
example: An example.
Returns:
A list of strings of the names of numeric features.
"""
|
numeric_features = ('float_list', 'int64_list')
features = get_example_features(example)
return sorted([
feature_name for feature_name in features
if features[feature_name].WhichOneof('kind') in numeric_features
])
|
<SYSTEM_TASK:>
Returns a list of feature names for byte type features.
<END_TASK>
<USER_TASK:>
Description:
def get_categorical_feature_names(example):
"""Returns a list of feature names for byte type features.
Args:
example: An example.
Returns:
A list of categorical feature names (e.g. ['education', 'marital_status'] )
"""
|
features = get_example_features(example)
return sorted([
feature_name for feature_name in features
if features[feature_name].WhichOneof('kind') == 'bytes_list'
])
|
<SYSTEM_TASK:>
Returns numerical features and their observed ranges.
<END_TASK>
<USER_TASK:>
Description:
def get_numeric_features_to_observed_range(examples):
"""Returns numerical features and their observed ranges.
Args:
examples: Examples to read to get ranges.
Returns:
A dict mapping feature_name -> {'observedMin': ..., 'observedMax': ...},
with a key for each numerical feature.
"""
|
observed_features = collections.defaultdict(list) # name -> [value, ]
for example in examples:
for feature_name in get_numeric_feature_names(example):
original_feature = parse_original_feature_from_example(
example, feature_name)
observed_features[feature_name].extend(original_feature.original_value)
return {
feature_name: {
'observedMin': min(feature_values),
'observedMax': max(feature_values),
}
for feature_name, feature_values in iteritems(observed_features)
}
|
<SYSTEM_TASK:>
Returns categorical features and a sampling of their most-common values.
<END_TASK>
<USER_TASK:>
Description:
def get_categorical_features_to_sampling(examples, top_k):
"""Returns categorical features and a sampling of their most-common values.
The results of this slow function are used by the visualization repeatedly,
so the results are cached.
Args:
examples: Examples to read to get feature samples.
top_k: Max number of samples to return per feature.
Returns:
A dict of feature_name -> {'samples': ['Married-civ-spouse',
'Never-married', 'Divorced']}.
There is one key for each categorical feature.
Currently, the inner dict just has one key, but this structure leaves room
for further expansion, and mirrors the structure used by
`get_numeric_features_to_observed_range`.
"""
|
observed_features = collections.defaultdict(list) # name -> [value, ]
for example in examples:
for feature_name in get_categorical_feature_names(example):
original_feature = parse_original_feature_from_example(
example, feature_name)
observed_features[feature_name].extend(original_feature.original_value)
result = {}
for feature_name, feature_values in sorted(iteritems(observed_features)):
samples = [
word
for word, count in collections.Counter(feature_values).most_common(
top_k) if count > 1
]
if samples:
result[feature_name] = {'samples': samples}
return result
|
<SYSTEM_TASK:>
Return a list of `MutantFeatureValue`s that are variants of original.
<END_TASK>
<USER_TASK:>
Description:
def make_mutant_features(original_feature, index_to_mutate, viz_params):
"""Return a list of `MutantFeatureValue`s that are variants of original."""
|
lower = viz_params.x_min
upper = viz_params.x_max
examples = viz_params.examples
num_mutants = viz_params.num_mutants
if original_feature.feature_type == 'float_list':
return [
MutantFeatureValue(original_feature, index_to_mutate, value)
for value in np.linspace(lower, upper, num_mutants)
]
elif original_feature.feature_type == 'int64_list':
mutant_values = np.linspace(int(lower), int(upper),
num_mutants).astype(int).tolist()
# Remove duplicates that can occur due to integer constraint.
mutant_values = sorted(set(mutant_values))
return [
MutantFeatureValue(original_feature, index_to_mutate, value)
for value in mutant_values
]
elif original_feature.feature_type == 'bytes_list':
feature_to_samples = get_categorical_features_to_sampling(
examples, num_mutants)
# `mutant_values` looks like:
# ['Married-civ-spouse', 'Never-married', 'Divorced', 'Separated']
mutant_values = feature_to_samples[original_feature.feature_name]['samples']
return [
MutantFeatureValue(original_feature, None, value)
for value in mutant_values
]
else:
raise ValueError('Malformed original feature had type of: ' +
original_feature.feature_type)
|
<SYSTEM_TASK:>
Return a list of `MutantFeatureValue`s and a list of mutant Examples.
<END_TASK>
<USER_TASK:>
Description:
def make_mutant_tuples(example_protos, original_feature, index_to_mutate,
viz_params):
"""Return a list of `MutantFeatureValue`s and a list of mutant Examples.
Args:
example_protos: The examples to mutate.
original_feature: A `OriginalFeatureList` that encapsulates the feature to
mutate.
index_to_mutate: The index of the int64_list or float_list to mutate.
viz_params: A `VizParams` object that contains the UI state of the request.
Returns:
A list of `MutantFeatureValue`s and a list of mutant examples.
"""
|
mutant_features = make_mutant_features(original_feature, index_to_mutate,
viz_params)
mutant_examples = []
for example_proto in example_protos:
for mutant_feature in mutant_features:
copied_example = copy.deepcopy(example_proto)
feature_name = mutant_feature.original_feature.feature_name
try:
feature_list = proto_value_for_feature(copied_example, feature_name)
if index_to_mutate is None:
new_values = mutant_feature.mutant_value
else:
new_values = list(feature_list)
new_values[index_to_mutate] = mutant_feature.mutant_value
del feature_list[:]
feature_list.extend(new_values)
mutant_examples.append(copied_example)
except (ValueError, IndexError):
# If the mutant value can't be set, still add the example to
# mutant_examples even though no change was made. This is necessary to
# allow for computation of global PD plots when not all examples have
# the same number of feature values for a feature.
mutant_examples.append(copied_example)
return mutant_features, mutant_examples
|
<SYSTEM_TASK:>
Returns JSON formatted for rendering all charts for a feature.
<END_TASK>
<USER_TASK:>
Description:
def mutant_charts_for_feature(example_protos, feature_name, serving_bundles,
viz_params):
"""Returns JSON formatted for rendering all charts for a feature.
Args:
example_protos: The example protos to mutate.
feature_name: The string feature name to mutate.
serving_bundles: One `ServingBundle` object per model, that contains the
information to make the serving request.
viz_params: A `VizParams` object that contains the UI state of the request.
Raises:
InvalidUserInputError if `viz_params.feature_index_pattern` requests out of
range indices for `feature_name` within `example_protos`.
Returns:
A JSON-able dict for rendering the mutant charts, parsed in
`tf-inference-dashboard.html`:
{
'chartType': 'numeric', # oneof('numeric', 'categorical')
'data': [A list of data] # parseable by vz-line-chart or vz-bar-chart
}
"""
|
def chart_for_index(index_to_mutate):
mutant_features, mutant_examples = make_mutant_tuples(
example_protos, original_feature, index_to_mutate, viz_params)
charts = []
for serving_bundle in serving_bundles:
inference_result_proto = run_inference(mutant_examples, serving_bundle)
charts.append(make_json_formatted_for_single_chart(
mutant_features, inference_result_proto, index_to_mutate))
return charts
try:
original_feature = parse_original_feature_from_example(
example_protos[0], feature_name)
except ValueError:
# The feature could not be parsed from the example; return an empty chart.
return {
'chartType': 'categorical',
'data': []
}
indices_to_mutate = viz_params.feature_indices or range(
original_feature.length)
chart_type = ('categorical' if original_feature.feature_type == 'bytes_list'
else 'numeric')
try:
return {
'chartType': chart_type,
'data': [
chart_for_index(index_to_mutate)
for index_to_mutate in indices_to_mutate
]
}
except IndexError as e:
raise common_utils.InvalidUserInputError(e)
|
<SYSTEM_TASK:>
Returns JSON formatted for a single mutant chart.
<END_TASK>
<USER_TASK:>
Description:
def make_json_formatted_for_single_chart(mutant_features,
inference_result_proto,
index_to_mutate):
"""Returns JSON formatted for a single mutant chart.
Args:
mutant_features: An iterable of `MutantFeatureValue`s representing the
X-axis.
inference_result_proto: A ClassificationResponse or RegressionResponse
returned by Servo, representing the Y-axis.
It contains one 'classification' or 'regression' for every Example that
was sent for inference, so its length is a multiple of the length of
mutant_features (one result per example per mutant value).
index_to_mutate: The index of the feature being mutated for this chart.
Returns:
A JSON-able dict for rendering a single mutant chart, parseable by
`vz-line-chart` or `vz-bar-chart`.
"""
|
x_label = 'step'
y_label = 'scalar'
if isinstance(inference_result_proto,
classification_pb2.ClassificationResponse):
# classification_label -> [{x_label: y_label:}]
series = {}
# ClassificationResponse has a separate probability for each label
for idx, classification in enumerate(
inference_result_proto.result.classifications):
# For each example to use for mutant inference, we create a copied example
# with the feature in question changed to each possible mutant value. So
# when we get the inferences back, we get num_examples*num_mutants
# results. So, modding by len(mutant_features) allows us to correctly
# lookup the mutant value for each inference.
mutant_feature = mutant_features[idx % len(mutant_features)]
for class_index, classification_class in enumerate(
classification.classes):
# Fill in class index when labels are missing
if classification_class.label == '':
classification_class.label = str(class_index)
# Special case to not include the "0" class in binary classification,
# since that just results in a chart that is symmetric around 0.5.
if len(
classification.classes) == 2 and classification_class.label == '0':
continue
key = classification_class.label
if index_to_mutate:
key += ' (index %d)' % index_to_mutate
if key not in series:
series[key] = {}
if mutant_feature.mutant_value not in series[key]:
series[key][mutant_feature.mutant_value] = []
series[key][mutant_feature.mutant_value].append(
classification_class.score)
# Post-process points to have separate list for each class
return_series = collections.defaultdict(list)
for key, mutant_values in iteritems(series):
for value, y_list in iteritems(mutant_values):
return_series[key].append({
x_label: value,
y_label: sum(y_list) / float(len(y_list))
})
return_series[key].sort(key=lambda p: p[x_label])
return return_series
elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
points = {}
for idx, regression in enumerate(inference_result_proto.result.regressions):
# For each example to use for mutant inference, we create a copied example
# with the feature in question changed to each possible mutant value. So
# when we get the inferences back, we get num_examples*num_mutants
# results. So, modding by len(mutant_features) allows us to correctly
# lookup the mutant value for each inference.
mutant_feature = mutant_features[idx % len(mutant_features)]
if mutant_feature.mutant_value not in points:
points[mutant_feature.mutant_value] = []
points[mutant_feature.mutant_value].append(regression.value)
key = 'value'
if index_to_mutate != 0:
key += ' (index %d)' % index_to_mutate
list_of_points = []
for value, y_list in iteritems(points):
list_of_points.append({
x_label: value,
y_label: sum(y_list) / float(len(y_list))
})
list_of_points.sort(key=lambda p: p[x_label])
return {key: list_of_points}
else:
raise NotImplementedError('Only classification and regression implemented.')
|
<SYSTEM_TASK:>
Returns the non-sequence features from the provided example.
<END_TASK>
<USER_TASK:>
Description:
def get_example_features(example):
"""Returns the non-sequence features from the provided example."""
|
return (example.features.feature if isinstance(example, tf.train.Example)
else example.context.feature)
|
<SYSTEM_TASK:>
Calls servo and wraps the inference results.
<END_TASK>
<USER_TASK:>
Description:
def run_inference_for_inference_results(examples, serving_bundle):
"""Calls servo and wraps the inference results."""
|
inference_result_proto = run_inference(examples, serving_bundle)
inferences = wrap_inference_results(inference_result_proto)
infer_json = json_format.MessageToJson(
inferences, including_default_value_fields=True)
return json.loads(infer_json)
|
<SYSTEM_TASK:>
Returns a list of JSON objects for each feature in the examples.
<END_TASK>
<USER_TASK:>
Description:
def get_eligible_features(examples, num_mutants):
"""Returns a list of JSON objects for each feature in the examples.
This list is used to drive partial dependence plots in the plugin.
Args:
examples: Examples to examine to determine the eligible features.
num_mutants: The number of mutations to make over each feature.
Returns:
A list with a JSON object for each feature.
Numeric features are represented as {name: ..., observedMin: ..., observedMax: ...}.
Categorical features are represented as {name: ..., samples: [...]}.
"""
|
features_dict = (
get_numeric_features_to_observed_range(
examples))
features_dict.update(
get_categorical_features_to_sampling(
examples, num_mutants))
# Massage the features_dict into a sorted list before returning because
# Polymer dom-repeat needs a list.
features_list = []
for k, v in sorted(features_dict.items()):
v['name'] = k
features_list.append(v)
return features_list
|
<SYSTEM_TASK:>
Returns a list of label strings loaded from the provided path.
<END_TASK>
<USER_TASK:>
Description:
def get_label_vocab(vocab_path):
"""Returns a list of label strings loaded from the provided path."""
|
if vocab_path:
try:
with tf.io.gfile.GFile(vocab_path, 'r') as f:
return [line.rstrip('\n') for line in f]
except tf.errors.NotFoundError as err:
tf.logging.error('error reading vocab file: %s', err)
return []
|
<SYSTEM_TASK:>
Returns an encoded sprite image for use in Facets Dive.
<END_TASK>
<USER_TASK:>
Description:
def create_sprite_image(examples):
"""Returns an encoded sprite image for use in Facets Dive.
Args:
examples: A list of serialized example protos to get images for.
Returns:
An encoded PNG.
"""
|
def generate_image_from_thumbnails(thumbnails, thumbnail_dims):
"""Generates a sprite atlas image from a set of thumbnails."""
num_thumbnails = tf.shape(thumbnails)[0].eval()
images_per_row = int(math.ceil(math.sqrt(num_thumbnails)))
thumb_height = thumbnail_dims[0]
thumb_width = thumbnail_dims[1]
master_height = images_per_row * thumb_height
master_width = images_per_row * thumb_width
num_channels = 3
master = np.zeros([master_height, master_width, num_channels])
for idx, image in enumerate(thumbnails.eval()):
left_idx = idx % images_per_row
top_idx = int(math.floor(idx / images_per_row))
left_start = left_idx * thumb_width
left_end = left_start + thumb_width
top_start = top_idx * thumb_height
top_end = top_start + thumb_height
master[top_start:top_end, left_start:left_end, :] = image
return tf.image.encode_png(master)
image_feature_name = 'image/encoded'
sprite_thumbnail_dim_px = 32
with tf.compat.v1.Session():
keys_to_features = {
image_feature_name:
tf.FixedLenFeature((), tf.string, default_value=''),
}
parsed = tf.parse_example(examples, keys_to_features)
images = tf.zeros([1, 1, 1, 1], tf.float32)
i = tf.constant(0)
thumbnail_dims = (sprite_thumbnail_dim_px,
sprite_thumbnail_dim_px)
num_examples = tf.constant(len(examples))
encoded_images = parsed[image_feature_name]
# Loop over all examples, decoding the image feature value, resizing
# and appending to a list of all images.
def loop_body(i, encoded_images, images):
encoded_image = encoded_images[i]
image = tf.image.decode_jpeg(encoded_image, channels=3)
resized_image = tf.image.resize(image, thumbnail_dims)
expanded_image = tf.expand_dims(resized_image, 0)
images = tf.cond(
tf.equal(i, 0), lambda: expanded_image,
lambda: tf.concat([images, expanded_image], 0))
return i + 1, encoded_images, images
loop_out = tf.while_loop(
lambda i, encoded_images, images: tf.less(i, num_examples),
loop_body, [i, encoded_images, images],
shape_invariants=[
i.get_shape(),
encoded_images.get_shape(),
tf.TensorShape(None)
])
# Create the single sprite atlas image from these thumbnails.
sprite = generate_image_from_thumbnails(loop_out[2], thumbnail_dims)
return sprite.eval()
|
<SYSTEM_TASK:>
Run inference on examples given model information
<END_TASK>
<USER_TASK:>
Description:
def run_inference(examples, serving_bundle):
"""Run inference on examples given model information
Args:
examples: A list of examples that matches the model spec.
serving_bundle: A `ServingBundle` object that contains the information to
make the inference request.
Returns:
A ClassificationResponse or RegressionResponse proto.
"""
|
batch_size = 64
if serving_bundle.estimator and serving_bundle.feature_spec:
# If provided an estimator and feature spec then run inference locally.
preds = serving_bundle.estimator.predict(
lambda: tf.data.Dataset.from_tensor_slices(
tf.parse_example([ex.SerializeToString() for ex in examples],
serving_bundle.feature_spec)).batch(batch_size))
if serving_bundle.use_predict:
preds_key = serving_bundle.predict_output_tensor
elif serving_bundle.model_type == 'regression':
preds_key = 'predictions'
else:
preds_key = 'probabilities'
values = []
for pred in preds:
values.append(pred[preds_key])
return common_utils.convert_prediction_values(values, serving_bundle)
elif serving_bundle.custom_predict_fn:
# If custom_predict_fn is provided, pass examples directly for local
# inference.
values = serving_bundle.custom_predict_fn(examples)
return common_utils.convert_prediction_values(values, serving_bundle)
else:
return platform_utils.call_servo(examples, serving_bundle)
|
<SYSTEM_TASK:>
Return items associated with given key.
<END_TASK>
<USER_TASK:>
Description:
def Items(self, key):
"""Return items associated with given key.
Args:
key: The key for which we are finding associated items.
Raises:
KeyError: If the key is not found in the reservoir.
Returns:
[list, of, items] associated with that key.
"""
|
with self._mutex:
if key not in self._buckets:
raise KeyError('Key %s was not found in Reservoir' % key)
bucket = self._buckets[key]
return bucket.Items()
|
<SYSTEM_TASK:>
Add a new item to the Reservoir with the given tag.
<END_TASK>
<USER_TASK:>
Description:
def AddItem(self, key, item, f=lambda x: x):
"""Add a new item to the Reservoir with the given tag.
If the reservoir has not yet reached full size, the new item is guaranteed
to be added. If the reservoir is full, then behavior depends on the
always_keep_last boolean.
If always_keep_last was set to true, the new item is guaranteed to be added
to the reservoir, and either the previous last item will be replaced, or
(with low probability) an older item will be replaced.
If always_keep_last was set to false, then the new item will replace an
old item with low probability.
If f is provided, it will be applied to transform item (lazily, iff item is
going to be included in the reservoir).
Args:
key: The key to store the item under.
item: The item to add to the reservoir.
f: An optional function to transform the item prior to addition.
"""
|
with self._mutex:
bucket = self._buckets[key]
bucket.AddItem(item, f)
|
<SYSTEM_TASK:>
Filter items within a Reservoir, using a filtering function.
<END_TASK>
<USER_TASK:>
Description:
def FilterItems(self, filterFn, key=None):
"""Filter items within a Reservoir, using a filtering function.
Args:
filterFn: A function that returns True for the items to be kept.
key: An optional bucket key to filter. If not specified, will filter
all buckets.
Returns:
The number of items removed.
"""
|
with self._mutex:
if key:
if key in self._buckets:
return self._buckets[key].FilterItems(filterFn)
else:
return 0
else:
return sum(bucket.FilterItems(filterFn)
for bucket in self._buckets.values())
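# Hedged usage sketch: assumes the enclosing Reservoir is constructed with a
# maximum per-key size, e.g. `Reservoir(size=10)`.
#
#   r = Reservoir(size=10)
#   for i in range(100):
#     r.AddItem('loss', i)
#   kept = r.Items('loss')                         # at most 10 items
#   removed = r.FilterItems(lambda x: x % 2 == 0)  # keeps even values only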
|