text_prompt | code_prompt |
---|---|
<SYSTEM_TASK:>
Return the contents for a specific plugin asset from a run.
<END_TASK>
<USER_TASK:>
Description:
def RetrievePluginAsset(self, run, plugin_name, asset_name):
"""Return the contents for a specific plugin asset from a run.
Args:
run: The string name of the run.
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available.
"""
|
accumulator = self.GetAccumulator(run)
return accumulator.RetrievePluginAsset(plugin_name, asset_name)
|
<SYSTEM_TASK:>
Retrieve the scalar events associated with a run and tag.
<END_TASK>
<USER_TASK:>
Description:
def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
|
accumulator = self.GetAccumulator(run)
return accumulator.Scalars(tag)
|
<SYSTEM_TASK:>
Retrieve the audio events associated with a run and tag.
<END_TASK>
<USER_TASK:>
Description:
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
|
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag)
|
<SYSTEM_TASK:>
Retrieve the tensor events associated with a run and tag.
<END_TASK>
<USER_TASK:>
Description:
def Tensors(self, run, tag):
"""Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s.
"""
|
accumulator = self.GetAccumulator(run)
return accumulator.Tensors(tag)
|
<SYSTEM_TASK:>
Return the summary metadata for the given tag on the given run.
<END_TASK>
<USER_TASK:>
Description:
def SummaryMetadata(self, run, tag):
"""Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf.
"""
|
accumulator = self.GetAccumulator(run)
return accumulator.SummaryMetadata(tag)
|
<SYSTEM_TASK:>
Return all the run names in the `EventMultiplexer`.
<END_TASK>
<USER_TASK:>
Description:
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
```
"""
|
with self._accumulators_mutex:
  # To avoid nested locks, we construct a copy of the run-accumulator map
  items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
|
<SYSTEM_TASK:>
Write a text summary.
<END_TASK>
<USER_TASK:>
Description:
def text(name, data, step=None, description=None):
"""Write a text summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A UTF-8 string tensor value.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
|
summary_metadata = metadata.create_summary_metadata(
    display_name=None, description=description)
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
    getattr(tf.summary.experimental, 'summary_scope', None) or
    tf.summary.summary_scope)
with summary_scope(
    name, 'text_summary', values=[data, step]) as (tag, _):
  tf.debugging.assert_type(data, tf.string)
  return tf.summary.write(
      tag=tag, tensor=data, step=step, metadata=summary_metadata)
|
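For context, a minimal usage sketch of the `text` function above, assuming TensorFlow 2.x is installed and that the function is importable as `text_summary.text` (the module name here is hypothetical):
```
import tensorflow as tf
import text_summary  # hypothetical import path for the `text` function above

writer = tf.summary.create_file_writer('/tmp/text_demo')
with writer.as_default():
  # Emits one text summary at step 0; returns True if a summary was written.
  text_summary.text('greeting', tf.constant('hello **world**'), step=0,
                    description='A short markdown-formatted note.')
writer.flush()
```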
<SYSTEM_TASK:>
Create a text tf.Summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def text_pb(tag, data, description=None):
"""Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
|
try:
  tensor = tensor_util.make_tensor_proto(data, dtype=np.object)
except TypeError as e:
  raise TypeError('tensor must be of type string', e)
summary_metadata = metadata.create_summary_metadata(
    display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
                  metadata=summary_metadata,
                  tensor=tensor)
return summary
|
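A small sketch of how the resulting protobuf might be inspected; this only constructs and examines the message, so no TensorFlow session is involved (the tag and text are made up for illustration):
```
summary = text_pb('notes/intro', u'Some **markdown** text',
                  description='Free-form notes.')
value = summary.value[0]
print(value.tag)                               # 'notes/intro'
print(value.metadata.plugin_data.plugin_name)  # the text plugin's name
serialized = summary.SerializeToString()       # bytes, e.g. for an event file
```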
<SYSTEM_TASK:>
Create a legacy audio summary op for use in a TensorFlow graph.
<END_TASK>
<USER_TASK:>
Description:
def op(name,
audio,
sample_rate,
labels=None,
max_outputs=3,
encoding=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy audio summary op for use in a TensorFlow graph.
Arguments:
name: A unique name for the generated summary node.
audio: A `Tensor` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`. Any of the dimensions may
be statically unknown (i.e., `None`).
sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the
sample rate, in Hz. Must be positive.
labels: Optional `string` `Tensor`, a vector whose length is the
first dimension of `audio`, where `labels[i]` contains arbitrary
textual information about `audio[i]`. (For instance, this could be
some text that a TTS system was supposed to produce.) Markdown is
supported. Contents should be UTF-8.
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many audio clips will be emitted at each step. When more than
`max_outputs` many clips are provided, the first `max_outputs`
many clips will be used and the rest silently discarded.
encoding: A constant `str` (not string tensor) indicating the
desired encoding. You can choose any format you like, as long as
it's "wav". Please see the "API compatibility note" below.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
API compatibility note: The default value of the `encoding`
argument is _not_ guaranteed to remain unchanged across TensorBoard
versions. In the future, we will by default encode as FLAC instead of
as WAV. If the specific format is important to you, please provide a
file format explicitly.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow  # for contrib
import tensorflow.compat.v1 as tf
if display_name is None:
  display_name = name
if encoding is None:
  encoding = 'wav'
if encoding == 'wav':
  encoding = metadata.Encoding.Value('WAV')
  encoder = functools.partial(tensorflow.contrib.ffmpeg.encode_audio,
                              samples_per_second=sample_rate,
                              file_format='wav')
else:
  raise ValueError('Unknown encoding: %r' % encoding)
with tf.name_scope(name), \
     tf.control_dependencies([tf.assert_rank(audio, 3)]):
  limited_audio = audio[:max_outputs]
  encoded_audio = tf.map_fn(encoder, limited_audio,
                            dtype=tf.string,
                            name='encode_each_audio')
  if labels is None:
    limited_labels = tf.tile([''], tf.shape(input=limited_audio)[:1])
  else:
    limited_labels = labels[:max_outputs]
  tensor = tf.transpose(a=tf.stack([encoded_audio, limited_labels]))
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name,
      description=description,
      encoding=encoding)
  return tf.summary.tensor_summary(name='audio_summary',
                                   tensor=tensor,
                                   collections=collections,
                                   summary_metadata=summary_metadata)
|
<SYSTEM_TASK:>
Create a legacy audio summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def pb(name,
audio,
sample_rate,
labels=None,
max_outputs=3,
encoding=None,
display_name=None,
description=None):
"""Create a legacy audio summary protobuf.
This behaves as if you were to create an `op` with the same arguments
(wrapped with constant tensors where appropriate) and then execute
that summary op in a TensorFlow session.
Arguments:
name: A unique name for the generated summary node.
audio: An `np.array` representing audio data with shape `[k, t, c]`,
where `k` is the number of audio clips, `t` is the number of
frames, and `c` is the number of channels. Elements should be
floating-point values in `[-1.0, 1.0]`.
sample_rate: An `int` that represents the sample rate, in Hz.
Must be positive.
labels: Optional list (or rank-1 `np.array`) of text strings or UTF-8
bytestrings whose length is the first dimension of `audio`, where
`labels[i]` contains arbitrary textual information about
`audio[i]`. (For instance, this could be some text that a TTS
system was supposed to produce.) Markdown is supported.
max_outputs: Optional `int`. At most this many audio clips will be
emitted. When more than `max_outputs` many clips are provided, the
first `max_outputs` many clips will be used and the rest silently
discarded.
encoding: A constant `str` indicating the desired encoding. You
can choose any format you like, as long as it's "wav". Please see
the "API compatibility note" below.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
API compatibility note: The default value of the `encoding`
argument is _not_ guaranteed to remain unchanged across TensorBoard
versions. In the future, we will by default encode as FLAC instead of
as WAV. If the specific format is important to you, please provide a
file format explicitly.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
audio = np.array(audio)
if audio.ndim != 3:
  raise ValueError('Shape %r must have rank 3' % (audio.shape,))
if encoding is None:
  encoding = 'wav'
if encoding == 'wav':
  encoding = metadata.Encoding.Value('WAV')
  encoder = functools.partial(encoder_util.encode_wav,
                              samples_per_second=sample_rate)
else:
  raise ValueError('Unknown encoding: %r' % encoding)
limited_audio = audio[:max_outputs]
if labels is None:
  limited_labels = [b''] * len(limited_audio)
else:
  limited_labels = [tf.compat.as_bytes(label)
                    for label in labels[:max_outputs]]
encoded_audio = [encoder(a) for a in limited_audio]
content = np.array([encoded_audio, limited_labels]).transpose()
tensor = tf.make_tensor_proto(content, dtype=tf.string)
if display_name is None:
  display_name = name
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name,
    description=description,
    encoding=encoding)
tf_summary_metadata = tf.SummaryMetadata.FromString(
    summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/audio_summary' % name,
                  metadata=tf_summary_metadata,
                  tensor=tensor)
return summary
|
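As an illustration, a sketch that builds a one-second 440 Hz sine tone and wraps it in an audio summary proto; it assumes the module above is importable as `audio_summary` (a hypothetical name):
```
import numpy as np

sample_rate = 44100
t = np.linspace(0.0, 1.0, sample_rate, endpoint=False)
tone = 0.5 * np.sin(2.0 * np.pi * 440.0 * t)
# Shape [k, t, c]: one clip, `sample_rate` frames, one channel.
audio = tone.reshape(1, sample_rate, 1).astype(np.float32)
summary = audio_summary.pb('tone', audio, sample_rate=sample_rate,
                           labels=['A 440 Hz sine wave'])
```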
<SYSTEM_TASK:>
Create a PR curve summary op for a single binary classifier.
<END_TASK>
<USER_TASK:>
Description:
def op(
name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None,
collections=None):
"""Create a PR curve summary op for a single binary classifier.
Computes true/false positive/negative values for the given `predictions`
against the ground truth `labels`, against a list of evenly distributed
threshold values in `[0, 1]` of length `num_thresholds`.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn
value at each threshold. This is then multiplied with `weights` which can be
used to reweight certain values, or more commonly used for masking values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values. A Tensor of `bool` values with arbitrary
shape.
predictions: A float32 `Tensor` whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
weights: Optional float32 `Tensor`. Individual counts are multiplied by this
value. This tensor must be either the same shape as or broadcastable to
the `labels` tensor.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. The float32 tensor
produced by the summary operation is of dimension (6, num_thresholds). The
first dimension (of length 6) is of the order: true positives,
false positives, true negatives, false negatives, precision, recall.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
  num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
  weights = 1.0
dtype = predictions.dtype
with tf.name_scope(name, values=[labels, predictions, weights]):
  tf.assert_type(labels, tf.bool)
  # We cast to float to ensure we have 0.0 or 1.0.
  f_labels = tf.cast(labels, dtype)
  # Ensure predictions are all in range [0.0, 1.0].
  predictions = tf.minimum(1.0, tf.maximum(0.0, predictions))
  # Get weighted true/false labels.
  true_labels = f_labels * weights
  false_labels = (1.0 - f_labels) * weights
  # Before we begin, flatten predictions.
  predictions = tf.reshape(predictions, [-1])
  # Shape the labels so they are broadcast-able for later multiplication.
  true_labels = tf.reshape(true_labels, [-1, 1])
  false_labels = tf.reshape(false_labels, [-1, 1])
  # To compute TP/FP/TN/FN, we are measuring a binary classifier
  #   C(t) = (predictions >= t)
  # at each threshold 't'. So we have
  #   TP(t) = sum( C(t) * true_labels )
  #   FP(t) = sum( C(t) * false_labels )
  #
  # But, computing C(t) requires computation for each t. To make it fast,
  # observe that C(t) is a cumulative integral, and so if we have
  #   thresholds = [t_0, ..., t_{n-1}];  t_0 < ... < t_{n-1}
  # where n = num_thresholds, and if we can compute the bucket function
  #   B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
  # then we get
  #   C(t_i) = sum( B(j), j >= i )
  # which is the reversed cumulative sum in tf.cumsum().
  #
  # We can compute B(i) efficiently by taking advantage of the fact that
  # our thresholds are evenly distributed, in that
  #   width = 1.0 / (num_thresholds - 1)
  #   thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
  # Given a prediction value p, we can map it to its bucket by
  #   bucket_index(p) = floor( p * (num_thresholds - 1) )
  # so we can use tf.scatter_add() to update the buckets in one pass.
  # Compute the bucket indices for each prediction value.
  bucket_indices = tf.cast(
      tf.floor(predictions * (num_thresholds - 1)), tf.int32)
  # Bucket predictions.
  tp_buckets = tf.reduce_sum(
      input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels,
      axis=0)
  fp_buckets = tf.reduce_sum(
      input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels,
      axis=0)
  # Set up the cumulative sums to compute the actual metrics.
  tp = tf.cumsum(tp_buckets, reverse=True, name='tp')
  fp = tf.cumsum(fp_buckets, reverse=True, name='fp')
  # fn = sum(true_labels) - tp
  #    = sum(tp_buckets) - tp
  #    = tp[0] - tp
  # Similarly,
  # tn = fp[0] - fp
  tn = fp[0] - fp
  fn = tp[0] - tp
  precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
  recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
  return _create_tensor_summary(
      name,
      tp,
      fp,
      tn,
      fn,
      precision,
      recall,
      num_thresholds,
      display_name,
      description,
      collections)
|
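The bucketing trick described in the comments above can be checked in plain NumPy: counting predictions into evenly spaced buckets and taking a reversed cumulative sum reproduces the per-threshold true-positive counts. A small self-contained sketch (values invented for illustration):
```
import numpy as np

num_thresholds = 5  # thresholds at 0.0, 0.25, 0.5, 0.75, 1.0
labels = np.array([True, False, True, True, False])
predictions = np.array([0.9, 0.8, 0.3, 0.55, 0.1])

bucket_indices = np.floor(predictions * (num_thresholds - 1)).astype(int)
tp_buckets = np.bincount(bucket_indices, weights=labels.astype(float),
                         minlength=num_thresholds)
tp = np.cumsum(tp_buckets[::-1])[::-1]  # TP at each threshold

# Direct (slower) computation for comparison: classifier C(t) = predictions >= t.
thresholds = np.linspace(0.0, 1.0, num_thresholds)
tp_direct = np.array([(labels & (predictions >= t)).sum() for t in thresholds])
assert np.array_equal(tp, tp_direct)  # both give [3, 3, 2, 1, 0]
```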
<SYSTEM_TASK:>
Create a PR curves summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def pb(name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None):
"""Create a PR curves summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
labels: The ground truth values. A bool numpy array.
predictions: A float32 numpy array whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Optional number of thresholds, evenly distributed in
`[0, 1]`, to compute PR metrics for. When provided, should be an int of
value at least 2. Defaults to 201.
weights: Optional float or float32 numpy array. Individual counts are
multiplied by this value. This tensor must be either the same shape as
or broadcastable to the `labels` numpy array.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
  num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
  weights = 1.0
# Compute bins of true positives and false positives.
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
float_labels = labels.astype(np.float)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
    bucket_indices,
    bins=num_thresholds,
    range=histogram_range,
    weights=float_labels * weights)
fp_buckets, _ = np.histogram(
    bucket_indices,
    bins=num_thresholds,
    range=histogram_range,
    weights=(1.0 - float_labels) * weights)
# Obtain the reverse cumulative sum.
tp = np.cumsum(tp_buckets[::-1])[::-1]
fp = np.cumsum(fp_buckets[::-1])[::-1]
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
return raw_data_pb(name,
                   true_positive_counts=tp,
                   false_positive_counts=fp,
                   true_negative_counts=tn,
                   false_negative_counts=fn,
                   precision=precision,
                   recall=recall,
                   num_thresholds=num_thresholds,
                   display_name=display_name,
                   description=description)
|
<SYSTEM_TASK:>
Computes a precision-recall curve summary across batches of data.
<END_TASK>
<USER_TASK:>
Description:
def streaming_op(name,
labels,
predictions,
num_thresholds=None,
weights=None,
metrics_collections=None,
updates_collections=None,
display_name=None,
description=None):
"""Computes a precision-recall curve summary across batches of data.
This function is similar to op() above, but can be used to compute the PR
curve across multiple batches of labels and predictions, in the same style
as the metrics found in tf.metrics.
This function creates multiple local variables for storing true positives,
true negatives, etc. accumulated over each batch of data, and uses these local
variables for computing the final PR curve summary. These variables can be
updated with the returned update_op.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
num_thresholds: The number of evenly spaced thresholds to generate for
computing the PR curve. Defaults to 201.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
pr_curve: A string `Tensor` containing a single value: the
serialized PR curve Tensor summary. The summary contains a
float32 `Tensor` of dimension (6, num_thresholds). The first
dimension (of length 6) is of the order: true positives, false
positives, true negatives, false negatives, precision, recall.
update_op: An operation that updates the summary with the latest data.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
  num_thresholds = _DEFAULT_NUM_THRESHOLDS
thresholds = [i / float(num_thresholds - 1)
              for i in range(num_thresholds)]
with tf.name_scope(name, values=[labels, predictions, weights]):
  tp, update_tp = tf.metrics.true_positives_at_thresholds(
      labels=labels,
      predictions=predictions,
      thresholds=thresholds,
      weights=weights)
  fp, update_fp = tf.metrics.false_positives_at_thresholds(
      labels=labels,
      predictions=predictions,
      thresholds=thresholds,
      weights=weights)
  tn, update_tn = tf.metrics.true_negatives_at_thresholds(
      labels=labels,
      predictions=predictions,
      thresholds=thresholds,
      weights=weights)
  fn, update_fn = tf.metrics.false_negatives_at_thresholds(
      labels=labels,
      predictions=predictions,
      thresholds=thresholds,
      weights=weights)
  def compute_summary(tp, fp, tn, fn, collections):
    precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
    recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
    return _create_tensor_summary(
        name,
        tp,
        fp,
        tn,
        fn,
        precision,
        recall,
        num_thresholds,
        display_name,
        description,
        collections)
  pr_curve = compute_summary(tp, fp, tn, fn, metrics_collections)
  update_op = tf.group(update_tp, update_fp, update_tn, update_fn)
  if updates_collections:
    for collection in updates_collections:
      tf.add_to_collection(collection, update_op)
  return pr_curve, update_op
|
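A sketch of the typical update/evaluate cycle for the streaming op, in TF 1.x graph mode; `pr_curve_summary` and `batches` are hypothetical names standing in for this module and for an iterable of (labels, predictions) batches:
```
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
labels_ph = tf.placeholder(tf.bool, shape=[None])
predictions_ph = tf.placeholder(tf.float32, shape=[None])
pr_curve, update_op = pr_curve_summary.streaming_op(
    'eval_pr', labels=labels_ph, predictions=predictions_ph, num_thresholds=11)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())  # tf.metrics state lives in local vars
  for labels_batch, predictions_batch in batches:
    sess.run(update_op, feed_dict={labels_ph: labels_batch,
                                   predictions_ph: predictions_batch})
  serialized_summary = sess.run(pr_curve)  # bytes for an event file writer
```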
<SYSTEM_TASK:>
Create an op that collects data for visualizing PR curves.
<END_TASK>
<USER_TASK:>
Description:
def raw_data_op(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""Create an op that collects data for visualizing PR curves.
Unlike the op above, this one avoids computing precision, recall, and the
intermediate counts. Instead, it accepts those tensors as arguments and
relies on the caller to ensure that the calculations are correct (and the
counts yield the provided precision and recall values).
This op is useful when a caller seeks to compute precision and recall
differently but still use the PR curves plugin.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 tensor of true positive counts. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
false_positive_counts: A rank-1 tensor of false positive counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
true_negative_counts: A rank-1 tensor of true negative counts. Must contain
`num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
false_negative_counts: A rank-1 tensor of false negative counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
precision: A rank-1 tensor of precision values. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
recall: A rank-1 tensor of recall values. Must contain `num_thresholds`
elements and be castable to float32. Values correspond to thresholds
that increase from left to right (from 0 to 1).
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
with tf.name_scope(name, values=[
    true_positive_counts,
    false_positive_counts,
    true_negative_counts,
    false_negative_counts,
    precision,
    recall,
]):
  return _create_tensor_summary(
      name,
      true_positive_counts,
      false_positive_counts,
      true_negative_counts,
      false_negative_counts,
      precision,
      recall,
      num_thresholds,
      display_name,
      description,
      collections)
|
<SYSTEM_TASK:>
Create a PR curves summary protobuf from raw data values.
<END_TASK>
<USER_TASK:>
Description:
def raw_data_pb(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None):
"""Create a PR curves summary protobuf from raw data values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 numpy array of true positive counts. Must
contain `num_thresholds` elements and be castable to float32.
false_positive_counts: A rank-1 numpy array of false positive counts. Must
contain `num_thresholds` elements and be castable to float32.
true_negative_counts: A rank-1 numpy array of true negative counts. Must
contain `num_thresholds` elements and be castable to float32.
false_negative_counts: A rank-1 numpy array of false negative counts. Must
contain `num_thresholds` elements and be castable to float32.
precision: A rank-1 numpy array of precision values. Must contain
`num_thresholds` elements and be castable to float32.
recall: A rank-1 numpy array of recall values. Must contain `num_thresholds`
elements and be castable to float32.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be an int `>= 2`.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
  display_name = name
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name if display_name is not None else name,
    description=description or '',
    num_thresholds=num_thresholds)
tf_summary_metadata = tf.SummaryMetadata.FromString(
    summary_metadata.SerializeToString())
summary = tf.Summary()
data = np.stack(
    (true_positive_counts,
     false_positive_counts,
     true_negative_counts,
     false_negative_counts,
     precision,
     recall))
tensor = tf.make_tensor_proto(np.float32(data), dtype=tf.float32)
summary.value.add(tag='%s/pr_curves' % name,
                  metadata=tf_summary_metadata,
                  tensor=tensor)
return summary
|
<SYSTEM_TASK:>
A private helper method for generating a tensor summary.
<END_TASK>
<USER_TASK:>
Description:
def _create_tensor_summary(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""A private helper method for generating a tensor summary.
We use a helper method instead of having `op` directly call `raw_data_op`
to prevent the scope of `raw_data_op` from being embedded within `op`.
Arguments are the same as for raw_data_op.
Returns:
A tensor summary that collects data for PR curves.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
# Store the number of thresholds within the summary metadata because
# that value is constant for all pr curve summaries with the same tag.
summary_metadata = metadata.create_summary_metadata(
    display_name=display_name if display_name is not None else name,
    description=description or '',
    num_thresholds=num_thresholds)
# Store values within a tensor. We store them in the order:
# true positives, false positives, true negatives, false
# negatives, precision, and recall.
combined_data = tf.stack([
    tf.cast(true_positive_counts, tf.float32),
    tf.cast(false_positive_counts, tf.float32),
    tf.cast(true_negative_counts, tf.float32),
    tf.cast(false_negative_counts, tf.float32),
    tf.cast(precision, tf.float32),
    tf.cast(recall, tf.float32)])
return tf.summary.tensor_summary(
    name='pr_curves',
    tensor=combined_data,
    collections=collections,
    summary_metadata=summary_metadata)
|
<SYSTEM_TASK:>
Executes the request.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""Executes the request.
Returns:
An array of tuples representing the metric evaluations--each of the form
(<wall time in secs>, <training step>, <metric value>).
"""
|
run, tag = metrics.run_tag_from_session_and_metric(
    self._request.session_name, self._request.metric_name)
body, _ = self._scalars_plugin_instance.scalars_impl(
    tag, run, None, scalars_plugin.OutputFormat.JSON)
return body
|
<SYSTEM_TASK:>
Given a tag and single run, return array of histogram values.
<END_TASK>
<USER_TASK:>
Description:
def histograms_route(self, request):
"""Given a tag and single run, return array of histogram values."""
|
tag = request.args.get('tag')
run = request.args.get('run')
try:
  (body, mime_type) = self.histograms_impl(
      tag, run, downsample_to=self.SAMPLE_SIZE)
  code = 200
except ValueError as e:
  (body, mime_type) = (str(e), 'text/plain')
  code = 400
return http_util.Respond(request, body, mime_type, code=code)
|
<SYSTEM_TASK:>
Initialize the graph and session, if this has not yet been done.
<END_TASK>
<USER_TASK:>
Description:
def _lazily_initialize(self):
"""Initialize the graph and session, if this has not yet been done."""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
with self._initialization_lock:
  if self._session:
    return
  graph = tf.Graph()
  with graph.as_default():
    self.initialize_graph()
  # Don't reserve GPU because libpng can't run on GPU.
  config = tf.ConfigProto(device_count={'GPU': 0})
  self._session = tf.Session(graph=graph, config=config)
|
<SYSTEM_TASK:>
Tries to get the scalars plugin.
<END_TASK>
<USER_TASK:>
Description:
def _get_scalars_plugin(self):
"""Tries to get the scalars plugin.
Returns:
The scalars plugin. Or None if it is not yet registered.
"""
|
if scalars_metadata.PLUGIN_NAME in self._plugin_name_to_instance:
  # The plugin is registered.
  return self._plugin_name_to_instance[scalars_metadata.PLUGIN_NAME]
# The plugin is not yet registered.
return None
|
<SYSTEM_TASK:>
This plugin is active if 2 conditions hold.
<END_TASK>
<USER_TASK:>
Description:
def is_active(self):
"""This plugin is active if 2 conditions hold.
1. The scalars plugin is registered and active.
2. There is a custom layout for the dashboard.
Returns: A boolean. Whether the plugin is active.
"""
|
if not self._multiplexer:
  return False
scalars_plugin_instance = self._get_scalars_plugin()
if not (scalars_plugin_instance and
        scalars_plugin_instance.is_active()):
  return False
# This plugin is active if any run has a layout.
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
|
<SYSTEM_TASK:>
Provides a response for downloading scalars data for a data series.
<END_TASK>
<USER_TASK:>
Description:
def download_data_impl(self, run, tag, response_format):
"""Provides a response for downloading scalars data for a data series.
Args:
run: The run.
tag: The specific tag.
response_format: A string. One of the values of the OutputFormat enum of
the scalar plugin.
Raises:
ValueError: If the scalars plugin is not registered.
Returns:
2 entities:
- A JSON object response body.
- A mime type (string) for the response.
"""
|
scalars_plugin_instance = self._get_scalars_plugin()
if not scalars_plugin_instance:
  raise ValueError(('Failed to respond to request for /download_data. '
                    'The scalars plugin is oddly not registered.'))
body, mime_type = scalars_plugin_instance.scalars_impl(
    tag, run, None, response_format)
return body, mime_type
|
<SYSTEM_TASK:>
r"""Fetches the custom layout specified by the config file in the logdir.
<END_TASK>
<USER_TASK:>
Description:
def layout_route(self, request):
r"""Fetches the custom layout specified by the config file in the logdir.
If more than 1 run contains a layout, this method merges the layouts by
merging charts within individual categories. If 2 categories with the same
name are found, the charts within are merged. The merging is based on the
order of the runs to which the layouts are written.
The response is a JSON object mirroring properties of the Layout proto if a
layout for any run is found.
The response is an empty object if no layout could be found.
"""
|
body = self.layout_impl()
return http_util.Respond(request, body, 'application/json')
|
<SYSTEM_TASK:>
Given an iterable of string contents, make a table row.
<END_TASK>
<USER_TASK:>
Description:
def make_table_row(contents, tag='td'):
"""Given an iterable of string contents, make a table row.
Args:
contents: An iterable yielding strings.
tag: The tag to place contents in. Defaults to 'td', you might want 'th'.
Returns:
A string containing the content strings, organized into a table row.
Example: make_table_row(['one', 'two', 'three']) == '''
<tr>
<td>one</td>
<td>two</td>
<td>three</td>
</tr>'''
"""
|
columns = ('<%s>%s</%s>\n' % (tag, s, tag) for s in contents)
return '<tr>\n' + ''.join(columns) + '</tr>\n'
|
<SYSTEM_TASK:>
Given a numpy ndarray of strings, concatenate them into a html table.
<END_TASK>
<USER_TASK:>
Description:
def make_table(contents, headers=None):
"""Given a numpy ndarray of strings, concatenate them into a html table.
Args:
contents: A np.ndarray of strings. May be 1d or 2d. In the 1d case, the
table is laid out vertically (i.e. row-major).
headers: A np.ndarray or list of string header names for the table.
Returns:
A string containing all of the content strings, organized into a table.
Raises:
ValueError: If contents is not a np.ndarray.
ValueError: If contents is not 1d or 2d.
ValueError: If contents is empty.
ValueError: If headers is present and not a list, tuple, or ndarray.
ValueError: If headers is not 1d.
ValueError: If number of elements in headers does not correspond to number
of columns in contents.
"""
|
if not isinstance(contents, np.ndarray):
  raise ValueError('make_table contents must be a numpy ndarray')
if contents.ndim not in [1, 2]:
  raise ValueError('make_table requires a 1d or 2d numpy array, was %dd' %
                   contents.ndim)
if headers:
  if isinstance(headers, (list, tuple)):
    headers = np.array(headers)
  if not isinstance(headers, np.ndarray):
    raise ValueError('Could not convert headers %s into np.ndarray' % headers)
  if headers.ndim != 1:
    raise ValueError('Headers must be 1d, is %dd' % headers.ndim)
  expected_n_columns = contents.shape[1] if contents.ndim == 2 else 1
  if headers.shape[0] != expected_n_columns:
    raise ValueError('Number of headers %d must match number of columns %d' %
                     (headers.shape[0], expected_n_columns))
  header = '<thead>\n%s</thead>\n' % make_table_row(headers, tag='th')
else:
  header = ''
n_rows = contents.shape[0]
if contents.ndim == 1:
  # If it's a vector, we need to wrap each element in a new list, otherwise
  # we would turn the string itself into a row (see test code)
  rows = (make_table_row([contents[i]]) for i in range(n_rows))
else:
  rows = (make_table_row(contents[i, :]) for i in range(n_rows))
return '<table>\n%s<tbody>\n%s</tbody>\n</table>' % (header, ''.join(rows))
|
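For example, a 2-D array of strings with matching headers renders like this (output truncated):
```
import numpy as np

contents = np.array([['1', 'one'], ['2', 'two']])
html = make_table(contents, headers=['digit', 'word'])
# '<table>\n<thead>\n<tr>\n<th>digit</th>\n<th>word</th>\n</tr>\n</thead>\n...'
```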
<SYSTEM_TASK:>
Given a np.ndarray with ndims > 2, reduce it to 2d.
<END_TASK>
<USER_TASK:>
Description:
def reduce_to_2d(arr):
"""Given a np.npdarray with nDims > 2, reduce it to 2d.
It does this by selecting the zeroth coordinate for every dimension greater
than two.
Args:
arr: a numpy ndarray of dimension at least 2.
Returns:
A two-dimensional subarray from the input array.
Raises:
ValueError: If the argument is not a numpy ndarray, or the dimensionality
is too low.
"""
|
if not isinstance(arr, np.ndarray):
  raise ValueError('reduce_to_2d requires a numpy.ndarray')
ndims = len(arr.shape)
if ndims < 2:
  raise ValueError('reduce_to_2d requires an array of dimensionality >=2')
# slice(None) is equivalent to `:`, so we take arr[0,0,...0,:,:]
slices = ([0] * (ndims - 2)) + [slice(None), slice(None)]
return arr[slices]
|
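A quick check of the behavior on a rank-4 array; the leading dimensions are fixed at index 0 and the last two are kept:
```
import numpy as np

arr = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
assert reduce_to_2d(arr).shape == (4, 5)
assert np.array_equal(reduce_to_2d(arr), arr[0, 0, :, :])
```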
<SYSTEM_TASK:>
Take a numpy.ndarray containing strings, and convert it into html.
<END_TASK>
<USER_TASK:>
Description:
def text_array_to_html(text_arr):
"""Take a numpy.ndarray containing strings, and convert it into html.
If the ndarray contains a single scalar string, that string is converted to
html via our sanitized markdown parser. If it contains an array of strings,
the strings are individually converted to html and then composed into a table
using make_table. If the array contains dimensionality greater than 2,
all but two of the dimensions are removed, and a warning message is prefixed
to the table.
Args:
text_arr: A numpy.ndarray containing strings.
Returns:
The array converted to html.
"""
|
if not text_arr.shape:
  # It is a scalar. No need to put it in a table, just apply markdown
  return plugin_util.markdown_to_safe_html(np.asscalar(text_arr))
warning = ''
if len(text_arr.shape) > 2:
  warning = plugin_util.markdown_to_safe_html(WARNING_TEMPLATE
                                              % len(text_arr.shape))
  text_arr = reduce_to_2d(text_arr)
html_arr = [plugin_util.markdown_to_safe_html(x)
            for x in text_arr.reshape(-1)]
html_arr = np.array(html_arr).reshape(text_arr.shape)
return warning + make_table(html_arr)
|
<SYSTEM_TASK:>
Convert a TensorEvent into a JSON-compatible response.
<END_TASK>
<USER_TASK:>
Description:
def process_string_tensor_event(event):
"""Convert a TensorEvent into a JSON-compatible response."""
|
string_arr = tensor_util.make_ndarray(event.tensor_proto)
html = text_array_to_html(string_arr)
return {
    'wall_time': event.wall_time,
    'step': event.step,
    'text': html,
}
|
<SYSTEM_TASK:>
Return a field to `Observations` dict for the event generator.
<END_TASK>
<USER_TASK:>
Description:
def get_field_to_observations_map(generator, query_for_tag=''):
"""Return a field to `Observations` dict for the event generator.
Args:
generator: A generator over event protos.
query_for_tag: A string that if specified, only create observations for
events with this tag name.
Returns:
A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
"""
|
def increment(stat, event, tag=''):
  assert stat in TRACKED_FIELDS
  field_to_obs[stat].append(Observation(step=event.step,
                                        wall_time=event.wall_time,
                                        tag=tag)._asdict())
field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])
for event in generator:
  ## Process the event
  if event.HasField('graph_def') and (not query_for_tag):
    increment('graph', event)
  if event.HasField('session_log') and (not query_for_tag):
    status = event.session_log.status
    if status == event_pb2.SessionLog.START:
      increment('sessionlog:start', event)
    elif status == event_pb2.SessionLog.STOP:
      increment('sessionlog:stop', event)
    elif status == event_pb2.SessionLog.CHECKPOINT:
      increment('sessionlog:checkpoint', event)
  elif event.HasField('summary'):
    for value in event.summary.value:
      if query_for_tag and value.tag != query_for_tag:
        continue
      for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items():
        if value.HasField(proto_name):
          increment(display_name, event, value.tag)
return field_to_obs
|
<SYSTEM_TASK:>
Returns a dictionary of tags that a user could query over.
<END_TASK>
<USER_TASK:>
Description:
def get_unique_tags(field_to_obs):
"""Returns a dictionary of tags that a user could query over.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict that maps keys in `TAG_FIELDS` to a list of string tags present in
the event files. If the dict does not have any observations of the type,
maps to an empty list so that we can render this to console.
"""
|
return {field: sorted(set([x.get('tag', '') for x in observations]))
        for field, observations in field_to_obs.items()
        if field in TAG_FIELDS}
|
<SYSTEM_TASK:>
Prints a shallow dict to console.
<END_TASK>
<USER_TASK:>
Description:
def print_dict(d, show_missing=True):
"""Prints a shallow dict to console.
Args:
d: Dict to print.
show_missing: Whether to show keys with empty values.
"""
|
for k, v in sorted(d.items()):
  if (not v) and show_missing:
    # No instances of the key, so print missing symbol.
    print('{} -'.format(k))
  elif isinstance(v, list):
    # Value is a list, so print each item of the list.
    print(k)
    for item in v:
      print(' {}'.format(item))
  elif isinstance(v, dict):
    # Value is a dict, so print each (key, value) pair of the dict.
    print(k)
    for kk, vv in sorted(v.items()):
      print(' {:<20} {}'.format(kk, vv))
|
<SYSTEM_TASK:>
Transform the field-to-obs mapping into a printable dictionary.
<END_TASK>
<USER_TASK:>
Description:
def get_dict_to_print(field_to_obs):
"""Transform the field-to-obs mapping into a printable dictionary.
Args:
field_to_obs: Dict that maps string field to `Observation` list.
Returns:
A dict with the keys and values to print to console.
"""
|
def compressed_steps(steps):
  return {'num_steps': len(set(steps)),
          'min_step': min(steps),
          'max_step': max(steps),
          'last_step': steps[-1],
          'first_step': steps[0],
          'outoforder_steps': get_out_of_order(steps)}

def full_steps(steps):
  return {'steps': steps, 'outoforder_steps': get_out_of_order(steps)}

output = {}
for field, observations in field_to_obs.items():
  if not observations:
    output[field] = None
    continue
  steps = [x['step'] for x in observations]
  if field in SHORT_FIELDS:
    output[field] = compressed_steps(steps)
  if field in LONG_FIELDS:
    output[field] = full_steps(steps)
return output
|
<SYSTEM_TASK:>
Returns elements that break the monotonically non-decreasing trend.
<END_TASK>
<USER_TASK:>
Description:
def get_out_of_order(list_of_numbers):
"""Returns elements that break the monotonically non-decreasing trend.
This is used to find instances of global step values that are "out-of-order",
which may trigger TensorBoard event discarding logic.
Args:
list_of_numbers: A list of numbers.
Returns:
A list of tuples, each containing two adjacent elements from the input where
the second element is lower than the first.
"""
|
# TODO: Consider changing this to only check for out-of-order
# steps within a particular tag.
result = []
# pylint: disable=consider-using-enumerate
for i in range(len(list_of_numbers)):
  if i == 0:
    continue
  if list_of_numbers[i] < list_of_numbers[i - 1]:
    result.append((list_of_numbers[i - 1], list_of_numbers[i]))
return result
|
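Two small examples of the expected behavior (the step sequences are made up):
```
assert get_out_of_order([0, 1, 5, 2, 3]) == [(5, 2)]
assert get_out_of_order([0, 1, 1, 2]) == []  # non-decreasing sequences are fine
```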
<SYSTEM_TASK:>
Returns a list of event generators for subdirectories with event files.
<END_TASK>
<USER_TASK:>
Description:
def generators_from_logdir(logdir):
"""Returns a list of event generators for subdirectories with event files.
The number of generators returned should equal the number of directories
within logdir that contain event files. If only logdir contains event files,
returns a list of length one.
Args:
logdir: A log directory that contains event files.
Returns:
List of event generators for each subdirectory with event files.
"""
|
subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
generators = [
    itertools.chain(*[
        generator_from_event_file(os.path.join(subdir, f))
        for f in tf.io.gfile.listdir(subdir)
        if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))
    ]) for subdir in subdirs
]
return generators
|
<SYSTEM_TASK:>
Returns a list of InspectionUnit objects given either logdir or event_file.
<END_TASK>
<USER_TASK:>
Description:
def get_inspection_units(logdir='', event_file='', tag=''):
"""Returns a list of InspectionUnit objects given either logdir or event_file.
If logdir is given, the number of InspectionUnits should equal the
number of directories or subdirectories that contain event files.
If event_file is given, the number of InspectionUnits should be 1.
Args:
logdir: A log directory that contains event files.
event_file: Or, a particular event file path.
tag: An optional tag name to query for.
Returns:
A list of InspectionUnit objects.
"""
|
if logdir:
  subdirs = io_wrapper.GetLogdirSubdirectories(logdir)
  inspection_units = []
  for subdir in subdirs:
    generator = itertools.chain(*[
        generator_from_event_file(os.path.join(subdir, f))
        for f in tf.io.gfile.listdir(subdir)
        if io_wrapper.IsTensorFlowEventsFile(os.path.join(subdir, f))
    ])
    inspection_units.append(InspectionUnit(
        name=subdir,
        generator=generator,
        field_to_obs=get_field_to_observations_map(generator, tag)))
  if inspection_units:
    print('Found event files in:\n{}\n'.format('\n'.join(
        [u.name for u in inspection_units])))
  elif io_wrapper.IsTensorFlowEventsFile(logdir):
    print(
        'It seems that {} may be an event file instead of a logdir. If this '
        'is the case, use --event_file instead of --logdir to pass '
        'it in.'.format(logdir))
  else:
    print('No event files found within logdir {}'.format(logdir))
  return inspection_units
elif event_file:
  generator = generator_from_event_file(event_file)
  return [InspectionUnit(
      name=event_file,
      generator=generator,
      field_to_obs=get_field_to_observations_map(generator, tag))]
return []
|
<SYSTEM_TASK:>
Main function for inspector that prints out a digest of event files.
<END_TASK>
<USER_TASK:>
Description:
def inspect(logdir='', event_file='', tag=''):
"""Main function for inspector that prints out a digest of event files.
Args:
logdir: A log directory that contains event files.
event_file: Or, a particular event file path.
tag: An optional tag name to query for.
Raises:
ValueError: If neither logdir nor event_file is given, or if both are given.
"""
|
print(PRINT_SEPARATOR +
      'Processing event files... (this can take a few minutes)\n' +
      PRINT_SEPARATOR)
inspection_units = get_inspection_units(logdir, event_file, tag)
for unit in inspection_units:
  if tag:
    print('Event statistics for tag {} in {}:'.format(tag, unit.name))
  else:
    # If the user is not inspecting a particular tag, also print the list of
    # all available tags that they can query.
    print('These tags are in {}:'.format(unit.name))
    print_dict(get_unique_tags(unit.field_to_obs))
    print(PRINT_SEPARATOR)
    print('Event statistics for {}:'.format(unit.name))
  print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))
  print(PRINT_SEPARATOR)
|
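Typical invocations look like the following; the paths and tag are hypothetical, and exactly one of `logdir` or `event_file` should be passed:
```
inspect(logdir='/tmp/my_experiment')              # digest every run subdirectory
inspect(logdir='/tmp/my_experiment', tag='loss')  # restrict to a single tag
inspect(event_file='/tmp/my_experiment/events.out.tfevents.1234.myhost')
```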
<SYSTEM_TASK:>
Returns the debugger plugin, if possible.
<END_TASK>
<USER_TASK:>
Description:
def load(self, context):
"""Returns the debugger plugin, if possible.
Args:
context: The TBContext flags including `add_arguments`.
Returns:
A DebuggerPlugin instance or None if it couldn't be loaded.
"""
|
if not (context.flags.debugger_data_server_grpc_port > 0 or
        context.flags.debugger_port > 0):
  return None
flags = context.flags
try:
  # pylint: disable=g-import-not-at-top,unused-import
  import tensorflow
except ImportError:
  raise ImportError(
      'To use the debugger plugin, you need to have TensorFlow installed:\n'
      ' pip install tensorflow')
try:
  # pylint: disable=line-too-long,g-import-not-at-top
  from tensorboard.plugins.debugger import debugger_plugin as debugger_plugin_lib
  from tensorboard.plugins.debugger import interactive_debugger_plugin as interactive_debugger_plugin_lib
  # pylint: enable=line-too-long,g-import-not-at-top
except ImportError as e:
  e_type, e_value, e_traceback = sys.exc_info()
  message = e.msg if hasattr(e, 'msg') else e.message  # Handle py2 vs py3
  if 'grpc' in message:
    e_value = ImportError(
        message +
        '\n\nTo use the debugger plugin, you need to have '
        'gRPC installed:\n pip install grpcio')
  six.reraise(e_type, e_value, e_traceback)
if flags.debugger_port > 0:
  interactive_plugin = (
      interactive_debugger_plugin_lib.InteractiveDebuggerPlugin(context))
  logger.info('Starting Interactive Debugger Plugin at gRPC port %d',
              flags.debugger_port)
  interactive_plugin.listen(flags.debugger_port)
  return interactive_plugin
elif flags.debugger_data_server_grpc_port > 0:
  noninteractive_plugin = debugger_plugin_lib.DebuggerPlugin(context)
  logger.info('Starting Non-interactive Debugger Plugin at gRPC port %d',
              flags.debugger_data_server_grpc_port)
  noninteractive_plugin.listen(flags.debugger_data_server_grpc_port)
  return noninteractive_plugin
raise AssertionError()
|
<SYSTEM_TASK:>
Returns a summary metadata for the HParams plugin.
<END_TASK>
<USER_TASK:>
Description:
def create_summary_metadata(hparams_plugin_data_pb):
"""Returns a summary metadata for the HParams plugin.
Returns a summary_pb2.SummaryMetadata holding a copy of the given
HParamsPluginData message in its plugin_data.content field.
Sets the version field of the hparams_plugin_data_pb copy to
PLUGIN_DATA_VERSION.
Args:
hparams_plugin_data_pb: the HParamsPluginData protobuffer to use.
"""
|
if not isinstance(hparams_plugin_data_pb, plugin_data_pb2.HParamsPluginData):
  raise TypeError('Needed an instance of plugin_data_pb2.HParamsPluginData.'
                  ' Got: %s' % type(hparams_plugin_data_pb))
content = plugin_data_pb2.HParamsPluginData()
content.CopyFrom(hparams_plugin_data_pb)
content.version = PLUGIN_DATA_VERSION
return tf.compat.v1.SummaryMetadata(
    plugin_data=tf.compat.v1.SummaryMetadata.PluginData(
        plugin_name=PLUGIN_NAME, content=content.SerializeToString()))
|
<SYSTEM_TASK:>
Returns a data oneof's field from plugin_data.content.
<END_TASK>
<USER_TASK:>
Description:
def _parse_plugin_data_as(content, data_oneof_field):
"""Returns a data oneof's field from plugin_data.content.
Raises HParamsError if the content doesn't have 'data_oneof_field' set or
this file is incompatible with the version of the metadata stored.
Args:
content: The SummaryMetadata.plugin_data.content to use.
data_oneof_field: string. The name of the data oneof field to return.
"""
|
plugin_data = plugin_data_pb2.HParamsPluginData.FromString(content)
if plugin_data.version != PLUGIN_DATA_VERSION:
  raise error.HParamsError(
      'Only supports plugin_data version: %s; found: %s in: %s' %
      (PLUGIN_DATA_VERSION, plugin_data.version, plugin_data))
if not plugin_data.HasField(data_oneof_field):
  raise error.HParamsError(
      'Expected plugin_data.%s to be set. Got: %s' %
      (data_oneof_field, plugin_data))
return getattr(plugin_data, data_oneof_field)
|
<SYSTEM_TASK:>
Writes an event proto to disk.
<END_TASK>
<USER_TASK:>
Description:
def write_event(self, event):
"""Writes an event proto to disk.
This method is threadsafe with respect to invocations of itself.
Args:
event: The event proto.
Raises:
IOError: If writing the event proto to disk fails.
"""
|
self._lock.acquire()
try:
  self._events_writer.WriteEvent(event)
  self._event_count += 1
  if self._always_flush:
    # We flush on every event within the integration test.
    self._events_writer.Flush()
  if self._event_count == self._check_this_often:
    # Every so often, we check whether the size of the file is too big.
    self._event_count = 0
    # Flush to get an accurate size check.
    self._events_writer.Flush()
    file_path = os.path.join(self._events_directory,
                             self.get_current_file_name())
    if not tf.io.gfile.exists(file_path):
      # The events file does not exist. Perhaps the user had manually
      # deleted it after training began. Create a new one.
      self._events_writer.Close()
      self._events_writer = self._create_events_writer(
          self._events_directory)
    elif tf.io.gfile.stat(file_path).length > self._single_file_size_cap_bytes:
      # The current events file has gotten too big. Close the previous
      # events writer. Make a new one.
      self._events_writer.Close()
      self._events_writer = self._create_events_writer(
          self._events_directory)
except IOError as err:
  logger.error(
      "Writing to %s failed: %s", self.get_current_file_name(), err)
self._lock.release()
|
<SYSTEM_TASK:>
Disposes of this events writer manager, making it no longer usable.
<END_TASK>
<USER_TASK:>
Description:
def dispose(self):
"""Disposes of this events writer manager, making it no longer usable.
Call this method when this object is done being used in order to clean up
resources and handlers. This method should only ever be called once.
"""
|
self._lock.acquire()
self._events_writer.Close()
self._events_writer = None
self._lock.release()
|
<SYSTEM_TASK:>
Creates a new events writer.
<END_TASK>
<USER_TASK:>
Description:
def _create_events_writer(self, directory):
"""Creates a new events writer.
Args:
directory: The directory in which to write files containing events.
Returns:
A new events writer, which corresponds to a new events file.
"""
|
total_size = 0
events_files = self._fetch_events_files_on_disk()
for file_name in events_files:
  file_path = os.path.join(self._events_directory, file_name)
  total_size += tf.io.gfile.stat(file_path).length
if total_size >= self.total_file_size_cap_bytes:
  # The total size written to disk is too big. Delete events files until
  # the size is below the cap.
  for file_name in events_files:
    if total_size < self.total_file_size_cap_bytes:
      break
    file_path = os.path.join(self._events_directory, file_name)
    file_size = tf.io.gfile.stat(file_path).length
    try:
      tf.io.gfile.remove(file_path)
      total_size -= file_size
      logger.info(
          "Deleted %s because events files take up over %d bytes",
          file_path, self.total_file_size_cap_bytes)
    except IOError as err:
      logger.error("Deleting %s failed: %s", file_path, err)
# We increment this index because each events writer must differ in prefix.
self._events_file_count += 1
file_path = "%s.%d.%d" % (
    os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT),
    time.time(), self._events_file_count)
logger.info("Creating events file %s", file_path)
return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path))
|
<SYSTEM_TASK:>
Obtains the names of debugger-related events files within the directory.
<END_TASK>
<USER_TASK:>
Description:
def _fetch_events_files_on_disk(self):
"""Obtains the names of debugger-related events files within the directory.
Returns:
The names of the debugger-related events files written to disk. The names
are sorted in increasing events file index.
"""
|
all_files = tf.io.gfile.listdir(self._events_directory)
relevant_files = [
    file_name for file_name in all_files
    if _DEBUGGER_EVENTS_FILE_NAME_REGEX.match(file_name)
]
return sorted(relevant_files, key=self._obtain_file_index)
|
<SYSTEM_TASK:>
Re-export all symbols from the original tf.summary.
<END_TASK>
<USER_TASK:>
Description:
def reexport_tf_summary():
"""Re-export all symbols from the original tf.summary.
This function finds the original tf.summary V2 API and re-exports all the
symbols from it within this module as well, so that when this module is
patched into the TF API namespace as the new tf.summary, the effect is an
overlay that just adds TensorBoard-provided symbols to the module.
Finding the original tf.summary V2 API module reliably is a challenge, since
this code runs *during* the overall TF API import process and depending on
the order of imports (which is subject to change), different parts of the API
may or may not be defined at the point in time we attempt to access them. This
code also may be inserted into two places in the API (tf and tf.compat.v2)
and may be re-executed multiple times even for the same place in the API (due
to the TF module import system not populating sys.modules properly), so it
needs to be robust to many different scenarios.
The one constraint we can count on is that everywhere this module is loaded
(via the component_api_helper mechanism in TF), it's going to be the 'summary'
submodule of a larger API package that already has a 'summary' attribute
that contains the TF-only summary API symbols we need to re-export. This
may either be the original TF-only summary module (the first time we load
this module) or a pre-existing copy of this module (if we're re-loading this
module again). We don't actually need to differentiate those two cases,
because it's okay if we re-import our own TensorBoard-provided symbols; they
will just be overwritten later on in this file.
So given that guarantee, the approach we take is to first attempt to locate
a TF V2 API package that already has a 'summary' attribute (most likely this
is the parent package into which we're being imported, but not necessarily),
and then do the dynamic version of "from tf_api_package.summary import *".
Lastly, this logic is encapsulated in a function to avoid symbol leakage.
"""
|
import sys # pylint: disable=g-import-not-at-top
# API packages to check for the original V2 summary API, in preference order
# to avoid going "under the hood" to the _api packages unless necessary.
packages = [
'tensorflow',
'tensorflow.compat.v2',
'tensorflow._api.v2',
'tensorflow._api.v2.compat.v2',
'tensorflow._api.v1.compat.v2',
]
# If we aren't sure we're on V2, don't use tf.summary since it could be V1.
# Note there may be false positives since the __version__ attribute may not be
# defined at this point in the import process.
if not getattr(tf, '__version__', '').startswith('2.'): # noqa: F821
packages.remove('tensorflow')
def dynamic_wildcard_import(module):
"""Implements the logic of "from module import *" for the given module."""
symbols = getattr(module, '__all__', None)
if symbols is None:
symbols = [k for k in module.__dict__.keys() if not k.startswith('_')]
globals().update({symbol: getattr(module, symbol) for symbol in symbols})
notfound = object() # sentinel value
for package_name in packages:
package = sys.modules.get(package_name, notfound)
if package is notfound:
# Either it isn't in this installation at all (e.g. the _api.vX packages
# are only in API version X), it isn't imported yet, or it was imported
# but not inserted into sys.modules under its user-facing name (for the
# non-'_api' packages), at which point we continue down the list to look
# "under the hood" for it via its '_api' package name.
continue
module = getattr(package, 'summary', None)
if module is None:
# This happens if the package hasn't been fully imported yet. For example,
# the 'tensorflow' package won't yet have 'summary' attribute if we are
# loading this code via the 'tensorflow.compat...' path and 'compat' is
# imported before 'summary' in the 'tensorflow' __init__.py file.
continue
# Success, we hope. Import all the public symbols into this module.
dynamic_wildcard_import(module)
return
|
<SYSTEM_TASK:>
Encode `image` to PNG on `thread_count` threads in parallel.
<END_TASK>
<USER_TASK:>
Description:
def bench(image, thread_count):
"""Encode `image` to PNG on `thread_count` threads in parallel.
Returns:
A `float` representing number of seconds that it takes all threads
to finish encoding `image`.
"""
|
threads = [threading.Thread(target=lambda: encoder.encode_png(image))
for _ in xrange(thread_count)]
start_time = datetime.datetime.now()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
end_time = datetime.datetime.now()
delta = (end_time - start_time).total_seconds()
return delta
|
<SYSTEM_TASK:>
Generate a square RGB test image of the given side length.
<END_TASK>
<USER_TASK:>
Description:
def _image_of_size(image_size):
"""Generate a square RGB test image of the given side length."""
|
return np.random.uniform(0, 256, [image_size, image_size, 3]).astype(np.uint8)
|
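A minimal usage sketch combining the two helpers above; it assumes the module-level `encoder`, `threading`, `datetime`, and `numpy` imports that `bench` and `_image_of_size` rely on are in place, and the sizes/thread counts are illustrative.

image = _image_of_size(256)  # 256x256x3 random RGB image
for thread_count in (1, 2, 4):
    seconds = bench(image, thread_count)
    print('%d thread(s): %.4f s to encode' % (thread_count, seconds))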
<SYSTEM_TASK:>
Format a line of a table.
<END_TASK>
<USER_TASK:>
Description:
def _format_line(headers, fields):
"""Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string.
"""
|
assert len(fields) == len(headers), (fields, headers)
fields = ["%2.4f" % field if isinstance(field, float) else str(field)
for field in fields]
return ' '.join(' ' * max(0, len(header) - len(field)) + field
for (header, field) in zip(headers, fields))
|
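For illustration, the formatter above can print an aligned table; passing `headers` as the fields yields the header row, and float fields are rendered with '%2.4f'.

headers = ['size', 'threads', 'seconds']
print(_format_line(headers, headers))           # header row
print(_format_line(headers, [256, 4, 0.1234]))  # data row, right-aligned to header widths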
<SYSTEM_TASK:>
Extract all nodes with gated-gRPC debug ops attached.
<END_TASK>
<USER_TASK:>
Description:
def get_gated_grpc_tensors(self, matching_debug_op=None):
"""Extract all nodes with gated-gRPC debug ops attached.
Uses cached values if available.
This method is thread-safe.
Args:
matching_debug_op: Return only tensors and nodes with the specified
debug op name attached (optional). If `None`, will extract only
`DebugIdentity` debug ops.
Returns:
A list of (node_name, op_type, output_slot, debug_op) tuples.
"""
|
with self._grpc_gated_lock:
matching_debug_op = matching_debug_op or 'DebugIdentity'
if matching_debug_op not in self._grpc_gated_tensors:
# First, construct a map from node name to op type.
node_name_to_op_type = dict(
(node.name, node.op) for node in self._graph_def.node)
# Second, populate the output list.
gated = []
for node in self._graph_def.node:
if node.op == matching_debug_op:
for attr_key in node.attr:
if attr_key == 'gated_grpc' and node.attr[attr_key].b:
node_name, output_slot, _, debug_op = (
debug_graphs.parse_debug_node_name(node.name))
gated.append(
(node_name, node_name_to_op_type[node_name], output_slot,
debug_op))
break
self._grpc_gated_tensors[matching_debug_op] = gated
return self._grpc_gated_tensors[matching_debug_op]
|
<SYSTEM_TASK:>
Expand the base name if there are node names nested under the node.
<END_TASK>
<USER_TASK:>
Description:
def maybe_base_expanded_node_name(self, node_name):
"""Expand the base name if there are node names nested under the node.
For example, if there are two nodes in the graph, "a" and "a/read", then
calling this function on "a" will give "a/(a)", a form that points at
a leaf node in the nested TensorBoard graph. Calling this function on
"a/read" will just return "a/read", because there is no node nested under
it.
This method is thread-safe.
Args:
node_name: Name of the node.
Returns:
Possibly base-expanded node name.
"""
|
with self._node_name_lock:
# Lazily populate the map from original node name to base-expanded ones.
if self._maybe_base_expanded_node_names is None:
self._maybe_base_expanded_node_names = dict()
# Sort all the node names.
sorted_names = sorted(node.name for node in self._graph_def.node)
for i, name in enumerate(sorted_names):
j = i + 1
while j < len(sorted_names) and sorted_names[j].startswith(name):
if sorted_names[j].startswith(name + '/'):
self._maybe_base_expanded_node_names[name] = (
name + '/(' + name.split('/')[-1] + ')')
break
j += 1
return self._maybe_base_expanded_node_names.get(node_name, node_name)
|
<SYSTEM_TASK:>
Load events from every detected run.
<END_TASK>
<USER_TASK:>
Description:
def Reload(self):
"""Load events from every detected run."""
|
logger.info('Beginning DbImportMultiplexer.Reload()')
# Defer event sink creation until needed; this ensures it will only exist in
# the thread that calls Reload(), since DB connections must be thread-local.
if not self._event_sink:
self._event_sink = self._CreateEventSink()
# Use collections.deque() for speed when we don't need blocking since it
# also has thread-safe appends/pops.
loader_queue = collections.deque(six.itervalues(self._run_loaders))
loader_delete_queue = collections.deque()
def batch_generator():
while True:
try:
loader = loader_queue.popleft()
except IndexError:
return
try:
for batch in loader.load_batches():
yield batch
except directory_watcher.DirectoryDeletedError:
loader_delete_queue.append(loader)
except (OSError, IOError) as e:
logger.error('Unable to load run %r: %s', loader.subdir, e)
num_threads = min(self._max_reload_threads, len(self._run_loaders))
if num_threads <= 1:
logger.info('Importing runs serially on a single thread')
for batch in batch_generator():
self._event_sink.write_batch(batch)
else:
output_queue = queue.Queue()
sentinel = object()
def producer():
try:
for batch in batch_generator():
output_queue.put(batch)
finally:
output_queue.put(sentinel)
logger.info('Starting %d threads to import runs', num_threads)
for i in xrange(num_threads):
thread = threading.Thread(target=producer, name='Loader %d' % i)
thread.daemon = True
thread.start()
num_live_threads = num_threads
while num_live_threads > 0:
output = output_queue.get()
if output == sentinel:
num_live_threads -= 1
continue
self._event_sink.write_batch(output)
for loader in loader_delete_queue:
logger.warn('Deleting loader %r', loader.subdir)
del self._run_loaders[loader.subdir]
logger.info('Finished with DbImportMultiplexer.Reload()')
|
<SYSTEM_TASK:>
Returns a batched event iterator over the run directory event files.
<END_TASK>
<USER_TASK:>
Description:
def load_batches(self):
"""Returns a batched event iterator over the run directory event files."""
|
event_iterator = self._directory_watcher.Load()
while True:
events = []
event_bytes = 0
start = time.time()
for event_proto in event_iterator:
events.append(event_proto)
event_bytes += len(event_proto)
if len(events) >= self._BATCH_COUNT or event_bytes >= self._BATCH_BYTES:
break
elapsed = time.time() - start
logger.debug('RunLoader.load_batch() yielded in %0.3f sec for %s',
elapsed, self._subdir)
if not events:
return
yield _EventBatch(
events=events,
experiment_name=self._experiment_name,
run_name=self._run_name)
|
<SYSTEM_TASK:>
Processes a single tf.Event and records it in tagged_data.
<END_TASK>
<USER_TASK:>
Description:
def _process_event(self, event, tagged_data):
"""Processes a single tf.Event and records it in tagged_data."""
|
event_type = event.WhichOneof('what')
# Handle the most common case first.
if event_type == 'summary':
for value in event.summary.value:
value = data_compat.migrate_value(value)
tag, metadata, values = tagged_data.get(value.tag, (None, None, []))
values.append((event.step, event.wall_time, value.tensor))
if tag is None:
# Store metadata only from the first event.
tagged_data[value.tag] = sqlite_writer.TagData(
value.tag, value.metadata, values)
elif event_type == 'file_version':
pass # TODO: reject file version < 2 (at loader level)
elif event_type == 'session_log':
if event.session_log.status == event_pb2.SessionLog.START:
pass # TODO: implement purging via sqlite writer truncation method
elif event_type in ('graph_def', 'meta_graph_def'):
pass # TODO: support graphs
elif event_type == 'tagged_run_metadata':
pass
|
<SYSTEM_TASK:>
Create a TensorFlow op to group data into histogram buckets.
<END_TASK>
<USER_TASK:>
Description:
def _buckets(data, bucket_count=None):
"""Create a TensorFlow op to group data into histogram buckets.
Arguments:
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int` or scalar `int32` `Tensor`.
Returns:
A `Tensor` of shape `[k, 3]` and type `float64`. The `i`th row is
a triple `[left_edge, right_edge, count]` for a single bucket.
The value of `k` is either `bucket_count` or `1` or `0`.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if bucket_count is None:
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
with tf.name_scope('buckets', values=[data, bucket_count]), \
tf.control_dependencies([tf.assert_scalar(bucket_count),
tf.assert_type(bucket_count, tf.int32)]):
data = tf.reshape(data, shape=[-1]) # flatten
data = tf.cast(data, tf.float64)
is_empty = tf.equal(tf.size(input=data), 0)
def when_empty():
return tf.constant([], shape=(0, 3), dtype=tf.float64)
def when_nonempty():
min_ = tf.reduce_min(input_tensor=data)
max_ = tf.reduce_max(input_tensor=data)
range_ = max_ - min_
is_singular = tf.equal(range_, 0)
def when_nonsingular():
bucket_width = range_ / tf.cast(bucket_count, tf.float64)
offsets = data - min_
bucket_indices = tf.cast(tf.floor(offsets / bucket_width),
dtype=tf.int32)
clamped_indices = tf.minimum(bucket_indices, bucket_count - 1)
one_hots = tf.one_hot(clamped_indices, depth=bucket_count)
bucket_counts = tf.cast(tf.reduce_sum(input_tensor=one_hots, axis=0),
dtype=tf.float64)
edges = tf.linspace(min_, max_, bucket_count + 1)
left_edges = edges[:-1]
right_edges = edges[1:]
return tf.transpose(a=tf.stack(
[left_edges, right_edges, bucket_counts]))
def when_singular():
center = min_
bucket_starts = tf.stack([center - 0.5])
bucket_ends = tf.stack([center + 0.5])
bucket_counts = tf.stack([tf.cast(tf.size(input=data), tf.float64)])
return tf.transpose(
a=tf.stack([bucket_starts, bucket_ends, bucket_counts]))
return tf.cond(is_singular, when_singular, when_nonsingular)
return tf.cond(is_empty, when_empty, when_nonempty)
|
<SYSTEM_TASK:>
Create a legacy histogram summary op.
<END_TASK>
<USER_TASK:>
Description:
def op(name,
data,
bucket_count=None,
display_name=None,
description=None,
collections=None):
"""Create a legacy histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
tensor = _buckets(data, bucket_count=bucket_count)
return tf.summary.tensor_summary(name='histogram_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata)
|
<SYSTEM_TASK:>
Create a legacy histogram summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def pb(name, data, bucket_count=None, display_name=None, description=None):
"""Create a legacy histogram summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A `np.array` or array-like form of any shape. Must have type
castable to `float`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if bucket_count is None:
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
data = np.array(data).flatten().astype(float)
if data.size == 0:
buckets = np.array([]).reshape((0, 3))
else:
min_ = np.min(data)
max_ = np.max(data)
range_ = max_ - min_
if range_ == 0:
center = min_
buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]])
else:
bucket_width = range_ / bucket_count
offsets = data - min_
bucket_indices = np.floor(offsets / bucket_width).astype(int)
clamped_indices = np.minimum(bucket_indices, bucket_count - 1)
one_hots = (np.array([clamped_indices]).transpose()
== np.arange(0, bucket_count)) # broadcast
assert one_hots.shape == (data.size, bucket_count), (
one_hots.shape, (data.size, bucket_count))
bucket_counts = np.sum(one_hots, axis=0)
edges = np.linspace(min_, max_, bucket_count + 1)
left_edges = edges[:-1]
right_edges = edges[1:]
buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()
tensor = tf.make_tensor_proto(buckets, dtype=tf.float64)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/histogram_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary
|
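A short usage sketch for the protobuf-based variant above; it assumes the module-level `np` import and `metadata`/`summary_v2` helpers are available, and the writer call in the comment is only illustrative.

import numpy as np

values = np.random.normal(loc=0.0, scale=1.0, size=1000)
histogram_summary = pb('weights', values, bucket_count=30,
                       description='Sampled layer weights.')
# The resulting `tf.Summary` proto can then be handed to an event writer,
# e.g. writer.add_summary(histogram_summary, global_step=0).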
<SYSTEM_TASK:>
Add a tensor to the watch store.
<END_TASK>
<USER_TASK:>
Description:
def add(self, value):
"""Add a tensor the watch store."""
|
if self._disposed:
raise ValueError(
'Cannot add value: this _WatchStore instance is already disposed')
self._data.append(value)
if hasattr(value, 'nbytes'):
self._in_mem_bytes += value.nbytes
self._ensure_bytes_limits()
|
<SYSTEM_TASK:>
Get the number of values discarded due to exceeding both limits.
<END_TASK>
<USER_TASK:>
Description:
def num_discarded(self):
"""Get the number of values discarded due to exceeding both limits."""
|
if not self._data:
return 0
n = 0
while n < len(self._data):
if not isinstance(self._data[n], _TensorValueDiscarded):
break
n += 1
return n
|
<SYSTEM_TASK:>
Query the values at given time indices.
<END_TASK>
<USER_TASK:>
Description:
def query(self, time_indices):
"""Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded).
"""
|
if self._disposed:
raise ValueError(
'Cannot query: this _WatchStore instance is already disposed')
if not isinstance(time_indices, (tuple, list)):
time_indices = [time_indices]
output = []
for time_index in time_indices:
if isinstance(self._data[time_index], _TensorValueDiscarded):
output.append(None)
else:
data_item = self._data[time_index]
if (hasattr(data_item, 'dtype') and
tensor_helper.translate_dtype(data_item.dtype) == 'string'):
_, _, data_item = tensor_helper.array_view(data_item)
data_item = np.array(
tensor_helper.process_buffers_for_display(data_item),
dtype=np.object)
output.append(data_item)
return output
|
<SYSTEM_TASK:>
Add a tensor value.
<END_TASK>
<USER_TASK:>
Description:
def add(self, watch_key, tensor_value):
"""Add a tensor value.
Args:
watch_key: A string representing the debugger tensor watch, e.g.,
'Dense_1/BiasAdd:0:DebugIdentity'.
tensor_value: The value of the tensor as a numpy.ndarray.
"""
|
if watch_key not in self._tensor_data:
self._tensor_data[watch_key] = _WatchStore(
watch_key,
mem_bytes_limit=self._watch_mem_bytes_limit)
self._tensor_data[watch_key].add(tensor_value)
|
<SYSTEM_TASK:>
Query tensor store for a given watch_key.
<END_TASK>
<USER_TASK:>
Description:
def query(self,
watch_key,
time_indices=None,
slicing=None,
mapping=None):
"""Query tensor store for a given watch_key.
Args:
watch_key: The watch key to query.
time_indices: A numpy-style slicing string for time indices. E.g.,
`-1`, `:-2`, `[::2]`. If not provided (`None`), will use -1.
slicing: A numpy-style slicing string for individual time steps.
mapping: A mapping string or a list of them. Supported mappings:
`{None, 'image/png', 'health-pill'}`.
Returns:
The potentially sliced values, as a nested `list` of values or their
mapped format.
Raises:
ValueError: If the shape of the sliced array is incompatible with mapping
mode. Or if the mapping type is invalid.
"""
|
if watch_key not in self._tensor_data:
raise KeyError("watch_key not found: %s" % watch_key)
if time_indices is None:
time_indices = '-1'
time_slicing = tensor_helper.parse_time_indices(time_indices)
all_time_indices = list(range(self._tensor_data[watch_key].num_total()))
sliced_time_indices = all_time_indices[time_slicing]
if not isinstance(sliced_time_indices, list):
sliced_time_indices = [sliced_time_indices]
recombine_and_map = False
step_mapping = mapping
if len(sliced_time_indices) > 1 and mapping not in (None, ):
recombine_and_map = True
step_mapping = None
output = []
for index in sliced_time_indices:
value = self._tensor_data[watch_key].query(index)[0]
if (value is not None and
not isinstance(value, debug_data.InconvertibleTensorProto)):
output.append(tensor_helper.array_view(
value, slicing=slicing, mapping=step_mapping)[2])
else:
output.append(None)
if recombine_and_map:
if mapping == 'image/png':
output = tensor_helper.array_to_base64_png(output)
elif mapping and mapping != 'none':
logger.warn(
'Unsupported mapping mode after recombining time steps: %s',
mapping)
return output
|
<SYSTEM_TASK:>
Obtains the health pills for a run sampled by the event multiplexer.
<END_TASK>
<USER_TASK:>
Description:
def _obtain_sampled_health_pills(self, run, node_names):
"""Obtains the health pills for a run sampled by the event multiplexer.
This is much faster than the alternative path of reading health pills from
disk.
Args:
run: The run to fetch health pills for.
node_names: A list of node names for which to retrieve health pills.
Returns:
A dictionary mapping from node name to a list of
event_accumulator.HealthPillEvents.
"""
|
runs_to_tags_to_content = self._event_multiplexer.PluginRunToTagToContent(
constants.DEBUGGER_PLUGIN_NAME)
if run not in runs_to_tags_to_content:
# The run lacks health pills.
return {}
# This is also a mapping between node name and plugin content because this
# plugin tags by node name.
tags_to_content = runs_to_tags_to_content[run]
mapping = {}
for node_name in node_names:
if node_name not in tags_to_content:
# This node lacks health pill data.
continue
health_pills = []
for tensor_event in self._event_multiplexer.Tensors(run, node_name):
json_string = tags_to_content[node_name]
try:
content_object = json.loads(tf.compat.as_text(json_string))
device_name = content_object['device']
output_slot = content_object['outputSlot']
health_pills.append(
self._tensor_proto_to_health_pill(tensor_event, node_name,
device_name, output_slot))
except (KeyError, ValueError) as e:
logger.error('Could not determine device from JSON string '
'%r: %r', json_string, e)
mapping[node_name] = health_pills
return mapping
|
<SYSTEM_TASK:>
Converts an event_accumulator.TensorEvent to a HealthPillEvent.
<END_TASK>
<USER_TASK:>
Description:
def _tensor_proto_to_health_pill(self, tensor_event, node_name, device,
output_slot):
"""Converts an event_accumulator.TensorEvent to a HealthPillEvent.
Args:
tensor_event: The event_accumulator.TensorEvent to convert.
node_name: The name of the node (without the output slot).
device: The device.
output_slot: The integer output slot this health pill is relevant to.
Returns:
A HealthPillEvent.
"""
|
return self._process_health_pill_value(
wall_time=tensor_event.wall_time,
step=tensor_event.step,
device_name=device,
output_slot=output_slot,
node_name=node_name,
tensor_proto=tensor_event.tensor_proto)
|
<SYSTEM_TASK:>
Reads disk to obtain the health pills for a run at a specific step.
<END_TASK>
<USER_TASK:>
Description:
def _obtain_health_pills_at_step(self, events_directory, node_names, step):
"""Reads disk to obtain the health pills for a run at a specific step.
This could be much slower than the alternative path of just returning all
health pills sampled by the event multiplexer. For large graphs and large
step values (in the thousands), this call could take tens of minutes.
Args:
events_directory: The directory containing events for the desired run.
node_names: A list of node names for which to retrieve health pills.
step: The step to obtain health pills for.
Returns:
A dictionary mapping from node name to a list of health pill objects (see
docs for _serve_health_pills_handler for properties of those objects).
Raises:
IOError: If no files with health pill events could be found.
"""
|
# Obtain all files with debugger-related events.
pattern = os.path.join(events_directory, _DEBUGGER_EVENTS_GLOB_PATTERN)
file_paths = glob.glob(pattern)
if not file_paths:
raise IOError(
'No events files found that matches the pattern %r.' % pattern)
# Sort by name (and thus by timestamp).
file_paths.sort()
mapping = collections.defaultdict(list)
node_name_set = frozenset(node_names)
for file_path in file_paths:
should_stop = self._process_health_pill_event(
node_name_set, mapping, step, file_path)
if should_stop:
break
return mapping
|
<SYSTEM_TASK:>
Creates health pills out of data in an event.
<END_TASK>
<USER_TASK:>
Description:
def _process_health_pill_event(self, node_name_set, mapping, target_step,
file_path):
"""Creates health pills out of data in an event.
Creates health pills out of the event and adds them to the mapping.
Args:
node_name_set: A set of node names that are relevant.
mapping: The mapping from node name to HealthPillEvents.
This object may be destructively modified.
target_step: The target step at which to obtain health pills.
file_path: The path to the file with health pill events.
Returns:
Whether we should stop reading events because future events are no longer
relevant.
"""
|
events_loader = event_file_loader.EventFileLoader(file_path)
for event in events_loader.Load():
if not event.HasField('summary'):
logger.warn(
'An event in a debugger events file lacks a summary.')
continue
if event.step < target_step:
# This event is not of the relevant step. We perform this check
# first because the majority of events will be eliminated from
# consideration by this check.
continue
if event.step > target_step:
# We have passed the relevant step. No need to read more events.
return True
for value in event.summary.value:
# Obtain the device name from the metadata.
summary_metadata = value.metadata
plugin_data = summary_metadata.plugin_data
if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:
try:
content = json.loads(
tf.compat.as_text(summary_metadata.plugin_data.content))
except ValueError as err:
logger.warn(
'Could not parse the JSON string containing data for '
'the debugger plugin: %r, %r', plugin_data.content, err)
continue
device_name = content['device']
output_slot = content['outputSlot']
else:
logger.error(
'No debugger plugin data found for event with tag %s and node '
'name %s.', value.tag, value.node_name)
continue
if not value.HasField('tensor'):
logger.warn(
'An event in a debugger events file lacks a tensor value.')
continue
match = re.match(r'^(.*):(\d+):DebugNumericSummary$', value.node_name)
if not match:
logger.warn(
('An event with a health pill has an invalid watch (i.e., an '
'unexpected debug op): %r'), value.node_name)
return None
health_pill = self._process_health_pill_value(
wall_time=event.wall_time,
step=event.step,
device_name=device_name,
output_slot=output_slot,
node_name=match.group(1),
tensor_proto=value.tensor,
node_name_set=node_name_set)
if not health_pill:
continue
mapping[health_pill.node_name].append(health_pill)
# Keep reading events.
return False
|
<SYSTEM_TASK:>
Creates a HealthPillEvent containing various properties of a health pill.
<END_TASK>
<USER_TASK:>
Description:
def _process_health_pill_value(self,
wall_time,
step,
device_name,
output_slot,
node_name,
tensor_proto,
node_name_set=None):
"""Creates a HealthPillEvent containing various properties of a health pill.
Args:
wall_time: The wall time in seconds.
step: The session run step of the event.
device_name: The name of the node's device.
output_slot: The numeric output slot.
node_name: The name of the node (without the output slot).
tensor_proto: A tensor proto of data.
node_name_set: An optional set of node names that are relevant. If not
provided, no filtering by relevance occurs.
Returns:
An event_accumulator.HealthPillEvent. Or None if one could not be created.
"""
|
if node_name_set and node_name not in node_name_set:
# This event is not relevant.
return None
# Since we seek health pills for a specific step, this function
# returns 1 health pill per node per step. The wall time is the
# seconds since the epoch.
elements = list(tensor_util.make_ndarray(tensor_proto))
return HealthPillEvent(
wall_time=wall_time,
step=step,
device_name=device_name,
output_slot=output_slot,
node_name=node_name,
dtype=repr(tf.as_dtype(elements[12])),
shape=elements[14:],
value=elements)
|
<SYSTEM_TASK:>
Convert a `TensorBoardInfo` to string form to be stored on disk.
<END_TASK>
<USER_TASK:>
Description:
def _info_to_string(info):
"""Convert a `TensorBoardInfo` to string form to be stored on disk.
The format returned by this function is opaque and should only be
interpreted by `_info_from_string`.
Args:
info: A valid `TensorBoardInfo` object.
Raises:
ValueError: If any field on `info` is not of the correct type.
Returns:
A string representation of the provided `TensorBoardInfo`.
"""
|
for key in _TENSORBOARD_INFO_FIELDS:
field_type = _TENSORBOARD_INFO_FIELDS[key]
if not isinstance(getattr(info, key), field_type.runtime_type):
raise ValueError(
"expected %r of type %s, but found: %r" %
(key, field_type.runtime_type, getattr(info, key))
)
if info.version != version.VERSION:
raise ValueError(
"expected 'version' to be %r, but found: %r" %
(version.VERSION, info.version)
)
json_value = {
k: _TENSORBOARD_INFO_FIELDS[k].serialize(getattr(info, k))
for k in _TENSORBOARD_INFO_FIELDS
}
return json.dumps(json_value, sort_keys=True, indent=4)
|
<SYSTEM_TASK:>
Parse a `TensorBoardInfo` object from its string representation.
<END_TASK>
<USER_TASK:>
Description:
def _info_from_string(info_string):
"""Parse a `TensorBoardInfo` object from its string representation.
Args:
info_string: A string representation of a `TensorBoardInfo`, as
produced by a previous call to `_info_to_string`.
Returns:
A `TensorBoardInfo` value.
Raises:
ValueError: If the provided string is not valid JSON, or if it does
not represent a JSON object with a "version" field whose value is
`tensorboard.version.VERSION`, or if it has the wrong set of
fields, or if at least one field is of invalid type.
"""
|
try:
json_value = json.loads(info_string)
except ValueError:
raise ValueError("invalid JSON: %r" % (info_string,))
if not isinstance(json_value, dict):
raise ValueError("not a JSON object: %r" % (json_value,))
if json_value.get("version") != version.VERSION:
raise ValueError("incompatible version: %r" % (json_value,))
expected_keys = frozenset(_TENSORBOARD_INFO_FIELDS)
actual_keys = frozenset(json_value)
if expected_keys != actual_keys:
raise ValueError(
"bad keys on TensorBoardInfo (missing: %s; extraneous: %s)"
% (expected_keys - actual_keys, actual_keys - expected_keys)
)
# Validate and deserialize fields.
for key in _TENSORBOARD_INFO_FIELDS:
field_type = _TENSORBOARD_INFO_FIELDS[key]
if not isinstance(json_value[key], field_type.serialized_type):
raise ValueError(
"expected %r of type %s, but found: %r" %
(key, field_type.serialized_type, json_value[key])
)
json_value[key] = field_type.deserialize(json_value[key])
return TensorBoardInfo(**json_value)
|
<SYSTEM_TASK:>
Compute a `TensorBoardInfo.cache_key` field.
<END_TASK>
<USER_TASK:>
Description:
def cache_key(working_directory, arguments, configure_kwargs):
"""Compute a `TensorBoardInfo.cache_key` field.
The format returned by this function is opaque. Clients may only
inspect it by comparing it for equality with other results from this
function.
Args:
working_directory: The directory from which TensorBoard was launched
and relative to which paths like `--logdir` and `--db` are
resolved.
arguments: The command-line args to TensorBoard, as `sys.argv[1:]`.
Should be a list (or tuple), not an unparsed string. If you have a
raw shell command, use `shlex.split` before passing it to this
function.
configure_kwargs: A dictionary of additional argument values to
override the textual `arguments`, with the same semantics as in
`tensorboard.program.TensorBoard.configure`. May be an empty
dictionary.
Returns:
A string such that if two (prospective or actual) TensorBoard
invocations have the same cache key then it is safe to use one in
place of the other. The converse is not guaranteed: it is often safe
to change the order of TensorBoard arguments, or to explicitly set
them to their default values, or to move them between `arguments`
and `configure_kwargs`, but such invocations may yield distinct
cache keys.
"""
|
if not isinstance(arguments, (list, tuple)):
raise TypeError(
"'arguments' should be a list of arguments, but found: %r "
"(use `shlex.split` if given a string)"
% (arguments,)
)
datum = {
"working_directory": working_directory,
"arguments": arguments,
"configure_kwargs": configure_kwargs,
}
raw = base64.b64encode(
json.dumps(datum, sort_keys=True, separators=(",", ":")).encode("utf-8")
)
# `raw` is of type `bytes`, even though it only contains ASCII
# characters; we want it to be `str` in both Python 2 and 3.
return str(raw.decode("ascii"))
|
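A hedged example of computing a cache key for a prospective invocation; the flag values are illustrative, and the module-level `json`/`base64` imports used above are assumed.

import os

key = cache_key(
    working_directory=os.getcwd(),
    arguments=['--logdir', 'runs/demo', '--port', '6006'],
    configure_kwargs={},
)
# The returned string is opaque; it is only meaningful for equality checks
# against the `cache_key` field of running TensorBoardInfo entries.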
<SYSTEM_TASK:>
Get path to directory in which to store info files.
<END_TASK>
<USER_TASK:>
Description:
def _get_info_dir():
"""Get path to directory in which to store info files.
The directory returned by this function is "owned" by this module. If
the contents of the directory are modified other than via the public
functions of this module, subsequent behavior is undefined.
The directory will be created if it does not exist.
"""
|
path = os.path.join(tempfile.gettempdir(), ".tensorboard-info")
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
else:
os.chmod(path, 0o777)
return path
|
<SYSTEM_TASK:>
Write TensorBoardInfo to the current process's info file.
<END_TASK>
<USER_TASK:>
Description:
def write_info_file(tensorboard_info):
"""Write TensorBoardInfo to the current process's info file.
This should be called by `main` once the server is ready. When the
server shuts down, `remove_info_file` should be called.
Args:
tensorboard_info: A valid `TensorBoardInfo` object.
Raises:
ValueError: If any field on `info` is not of the correct type.
"""
|
payload = "%s\n" % _info_to_string(tensorboard_info)
with open(_get_info_file_path(), "w") as outfile:
outfile.write(payload)
|
<SYSTEM_TASK:>
Remove the current process's TensorBoardInfo file, if it exists.
<END_TASK>
<USER_TASK:>
Description:
def remove_info_file():
"""Remove the current process's TensorBoardInfo file, if it exists.
If the file does not exist, no action is taken and no error is raised.
"""
|
try:
os.unlink(_get_info_file_path())
except OSError as e:
if e.errno == errno.ENOENT:
# The user may have wiped their temporary directory or something.
# Not a problem: we're already in the state that we want to be in.
pass
else:
raise
|
<SYSTEM_TASK:>
Return TensorBoardInfo values for running TensorBoard processes.
<END_TASK>
<USER_TASK:>
Description:
def get_all():
"""Return TensorBoardInfo values for running TensorBoard processes.
This function may not provide a perfect snapshot of the set of running
processes. Its result set may be incomplete if the user has cleaned
their /tmp/ directory while TensorBoard processes are running. It may
contain extraneous entries if TensorBoard processes exited uncleanly
(e.g., with SIGKILL or SIGQUIT).
Returns:
A fresh list of `TensorBoardInfo` objects.
"""
|
info_dir = _get_info_dir()
results = []
for filename in os.listdir(info_dir):
filepath = os.path.join(info_dir, filename)
try:
with open(filepath) as infile:
contents = infile.read()
except IOError as e:
if e.errno == errno.EACCES:
# May have been written by this module in a process whose
# `umask` includes some bits of 0o444.
continue
else:
raise
try:
info = _info_from_string(contents)
except ValueError:
tb_logging.get_logger().warning(
"invalid info file: %r",
filepath,
exc_info=True,
)
else:
results.append(info)
return results
|
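A small sketch listing running instances via the function above; `pid` and `port` are `TensorBoardInfo` fields referenced elsewhere in this module.

for info in get_all():
    print('TensorBoard pid=%d serving on port %d' % (info.pid, info.port))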
<SYSTEM_TASK:>
Start a new TensorBoard instance, or reuse a compatible one.
<END_TASK>
<USER_TASK:>
Description:
def start(arguments, timeout=datetime.timedelta(seconds=60)):
"""Start a new TensorBoard instance, or reuse a compatible one.
If the cache key determined by the provided arguments and the current
working directory (see `cache_key`) matches the cache key of a running
TensorBoard process (see `get_all`), that process will be reused.
Otherwise, a new TensorBoard process will be spawned with the provided
arguments, using the `tensorboard` binary from the system path.
Args:
arguments: List of strings to be passed as arguments to
`tensorboard`. (If you have a raw command-line string, see
`shlex.split`.)
timeout: `datetime.timedelta` object describing how long to wait for
the subprocess to initialize a TensorBoard server and write its
`TensorBoardInfo` file. If the info file is not written within
this time period, `start` will assume that the subprocess is stuck
in a bad state, and will give up on waiting for it and return a
`StartTimedOut` result. Note that in such a case the subprocess
will not be killed. Default value is 60 seconds.
Returns:
A `StartReused`, `StartLaunched`, `StartFailed`, or `StartTimedOut`
object.
"""
|
match = _find_matching_instance(
cache_key(
working_directory=os.getcwd(),
arguments=arguments,
configure_kwargs={},
),
)
if match:
return StartReused(info=match)
(stdout_fd, stdout_path) = tempfile.mkstemp(prefix=".tensorboard-stdout-")
(stderr_fd, stderr_path) = tempfile.mkstemp(prefix=".tensorboard-stderr-")
start_time_seconds = time.time()
try:
p = subprocess.Popen(
["tensorboard"] + arguments,
stdout=stdout_fd,
stderr=stderr_fd,
)
finally:
os.close(stdout_fd)
os.close(stderr_fd)
poll_interval_seconds = 0.5
end_time_seconds = start_time_seconds + timeout.total_seconds()
while time.time() < end_time_seconds:
time.sleep(poll_interval_seconds)
subprocess_result = p.poll()
if subprocess_result is not None:
return StartFailed(
exit_code=subprocess_result,
stdout=_maybe_read_file(stdout_path),
stderr=_maybe_read_file(stderr_path),
)
for info in get_all():
if info.pid == p.pid and info.start_time >= start_time_seconds:
return StartLaunched(info=info)
else:
return StartTimedOut(pid=p.pid)
|
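A usage sketch for `start`; the result types are the ones constructed above, and the `--logdir` value is illustrative.

result = start(['--logdir', 'runs/demo'])
if isinstance(result, (StartReused, StartLaunched)):
    print('TensorBoard available on port %d' % result.info.port)
elif isinstance(result, StartFailed):
    print('TensorBoard exited with code %r' % result.exit_code)
else:  # StartTimedOut
    print('Gave up waiting on pid %d' % result.pid)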
<SYSTEM_TASK:>
Find a running TensorBoard instance compatible with the cache key.
<END_TASK>
<USER_TASK:>
Description:
def _find_matching_instance(cache_key):
"""Find a running TensorBoard instance compatible with the cache key.
Returns:
A `TensorBoardInfo` object, or `None` if none matches the cache key.
"""
|
infos = get_all()
candidates = [info for info in infos if info.cache_key == cache_key]
for candidate in sorted(candidates, key=lambda x: x.port):
# TODO(@wchargin): Check here that the provided port is still live.
return candidate
return None
|
<SYSTEM_TASK:>
Read the given file, if it exists.
<END_TASK>
<USER_TASK:>
Description:
def _maybe_read_file(filename):
"""Read the given file, if it exists.
Args:
filename: A path to a file.
Returns:
A string containing the file contents, or `None` if the file does
not exist.
"""
|
try:
with open(filename) as infile:
return infile.read()
except IOError as e:
if e.errno == errno.ENOENT:
return None
|
<SYSTEM_TASK:>
Whether this plugin is active and has any profile data to show.
<END_TASK>
<USER_TASK:>
Description:
def is_active(self):
"""Whether this plugin is active and has any profile data to show.
Detecting profile data is expensive, so this process runs asynchronously
and the value reported by this method is the cached value and may be stale.
Returns:
Whether any run has profile data.
"""
|
# If we are already active, we remain active and don't recompute this.
# Otherwise, try to acquire the lock without blocking; if we get it and
# we're still not active, launch a thread to check if we're active and
# release the lock once the computation is finished. Either way, this
# method returns the current cached value to avoid blocking.
if not self._is_active and self._is_active_lock.acquire(False):
if self._is_active:
self._is_active_lock.release()
else:
def compute_is_active():
self._is_active = any(self.generate_run_to_tools())
self._is_active_lock.release()
new_thread = threading.Thread(
target=compute_is_active,
name='ProfilePluginIsActiveThread')
new_thread.start()
return self._is_active
|
<SYSTEM_TASK:>
Helper that maps a frontend run name to a profile "run" directory.
<END_TASK>
<USER_TASK:>
Description:
def _run_dir(self, run):
"""Helper that maps a frontend run name to a profile "run" directory.
The frontend run name consists of the TensorBoard run name (aka the relative
path from the logdir root to the directory containing the data) path-joined
to the Profile plugin's "run" concept (which is a subdirectory of the
plugins/profile directory representing an individual run of the tool), with
the special case that if the TensorBoard run is the logdir root (which is
the run named '.'), then only the Profile plugin "run" name is used, for backwards
compatibility.
To convert back to the actual run directory, we apply the following
transformation:
- If the run name doesn't contain '/', prepend './'
- Split on the rightmost instance of '/'
- Assume the left side is a TensorBoard run name and map it to a directory
path using EventMultiplexer.RunPaths(), then map that to the profile
plugin directory via PluginDirectory()
- Assume the right side is a Profile plugin "run" and path-join it to
the preceding path to get the final directory
Args:
run: the frontend run name, as described above, e.g. train/run1.
Returns:
The resolved directory path, e.g. /logdir/train/plugins/profile/run1.
"""
|
run = run.rstrip('/')
if '/' not in run:
run = './' + run
tb_run_name, _, profile_run_name = run.rpartition('/')
tb_run_directory = self.multiplexer.RunPaths().get(tb_run_name)
if tb_run_directory is None:
# Check if logdir is a directory to handle case where it's actually a
# multipart directory spec, which this plugin does not support.
if tb_run_name == '.' and tf.io.gfile.isdir(self.logdir):
tb_run_directory = self.logdir
else:
raise RuntimeError("No matching run directory for run %s" % run)
plugin_directory = plugin_asset_util.PluginDirectory(
tb_run_directory, PLUGIN_NAME)
return os.path.join(plugin_directory, profile_run_name)
|
<SYSTEM_TASK:>
Generator for pairs of "run name" and a list of tools for that run.
<END_TASK>
<USER_TASK:>
Description:
def generate_run_to_tools(self):
"""Generator for pairs of "run name" and a list of tools for that run.
The "run name" here is a "frontend run name" - see _run_dir() for the
definition of a "frontend run name" and how it maps to a directory of
profile data for a specific profile "run". The profile plugin concept of
"run" is different from the normal TensorBoard run; each run in this case
represents a single instance of profile data collection, more similar to a
"step" of data in typical TensorBoard semantics. These runs reside in
subdirectories of the plugins/profile directory within any regular
TensorBoard run directory (defined as a subdirectory of the logdir that
contains at least one tfevents file) or within the logdir root directory
itself (even if it contains no tfevents file and would thus not be
considered a normal TensorBoard run, for backwards compatibility).
Within those "profile run directories", there are files in the directory
that correspond to different profiling tools. The file that contains the
profile for a specific tool "x" will have the suffix TOOLS["x"].
Example:
logs/
plugins/
profile/
run1/
hostA.trace
train/
events.out.tfevents.foo
plugins/
profile/
run1/
hostA.trace
hostB.trace
run2/
hostA.trace
validation/
events.out.tfevents.foo
plugins/
profile/
run1/
hostA.trace
Yields:
A sequence of tuples mapping "frontend run names" to lists of tool names
available for those runs. For the above example, this would be:
("run1", ["trace_viewer"])
("train/run1", ["trace_viewer"])
("train/run2", ["trace_viewer"])
("validation/run1", ["trace_viewer"])
"""
|
self.start_grpc_stub_if_necessary()
plugin_assets = self.multiplexer.PluginAssets(PLUGIN_NAME)
tb_run_names_to_dirs = self.multiplexer.RunPaths()
# Ensure that we also check the root logdir, even if it isn't a recognized
# TensorBoard run (i.e. has no tfevents file directly under it), to remain
# backwards compatible with previously profile plugin behavior. Note that we
# check if logdir is a directory to handle case where it's actually a
# multipart directory spec, which this plugin does not support.
if '.' not in plugin_assets and tf.io.gfile.isdir(self.logdir):
tb_run_names_to_dirs['.'] = self.logdir
plugin_assets['.'] = plugin_asset_util.ListAssets(
self.logdir, PLUGIN_NAME)
for tb_run_name, profile_runs in six.iteritems(plugin_assets):
tb_run_dir = tb_run_names_to_dirs[tb_run_name]
tb_plugin_dir = plugin_asset_util.PluginDirectory(
tb_run_dir, PLUGIN_NAME)
for profile_run in profile_runs:
# Remove trailing slash; some filesystem implementations emit this.
profile_run = profile_run.rstrip('/')
if tb_run_name == '.':
frontend_run = profile_run
else:
frontend_run = '/'.join([tb_run_name, profile_run])
profile_run_dir = os.path.join(tb_plugin_dir, profile_run)
if tf.io.gfile.isdir(profile_run_dir):
yield frontend_run, self._get_active_tools(profile_run_dir)
|
<SYSTEM_TASK:>
Returns available hosts for the run and tool in the log directory.
<END_TASK>
<USER_TASK:>
Description:
def host_impl(self, run, tool):
"""Returns available hosts for the run and tool in the log directory.
In the plugin log directory, each directory contains profile data for a
single run (identified by the directory name), and files in the run
directory contains data for different tools and hosts. The file that
contains the profile for a specific tool "x" will have the suffix TOOLS["x"].
Example:
log/
run1/
plugins/
profile/
host1.trace
host2.trace
run2/
plugins/
profile/
host1.trace
host2.trace
Returns:
A list of host names, e.g. ["host1", "host2"] for the example.
"""
|
hosts = []
run_dir = self._run_dir(run)
if not run_dir:
logger.warn("Cannot find asset directory for: %s", run)
return hosts
tool_pattern = '*' + TOOLS[tool]
try:
files = tf.io.gfile.glob(os.path.join(run_dir, tool_pattern))
hosts = [os.path.basename(f).replace(TOOLS[tool], '') for f in files]
except tf.errors.OpError as e:
logger.warn("Cannot read asset directory: %s, OpError %s",
run_dir, e)
return hosts
|
<SYSTEM_TASK:>
Retrieves and processes the tool data for a run and a host.
<END_TASK>
<USER_TASK:>
Description:
def data_impl(self, request):
"""Retrieves and processes the tool data for a run and a host.
Args:
request: XMLHttpRequest
Returns:
A string that can be served to the frontend tool or None if tool,
run or host is invalid.
"""
|
run = request.args.get('run')
tool = request.args.get('tag')
host = request.args.get('host')
run_dir = self._run_dir(run)
# Profile plugin "run" is the last component of run dir.
profile_run = os.path.basename(run_dir)
if tool not in TOOLS:
return None
self.start_grpc_stub_if_necessary()
if tool == 'trace_viewer@' and self.stub is not None:
from tensorflow.contrib.tpu.profiler import tpu_profiler_analysis_pb2
grpc_request = tpu_profiler_analysis_pb2.ProfileSessionDataRequest()
grpc_request.repository_root = run_dir
grpc_request.session_id = profile_run[:-1]
grpc_request.tool_name = 'trace_viewer'
# Remove the trailing dot if present
grpc_request.host_name = host.rstrip('.')
grpc_request.parameters['resolution'] = request.args.get('resolution')
if request.args.get('start_time_ms') is not None:
grpc_request.parameters['start_time_ms'] = request.args.get(
'start_time_ms')
if request.args.get('end_time_ms') is not None:
grpc_request.parameters['end_time_ms'] = request.args.get('end_time_ms')
grpc_response = self.stub.GetSessionToolData(grpc_request)
return grpc_response.output
if tool not in TOOLS:
return None
tool_name = str(host) + TOOLS[tool]
asset_path = os.path.join(run_dir, tool_name)
raw_data = None
try:
with tf.io.gfile.GFile(asset_path, 'rb') as f:
raw_data = f.read()
except tf.errors.NotFoundError:
logger.warn('Asset path %s not found', asset_path)
except tf.errors.OpError as e:
logger.warn("Couldn't read asset path: %s, OpError %s", asset_path, e)
if raw_data is None:
return None
if tool == 'trace_viewer':
return process_raw_trace(raw_data)
if tool in _RAW_DATA_TOOLS:
return raw_data
return None
|
<SYSTEM_TASK:>
Run a temperature simulation.
<END_TASK>
<USER_TASK:>
Description:
def run(logdir, run_name,
initial_temperature, ambient_temperature, heat_coefficient):
"""Run a temperature simulation.
This will simulate an object at temperature `initial_temperature`
sitting at rest in a large room at temperature `ambient_temperature`.
The object has some intrinsic `heat_coefficient`, which indicates
how much thermal conductivity it has: for instance, metals have high
thermal conductivity, while the thermal conductivity of water is low.
Over time, the object's temperature will adjust to match the
temperature of its environment. We'll track the object's temperature,
how far it is from the room's temperature, and how much it changes at
each time step.
Arguments:
logdir: the top-level directory into which to write summary data
run_name: the name of this run; will be created as a subdirectory
under logdir
initial_temperature: float; the object's initial temperature
ambient_temperature: float; the temperature of the enclosing room
heat_coefficient: float; a measure of the object's thermal
conductivity
"""
|
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(0)
with tf.name_scope('temperature'):
# Create a mutable variable to hold the object's temperature, and
# create a scalar summary to track its value over time. The name of
# the summary will appear as "temperature/current" due to the
# name-scope above.
temperature = tf.Variable(tf.constant(initial_temperature),
name='temperature')
summary.op('current', temperature,
display_name='Temperature',
description='The temperature of the object under '
'simulation, in Kelvins.')
# Compute how much the object's temperature differs from that of its
# environment, and track this, too: likewise, as
# "temperature/difference_to_ambient".
ambient_difference = temperature - ambient_temperature
summary.op('difference_to_ambient', ambient_difference,
display_name='Difference to ambient temperature',
description='The difference between the ambient '
'temperature and the temperature of the '
'object under simulation, in Kelvins.')
# Newton suggested that the rate of change of the temperature of an
# object is directly proportional to this `ambient_difference` above,
# where the proportionality constant is what we called the heat
# coefficient. But in real life, not everything is quite so clean, so
# we'll add in some noise. (The value of 50 is arbitrary, chosen to
# make the data look somewhat interesting. :-) )
noise = 50 * tf.random.normal([])
delta = -heat_coefficient * (ambient_difference + noise)
summary.op('delta', delta,
description='The change in temperature from the previous '
'step, in Kelvins.')
# Collect all the scalars that we want to keep track of.
summ = tf.compat.v1.summary.merge_all()
# Now, augment the current temperature by this delta that we computed,
# blocking the assignment on summary collection to avoid race conditions
# and ensure that the summary always reports the pre-update value.
with tf.control_dependencies([summ]):
update_step = temperature.assign_add(delta)
sess = tf.compat.v1.Session()
writer = tf.summary.FileWriter(os.path.join(logdir, run_name))
writer.add_graph(sess.graph)
sess.run(tf.compat.v1.global_variables_initializer())
for step in xrange(STEPS):
# By asking TensorFlow to compute the update step, we force it to
# change the value of the temperature variable. We don't actually
# care about this value, so we discard it; instead, we grab the
# summary data computed along the way.
(s, _) = sess.run([summ, update_step])
writer.add_summary(s, global_step=step)
writer.close()
|
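An illustrative invocation of the simulation above, assuming the module-level `STEPS` constant and `summary` import are available; the numeric values are arbitrary.

run(logdir='/tmp/temperature_demo',
    run_name='slow_cooling',
    initial_temperature=350.0,
    ambient_temperature=280.0,
    heat_coefficient=0.01)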
<SYSTEM_TASK:>
Makes Python object appropriate for JSON serialization.
<END_TASK>
<USER_TASK:>
Description:
def Cleanse(obj, encoding='utf-8'):
"""Makes Python object appropriate for JSON serialization.
- Replaces instances of Infinity/-Infinity/NaN with strings.
- Turns byte strings into unicode strings.
- Turns sets into sorted lists.
- Turns tuples into lists.
Args:
obj: Python data structure.
encoding: Charset used to decode byte strings.
Returns:
Unicode JSON data structure.
"""
|
if isinstance(obj, int):
return obj
elif isinstance(obj, float):
if obj == _INFINITY:
return 'Infinity'
elif obj == _NEGATIVE_INFINITY:
return '-Infinity'
elif math.isnan(obj):
return 'NaN'
else:
return obj
elif isinstance(obj, bytes):
return tf.compat.as_text(obj, encoding)
elif isinstance(obj, (list, tuple)):
return [Cleanse(i, encoding) for i in obj]
elif isinstance(obj, set):
return [Cleanse(i, encoding) for i in sorted(obj)]
elif isinstance(obj, dict):
return {Cleanse(k, encoding): Cleanse(v, encoding) for k, v in obj.items()}
else:
return obj
|
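A quick illustration of the conversions performed by `Cleanse`, assuming the module-level `tf` and `math` imports used above.

cleansed = Cleanse({
    'loss': float('nan'),        # -> 'NaN'
    'best': float('inf'),        # -> 'Infinity'
    b'run_name': b'train',       # byte strings decoded to unicode
    'tags': {'b', 'a'},          # sets become sorted lists: ['a', 'b']
    'shape': (3, 4),             # tuples become lists: [3, 4]
})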
<SYSTEM_TASK:>
Create a legacy text summary op.
<END_TASK>
<USER_TASK:>
Description:
def op(name,
data,
display_name=None,
description=None,
collections=None):
"""Create a legacy text summary op.
Text data summarized via this plugin will be visible in the Text Dashboard
in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
in the strings, and will automatically organize 1D and 2D tensors into tables.
If a tensor with more than 2 dimensions is provided, a 2D subarray will be
displayed along with a warning message. (Note that this behavior is not
intrinsic to the text summary API, but rather to the default TensorBoard text
plugin.)
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
data: A string-type Tensor to summarize. The text must be encoded in UTF-8.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of ops.GraphKeys. The collections to which to add
the summary. Defaults to [GraphKeys.SUMMARIES].
Returns:
A TensorSummary op that is configured so that TensorBoard will recognize
that it contains textual data. The TensorSummary is a scalar `Tensor` of
type `string` which contains `Summary` protobufs.
Raises:
ValueError: If tensor has the wrong type.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
with tf.control_dependencies([tf.assert_type(data, tf.string)]):
return tf.summary.tensor_summary(name='text_summary',
tensor=data,
collections=collections,
summary_metadata=summary_metadata)
|
<SYSTEM_TASK:>
Create a legacy text summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def pb(name, data, display_name=None, description=None):
"""Create a legacy text summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
data: A Python bytestring (of type bytes), or Unicode string. Or a numpy
data array of those types.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Raises:
ValueError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
try:
tensor = tf.make_tensor_proto(data, dtype=tf.string)
except TypeError as e:
raise ValueError(e)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/text_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary
|
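A brief usage sketch for the text summary protobuf above; the tags and strings are illustrative.

greeting = pb('greeting', 'Hello, **markdown** world!',
              description='A friendly message.')
# 1D/2D string arrays are rendered as tables by the Text Dashboard:
table = pb('metrics', [['name', 'value'], ['loss', '0.25']])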
<SYSTEM_TASK:>
Create an event generator for file or directory at given path string.
<END_TASK>
<USER_TASK:>
Description:
def _GeneratorFromPath(path):
"""Create an event generator for file or directory at given path string."""
|
if not path:
raise ValueError('path must be a valid string')
if io_wrapper.IsTensorFlowEventsFile(path):
return event_file_loader.EventFileLoader(path)
else:
return directory_watcher.DirectoryWatcher(
path,
event_file_loader.EventFileLoader,
io_wrapper.IsTensorFlowEventsFile)
|
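A hedged sketch of consuming the generator returned above; the path is hypothetical, and both loader types expose a `Load()` generator of event protos.

loader = _GeneratorFromPath('/tmp/logs/run1')  # single events file or run directory
for event in loader.Load():
    print(event.wall_time, event.step)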
<SYSTEM_TASK:>
Convert the string file_version in event.proto into a float.
<END_TASK>
<USER_TASK:>
Description:
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
|
tokens = file_version.split('brain.Event:')
try:
return float(tokens[-1])
except ValueError:
## This should never happen according to the definition of file_version
## specified in event.proto.
    logger.warning(
('Invalid event.proto file_version. Defaulting to use of '
'out-of-order event.step logic for purging expired events.'))
return -1
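
For reference, the expected input looks like `'brain.Event:2'`; a quick behavioral sketch of the parser above:

assert _ParseFileVersion('brain.Event:2') == 2.0
assert _ParseFileVersion('garbled-version') == -1  # logs a warning and falls back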
|
<SYSTEM_TASK:>
Loads all events added since the last call to `Reload`.
<END_TASK>
<USER_TASK:>
Description:
def Reload(self):
"""Loads all events added since the last call to `Reload`.
If `Reload` was never called, loads all events in the file.
Returns:
The `EventAccumulator`.
"""
|
with self._generator_mutex:
for event in self._generator.Load():
self._ProcessEvent(event)
return self
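
A typical call pattern, assuming this method lives on an `EventAccumulator` constructed from a log path (the path below is hypothetical):

accumulator = EventAccumulator('/tmp/logs/run1')
accumulator.Reload()       # ingest everything written so far
print(accumulator.Tags())  # tags grouped by data type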
|
<SYSTEM_TASK:>
Return the contents of a given plugin asset.
<END_TASK>
<USER_TASK:>
Description:
def RetrievePluginAsset(self, plugin_name, asset_name):
"""Return the contents of a given plugin asset.
Args:
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available.
"""
|
return plugin_asset_util.RetrieveAsset(self.path, plugin_name, asset_name)
|
<SYSTEM_TASK:>
Returns the timestamp in seconds of the first event.
<END_TASK>
<USER_TASK:>
Description:
def FirstEventTimestamp(self):
"""Returns the timestamp in seconds of the first event.
    If the first event has been loaded (either by this method or by `Reload`),
this returns immediately. Otherwise, it will load in the first event. Note
that this means that calling `Reload` will cause this to block until
`Reload` has finished.
Returns:
The timestamp in seconds of the first event that was loaded.
Raises:
ValueError: If no events have been loaded and there were no events found
on disk.
"""
|
if self._first_event_timestamp is not None:
return self._first_event_timestamp
with self._generator_mutex:
try:
event = next(self._generator.Load())
self._ProcessEvent(event)
return self._first_event_timestamp
except StopIteration:
raise ValueError('No event timestamp could be found')
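
This is handy for converting wall-clock timestamps into time elapsed since the run started, without first paying for a full `Reload`; a sketch with a hypothetical logdir and tag:

accumulator = EventAccumulator('/tmp/logs/run1')
start = accumulator.FirstEventTimestamp()  # reads at most one event from disk
accumulator.Reload()
for ev in accumulator.Scalars('loss'):     # 'loss' is a hypothetical tag
  print(ev.step, ev.wall_time - start)     # seconds since the run started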
|
<SYSTEM_TASK:>
Return the graph definition, if there is one.
<END_TASK>
<USER_TASK:>
Description:
def Graph(self):
"""Return the graph definition, if there is one.
If the graph is stored directly, return that. If no graph is stored
directly but a metagraph is stored containing a graph, return that.
Raises:
ValueError: If there is no graph for this run.
Returns:
The `graph_def` proto.
"""
|
graph = graph_pb2.GraphDef()
if self._graph is not None:
graph.ParseFromString(self._graph)
return graph
raise ValueError('There is no graph in this EventAccumulator')
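
Once events are loaded, the result can be inspected like any `GraphDef`; a brief sketch with a hypothetical logdir:

accumulator = EventAccumulator('/tmp/logs/run1')
accumulator.Reload()
try:
  graph_def = accumulator.Graph()
  print('graph has %d nodes' % len(graph_def.node))
except ValueError:
  print('this run logged no graph')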
|
<SYSTEM_TASK:>
Return the metagraph definition, if there is one.
<END_TASK>
<USER_TASK:>
Description:
def MetaGraph(self):
"""Return the metagraph definition, if there is one.
Raises:
ValueError: If there is no metagraph for this run.
Returns:
The `meta_graph_def` proto.
"""
|
if self._meta_graph is None:
raise ValueError('There is no metagraph in this EventAccumulator')
meta_graph = meta_graph_pb2.MetaGraphDef()
meta_graph.ParseFromString(self._meta_graph)
return meta_graph
|
<SYSTEM_TASK:>
Check and discard expired events using SessionLog.START.
<END_TASK>
<USER_TASK:>
Description:
def _CheckForRestartAndMaybePurge(self, event):
"""Check and discard expired events using SessionLog.START.
Check for a SessionLog.START event and purge all previously seen events
with larger steps, because they are out of date. Because of supervisor
threading, it is possible that this logic will cause the first few event
messages to be discarded since supervisor threading does not guarantee
that the START message is deterministically written first.
This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
can inadvertently discard events due to supervisor threading.
Args:
event: The event to use as reference. If the event is a START event, all
previously seen events with a greater event.step will be purged.
"""
|
if event.HasField(
'session_log') and event.session_log.status == event_pb2.SessionLog.START:
self._Purge(event, by_tags=False)
|
<SYSTEM_TASK:>
Processes a proto histogram by adding it to accumulated state.
<END_TASK>
<USER_TASK:>
Description:
def _ProcessHistogram(self, tag, wall_time, step, histo):
"""Processes a proto histogram by adding it to accumulated state."""
|
histo = self._ConvertHistogramProtoToTuple(histo)
histo_ev = HistogramEvent(wall_time, step, histo)
self.histograms.AddItem(tag, histo_ev)
self.compressed_histograms.AddItem(tag, histo_ev, self._CompressHistogram)
|
<SYSTEM_TASK:>
Processes an image by adding it to accumulated state.
<END_TASK>
<USER_TASK:>
Description:
def _ProcessImage(self, tag, wall_time, step, image):
"""Processes an image by adding it to accumulated state."""
|
event = ImageEvent(wall_time=wall_time,
step=step,
encoded_image_string=image.encoded_image_string,
width=image.width,
height=image.height)
self.images.AddItem(tag, event)
|
<SYSTEM_TASK:>
Processes an audio event by adding it to accumulated state.
<END_TASK>
<USER_TASK:>
Description:
def _ProcessAudio(self, tag, wall_time, step, audio):
"""Processes a audio by adding it to accumulated state."""
|
event = AudioEvent(wall_time=wall_time,
step=step,
encoded_audio_string=audio.encoded_audio_string,
content_type=audio.content_type,
sample_rate=audio.sample_rate,
length_frames=audio.length_frames)
self.audios.AddItem(tag, event)
|
<SYSTEM_TASK:>
Processes a simple value by adding it to accumulated state.
<END_TASK>
<USER_TASK:>
Description:
def _ProcessScalar(self, tag, wall_time, step, scalar):
"""Processes a simple value by adding it to accumulated state."""
|
sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
self.scalars.AddItem(tag, sv)
|
<SYSTEM_TASK:>
Loads all new events from disk as raw serialized proto bytestrings.
<END_TASK>
<USER_TASK:>
Description:
def Load(self):
"""Loads all new events from disk as raw serialized proto bytestrings.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All event proto bytestrings in the file that have not been yielded yet.
"""
|
logger.debug('Loading events from %s', self._file_path)
# GetNext() expects a status argument on TF <= 1.7.
get_next_args = inspect.getargspec(self._reader.GetNext).args # pylint: disable=deprecated-method
# First argument is self
legacy_get_next = (len(get_next_args) > 1)
while True:
try:
if legacy_get_next:
with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
self._reader.GetNext(status)
else:
self._reader.GetNext()
except (tf.errors.DataLossError, tf.errors.OutOfRangeError) as e:
logger.debug('Cannot read more events: %s', e)
# We ignore partial read exceptions, because a record may be truncated.
# PyRecordReader holds the offset prior to the failed read, so retrying
# will succeed.
break
yield self._reader.record()
logger.debug('No more events in %s', self._file_path)
|
<SYSTEM_TASK:>
Loads all new events from disk.
<END_TASK>
<USER_TASK:>
Description:
def Load(self):
"""Loads all new events from disk.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All events in the file that have not been yielded yet.
"""
|
for record in super(EventFileLoader, self).Load():
yield event_pb2.Event.FromString(record)
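
A minimal consumption sketch, assuming a hypothetical `.tfevents` file path:

loader = EventFileLoader('/tmp/logs/events.out.tfevents.1234.host')  # hypothetical file
for event in loader.Load():          # yields event_pb2.Event protos
  if event.HasField('summary'):
    for value in event.summary.value:
      print(event.step, value.tag)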
|
<SYSTEM_TASK:>
Parses the session_run_index value from the event proto.
<END_TASK>
<USER_TASK:>
Description:
def _parse_session_run_index(self, event):
"""Parses the session_run_index value from the event proto.
Args:
event: The event with metadata that contains the session_run_index.
Returns:
The int session_run_index value. Or
constants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined.
"""
|
metadata_string = event.log_message.message
try:
metadata = json.loads(metadata_string)
except ValueError as e:
logger.error(
"Could not decode metadata string '%s' for step value: %s",
metadata_string, e)
return constants.SENTINEL_FOR_UNDETERMINED_STEP
try:
return metadata["session_run_index"]
except KeyError:
logger.error(
"The session_run_index is missing from the metadata: %s",
metadata_string)
return constants.SENTINEL_FOR_UNDETERMINED_STEP
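
For reference, the metadata this helper expects is a JSON object carried in a LogMessage event. A hedged construction sketch (the import path is an assumption; adjust it to wherever `event_pb2` lives in your tree):

import json
from tensorboard.compat.proto import event_pb2  # assumed import path

event = event_pb2.Event()
event.log_message.message = json.dumps({'session_run_index': 7})
# Passing this event to the method above would return 7; a missing key or a
# non-JSON message yields constants.SENTINEL_FOR_UNDETERMINED_STEP instead.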
|
<SYSTEM_TASK:>
Given a tag and list of runs, serve a list of metadata for images.
<END_TASK>
<USER_TASK:>
Description:
def _serve_image_metadata(self, request):
"""Given a tag and list of runs, serve a list of metadata for images.
Note that the images themselves are not sent; instead, we respond with URLs
to the images. The frontend should treat these URLs as opaque and should not
try to parse information about them or generate them itself, as the format
may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
|
tag = request.args.get('tag')
run = request.args.get('run')
sample = int(request.args.get('sample', 0))
response = self._image_response_for_run(run, tag, sample)
return http_util.Respond(request, response, 'application/json')
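
A hedged client-side sketch. The route, query parameters, and response fields are owned by the images plugin and may differ across versions, so treat the URL below (and the `train`/`inputs` run and tag) as assumptions to verify against a running TensorBoard:

import json
import urllib.request

url = ('http://localhost:6006/data/plugin/images/images'
       '?run=train&tag=inputs&sample=0')  # assumed route and sample values
with urllib.request.urlopen(url) as resp:
  entries = json.loads(resp.read().decode('utf-8'))
for entry in entries:  # metadata dicts plus opaque image URLs
  print(entry)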
|