<SYSTEM_TASK:>
Add an item to the ReservoirBucket, replacing an old item if necessary.
<END_TASK>
<USER_TASK:>
Description:
def AddItem(self, item, f=lambda x: x):
"""Add an item to the ReservoirBucket, replacing an old item if necessary.
The new item is guaranteed to be added to the bucket, and to be the last
element in the bucket. If the bucket has reached capacity, then an old item
will be replaced. With probability (_max_size/_num_items_seen) a random item
in the bucket will be popped out and the new item will be appended
to the end. With probability (1 - _max_size/_num_items_seen)
the last item in the bucket will be replaced.
Since the O(n) replacements occur with O(1/_num_items_seen) likelihood,
the amortized runtime is O(1).
Args:
item: The item to add to the bucket.
f: A function to transform item before addition, if it will be kept in
the reservoir.
"""
|
with self._mutex:
if len(self.items) < self._max_size or self._max_size == 0:
self.items.append(f(item))
else:
r = self._random.randint(0, self._num_items_seen)
if r < self._max_size:
self.items.pop(r)
self.items.append(f(item))
elif self.always_keep_last:
self.items[-1] = f(item)
self._num_items_seen += 1
|
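As an illustration of the insertion policy above, here is a minimal, self-contained sketch. The names below (`_MiniReservoir`, `add`, `max_size`) are hypothetical stand-ins for the real `ReservoirBucket`, which also handles locking, the transform function `f`, and the `always_keep_last` flag (assumed `True` here):

```python
import random

class _MiniReservoir(object):
  """Illustrative reservoir: keeps at most max_size items; newest is always last."""

  def __init__(self, max_size, seed=0):
    self.items = []
    self.max_size = max_size
    self.num_items_seen = 0
    self._random = random.Random(seed)

  def add(self, item):
    if len(self.items) < self.max_size or self.max_size == 0:
      self.items.append(item)
    else:
      r = self._random.randint(0, self.num_items_seen)
      if r < self.max_size:
        # Evict a random survivor and append the new item at the end.
        self.items.pop(r)
        self.items.append(item)
      else:
        # Otherwise only the last slot is overwritten.
        self.items[-1] = item
    self.num_items_seen += 1

bucket = _MiniReservoir(max_size=3)
for i in range(100):
  bucket.add(i)
assert len(bucket.items) == 3
assert bucket.items[-1] == 99  # the newest item is always retained
```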
<SYSTEM_TASK:>
Filter items in a ReservoirBucket, using a filtering function.
<END_TASK>
<USER_TASK:>
Description:
def FilterItems(self, filterFn):
"""Filter items in a ReservoirBucket, using a filtering function.
Filtering items from the reservoir bucket must update the
internal state variable self._num_items_seen, which is used for determining
the rate of replacement in reservoir sampling. Ideally, self._num_items_seen
would contain the exact number of items that have ever been seen by the
ReservoirBucket and satisfy filterFn. However, the ReservoirBucket does not
have access to all items seen -- it only has access to the subset of items
that have survived sampling (self.items). Therefore, we estimate
self._num_items_seen by scaling it by the same ratio as the ratio of items
not removed from self.items.
Args:
filterFn: A function that returns True for items to be kept.
Returns:
The number of items removed from the bucket.
"""
|
with self._mutex:
size_before = len(self.items)
self.items = list(filter(filterFn, self.items))
size_diff = size_before - len(self.items)
# Estimate a correction to the number of items seen
prop_remaining = len(self.items) / float(
size_before) if size_before > 0 else 0
self._num_items_seen = int(round(self._num_items_seen * prop_remaining))
return size_diff
|
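A small numeric illustration of the `_num_items_seen` rescaling used above (plain arithmetic, not the real class): if 1000 items have been seen, the bucket holds 10 of them, and a filter keeps 6, the estimate is scaled by 6/10:

```python
size_before = 10        # items in the bucket before filtering
num_items_seen = 1000   # items ever offered to the bucket
kept = 6                # items surviving the filter

prop_remaining = kept / float(size_before) if size_before > 0 else 0
num_items_seen = int(round(num_items_seen * prop_remaining))
assert num_items_seen == 600
```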
<SYSTEM_TASK:>
Returns the inferred dense dimensions of a list of lists.
<END_TASK>
<USER_TASK:>
Description:
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
|
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
|
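A few usage examples of the recursion above, assuming `_GetDenseDimensions` is in scope as defined: each level reports `len()` and then descends into the first element.

```python
assert _GetDenseDimensions(7) == []                           # scalar: no dimensions
assert _GetDenseDimensions([]) == [0]                         # empty list
assert _GetDenseDimensions([[1, 2, 3], [4, 5, 6]]) == [2, 3]
assert _GetDenseDimensions([[[0]]]) == [1, 1, 1]
```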
<SYSTEM_TASK:>
Returns true if `other` is convertible with this Dimension.
<END_TASK>
<USER_TASK:>
Description:
def is_convertible_with(self, other):
"""Returns true if `other` is convertible with this Dimension.
Two known Dimensions are convertible if they have the same value.
An unknown Dimension is convertible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are convertible.
"""
|
other = as_dimension(other)
return self._value is None or other.value is None or self._value == other.value
|
<SYSTEM_TASK:>
Returns a Dimension that combines the information in `self` and `other`.
<END_TASK>
<USER_TASK:>
Description:
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
```python
tf.Dimension(n) .merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(n) .merge_with(tf.Dimension(None)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None)
tf.Dimension(n) .merge_with(tf.Dimension(m)) # raises ValueError for n != m
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible (see
is_convertible_with).
"""
|
other = as_dimension(other)
self.assert_is_convertible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
|
<SYSTEM_TASK:>
Returns the rank of this shape, or None if it is unspecified.
<END_TASK>
<USER_TASK:>
Description:
def ndims(self):
"""Returns the rank of this shape, or None if it is unspecified."""
|
if self._dims is None:
return None
else:
if self._ndims is None:
self._ndims = len(self._dims)
return self._ndims
|
<SYSTEM_TASK:>
Returns the total number of elements, or None for incomplete shapes.
<END_TASK>
<USER_TASK:>
Description:
def num_elements(self):
"""Returns the total number of elements, or none for incomplete shapes."""
|
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
|
<SYSTEM_TASK:>
Returns a `TensorShape` combining the information in `self` and `other`.
<END_TASK>
<USER_TASK:>
Description:
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible.
"""
|
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not convertible" % (self, other))
|
<SYSTEM_TASK:>
Returns the concatenation of the dimension in `self` and `other`.
<END_TASK>
<USER_TASK:>
Description:
def concatenate(self, other):
"""Returns the concatenation of the dimension in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
|
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
|
<SYSTEM_TASK:>
Raises an exception if `self` and `other` do not have convertible ranks.
<END_TASK>
<USER_TASK:>
Description:
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have convertible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
|
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError(
"Shapes %s and %s must have the same rank" % (self, other)
)
|
<SYSTEM_TASK:>
Returns a shape based on `self` with the given rank.
<END_TASK>
<USER_TASK:>
Description:
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
|
try:
return self.merge_with(unknown_shape(ndims=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
|
<SYSTEM_TASK:>
Returns a shape based on `self` with at least the given rank.
<END_TASK>
<USER_TASK:>
Description:
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
|
if self.ndims is not None and self.ndims < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
|
<SYSTEM_TASK:>
Returns a shape based on `self` with at most the given rank.
<END_TASK>
<USER_TASK:>
Description:
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
|
if self.ndims is not None and self.ndims > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
|
<SYSTEM_TASK:>
Returns True iff `self` is convertible with `other`.
<END_TASK>
<USER_TASK:>
Description:
def is_convertible_with(self, other):
"""Returns True iff `self` is convertible with `other`.
Two possibly-partially-defined shapes are convertible if there
exists a fully-defined shape that both shapes can represent. Thus,
convertibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is convertible with all shapes.
* TensorShape([None, None]) is convertible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not convertible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is convertible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not convertible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is convertible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not convertible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The convertibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is convertible with
TensorShape(None), and TensorShape(None) is convertible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not convertible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is convertible with `other`.
"""
|
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.ndims != other.ndims:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_convertible_with(y_dim):
return False
return True
|
<SYSTEM_TASK:>
Returns the most specific TensorShape convertible with `self` and `other`.
<END_TASK>
<USER_TASK:>
Description:
def most_specific_convertible_shape(self, other):
"""Returns the most specific TensorShape convertible with `self` and `other`.
* TensorShape([None, 1]) is the most specific TensorShape convertible with
both TensorShape([2, 1]) and TensorShape([5, 1]). Note that
TensorShape(None) is also convertible with the above-mentioned TensorShapes.
* TensorShape([1, 2, 3]) is the most specific TensorShape convertible with
both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are other,
less specific TensorShapes convertible with the above-mentioned TensorShapes,
e.g. TensorShape([1, 2, None]), TensorShape(None).
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` which is the most specific convertible shape of `self`
and `other`.
"""
|
other = as_shape(other)
if self._dims is None or other.dims is None or self.ndims != other.ndims:
return unknown_shape()
dims = [(Dimension(None))] * self.ndims
for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
if d1 is not None and d2 is not None and d1 == d2:
dims[i] = d1
return TensorShape(dims)
|
<SYSTEM_TASK:>
Returns True iff `self` is fully defined in every dimension.
<END_TASK>
<USER_TASK:>
Description:
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
|
return self._dims is not None and all(
dim.value is not None for dim in self._dims
)
|
<SYSTEM_TASK:>
Returns a list of integers or `None` for each dimension.
<END_TASK>
<USER_TASK:>
Description:
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
|
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
|
<SYSTEM_TASK:>
Converts a PredictResponse to ClassificationResponse or RegressionResponse.
<END_TASK>
<USER_TASK:>
Description:
def convert_predict_response(pred, serving_bundle):
"""Converts a PredictResponse to ClassificationResponse or RegressionResponse.
Args:
pred: PredictResponse to convert.
serving_bundle: A `ServingBundle` object that contains the information about
the serving request that the response was generated by.
Returns:
A ClassificationResponse or RegressionResponse.
"""
|
output = pred.outputs[serving_bundle.predict_output_tensor]
raw_output = output.float_val
if serving_bundle.model_type == 'classification':
values = []
for example_index in range(output.tensor_shape.dim[0].size):
start = example_index * output.tensor_shape.dim[1].size
values.append(raw_output[start:start + output.tensor_shape.dim[1].size])
else:
values = raw_output
return convert_prediction_values(values, serving_bundle, pred.model_spec)
|
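The per-example slicing for classification output can be illustrated with plain lists; the numbers and sizes below are made up, whereas in the real code they come from `output.tensor_shape.dim`:

```python
# Flat float_val for 2 examples x 3 classes.
raw_output = [0.1, 0.2, 0.7, 0.8, 0.1, 0.1]
num_examples, num_classes = 2, 3

values = []
for example_index in range(num_examples):
  start = example_index * num_classes
  values.append(raw_output[start:start + num_classes])
assert values == [[0.1, 0.2, 0.7], [0.8, 0.1, 0.1]]
```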
<SYSTEM_TASK:>
Converts tensor values into ClassificationResponse or RegressionResponse.
<END_TASK>
<USER_TASK:>
Description:
def convert_prediction_values(values, serving_bundle, model_spec=None):
"""Converts tensor values into ClassificationResponse or RegressionResponse.
Args:
values: For classification, a 2D list of numbers. The first dimension is for
each example being predicted. The second dimension contains the probabilities
for each class ID in the prediction. For regression, a 1D list of numbers,
with a regression score for each example being predicted.
serving_bundle: A `ServingBundle` object that contains the information about
the serving request that the response was generated by.
model_spec: Optional model spec to put into the response.
Returns:
A ClassificationResponse or RegressionResponse.
"""
|
if serving_bundle.model_type == 'classification':
response = classification_pb2.ClassificationResponse()
for example_index in range(len(values)):
classification = response.result.classifications.add()
for class_index in range(len(values[example_index])):
class_score = classification.classes.add()
class_score.score = values[example_index][class_index]
class_score.label = str(class_index)
else:
response = regression_pb2.RegressionResponse()
for example_index in range(len(values)):
regression = response.result.regressions.add()
regression.value = values[example_index]
if model_spec:
response.model_spec.CopyFrom(model_spec)
return response
|
<SYSTEM_TASK:>
Updates input_to_in_layer, model_name_to_output, and prev_node_name
<END_TASK>
<USER_TASK:>
Description:
def _update_dicts(name_scope,
model_layer,
input_to_in_layer,
model_name_to_output,
prev_node_name):
"""Updates input_to_in_layer, model_name_to_output, and prev_node_name
based on the model_layer.
Args:
name_scope: a string representing a scope name, similar to that of tf.name_scope.
model_layer: a dict representing a Keras model configuration.
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing the name of the previous node in a
sequential model layout.
Returns:
A tuple of (input_to_in_layer, model_name_to_output, prev_node_name).
input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
model_name_to_output: a dict mapping Keras Model name to output layer of the model.
prev_node_name: a string representing the name of the previous node in a
sequential model layout.
"""
|
layer_config = model_layer.get('config')
if not layer_config.get('layers'):
raise ValueError('layer is not a model.')
node_name = _scoped_name(name_scope, layer_config.get('name'))
input_layers = layer_config.get('input_layers')
output_layers = layer_config.get('output_layers')
inbound_nodes = model_layer.get('inbound_nodes')
is_functional_model = bool(input_layers and output_layers)
# In case of [1] and the parent model is functional, current layer
# will have the 'inbound_nodes' property.
is_parent_functional_model = bool(inbound_nodes)
if is_parent_functional_model and is_functional_model:
for (input_layer, inbound_node) in zip(input_layers, inbound_nodes):
input_layer_name = _scoped_name(node_name, input_layer)
inbound_node_name = _scoped_name(name_scope, inbound_node[0])
input_to_in_layer[input_layer_name] = inbound_node_name
elif is_parent_functional_model and not is_functional_model:
# Sequential model can take only one input. Make sure inbound to the
# model is linked to the first layer in the Sequential model.
prev_node_name = _scoped_name(name_scope, inbound_nodes[0][0][0])
elif not is_parent_functional_model and prev_node_name and is_functional_model:
assert len(input_layers) == 1, (
'Cannot have multi-input Functional model when parent model '
'is not Functional. Number of input layers: %d' % len(input_layers))
input_layer = input_layers[0]
input_layer_name = _scoped_name(node_name, input_layer)
input_to_in_layer[input_layer_name] = prev_node_name
if is_functional_model and output_layers:
layers = _norm_to_list_of_layers(output_layers)
layer_names = [_scoped_name(node_name, layer[0]) for layer in layers]
model_name_to_output[node_name] = layer_names
else:
last_layer = layer_config.get('layers')[-1]
last_layer_name = last_layer.get('config').get('name')
output_node = _scoped_name(node_name, last_layer_name)
model_name_to_output[node_name] = [output_node]
return (input_to_in_layer, model_name_to_output, prev_node_name)
|
<SYSTEM_TASK:>
Returns a GraphDef representation of the Keras model in a dict form.
<END_TASK>
<USER_TASK:>
Description:
def keras_model_to_graph_def(keras_layer):
"""Returns a GraphDef representation of the Keras model in a dict form.
Note that it only supports models that implement to_json().
Args:
keras_layer: A dict from Keras model.to_json().
Returns:
A GraphDef representation of the layers in the model.
"""
|
input_to_layer = {}
model_name_to_output = {}
g = GraphDef()
# Sequential model layers do not have a field "inbound_nodes" but
# instead are defined implicitly via order of layers.
prev_node_name = None
for (name_scope, layer) in _walk_layers(keras_layer):
if _is_model(layer):
(input_to_layer, model_name_to_output, prev_node_name) = _update_dicts(
name_scope, layer, input_to_layer, model_name_to_output, prev_node_name)
continue
layer_config = layer.get('config')
node_name = _scoped_name(name_scope, layer_config.get('name'))
node_def = g.node.add()
node_def.name = node_name
if layer.get('class_name') is not None:
keras_cls_name = layer.get('class_name').encode('ascii')
node_def.attr['keras_class'].s = keras_cls_name
if layer_config.get('dtype') is not None:
tf_dtype = dtypes.as_dtype(layer_config.get('dtype'))
node_def.attr['dtype'].type = tf_dtype.as_datatype_enum
if layer.get('inbound_nodes') is not None:
for maybe_inbound_node in layer.get('inbound_nodes'):
inbound_nodes = _norm_to_list_of_layers(maybe_inbound_node)
for [name, size, index, _] in inbound_nodes:
inbound_name = _scoped_name(name_scope, name)
# An input to a layer can be an output of another model. In that case, the name
# in inbound_nodes for the layer is the name of that model. Remap the name of
# the model to the output layer of the model. Also, since there can be multiple
# outputs in a model, make sure we pick the right output_layer from the model.
inbound_node_names = model_name_to_output.get(
inbound_name, [inbound_name])
node_def.input.append(inbound_node_names[index])
elif prev_node_name is not None:
node_def.input.append(prev_node_name)
if node_name in input_to_layer:
node_def.input.append(input_to_layer.get(node_name))
prev_node_name = node_def.name
return g
|
<SYSTEM_TASK:>
Returns True if the hparams plugin is active.
<END_TASK>
<USER_TASK:>
Description:
def is_active(self):
"""Returns True if the hparams plugin is active.
The hparams plugin is active iff there is a tag with
the hparams plugin name as its plugin name and the scalars plugin is
registered and active.
"""
|
if not self._context.multiplexer:
return False
scalars_plugin = self._get_scalars_plugin()
if not scalars_plugin or not scalars_plugin.is_active():
return False
return bool(self._context.multiplexer.PluginRunToTagToContent(
metadata.PLUGIN_NAME))
|
<SYSTEM_TASK:>
Convert Markdown to HTML that's safe to splice into the DOM.
<END_TASK>
<USER_TASK:>
Description:
def markdown_to_safe_html(markdown_string):
"""Convert Markdown to HTML that's safe to splice into the DOM.
Arguments:
markdown_string: A Unicode string or UTF-8-encoded bytestring
containing Markdown source. Markdown tables are supported.
Returns:
A string containing safe HTML.
"""
|
warning = ''
# Convert to utf-8 whenever we have a binary input.
if isinstance(markdown_string, six.binary_type):
markdown_string_decoded = markdown_string.decode('utf-8')
# Remove null bytes and warn if there were any, since it probably means
# we were given a bad encoding.
markdown_string = markdown_string_decoded.replace(u'\x00', u'')
num_null_bytes = len(markdown_string_decoded) - len(markdown_string)
if num_null_bytes:
warning = ('<!-- WARNING: discarded %d null bytes in markdown string '
'after UTF-8 decoding -->\n') % num_null_bytes
string_html = markdown.markdown(
markdown_string, extensions=['markdown.extensions.tables'])
string_sanitized = bleach.clean(
string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES)
return warning + string_sanitized
|
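The null-byte handling can be seen in isolation with plain string operations; this sketch skips the actual Markdown rendering and sanitization, which require the `markdown` and `bleach` packages:

```python
markdown_string_decoded = b'**hi**\x00\x00'.decode('utf-8')
markdown_string = markdown_string_decoded.replace(u'\x00', u'')
num_null_bytes = len(markdown_string_decoded) - len(markdown_string)
assert markdown_string == u'**hi**'
assert num_null_bytes == 2
```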
<SYSTEM_TASK:>
Converts the given `type_value` to a `DType`.
<END_TASK>
<USER_TASK:>
Description:
def as_dtype(type_value):
"""Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType` object. This may
currently be a `tf.DType` object, a [`DataType`
enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.dtype`.
Returns:
A `DType` corresponding to `type_value`.
Raises:
TypeError: If `type_value` cannot be converted to a `DType`.
"""
|
if isinstance(type_value, DType):
return type_value
try:
return _INTERN_TABLE[type_value]
except KeyError:
pass
try:
return _STRING_TO_TF[type_value]
except KeyError:
pass
try:
return _PYTHON_TO_TF[type_value]
except KeyError:
pass
if isinstance(type_value, np.dtype):
# The numpy dtype for strings is variable length. We can not compare
# dtype with a single constant (np.string does not exist) to decide
# dtype is a "string" type. We need to compare the dtype.type to be
# sure it's a string type.
if type_value.type == np.string_ or type_value.type == np.unicode_:
return string
if isinstance(type_value, (type, np.dtype)):
for key, val in _NP_TO_TF:
try:
if key == type_value:
return val
except TypeError as e:
raise TypeError(
"Cannot convert {} to a dtype. {}".format(type_value, e)
)
raise TypeError("Cannot convert value %r to a TensorFlow DType." % type_value)
|
<SYSTEM_TASK:>
Returns the dtype corresponding to this dtype's real part.
<END_TASK>
<USER_TASK:>
Description:
def real_dtype(self):
"""Returns the dtype correspond to this dtype's real part."""
|
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
|
<SYSTEM_TASK:>
Returns the minimum representable value in this data type.
<END_TASK>
<USER_TASK:>
Description:
def min(self):
"""Returns the minimum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
|
if self.is_quantized or self.base_dtype in (
bool,
string,
complex64,
complex128,
):
raise TypeError("Cannot find minimum value of %s." % self)
# There is no simple way to get the min value of a dtype, so we have to check
# float and int types separately.
try:
return np.finfo(self.as_numpy_dtype()).min
except: # bare except as possible raises by finfo not documented
try:
return np.iinfo(self.as_numpy_dtype()).min
except:
if self.base_dtype == bfloat16:
return _np_bfloat16(float.fromhex("-0x1.FEp127"))
raise TypeError("Cannot find minimum value of %s." % self)
|
<SYSTEM_TASK:>
Returns True if the `other` DType will be converted to this DType.
<END_TASK>
<USER_TASK:>
Description:
def is_compatible_with(self, other):
"""Returns True if the `other` DType will be converted to this DType.
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
DType(T) .is_compatible_with(DType(T).as_ref) == True
DType(T).as_ref.is_compatible_with(DType(T)) == False
DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
"""
|
other = as_dtype(other)
return self._type_enum in (
other.as_datatype_enum,
other.base_dtype.as_datatype_enum,
)
|
<SYSTEM_TASK:>
Obtains a mapping between routes and handlers.
<END_TASK>
<USER_TASK:>
Description:
def get_plugin_apps(self):
"""Obtains a mapping between routes and handlers.
This function also starts a debugger data server on a separate thread if the
plugin has not started one yet.
Returns:
A mapping between routes and handlers (functions that respond to
requests).
"""
|
return {
_ACK_ROUTE: self._serve_ack,
_COMM_ROUTE: self._serve_comm,
_DEBUGGER_GRPC_HOST_PORT_ROUTE: self._serve_debugger_grpc_host_port,
_DEBUGGER_GRAPH_ROUTE: self._serve_debugger_graph,
_GATED_GRPC_ROUTE: self._serve_gated_grpc,
_TENSOR_DATA_ROUTE: self._serve_tensor_data,
_SOURCE_CODE_ROUTE: self._serve_source_code,
}
|
<SYSTEM_TASK:>
The audio plugin is active iff any run has at least one relevant tag.
<END_TASK>
<USER_TASK:>
Description:
def is_active(self):
"""The audio plugin is active iff any run has at least one relevant tag."""
|
if not self._multiplexer:
return False
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
|
<SYSTEM_TASK:>
Return information about the tags in each run.
<END_TASK>
<USER_TASK:>
Description:
def _index_impl(self):
"""Return information about the tags in each run.
Result is a dictionary of the form
{
"runName1": {
"tagName1": {
"displayName": "The first tag",
"description": "<p>Long ago there was just one tag...</p>",
"samples": 3
},
"tagName2": ...,
...
},
"runName2": ...,
...
}
For each tag, `samples` is the greatest number of audio clips that
appear at any particular step. (It's not related to "samples of a
waveform.") For example, if for tag `minibatch_input` there are
five audio clips at step 0 and ten audio clips at step 1, then the
dictionary for `"minibatch_input"` will contain `"samples": 10`.
"""
|
runs = self._multiplexer.Runs()
result = {run: {} for run in runs}
mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(mapping):
for tag in tag_to_content:
summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
tensor_events = self._multiplexer.Tensors(run, tag)
samples = max([self._number_of_samples(event.tensor_proto)
for event in tensor_events] + [0])
result[run][tag] = {'displayName': summary_metadata.display_name,
'description': plugin_util.markdown_to_safe_html(
summary_metadata.summary_description),
'samples': samples}
return result
|
<SYSTEM_TASK:>
Given a tag and list of runs, serve a list of metadata for audio.
<END_TASK>
<USER_TASK:>
Description:
def _serve_audio_metadata(self, request):
"""Given a tag and list of runs, serve a list of metadata for audio.
Note that the actual audio data are not sent; instead, we respond
with URLs to the audio. The frontend should treat these URLs as
opaque and should not try to parse information about them or
generate them itself, as the format may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
|
tag = request.args.get('tag')
run = request.args.get('run')
sample = int(request.args.get('sample', 0))
events = self._multiplexer.Tensors(run, tag)
response = self._audio_response_for_run(events, run, tag, sample)
return http_util.Respond(request, response, 'application/json')
|
<SYSTEM_TASK:>
Builds a JSON-serializable object with information about audio.
<END_TASK>
<USER_TASK:>
Description:
def _audio_response_for_run(self, tensor_events, run, tag, sample):
"""Builds a JSON-serializable object with information about audio.
Args:
tensor_events: A list of event_accumulator.TensorEvent objects.
run: The name of the run.
tag: The name of the tag the audio entries all belong to.
sample: The zero-indexed sample of the audio sample for which to
retrieve information. For instance, setting `sample` to `2` will
fetch information about only the third audio clip of each batch,
and steps with fewer than three audio clips will be omitted from
the results.
Returns:
A list of dictionaries containing the wall time, step, label, content type,
and query string for each audio entry.
"""
|
response = []
index = 0
filtered_events = self._filter_by_sample(tensor_events, sample)
content_type = self._get_mime_type(run, tag)
for (index, tensor_event) in enumerate(filtered_events):
data = tensor_util.make_ndarray(tensor_event.tensor_proto)
label = data[sample, 1]
response.append({
'wall_time': tensor_event.wall_time,
'step': tensor_event.step,
'label': plugin_util.markdown_to_safe_html(label),
'contentType': content_type,
'query': self._query_for_individual_audio(run, tag, sample, index)
})
return response
|
<SYSTEM_TASK:>
Builds a URL for accessing the specified audio.
<END_TASK>
<USER_TASK:>
Description:
def _query_for_individual_audio(self, run, tag, sample, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_audio_metadata. Note that the URL is
*not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio entries come in.
Args:
run: The name of the run.
tag: The tag.
sample: The zero-indexed sample of the audio entry to retrieve.
index: The index of the audio entry. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th sampled audio
in the given run with the given tag.
"""
|
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'sample': sample,
'index': index,
})
return query_string
|
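A quick look at the query string this builds, using the standard library directly; parameter order follows dict insertion order, and values are percent-encoded:

```python
import urllib.parse

query = urllib.parse.urlencode({
    'run': 'train', 'tag': 'audio/wav', 'sample': 0, 'index': 2})
assert urllib.parse.parse_qs(query) == {
    'run': ['train'], 'tag': ['audio/wav'], 'sample': ['0'], 'index': ['2']}
```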
<SYSTEM_TASK:>
Serve encoded audio data.
<END_TASK>
<USER_TASK:>
Description:
def _serve_individual_audio(self, request):
"""Serve encoded audio data."""
|
tag = request.args.get('tag')
run = request.args.get('run')
index = int(request.args.get('index'))
sample = int(request.args.get('sample', 0))
events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
data = tensor_util.make_ndarray(events[index].tensor_proto)[sample, 0]
mime_type = self._get_mime_type(run, tag)
return http_util.Respond(request, data, mime_type)
|
<SYSTEM_TASK:>
Create a legacy image summary op for use in a TensorFlow graph.
<END_TASK>
<USER_TASK:>
Description:
def op(name,
images,
max_outputs=3,
display_name=None,
description=None,
collections=None):
"""Create a legacy image summary op for use in a TensorFlow graph.
Arguments:
name: A unique name for the generated summary node.
images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
where `k` is the number of images, `h` and `w` are the height and
width of the images, and `c` is the number of channels, which
should be 1, 3, or 4. Any of the dimensions may be statically
unknown (i.e., `None`).
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many images will be emitted at each step. When more than
`max_outputs` many images are provided, the first `max_outputs` many
images will be used and the rest silently discarded.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[tf.GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name), \
tf.control_dependencies([tf.assert_rank(images, 4),
tf.assert_type(images, tf.uint8),
tf.assert_non_negative(max_outputs)]):
limited_images = images[:max_outputs]
encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
dtype=tf.string,
name='encode_each_image')
image_shape = tf.shape(input=images)
dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
tf.as_string(image_shape[1], name='height')],
name='dimensions')
tensor = tf.concat([dimensions, encoded_images], axis=0)
return tf.summary.tensor_summary(name='image_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata)
|
<SYSTEM_TASK:>
Create a legacy image summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def pb(name, images, max_outputs=3, display_name=None, description=None):
"""Create a legacy image summary protobuf.
This behaves as if you were to create an `op` with the same arguments
(wrapped with constant tensors where appropriate) and then execute
that summary op in a TensorFlow session.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
images: An `np.array` representing pixel data with shape
`[k, h, w, c]`, where `k` is the number of images, `h` and `w` are
the height and width of the images, and `c` is the number of
channels, which should be 1, 3, or 4.
max_outputs: Optional `int`. At most this many images will be
emitted. If more than this many images are provided, the first
`max_outputs` many images will be used and the rest silently
discarded.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Returns:
A `tf.Summary` protobuf object.
"""
|
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
images = np.array(images).astype(np.uint8)
if images.ndim != 4:
raise ValueError('Shape %r must have rank 4' % (images.shape, ))
limited_images = images[:max_outputs]
encoded_images = [encoder.encode_png(image) for image in limited_images]
(width, height) = (images.shape[2], images.shape[1])
content = [str(width), str(height)] + encoded_images
tensor = tf.make_tensor_proto(content, dtype=tf.string)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/image_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary
|
<SYSTEM_TASK:>
Apply user per-summary size guidance overrides.
<END_TASK>
<USER_TASK:>
Description:
def tensor_size_guidance_from_flags(flags):
"""Apply user per-summary size guidance overrides."""
|
tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
if not flags or not flags.samples_per_plugin:
return tensor_size_guidance
for token in flags.samples_per_plugin.split(','):
k, v = token.strip().split('=')
tensor_size_guidance[k] = int(v)
return tensor_size_guidance
|
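The `--samples_per_plugin` parsing above can be exercised with a small stand-in default table; `DEFAULT_TENSOR_SIZE_GUIDANCE` below is a hypothetical example, not TensorBoard's real defaults:

```python
DEFAULT_TENSOR_SIZE_GUIDANCE = {'scalars': 1000, 'images': 10}  # hypothetical defaults
samples_per_plugin = 'scalars=500, images=0'

guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
for token in samples_per_plugin.split(','):
  k, v = token.strip().split('=')
  guidance[k] = int(v)
assert guidance == {'scalars': 500, 'images': 0}
```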
<SYSTEM_TASK:>
Constructs the TensorBoard application.
<END_TASK>
<USER_TASK:>
Description:
def TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval,
path_prefix='', reload_task='auto'):
"""Constructs the TensorBoard application.
Args:
logdir: the logdir spec that describes where data will be loaded. It may be
a directory or a comma-separated list of directories, and colons can be
used to provide named directories.
plugins: A list of base_plugin.TBPlugin subclass instances.
multiplexer: The EventMultiplexer with TensorBoard data to serve
reload_interval: How often (in seconds) to reload the Multiplexer.
Zero means reload just once at startup; negative means never load.
path_prefix: A prefix of the path when app isn't served from root.
reload_task: Indicates the type of background task to reload with.
Returns:
A WSGI application that implements the TensorBoard backend.
Raises:
ValueError: If something is wrong with the plugin configuration.
:type plugins: list[base_plugin.TBPlugin]
:rtype: TensorBoardWSGI
"""
|
path_to_run = parse_event_files_spec(logdir)
if reload_interval >= 0:
# We either reload the multiplexer once when TensorBoard starts up, or we
# continuously reload the multiplexer.
start_reloading_multiplexer(multiplexer, path_to_run, reload_interval,
reload_task)
return TensorBoardWSGI(plugins, path_prefix)
|
<SYSTEM_TASK:>
Parses `logdir` into a map from paths to run group names.
<END_TASK>
<USER_TASK:>
Description:
def parse_event_files_spec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
|
files = {}
if logdir is None:
return files
# Keep this consistent with ParseURI in core/lib/io/path.cc.
uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
for specification in logdir.split(','):
# Check if the spec contains a group. A spec starting with xyz:// is regarded
# as a URI path spec instead of a group spec. If the spec looks like
# /foo:bar/baz, then we assume it's a path with a colon. If the spec looks
# like [a-zA-Z]:\foo then we assume it's a Windows path and not a
# single-letter group.
if (uri_pattern.match(specification) is None and ':' in specification and
specification[0] != '/' and not os.path.splitdrive(specification)[0]):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(os.path.expanduser(path))
files[path] = run_name
return files
|
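Assuming `parse_event_files_spec` and its `re`/`os` imports are in scope as above, the spec grammar resolves like this; filesystem paths go through `os.path.realpath`, so the expected dict does too:

```python
import os

spec = 'train:/tmp/logs/train,/tmp/logs/eval,gs://bucket/run1'
expected = {
    os.path.realpath('/tmp/logs/train'): 'train',  # named group
    os.path.realpath('/tmp/logs/eval'): None,      # unnamed group, named after its path
    'gs://bucket/run1': None,                      # URI specs are left untouched
}
assert parse_event_files_spec(spec) == expected
```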
<SYSTEM_TASK:>
Starts automatically reloading the given multiplexer.
<END_TASK>
<USER_TASK:>
Description:
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval,
reload_task):
"""Starts automatically reloading the given multiplexer.
If `load_interval` is positive, the thread will reload the multiplexer
by calling `ReloadMultiplexer` every `load_interval` seconds, starting
immediately. Otherwise, reloads the multiplexer once and never again.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: An integer greater than or equal to 0. If positive, how many
seconds to wait after one load before starting the next load. Otherwise,
reloads the multiplexer once and never again (no continuous reloading).
reload_task: Indicates the type of background task to reload with.
Raises:
ValueError: If `load_interval` is negative.
"""
|
if load_interval < 0:
raise ValueError('load_interval is negative: %d' % load_interval)
def _reload():
while True:
start = time.time()
logger.info('TensorBoard reload process beginning')
for path, name in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logger.info('TensorBoard reload process: Reload the whole Multiplexer')
multiplexer.Reload()
duration = time.time() - start
logger.info('TensorBoard done reloading. Load took %0.3f secs', duration)
if load_interval == 0:
# Only load the multiplexer once. Do not continuously reload.
break
time.sleep(load_interval)
if reload_task == 'process':
logger.info('Launching reload in a child process')
import multiprocessing
process = multiprocessing.Process(target=_reload, name='Reloader')
# Best-effort cleanup; on exit, the main TB parent process will attempt to
# kill all its daemonic children.
process.daemon = True
process.start()
elif reload_task in ('thread', 'auto'):
logger.info('Launching reload in a daemon thread')
thread = threading.Thread(target=_reload, name='Reloader')
# Make this a daemon thread, which won't block TB from exiting.
thread.daemon = True
thread.start()
elif reload_task == 'blocking':
if load_interval != 0:
raise ValueError('blocking reload only allowed with load_interval=0')
_reload()
else:
raise ValueError('unrecognized reload_task: %s' % reload_task)
|
<SYSTEM_TASK:>
Returns TBContext fields relating to SQL database.
<END_TASK>
<USER_TASK:>
Description:
def get_database_info(db_uri):
"""Returns TBContext fields relating to SQL database.
Args:
db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".
Returns:
A tuple with the db_module and db_connection_provider TBContext fields. If
db_uri was empty, then (None, None) is returned.
Raises:
ValueError: If db_uri scheme is not supported.
"""
|
if not db_uri:
return None, None
scheme = urlparse.urlparse(db_uri).scheme
if scheme == 'sqlite':
return sqlite3, create_sqlite_connection_provider(db_uri)
else:
raise ValueError('Only sqlite DB URIs are supported now: ' + db_uri)
|
<SYSTEM_TASK:>
Returns function that returns SQLite Connection objects.
<END_TASK>
<USER_TASK:>
Description:
def create_sqlite_connection_provider(db_uri):
"""Returns function that returns SQLite Connection objects.
Args:
db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".
Returns:
A function that returns a new PEP-249 DB Connection each time it is called;
the caller must close each connection.
Raises:
ValueError: If db_uri is not a valid sqlite file URI.
"""
|
uri = urlparse.urlparse(db_uri)
if uri.scheme != 'sqlite':
raise ValueError('Scheme is not sqlite: ' + db_uri)
if uri.netloc:
raise ValueError('Can not connect to SQLite over network: ' + db_uri)
if uri.path == ':memory:':
raise ValueError('Memory mode SQLite not supported: ' + db_uri)
path = os.path.expanduser(uri.path)
params = _get_connect_params(uri.query)
# TODO(@jart): Add thread-local pooling.
return lambda: sqlite3.connect(path, **params)
|
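The URI handling relies on the standard `urlparse` behavior for scheme-prefixed paths; for example (using `urllib.parse`, which the code above refers to as `urlparse`):

```python
import urllib.parse as urlparse

uri = urlparse.urlparse('sqlite:~/tb.db')
assert uri.scheme == 'sqlite'   # selects the sqlite branch in get_database_info
assert uri.netloc == ''         # no network location, so that check passes
assert uri.path == '~/tb.db'    # later expanded with os.path.expanduser
```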
<SYSTEM_TASK:>
Serves an object mapping plugin name to whether it is enabled.
<END_TASK>
<USER_TASK:>
Description:
def _serve_plugins_listing(self, request):
"""Serves an object mapping plugin name to whether it is enabled.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
|
response = {}
for plugin in self._plugins:
start = time.time()
response[plugin.plugin_name] = plugin.is_active()
elapsed = time.time() - start
logger.info(
'Plugin listing: is_active() for %s took %0.3f seconds',
plugin.plugin_name, elapsed)
return http_util.Respond(request, response, 'application/json')
|
<SYSTEM_TASK:>
Parse a string as time indices.
<END_TASK>
<USER_TASK:>
Description:
def parse_time_indices(s):
"""Parse a string as time indices.
Args:
s: A valid slicing string for time indices. E.g., '-1', '[:]', ':', '2:10'
Returns:
A slice object.
Raises:
ValueError: If `s` does not represent valid time indices.
"""
|
if not s.startswith('['):
s = '[' + s + ']'
parsed = command_parser._parse_slices(s)
if len(parsed) != 1:
raise ValueError(
'Invalid number of slicing objects in time indices (%d)' % len(parsed))
else:
return parsed[0]
|
<SYSTEM_TASK:>
Process a buffer for human-readable display.
<END_TASK>
<USER_TASK:>
Description:
def process_buffers_for_display(s, limit=40):
"""Process a buffer for human-readable display.
This function performs the following operation on each of the buffers in `s`.
1. Truncate input buffer if the length of the buffer is greater than
`limit`, to prevent large strings from overloading the frontend.
2. Apply `binascii.b2a_qp` on the truncated buffer to make the buffer
printable and convertible to JSON.
3. If truncation happened (in step 1), append a string at the end
describing the original length and the truncation.
Args:
s: The buffer to be processed, either a single buffer or a nested array of
them.
limit: Length limit for each buffer, beyond which truncation will occur.
Returns:
A single processed buffer or a nested array of processed buffers.
"""
|
if isinstance(s, (list, tuple)):
return [process_buffers_for_display(elem, limit=limit) for elem in s]
else:
length = len(s)
if length > limit:
return (binascii.b2a_qp(s[:limit]) +
b' (length-%d truncated at %d bytes)' % (length, limit))
else:
return binascii.b2a_qp(s)
|
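A standalone sketch of the single-buffer path, using a hypothetical helper name; `binascii.b2a_qp` makes the bytes printable as quoted-printable:

```python
import binascii

def _truncate_for_display(buf, limit=40):
  """Illustrative stand-in for the single-buffer branch above."""
  if len(buf) > limit:
    return (binascii.b2a_qp(buf[:limit]) +
            b' (length-%d truncated at %d bytes)' % (len(buf), limit))
  return binascii.b2a_qp(buf)

assert _truncate_for_display(b'hello') == b'hello'
long_result = _truncate_for_display(b'x' * 100, limit=40)
assert long_result.startswith(b'x' * 40)
assert long_result.endswith(b'(length-100 truncated at 40 bytes)')
```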
<SYSTEM_TASK:>
View a slice or the entirety of an ndarray.
<END_TASK>
<USER_TASK:>
Description:
def array_view(array, slicing=None, mapping=None):
"""View a slice or the entirety of an ndarray.
Args:
array: The input array, as an numpy.ndarray.
slicing: Optional slicing string, e.g., "[:, 1:3, :]".
mapping: Optional mapping string. Supported mappings:
`None` or case-insensitive `'None'`: Unmapped nested list.
`'image/png'`: Image encoding of a 2D sliced array or 3D sliced array
with 3 as the last dimension. If the sliced array is not 2D or 3D with
3 as the last dimension, a `ValueError` will be thrown.
`health-pill`: A succinct summary of the numeric values of a tensor.
See documentation in [`health_pill_calc.py`] for more details.
Returns:
1. dtype as a `str`.
2. shape of the sliced array, as a tuple of `int`s.
3. the potentially sliced values, as a nested `list`.
"""
|
dtype = translate_dtype(array.dtype)
sliced_array = (array[command_parser._parse_slices(slicing)] if slicing
else array)
if np.isscalar(sliced_array) and str(dtype) == 'string':
# When a string Tensor (for which dtype is 'object') is sliced down to only
# one element, it becomes a string, instead of a numpy array.
# We preserve the dimensionality of original array in the returned shape
# and slice.
ndims = len(array.shape)
slice_shape = []
for _ in range(ndims):
sliced_array = [sliced_array]
slice_shape.append(1)
return dtype, tuple(slice_shape), sliced_array
else:
shape = sliced_array.shape
if mapping == "image/png":
if len(sliced_array.shape) == 2:
return dtype, shape, array_to_base64_png(sliced_array)
elif len(sliced_array.shape) == 3:
raise NotImplementedError(
"image/png mapping for 3D array has not been implemented")
else:
raise ValueError("Invalid rank for image/png mapping: %d" %
len(sliced_array.shape))
elif mapping == 'health-pill':
health_pill = health_pill_calc.calc_health_pill(array)
return dtype, shape, health_pill
elif mapping is None or mapping == '' or mapping.lower() == 'none':
return dtype, shape, sliced_array.tolist()
else:
raise ValueError("Invalid mapping: %s" % mapping)
|
<SYSTEM_TASK:>
Convert an array into a base64-encoded PNG image.
<END_TASK>
<USER_TASK:>
Description:
def array_to_base64_png(array):
"""Convert an array into base64-enoded PNG image.
Args:
array: A 2D np.ndarray or nested list of items.
Returns:
A base64-encoded string of the image. The image is grayscale if the array is
2D. The image is RGB color if the array is 3D with the last dimension equal
to 3.
Raises:
ValueError: If the input `array` is not rank-2, or if the rank-2 `array` is
empty.
"""
|
# TODO(cais): Deal with 3D case.
# TODO(cais): If there are None values in here, replace them with all NaNs.
array = np.array(array, dtype=np.float32)
if len(array.shape) != 2:
raise ValueError(
"Expected rank-2 array; received rank-%d array." % len(array.shape))
if not np.size(array):
raise ValueError(
"Cannot encode an empty array (size: %s) as image." % (array.shape,))
is_infinity = np.isinf(array)
is_positive = array > 0.0
is_positive_infinity = np.logical_and(is_infinity, is_positive)
is_negative_infinity = np.logical_and(is_infinity,
np.logical_not(is_positive))
is_nan = np.isnan(array)
finite_indices = np.where(np.logical_and(np.logical_not(is_infinity),
np.logical_not(is_nan)))
if np.size(finite_indices):
# Finite subset is not empty.
minval = np.min(array[finite_indices])
maxval = np.max(array[finite_indices])
scaled = np.array((array - minval) / (maxval - minval) * 255,
dtype=np.uint8)
rgb = np.repeat(np.expand_dims(scaled, -1), IMAGE_COLOR_CHANNELS, axis=-1)
else:
rgb = np.zeros(array.shape + (IMAGE_COLOR_CHANNELS,), dtype=np.uint8)
# Color-code pixels that correspond to infinities and nans.
rgb[is_positive_infinity] = POSITIVE_INFINITY_RGB
rgb[is_negative_infinity] = NEGATIVE_INFINITY_RGB
rgb[is_nan] = NAN_RGB
image_encoded = base64.b64encode(encoder.encode_png(rgb))
return image_encoded
|
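The grayscale normalization step can be checked on a small array. The sentinel-color overwrite for inf/NaN cells (`POSITIVE_INFINITY_RGB`, etc.) and the PNG/base64 encoding are omitted here; as in the original, casting inf/NaN to uint8 may emit a NumPy warning, but the finite cells behave as shown:

```python
import numpy as np

array = np.array([[0.0, 5.0, np.inf],
                  [10.0, np.nan, 2.5]], dtype=np.float32)
finite_indices = np.where(np.logical_and(np.logical_not(np.isinf(array)),
                                         np.logical_not(np.isnan(array))))
minval = np.min(array[finite_indices])
maxval = np.max(array[finite_indices])
scaled = np.array((array - minval) / (maxval - minval) * 255, dtype=np.uint8)
assert scaled[0, 0] == 0     # the finite minimum maps to 0
assert scaled[1, 0] == 255   # the finite maximum maps to 255
# The real function then repeats this channel 3x and overwrites inf/NaN pixels
# with sentinel colors before PNG-encoding and base64-encoding the result.
```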
<SYSTEM_TASK:>
Safely merge values from `src_proto_list` into `dst_proto_list`.
<END_TASK>
<USER_TASK:>
Description:
def _safe_copy_proto_list_values(dst_proto_list, src_proto_list, get_key):
"""Safely merge values from `src_proto_list` into `dst_proto_list`.
Each element in `dst_proto_list` must be mapped by `get_key` to a key
value that is unique within that list; likewise for `src_proto_list`.
If an element of `src_proto_list` has the same key as an existing
element in `dst_proto_list`, then the elements must also be equal.
Args:
dst_proto_list: A `RepeatedCompositeContainer` or
`RepeatedScalarContainer` into which values should be copied.
src_proto_list: A container holding the same kind of values as in
`dst_proto_list` from which values should be copied.
get_key: A function that takes an element of `dst_proto_list` or
`src_proto_list` and returns a key, such that if two elements have
the same key then it is required that they be deep-equal. For
instance, if `dst_proto_list` is a list of nodes, then `get_key`
might be `lambda node: node.name` to indicate that if two nodes
have the same name then they must be the same node. All keys must
be hashable.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
_SameKeyDiffContentError: An item with the same key has different contents.
"""
|
def _assert_proto_container_unique_keys(proto_list, get_key):
"""Asserts proto_list to only contains unique keys.
Args:
proto_list: A `RepeatedCompositeContainer` or `RepeatedScalarContainer`.
get_key: A function that takes an element of `proto_list` and returns a
hashable key.
Raises:
_ProtoListDuplicateKeyError: A proto_list contains items with duplicate
keys.
"""
keys = set()
for item in proto_list:
key = get_key(item)
if key in keys:
raise _ProtoListDuplicateKeyError(key)
keys.add(key)
_assert_proto_container_unique_keys(dst_proto_list, get_key)
_assert_proto_container_unique_keys(src_proto_list, get_key)
key_to_proto = {}
for proto in dst_proto_list:
key = get_key(proto)
key_to_proto[key] = proto
for proto in src_proto_list:
key = get_key(proto)
if key in key_to_proto:
if proto != key_to_proto.get(key):
raise _SameKeyDiffContentError(key)
else:
dst_proto_list.add().CopyFrom(proto)
|
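The merge policy can be illustrated on plain lists of dicts instead of protobuf repeated fields. The helper `_merge_by_key` below is hypothetical; the real function uses `add().CopyFrom(...)` and custom exception types:

```python
def _merge_by_key(dst, src, get_key):
  """Sketch of the duplicate-checking merge on plain lists."""
  def _assert_unique(items):
    keys = set()
    for item in items:
      key = get_key(item)
      if key in keys:
        raise ValueError('duplicate key: %r' % (key,))
      keys.add(key)

  _assert_unique(dst)
  _assert_unique(src)
  by_key = {get_key(item): item for item in dst}
  for item in src:
    key = get_key(item)
    if key in by_key:
      if item != by_key[key]:
        raise ValueError('same key, different contents: %r' % (key,))
    else:
      dst.append(item)
  return dst

dst = [{'name': 'a', 'op': 'Add'}]
src = [{'name': 'a', 'op': 'Add'}, {'name': 'b', 'op': 'Mul'}]
assert _merge_by_key(dst, src, lambda n: n['name']) == [
    {'name': 'a', 'op': 'Add'}, {'name': 'b', 'op': 'Mul'}]
```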
<SYSTEM_TASK:>
Combines two GraphDefs by adding nodes from from_proto into to_proto.
<END_TASK>
<USER_TASK:>
Description:
def combine_graph_defs(to_proto, from_proto):
"""Combines two GraphDefs by adding nodes from from_proto into to_proto.
All GraphDefs are expected to be TensorBoard GraphDefs.
It assumes node names are unique across GraphDefs if their contents differ. The
names can be the same only if the NodeDef contents are exactly the same.
Args:
to_proto: A destination TensorBoard GraphDef.
from_proto: A TensorBoard GraphDef to copy contents from.
Returns:
to_proto
Raises:
ValueError in case any assumption about GraphDef is violated: A
GraphDef should have unique node, function, and gradient function
names. Also, when merging GraphDefs, they should not have nodes,
functions, or gradient function mappings that share a name but whose details
do not match.
"""
|
if from_proto.version != to_proto.version:
raise ValueError('Cannot combine GraphDefs of different versions.')
try:
_safe_copy_proto_list_values(
to_proto.node,
from_proto.node,
lambda n: n.name)
except _ProtoListDuplicateKeyError as exc:
raise ValueError('A GraphDef contains non-unique node names: %s' % exc)
except _SameKeyDiffContentError as exc:
raise ValueError(
('Cannot combine GraphDefs because nodes share a name '
'but contents are different: %s') % exc)
try:
_safe_copy_proto_list_values(
to_proto.library.function,
from_proto.library.function,
lambda n: n.signature.name)
except _ProtoListDuplicateKeyError as exc:
raise ValueError('A GraphDef contains non-unique function names: %s' % exc)
except _SameKeyDiffContentError as exc:
raise ValueError(
('Cannot combine GraphDefs because functions share a name '
'but are different: %s') % exc)
try:
_safe_copy_proto_list_values(
to_proto.library.gradient,
from_proto.library.gradient,
lambda g: g.gradient_func)
except _ProtoListDuplicateKeyError as exc:
raise ValueError(
'A GraphDef contains non-unique gradient function names: %s' % exc)
except _SameKeyDiffContentError as exc:
raise ValueError(
('Cannot combine GraphDefs because gradients share a gradient_func name '
'but map to different functions: %s') % exc)
return to_proto
|
<SYSTEM_TASK:>
Write a scalar summary.
<END_TASK>
<USER_TASK:>
Description:
def scalar(name, data, step=None, description=None):
"""Write a scalar summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A real numeric scalar value, convertible to a `float32` Tensor.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
|
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
getattr(tf.summary.experimental, 'summary_scope', None) or
tf.summary.summary_scope)
with summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
tf.debugging.assert_scalar(data)
return tf.summary.write(tag=tag,
tensor=tf.cast(data, tf.float32),
step=step,
metadata=summary_metadata)
|
<SYSTEM_TASK:>
Create a scalar summary_pb2.Summary protobuf.
<END_TASK>
<USER_TASK:>
Description:
def scalar_pb(tag, data, description=None):
"""Create a scalar summary_pb2.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A 0-dimensional `np.array` or a compatible python number type.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Raises:
ValueError: If the type or shape of the data is unsupported.
Returns:
A `summary_pb2.Summary` protobuf object.
"""
|
arr = np.array(data)
if arr.shape != ():
raise ValueError('Expected scalar shape for tensor, got shape: %s.'
% arr.shape)
if arr.dtype.kind not in ('b', 'i', 'u', 'f'): # bool, int, uint, float
raise ValueError('Cast %s to float is not supported' % arr.dtype.name)
tensor_proto = tensor_util.make_tensor_proto(arr.astype(np.float32))
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
summary = summary_pb2.Summary()
summary.value.add(tag=tag,
metadata=summary_metadata,
tensor=tensor_proto)
return summary
|
<SYSTEM_TASK:>
Dumps plugin data to the log directory.
<END_TASK>
<USER_TASK:>
Description:
def dump_data(logdir):
"""Dumps plugin data to the log directory."""
|
# Create a tfevents file in the logdir so it is detected as a run.
write_empty_event_file(logdir)
plugin_logdir = plugin_asset_util.PluginDirectory(
logdir, profile_plugin.ProfilePlugin.plugin_name)
_maybe_create_directory(plugin_logdir)
for run in profile_demo_data.RUNS:
run_dir = os.path.join(plugin_logdir, run)
_maybe_create_directory(run_dir)
if run in profile_demo_data.TRACES:
with open(os.path.join(run_dir, 'trace'), 'wb') as f:  # binary mode: SerializeToString() returns bytes
proto = trace_events_pb2.Trace()
text_format.Merge(profile_demo_data.TRACES[run], proto)
f.write(proto.SerializeToString())
if run not in profile_demo_data.TRACE_ONLY:
shutil.copyfile('tensorboard/plugins/profile/profile_demo.op_profile.json',
os.path.join(run_dir, 'op_profile.json'))
shutil.copyfile(
'tensorboard/plugins/profile/profile_demo.memory_viewer.json',
os.path.join(run_dir, 'memory_viewer.json'))
shutil.copyfile(
'tensorboard/plugins/profile/profile_demo.pod_viewer.json',
os.path.join(run_dir, 'pod_viewer.json'))
shutil.copyfile(
'tensorboard/plugins/profile/profile_demo.google_chart_demo.json',
os.path.join(run_dir, 'google_chart_demo.json'))
# Unsupported tool data should not be displayed.
run_dir = os.path.join(plugin_logdir, 'empty')
_maybe_create_directory(run_dir)
with open(os.path.join(run_dir, 'unsupported'), 'w') as f:
f.write('unsupported data')
|
<SYSTEM_TASK:>
Calculate health pill of a tensor.
<END_TASK>
<USER_TASK:>
Description:
def calc_health_pill(tensor):
"""Calculate health pill of a tensor.
Args:
tensor: An instance of `np.array` (for initialized tensors) or
`tensorflow.python.debug.lib.debug_data.InconvertibleTensorProto`
(for uninitialized tensors).
Returns:
If `tensor` is an initialized tensor of numeric or boolean types:
the calculated health pill, as a `list` of `float`s.
Else if `tensor` is an initialized tensor with `string`, `resource` or any
other non-numeric types:
`None`.
Else (i.e., if `tensor` is uninitialized): An all-zero `list`, with the
first element signifying that the tensor is uninitialized.
"""
|
health_pill = [0.0] * 14
# TODO(cais): Add unit test for this method that compares results with
# DebugNumericSummary output.
# Is tensor initialized.
if not isinstance(tensor, np.ndarray):
return health_pill
health_pill[0] = 1.0
# np.float / np.complex / np.bool aliases were removed in modern NumPy.
if not (np.issubdtype(tensor.dtype, np.floating) or
np.issubdtype(tensor.dtype, np.complexfloating) or
np.issubdtype(tensor.dtype, np.integer) or
tensor.dtype == np.bool_):
return None
# Total number of elements.
health_pill[1] = float(np.size(tensor))
# TODO(cais): Further performance optimization?
nan_mask = np.isnan(tensor)
inf_mask = np.isinf(tensor)
# Number of NaN elements.
health_pill[2] = float(np.sum(nan_mask))
# Number of -Inf elements.
health_pill[3] = float(np.sum(tensor == -np.inf))
# Number of finite negative elements.
health_pill[4] = float(np.sum(
np.logical_and(np.logical_not(inf_mask), tensor < 0.0)))
# Number of zero elements.
health_pill[5] = float(np.sum(tensor == 0.0))
# Number finite positive elements.
health_pill[6] = float(np.sum(
np.logical_and(np.logical_not(inf_mask), tensor > 0.0)))
# Number of +Inf elements.
health_pill[7] = float(np.sum(tensor == np.inf))
finite_subset = tensor[
np.logical_and(np.logical_not(nan_mask), np.logical_not(inf_mask))]
if np.size(finite_subset):
# Finite subset is not empty.
# Minimum of the non-NaN non-Inf elements.
health_pill[8] = float(np.min(finite_subset))
# Maximum of the non-NaN non-Inf elements.
health_pill[9] = float(np.max(finite_subset))
# Mean of the non-NaN non-Inf elements.
health_pill[10] = float(np.mean(finite_subset))
# Variance of the non-NaN non-Inf elements.
health_pill[11] = float(np.var(finite_subset))
else:
# If no finite element exists:
# Set minimum to +inf.
health_pill[8] = np.inf
# Set maximum to -inf.
health_pill[9] = -np.inf
# Set mean to NaN.
health_pill[10] = np.nan
# Set variance to NaN.
health_pill[11] = np.nan
# DType encoded as a number.
# TODO(cais): Convert numpy dtype to corresponding tensorflow dtype enum.
health_pill[12] = -1.0
# ndims.
health_pill[13] = float(len(tensor.shape))
# Size of the dimensions.
health_pill.extend([float(x) for x in tensor.shape])
return health_pill
|
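A small illustrative check of the health-pill layout documented in the comments above; the input array is made up and the indices follow the code.

import numpy as np

t = np.array([[1.0, -2.0, 0.0],
              [np.nan, np.inf, 3.0]])
pill = calc_health_pill(t)
assert pill[0] == 1.0       # initialized
assert pill[1] == 6.0       # total number of elements
assert pill[2] == 1.0       # NaN count
assert pill[7] == 1.0       # +Inf count
print(pill[8:12])           # min, max, mean, variance of the finite elements
print(pill[13], pill[14:])  # ndims and the per-dimension sizes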
<SYSTEM_TASK:>
Creates extractors to extract properties corresponding to 'col_params'.
<END_TASK>
<USER_TASK:>
Description:
def _create_extractors(col_params):
"""Creates extractors to extract properties corresponding to 'col_params'.
Args:
col_params: List of ListSessionGroupsRequest.ColParam protobufs.
Returns:
A list of extractor functions. The ith element in the
returned list extracts the column corresponding to the ith element of
_request.col_params
"""
|
result = []
for col_param in col_params:
result.append(_create_extractor(col_param))
return result
|
<SYSTEM_TASK:>
Returns function that extracts a metric from a session group or a session.
<END_TASK>
<USER_TASK:>
Description:
def _create_metric_extractor(metric_name):
"""Returns function that extracts a metric from a session group or a session.
Args:
metric_name: tensorboard.hparams.MetricName protobuffer. Identifies the
metric to extract from the session group.
Returns:
A function that takes a tensorboard.hparams.SessionGroup or
tensorboard.hparams.Session protobuffer and returns the value of the metric
identified by 'metric_name' or None if the value doesn't exist.
"""
|
def extractor_fn(session_or_group):
metric_value = _find_metric_value(session_or_group,
metric_name)
return metric_value.value if metric_value else None
return extractor_fn
|
<SYSTEM_TASK:>
Returns the metric_value for a given metric in a session or session group.
<END_TASK>
<USER_TASK:>
Description:
def _find_metric_value(session_or_group, metric_name):
"""Returns the metric_value for a given metric in a session or session group.
Args:
session_or_group: A Session protobuffer or SessionGroup protobuffer.
metric_name: A MetricName protobuffer. The metric to search for.
Returns:
A MetricValue protobuffer representing the value of the given metric or
None if no such metric was found in session_or_group.
"""
|
# Note: We can speed this up by converting the metric_values field
# to a dictionary on initialization, to avoid a linear search here. We'll
# need to wrap the SessionGroup and Session protos in a python object for
# that.
for metric_value in session_or_group.metric_values:
if (metric_value.name.tag == metric_name.tag and
metric_value.name.group == metric_name.group):
return metric_value
|
<SYSTEM_TASK:>
Returns an extractor function that extracts an hparam from a session group.
<END_TASK>
<USER_TASK:>
Description:
def _create_hparam_extractor(hparam_name):
"""Returns an extractor function that extracts an hparam from a session group.
Args:
hparam_name: str. Identifies the hparam to extract from the session group.
Returns:
A function that takes a tensorboard.hparams.SessionGroup protobuffer and
returns the value, as a native Python object, of the hparam identified by
'hparam_name'.
"""
|
def extractor_fn(session_group):
if hparam_name in session_group.hparams:
return _value_to_python(session_group.hparams[hparam_name])
return None
return extractor_fn
|
<SYSTEM_TASK:>
Creates filters for the given col_params.
<END_TASK>
<USER_TASK:>
Description:
def _create_filters(col_params, extractors):
"""Creates filters for the given col_params.
Args:
col_params: List of ListSessionGroupsRequest.ColParam protobufs.
extractors: list of extractor functions of the same length as col_params.
Each element should extract the column described by the corresponding
element of col_params.
Returns:
A list of filter functions. Each corresponding to a single
col_params.filter oneof field of _request
"""
|
result = []
for col_param, extractor in zip(col_params, extractors):
a_filter = _create_filter(col_param, extractor)
if a_filter:
result.append(a_filter)
return result
|
<SYSTEM_TASK:>
Creates a filter for the given col_param and extractor.
<END_TASK>
<USER_TASK:>
Description:
def _create_filter(col_param, extractor):
"""Creates a filter for the given col_param and extractor.
Args:
col_param: A tensorboard.hparams.ColParams object identifying the column
and describing the filter to apply.
extractor: A function that extracts the column value identified by
'col_param' from a tensorboard.hparams.SessionGroup protobuffer.
Returns:
A boolean function taking a tensorboard.hparams.SessionGroup protobuffer
returning True if the session group passes the filter described by
'col_param'. If col_param does not specify a filter (i.e. any session
group passes) returns None.
"""
|
include_missing_values = not col_param.exclude_missing_values
if col_param.HasField('filter_regexp'):
value_filter_fn = _create_regexp_filter(col_param.filter_regexp)
elif col_param.HasField('filter_interval'):
value_filter_fn = _create_interval_filter(col_param.filter_interval)
elif col_param.HasField('filter_discrete'):
value_filter_fn = _create_discrete_set_filter(col_param.filter_discrete)
elif include_missing_values:
# No 'filter' field and include_missing_values is True.
# Thus, the resulting filter always returns True, so to optimize for this
# common case we do not include it in the list of filters to check.
return None
else:
value_filter_fn = lambda _: True
def filter_fn(session_group):
value = extractor(session_group)
if value is None:
return include_missing_values
return value_filter_fn(value)
return filter_fn
|
<SYSTEM_TASK:>
Returns a boolean function that filters strings based on a regular exp.
<END_TASK>
<USER_TASK:>
Description:
def _create_regexp_filter(regex):
"""Returns a boolean function that filters strings based on a regular exp.
Args:
regex: A string describing the regexp to use.
Returns:
A function that takes a string and returns True if any of its substrings
matches regex.
"""
|
# Warning: Note that python's regex library allows inputs that take
# exponential time. Time-limiting it is difficult. When we move to
# a true multi-tenant tensorboard server, the regexp implementation here
# would need to be replaced by something more secure.
compiled_regex = re.compile(regex)
def filter_fn(value):
if not isinstance(value, six.string_types):
raise error.HParamsError(
'Cannot use a regexp filter for a value of type %s. Value: %s' %
(type(value), value))
return re.search(compiled_regex, value) is not None
return filter_fn
|
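A tiny illustration of the resulting filter; the regular expression and values are made up.

contains_optimizer = _create_regexp_filter(r"adam|sgd")
print(contains_optimizer("adam"))      # True
print(contains_optimizer("rmsprop"))   # False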
<SYSTEM_TASK:>
Returns a function that checks whether a number belongs to an interval.
<END_TASK>
<USER_TASK:>
Description:
def _create_interval_filter(interval):
"""Returns a function that checkes whether a number belongs to an interval.
Args:
interval: A tensorboard.hparams.Interval protobuf describing the interval.
Returns:
A function taking a number (a float or an object of a type in
six.integer_types) that returns True if the number belongs to (the closed)
'interval'.
"""
|
def filter_fn(value):
if (not isinstance(value, six.integer_types) and
not isinstance(value, float)):
raise error.HParamsError(
'Cannot use an interval filter for a value of type: %s, Value: %s' %
(type(value), value))
return interval.min_value <= value and value <= interval.max_value
return filter_fn
|
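An illustrative call, assuming the hparams `api_pb2.Interval` protobuf exposes `min_value`/`max_value` as described above.

in_unit_interval = _create_interval_filter(
    api_pb2.Interval(min_value=0.0, max_value=1.0))
print(in_unit_interval(0.5))   # True
print(in_unit_interval(2))     # False (outside the closed interval)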
<SYSTEM_TASK:>
Sets the metrics for the group to be the average of its sessions.
<END_TASK>
<USER_TASK:>
Description:
def _set_avg_session_metrics(session_group):
"""Sets the metrics for the group to be the average of its sessions.
The resulting session group metrics consist of the union of metrics across
the group's sessions. The value of each session group metric is the average
of that metric's values across the sessions in the group. The 'step' and
'wall_time_secs' fields of the resulting MetricValue field in the session
group are populated with the corresponding averages (truncated for 'step')
as well.
Args:
session_group: A SessionGroup protobuffer.
"""
|
assert session_group.sessions, 'SessionGroup cannot be empty.'
# Algorithm: Iterate over all (session, metric) pairs and maintain a
# dict from _MetricIdentifier to _MetricStats objects.
# Then use the final dict state to compute the average for each metric.
metric_stats = collections.defaultdict(_MetricStats)
for session in session_group.sessions:
for metric_value in session.metric_values:
metric_name = _MetricIdentifier(group=metric_value.name.group,
tag=metric_value.name.tag)
stats = metric_stats[metric_name]
stats.total += metric_value.value
stats.count += 1
stats.total_step += metric_value.training_step
stats.total_wall_time_secs += metric_value.wall_time_secs
del session_group.metric_values[:]
for (metric_name, stats) in six.iteritems(metric_stats):
session_group.metric_values.add(
name=api_pb2.MetricName(group=metric_name.group, tag=metric_name.tag),
value=float(stats.total)/float(stats.count),
training_step=stats.total_step // stats.count,
wall_time_secs=stats.total_wall_time_secs / stats.count)
|
<SYSTEM_TASK:>
Sets the metrics for session_group to those of its "median session".
<END_TASK>
<USER_TASK:>
Description:
def _set_median_session_metrics(session_group, aggregation_metric):
"""Sets the metrics for session_group to those of its "median session".
The median session is the session in session_group with the median value
of the metric given by 'aggregation_metric'. The median is taken over the
subset of sessions in the group whose 'aggregation_metric' was measured
at the largest training step among the sessions in the group.
Args:
session_group: A SessionGroup protobuffer.
aggregation_metric: A MetricName protobuffer.
"""
|
measurements = sorted(_measurements(session_group, aggregation_metric),
key=operator.attrgetter('metric_value.value'))
median_session = measurements[(len(measurements) - 1) // 2].session_index
del session_group.metric_values[:]
session_group.metric_values.MergeFrom(
session_group.sessions[median_session].metric_values)
|
<SYSTEM_TASK:>
Sets the metrics for session_group to those of its "extremum session".
<END_TASK>
<USER_TASK:>
Description:
def _set_extremum_session_metrics(session_group, aggregation_metric,
extremum_fn):
"""Sets the metrics for session_group to those of its "extremum session".
The extremum session is the session in session_group with the extremum value
of the metric given by 'aggregation_metric'. The extremum is taken over the
subset of sessions in the group whose 'aggregation_metric' was measured
at the largest training step among the sessions in the group.
Args:
session_group: A SessionGroup protobuffer.
aggregation_metric: A MetricName protobuffer.
extremum_fn: callable. Must be either 'min' or 'max'. Determines the type of
extremum to compute.
"""
|
measurements = _measurements(session_group, aggregation_metric)
ext_session = extremum_fn(
measurements,
key=operator.attrgetter('metric_value.value')).session_index
del session_group.metric_values[:]
session_group.metric_values.MergeFrom(
session_group.sessions[ext_session].metric_values)
|
<SYSTEM_TASK:>
A generator for the values of the metric across the sessions in the group.
<END_TASK>
<USER_TASK:>
Description:
def _measurements(session_group, metric_name):
"""A generator for the values of the metric across the sessions in the group.
Args:
session_group: A SessionGroup protobuffer.
metric_name: A MetricName protobuffer.
Yields:
The next metric value wrapped in a _Measurement instance.
"""
|
for session_index, session in enumerate(session_group.sessions):
metric_value = _find_metric_value(session, metric_name)
if not metric_value:
continue
yield _Measurement(metric_value, session_index)
|
<SYSTEM_TASK:>
Returns a list of SessionGroups protobuffers from the summary data.
<END_TASK>
<USER_TASK:>
Description:
def _build_session_groups(self):
"""Returns a list of SessionGroups protobuffers from the summary data."""
|
# Algorithm: We keep a dict 'groups_by_name' mapping a SessionGroup name
# (str) to a SessionGroup protobuffer. We traverse the runs associated with
# the plugin--each representing a single session. We form a Session
# protobuffer from each run and add it to the relevant SessionGroup object
# in the 'groups_by_name' dict. We create the SessionGroup object if this
# is the first session of that group that we encounter.
groups_by_name = {}
run_to_tag_to_content = self._context.multiplexer.PluginRunToTagToContent(
metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(run_to_tag_to_content):
if metadata.SESSION_START_INFO_TAG not in tag_to_content:
continue
start_info = metadata.parse_session_start_info_plugin_data(
tag_to_content[metadata.SESSION_START_INFO_TAG])
end_info = None
if metadata.SESSION_END_INFO_TAG in tag_to_content:
end_info = metadata.parse_session_end_info_plugin_data(
tag_to_content[metadata.SESSION_END_INFO_TAG])
session = self._build_session(run, start_info, end_info)
if session.status in self._request.allowed_statuses:
self._add_session(session, start_info, groups_by_name)
# Compute the session group's aggregated metrics for each group.
groups = groups_by_name.values()
for group in groups:
# We sort the sessions in a group so that the order is deterministic.
group.sessions.sort(key=operator.attrgetter('name'))
self._aggregate_metrics(group)
return groups
|
<SYSTEM_TASK:>
Adds a new Session protobuffer to the 'groups_by_name' dictionary.
<END_TASK>
<USER_TASK:>
Description:
def _add_session(self, session, start_info, groups_by_name):
"""Adds a new Session protobuffer to the 'groups_by_name' dictionary.
Called by _build_session_groups when we encounter a new session. Creates
the Session protobuffer and adds it to the relevant group in the
'groups_by_name' dict. Creates the session group if this is the first time
we encounter it.
Args:
session: api_pb2.Session. The session to add.
start_info: The SessionStartInfo protobuffer associated with the session.
groups_by_name: A dict mapping str to SessionGroup protobuffer, representing
the session groups and sessions found so far.
"""
|
# If the group_name is empty, this session's group contains only
# this session. Use the session name for the group name since session
# names are unique.
group_name = start_info.group_name or session.name
if group_name in groups_by_name:
groups_by_name[group_name].sessions.extend([session])
else:
# Create the group and add the session as the first one.
group = api_pb2.SessionGroup(
name=group_name,
sessions=[session],
monitor_url=start_info.monitor_url)
# Copy hparams from the first session (all sessions should have the same
# hyperparameter values) into result.
# There doesn't seem to be a way to initialize a protobuffer map in the
# constructor.
for (key, value) in six.iteritems(start_info.hparams):
group.hparams[key].CopyFrom(value)
groups_by_name[group_name] = group
|
<SYSTEM_TASK:>
Sets the metrics of the group based on aggregation_type.
<END_TASK>
<USER_TASK:>
Description:
def _aggregate_metrics(self, session_group):
"""Sets the metrics of the group based on aggregation_type."""
|
if (self._request.aggregation_type == api_pb2.AGGREGATION_AVG or
self._request.aggregation_type == api_pb2.AGGREGATION_UNSET):
_set_avg_session_metrics(session_group)
elif self._request.aggregation_type == api_pb2.AGGREGATION_MEDIAN:
_set_median_session_metrics(session_group,
self._request.aggregation_metric)
elif self._request.aggregation_type == api_pb2.AGGREGATION_MIN:
_set_extremum_session_metrics(session_group,
self._request.aggregation_metric,
min)
elif self._request.aggregation_type == api_pb2.AGGREGATION_MAX:
_set_extremum_session_metrics(session_group,
self._request.aggregation_metric,
max)
else:
raise error.HParamsError('Unknown aggregation_type in request: %s' %
self._request.aggregation_type)
|
<SYSTEM_TASK:>
Sorts 'session_groups' in place according to _request.col_params.
<END_TASK>
<USER_TASK:>
Description:
def _sort(self, session_groups):
"""Sorts 'session_groups' in place according to _request.col_params."""
|
# Sort by session_group name so we have a deterministic order.
session_groups.sort(key=operator.attrgetter('name'))
# Sort by lexicographical order of the _request.col_params whose order
# is not ORDER_UNSPECIFIED. The first such column is the primary sorting
# key, the second is the secondary sorting key, etc. To achieve that we
# need to iterate on these columns in reverse order (thus the primary key
# is the key used in the last sort).
for col_param, extractor in reversed(list(zip(self._request.col_params,
self._extractors))):
if col_param.order == api_pb2.ORDER_UNSPECIFIED:
continue
if col_param.order == api_pb2.ORDER_ASC:
session_groups.sort(
key=_create_key_func(
extractor,
none_is_largest=not col_param.missing_values_first))
elif col_param.order == api_pb2.ORDER_DESC:
session_groups.sort(
key=_create_key_func(
extractor,
none_is_largest=col_param.missing_values_first),
reverse=True)
else:
raise error.HParamsError('Unknown col_param.order given: %s' %
col_param)
|
<SYSTEM_TASK:>
Computes harmonic ratio and pitch
<END_TASK>
<USER_TASK:>
Description:
def stHarmonic(frame, fs):
"""
Computes harmonic ratio and pitch
"""
|
M = int(numpy.round(0.016 * fs)) - 1  # integer lag limit so it can be used for slicing and array sizes
R = numpy.correlate(frame, frame, mode='full')
g = R[len(frame)-1]
R = R[len(frame):-1]
# estimate m0 (as the first zero crossing of R)
[a, ] = numpy.nonzero(numpy.diff(numpy.sign(R)))
if len(a) == 0:
m0 = len(R)-1
else:
m0 = a[0]
if M > len(R):
M = len(R) - 1
Gamma = numpy.zeros((M), dtype=numpy.float64)
CSum = numpy.cumsum(frame ** 2)
Gamma[m0:M] = R[m0:M] / (numpy.sqrt((g * CSum[M:m0:-1])) + eps)
ZCR = stZCR(Gamma)
if ZCR > 0.15:
HR = 0.0
f0 = 0.0
else:
if len(Gamma) == 0:
HR = 1.0
blag = 0.0
Gamma = numpy.zeros((M), dtype=numpy.float64)
else:
HR = numpy.max(Gamma)
blag = numpy.argmax(Gamma)
# Get fundamental frequency:
f0 = fs / (blag + eps)
if f0 > 5000:
f0 = 0.0
if HR < 0.1:
f0 = 0.0
return (HR, f0)
|
<SYSTEM_TASK:>
Computes the MFCCs of a frame, given the fft mag
<END_TASK>
<USER_TASK:>
Description:
def stMFCC(X, fbank, n_mfcc_feats):
"""
Computes the MFCCs of a frame, given the fft mag
ARGUMENTS:
X: fft magnitude abs(FFT)
fbank: filter bank (see mfccInitFilterBanks)
RETURN
ceps: MFCCs (13 element vector)
Note: MFCC calculation is, in general, taken from the
scikits.talkbox library (MIT Licence), with a small number of
modifications to make it more compact and suitable for the
pyAudioAnalysis Lib
"""
|
mspec = numpy.log10(numpy.dot(X, fbank.T)+eps)
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:n_mfcc_feats]
return ceps
|
<SYSTEM_TASK:>
This function initializes the chroma matrices used in the calculation of the chroma features
<END_TASK>
<USER_TASK:>
Description:
def stChromaFeaturesInit(nfft, fs):
"""
This function initializes the chroma matrices used in the calculation of the chroma features
"""
|
freqs = numpy.array([((f + 1) * fs) / (2 * nfft) for f in range(nfft)])
Cp = 27.50
nChroma = numpy.round(12.0 * numpy.log2(freqs / Cp)).astype(int)
nFreqsPerChroma = numpy.zeros((nChroma.shape[0], ))
uChroma = numpy.unique(nChroma)
for u in uChroma:
idx = numpy.nonzero(nChroma == u)
nFreqsPerChroma[idx] = idx[0].shape
return nChroma, nFreqsPerChroma
|
<SYSTEM_TASK:>
This function implements the short-term windowing process. For each short-term window a set of features is extracted.
<END_TASK>
<USER_TASK:>
Description:
def stFeatureExtraction(signal, fs, win, step):
"""
This function implements the short-term windowing process. For each short-term window a set of features is extracted.
This results in a sequence of feature vectors, stored in a numpy matrix.
ARGUMENTS
signal: the input signal samples
fs: the sampling freq (in Hz)
win: the short-term window size (in samples)
step: the short-term window step (in samples)
RETURNS
st_features: a numpy array (n_feats x numOfShortTermWindows)
"""
|
win = int(win)
step = int(step)
# Signal normalization
signal = numpy.double(signal)
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (numpy.abs(signal)).max()
signal = (signal - DC) / (MAX + 0.0000000001)
N = len(signal) # total number of samples
cur_p = 0
count_fr = 0
nFFT = int(win / 2)
[fbank, freqs] = mfccInitFilterBanks(fs, nFFT) # compute the triangular filter banks used in the mfcc calculation
nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, fs)
n_time_spectral_feats = 8
n_harmonic_feats = 0
n_mfcc_feats = 13
n_chroma_feats = 13
n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats + n_chroma_feats
# n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats
feature_names = []
feature_names.append("zcr")
feature_names.append("energy")
feature_names.append("energy_entropy")
feature_names += ["spectral_centroid", "spectral_spread"]
feature_names.append("spectral_entropy")
feature_names.append("spectral_flux")
feature_names.append("spectral_rolloff")
feature_names += ["mfcc_{0:d}".format(mfcc_i)
for mfcc_i in range(1, n_mfcc_feats+1)]
feature_names += ["chroma_{0:d}".format(chroma_i)
for chroma_i in range(1, n_chroma_feats)]
feature_names.append("chroma_std")
st_features = []
while (cur_p + win - 1 < N): # for each short-term window until the end of signal
count_fr += 1
x = signal[cur_p:cur_p+win] # get current window
cur_p = cur_p + step # update window position
X = abs(fft(x)) # get fft magnitude
X = X[0:nFFT] # normalize fft
X = X / len(X)
if count_fr == 1:
X_prev = X.copy() # keep previous fft mag (used in spectral flux)
curFV = numpy.zeros((n_total_feats, 1))
curFV[0] = stZCR(x) # zero crossing rate
curFV[1] = stEnergy(x) # short-term energy
curFV[2] = stEnergyEntropy(x) # short-term entropy of energy
[curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, fs) # spectral centroid and spread
curFV[5] = stSpectralEntropy(X) # spectral entropy
curFV[6] = stSpectralFlux(X, X_prev) # spectral flux
curFV[7] = stSpectralRollOff(X, 0.90, fs) # spectral rolloff
curFV[n_time_spectral_feats:n_time_spectral_feats+n_mfcc_feats, 0] = \
stMFCC(X, fbank, n_mfcc_feats).copy() # MFCCs
chromaNames, chromaF = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma)
curFV[n_time_spectral_feats + n_mfcc_feats:
n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \
chromaF
curFV[n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \
chromaF.std()
st_features.append(curFV)
# delta features
'''
if count_fr>1:
delta = curFV - prevFV
curFVFinal = numpy.concatenate((curFV, delta))
else:
curFVFinal = numpy.concatenate((curFV, curFV))
prevFV = curFV
st_features.append(curFVFinal)
'''
# end of delta
X_prev = X.copy()
st_features = numpy.concatenate(st_features, 1)
return st_features, feature_names
|
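A hedged usage sketch, assuming the surrounding pyAudioAnalysis-style module (with `audioBasicIO.readAudioFile` and `stereo2mono`) is importable; the file name and window sizes are illustrative.

fs, x = audioBasicIO.readAudioFile("example.wav")   # made-up path
x = audioBasicIO.stereo2mono(x)
# 50 ms windows with a 25 ms step are common defaults.
features, feature_names = stFeatureExtraction(
    x, fs, int(0.050 * fs), int(0.025 * fs))
print(features.shape)   # (n_feats, number of short-term windows)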
<SYSTEM_TASK:>
This function extracts the mid-term features of the WAVE files of a particular folder.
<END_TASK>
<USER_TASK:>
Description:
def dirWavFeatureExtraction(dirName, mt_win, mt_step, st_win, st_step,
compute_beat=False):
"""
This function extracts the mid-term features of the WAVE files of a particular folder.
The resulting feature vector is extracted by long-term averaging the mid-term features.
Therefore ONE FEATURE VECTOR is extracted for each WAV file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mt_win, mt_step: mid-term window and step (in seconds)
- st_win, st_step: short-term window and step (in seconds)
"""
|
all_mt_feats = numpy.array([])
process_times = []
types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg')
wav_file_list = []
for files in types:
wav_file_list.extend(glob.glob(os.path.join(dirName, files)))
wav_file_list = sorted(wav_file_list)
wav_file_list2, mt_feature_names = [], []
for i, wavFile in enumerate(wav_file_list):
print("Analyzing file {0:d} of "
"{1:d}: {2:s}".format(i+1,
len(wav_file_list),
wavFile))
if os.stat(wavFile).st_size == 0:
print(" (EMPTY FILE -- SKIPPING)")
continue
[fs, x] = audioBasicIO.readAudioFile(wavFile)
if isinstance(x, int):
continue
t1 = time.time()  # time.clock() was removed in Python 3.8; wall time suits the realtime ratio below
x = audioBasicIO.stereo2mono(x)
if x.shape[0]<float(fs)/5:
print(" (AUDIO FILE TOO SMALL - SKIPPING)")
continue
wav_file_list2.append(wavFile)
if compute_beat:
[mt_term_feats, st_features, mt_feature_names] = \
mtFeatureExtraction(x, fs, round(mt_win * fs),
round(mt_step * fs),
round(fs * st_win), round(fs * st_step))
[beat, beat_conf] = beatExtraction(st_features, st_step)
else:
[mt_term_feats, _, mt_feature_names] = \
mtFeatureExtraction(x, fs, round(mt_win * fs),
round(mt_step * fs),
round(fs * st_win), round(fs * st_step))
mt_term_feats = numpy.transpose(mt_term_feats)
mt_term_feats = mt_term_feats.mean(axis=0)
# long term averaging of mid-term statistics
if (not numpy.isnan(mt_term_feats).any()) and \
(not numpy.isinf(mt_term_feats).any()):
if compute_beat:
mt_term_feats = numpy.append(mt_term_feats, beat)
mt_term_feats = numpy.append(mt_term_feats, beat_conf)
if len(all_mt_feats) == 0:
# append feature vector
all_mt_feats = mt_term_feats
else:
all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats))
t2 = time.time()
duration = float(len(x)) / fs
process_times.append((t2 - t1) / duration)
if len(process_times) > 0:
print("Feature extraction complexity ratio: "
"{0:.1f} x realtime".format((1.0 / numpy.mean(numpy.array(process_times)))))
return (all_mt_feats, wav_file_list2, mt_feature_names)
|
<SYSTEM_TASK:>
This function extracts the mid-term features of the WAVE
<END_TASK>
<USER_TASK:>
Description:
def dirWavFeatureExtractionNoAveraging(dirName, mt_win, mt_step, st_win, st_step):
"""
This function extracts the mid-term features of the WAVE
files of a particular folder without averaging each file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mt_win, mt_step: mid-term window and step (in seconds)
- st_win, st_step: short-term window and step (in seconds)
RETURNS:
- X: A feature matrix (one row of mid-term feature statistics per mid-term window)
- Y: An array of file indices, one per mid-term window
- filenames: the list of WAV file paths that were processed
"""
|
all_mt_feats = numpy.array([])
signal_idx = numpy.array([])
process_times = []
types = ('*.wav', '*.aif', '*.aiff', '*.ogg')
wav_file_list = []
for files in types:
wav_file_list.extend(glob.glob(os.path.join(dirName, files)))
wav_file_list = sorted(wav_file_list)
for i, wavFile in enumerate(wav_file_list):
[fs, x] = audioBasicIO.readAudioFile(wavFile)
if isinstance(x, int):
continue
x = audioBasicIO.stereo2mono(x)
[mt_term_feats, _, _] = mtFeatureExtraction(x, fs, round(mt_win * fs),
round(mt_step * fs),
round(fs * st_win),
round(fs * st_step))
mt_term_feats = numpy.transpose(mt_term_feats)
if len(all_mt_feats) == 0: # append feature vector
all_mt_feats = mt_term_feats
signal_idx = numpy.zeros((mt_term_feats.shape[0], ))
else:
all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats))
signal_idx = numpy.append(signal_idx, i * numpy.ones((mt_term_feats.shape[0], )))
return (all_mt_feats, signal_idx, wav_file_list)
|
<SYSTEM_TASK:>
Start to run a strategy
<END_TASK>
<USER_TASK:>
Description:
def run(**kwargs):
"""
Start to run a strategy
"""
|
config_path = kwargs.get('config_path', None)
if config_path is not None:
config_path = os.path.abspath(config_path)
kwargs.pop('config_path')
if not kwargs.get('base__securities', None):
kwargs.pop('base__securities', None)
from rqalpha import main
source_code = kwargs.get("base__source_code")
cfg = parse_config(kwargs, config_path=config_path, click_type=True, source_code=source_code)
source_code = cfg.base.source_code
results = main.run(cfg, source_code=source_code)
# store results into ipython when running in ipython
from rqalpha.utils import is_run_from_ipython
if results is not None and is_run_from_ipython():
import IPython
from rqalpha.utils import RqAttrDict
ipy = IPython.get_ipython()
report = results.get("sys_analyser", {})
ipy.user_global_ns["results"] = results
ipy.user_global_ns["report"] = RqAttrDict(report)
if results is None:
sys.exit(1)
|
<SYSTEM_TASK:>
Generate example strategies to target folder
<END_TASK>
<USER_TASK:>
Description:
def examples(directory):
"""
Generate example strategies to target folder
"""
|
source_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples")
try:
shutil.copytree(source_dir, os.path.join(directory, "examples"))
except OSError as e:
if e.errno == errno.EEXIST:
six.print_("Folder examples is exists.")
|
<SYSTEM_TASK:>
Perform the actual request. Retrieve a connection from the connection
<END_TASK>
<USER_TASK:>
Description:
def perform_request(self, method, url, headers=None, params=None, body=None):
"""
Perform the actual request. Retrieve a connection from the connection
pool, pass all the information to its perform_request method and
return the data.
If an exception was raised, mark the connection as failed and retry (up
to `max_retries` times).
If the operation was successful and the connection used was previously
marked as dead, mark it as live, resetting its failure count.
:arg method: HTTP method to use
:arg url: absolute url (without host) to target
:arg headers: dictionary of headers, will be handed over to the
underlying :class:`~elasticsearch.Connection` class
:arg params: dictionary of query parameters, will be handed over to the
underlying :class:`~elasticsearch.Connection` class for serialization
:arg body: body of the request, will be serialized using the serializer and
passed to the connection
"""
|
if body is not None:
body = self.serializer.dumps(body)
# some clients or environments don't support sending GET with body
if method in ('HEAD', 'GET') and self.send_get_body_as != 'GET':
# send it as post instead
if self.send_get_body_as == 'POST':
method = 'POST'
# or as source parameter
elif self.send_get_body_as == 'source':
if params is None:
params = {}
params['source'] = body
body = None
if body is not None:
try:
body = body.encode('utf-8', 'surrogatepass')
except (UnicodeDecodeError, AttributeError):
# bytes/str - no need to re-encode
pass
ignore = ()
timeout = None
if params:
timeout = params.pop('request_timeout', None)
ignore = params.pop('ignore', ())
if isinstance(ignore, int):
ignore = (ignore, )
for attempt in range(self.max_retries + 1):
connection = self.get_connection()
try:
# add a delay before attempting the next retry
# 0, 1, 3, 7, etc...
delay = 2**attempt - 1
time.sleep(delay)
status, headers_response, data = connection.perform_request(method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
except TransportError as e:
if method == 'HEAD' and e.status_code == 404:
return False
retry = False
if isinstance(e, ConnectionTimeout):
retry = self.retry_on_timeout
elif isinstance(e, ConnectionError):
retry = True
elif e.status_code in self.retry_on_status:
retry = True
if retry:
# only mark as dead if we are retrying
self.mark_dead(connection)
# raise exception on last retry
if attempt == self.max_retries:
raise
else:
raise
else:
# connection didn't fail, confirm it's live status
self.connection_pool.mark_live(connection)
if method == 'HEAD':
return 200 <= status < 300
if data:
data = self.deserializer.loads(data, headers_response.get('content-type'))
return data
|
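The retry delay above follows an exponential backoff of 2**attempt - 1 seconds; a one-line check of the schedule:

print([2 ** attempt - 1 for attempt in range(5)])   # [0, 1, 3, 7, 15]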
<SYSTEM_TASK:>
Go through the git repository log and generate a document per commit
<END_TASK>
<USER_TASK:>
Description:
def parse_commits(head, name):
"""
Go through the git repository log and generate a document per commit
containing all the metadata.
"""
|
for commit in head.traverse():
yield {
'_id': commit.hexsha,
'repository': name,
'committed_date': datetime.fromtimestamp(commit.committed_date),
'committer': {
'name': commit.committer.name,
'email': commit.committer.email,
},
'authored_date': datetime.fromtimestamp(commit.authored_date),
'author': {
'name': commit.author.name,
'email': commit.author.email,
},
'description': commit.message,
'parent_shas': [p.hexsha for p in commit.parents],
# we only care about the filenames, not the per-file stats
'files': list(commit.stats.files),
'stats': commit.stats.total,
}
|
<SYSTEM_TASK:>
Parse a git repository with all its commits and load it into elasticsearch
<END_TASK>
<USER_TASK:>
Description:
def load_repo(client, path=None, index='git'):
"""
Parse a git repository with all its commits and load it into elasticsearch
using `client`. If the index doesn't exist it will be created.
"""
|
path = dirname(dirname(abspath(__file__))) if path is None else path
repo_name = basename(path)
repo = git.Repo(path)
create_git_index(client, index)
# we let the streaming bulk continuously process the commits as they come
# in - since the `parse_commits` function is a generator this will avoid
# loading all the commits into memory
for ok, result in streaming_bulk(
client,
parse_commits(repo.refs.master.commit, repo_name),
index=index,
doc_type='doc',
chunk_size=50 # keep the batch sizes small for appearances only
):
action, result = result.popitem()
doc_id = '/%s/doc/%s' % (index, result['_id'])
# process the information from ES whether the document has been
# successfully indexed
if not ok:
print('Failed to %s document %s: %r' % (action, doc_id, result))
else:
print(doc_id)
|
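A hypothetical invocation; the host URL is an assumption and `create_git_index` comes from the same module. With `path=None` the repository containing the script itself is indexed.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")   # assumed local cluster
load_repo(client, path=None, index="git")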
<SYSTEM_TASK:>
Parallel version of the bulk helper run in multiple threads at once.
<END_TASK>
<USER_TASK:>
Description:
def parallel_bulk(
client,
actions,
thread_count=4,
chunk_size=500,
max_chunk_bytes=100 * 1024 * 1024,
queue_size=4,
expand_action_callback=expand_action,
*args,
**kwargs
):
"""
Parallel version of the bulk helper run in multiple threads at once.
:arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
:arg actions: iterator containing the actions
:arg thread_count: size of the threadpool to use for the bulk requests
:arg chunk_size: number of docs in one chunk sent to es (default: 500)
:arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
:arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
from the execution of the last chunk when some occur. By default we raise.
:arg raise_on_exception: if ``False`` then don't propagate exceptions from
call to ``bulk`` and just report the items that failed as failed.
:arg expand_action_callback: callback executed on each action passed in,
should return a tuple containing the action line and the data line
(`None` if data line should be omitted).
:arg queue_size: size of the task queue between the main thread (producing
chunks to send) and the processing threads.
"""
|
# Avoid importing multiprocessing unless parallel_bulk is used
# to avoid exceptions on restricted environments like App Engine
from multiprocessing.pool import ThreadPool
actions = map(expand_action_callback, actions)
class BlockingPool(ThreadPool):
def _setup_queues(self):
super(BlockingPool, self)._setup_queues()
# The queue must be at least the size of the number of threads to
# prevent hanging when inserting sentinel values during teardown.
self._inqueue = Queue(max(queue_size, thread_count))
self._quick_put = self._inqueue.put
pool = BlockingPool(thread_count)
try:
for result in pool.imap(
lambda bulk_chunk: list(
_process_bulk_chunk(
client, bulk_chunk[1], bulk_chunk[0], *args, **kwargs
)
),
_chunk_actions(
actions, chunk_size, max_chunk_bytes, client.transport.serializer
),
):
for item in result:
yield item
finally:
pool.close()
pool.join()
|
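A hedged usage sketch of `parallel_bulk` with a generator of actions; the index name and documents are made up.

from elasticsearch import Elasticsearch

def generate_actions():
    for i in range(1000):
        yield {"_index": "demo", "_id": i, "value": i}

client = Elasticsearch()   # assumed reachable cluster
for ok, item in parallel_bulk(client, generate_actions(),
                              thread_count=4, chunk_size=500):
    if not ok:
        print("failed:", item)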
<SYSTEM_TASK:>
The force merge API allows to force merging of one or more indices
<END_TASK>
<USER_TASK:>
Description:
def forcemerge(self, index=None, params=None):
"""
The force merge API allows to force merging of one or more indices
through an API. The merge relates to the number of segments a Lucene
index holds within each shard. The force merge operation allows to
reduce the number of segments by merging them.
This call will block until the merge is complete. If the http
connection is lost, the request will continue in the background, and
any new requests will block until the previous force merge is complete.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg flush: Specify whether the index should be flushed after performing
the operation (default: true)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg max_num_segments: The number of segments the index should be merged
into (default: dynamic)
:arg only_expunge_deletes: Specify whether the operation should only
expunge deleted documents
:arg operation_threading: TODO: ?
"""
|
return self.transport.perform_request(
"POST", _make_path(index, "_forcemerge"), params=params
)
|
<SYSTEM_TASK:>
The rollover index API rolls an alias over to a new index when the
<END_TASK>
<USER_TASK:>
Description:
def rollover(self, alias, new_index=None, body=None, params=None):
"""
The rollover index API rolls an alias over to a new index when the
existing index is considered to be too large or too old.
The API accepts a single alias name and a list of conditions. The alias
must point to a single index only. If the index satisfies the specified
conditions then a new index is created and the alias is switched to
point to the new index.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-rollover-index.html>`_
:arg alias: The name of the alias to rollover
:arg new_index: The name of the rollover index
:arg body: The conditions that needs to be met for executing rollover
:arg dry_run: If set to true the rollover action will only be validated
but not actually performed even if a condition matches. The default
is false
:arg master_timeout: Specify timeout for connection to master
:arg request_timeout: Explicit operation timeout
:arg wait_for_active_shards: Set the number of active shards to wait for
on the newly created rollover index before the operation returns.
:arg include_type_name: Specify whether requests and responses should include a
type name (default: depends on Elasticsearch version).
"""
|
if alias in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'alias'.")
return self.transport.perform_request(
"POST", _make_path(alias, "_rollover", new_index), params=params, body=body
)
|
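A hypothetical rollover request against the API above, assuming the usual elasticsearch-py client layout where this method lives under `client.indices`; the alias name and conditions are made up.

from elasticsearch import Elasticsearch

client = Elasticsearch()   # assumed reachable cluster
client.indices.rollover(
    alias="logs-write",
    body={"conditions": {"max_age": "7d", "max_docs": 1000000}},
)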
<SYSTEM_TASK:>
Escape a single value of a URL string or a query parameter. If it is a list
<END_TASK>
<USER_TASK:>
Description:
def _escape(value):
"""
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
"""
|
# make sequences into comma-separated strings
if isinstance(value, (list, tuple)):
value = ",".join(value)
# dates and datetimes into isoformat
elif isinstance(value, (date, datetime)):
value = value.isoformat()
# make bools into true/false strings
elif isinstance(value, bool):
value = str(value).lower()
# don't decode bytestrings
elif isinstance(value, bytes):
return value
# encode strings to utf-8
if isinstance(value, string_types):
if PY2 and isinstance(value, unicode):
return value.encode("utf-8")
if not PY2 and isinstance(value, str):
return value.encode("utf-8")
return str(value)
|
<SYSTEM_TASK:>
Create a URL string from parts, omit all `None` values and empty strings.
<END_TASK>
<USER_TASK:>
Description:
def _make_path(*parts):
"""
Create a URL string from parts, omit all `None` values and empty strings.
Convert lists and tuples to comma separated values.
"""
|
# TODO: maybe only allow some parts to be lists/tuples ?
return "/" + "/".join(
# preserve ',' and '*' in url for nicer URLs in logs
quote_plus(_escape(p), b",*")
for p in parts
if p not in SKIP_IN_PATH
)
|
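A few illustrative calls showing how values are escaped and joined.

print(_make_path("my-index", ["doc-1", "doc-2"], "_mget"))
# /my-index/doc-1,doc-2/_mget  (commas and '*' are kept readable)
print(_escape(True), _escape(["a", "b"]))
# b'true' b'a,b'  (strings are encoded to utf-8 bytes on Python 3)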
<SYSTEM_TASK:>
Decorator that pops all accepted parameters from method's kwargs and puts
<END_TASK>
<USER_TASK:>
Description:
def query_params(*es_query_params):
"""
Decorator that pops all accepted parameters from method's kwargs and puts
them in the params argument.
"""
|
def _wrapper(func):
@wraps(func)
def _wrapped(*args, **kwargs):
params = {}
if "params" in kwargs:
params = kwargs.pop("params").copy()
for p in es_query_params + GLOBAL_PARAMS:
if p in kwargs:
v = kwargs.pop(p)
if v is not None:
params[p] = _escape(v)
# don't treat ignore and request_timeout as other params to avoid escaping
for p in ("ignore", "request_timeout"):
if p in kwargs:
params[p] = kwargs.pop(p)
return func(*args, params=params, **kwargs)
return _wrapped
return _wrapper
|
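A toy sketch of how the decorator is applied; the method name and accepted parameters are illustrative, while `GLOBAL_PARAMS`, `_escape` and `_make_path` come from the same module.

@query_params("timeout", "routing")
def fake_search(index, params=None):
    # A real API method would forward this to transport.perform_request(...).
    return _make_path(index, "_search"), params

path, params = fake_search("my-index", timeout="5s", ignore=404)
print(path)     # /my-index/_search
print(params)   # {'timeout': b'5s', 'ignore': 404}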
<SYSTEM_TASK:>
Deal with incoming requests.
<END_TASK>
<USER_TASK:>
Description:
def post(self):
"""Deal with incoming requests."""
|
body = tornado.escape.json_decode(self.request.body)
try:
self._bo.register(
params=body["params"],
target=body["target"],
)
print("BO has registered: {} points.".format(len(self._bo.space)), end="\n\n")
except KeyError:
pass
finally:
suggested_params = self._bo.suggest(self._uf)
self.write(json.dumps(suggested_params))
|
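A hypothetical client-side call against the handler above; the URL, port, and endpoint path are assumptions. It reports one observation and receives the next suggested parameters.

import requests   # assumed available

resp = requests.post(
    "http://localhost:9009/bayesian_optimization",   # made-up endpoint
    json={"params": {"x": 1.2, "y": -0.3}, "target": 0.87},
)
print(resp.json())   # next parameters suggested by the optimizer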
<SYSTEM_TASK:>
Expect observation with known target
<END_TASK>
<USER_TASK:>
Description:
def register(self, params, target):
"""Expect observation with known target"""
|
self._space.register(params, target)
self.dispatch(Events.OPTMIZATION_STEP)
|
<SYSTEM_TASK:>
Most promising point to probe next
<END_TASK>
<USER_TASK:>
Description:
def suggest(self, utility_function):
"""Most promissing point to probe next"""
|
if len(self._space) == 0:
return self._space.array_to_params(self._space.random_sample())
# Sklearn's GP throws a large number of warnings at times, but
# we don't really need to see them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self._gp.fit(self._space.params, self._space.target)
# Finding argmax of the acquisition function.
suggestion = acq_max(
ac=utility_function.utility,
gp=self._gp,
y_max=self._space.target.max(),
bounds=self._space.bounds,
random_state=self._random_state
)
return self._space.array_to_params(suggestion)
|
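A hedged ask-and-tell sketch using the bayes_opt package these methods appear to come from; the class and helper names are assumptions and the objective is made up.

from bayes_opt import BayesianOptimization, UtilityFunction

def black_box(x, y):
    return -x ** 2 - (y - 1) ** 2 + 1

optimizer = BayesianOptimization(f=None,
                                 pbounds={"x": (-2, 2), "y": (-3, 3)},
                                 random_state=1)
utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)
for _ in range(5):
    next_point = optimizer.suggest(utility)           # most promising point
    target = black_box(**next_point)
    optimizer.register(params=next_point, target=target)
print(optimizer.max)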
<SYSTEM_TASK:>
Make sure there's something in the queue at the very beginning.
<END_TASK>
<USER_TASK:>
Description:
def _prime_queue(self, init_points):
"""Make sure there's something in the queue at the very beginning."""
|
if self._queue.empty and self._space.empty:
init_points = max(init_points, 1)
for _ in range(init_points):
self._queue.add(self._space.random_sample())
|
<SYSTEM_TASK:>
Append a point and its target value to the known data.
<END_TASK>
<USER_TASK:>
Description:
def register(self, params, target):
"""
Append a point and its target value to the known data.
Parameters
----------
params : ndarray
a single point, with len(params) == self.dim
target : float
target function value
Raises
------
KeyError:
if the point is not unique
Notes
-----
runs in amortized constant time
Example
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> len(space)
0
>>> x = np.array([0, 0])
>>> y = 1
>>> space.register(x, y)
>>> len(space)
1
"""
|
x = self._as_array(params)
if x in self:
raise KeyError('Data point {} is not unique'.format(x))
# Insert data into unique dictionary
self._cache[_hashable(x.ravel())] = target
self._params = np.concatenate([self._params, x.reshape(1, -1)])
self._target = np.concatenate([self._target, [target]])
|
<SYSTEM_TASK:>
Evaluates a single point x to obtain the value y, and then records them
<END_TASK>
<USER_TASK:>
Description:
def probe(self, params):
"""
Evaluates a single point x to obtain the value y, and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
params : ndarray
a single point, with len(params) == self.dim
Returns
-------
y : float
target function value.
"""
|
x = self._as_array(params)
try:
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
self.register(x, target)
return target
|
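A small combined sketch of the TargetSpace methods above; the constructor signature is taken from the docstrings and the values are illustrative.

pbounds = {'p1': (0, 1), 'p2': (1, 100)}
space = TargetSpace(lambda p1, p2: p1 + p2, pbounds, random_state=0)
x = space.random_sample()   # one random point inside the bounds
y = space.probe(x)          # evaluates target_func at x and registers the result
print(len(space), y)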
<SYSTEM_TASK:>
Creates random points within the bounds of the space.
<END_TASK>
<USER_TASK:>
Description:
def random_sample(self):
"""
Creates random points within the bounds of the space.
Returns
----------
data: ndarray
a [dim] array with one random value per dimension, ordered as `self._keys`
Example
-------
>>> target_func = lambda p1, p2: p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> space.random_sample().shape
(2,)
"""
|
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((1, self.dim))
for col, (lower, upper) in enumerate(self._bounds):
data.T[col] = self.random_state.uniform(lower, upper, size=1)
return data.ravel()
|
<SYSTEM_TASK:>
Get maximum target value found and corresponding parameters.
<END_TASK>
<USER_TASK:>
Description:
def max(self):
"""Get maximum target value found and corresponding parametes."""
|
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
|
<SYSTEM_TASK:>
Get all target values found and corresponding parameters.
<END_TASK>
<USER_TASK:>
Description:
def res(self):
"""Get all target values found and corresponding parametes."""
|
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
]
|
<SYSTEM_TASK:>
A method that allows changing the lower and upper searching bounds
<END_TASK>
<USER_TASK:>
Description:
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
|
for row, key in enumerate(self.keys):
if key in new_bounds:
self._bounds[row] = new_bounds[key]
|
<SYSTEM_TASK:>
SVC cross validation.
<END_TASK>
<USER_TASK:>
Description:
def svc_cv(C, gamma, data, targets):
"""SVC cross validation.
This function will instantiate a SVC classifier with parameters C and
gamma. Combined with data and targets this will in turn be used to perform
cross validation. The result of cross validation is returned.
Our goal is to find combinations of C and gamma that maximizes the roc_auc
metric.
"""
|
estimator = SVC(C=C, gamma=gamma, random_state=2)
cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4)
return cval.mean()
|
<SYSTEM_TASK:>
Random Forest cross validation.
<END_TASK>
<USER_TASK:>
Description:
def rfc_cv(n_estimators, min_samples_split, max_features, data, targets):
"""Random Forest cross validation.
This function will instantiate a random forest classifier with parameters
n_estimators, min_samples_split, and max_features. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of n_estimators, min_samples_split, and
max_features that minimizes the log loss.
"""
|
estimator = RFC(
n_estimators=n_estimators,
min_samples_split=min_samples_split,
max_features=max_features,
random_state=2
)
cval = cross_val_score(estimator, data, targets,
scoring='neg_log_loss', cv=4)
return cval.mean()
|
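A hedged end-to-end sketch wiring rfc_cv into Bayesian optimization; the dataset, bounds, and iteration counts are illustrative, and scikit-learn plus the bayes_opt package are assumed.

from bayes_opt import BayesianOptimization
from sklearn.datasets import make_classification

data, targets = make_classification(n_samples=500, n_features=20, random_state=2)

def rfc_crossval(n_estimators, min_samples_split, max_features):
    # Cast the continuous suggestions to the types the classifier expects.
    return rfc_cv(
        n_estimators=int(n_estimators),
        min_samples_split=int(min_samples_split),
        max_features=max(min(max_features, 0.999), 1e-3),
        data=data,
        targets=targets,
    )

optimizer = BayesianOptimization(
    f=rfc_crossval,
    pbounds={"n_estimators": (10, 250),
             "min_samples_split": (2, 25),
             "max_features": (0.1, 0.999)},
    random_state=1234,
)
optimizer.maximize(init_points=2, n_iter=3)
print(optimizer.max)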
<SYSTEM_TASK:>
Creates a random number generator based on an optional seed. This can be
<END_TASK>
<USER_TASK:>
Description:
def ensure_rng(random_state=None):
"""
Creates a random number generator based on an optional seed. This can be
an integer or another random state for a seeded rng, or None for an
unseeded rng.
"""
|
if random_state is None:
random_state = np.random.RandomState()
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState)
return random_state
|
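Quick illustrative checks of the three accepted inputs.

import numpy as np

rng = ensure_rng(42)
print(rng.uniform(0, 1, size=3))      # reproducible draws from the seeded rng
assert ensure_rng(rng) is rng         # an existing RandomState passes through
assert isinstance(ensure_rng(), np.random.RandomState)   # fresh unseeded rng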
<SYSTEM_TASK:>
Expand abbreviations in a template name.
<END_TASK>
<USER_TASK:>
Description:
def expand_abbreviations(template, abbreviations):
"""Expand abbreviations in a template name.
:param template: The project template name.
:param abbreviations: Abbreviation definitions.
"""
|
if template in abbreviations:
return abbreviations[template]
# Split on colon. If there is no colon, rest will be empty
# and prefix will be the whole template
prefix, sep, rest = template.partition(':')
if prefix in abbreviations:
return abbreviations[prefix].format(rest)
return template
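An illustrative abbreviation table and two calls; the entries are made up.

abbreviations = {
    'gh': 'https://github.com/{0}.git',
    'pp': 'https://github.com/audreyr/cookiecutter-pypackage.git',
}
print(expand_abbreviations('gh:audreyr/cookiecutter-pypackage', abbreviations))
# https://github.com/audreyr/cookiecutter-pypackage.git
print(expand_abbreviations('pp', abbreviations))
# https://github.com/audreyr/cookiecutter-pypackage.git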
|