Columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M)
Compute value Tensor v. Args: memory_antecedent: a Tensor with dimensions {memory_input_dim} + other_dims Returns: a Tensor with dimensions memory_heads_dims + {value_dim} + other_dims
def compute_v(self, memory_antecedent): if self.shared_kv: raise ValueError("compute_v cannot be called with shared_kv") ret = mtf.einsum( [memory_antecedent, self.wv], reduced_dims=[self.memory_input_dim]) if self.combine_dims: ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.v_dims) return ret
213,547
Compute output of multihead attention. Args: o: a Tensor with dimensions query_heads_dims + {value_dim} + other_dims output_shape: an optional Shape Returns: a Tensor with shape: {output_dim} + other_dims
def compute_output(self, o, output_shape=None): if self.combine_dims: o = mtf.transpose(o, o.shape - self.o_dims + self.o_dims) o = mtf.replace_dimensions(o, self.o_dims, self.wo.shape.dims[0]) reduced_dims = [self.wo.shape.dims[0]] else: reduced_dims = self.o_dims return mtf.einsum( [o, self.wo], output_shape=output_shape, reduced_dims=reduced_dims)
213,548
Create a T2tVocabulary. Args: filepath: a string
def __init__(self, filepath): self._filepath = filepath self._subword_text_encoder = text_encoder.SubwordTextEncoder(filepath)
213,550
Encode a tf.Scalar string to a tf.Tensor. This will be necessary for on-the-fly tokenization. Args: s: a tf.Scalar with dtype tf.string Returns: a 1d tf.Tensor with dtype tf.int32
def encode_tf(self, s): ids = subword_text_encoder_ops.subword_text_encoder_encode( s, self._filepath) # the C++ op appends 1=EOS - drop it. return ids[:-1]
213,551
Create a layer stack. Args: include_encdec_attention: a boolean num_layers: an integer d_ff: an integer num_heads: an integer d_kv: an integer dropout_rate: a float Returns: a LayerStack
def simple_layer_stack(include_encdec_attention, num_layers=6, d_ff=2048, num_heads=8, d_kv=128, dropout_rate=0.1): ret = [] for _ in xrange(num_layers): ret.append( transformer_layers.SelfAttention( num_heads=num_heads, key_value_size=d_kv, attention_kwargs={"dropout_rate": dropout_rate})) if include_encdec_attention: ret.append( transformer_layers.EncDecAttention( num_heads=num_heads, key_value_size=d_kv, attention_kwargs={"dropout_rate": dropout_rate})) ret.append( transformer_layers.DenseReluDense( hidden_size=d_ff, dropout_rate=dropout_rate)) return transformer.LayerStack(ret)
213,552
The model. Args: image: tf.Tensor with shape [batch, 28*28] labels: a tf.Tensor with shape [batch] and dtype tf.int32 mesh: a mtf.Mesh Returns: logits: a mtf.Tensor with shape [batch, 10] loss: a mtf.Tensor with shape []
def mnist_model(image, labels, mesh): batch_dim = mtf.Dimension("batch", FLAGS.batch_size) row_blocks_dim = mtf.Dimension("row_blocks", 4) col_blocks_dim = mtf.Dimension("col_blocks", 4) rows_dim = mtf.Dimension("rows_size", 7) cols_dim = mtf.Dimension("cols_size", 7) classes_dim = mtf.Dimension("classes", 10) one_channel_dim = mtf.Dimension("one_channel", 1) x = mtf.import_tf_tensor( mesh, tf.reshape(image, [FLAGS.batch_size, 4, 7, 4, 7, 1]), mtf.Shape( [batch_dim, row_blocks_dim, rows_dim, col_blocks_dim, cols_dim, one_channel_dim])) x = mtf.transpose(x, [ batch_dim, row_blocks_dim, col_blocks_dim, rows_dim, cols_dim, one_channel_dim]) # add some convolutional layers to demonstrate that convolution works. fh_dim = mtf.Dimension("fh", 9) fw_dim = mtf.Dimension("fw", 9) filters1_dim = mtf.Dimension("filters1", 16) filters2_dim = mtf.Dimension("filters2", 16) kernel1 = mtf.get_variable( mesh, "kernel1", [fh_dim, fw_dim, one_channel_dim, filters1_dim]) kernel2 = mtf.get_variable( mesh, "kernel2", [fh_dim, fw_dim, filters1_dim, filters2_dim]) f1 = mtf.relu(mtf.conv2d_with_blocks( x, kernel1, strides=[1, 1, 1, 1], padding="SAME", h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim)) f2 = mtf.relu(mtf.conv2d_with_blocks( f1, kernel2, strides=[1, 1, 1, 1], padding="SAME", h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim)) x = mtf.reduce_mean(f2, reduced_dim=filters2_dim) # add some fully-connected dense layers. hidden_dim1 = mtf.Dimension("hidden1", FLAGS.hidden_size) hidden_dim2 = mtf.Dimension("hidden2", FLAGS.hidden_size) h1 = mtf.layers.dense( x, hidden_dim1, reduced_dims=x.shape.dims[-4:], activation=mtf.relu, name="hidden1") h2 = mtf.layers.dense( h1, hidden_dim2, activation=mtf.relu, name="hidden2") logits = mtf.layers.dense(h2, classes_dim, name="logits") if labels is None: loss = None else: labels = mtf.import_tf_tensor( mesh, tf.reshape(labels, [FLAGS.batch_size]), mtf.Shape([batch_dim])) loss = mtf.layers.softmax_cross_entropy_with_logits( logits, mtf.one_hot(labels, classes_dim), classes_dim) loss = mtf.reduce_mean(loss) return logits, loss
213,558
Prints the solution associated with solver. If solver has already had Solve() called on it, prints the solution. This includes each variable and its assignment, along with the objective function and its optimal value. If solver has not had Solve() called on it, or there is no feasible solution, this will probably crash. Args: model: A pywrapcp.CpModel object. solver: A pywrapcp.CpSolver object. Returns: Nothing, but prints the solution associated with solver.
def print_solution(model, solver): model_proto = model.Proto() response_proto = solver.ResponseProto() variables_in_objective_map = {} maximization = False if model_proto.HasField('objective'): objective = model_proto.objective for i in range(len(objective.vars)): variables_in_objective_map[objective.vars[i]] = objective.coeffs[i] if objective.scaling_factor < 0.0: maximization = True variable_assignments = [] variables_in_objective = [] num_vars = len(model_proto.variables) for var_index in range(num_vars): if not model_proto.variables[var_index].name: continue variable_name = model_proto.variables[var_index].name if var_index in variables_in_objective_map: coefficient = variables_in_objective_map[var_index] if coefficient: if maximization: coefficient *= -1 if coefficient < 0: variables_in_objective.append(' - {} * {}'.format( -coefficient, variable_name)) elif coefficient > 0: variables_in_objective.append(' + {} * {}'.format( coefficient, variable_name)) variable_assignments.append(' {} = {}\n'.format( variable_name, response_proto.solution[var_index])) print(''.join(variable_assignments), end='') # Strip the leading '+' if it exists. if variables_in_objective and variables_in_objective[0][1] == '+': variables_in_objective[0] = variables_in_objective[0][2:] print('{}:{}'.format('Maximize' if maximization else 'Minimize', ''.join(variables_in_objective))) print('Objective value: {}\n'.format(solver.ObjectiveValue()))
213,564
Name for a local variable. Args: splittable_dimensions: frozenset of names of splittable dimensions. assignment: dict from names of splittable dimensions to names of mesh dimensions. Returns: A string, the variable name.
def _local_var_name(splittable_dimensions, assignment): assignment_string = [] for splittable in sorted(splittable_dimensions): if splittable in assignment: assignment_string.append("{}:{}".format(splittable, assignment[splittable])) else: assignment_string.append("{}".format(splittable)) return "y_(" + ",".join(assignment_string) + ")"
213,565
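Usage sketch for the helper above (a hypothetical call, assuming _local_var_name is in scope; the dimension and mesh names are invented):
print(_local_var_name(frozenset(["batch", "d_ff"]), {"batch": "mesh_0"}))
# -> "y_(batch:mesh_0,d_ff)"  (splittable dims sorted; assigned ones written as dim:mesh_dim)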
Generates all ways to map splittable dimensions to mesh dimensions. Args: splittable_dimensions: a frozenset of the names of splittable dimensions. mesh_dimension_to_size: a dictionary from mesh dimension name to size. Returns: A list of the valid assignments. Each assignment is a dict keyed by every splittable dimension, whose value is either a mesh dimension or None.
def _generate_assignments(splittable_dimensions, mesh_dimension_to_size): assignments = [] for assignment_size in six.moves.xrange( 1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))): for s_dims_chosen in itertools.combinations(splittable_dimensions, assignment_size): for m_dims_chosen in itertools.permutations(mesh_dimension_to_size, assignment_size): assignments.append(dict(zip(s_dims_chosen, m_dims_chosen))) return assignments
213,566
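Worked example for the generator above (a sketch, assuming _generate_assignments and its module imports are in scope; names invented):
assignments = _generate_assignments(frozenset(["batch", "d_ff"]), {"mesh_x": 2})
# Assignment sizes 0 and 1 are enumerated (only one mesh dimension), so the result, in some order, is:
# [{}, {"batch": "mesh_x"}, {"d_ff": "mesh_x"}]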
Uses an auto_mtf.memory_estimator to set up the integer program. Args: memory_estimator: a memory_estimator.MemoryEstimator. scheduler_alg: an optional string, see scheduler.MinimizePeakMemory.
def __init__(self, memory_estimator, scheduler_alg="LIST"): self._estimator = memory_estimator self._scheduler_alg = scheduler_alg self._layout_validator = self._estimator.get_layout_validator() self._graph = self._estimator.get_graph_interface() self._memory_contents = None # [frozenset(string)] # Initialize the model. self._model = cp_model.CpModel() self._preprocess_input() self._initialize_variables() self._add_constraints() self._build_objective_function()
213,567
Solves the current integer program and returns the computed layout. Args: print_solution: An optional boolean indicating whether to print the full solution in human-readable format. Returns: The computed layout (as a string). Raises: SolverError: the internal solver could not find a solution, or the solution found is infeasible.
def solve(self, print_solution=False): # Solve and see how well the solver did. self._cp_solver = cp_model.CpSolver() status = self._cp_solver.Solve(self._model) if status != cp_model.OPTIMAL: if status == cp_model.FEASIBLE: logging.warning("A potentially suboptimal solution was found.") else: logging.error("Solver returned status %d.", status) raise SolverError("The solver could not solve the problem and returned " "status {}.".format(status)) # TODO(joshuawang): Verify the solver's solution. if print_solution: print_cp_model_solution.print_solution(self._model, self._cp_solver) # Reconstruct layout from solution. layout = [] for mtf_dimension_name in ( self._layout_validator.splittable_mtf_dimension_names): for mesh_dimension_name in ( self._layout_validator.mesh_dimension_name_to_size): value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name, mesh_dimension_name)]) if value: # Value is integer. layout.append(mtf_dimension_name + ":" + mesh_dimension_name) layout.sort() return ";".join(layout)
213,572
The current objective value for the given layout. TODO(joshuawang): The current function does not check that the given layout is valid. Args: layout: a string, representing a layout to evaluate (e.g. "d_ff:m1;heads:m2"). Returns: A float, the objective value.
def evaluate_layout(self, layout): layout_dict = {} if layout: for pair in layout.split(";"): mtf_dimension_name, mesh_dimension_name = pair.split(":", 1) if (mtf_dimension_name in self._layout_validator.splittable_mtf_dimension_names): layout_dict[mtf_dimension_name] = mesh_dimension_name else: logging.warning("Skipping unsplittable dimension %s.", mtf_dimension_name) tensor_memory = {} # {string: float}, size of each tensor under our layout for tensor_name in self._graph.get_all_tensor_names(): if self._graph.is_tensor_on_canonical_device(tensor_name): tensor_memory[tensor_name] = self._graph.get_tensor_size( tensor_name, layout_dict, self._layout_validator.mesh_dimension_name_to_size) else: tensor_memory[tensor_name] = 0.0 peak_memory_usage = 0.0 for tensor_names in self._get_memory_contents(): memory_usage = 0.0 for tensor_name in tensor_names: memory_usage += tensor_memory[tensor_name] peak_memory_usage = max(peak_memory_usage, memory_usage) return peak_memory_usage
213,573
Choose a device for the input variable. Args: var: a Variable. Returns: The device on which to place var.
def device_function(self, var): if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'): tf.logging.debug('Place {} on last device: {}.'.format( var.name, self._last_device)) return self._last_device shape = tf.TensorShape(var.get_attr('shape')) assert shape.num_elements() is not None size = var.get_attr('dtype').size mem, device = heapq.heappop(self._mem_device_heap) mem += shape.num_elements() * size heapq.heappush(self._mem_device_heap, (mem, device)) tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format( var.name, device, mem)) self._last_device = device return device
213,575
Encode from strings to token ids. Args: dataset: a tf.data.Dataset with string values. vocabulary: a mesh_tensorflow.transformer.Vocabulary Returns: a tf.data.Dataset with integer-vector values ending in EOS=1
def encode_dataset(dataset, vocabulary): def encode(features): return {k: vocabulary.encode_tf(v) for k, v in features.items()} return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
213,580
Reads a tensorflow_datasets dataset. Args: dataset_name: a string text2self: a boolean tfds_data_dir: a string dataset_split: a string batch_size: an integer sequence_length: an integer vocabulary: ignored Returns: a tf.data.Dataset of batches
def pretokenized_tfds_dataset(dataset_name=gin.REQUIRED, text2self=gin.REQUIRED, tfds_data_dir=gin.REQUIRED, dataset_split=gin.REQUIRED, batch_size=gin.REQUIRED, sequence_length=gin.REQUIRED, vocabulary=None): del vocabulary dataset = tfds.load( dataset_name, split=dataset_split, as_supervised=True, data_dir=tfds_data_dir) if dataset_split == "train": dataset = dataset.repeat() dataset = dataset.shuffle(1000) def shift_and_append_eos(t): # tfds encoder does not reserve an EOS token, so we need to shift # in order to do so. We also append EOS=1. return tf.concat([t + 1, [1]], 0) def feature_map(inputs, targets): if text2self: return {"targets": shift_and_append_eos(targets)} else: return {"inputs": shift_and_append_eos(inputs), "targets": shift_and_append_eos(targets)} dataset = dataset.map(feature_map, num_parallel_calls=tf.data.experimental.AUTOTUNE) return pack_and_batch(dataset, batch_size, sequence_length)
213,581
Turns a supervised dataset into a dataset with a feature dictionary. If text2self, the features dictionary contains a "targets" key; otherwise it contains "inputs" and "targets" keys. Args: dataset: a tf.data.Dataset text2self: a boolean Returns: a tf.data.Dataset
def supervised_to_dict(dataset, text2self): def my_fn(inputs, targets): if text2self: return {"targets": targets} else: return {"inputs": inputs, "targets": targets} return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
213,584
Encode all features. Args: dataset: a tf.data.Dataset vocabulary: a vocabulary.Vocabulary Returns: a tf.data.Dataset
def encode_all_features(dataset, vocabulary): def my_fn(features): ret = {} for k, v in features.items(): v = vocabulary.encode_tf(v) v = tf.concat([tf.to_int64(v), [1]], 0) ret[k] = v return ret return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
213,585
Loads the Tensor2Tensor dataset specified by dataset_name. Args: dataset_name: a string, the Tensor2Tensor dataset name. text2self: a boolean data_dir: a string, directory containing the pretokenized TFRecord files dataset_split: a string - "train" or "dev" batch_size: an integer sequence_length: an integer vocabulary: ignored Returns: A tf.data.Dataset of batches
def pretokenized_t2t_dataset(dataset_name=gin.REQUIRED, text2self=False, data_dir=gin.REQUIRED, dataset_split="train", batch_size=gin.REQUIRED, sequence_length=gin.REQUIRED, vocabulary=None): del vocabulary filepattern = os.path.join( data_dir, dataset_name + "-" + dataset_split + "-*") filenames = tf.gfile.Glob(filepattern) tf.logging.info("Found %s files matching %s" % (len(filenames), filepattern)) if not filenames: raise ValueError("No matching files found") dataset = pretokenized_tfrecord_dataset( filenames=filenames, text2self=text2self, eos_included=True, repeat=dataset_split == "train", batch_size=batch_size, sequence_length=sequence_length) if dataset_split == "train": dataset = dataset.shuffle(1000) return dataset
213,587
Helper-function for packing a dataset which has already been batched. See pack_dataset(). Uses tf.while_loop. Slow. Args: dataset: a dataset containing padded batches of examples. keys: a list of strings length: an integer Returns: a dataset.
def _pack_with_tf_ops(dataset, keys, length): empty_example = {} for k in keys: empty_example[k] = tf.zeros([0], dtype=tf.int32) empty_example[k + "_position"] = tf.zeros([0], dtype=tf.int32) keys_etc = empty_example.keys() def write_packed_example(partial, outputs): new_partial = empty_example.copy() new_outputs = {} for k in keys_etc: new_outputs[k] = outputs[k].write( outputs[k].size(), tf.pad(partial[k], [[0, length - tf.size(partial[k])]])) return new_partial, new_outputs def map_fn(x): partial = empty_example.copy() i = tf.zeros([], dtype=tf.int32) dynamic_batch_size = tf.shape(x[keys[0]])[0] outputs = {} for k in keys: outputs[k] = tf.TensorArray( tf.int32, size=0, dynamic_size=True, element_shape=[length]) outputs[k + "_position"] = tf.TensorArray( tf.int32, size=0, dynamic_size=True, element_shape=[length]) def cond_fn(i, partial, outputs): del partial, outputs return i < dynamic_batch_size def body_fn(i, partial, outputs): can_append = True one_example = {} for k in keys: val = tf.cast(x[k][i], tf.int32) val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))] one_example[k] = val for k in keys: can_append = tf.logical_and( can_append, tf.less_equal( tf.size(partial[k]) + tf.size(one_example[k]), length)) def false_fn(): return write_packed_example(partial, outputs) def true_fn(): return partial, outputs partial, outputs = tf.cond(can_append, true_fn, false_fn) new_partial = {} for k in keys: new_seq = one_example[k][:length] new_seq_len = tf.size(new_seq) new_partial[k] = tf.concat([partial[k], new_seq], 0) new_partial[k + "_position"] = tf.concat( [partial[k + "_position"], tf.range(new_seq_len, dtype=tf.int32)], 0) partial = new_partial return i+1, partial, outputs i, partial, outputs = tf.while_loop( cond_fn, body_fn, (i, partial, outputs), back_prop=False, shape_invariants=( tf.TensorShape([]), {k: tf.TensorShape([None]) for k in keys_etc}, {k: tf.TensorShape(None) for k in keys_etc}, )) partial, outputs = write_packed_example(partial, outputs) packed = {k: outputs[k].stack() for k in keys_etc} for k in keys: packed[k + "_segmentation"] = ( tf.cumsum( tf.cast(tf.equal(packed[k + "_position"], 0), tf.int32), axis=1) * tf.cast(tf.not_equal(packed[k], 0), tf.int32)) return packed dataset = dataset.map(map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) return dataset.flat_map(tf.data.Dataset.from_tensor_slices)
213,589
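For intuition, a hand-worked sketch of what the packing above produces for two "targets" rows [3, 4, 5, 1] and [6, 7, 1] packed to length=8 (token values invented; EOS=1, 0 is padding):
targets              = [3, 4, 5, 1, 6, 7, 1, 0]
targets_position     = [0, 1, 2, 3, 0, 1, 2, 0]
targets_segmentation = [1, 1, 1, 1, 2, 2, 2, 0]  # cumsum(position == 0), masked by nonzero tokens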
Helper-function for packing a dataset which has already been batched. See pack_dataset(). Relies on custom ops which require a custom compiled binary. Faster than _pack_with_tf_ops(), and denser packing. Args: dataset: a dataset containing padded batches of examples. keys: a list of strings (must have length 1 or 2) length: an integer Returns: a dataset.
def _pack_with_custom_ops(dataset, keys, length): from tensor2tensor.data_generators.ops import pack_sequences_ops # pylint: disable=g-import-not-at-top # faster and better packing but requires custom-built binary. if len(keys) == 1: k1, = keys k2 = k1 elif len(keys) == 2: k1, k2 = keys else: raise ValueError("must have 1 or 2 keys") def map_fn_custom(x): (k1_packed, k1_segmentation, k1_position, k2_packed, k2_segmentation, k2_position) = ( pack_sequences_ops.pack_sequences2(x[k1], x[k2], length)) packed = { k1: k1_packed, k1 + "_segmentation": k1_segmentation, k1 + "_position": k1_position, } if len(keys) == 2: packed.update({ k2: k2_packed, k2 + "_segmentation": k2_segmentation, k2 + "_position": k2_position, }) return packed dataset = dataset.map(map_fn_custom, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.flat_map(tf.data.Dataset.from_tensor_slices) return dataset
213,590
Trim/pad the first axis of t to size length. Args: t: a tf.Tensor length: an integer Returns: a tf.Tensor
def _trim_and_pad(t, length): t = t[:length] paddings = [[0, length - tf.shape(t)[0]]] + [[0, 0]]*(t.get_shape().ndims - 1) t = tf.pad(t, paddings) return t
213,591
Converts input to a Dimension. Args: d: Dimension, tuple (string, int), or None. Returns: Dimension or None. Raises: ValueError: If d cannot be converted to a Dimension.
def convert_to_dimension(d): if d is None: return None if isinstance(d, Dimension): if not isinstance(d.name, str) or not isinstance(d.size, int): raise ValueError("Bad dimension %s" % (d,)) return d name, size = d if isinstance(name, str) and isinstance(size, int): return Dimension(name, size) else: raise ValueError("could not convert %s to Dimension" % (d,))
213,593
Converts input to a Shape. Args: x: Shape, str, or None. Returns: Shape or None. Raises: ValueError: If x cannot be converted to a Shape.
def convert_to_shape(x): if x is None: return None if isinstance(x, Shape): return x if isinstance(x, str): x = _parse_string_to_list_of_pairs(x, seconds_to_int=True) return Shape(x)
213,594
Converts input to a LayoutRules. Args: x: LayoutRules, str, or set-like of string pairs. Returns: LayoutRules.
def convert_to_layout_rules(x): if isinstance(x, LayoutRules): return x if isinstance(x, str): x = _parse_string_to_list_of_pairs(x) return LayoutRules(x)
213,595
Convert list elements to laid-out-tensors when possible. Args: xs: a list Returns: a list
def convert_args_to_laid_out_tensors(xs): ret = [] for x in xs: if hasattr(x, "to_laid_out_tensor"): ret.append(x.to_laid_out_tensor()) else: ret.append(x) return ret
213,596
Component-wise operation with no broadcasting. Args: tf_fn: a component-wise function taking n tf.Tensor inputs and producing a tf.Tensor output xs: n Tensors output_dtype: an optional dtype grad_function: an optional python function name: an optional string Returns: a Tensor
def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None): return slicewise( tf_fn, xs, output_dtype=output_dtype, splittable_dims=xs[0].shape.dims, grad_function=grad_function, name=name or "cwise")
213,598
Convert arguments of a binary operation to Tensors. Args: x1: a Tensor or something convertible to a tf Scalar x2: a Tensor or something convertible to a tf Scalar Returns: new_x1: a Tensor new_x2: a Tensor Raises: ValueError: on failure
def binary_arguments_to_tensors(x1, x2): if not isinstance(x1, Tensor) and not isinstance(x2, Tensor): raise ValueError("at least one of x1 and x2 must be an mtf Tensor") elif isinstance(x1, Tensor) and isinstance(x2, Tensor): return x1, x2 elif isinstance(x1, Tensor): return x1, import_tf_tensor( x1.mesh, tf.convert_to_tensor(x2, dtype=x1.dtype), Shape([])) else: return import_tf_tensor(x2.mesh, tf.convert_to_tensor(x1, dtype=x2.dtype), Shape([])), x2
213,612
Binary minimum with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
def minimum(x1, x2, output_shape=None, name=None): output_shape = convert_to_shape(output_shape) with tf.name_scope(name, default_name="minimum"): x1, x2 = binary_arguments_to_tensors(x1, x2) return MinMaxOperation( tf.minimum, x1, x2, output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape)).outputs[0]
213,623
Returns slicewise function and reduced mesh dimensions. Args: input_shape: a Shape output_shape: a Shape input_tensor_layout: a TensorLayout reduction_fn_string: "SUM" or "MAX" Returns: reduce_slice_fn: a function from tf.Tensor to tf.Tensor reduced_mesh_axes: a list of integers
def _reduce_helper(input_shape, output_shape, input_tensor_layout, reduction_fn_string="SUM"): reduce_dims_indices = [ i for i, d in enumerate(input_shape.dims) if d not in output_shape.dims] reduced_input_shape = Shape([ d for d in input_shape.dims if d in output_shape.dims]) perm = [reduced_input_shape.dims.index(d) for d in output_shape.dims] def reduce_slice_fn(xslice): ret = xslice if reduce_dims_indices: ret = reduction_fn(reduction_fn_string)(xslice, reduce_dims_indices) if perm != list(xrange(len(perm))): ret = tf.transpose(ret, perm) return ret reduced_mesh_axes = [] for i in reduce_dims_indices: mesh_axis = input_tensor_layout[i] if mesh_axis is not None: reduced_mesh_axes.append(mesh_axis) return reduce_slice_fn, reduced_mesh_axes
213,625
Like tf.split. Args: x: a Tensor split_dim: a Dimension in x.shape.dims num_or_size_splits: either an integer dividing split_dim.size or a list of integers adding up to split_dim.size name: an optional string Returns: a list of Tensors.
def split(x, split_dim, num_or_size_splits, name=None): return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs
213,626
Stack multiple Tensors to make a new dimension. Args: xs: a list of Tensors with identical shapes. dim_name: a string (name of the new dimension) axis: an integer (index of the new dimension in the output shape) name: an optional string Returns: a Tensor
def stack(xs, dim_name, axis=0, name=None): ret = StackOperation(xs, dim_name, axis, name).outputs[0] return ret
213,627
Cumulative sum. Args: x: a Tensor dim: a Dimension exclusive: a boolean Returns: a Tensor with the same shape as x.
def cumsum(x, dim, exclusive=False): with tf.variable_scope("cumsum"): new_name = "tmp_dim_cumsum" new_dim = Dimension(new_name, dim.size) new_shape = x.shape.rename_dimension(dim.name, new_name) comparator = less if exclusive else less_equal m = cast( comparator(mtf_range(x.mesh, dim, dtype=tf.float32), mtf_range(x.mesh, new_dim, dtype=tf.float32)), x.dtype) ret = einsum([x, m], output_shape=new_shape) return reshape(ret, x.shape)
213,628
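A numpy sketch of the comparison-matrix trick used above (not the mtf code path): cumsum along a dimension is an einsum against a triangular 0/1 mask.
import numpy as np
x = np.array([1., 2., 3.])
n = x.shape[0]
# mask[i, j] = 1 where i <= j; use strict < for an exclusive cumsum.
mask = (np.arange(n)[:, None] <= np.arange(n)[None, :]).astype(x.dtype)
print(np.einsum("i,ij->j", x, mask))  # [1. 3. 6.]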
Returns slicewise function and reduced mesh dimensions. Assumes the output shape contains no new dimensions. Args: input_shapes: a list of Shapes output_shape: a Shape mesh_impl: a MeshImpl Returns: einsum_slice_fn: a function from tf.Tensors to tf.Tensor reduced_mesh_axes: a list of integers
def _einsum_helper(input_shapes, output_shape, mesh_impl): input_shape_union = _shape_union(input_shapes) total_num_dims = input_shape_union.ndims # list of input shapes that contain all dimensions. full_shapes = [ s for s in input_shapes + [output_shape] if s.ndims == total_num_dims] full_shape = full_shapes[0] if full_shapes else input_shape_union reduce_slice_fn, reduced_mesh_axes = _reduce_helper( full_shape, output_shape, mesh_impl.tensor_layout(full_shape)) def einsum_slice_fn_naive(*slices): # naive einsum implementation where we broadcast all inputs to the full # shape, multiply componentwise, then reduce. return reduce_slice_fn(functools.reduce(tf.multiply, [ _expand_dims(x, input_shape, full_shape) for x, input_shape in zip(slices, input_shapes)])) if full_shapes: # it is not wasteful of space to broadcast fully and then reduce. # this helps to avoid some inefficient GPU implementations. einsum_slice_fn = einsum_slice_fn_naive else: # call tf.einsum equation = _einsum_equation(input_shapes, output_shape) def einsum_slice_fn(*slices): if slices[0].dtype.is_floating: return mesh_impl.einsum(equation, *slices) else: return einsum_slice_fn_naive(*slices) return einsum_slice_fn, reduced_mesh_axes
213,629
Shift operation. Shift x right by +offset in dimension dim. Args: x: a Tensor offset: an integer. If negative, shift left instead of right. dim: a Dimension of x wrap: a boolean - whether to wrap (True) or pad with zeros (False). name: an optional string Returns: a Tensor with the same shape and dtype as x
def shift(x, offset, dim, wrap, name=None): return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]
213,633
Import a laid_out_tensor. For expert users. The input must be laid out appropriately given the eventual MeshImpl, and layout. Args: mesh: a Mesh laid_out_tensor: a LaidOutTensor shape: a mtf.Shape name: an optional string Returns: a mtf.Tensor
def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None): return ImportLaidOutTensorOperation( mesh, laid_out_tensor, convert_to_shape(shape), name=name).outputs[0]
213,636
Assign a new value to a variable. Args: var: either a Variable operation or its output Tensor. new_val: a Tensor assign_fn: a function from (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation Returns: an Operation Raises: ValueError: if var is not a Variable and var.operation is not a Variable
def assign(var, new_val, assign_fn=assign_slice): if isinstance(var, Tensor): var = var.operation if not isinstance(var, Variable): raise ValueError("var must be a mtf.Variable or its output Tensor.") return Assign([var], [new_val], assign_fn=assign_fn)
213,642
Call tf.Print. Args: x: a Tensor. data: a list of Tensor message: a string **kwargs: keyword arguments to tf.Print Returns: a Tensor which is identical in value to x
def Print(x, data, message, **kwargs): # pylint: disable=invalid-name return PrintOperation(x, data, message, **kwargs).outputs[0]
213,646
Reshape a Tensor, renaming one dimension. Args: x: a Tensor old_name: a string new_name: a string Returns: a Tensor
def rename_dimension(x, old_name, new_name): return reshape(x, x.shape.rename_dimension(old_name, new_name))
213,649
Replace dimensions in a Tensor or Shape. old_dim_or_dims consists of a single dimension or a list of dimensions that must occur consecutively in the input shape. They are replaced by the dimensions in new_dim_or_dims. Args: tensor_or_shape: a Tensor or a Shape old_dim_or_dims: a Dimension or a list of Dimensions new_dim_or_dims: a Dimension or a list of Dimensions Returns: a new Tensor or a Shape
def replace_dimensions(tensor_or_shape, old_dim_or_dims, new_dim_or_dims): if isinstance(tensor_or_shape, Tensor): return reshape(tensor_or_shape, replace_dimensions( tensor_or_shape.shape, old_dim_or_dims, new_dim_or_dims)) if not isinstance(tensor_or_shape, Shape): raise ValueError( "tensor_or_shape must be a Tensor or Shape got %s" % (tensor_or_shape,)) in_dims = tensor_or_shape.dims if isinstance(old_dim_or_dims, Dimension): old_dim_or_dims = [old_dim_or_dims] if isinstance(new_dim_or_dims, Dimension): new_dim_or_dims = [new_dim_or_dims] if not isinstance(old_dim_or_dims, list) or not old_dim_or_dims: raise ValueError( "old_dim_or_dims must be a Dimension or a list of Dimension got %s" % (old_dim_or_dims,)) if not isinstance(new_dim_or_dims, list) or not new_dim_or_dims: raise ValueError( "new_dim_or_dims must be a Dimension or a list of Dimension got %s" % (new_dim_or_dims,)) try: positions = [in_dims.index(d) for d in old_dim_or_dims] pos = positions[0] if positions != list(range(pos, pos + len(positions))): raise ValueError() except ValueError: raise ValueError( "old_dim_or_dims must be a subsequence of the input's dimensions" " old_dim_or_dims=%s input's dimensions=%s" % (old_dim_or_dims, in_dims)) return Shape(in_dims[:pos] + new_dim_or_dims + in_dims[pos + len(old_dim_or_dims):])
213,650
Reduction on 1 or more axes. If reduced_dim is present, then only that dimension is reduced out. Alternatively, specify output_shape. Do not specify both reduced_dim and output_shape. If neither is specified, then all dimensions are reduced out. Args: x: a Tensor disable_positional_args: None output_shape: an optional Shape. Must be a subsequence of x.shape. reduced_dim: a mtf.Dimension name: an optional string Returns: a Tensor
def reduce_sum(x, disable_positional_args=None, output_shape=None, reduced_dim=None, name=None): output_shape = convert_to_shape(output_shape) reduced_dim = convert_to_dimension(reduced_dim) assert disable_positional_args is None output_shape = _reduction_output_shape(x, output_shape, reduced_dim) if output_shape == x.shape: return x return ReduceOperation(x, output_shape, "SUM", name=name).outputs[0]
213,654
Reduction on 1 or more axes. If reduced_dim is present, then only that dimension is reduced out. Alternatively, specify output_shape. Do not specify both reduced_dim and output_shape. If neither is specified, then all dimensions are reduced out. Args: x: a Tensor disable_positional_args: None output_shape: an optional Shape. Must be a subsequence of x.shape. reduced_dim: a mtf.Dimension name: an optional string Returns: a Tensor
def reduce_mean(x, disable_positional_args=None, output_shape=None, reduced_dim=None, name=None): output_shape = convert_to_shape(output_shape) reduced_dim = convert_to_dimension(reduced_dim) assert disable_positional_args is None output_shape = _reduction_output_shape(x, output_shape, reduced_dim) with tf.variable_scope(name, default_name="reduce_mean"): if output_shape == x.shape: return x return reduce_sum( x, output_shape=output_shape) * (output_shape.size / x.shape.size)
213,655
Reduction on 1 or more axes. Args: x: a Tensor disable_positional_args: None output_shape: an optional Shape. Must be a subsequence of x.shape. reduced_dim: an optional Dimension name: an optional string Returns: a Tensor
def reduce_max(x, disable_positional_args=None, output_shape=None, reduced_dim=None, name=None): output_shape = convert_to_shape(output_shape) reduced_dim = convert_to_dimension(reduced_dim) assert disable_positional_args is None output_shape = _reduction_output_shape(x, output_shape, reduced_dim) if output_shape is None: output_shape = Shape([]) if output_shape == x.shape: return x return ReduceOperation( x, output_shape, "MAX", name=name or "reduce_max").outputs[0]
213,656
Argmax and Max. Args: x: a Tensor reduced_dim: a Dimension in x.shape.dims dtype: a tf.dtype (for the output) name: an optional string Returns: indices: a Tensor with given dtype values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)
def top_1(x, reduced_dim, dtype=tf.int32, name=None): reduced_dim = convert_to_dimension(reduced_dim) with tf.name_scope(name, default_name="top_1"): max_val = reduce_max(x, reduced_dim=reduced_dim) is_max = to_float(equal(x, max_val)) pos = mtf_range(x.mesh, reduced_dim, tf.float32) ret = reduce_max(is_max * pos, reduced_dim=reduced_dim) ret = cast(ret, dtype) return ret, max_val
213,659
Like tf.top_k. This operation returns two tensors with the same shape. The output shape is identical to the shape of x, except that reduced_dim is replaced by new_dim. Args: x: a Tensor reduced_dim: a Dimension in x.shape.dims. new_dim: a Dimension. The size determines k. dtype: optional dtype for indices. name: optional string. Returns: indices: a Tensor with given dtype. values: a Tensor with same type as x.
def top_k(x, reduced_dim, new_dim, dtype=tf.int32, name=None): reduced_dim = convert_to_dimension(reduced_dim) new_dim = convert_to_dimension(new_dim) indices = [] values = [] k = new_dim.size with tf.name_scope(name, default_name="top_k"): for i in xrange(k): max_index, max_val = top_1(x, reduced_dim, dtype) indices.append(max_index) values.append(max_val) if i + 1 < k: x += one_hot(max_index, reduced_dim, on_value=-1e9, dtype=x.dtype) axis = x.shape.dims.index(reduced_dim) return stack(indices, new_dim.name, axis), stack(values, new_dim.name, axis)
213,661
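A numpy sketch of the same strategy (repeatedly take the arg-max, then add a large negative value to mask the winner); this mirrors the loop above rather than calling mtf:
import numpy as np
def topk_by_masking(x, k):
    x = x.astype(np.float64).copy()
    indices, values = [], []
    for _ in range(k):
        i = int(np.argmax(x))
        indices.append(i)
        values.append(float(x[i]))
        x[i] += -1e9  # same masking idea as one_hot(..., on_value=-1e9) above
    return indices, values
print(topk_by_masking(np.array([0.1, 2.0, 0.5, 1.5]), 2))  # ([1, 3], [2.0, 1.5])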
Either argmax or random sampling. Args: x: a Tensor. dim: a Dimension in x.shape.dims temperature: a float; 0.0 means argmax, 1.0 means sampling from the softmax distribution dtype: a tf.dtype (for the output) name: an optional string Returns: a Tensor with type dtype.
def sample_with_temperature(x, dim, temperature=1.0, dtype=tf.int32, name=None): dim = convert_to_dimension(dim) with tf.name_scope(name, default_name="sample_with_temperature"): if temperature != 0.0: # gumbel trick. # Note: we don't want to generate 0 or 1 because: # * -log(-log(0)) is -infinity # * -log(-log(1)) is +infinity. # np.finfo(x.dtype.as_numpy_dtype).tiny doesn't work on bfloat16 tiny_val = 1e-9 g = -log(-log( random_uniform( x.mesh, x.shape, minval=tiny_val, maxval=1., dtype=x.dtype))) x += g * temperature return argmax(x, dim, dtype, name)
213,662
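A numpy sketch of the Gumbel-max trick used above: adding Gumbel(0, 1) noise scaled by the temperature and taking the arg-max draws a sample from softmax(x / temperature), up to the clipping of the uniform noise. Illustration only, not the mtf API.
import numpy as np
def sample_with_temperature_np(logits, temperature, rng):
    if temperature == 0.0:
        return int(np.argmax(logits))
    u = rng.uniform(low=1e-9, high=1.0, size=logits.shape)  # avoid exactly 0 or 1
    g = -np.log(-np.log(u))                                 # Gumbel(0, 1) noise
    return int(np.argmax(logits + g * temperature))
print(sample_with_temperature_np(np.array([1.0, 3.0, 0.5]), 1.0, np.random.default_rng(0)))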
Binary addition with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
def add(x1, x2, output_shape=None, name=None): output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarAddOperation(x1, x2).outputs[0] with tf.name_scope(name, default_name="add"): x1, x2 = binary_arguments_to_tensors(x1, x2) return AddOperation( x1, x2, output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape)).outputs[0]
213,663
Binary subtraction with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
def sub(x1, x2, output_shape=None, name=None): output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarAddOperation(x1, -x2).outputs[0] with tf.name_scope(name, default_name="sub"): x1, x2 = binary_arguments_to_tensors(x1, x2) return add(x1, negative(x2), output_shape=output_shape)
213,664
Binary multiplication with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
def multiply(x1, x2, output_shape=None, name=None): if not isinstance(x2, Tensor): return ScalarMultiplyOperation(x1, x2).outputs[0] with tf.name_scope(name, default_name="mul"): x1, x2 = binary_arguments_to_tensors(x1, x2) return einsum( [x1, x2], output_shape=_infer_binary_broadcast_shape( x1.shape, x2.shape, output_shape))
213,665
Binary division with broadcasting. Args: x1: a Tensor x2: a Tensor output_shape: an optional Shape name: an optional string Returns: a Tensor
def divide(x1, x2, output_shape=None, name=None): output_shape = convert_to_shape(output_shape) if not isinstance(x2, Tensor): return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0] with tf.name_scope(name, default_name="divide"): x1, x2 = binary_arguments_to_tensors(x1, x2) return multiply(x1, reciprocal(x2), output_shape=output_shape)
213,666
Slice operation. Call externally as mtf.slice() Args: x: a Tensor begin: integer, position at which to begin slicing along the given dimension size: integer, size of the slice along that dimension slice_dim_name: string, name of the dimension to slice name: an optional string Returns: a Tensor whose sliced dimension has the given size.
def mtf_slice(x, begin, size, slice_dim_name, name=None): return SliceOperation( x, begin, size, slice_dim_name, name=name).outputs[0]
213,667
Pad operation. Args: x: a Tensor paddings: list of integers of size 2, padding size before and after for dim. dim_name: string, name for the padding dim name: an optional string Returns: a Tensor whose dim_name dimension is extended by the total padding.
def pad(x, paddings, dim_name, name=None): return PadOperation( x, paddings, dim_name, name=name).outputs[0]
213,668
Shorthand for einsum([one_hot(indices, dim)], weights, reduced_dims=[dim]). Args: weights: a Tensor indices: a Tensor with integer type dim: a Dimension output_shape: an optional mtf.Shape Returns: a Tensor
def gather(weights, indices, dim, output_shape=None): dim = convert_to_dimension(dim) output_shape = convert_to_shape(output_shape) if weights.dtype == tf.bool: return cast(gather(to_float(weights), indices, dim, output_shape), tf.bool) return einsum([one_hot(indices, dim, dtype=weights.dtype), weights], reduced_dims=[dim], output_shape=output_shape)
213,670
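A numpy sketch of the identity the shorthand above relies on: gathering rows equals an einsum of a one-hot matrix against the weights.
import numpy as np
weights = np.arange(12.0).reshape(4, 3)   # [vocab=4, d=3]
indices = np.array([2, 0])
one_hot = np.eye(4)[indices]              # [2, 4]
gathered = np.einsum("bv,vd->bd", one_hot, weights)
assert np.array_equal(gathered, weights[indices])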
Compute gradients in mtf. Args: ys: a list of Tensors xs: a list of Tensors grad_ys: an optional list of Tensors Returns: grad_xs: a list of Tensors
def gradients(ys, xs, grad_ys=None): graph = ys[0].graph if not grad_ys: grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys] # figure out what Tensors are downstream of xs downstream = set(xs) for op in graph.operations: if op.has_gradient: if set(op.inputs) & downstream: downstream |= set(op.outputs) tensor_to_gradient = dict(zip(ys, grad_ys)) for op in graph.operations[::-1]: grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs] if op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream): with tf.variable_scope(op.name + "/gradients"): input_grads = op.gradient(grad_outputs) for inp, grad in zip(op.inputs, input_grads): if inp in downstream and grad is not None: if inp in tensor_to_gradient: tensor_to_gradient[inp] += grad else: tensor_to_gradient[inp] = grad return [tensor_to_gradient.get(x, None) for x in xs]
213,671
Infer shape of the output of a binary op with broadcasting. If the output shape is not given with given_output_shape, then we check to see if one of the shapes is a subsequence of the other one, and we return the one that is the supersequence. Otherwise, we list the dimensions of shape1, followed by all new dimensions in shape2. Args: shape1: a Shape shape2: a Shape given_output_shape: an optional Shape Returns: a Shape
def _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None): shape1 = convert_to_shape(shape1) shape2 = convert_to_shape(shape2) given_output_shape = convert_to_shape(given_output_shape) if given_output_shape is not None: return given_output_shape if is_subsequence(shape1.dims, shape2.dims): return shape2 if is_subsequence(shape2.dims, shape1.dims): return shape1 return Shape( shape1.dims + [d for d in shape2.dims if d not in shape1.dims])
213,672
Expand dimensions and transpose if necessary. Args: x: a tf.Tensor input_shape: a Shape output_shape: a Shape whose dimensions are a superset of those in input_shape Returns: a tf.Tensor
def _expand_dims(x, input_shape, output_shape): verify_no_new_dims([output_shape], input_shape) if input_shape == output_shape or input_shape.ndims == 0: return x perm = [input_shape.dims.index(d) for d in output_shape.dims if d in input_shape.dims] x = tf.transpose(x, perm) for i, d in enumerate(output_shape.dims): if d not in input_shape.dims: x = tf.expand_dims(x, i) return x
213,673
Turn shapes into an einsum equation. e.g. "ij,jk->ik" Args: input_shapes: a list of Shapes output_shape: a Shape Returns: a string
def _einsum_equation(input_shapes, output_shape): ret = [] next_letter = ord("a") dim_to_letter = {} for shape_num, shape in enumerate(input_shapes + [output_shape]): if shape_num == len(input_shapes): ret.append("->") elif shape_num > 0: ret.append(",") for d in shape.dims: if d not in dim_to_letter: dim_to_letter[d] = chr(next_letter) next_letter += 1 ret.append(dim_to_letter[d]) return "".join(ret)
213,674
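Usage sketch for the helper above, with throwaway namedtuple stand-ins for Dimension and Shape (only .dims is accessed); the dimension names are invented:
import collections
Dim = collections.namedtuple("Dim", ["name", "size"])
FakeShape = collections.namedtuple("FakeShape", ["dims"])
a, b, c = Dim("batch", 2), Dim("d_model", 4), Dim("heads", 8)
print(_einsum_equation([FakeShape([a, b]), FakeShape([b, c])], FakeShape([a, c])))
# -> "ab,bc->ac"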
Verifies that all dimensions in the output are in at least one input. Args: input_shapes: a list of Shapes output_shape: a Shape Raises: ValueError: if there are new dimensions in the output.
def verify_no_new_dims(input_shapes, output_shape): all_input_dims = set(sum([s.dims for s in input_shapes], [])) all_output_dims = set(output_shape.dims) if not all_output_dims.issubset(all_input_dims): raise ValueError( "No new dimensions allowed in output" " input_shapes = %s output_shape= %s" % ([s.dims for s in input_shapes], output_shape.dims))
213,676
Coordinates of a processor in the mesh. Args: mesh_shape: a Shape pnum: an integer less than mesh_shape.size Returns: a list of integers with length len(mesh_shape)
def pnum_to_processor_coordinates(mesh_shape, pnum): ret = [] for dimsize in mesh_shape.to_integer_list[::-1]: ret.append(pnum % dimsize) pnum //= dimsize return ret[::-1]
213,677
Inverse of pnum_to_processor_coordinates. Args: mesh_shape: a Shape coord: a list of integers with length len(mesh_shape) Returns: an integer less than mesh_shape.size
def processor_coordinates_to_pnum(mesh_shape, coord): ret = 0 multiplier = 1 for c, d in zip(coord[::-1], mesh_shape.to_integer_list[::-1]): ret += multiplier * c multiplier *= d return ret
213,678
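A pure-Python mirror of the two conversions above, using a plain list of dimension sizes instead of a Shape (a sketch for intuition, not the mtf API):
def coords_from_pnum(dim_sizes, pnum):
    ret = []
    for size in dim_sizes[::-1]:
        ret.append(pnum % size)
        pnum //= size
    return ret[::-1]
def pnum_from_coords(dim_sizes, coord):
    ret, multiplier = 0, 1
    for c, size in zip(coord[::-1], dim_sizes[::-1]):
        ret += multiplier * c
        multiplier *= size
    return ret
print(coords_from_pnum([2, 3, 4], 10))         # [0, 2, 2]
print(pnum_from_coords([2, 3, 4], [0, 2, 2]))  # 10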
Group number for grouped allreduce. Args: mesh_shape: a Shape group_dims: a list of integers (the dimensions reduced over) pnum: an integer Returns: an integer
def pnum_to_group(mesh_shape, group_dims, pnum): coord = pnum_to_processor_coordinates(mesh_shape, pnum) remaining_shape = Shape( [d for i, d in enumerate(mesh_shape) if i not in group_dims]) remaining_coord = [d for i, d in enumerate(coord) if i not in group_dims] return processor_coordinates_to_pnum(remaining_shape, remaining_coord)
213,679
Groups of processors which differ only in the given dimensions. Args: mesh_shape: a Shape group_dims: a list of integers Returns: a list of lists of integers (processor numbers)
def processor_groups(mesh_shape, group_dims): group_numbers = [ pnum_to_group(mesh_shape, group_dims, pnum) for pnum in xrange(mesh_shape.size)] ret = [] for pnum, g in enumerate(group_numbers): while len(ret) <= g: ret.append([]) ret[g].append(pnum) return ret
213,680
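Continuing the pure-Python sketch above: for a 2x2 mesh with group_dims=[1], processors that share coordinate 0 fall into the same group, so processor_groups would return [[0, 1], [2, 3]].
groups = {}
for pnum in range(4):
    coord = coords_from_pnum([2, 2], pnum)         # helper from the sketch above
    groups.setdefault(coord[0], []).append(pnum)   # drop mesh dim 1, the grouped one
print(groups)  # {0: [0, 1], 1: [2, 3]}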
Numerically stable version of log(reduce_sum(exp(x))). The reduced dimension is removed from the output shape. Note: with a minor change, we could allow multiple reduced dimensions. Args: x: a Tensor reduced_dim: a dimension in x extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim) name: an optional string Returns: a Tensor with shape (x.shape - reduced_dim) and the same dtype as x.
def reduce_logsumexp(x, reduced_dim, extra_logit=None, name=None): reduced_dim = convert_to_dimension(reduced_dim) with tf.variable_scope(name, default_name="reduce_logsumexp"): reduced_shape = x.shape - reduced_dim max_logit = reduce_max(stop_gradient(x), output_shape=reduced_shape) if extra_logit is not None: if isinstance(extra_logit, Tensor): extra_logit = stop_gradient(extra_logit) max_logit = maximum(max_logit, extra_logit) x -= max_logit exp_x = exp(x) sum_exp_x = reduce_sum(exp_x, output_shape=reduced_shape) if extra_logit is not None: sum_exp_x += exp(extra_logit - max_logit) return log(sum_exp_x) + max_logit
213,681
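A numpy sketch of the max-subtraction trick above (same arithmetic, without the mesh machinery):
import numpy as np
x = np.array([1000.0, 1001.0, 1002.0])
m = np.max(x)
print(np.log(np.sum(np.exp(x - m))) + m)  # ~1002.41, computed stably
print(np.log(np.sum(np.exp(x))))          # overflows: exp(1000) is inf in float64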
log(softmax(x)). Args: x: a Tensor whose shape contains vocab_dim reduced_dim: a Dimension extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim) name: an optional string Returns: a Tensor with the same shape as x
def log_softmax(x, reduced_dim, extra_logit=None, name=None): return x - reduce_logsumexp( x, reduced_dim, extra_logit=extra_logit, name=name)
213,682
Create a 1d mesh tensor with a range from [0, dim.size). Call externally as mtf.range() Args: mesh: a Mesh dim: a Dimension dtype: a tf.DType name: an optional string Returns: a Tensor
def mtf_range(mesh, dim, dtype, name=None): dim = convert_to_dimension(dim) with tf.variable_scope(name, default_name="range"): if dtype == tf.bfloat16: # tf.range(dtype=bfloat16) gives the wrong shape. # TODO(noam): report the bug. tf_range = tf.cast(tf.range(dim.size), tf.bfloat16) else: tf_range = tf.range(dim.size, dtype=dtype) return import_tf_tensor(mesh, tf_range, shape=Shape([dim]))
213,684
Print counters hierarchically. Each counter is a pair of a string and a number. The string can have slashes, meaning that the number also counts towards each prefix. e.g. "parameters/trainable" counts towards both "parameters" and "parameters/trainable". Args: counters: a list of (string, number) pairs Returns: a string
def pretty_print_counters(counters): totals = collections.defaultdict(int) for (name, val) in counters: prefixes = [name[:i] for i in xrange(len(name)) if name[i] == "/"] + [name] for p in prefixes: totals[p] += val parts = [] for name, val in sorted(six.iteritems(totals)): parts.append(" " * name.count("/") + "%s: %.3g" % (name, val)) return "\n".join(parts)
213,685
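Usage sketch for the function above (assuming it and its module imports are in scope; the counter names are invented):
print(pretty_print_counters([("parameters", 3), ("parameters/trainable", 2)]))
# parameters: 5
#  parameters/trainable: 2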
Parses a string into a list of pairs. In the input string, each pair is separated by a colon, and the delimiters between pairs are any of " ,.;". e.g. "rows:32,cols:32" Args: s: str to parse. seconds_to_int: Boolean. If True, then the second elements are returned as integers; otherwise they are strings. Returns: List of tuple pairs. Raises: ValueError: Badly formatted string.
def _parse_string_to_list_of_pairs(s, seconds_to_int=False): ret = [] for p in [s.split(":") for s in re.sub("[,.;]", " ", s).split()]: if len(p) != 2: raise ValueError("bad input to _parse_string_to_list_of_pairs %s" % s) if seconds_to_int: ret.append((p[0], int(p[1]))) else: ret.append(tuple(p)) return ret
213,686
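Usage sketch for the parser above (assuming it and the re module import are in scope):
print(_parse_string_to_list_of_pairs("rows:32,cols:32", seconds_to_int=True))
# [('rows', 32), ('cols', 32)]
print(_parse_string_to_list_of_pairs("batch:b1;d_ff:m1"))
# [('batch', 'b1'), ('d_ff', 'm1')]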
Call a function once on each device. Args: devices: a list of n devices fn: a function *args: arguments, each of which is a list of length n **kwargs: keyword-args, each of which is a list of length n Returns: a list of length n Raises: ValueError: if the arguments are not all lists of length n
def parallel(devices, fn, *args, **kwargs): if not isinstance(devices, list): raise ValueError("devices must be a list") for x in list(args) + list(six.itervalues(kwargs)): if not isinstance(x, list) or len(x) != len(devices): raise ValueError( "Argument not a list with same length as devices " "arg=%s devices=%s" % (x, devices)) ret = [] for i, device in enumerate(devices): with tf.device(device): with tf.variable_scope("parallel_%d" % i): my_args = [x[i] for x in args] my_kwargs = {k: v[i] for k, v in six.iteritems(kwargs)} ret.append(fn(*my_args, **my_kwargs)) return ret
213,687
Random uniform. Args: mesh: a Mesh shape: a Shape **kwargs: keyword args for tf.random.uniform, except seed Returns: a Tensor
def random_uniform(mesh, shape, **kwargs): shape = convert_to_shape(shape) return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]
213,690
Dropout layer. Args: x: a Tensor keep_prob: a float between 0.0 and 1.0 noise_shape: an optional Shape (a subset of x.shape) name: an optional string Returns: a Tensor
def dropout(x, keep_prob, noise_shape=None, name=None): noise_shape = convert_to_shape(noise_shape) if noise_shape is None: noise_shape = x.shape with tf.variable_scope(name, default_name="dropout"): if keep_prob == 1.0: return x noise = cast(less(random_uniform( x.mesh, noise_shape, dtype=x.dtype), keep_prob), x.dtype) noise /= keep_prob return x * noise
213,691
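A numpy sketch of the inverted-dropout scaling used above: surviving entries are divided by keep_prob so the expectation of the output matches the input.
import numpy as np
def dropout_np(x, keep_prob, rng):
    if keep_prob == 1.0:
        return x
    mask = (rng.uniform(size=x.shape) < keep_prob).astype(x.dtype) / keep_prob
    return x * mask
print(dropout_np(np.ones(8), 0.5, np.random.default_rng(0)))  # entries are 0.0 or 2.0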
Cumulative product of a list. Args: l: a list of integers Returns: a list with one more element (starting with 1)
def _cumprod(l): ret = [1] for item in l: ret.append(ret[-1] * item) return ret
213,692
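Usage sketch for the helper above:
print(_cumprod([2, 3, 4]))  # [1, 2, 6, 24]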
Log the sizes and shapes of variables, and the total size. Args: var_list: a list of variables; defaults to trainable_variables tag: a string; defaults to "Trainable Variables" verbose: bool, if True, log every weight; otherwise, log total size only. mesh_to_impl: an optional map from Mesh to MeshImpl
def log_variable_sizes(var_list, tag, verbose=True, mesh_to_impl=None): if not var_list: return name_to_var = {v.name: v for v in var_list} total_size = 0 total_slice_size = 0 for v_name in sorted(list(name_to_var)): v = name_to_var[v_name] v_size = v.shape.size if mesh_to_impl is not None: slice_size = mesh_to_impl[v.mesh].slice_size(v.shape) else: slice_size = 0 total_slice_size += slice_size if verbose: tf.logging.info( "Variable %s size %s slice_size %s %s", v.name.ljust(60), str(v_size).ljust(12), str(slice_size).ljust(12), str(v.shape).ljust(60)) if isinstance(v, StackedVariable): for n in v.original_names: tf.logging.info(" " + n) total_size += v_size tf.logging.info("%s count: %s Total size: %s Total slice_size: %s", tag.ljust(30), str(len(var_list)).ljust(6), str(total_size).ljust(15), str(total_slice_size).ljust(15))
213,693
A shape containing the union of all dimensions in the input shapes. Args: shapes: a list of Shapes Returns: a Shape
def _shape_union(shapes): return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))
213,696
Flatten all but last num_nonbatch_dims into one dimension. Args: x: a tf.Tensor num_nonbatch_dims: an integer Returns: a tf.Tensor with 1 + num_nonbatch_dims dimensions.
def _tf_flatten_batch_dims(x, num_nonbatch_dims): shape = x.shape.as_list() assert None not in shape new_shape = ([list_product(shape[:-num_nonbatch_dims])] + shape[-num_nonbatch_dims:]) if new_shape != shape: x = tf.reshape(x, new_shape) return x
213,697
Reverse op of _tf_flatten_batch_dims. Un-flatten the first dimension of x to match all but the last num_nonbatch_dims dimensions of prototype. Args: x: a tf.Tensor with 1 + num_nonbatch_dims dimensions num_nonbatch_dims: an integer prototype: a tf.Tensor Returns: a tf.Tensor
def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype): assert x.shape.ndims == 1 + num_nonbatch_dims new_shape = ( prototype.shape.as_list()[:-num_nonbatch_dims] + x.shape.as_list()[1:]) assert None not in new_shape if new_shape != x.shape.as_list(): x = tf.reshape(x, new_shape) return x
213,698
Concat each block with the margins of adjacent blocks. Get left and right blocks_dim and concatenate along block_size_dim. Args: x: a Tensor. blocks_dim: a Dimension in x.shape block_size_dim: a Dimension in x.shape halo_size: an integer wrap: a boolean Returns: a Tensor with the same shape as x, other than in block_size_dim, whose size is increased by 2*halo_size.
def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False): if halo_size == 0: return x block_size = block_size_dim.size partial_size = halo_size % block_size num_complete_blocks = halo_size // block_size parts = [x] for i in xrange(1, num_complete_blocks + 1): parts = ([shift(x, i, blocks_dim, wrap)] + parts + [shift(x, -i, blocks_dim, wrap)]) if partial_size > 0: left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name) right_margin = mtf_slice( x, block_size_dim.size - partial_size, partial_size, block_size_dim.name) parts = ( [shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)] + parts + [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)]) return concat(parts, block_size_dim.name)
213,699
How many ways does a tensor dimension get split. This is used to "cheat" when building the mtf graph and peek at how a tensor dimension will be split. Returns 1 if the tensor dimension is not split. Args: layout: an input to convert_to_layout_rules mesh_shape: an input to convert_to_shape tensor_dim: a Dimension Returns: an integer
def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim): layout_rules = convert_to_layout_rules(layout) mesh_shape = convert_to_shape(mesh_shape) mesh_axis = layout_rules.tensor_dimension_to_mesh_axis(tensor_dim, mesh_shape) if mesh_axis is None: return 1 else: return mesh_shape.dims[mesh_axis].size
213,701
Constructs a shape for a Tensor or Mesh. Args: dims: List-like of Dimensions. Raises: ValueError: If Dimensions are repeated.
def __init__(self, dims): self._dims = [convert_to_dimension(d) for d in tuple(dims)] if len(set(dims)) != len(dims): raise ValueError("Shape must not have repeated dimensions %s" % dims)
213,705
Mesh axis associated with tensor dimension (or None). Args: tensor_dimension: Dimension. mesh_shape: Shape. Returns: Integer or None. Raises: ValueError: If one Tensor dimension maps to two mesh dimensions.
def tensor_dimension_to_mesh_axis(self, tensor_dimension, mesh_shape): val = [i for i, mesh_dimension in enumerate(mesh_shape) if (tensor_dimension.name, mesh_dimension.name) in self._pairs] if len(val) > 1: raise ValueError( "Tensor dimension maps to multiple mesh dimensions" " tensor_dimension=%s mesh_shape=%s layout=%s" % (tensor_dimension, mesh_shape, self._pairs)) return val[0] if val else None
213,712
Computes TensorLayout given a Tensor Shape and a Mesh Shape. Args: tensor_shape: Shape. mesh_shape: Shape. Returns: TensorLayout. Raises: ValueError: If two Tensor Dimensions map to the same Mesh Dimensions.
def tensor_layout(self, tensor_shape, mesh_shape): ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape) for d in tensor_shape] not_nones = [a for a in ret if a is not None] if len(not_nones) != len(set(not_nones)): raise ValueError( "Two Tensor Dimensions may not map to the same Mesh Dimension:" " layout=%s tensor_shape=%s mesh_shape=%s " % (self, tensor_shape, mesh_shape)) return TensorLayout(ret)
213,713
For each mesh axis, which Tensor axis maps to it. Args: mesh_ndims: int. Returns: Tuple of optional integers, with length mesh_ndims.
def mesh_axis_to_tensor_axis(self, mesh_ndims): ta2ma = self._tensor_axis_to_mesh_axis return tuple( [ta2ma.index(mesh_axis) if mesh_axis in ta2ma else None for mesh_axis in xrange(mesh_ndims)])
213,714
Like tf.Graph.unique_name, returns a unique operation name for `name`. Args: name: The name for an operation. mark_as_used: whether to mark this name as being used. Returns: A string to use as the name for the operation.
def unique_name(self, name, mark_as_used=True): scope_name = tf.get_variable_scope().name if scope_name: name = scope_name + "/" + name # As in TensorFlow, treat names as case insensitive when deciding whether # they are in use. name_key = name.lower() i = self._names_in_use.get(name_key, 0) if mark_as_used: self._names_in_use[name_key] = i + 1 if i > 0: base_name_key = name_key while name_key in self._names_in_use: name_key = "%s_%d" % (base_name_key, i) i += 1 if mark_as_used: self._names_in_use[name_key] = 1 name = "%s_%d" % (name, i-1) return name
213,716
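A pure-Python mirror of the counting logic above (ignoring variable scopes and case folding), showing the names it would produce; a sketch, not the mtf.Graph API:
names_in_use = {}
def unique_name_sketch(name):
    i = names_in_use.get(name, 0)
    names_in_use[name] = i + 1
    if i == 0:
        return name
    candidate = "%s_%d" % (name, i)
    while candidate in names_in_use:
        i += 1
        candidate = "%s_%d" % (name, i)
    names_in_use[candidate] = 1
    return candidate
print(unique_name_sketch("dense"))  # dense
print(unique_name_sketch("dense"))  # dense_1
print(unique_name_sketch("dense"))  # dense_2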
Turn a Tensor into a tf.Tensor. Args: x: Tensor. Returns: tf.Tensor.
def export_to_tf_tensor(self, x): mesh_impl = self.mesh_impl(x) return mesh_impl.export_to_tf_tensor( x, self.tensors[x].to_laid_out_tensor())
213,721
Creates a mesh implementation. Args: shape: Shape. layout_rules: LayoutRules.
def __init__(self, shape, layout_rules): self._shape = convert_to_shape(shape) self._layout_rules = convert_to_layout_rules(layout_rules)
213,729
Compute TensorLayout for a Tensor or a Shape. Args: arg: Tensor or Shape. Returns: TensorLayout.
def tensor_layout(self, arg): if isinstance(arg, Tensor): arg = arg.shape return self.layout_rules.tensor_layout(arg, self.shape)
213,730
For each mesh axis, give the product of previous tensor axes.

Args:
  tensor_shape: Shape.

Returns:
  list with length self.ndims where each element is an integer or None.

def mesh_axis_to_cumprod(self, tensor_shape):
  tensor_layout = self.tensor_layout(tensor_shape)
  ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)
  ta2cumprod = tensor_shape.cumprod
  return [None if ta is None else ta2cumprod[ta] for ta in ma2ta]
213,731
Shape of each slice of the Tensor.

Args:
  tensor_shape: Shape.

Returns:
  list of integers with length tensor_shape.ndims.

Raises:
  ValueError: If a Tensor dimension is not divisible by the corresponding
    Mesh dimension.

def slice_shape(self, tensor_shape):
  tensor_layout = self.tensor_layout(tensor_shape)
  ret = []
  for tensor_dim, mesh_axis in zip(
      tensor_shape, tensor_layout.tensor_axis_to_mesh_axis):
    if mesh_axis is None:
      ret.append(tensor_dim.size)
    else:
      mesh_dim = self.shape[mesh_axis]
      if tensor_dim.size % mesh_dim.size != 0:
        raise ValueError(
            "Tensor dimension size not divisible by mesh dimension size:"
            " tensor_shape=%s tensor_layout=%s"
            % (tensor_shape, tensor_layout))
      ret.append(tensor_dim.size // mesh_dim.size)
  return ret
213,732
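The arithmetic is simply integer division of each split dimension by the size of its mesh dimension; dimensions that are not split keep their full size. A pure-Python sketch with hypothetical sizes and layout:

# Illustrative slice-shape arithmetic (hypothetical tensor, mesh, and layout).
tensor_dims = [("batch", 8), ("length", 256), ("d_model", 512)]
mesh_sizes = [2, 4]                        # mesh axes: rows=2, cols=4
tensor_axis_to_mesh_axis = [0, None, 1]    # batch->rows, length unsplit, d_model->cols

slice_shape = []
for (name, size), mesh_axis in zip(tensor_dims, tensor_axis_to_mesh_axis):
  if mesh_axis is None:
    slice_shape.append(size)
  else:
    assert size % mesh_sizes[mesh_axis] == 0, "size must divide evenly"
    slice_shape.append(size // mesh_sizes[mesh_axis])

assert slice_shape == [4, 256, 128]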
Begin position for the tensor slice for the given processor.

Args:
  tensor_shape: Shape.
  pnum: int <= self.size.

Returns:
  list of integers with length tensor_shape.ndims.

def slice_begin(self, tensor_shape, pnum):
  tensor_layout = self.tensor_layout(tensor_shape)
  coordinates = pnum_to_processor_coordinates(self.shape, pnum)
  ret = []
  for dim_size, mesh_axis in zip(
      tensor_shape.to_integer_list, tensor_layout.tensor_axis_to_mesh_axis):
    if mesh_axis is None:
      ret.append(0)
    else:
      ret.append(
          dim_size // self.shape[mesh_axis].size * coordinates[mesh_axis])
  return ret
213,733
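Continuing the hypothetical setup above: processor number 6 on a [rows=2, cols=4] mesh has coordinates [1, 2] (mesh axis 0 is the most significant digit of pnum), so its slice of an [8, 256, 512] tensor split as batch->rows, d_model->cols begins at [4, 0, 256].

# Illustrative slice_begin arithmetic for processor pnum=6 (hypothetical setup).
mesh_sizes = [2, 4]                        # rows=2 (axis 0), cols=4 (axis 1)
tensor_sizes = [8, 256, 512]               # batch, length, d_model
tensor_axis_to_mesh_axis = [0, None, 1]    # batch->rows, d_model->cols

def pnum_to_coordinates(pnum):
  coords = []
  for size in reversed(mesh_sizes):        # axis 0 is the most significant digit
    coords.append(pnum % size)
    pnum //= size
  return coords[::-1]

coords = pnum_to_coordinates(6)            # -> [1, 2]
begin = [0 if a is None else size // mesh_sizes[a] * coords[a]
         for size, a in zip(tensor_sizes, tensor_axis_to_mesh_axis)]
assert coords == [1, 2] and begin == [4, 0, 256]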
Calls tf.Print.

Args:
  x: LaidOutTensor.
  data: list of LaidOutTensor.
  message: str.
  **kwargs: keyword arguments to tf.print.

Returns:
  LaidOutTensor.

def Print(self, x, data, message, **kwargs):  # pylint: disable=invalid-name
  del data, message, kwargs
  tf.logging.warning(
      "Warning - mtf.Print not implemented for this mesh type")
  return x
213,734
Receive the slice from processor pcoord - offset.

Args:
  x: a LaidOutTensor
  mesh_axis: an integer
  offset: an integer
  wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.

def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
  n = self.shape[mesh_axis].size
  source_pcoord = []
  for i in xrange(n):
    c = i - offset
    if c != c % n:
      if wrap:
        c = c % n
      else:
        c = None
    source_pcoord.append(c)
  return self.receive(x, mesh_axis, source_pcoord)
213,736
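The list built above tells each processor which coordinate to receive from: processor i pulls from coordinate i - offset, either wrapped modulo n or replaced by None (zero padding). For example, with n=4 and offset=1:

# Illustrative source_pcoord for n=4 processors and offset=1.
n, offset = 4, 1
with_wrap = [(i - offset) % n for i in range(n)]                    # [3, 0, 1, 2]
without_wrap = [i - offset if 0 <= i - offset < n else None
                for i in range(n)]                                  # [None, 0, 1, 2]
assert with_wrap == [3, 0, 1, 2]
assert without_wrap == [None, 0, 1, 2]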
Returns a LaidOutTensor containing the processor coordinate.

Args:
  mesh_axis: int.

Returns:
  LaidOutTensor where each slice is an integer scalar.

def laid_out_pcoord(self, mesh_axis):
  divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:])
  modulus = self.shape[mesh_axis].size

  def my_fn(pnum):
    return (pnum // divisor) % modulus

  return self.slicewise(my_fn, self.laid_out_pnum())
213,737
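With processors numbered so that mesh axis 0 is the most significant digit, the coordinate along an axis is (pnum // divisor) % modulus, where divisor is the product of the sizes of the later axes. A sketch for a hypothetical [rows=2, cols=4] mesh:

# Illustrative processor coordinates for a [rows=2, cols=4] mesh (8 processors).
mesh_sizes = [2, 4]

def pcoord(pnum, mesh_axis):
  divisor = 1
  for s in mesh_sizes[mesh_axis + 1:]:
    divisor *= s
  return (pnum // divisor) % mesh_sizes[mesh_axis]

assert [pcoord(p, 0) for p in range(8)] == [0, 0, 0, 0, 1, 1, 1, 1]   # rows coordinate
assert [pcoord(p, 1) for p in range(8)] == [0, 1, 2, 3, 0, 1, 2, 3]   # cols coordinate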
A LaidOutTensor with an int32 scalar, identical for identical slices.

This is useful for synchronizing random operations.

Args:
  tensor_shape: a TensorShape

Returns:
  a LaidOutTensor where each slice is an integer scalar.

def laid_out_slice_num(self, tensor_shape):
  ret = self.slicewise(lambda: tf.to_int32(0))
  tensor_layout = self.tensor_layout(tensor_shape)
  for mesh_axis in tensor_layout.tensor_axis_to_mesh_axis:
    if mesh_axis is not None:
      def my_fn(x, pcoord, mesh_dim_size):
        return x * mesh_dim_size + pcoord
      ret = self.slicewise(
          my_fn, ret, self.laid_out_pcoord(mesh_axis),
          self.shape[mesh_axis].size)
  return ret
213,738
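The loop above builds a mixed-radix number out of the processor coordinates along the mesh axes the tensor is actually split over, so processors that differ only along unused axes, and therefore hold identical slices, get the same number. A pure-Python sketch for a hypothetical [rows=2, cols=4] mesh:

# Illustrative slice numbers for a [rows=2, cols=4] mesh (8 processors).
mesh_sizes = [2, 4]

def pcoord(pnum, axis):
  divisor = 1
  for s in mesh_sizes[axis + 1:]:
    divisor *= s
  return (pnum // divisor) % mesh_sizes[axis]

def slice_num(pnum, used_mesh_axes):
  ret = 0
  for axis in used_mesh_axes:
    ret = ret * mesh_sizes[axis] + pcoord(pnum, axis)
  return ret

# Tensor split only over "cols": both rows hold the same four slices.
assert [slice_num(p, [1]) for p in range(8)] == [0, 1, 2, 3, 0, 1, 2, 3]
# Tensor split over both axes: every processor holds a distinct slice.
assert [slice_num(p, [0, 1]) for p in range(8)] == list(range(8))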
Implementation of a broadcast operation.

Args:
  old_slices: LaidOutTensor.
  old_shape: Shape.
  new_shape: Shape.

Returns:
  LaidOutTensor.

def broadcast_impl(self, old_slices, old_shape, new_shape):
  new_slice_shape = self.slice_shape(new_shape)

  def tf_fn(x):
    return (tf.zeros(new_slice_shape, dtype=x.dtype) +
            _expand_dims(x, old_shape, new_shape))

  return self.slicewise(tf_fn, old_slices)
213,739
Turns a single tf.Tensor into a list of slices, one for each processor.

Args:
  tf_tensor: tf.Tensor.
  tensor_shape: Shape.

Returns:
  list of tf.Tensor with length self.size.

def make_slices(self, tf_tensor, tensor_shape):
  tensor_layout = self.tensor_layout(tensor_shape)
  slice_shape = self.slice_shape(tensor_shape)

  def my_fn(pnum):
    if tensor_layout.is_fully_replicated:
      return tf_tensor
    else:
      slice_begin = self.slice_begin(tensor_shape, pnum)
      return tf.slice(tf_tensor, slice_begin, slice_shape)

  return parallel([tf_tensor.device] * self.size, my_fn,
                  list(xrange(self.size)))
213,740
Turns a set of slices into a single tensor.

Args:
  slices: list of tf.Tensor with length self.size.
  tensor_shape: Shape.
  device: optional str. If absent, we use the devices of the slices.

Returns:
  tf.Tensor.

def combine_slices(self, slices, tensor_shape, device=None):
  if tensor_shape.ndims == 0:
    return slices[0]

  ret = slices[:]
  tensor_layout = self.tensor_layout(tensor_shape)
  for mesh_dim, tensor_axis in zip(
      self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)):
    slice_size = len(ret) // mesh_dim.size
    if tensor_axis is None:
      ret = ret[:slice_size]
    else:
      if device:
        devices = [device] * slice_size
      else:
        devices = [ret[i].device for i in xrange(slice_size)]
      concat_inputs = []
      for i in xrange(slice_size):
        concat_inputs.append(
            [ret[i + slice_size * j] for j in xrange(mesh_dim.size)])
      ret = parallel(
          devices, tf.concat, concat_inputs,
          axis=[tensor_axis] * len(devices))
  assert len(ret) == 1
  return ret[0]
213,741
Create a LazyAllreduceSum.

Args:
  mesh_impl: a mesh_impl
  laid_out_input: a LaidOutTensor
  mesh_axes: a list of mesh axes
  add_counter_fn: a function taking no arguments which calls
    lowering.add_counter if and when the allreduce executes.

Returns:
  a LazyAllreduceSum

def __init__(self, mesh_impl, laid_out_input, mesh_axes, add_counter_fn=None):
  self.mesh_impl = mesh_impl
  self.laid_out_input = laid_out_input
  self.mesh_axes = mesh_axes
  self.add_counter_fn = add_counter_fn
  self._reduced = None
213,742
Add to another LazyAllreduceSum.

Args:
  other: a LazyAllreduceSum or a LaidOutTensor

Returns:
  a LazyAllreduceSum or a LaidOutTensor

def __add__(self, other):
  if (isinstance(other, LazyAllreduceSum) and
      self.mesh_impl == other.mesh_impl and
      self.mesh_axes == other.mesh_axes):
    return LazyAllreduceSum(
        self.mesh_impl,
        self.mesh_impl.slicewise(
            tf.add, self.laid_out_input, other.laid_out_input),
        self.mesh_axes,
        add_counter_fn=self.add_counter_fn)
  else:
    return self.mesh_impl.slicewise(
        tf.add, self.to_laid_out_tensor(), other.to_laid_out_tensor())
213,744
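The reason the fast path above is valid is that an allreduce-sum distributes over addition: adding the local slices first and running one allreduce gives the same result as running two allreduces and adding afterwards, which is exactly the work LazyAllreduceSum defers. A small numeric sketch (plain Python, illustrative values only, not the mtf classes):

# Illustrative: one deferred allreduce equals two eager allreduces followed by an add.
a_slices = [1.0, 2.0, 3.0, 4.0]      # per-processor partial sums of tensor a
b_slices = [10.0, 20.0, 30.0, 40.0]  # per-processor partial sums of tensor b

def allreduce_sum(slices):
  total = sum(slices)
  return [total] * len(slices)       # every processor ends up with the full sum

eager = [x + y for x, y in zip(allreduce_sum(a_slices), allreduce_sum(b_slices))]
lazy = allreduce_sum([x + y for x, y in zip(a_slices, b_slices)])   # one allreduce
assert eager == lazy == [110.0] * 4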
Create a Tensor.

Args:
  operation: the Operation that outputs this tensor
  shape: a Shape
  dtype: a tf.DType
  name: an optional string
  index: optional integer, the index among operation's output tensors

def __init__(self, operation, shape, dtype, name=None, index=0):
  if not isinstance(shape, Shape):
    raise ValueError("shape must be a Shape got %s" % shape.to_string)
  if not isinstance(dtype, tf.DType):
    raise ValueError("dtype must be a tf.DType got %s" % dtype)
  self._mesh = operation.mesh
  self._operation = operation
  self._shape = shape
  self._dtype = dtype
  if name is None:
    name = self.operation.name + ":" + str(index)
  self._name = name
213,745
Initializer.

Args:
  inputs: a list of Tensor
  mesh: an optional Mesh (if unspecified, will be inferred from first input)
  name: a string, which will get uniquified (in TensorFlow style)

Raises:
  ValueError: mesh was not provided and there were no inputs to infer from.

def __init__(self, inputs, mesh=None, name=None):
  if mesh is None:
    if not inputs:
      raise ValueError("mesh must be specified if no inputs")
    mesh = inputs[0].mesh
  self._inputs = inputs
  self._outputs = []
  self._mesh = mesh
  # In a default operation, all dimensions are splittable.
  self._splittable_dims, self._unsplittable_dims = (
      self._initialize_all_dimensions_as_splittable())
  assert name is not None
  self._name = mesh.graph.unique_name(name)
  mesh.graph.operations.append(self)
213,746
Create a shift operation.

Shift x right by +offset in dimension dim.
If offset is negative, shift left.
If wrap is true then wrap-around. Else, pad with zeros.

Args:
  x: a Tensor
  offset: an integer
  dim: a Dimension of x
  wrap: a boolean - whether to wrap or pad.
  name: an optional string

def __init__(self, x, offset, dim, wrap, name=None):
  super(ShiftOperation, self).__init__([x], name=name or "shift")
  self._dim = dim
  self._axis = x.shape.dims.index(dim)
  self._offset = offset
  self._wrap = wrap
  self._outputs = [Tensor(self, x.shape, x.dtype)]
213,788
Create a StackedVariable.

Args:
  vs: a list of Variables

def __init__(self, vs):
  shape = Shape([Dimension("stacked", len(vs))] + vs[0].shape.dims)
  name = "stacked/" + vs[0].name
  # TODO(noam): verify that vs are the same shape, etc.
  super(StackedVariable, self).__init__(
      vs[0].mesh, name, shape, vs[0].dtype, None, vs[0].trainable)
  self._name = name
  self._masters = [v.get_master() for v in vs]
  self._original_names = [v.name for v in vs]
  # Rerun to take the new output into account.
  self._splittable_dims, self._unsplittable_dims = (
      self._initialize_all_dimensions_as_splittable())
213,806