Dataset columns: docstring (string, length 52-499), function (string, length 67-35.2k), __index_level_0__ (int64, 52.6k-1.16M).
Encode a set of proposals with respect to some reference boxes Arguments: reference_boxes (Tensor): reference boxes proposals (Tensor): boxes to be encoded
def encode(self, reference_boxes, proposals): TO_REMOVE = 1 # TODO remove ex_widths = proposals[:, 2] - proposals[:, 0] + TO_REMOVE ex_heights = proposals[:, 3] - proposals[:, 1] + TO_REMOVE ex_ctr_x = proposals[:, 0] + 0.5 * ex_widths ex_ctr_y = proposals[:, 1] + 0.5 * ex_heights gt_widths = reference_boxes[:, 2] - reference_boxes[:, 0] + TO_REMOVE gt_heights = reference_boxes[:, 3] - reference_boxes[:, 1] + TO_REMOVE gt_ctr_x = reference_boxes[:, 0] + 0.5 * gt_widths gt_ctr_y = reference_boxes[:, 1] + 0.5 * gt_heights wx, wy, ww, wh = self.weights targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights targets_dw = ww * torch.log(gt_widths / ex_widths) targets_dh = wh * torch.log(gt_heights / ex_heights) targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1) return targets
123,156
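As a quick illustration of the encoding above, the arithmetic for a single proposal/reference pair can be run standalone. This is a hedged sketch that re-derives the (dx, dy, dw, dh) targets with unit weights (wx = wy = ww = wh = 1 assumed); it is not the class's own method.
import torch

proposals = torch.tensor([[10., 10., 50., 30.]])   # x1, y1, x2, y2
reference = torch.tensor([[12., 8., 60., 40.]])

TO_REMOVE = 1
ex_w = proposals[:, 2] - proposals[:, 0] + TO_REMOVE
ex_h = proposals[:, 3] - proposals[:, 1] + TO_REMOVE
ex_cx = proposals[:, 0] + 0.5 * ex_w
ex_cy = proposals[:, 1] + 0.5 * ex_h
gt_w = reference[:, 2] - reference[:, 0] + TO_REMOVE
gt_h = reference[:, 3] - reference[:, 1] + TO_REMOVE
gt_cx = reference[:, 0] + 0.5 * gt_w
gt_cy = reference[:, 1] + 0.5 * gt_h

# the (dx, dy, dw, dh) regression targets for this pair, unit weights
targets = torch.stack(((gt_cx - ex_cx) / ex_w,
                       (gt_cy - ex_cy) / ex_h,
                       torch.log(gt_w / ex_w),
                       torch.log(gt_h / ex_h)), dim=1)
print(targets)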
From a set of original boxes and encoded relative box offsets, get the decoded boxes. Arguments: rel_codes (Tensor): encoded boxes boxes (Tensor): reference boxes.
def decode(self, rel_codes, boxes): boxes = boxes.to(rel_codes.dtype) TO_REMOVE = 1 # TODO remove widths = boxes[:, 2] - boxes[:, 0] + TO_REMOVE heights = boxes[:, 3] - boxes[:, 1] + TO_REMOVE ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.weights dx = rel_codes[:, 0::4] / wx dy = rel_codes[:, 1::4] / wy dw = rel_codes[:, 2::4] / ww dh = rel_codes[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.bbox_xform_clip) dh = torch.clamp(dh, max=self.bbox_xform_clip) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] pred_boxes = torch.zeros_like(rel_codes) # x1 pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # y1 pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # x2 (note: "- 1" is correct; don't be fooled by the asymmetry) pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1 # y2 (note: "- 1" is correct; don't be fooled by the asymmetry) pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1 return pred_boxes
123,157
Create a single numpy array from a dataset. The dataset must have only one dimension, that is, the length of its `output_shapes` and `output_types` is 1, and its output shape must be `[]`, that is, every tensor in the dataset must be a scalar. Args: ds: a TF Dataset. batch_size: how many elements to read per pass Returns: a single numpy array.
def make_single_array(ds, batch_size=8*1024): if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple): raise ValueError('Dataset must have a single type and shape') nshapes = len(ds.output_shapes) if nshapes > 0: raise ValueError('Dataset must be comprised of scalars (TensorShape=[])') batches = [] with tf.Session() as sess: ds = ds.batch(batch_size) iterator = ds.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() with tqdm(desc='Elements', unit_scale=1) as pbar: try: while True: batches.append(sess.run(get_next)) pbar.update(len(batches[-1])) except tf.errors.OutOfRangeError: pass if batches: return np.concatenate(batches) return np.array([], dtype=ds.output_types.as_numpy_dtype)
123,179
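A usage sketch under the TF 1.x API used above, assuming make_single_array is importable from this module: build a scalar dataset and materialize it as a single NumPy array.
import tensorflow as tf

ds = tf.data.Dataset.range(10)            # scalar int64 elements, output_shapes == []
arr = make_single_array(ds, batch_size=4)  # runs the Session/iterator loop above
print(arr)                                 # [0 1 2 ... 9]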
Given dataset of key names, return histogram of moves/game. Move counts are written by the game players, so this is mostly useful for repair or backfill. Args: sess: TF session ds: TF dataset containing game move keys. batch_size: performance tuning parameter
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024): ds = ds.batch(batch_size) # Turns 'g_0000001234_m_133' into 'g_0000001234' ds = ds.map(lambda x: tf.strings.substr(x, 0, 12)) iterator = ds.make_initializable_iterator() sess.run(iterator.initializer) get_next = iterator.get_next() h = collections.Counter() try: while True: h.update(sess.run(get_next)) except tf.errors.OutOfRangeError: pass # NOTE: Cannot be truly sure the count is right till the end. return h
123,180
Constructor. Args: project_name: string name of GCP project having table. instance_name: string name of CBT instance in project. table_name: string name of CBT table in instance.
def __init__(self, project_name, instance_name, table_name): self.btspec = BigtableSpec(project_name, instance_name, table_name) self.bt_table = bigtable.Client( self.btspec.project, admin=True).instance( self.btspec.instance).table(self.btspec.table) self.tf_table = tf.contrib.cloud.BigtableClient( self.btspec.project, self.btspec.instance).table(self.btspec.table)
123,186
Delete rows related to the given game range. Args: format_str: a string to `.format()` by the game numbers in order to create the row prefixes. start_game: the starting game number of the deletion. end_game: the ending game number of the deletion.
def delete_row_range(self, format_str, start_game, end_game): row_keys = make_single_array( self.tf_table.keys_by_range_dataset( format_str.format(start_game), format_str.format(end_game))) row_keys = list(row_keys) if not row_keys: utils.dbg('No rows left for games %d..%d' % ( start_game, end_game)) return utils.dbg('Deleting %d rows: %s..%s' % ( len(row_keys), row_keys[0], row_keys[-1])) # Reverse the keys so that the queue is left in a more # sensible end state if you change your mind (say, due to a # mistake in the timestamp) and abort the process: there will # be a bit trimmed from the end, rather than a bit # trimmed out of the middle. row_keys.reverse() total_keys = len(row_keys) utils.dbg('Deleting total of %d keys' % total_keys) concurrency = min(MAX_BT_CONCURRENCY, multiprocessing.cpu_count() * 2) with multiprocessing.Pool(processes=concurrency) as pool: batches = [] with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar: for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS, row_keys): pbar.update(len(b)) batches.append((self.btspec, b)) if len(batches) >= concurrency: pool.map(_delete_rows, batches) batches = [] pool.map(_delete_rows, batches) batches = []
123,191
Require a given number of fresh games to be played. Args: number_fresh: integer, number of new fresh games needed Increments the cell `table_state=metadata:wait_for_game_number` by the given number of games. This will cause `self.wait_for_fresh_games()` to block until the game counter has reached this number.
def require_fresh_games(self, number_fresh): latest = self.latest_game_number table_state = self.bt_table.row(TABLE_STATE) table_state.set_cell(METADATA, WAIT_CELL, int(latest + number_fresh)) table_state.commit() print("== Setting wait cell to ", int(latest + number_fresh), flush=True)
123,194
Block caller until required new games have been played. Args: poll_interval: number of seconds to wait between checks If the cell `table_state=metadata:wait_for_game_number` exists, then block the caller, checking every `poll_interval` seconds, until `table_state=metadata:game_counter` is at least the value in that cell.
def wait_for_fresh_games(self, poll_interval=15.0): wait_until_game = self.read_wait_cell() if not wait_until_game: return latest_game = self.latest_game_number last_latest = latest_game while latest_game < wait_until_game: utils.dbg('Latest game {} not yet at required game {} ' '(+{}, {:0.3f} games/sec)'.format( latest_game, wait_until_game, latest_game - last_latest, (latest_game - last_latest) / poll_interval )) time.sleep(poll_interval) last_latest = latest_game latest_game = self.latest_game_number
123,195
Count the total moves in a game range. Args: game_begin: integer, starting game game_end: integer, ending game Uses the `ct_` keyspace for rapid move summary.
def count_moves_in_game_range(self, game_begin, game_end): rows = self.bt_table.read_rows( ROWCOUNT_PREFIX.format(game_begin), ROWCOUNT_PREFIX.format(game_end), filter_=bigtable_row_filters.ColumnRangeFilter( METADATA, MOVE_COUNT, MOVE_COUNT)) return sum([int(r.cell_value(METADATA, MOVE_COUNT)) for r in rows])
123,197
Randomly choose a given number of moves from the last n games. Args: n: number of games at the end of this GameQueue to source. moves: number of moves to be sampled from `n` games. shuffle: if True, shuffle the selected moves. column_family: name of the column family containing move examples. column: name of the column containing move examples. Returns: a dataset containing the selected moves.
def moves_from_last_n_games(self, n, moves, shuffle, column_family, column): self.wait_for_fresh_games() latest_game = self.latest_game_number utils.dbg('Latest game in %s: %s' % (self.btspec.table, latest_game)) if latest_game == 0: raise ValueError('Cannot find a latest game in the table') start = int(max(0, latest_game - n)) ds = self.moves_from_games(start, latest_game, moves, shuffle, column_family, column) return ds
123,199
Add move counts from the given histogram to the table. Used to update the move counts in an existing table. Should not be needed except for backfill or repair. Args: sess: TF session to use for doing a Bigtable write. h: a dictionary keyed by game row prefix ("g_0023561") whose values are the move counts for each game.
def _write_move_counts(self, sess, h): def gen(): for k, v in h.items(): # The keys in the histogram may be of type 'bytes' k = str(k, 'utf-8') vs = str(v) yield (k.replace('g_', 'ct_') + '_%d' % v, vs) yield (k + '_m_000', vs) mc = tf.data.Dataset.from_generator(gen, (tf.string, tf.string)) wr_op = self.tf_table.write(mc, column_families=[METADATA], columns=[MOVE_COUNT]) sess.run(wr_op)
123,200
Input function which provides batches for train or eval. Args: is_training: A boolean denoting whether the input is for training. data_dir: The directory containing the input data. batch_size: The number of samples per batch. num_epochs: The number of epochs to repeat the dataset. num_gpus: The number of gpus used for training. dtype: Data type to use for images/features Returns: A dataset that can be used for iteration.
def input_fn(is_training, data_dir, batch_size, num_epochs=1, num_gpus=None, dtype=tf.float32): mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER) filenames = get_filenames(is_training, data_dir) dataset = tf.data.Dataset.from_tensor_slices(filenames) if is_training: # Shuffle the input files dataset = dataset.shuffle(buffer_size=_NUM_TRAIN_FILES) # Convert to individual records dataset = dataset.flat_map(tf.data.TFRecordDataset) return resnet_run_loop.process_record_dataset( dataset=dataset, is_training=is_training, batch_size=batch_size, shuffle_buffer=_SHUFFLE_BUFFER, parse_record_fn=parse_record, num_epochs=num_epochs, num_gpus=num_gpus, examples_per_epoch=_NUM_IMAGES['train'] if is_training else None, dtype=dtype )
123,205
Retrieve the size of each block_layer in the ResNet model. The number of block layers used for the Resnet model varies according to the size of the model. This helper grabs the layer set we want, throwing an error if a non-standard size has been selected. Args: resnet_size: The number of convolutional layers needed in the model. Returns: A list of block sizes to use in building the model. Raises: ValueError: if invalid resnet_size is received.
def _get_block_sizes(resnet_size): choices = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3] } try: return choices[resnet_size] except KeyError: err = ('Could not find layers for selected Resnet size.\n' 'Size received: {}; sizes allowed: {}.'.format( resnet_size, choices.keys())) raise ValueError(err)
123,206
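The lookup itself is plain Python; a tiny sketch of the table for ResNet-50 and the error path for an unsupported size:
choices = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3],
           101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}
print(choices[50])        # [3, 4, 6, 3]
assert 42 not in choices  # _get_block_sizes(42) raises ValueError listing the allowed sizes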
Builds just the inference part of the model graph. Args: features: input features tensor. training: True if the model is training. params: A dictionary Returns: (policy_output, value_output, logits) tuple of tensors.
def model_inference_fn(features, training, params): mg_batchn = functools.partial( tf.layers.batch_normalization, axis=-1, momentum=.95, epsilon=1e-5, center=True, scale=True, fused=True, training=training) mg_conv2d = functools.partial( tf.layers.conv2d, filters=params['conv_width'], kernel_size=3, padding="same", data_format="channels_last", use_bias=False) mg_global_avgpool2d = functools.partial( tf.layers.average_pooling2d, pool_size=go.N, strides=1, padding="valid", data_format="channels_last") def mg_activation(inputs): if FLAGS.use_swish: return tf.nn.swish(inputs) return tf.nn.relu(inputs) def residual_inner(inputs): conv_layer1 = mg_batchn(mg_conv2d(inputs)) initial_output = mg_activation(conv_layer1) conv_layer2 = mg_batchn(mg_conv2d(initial_output)) return conv_layer2 def mg_res_layer(inputs): residual = residual_inner(inputs) output = mg_activation(inputs + residual) return output def mg_squeeze_excitation_layer(inputs): # Hu, J., Shen, L., & Sun, G. (2018). Squeeze-and-Excitation Networks. # 2018 IEEE/CVF Conference on Computer Vision, 7132-7141. # arXiv:1709.01507 [cs.CV] channels = params['conv_width'] ratio = FLAGS.SE_ratio assert channels % ratio == 0 residual = residual_inner(inputs) pool = mg_global_avgpool2d(residual) fc1 = tf.layers.dense(pool, units=channels // ratio) squeeze = mg_activation(fc1) if FLAGS.use_SE_bias: fc2 = tf.layers.dense(squeeze, units=2*channels) # Channels_last so axis = 3 = -1 gamma, bias = tf.split(fc2, 2, axis=3) else: gamma = tf.layers.dense(squeeze, units=channels) bias = 0 sig = tf.nn.sigmoid(gamma) # Explicitly signal the broadcast. scale = tf.reshape(sig, [-1, 1, 1, channels]) excitation = tf.multiply(scale, residual) + bias return mg_activation(inputs + excitation) initial_block = mg_activation(mg_batchn(mg_conv2d(features))) # the shared stack shared_output = initial_block for _ in range(params['trunk_layers']): if FLAGS.use_SE or FLAGS.use_SE_bias: shared_output = mg_squeeze_excitation_layer(shared_output) else: shared_output = mg_res_layer(shared_output) # Policy head policy_conv = mg_conv2d( shared_output, filters=params['policy_conv_width'], kernel_size=1) policy_conv = mg_activation(mg_batchn(policy_conv, center=False, scale=False)) logits = tf.layers.dense( tf.reshape( policy_conv, [-1, params['policy_conv_width'] * go.N * go.N]), go.N * go.N + 1) policy_output = tf.nn.softmax(logits, name='policy_output') # Value head value_conv = mg_conv2d( shared_output, filters=params['value_conv_width'], kernel_size=1) value_conv = mg_activation(mg_batchn(value_conv, center=False, scale=False)) value_fc_hidden = mg_activation(tf.layers.dense( tf.reshape(value_conv, [-1, params['value_conv_width'] * go.N * go.N]), params['fc_width'])) value_output = tf.nn.tanh( tf.reshape(tf.layers.dense(value_fc_hidden, 1), [-1]), name='value_output') return policy_output, value_output, logits
123,248
Take the latest checkpoint and copy it to model_path. Assumes that all relevant model files are prefixed by the same name. (For example, foo.index, foo.meta and foo.data-00000-of-00001). Args: model_path: The path (can be a gs:// path) to export model
def export_model(model_path): estimator = tf.estimator.Estimator(model_fn, model_dir=FLAGS.work_dir, params=FLAGS.flag_values_dict()) latest_checkpoint = estimator.latest_checkpoint() all_checkpoint_files = tf.gfile.Glob(latest_checkpoint + '*') for filename in all_checkpoint_files: suffix = filename.partition(latest_checkpoint)[2] destination_path = model_path + suffix print("Copying {} to {}".format(filename, destination_path)) tf.gfile.Copy(filename, destination_path)
123,254
Parses the output of --helpfull. Args: help_output: str, the full output of --helpfull. Returns: A set of flags that are valid flags.
def parse_helpfull_output(help_output, regex=FLAG_HELP_RE_PY): valid_flags = set() for _, no_prefix, flag_name in regex.findall(help_output): valid_flags.add('--' + flag_name) if no_prefix: valid_flags.add('--no' + flag_name) return valid_flags
123,274
Prepares a subprocess command by running --helpfull and masking flags. Args: subprocess_cmd: List[str], what would be passed into subprocess.call() i.e. ['python', 'train.py', '--flagfile=flags'] Returns: ['python', 'train.py', '--train_flag=blah', '--more_flags']
def prepare_subprocess_cmd(subprocess_cmd): help_cmd = subprocess_cmd + ['--helpfull'] help_output = subprocess.run(help_cmd, stdout=subprocess.PIPE).stdout help_output = help_output.decode('ascii') if 'python' in subprocess_cmd[0]: valid_flags = parse_helpfull_output(help_output) else: valid_flags = parse_helpfull_output(help_output, regex=FLAG_HELP_RE_CC) parsed_flags = flags.FlagValues().read_flags_from_files(subprocess_cmd[1:]) filtered_flags = filter_flags(parsed_flags, valid_flags) return [subprocess_cmd[0]] + filtered_flags
123,276
Split x into different heads, and transpose the resulting value. The tensor is transposed to ensure the inner dimensions hold the correct values during the matrix multiplication. Args: x: A tensor with shape [batch_size, length, hidden_size] Returns: A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]
def split_heads(self, x): with tf.name_scope("split_heads"): batch_size = tf.shape(x)[0] length = tf.shape(x)[1] # Calculate depth of last dimension after it has been split. depth = (self.hidden_size // self.num_heads) # Split the last dimension x = tf.reshape(x, [batch_size, length, self.num_heads, depth]) # Transpose the result return tf.transpose(x, [0, 2, 1, 3])
123,343
Combine tensor that has been split. Args: x: A tensor [batch_size, num_heads, length, hidden_size/num_heads] Returns: A tensor with shape [batch_size, length, hidden_size]
def combine_heads(self, x): with tf.name_scope("combine_heads"): batch_size = tf.shape(x)[0] length = tf.shape(x)[2] x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth] return tf.reshape(x, [batch_size, length, self.hidden_size])
123,344
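A shape sketch of the same reshape/transpose logic in NumPy (hedged: this mirrors split_heads/combine_heads outside TF, with hidden_size=8 and num_heads=2 assumed):
import numpy as np

batch, length, hidden, heads = 3, 5, 8, 2
x = np.random.rand(batch, length, hidden)
split = x.reshape(batch, length, heads, hidden // heads).transpose(0, 2, 1, 3)
print(split.shape)                        # (3, 2, 5, 4)
combined = split.transpose(0, 2, 1, 3).reshape(batch, length, hidden)
assert np.allclose(combined, x)           # combine_heads inverts split_heads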
r"""Replace characters that aren't in the alphabet and append "_" to token. Apply three transformations to the token: 1. Replace underline character "_" with "\u", and backslash "\" with "\\". 2. Replace characters outside of the alphabet with "\###;", where ### is the character's Unicode code point. 3. Appends "_" to mark the end of a token. Args: token: unicode string to be escaped alphabet: list of all known characters Returns: escaped string
def _escape_token(token, alphabet): token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u") ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token] return u"".join(ret) + "_"
123,352
r"""Replaces escaped characters in the token with their unescaped versions. Applies inverse transformations as _escape_token(): 1. Replace "\u" with "_", and "\\" with "\". 2. Replace "\###;" with the unicode character the ### refers to. Args: token: escaped string Returns: unescaped string
def _unescape_token(token): def match(m): # Check if the matched strings are '\u' or '\\'. if m.group(1) is None: return u"_" if m.group(0) == u"\\u" else u"\\" # If m.group(1) exists, try and return unicode character. try: return six.unichr(int(m.group(1))) except (ValueError, OverflowError) as _: return _UNDEFINED_UNICODE # Use match function to replace escaped substrings in the token. return _UNESCAPE_REGEX.sub(match, token)
123,353
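A self-contained round trip of the two transforms. The regex below is an assumption about what _UNESCAPE_REGEX (defined elsewhere in the module) looks like, and chr stands in for six.unichr on Python 3; the alphabet is a toy one.
import re

UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")  # assumed shape of _UNESCAPE_REGEX

def escape(token, alphabet):
    token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
    return u"".join(c if c in alphabet and c != u"\n" else r"\%d;" % ord(c)
                    for c in token) + "_"

def unescape(token):
    def match(m):
        if m.group(1) is None:
            return u"_" if m.group(0) == u"\\u" else u"\\"
        return chr(int(m.group(1)))   # six.unichr on Python 2
    return UNESCAPE_REGEX.sub(match, token)

alphabet = set("abcu_\\")                   # toy alphabet; 'é' is outside it
escaped = escape(u"ab\u00e9_c", alphabet)
print(escaped)                              # ab\233;\uc_
assert unescape(escaped[:-1]) == u"ab\u00e9_c"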
Return token counts of words in the files. Samples file_byte_limit bytes from each file, and counts the words that appear in the samples. The samples are semi-evenly distributed across the file. Args: files: List of filepaths file_byte_limit: Max number of bytes that will be read from each file. Returns: Dictionary mapping tokens to the number of times they appear in the sampled lines from the files.
def _count_tokens(files, file_byte_limit=1e6): token_counts = collections.defaultdict(int) for filepath in files: with tf.gfile.Open(filepath, mode="r") as reader: file_byte_budget = file_byte_limit counter = 0 lines_to_skip = int(reader.size() / (file_byte_budget * 2)) for line in reader: if counter < lines_to_skip: counter += 1 else: if file_byte_budget < 0: break line = line.strip() file_byte_budget -= len(line) counter = 0 # Add words to token counts for token in _split_string_to_tokens(_native_to_unicode(line)): token_counts[token] += 1 return token_counts
123,354
Return a bucketed list of subtokens that are filtered by count. Args: subtoken_counts: defaultdict mapping subtokens to their counts min_count: int count used to filter subtokens Returns: List of subtoken sets, where subtokens in set i have the same length=i.
def _filter_and_bucket_subtokens(subtoken_counts, min_count): # Create list of buckets, where subtokens in bucket i have length i. subtoken_buckets = [] for subtoken, count in six.iteritems(subtoken_counts): if count < min_count: # Filter out subtokens that don't appear enough continue while len(subtoken_buckets) <= len(subtoken): subtoken_buckets.append(set()) subtoken_buckets[len(subtoken)].add(subtoken) return subtoken_buckets
123,359
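A plain-Python illustration of the bucketing: subtokens below min_count are dropped, and survivors land in the bucket indexed by their length.
import collections

counts = collections.defaultdict(int, {"a": 5, "ab": 1, "abc": 3})
min_count = 2
buckets = []
for subtoken, count in counts.items():
    if count < min_count:
        continue                      # 'ab' is filtered out
    while len(buckets) <= len(subtoken):
        buckets.append(set())
    buckets[len(subtoken)].add(subtoken)
print(buckets)                        # [set(), {'a'}, set(), {'abc'}]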
Add operations to classify a batch of input images. Args: inputs: A Tensor representing a batch of input images. training: A boolean. Set to True to add operations required only when training the classifier. Returns: A logits Tensor with shape [<batch_size>, self.num_classes].
def __call__(self, inputs, training): # Drop batch size from shape logging. mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_INITIAL_SHAPE, value=inputs.shape.as_list()[1:]) with self._model_variable_scope(): if self.data_format == 'channels_first': # Convert the inputs from channels_last (NHWC) to channels_first (NCHW). # This provides a large performance boost on GPU. See # https://www.tensorflow.org/performance/performance_guide#data_formats inputs = tf.transpose(inputs, [0, 3, 1, 2]) inputs = conv2d_fixed_padding( inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size, strides=self.conv_stride, data_format=self.data_format) inputs = tf.identity(inputs, 'initial_conv') # We do not include batch normalization or activation functions in V2 # for the initial conv1 because the first ResNet unit will perform these # for both the shortcut and non-shortcut paths as part of the first # block's projection. Cf. Appendix of [2]. if self.resnet_version == 1: inputs = batch_norm(inputs, training, self.data_format) mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU) inputs = tf.nn.relu(inputs) if self.first_pool_size: pooled_inputs = tf.layers.max_pooling2d( inputs=inputs, pool_size=self.first_pool_size, strides=self.first_pool_stride, padding='SAME', data_format=self.data_format) resnet_log_helper.log_max_pool(input_tensor=inputs, output_tensor=pooled_inputs) inputs = tf.identity(pooled_inputs, 'initial_max_pool') for i, num_blocks in enumerate(self.block_sizes): num_filters = self.num_filters * (2**i) inputs = block_layer( inputs=inputs, filters=num_filters, bottleneck=self.bottleneck, block_fn=self.block_fn, blocks=num_blocks, strides=self.block_strides[i], training=training, name='block_layer{}'.format(i + 1), data_format=self.data_format) # Only apply the BN and ReLU for model that does pre_activation in each # building/bottleneck block, eg resnet V2. if self.pre_activation: inputs = batch_norm(inputs, training, self.data_format) mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU) inputs = tf.nn.relu(inputs) # The current top layer has shape # `batch_size x pool_size x pool_size x final_size`. # ResNet does an Average Pooling layer over pool_size, # but that is the same as doing a reduce_mean. We do a reduce_mean # here because it performs better than AveragePooling2D. axes = [2, 3] if self.data_format == 'channels_first' else [1, 2] inputs = tf.reduce_mean(inputs, axes, keepdims=True) inputs = tf.identity(inputs, 'final_reduce_mean') inputs = tf.reshape(inputs, [-1, self.final_size]) mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_DENSE, value=self.num_classes) inputs = tf.layers.dense(inputs=inputs, units=self.num_classes) inputs = tf.identity(inputs, 'final_dense') # Drop batch size from shape logging. mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_FINAL_SHAPE, value=inputs.shape.as_list()[1:]) return inputs
123,431
Compile raw files into a single file for each language. Args: raw_dir: Directory containing downloaded raw files. raw_files: Dict containing filenames of input and target data. {"inputs": list of files containing data in input language "targets": list of files containing corresponding data in target language } tag: String to append to the compiled filename. Returns: Full path of compiled input and target files.
def compile_files(raw_dir, raw_files, tag): tf.logging.info("Compiling files with tag %s." % tag) filename = "%s-%s" % (_PREFIX, tag) input_compiled_file = os.path.join(raw_dir, filename + ".lang1") target_compiled_file = os.path.join(raw_dir, filename + ".lang2") with tf.gfile.Open(input_compiled_file, mode="w") as input_writer: with tf.gfile.Open(target_compiled_file, mode="w") as target_writer: for i in range(len(raw_files["inputs"])): input_file = raw_files["inputs"][i] target_file = raw_files["targets"][i] tf.logging.info("Reading files %s and %s." % (input_file, target_file)) write_file(input_writer, input_file) write_file(target_writer, target_file) return input_compiled_file, target_compiled_file
123,465
Initialize layers to build Transformer model. Args: params: hyperparameter object defining layer sizes, dropout values, etc. train: boolean indicating whether the model is in training mode. Used to determine if dropout layers should be added.
def __init__(self, params, train): self.train = train self.params = params self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights( params.vocab_size, params.hidden_size) self.encoder_stack = EncoderStack(params, train) self.decoder_stack = DecoderStack(params, train)
123,474
Generate continuous representation for inputs. Args: inputs: int tensor with shape [batch_size, input_length]. attention_bias: float tensor with shape [batch_size, 1, 1, input_length] Returns: float tensor with shape [batch_size, input_length, hidden_size]
def encode(self, inputs, attention_bias): with tf.name_scope("encode"): # Prepare inputs to the layer stack by adding positional encodings and # applying dropout. embedded_inputs = self.embedding_softmax_layer(inputs) inputs_padding = model_utils.get_padding(inputs) with tf.name_scope("add_pos_encoding"): length = tf.shape(embedded_inputs)[1] pos_encoding = model_utils.get_position_encoding( length, self.params.hidden_size) encoder_inputs = embedded_inputs + pos_encoding if self.train: mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, value=self.params.layer_postprocess_dropout) encoder_inputs = tf.nn.dropout( encoder_inputs, 1 - self.params.layer_postprocess_dropout) return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding)
123,476
Generate logits for each value in the target sequence. Args: targets: target values for the output sequence. int tensor with shape [batch_size, target_length] encoder_outputs: continuous representation of input sequence. float tensor with shape [batch_size, input_length, hidden_size] attention_bias: float tensor with shape [batch_size, 1, 1, input_length] Returns: float32 tensor with shape [batch_size, target_length, vocab_size]
def decode(self, targets, encoder_outputs, attention_bias): with tf.name_scope("decode"): # Prepare inputs to decoder layers by shifting targets, adding positional # encoding and applying dropout. decoder_inputs = self.embedding_softmax_layer(targets) with tf.name_scope("shift_targets"): # Shift targets to the right, and remove the last element decoder_inputs = tf.pad( decoder_inputs, [[0, 0], [1, 0], [0, 0]])[:, :-1, :] with tf.name_scope("add_pos_encoding"): length = tf.shape(decoder_inputs)[1] decoder_inputs += model_utils.get_position_encoding( length, self.params.hidden_size) if self.train: mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, value=self.params.layer_postprocess_dropout) decoder_inputs = tf.nn.dropout( decoder_inputs, 1 - self.params.layer_postprocess_dropout) # Run values decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias( length) outputs = self.decoder_stack( decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias) logits = self.embedding_softmax_layer.linear(outputs) return logits
123,477
Separate the rating dataframe into train and test sets. Filters out users with two or fewer distinct timestamps. Creates train set and test set. The test set contains all the last interactions of users with more than two distinct timestamps. Args: ratings_df: pandas dataframe with columns 'userId', 'movieId', 'rating', 'timestamp'. Returns: tuple of dataframes (filtered_ratings, train_ratings, test_ratings).
def _preprocess_movie_lens(ratings_df): ratings_df["data"] = 1.0 num_timestamps = ratings_df[["userId", "timestamp"]].groupby( "userId").nunique() last_user_timestamp = ratings_df[["userId", "timestamp"]].groupby( "userId").max() ratings_df["numberOfTimestamps"] = ratings_df["userId"].apply( lambda x: num_timestamps["timestamp"][x]) ratings_df["lastTimestamp"] = ratings_df["userId"].apply( lambda x: last_user_timestamp["timestamp"][x]) ratings_df = ratings_df[ratings_df["numberOfTimestamps"] > 2] ratings_df = _create_row_col_indices(ratings_df) train_ratings_df = ratings_df[ ratings_df["timestamp"] < ratings_df["lastTimestamp"]] test_ratings_df = ratings_df[ ratings_df["timestamp"] == ratings_df["lastTimestamp"]] return ratings_df, train_ratings_df, test_ratings_df
123,490
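A toy run of the timestamp-based split (pandas only). This sketch uses .map instead of the apply-based lookups above and omits _create_row_col_indices; column names match the docstring.
import pandas as pd

ratings = pd.DataFrame({
    "userId":    [1, 1, 1, 2, 2],
    "movieId":   [10, 11, 12, 10, 11],
    "rating":    [4.0, 3.5, 5.0, 2.0, 4.5],
    "timestamp": [100, 200, 300, 100, 100],
})
n_ts = ratings.groupby("userId")["timestamp"].nunique()
last = ratings.groupby("userId")["timestamp"].max()
ratings["numberOfTimestamps"] = ratings["userId"].map(n_ts)
ratings["lastTimestamp"] = ratings["userId"].map(last)
kept = ratings[ratings["numberOfTimestamps"] > 2]        # user 2 is dropped
train = kept[kept["timestamp"] < kept["lastTimestamp"]]
test = kept[kept["timestamp"] == kept["lastTimestamp"]]
print(len(train), len(test))                             # 2 1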
Get token embeddings of x. Args: x: An int64 tensor with shape [batch_size, length] Returns: embeddings: float32 tensor with shape [batch_size, length, embedding_size] padding: float32 tensor with shape [batch_size, length] indicating the locations of the padding tokens in x.
def call(self, x): with tf.name_scope("embedding"): embeddings = tf.gather(self.shared_weights, x) # Scale embedding by the sqrt of the hidden size embeddings *= self.hidden_size ** 0.5 # Create binary array of size [batch_size, length] # where 1 = padding, 0 = not padding padding = model_utils.get_padding(x) # Set all padding embedding values to 0 embeddings *= tf.expand_dims(1 - padding, -1) return embeddings
123,516
Computes logits by running x through a linear layer. Args: x: A float32 tensor with shape [batch_size, length, hidden_size] Returns: float32 tensor with shape [batch_size, length, vocab_size].
def linear(self, x): with tf.name_scope("presoftmax_linear"): batch_size = tf.shape(x)[0] length = tf.shape(x)[1] x = tf.reshape(x, [-1, self.hidden_size]) logits = tf.matmul(x, self.shared_weights, transpose_b=True) return tf.reshape(logits, [batch_size, length, self.vocab_size])
123,517
Given a set of BoxList containing the `labels` field, return a set of BoxList for which `labels > 0`. Arguments: boxes (list of BoxList)
def keep_only_positive_boxes(boxes): assert isinstance(boxes, (list, tuple)) assert isinstance(boxes[0], BoxList) assert boxes[0].has_field("labels") positive_boxes = [] positive_inds = [] num_boxes = 0 for boxes_per_image in boxes: labels = boxes_per_image.get_field("labels") inds_mask = labels > 0 inds = inds_mask.nonzero().squeeze(1) positive_boxes.append(boxes_per_image[inds]) positive_inds.append(inds_mask) return positive_boxes, positive_inds
123,526
Prints the current MCTS search status to stderr. Reports the current search path, root node's child_Q, root node's child_N, the most visited path in a format that can be parsed by one of the STDERR_HANDLERS in minigui.ts. Args: leaves: list of leaf MCTSNodes returned by tree_search().
def _minigui_report_search_status(self, leaves): root = self._player.get_root() msg = { "id": hex(id(root)), "n": int(root.N), "q": float(root.Q), } msg["childQ"] = [int(round(q * 1000)) for q in root.child_Q] msg["childN"] = [int(n) for n in root.child_N] ranked_children = root.rank_children() variations = {} for i in ranked_children[:15]: if root.child_N[i] == 0 or i not in root.children: break c = coords.to_gtp(coords.from_flat(i)) child = root.children[i] nodes = child.most_visited_path_nodes() moves = [coords.to_gtp(coords.from_flat(m.fmove)) for m in nodes] variations[c] = { "n": int(root.child_N[i]), "q": float(root.child_Q[i]), "moves": [c] + moves, } if leaves: path = [] leaf = leaves[0] while leaf != root: path.append(leaf.fmove) leaf = leaf.parent if path: path.reverse() variations["live"] = { "n": int(root.child_N[path[0]]), "q": float(root.child_Q[path[0]]), "moves": [coords.to_gtp(coords.from_flat(m)) for m in path] } if variations: msg["variations"] = variations dbg("mg-update:%s" % json.dumps(msg, sort_keys=True))
123,552
Given a list of strings, removes blanks and replaces the space character with a space. Option to remove repetitions (e.g. 'abbca' -> 'abca'). Arguments: sequences: list of 1-d arrays of integers remove_repetitions (boolean, optional): If true, repeating characters are removed. Defaults to False.
def process_strings(self, sequences, remove_repetitions=False): processed_strings = [] for sequence in sequences: string = self.process_string(remove_repetitions, sequence).strip() processed_strings.append(string) return processed_strings
123,589
Computes the Word Error Rate, defined as the edit distance between the two provided sentences after tokenizing to words. Arguments: s1 (string): space-separated sentence s2 (string): space-separated sentence
def wer(self, s1, s2): # build mapping of words to integers b = set(s1.split() + s2.split()) word2char = dict(zip(b, range(len(b)))) # map the words to a char array (Levenshtein packages only accepts # strings) w1 = [chr(word2char[w]) for w in s1.split()] w2 = [chr(word2char[w]) for w in s2.split()] return Lev.distance(''.join(w1), ''.join(w2))
123,591
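A worked example of the word-to-char mapping trick (hedged: a small DP edit distance stands in for the Levenshtein package used above, so the snippet is self-contained).
def edit_distance(a, b):
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

s1, s2 = "the cat sat", "the cat sat down"
word2char = dict(zip(set(s1.split() + s2.split()), range(100)))
w1 = "".join(chr(word2char[w]) for w in s1.split())   # each word becomes one char
w2 = "".join(chr(word2char[w]) for w in s2.split())
print(edit_distance(w1, w2))                           # 1 -- one word-level edit ('down' inserted)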
Returns the argmax decoding given the probability matrix. Removes repeated elements in the sequence, as well as blanks. Arguments: probs: Tensor of character probabilities from the network. Expected shape of seq_length x batch x output_dim sizes(optional): Size of each sequence in the mini-batch Returns: strings: sequences of the model's best guess for the transcription on inputs
def decode(self, probs, sizes=None): _, max_probs = torch.max(probs.transpose(0, 1), 2) strings = self.convert_to_strings(max_probs.view(max_probs.size(0), max_probs.size(1)), sizes) return self.process_strings(strings, remove_repetitions=True)
123,594
Returns an iterable of tf.Examples. Args: data_extracts: An iterable of (position, pi, result) tuples
def make_dataset_from_selfplay(data_extracts): tf_examples = (make_tf_example(features_lib.extract_features(pos), pi, result) for pos, pi, result in data_extracts) return tf_examples
123,638
Return a boolean representing whether a model should be stopped. Args: stop_threshold: float, the threshold above which a model should stop training. eval_metric: float, the current value of the relevant metric to check. Returns: True if training should stop, False otherwise. Raises: ValueError: if either stop_threshold or eval_metric is not a number
def past_stop_threshold(stop_threshold, eval_metric): if stop_threshold is None: return False if not isinstance(stop_threshold, numbers.Number): raise ValueError("Threshold for checking stop conditions must be a number.") if not isinstance(eval_metric, numbers.Number): raise ValueError("Eval metric being checked against stop conditions " "must be a number.") if eval_metric >= stop_threshold: tf.logging.info( "Stop threshold of {} was passed with metric value {}.".format( stop_threshold, eval_metric)) return True return False
123,649
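A usage sketch with hypothetical accuracy values, assuming past_stop_threshold above is importable and TF 1.x logging is available: stop training once eval accuracy crosses the target.
target_accuracy = 0.749
for accuracy in (0.62, 0.71, 0.75):
    if past_stop_threshold(target_accuracy, accuracy):
        print("stopping at accuracy", accuracy)   # fires at 0.75
        break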
Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank
def all_gather(data): world_size = get_world_size() if world_size == 1: return [data] # serialized to a Tensor buffer = pickle.dumps(data) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank local_size = torch.IntTensor([tensor.numel()]).to("cuda") size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)] dist.all_gather(size_list, local_size) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes tensor_list = [] for _ in size_list: tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda")) if local_size != max_size: padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda") tensor = torch.cat((tensor, padding), dim=0) dist.all_gather(tensor_list, tensor) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list
123,655
evaluate dataset using different methods based on dataset type. Args: dataset: Dataset object predictions(list[BoxList]): each item in the list represents the prediction results for one image. output_folder: output folder, to save evaluation files or results. **kwargs: other args. Returns: evaluation result
def evaluate(dataset, predictions, output_folder, **kwargs): args = dict( dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs ) if isinstance(dataset, datasets.COCODataset): return coco_evaluation(**args) elif isinstance(dataset, datasets.PascalVOCDataset): return voc_evaluation(**args) else: dataset_name = dataset.__class__.__name__ raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name))
123,669
Factory for getting a list of TensorFlow hooks for training by name. Args: name_list: a list of strings to name desired hook classes. Allowed: LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined as keys in HOOKS **kwargs: a dictionary of arguments to the hooks. Returns: list of instantiated hooks, ready to be used in a classifier.train call. Raises: ValueError: if an unrecognized name is passed.
def get_train_hooks(name_list, **kwargs): if not name_list: return [] train_hooks = [] for name in name_list: hook_name = HOOKS.get(name.strip().lower()) if hook_name is None: raise ValueError('Unrecognized training hook requested: {}'.format(name)) else: train_hooks.append(hook_name(**kwargs)) return train_hooks
123,672
Function to get LoggingTensorHook. Args: every_n_iter: `int`, print the values of `tensors` once every N local steps taken on the current worker. tensors_to_log: List of tensor names or dictionary mapping labels to tensor names. If not set, log _TENSORS_TO_LOG by default. **kwargs: a dictionary of arguments to LoggingTensorHook. Returns: Returns a LoggingTensorHook with a standard set of tensors that will be printed to stdout.
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs): # pylint: disable=unused-argument if tensors_to_log is None: tensors_to_log = _TENSORS_TO_LOG return tf.train.LoggingTensorHook( tensors=tensors_to_log, every_n_iter=every_n_iter)
123,673
Function to get LoggingMetricHook. Args: benchmark_log_dir: `string`, directory path to save the metric log. tensors_to_log: List of tensor names or dictionary mapping labels to tensor names. If not set, log _TENSORS_TO_LOG by default. every_n_secs: `int`, the frequency for logging the metric. Default to every 10 mins. Returns: Returns a LoggingMetricHook that logs the given tensors to the benchmark log directory every `every_n_secs` seconds.
def get_logging_metric_hook(benchmark_log_dir=None, tensors_to_log=None, every_n_secs=600, **kwargs): # pylint: disable=unused-argument if benchmark_log_dir is None: raise ValueError("metric_log_dir should be provided to use metric logger") if tensors_to_log is None: tensors_to_log = _TENSORS_TO_LOG return metric_hook.LoggingMetricHook( tensors=tensors_to_log, log_dir=benchmark_log_dir, every_n_secs=every_n_secs)
123,675
Called after each call to run(). Args: run_context: A SessionRunContext object. run_values: A SessionRunValues object.
def after_run(self, run_context, run_values): # pylint: disable=unused-argument global_step = run_values.results if self._timer.should_trigger_for_step( global_step) and global_step > self._warm_steps: elapsed_time, elapsed_steps = self._timer.update_last_triggered_step( global_step) if elapsed_time is not None: self._step_train_time += elapsed_time self._total_steps += elapsed_steps # average examples per second is based on the total (accumulative) # training steps and training time so far average_examples_per_sec = self._batch_size * ( self._total_steps / self._step_train_time) # current examples per second is based on the elapsed training steps # and training time per batch current_examples_per_sec = self._batch_size * ( elapsed_steps / elapsed_time) # Current examples/sec followed by average examples/sec tf.logging.info('Batch [%g]: current exp/sec = %g, average exp/sec = ' '%g', self._total_steps, current_examples_per_sec, average_examples_per_sec)
123,750
Adds the predicted boxes on top of the image Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `labels`.
def overlay_boxes(self, image, predictions): labels = predictions.get_field("labels") boxes = predictions.bbox colors = self.compute_colors_for_labels(labels).tolist() for box, color in zip(boxes, colors): box = box.to(torch.int64) top_left, bottom_right = box[:2].tolist(), box[2:].tolist() image = cv2.rectangle( image, tuple(top_left), tuple(bottom_right), tuple(color), 1 ) return image
123,758
Adds the instances contours for each predicted object. Each label has a different color. Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `mask` and `labels`.
def overlay_mask(self, image, predictions): masks = predictions.get_field("mask").numpy() labels = predictions.get_field("labels") colors = self.compute_colors_for_labels(labels).tolist() for mask, color in zip(masks, colors): thresh = mask[0, :, :, None] contours, hierarchy = cv2_util.findContours( thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE ) image = cv2.drawContours(image, contours, -1, color, 3) composite = image return composite
123,759
Create a montage showing the probability heatmaps for each one of the detected objects Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `mask`.
def create_mask_montage(self, image, predictions): masks = predictions.get_field("mask") masks_per_dim = self.masks_per_dim masks = L.interpolate( masks.float(), scale_factor=1 / masks_per_dim ).byte() height, width = masks.shape[-2:] max_masks = masks_per_dim ** 2 masks = masks[:max_masks] # handle case where we have less detections than max_masks if len(masks) < max_masks: masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8) masks_padded[: len(masks)] = masks masks = masks_padded masks = masks.reshape(masks_per_dim, masks_per_dim, height, width) result = torch.zeros( (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8 ) for y in range(masks_per_dim): start_y = y * height end_y = (y + 1) * height for x in range(masks_per_dim): start_x = x * width end_x = (x + 1) * width result[start_y:end_y, start_x:end_x] = masks[y, x] return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)
123,761
Adds detected class names and scores in the positions defined by the top-left corner of the predicted bounding box Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `scores` and `labels`.
def overlay_class_names(self, image, predictions): scores = predictions.get_field("scores").tolist() labels = predictions.get_field("labels").tolist() labels = [self.CATEGORIES[i] for i in labels] boxes = predictions.bbox template = "{}: {:.2f}" for box, score, label in zip(boxes, scores, labels): x, y = box[:2] s = template.format(label, score) cv2.putText( image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1 ) return image
123,762
Log the evaluation result for an estimator. The evaluation result is a dictionary that contains metrics defined in model_fn. It also contains an entry for global_step which contains the value of the global step when evaluation was performed. Args: eval_results: dict, the result of evaluate() from an estimator.
def log_estimator_evaluation_result(self, eval_results): if not isinstance(eval_results, dict): tf.logging.warning("eval_results should be a dictionary for logging. Got %s", type(eval_results)) return global_step = eval_results[tf.GraphKeys.GLOBAL_STEP] for key in sorted(eval_results): if key != tf.GraphKeys.GLOBAL_STEP: self.log_metric(key, eval_results[key], global_step=global_step)
123,772
Collect most of the TF runtime information for the local env. The schema of the run info follows official/benchmark/datastore/schema. Args: model_name: string, the name of the model.
def log_run_info(self, model_name): run_info = { "model_name": model_name, "machine_config": {}, "run_date": datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN)} _collect_tensorflow_info(run_info) _collect_tensorflow_environment_variables(run_info) _collect_cpu_info(run_info) _collect_gpu_info(run_info) _collect_memory_info(run_info) with tf.gfile.GFile(os.path.join( self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), "w") as f: try: json.dump(run_info, f) f.write("\n") except (TypeError, ValueError) as e: tf.logging.warning("Failed to dump benchmark run info to log file: %s", e)
123,774
Write all eval_records to eval_table. In addition to writing new rows, the cell `metadata:eval_game_counter` in row `table_state` must be updated. Args: bt_table: bigtable table to add rows to. game_data: metadata pairs (column name, value) for each eval record. last_game: the last game number currently recorded in `table_state`.
def write_eval_records(bt_table, game_data, last_game): eval_num = last_game # Each column counts as a mutation so max rows is ~10000 GAMES_PER_COMMIT = 2000 for games in grouper(tqdm(game_data), GAMES_PER_COMMIT): assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Prev row doesn't exist" assert bt_table.read_row(EVAL_PREFIX.format(eval_num+1)) is None, "Row already exists" rows = [] for i, metadata in enumerate(games): eval_num += 1 row_name = EVAL_PREFIX.format(eval_num) row = bt_table.row(row_name) for column, value in metadata: row.set_cell(METADATA, column, value) rows.append(row) # For each batch of games print a couple of the rows being added. if i < 5 or i + 5 > len(games): print("\t", i, row_name, metadata[6][1]) if eval_num == last_game + len(games): test = input("Commit ('y'/'yes' required): ") if test.lower() not in ('y', 'yes'): break # TODO(derek): Figure out how to condition on atomic counter update. # Condition all updates on the current value of last_game game_num_update = bt_table.row(TABLE_STATE) game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num) print(TABLE_STATE, eval_num) response = bt_table.mutate_rows(rows) # validate that all rows written successfully any_bad = False for i, status in enumerate(response): if status.code != 0: print("Row number {} failed to write {}".format(i, status)) any_bad = True if any_bad: break game_num_update.commit()
123,781
Run the given subprocess command in a coroutine. Args: *cmd: the command to run and its arguments. Returns: The output that the command wrote to stdout as a list of strings, one line per element (stderr output is piped to stdout). Raises: RuntimeError: if the command returns a non-zero result.
async def run(*cmd): stdout = await checked_run(*cmd) log_path = os.path.join(FLAGS.base_dir, get_cmd_name(cmd) + '.log') with gfile.Open(log_path, 'a') as f: f.write(expand_cmd_str(cmd)) f.write('\n') f.write(stdout) f.write('\n') # Split stdout into lines. return stdout.split('\n')
123,785
Run selfplay and write a training chunk to the fsdb golden_chunk_dir. Args: state: the RL loop State instance. flagfile: the name of the flagfile to use for selfplay, either 'selfplay' (the default) or 'bootstrap'.
async def selfplay(state, flagfile='selfplay'): output_dir = os.path.join(fsdb.selfplay_dir(), state.output_model_name) holdout_dir = os.path.join(fsdb.holdout_dir(), state.output_model_name) lines = await run( 'bazel-bin/cc/selfplay', '--flagfile={}.flags'.format(os.path.join(FLAGS.flags_dir, flagfile)), '--model={}'.format(state.best_model_path), '--output_dir={}'.format(output_dir), '--holdout_dir={}'.format(holdout_dir), '--seed={}'.format(state.seed)) result = '\n'.join(lines[-6:]) logging.info(result) stats = parse_win_stats_table(result, 1)[0] num_games = stats.total_wins logging.info('Black won %0.3f, white won %0.3f', stats.black_wins.total / num_games, stats.white_wins.total / num_games) # Write examples to a single record. pattern = os.path.join(output_dir, '*', '*.zz') random.seed(state.seed) tf.set_random_seed(state.seed) np.random.seed(state.seed) # TODO(tommadams): This method of generating one golden chunk per generation # is sub-optimal because each chunk gets reused multiple times for training, # introducing bias. Instead, a fresh dataset should be uniformly sampled out # of *all* games in the training window before the start of each training run. buffer = example_buffer.ExampleBuffer(sampling_frac=1.0) # TODO(tommadams): parallel_fill is currently non-deterministic. Make it not # so. logging.info('Writing golden chunk from "{}"'.format(pattern)) buffer.parallel_fill(tf.gfile.Glob(pattern)) buffer.flush(os.path.join(fsdb.golden_chunk_dir(), state.output_model_name + '.tfrecord.zz'))
123,787
Run training and write a new model to the fsdb models_dir. Args: state: the RL loop State instance. tf_records: a list of paths to TensorFlow records to train on.
async def train(state, tf_records): model_path = os.path.join(fsdb.models_dir(), state.train_model_name) await run( 'python3', 'train.py', *tf_records, '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'train.flags')), '--work_dir={}'.format(fsdb.working_dir()), '--export_path={}'.format(model_path), '--training_seed={}'.format(state.seed), '--freeze=true') # Append the time elapsed from when the RL was started to when this model # was trained. elapsed = time.time() - state.start_time timestamps_path = os.path.join(fsdb.models_dir(), 'train_times.txt') with gfile.Open(timestamps_path, 'a') as f: print('{:.3f} {}'.format(elapsed, state.train_model_name), file=f)
123,788
Validate the trained model against holdout games. Args: state: the RL loop State instance. holdout_glob: a glob that matches holdout games.
async def validate(state, holdout_glob): if not glob.glob(holdout_glob): print('Glob "{}" didn\'t match any files, skipping validation'.format( holdout_glob)) else: await run( 'python3', 'validate.py', holdout_glob, '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate.flags')), '--work_dir={}'.format(fsdb.working_dir()))
123,789
Evaluate one model against a target. Args: eval_model_path: the path to the model to evaluate. target_model_path: the path to the model to compare to. sgf_dir: directory path to write SGF output to. seed: random seed to use when running eval. Returns: The win-rate of eval_model against target_model in the range [0, 1].
async def evaluate_model(eval_model_path, target_model_path, sgf_dir, seed): lines = await run( 'bazel-bin/cc/eval', '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'eval.flags')), '--model={}'.format(eval_model_path), '--model_two={}'.format(target_model_path), '--sgf_dir={}'.format(sgf_dir), '--seed={}'.format(seed)) result = '\n'.join(lines[-7:]) logging.info(result) eval_stats, target_stats = parse_win_stats_table(result, 2) num_games = eval_stats.total_wins + target_stats.total_wins win_rate = eval_stats.total_wins / num_games logging.info('Win rate %s vs %s: %.3f', eval_stats.model_name, target_stats.model_name, win_rate) return win_rate
123,790
Evaluate the most recently trained model against the current best model. Args: state: the RL loop State instance.
async def evaluate_trained_model(state): return await evaluate_model( state.train_model_path, state.best_model_path, os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed)
123,791
Turn a game into SGF. Doesn't handle handicap games or positions with incomplete history. Args: move_history: iterable of PlayerMoves result_string: "B+R", "W+0.5", etc. comments: iterable of string/None. Will be zipped with move_history.
def make_sgf( move_history, result_string, ruleset="Chinese", komi=7.5, white_name=PROGRAM_IDENTIFIER, black_name=PROGRAM_IDENTIFIER, comments=[] ): boardsize = go.N game_moves = ''.join(translate_sgf_move(*z) for z in itertools.zip_longest(move_history, comments)) result = result_string return SGF_TEMPLATE.format(**locals())
123,836
Download content from a url. Args: path: string directory where file will be downloaded url: string url Returns: Full path to downloaded file
def download_from_url(path, url): filename = url.split("/")[-1] found_file = find_file(path, filename, max_depth=0) if found_file is None: filename = os.path.join(path, filename) tf.logging.info("Downloading from %s to %s." % (url, filename)) inprogress_filepath = filename + ".incomplete" inprogress_filepath, _ = urllib.request.urlretrieve( url, inprogress_filepath, reporthook=download_report_hook) # Print newline to clear the carriage return from the download progress. print() tf.gfile.Rename(inprogress_filepath, filename) return filename else: tf.logging.info("Already downloaded: %s (at %s)." % (url, found_file)) return found_file
123,889
Given segmentation masks and the bounding boxes corresponding to the location of the masks in the image, this function crops and resizes the masks in the position defined by the boxes. This prepares the masks for them to be fed to the loss computation as the targets. Arguments: segmentation_masks: an instance of SegmentationMask proposals: an instance of BoxList
def project_masks_on_boxes(segmentation_masks, proposals, discretization_size): masks = [] M = discretization_size device = proposals.bbox.device proposals = proposals.convert("xyxy") assert segmentation_masks.size == proposals.size, "{}, {}".format( segmentation_masks, proposals ) # TODO put the proposals on the CPU, as the representation for the # masks is not efficient GPU-wise (possibly several small tensors for # representing a single instance mask) proposals = proposals.bbox.to(torch.device("cpu")) for segmentation_mask, proposal in zip(segmentation_masks, proposals): # crop the masks, resize them to the desired resolution and # then convert them to the tensor representation, # instead of the list representation that was used cropped_mask = segmentation_mask.crop(proposal) scaled_mask = cropped_mask.resize((M, M)) mask = scaled_mask.convert(mode="mask") masks.append(mask) if len(masks) == 0: return torch.empty(0, dtype=torch.float32, device=device) return torch.stack(masks, dim=0).to(device, dtype=torch.float32)
123,891
Crops the given image to a random part of the image, and randomly flips. We use the fused decode_and_crop op, which performs better than the two ops used separately in series, but note that this requires that the image be passed in as an un-decoded string Tensor. Args: image_buffer: scalar string Tensor representing the raw JPEG image buffer. num_channels: Integer depth of the image buffer for decoding. Returns: 3-D tensor with cropped image.
def _decode_crop_and_flip(image_buffer, num_channels): # A large fraction of image datasets contain a human-annotated bounding box # delineating the region of the image containing the object of interest. We # choose to create a new bounding box for the object which is a randomly # distorted version of the human-annotated bounding box that obeys an # allowed range of aspect ratios, sizes and overlap with the human-annotated # bounding box. If no box is supplied, then we assume the bounding box is # the entire image. min_object_covered=0.1 aspect_ratio_range=[0.75, 1.33] area_range=[0.05, 1.0] max_attempts=100 mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MIN_OBJ_COV, value=min_object_covered) mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_RATIO_RANGE, value=aspect_ratio_range) mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_AREA_RANGE, value=area_range) mlperf_log.resnet_print(key=mlperf_log.INPUT_DISTORTED_CROP_MAX_ATTEMPTS, value=max_attempts) mlperf_log.resnet_print(key=mlperf_log.INPUT_CROP_USES_BBOXES, value=False) bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) #From the entire image sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( tf.image.extract_jpeg_shape(image_buffer), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, _ = sample_distorted_bounding_box # Reassemble the bounding box in the format the crop op requires. offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) # Use the fused decode and crop op here, which is faster than each in series. cropped = tf.image.decode_and_crop_jpeg( image_buffer, crop_window, channels=num_channels) # Flip to add a little more random distortion in. mlperf_log.resnet_print(key=mlperf_log.INPUT_RANDOM_FLIP) cropped = tf.image.random_flip_left_right(cropped) return cropped
123,910
Performs a central crop of the given image.

Args:
  image: a 3-D image tensor
  crop_height: the height of the image following the crop.
  crop_width: the width of the image following the crop.

Returns:
  3-D tensor with cropped image.
def _central_crop(image, crop_height, crop_width):
  shape = tf.shape(image)
  height, width = shape[0], shape[1]

  mlperf_log.resnet_print(key=mlperf_log.INPUT_CENTRAL_CROP,
                          value=[crop_height, crop_width])

  amount_to_be_cropped_h = (height - crop_height)
  crop_top = amount_to_be_cropped_h // 2
  amount_to_be_cropped_w = (width - crop_width)
  crop_left = amount_to_be_cropped_w // 2
  return tf.slice(
      image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
123,911
Resize images preserving the original aspect ratio.

Args:
  image: A 3-D image `Tensor`.
  resize_min: A python integer or scalar `Tensor` indicating the size of
    the smallest side after resize.

Returns:
  resized_image: A 3-D tensor containing the resized image.
def _aspect_preserving_resize(image, resize_min):
  mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE_ASPECT_PRESERVING,
                          value={"min": resize_min})
  shape = tf.shape(image)
  height, width = shape[0], shape[1]

  new_height, new_width = _smallest_size_at_least(height, width, resize_min)

  return _resize_image(image, new_height, new_width)
123,914
Simple wrapper around tf.image.resize_images.

This is primarily to make sure we use the same `ResizeMethod` and other
details each time.

Args:
  image: A 3-D image `Tensor`.
  height: The target height for the resized image.
  width: The target width for the resized image.

Returns:
  resized_image: A 3-D tensor containing the resized image. The first two
    dimensions have the shape [height, width].
def _resize_image(image, height, width):
  return tf.image.resize_images(
      image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
      align_corners=False)
123,915
Reshapes the first two dimensions into a single dimension.

Args:
  tensor: Tensor to reshape of shape [A, B, ...]

Returns:
  Reshaped tensor of shape [A*B, ...]
def _flatten_beam_dim(tensor):
  shape = _shape_list(tensor)
  shape[0] *= shape[1]
  shape.pop(1)  # Remove beam dim
  return tf.reshape(tensor, shape)
123,920
Reshapes the first dimension back to [batch_size, beam_size].

Args:
  tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
  batch_size: Tensor, original batch size.
  beam_size: int, original beam size.

Returns:
  Reshaped tensor of shape [batch_size, beam_size, ...]
def _unflatten_beam_dim(tensor, batch_size, beam_size):
  shape = _shape_list(tensor)
  new_shape = [batch_size, beam_size] + shape[1:]
  return tf.reshape(tensor, new_shape)
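A quick shape sketch of what these two helpers do, written with NumPy for clarity (the TF versions above perform the equivalent reshapes, reading the static shape via _shape_list):

import numpy as np

batch_size, beam_size, length = 2, 4, 7
x = np.arange(batch_size * beam_size * length).reshape(
    batch_size, beam_size, length)

flat = x.reshape(batch_size * beam_size, length)         # _flatten_beam_dim
restored = flat.reshape(batch_size, beam_size, length)   # _unflatten_beam_dim

assert np.array_equal(restored, x)  # the two reshapes are exact inverses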
123,921
Return initial state dictionary and its shape invariants.

Args:
  initial_ids: initial ids to pass into the symbols_to_logits_fn.
    int tensor with shape [batch_size, 1]
  initial_cache: dictionary storing values to be passed into the
    symbols_to_logits_fn.

Returns:
  state and shape invariant dictionaries with keys from _StateKeys
def _create_initial_state(self, initial_ids, initial_cache):
  # Current loop index (starts at 0)
  cur_index = tf.constant(0)

  # Create alive sequence with shape [batch_size, beam_size, 1]
  alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)
  alive_seq = tf.expand_dims(alive_seq, axis=2)

  # Create tensor for storing initial log probabilities.
  # Assume initial_ids are prob 1.0
  initial_log_probs = tf.constant(
      [[0.] + [-float("inf")] * (self.beam_size - 1)])
  alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])

  # Expand all values stored in the dictionary to the beam size, so that each
  # beam has a separate cache.
  alive_cache = nest.map_structure(
      lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache)

  # Initialize tensor storing finished sequences with filler values.
  finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)

  # Set scores of the initial finished seqs to negative infinity.
  finished_scores = tf.ones([self.batch_size, self.beam_size]) * -INF

  # Initialize finished flags with all False values.
  finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)

  # Create state dictionary
  state = {
      _StateKeys.CUR_INDEX: cur_index,
      _StateKeys.ALIVE_SEQ: alive_seq,
      _StateKeys.ALIVE_LOG_PROBS: alive_log_probs,
      _StateKeys.ALIVE_CACHE: alive_cache,
      _StateKeys.FINISHED_SEQ: finished_seq,
      _StateKeys.FINISHED_SCORES: finished_scores,
      _StateKeys.FINISHED_FLAGS: finished_flags
  }

  # Create state invariants for each value in the state dictionary. Each
  # dimension must be a constant or None. A None dimension means either:
  #   1) the dimension's value is a tensor that remains the same but may
  #      depend on the input sequence to the model (e.g. batch size).
  #   2) the dimension may have different values on different iterations.
  state_shape_invariants = {
      _StateKeys.CUR_INDEX: tf.TensorShape([]),
      _StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]),
      _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]),
      _StateKeys.ALIVE_CACHE: nest.map_structure(
          _get_shape_keep_last_dim, alive_cache),
      _StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]),
      _StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]),
      _StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size])
  }

  return state, state_shape_invariants
123,926
Return whether to continue the search loop.

The loop should terminate when
  1) the decode length has been reached, or
  2) the worst score in the finished sequences is better than the best score
     in the alive sequences (i.e. the finished sequences are provably
     unchanging).

Args:
  state: A dictionary with the current loop state.

Returns:
  Bool tensor with value True if loop should continue, False if loop should
  terminate.
def _continue_search(self, state):
  i = state[_StateKeys.CUR_INDEX]
  alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
  finished_scores = state[_StateKeys.FINISHED_SCORES]
  finished_flags = state[_StateKeys.FINISHED_FLAGS]

  not_at_max_decode_length = tf.less(i, self.max_decode_length)

  # Calculate largest length penalty (the larger penalty, the better score).
  max_length_norm = _length_normalization(self.alpha, self.max_decode_length)
  # Get the best possible scores from alive sequences.
  best_alive_scores = alive_log_probs[:, 0] / max_length_norm

  # Compute worst score in finished sequences for each batch element
  finished_scores *= tf.to_float(finished_flags)  # set filler scores to zero
  lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)

  # If there are no finished sequences in a batch element, then set the lowest
  # finished score to -INF for that element.
  finished_batches = tf.reduce_any(finished_flags, 1)
  lowest_finished_scores += (1. - tf.to_float(finished_batches)) * -INF

  worst_finished_score_better_than_best_alive_score = tf.reduce_all(
      tf.greater(lowest_finished_scores, best_alive_scores)
  )

  return tf.logical_and(
      not_at_max_decode_length,
      tf.logical_not(worst_finished_score_better_than_best_alive_score)
  )
123,927
Return positional encoding.

Calculates the position encoding as a mix of sine and cosine functions with
geometrically increasing wavelengths. Defined and formalized in Attention is
All You Need, section 3.5.

Args:
  length: Sequence length.
  hidden_size: Size of the positional encoding (the model hidden size).
  min_timescale: Minimum scale that will be applied at each position
  max_timescale: Maximum scale that will be applied at each position

Returns:
  Tensor with shape [length, hidden_size]
def get_position_encoding(
    length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
  position = tf.to_float(tf.range(length))
  num_timescales = hidden_size // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
  return signal
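For reference, the signal computed above is the sinusoidal encoding from section 3.5 of Attention is All You Need, except that the sine and cosine halves are concatenated along the channel axis rather than interleaved:

\[
\mathrm{PE}[pos,\, i] = \sin(pos \cdot \lambda_i), \qquad
\mathrm{PE}[pos,\, i + d/2] = \cos(pos \cdot \lambda_i), \qquad
\lambda_i = \text{min\_timescale}\cdot\Big(\tfrac{\text{max\_timescale}}{\text{min\_timescale}}\Big)^{-i/(d/2-1)}
\]

for i = 0, ..., d/2 - 1, where d = hidden_size.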
123,941
Calculate bias for decoder that maintains model's autoregressive property.

Creates a tensor that masks out locations that correspond to illegal
connections, so prediction at position i cannot draw information from future
positions.

Args:
  length: int length of sequences in batch.

Returns:
  float tensor of shape [1, 1, length, length]
def get_decoder_self_attention_bias(length):
  with tf.name_scope("decoder_self_attention_bias"):
    valid_locs = tf.matrix_band_part(tf.ones([length, length]), -1, 0)
    valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
    decoder_bias = _NEG_INF * (1.0 - valid_locs)
  return decoder_bias
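To make the masking concrete, for length=3 the slice decoder_bias[0, 0] is zero on and below the diagonal and _NEG_INF (-1e9 in this codebase) above it, so position i can only attend to positions j <= i:

# decoder_bias[0, 0] for length = 3:
#   [[ 0.0, -1e9, -1e9],
#    [ 0.0,  0.0, -1e9],
#    [ 0.0,  0.0,  0.0]]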
123,942
Return float tensor representing the padding values in x.

Args:
  x: int tensor with any shape
  padding_value: int value that marks padding entries (defaults to 0)

Returns:
  float tensor with same shape as x containing values 0 or 1.
    0 -> non-padding, 1 -> padding
def get_padding(x, padding_value=0):
  with tf.name_scope("padding"):
    return tf.to_float(tf.equal(x, padding_value))
123,943
Calculate bias tensor from padding values in tensor.

Bias tensor that is added to the pre-softmax multi-headed attention logits,
which has shape [batch_size, num_heads, length, length]. The tensor is zero at
non-padding locations, and -1e9 (negative infinity) at padding locations.

Args:
  x: int tensor with shape [batch_size, length]

Returns:
  Attention bias tensor of shape [batch_size, 1, 1, length].
def get_padding_bias(x):
  with tf.name_scope("attention_bias"):
    padding = get_padding(x)
    attention_bias = padding * _NEG_INF
    attention_bias = tf.expand_dims(
        tf.expand_dims(attention_bias, axis=1), axis=1)
  return attention_bias
123,944
Adds a result to the dictionary.

Args:
  dict_entry: main dict to add entry
  entry: slot for this entry (likely an integer)
  dt: the timing for the entry
  start_time: when the entry started (unix time, float)
def _add_result(self, dict_entry, entry, dt, start_time):
  time_entry = {}
  time_entry['dt'] = dt
  time_entry['start_time'] = start_time
  dict_entry[entry] = time_entry
123,953
Sorts dict of results based on log start_time.

Sorts the results and returns a list containing only the timing values,
ordered oldest first.

Args:
  results_dicts: List of result dicts

Returns:
  List of only the times, sorted oldest first.
def _sorted_results(self, results_dicts):
  print('results dicts:', results_dicts)
  sorted_dict = sorted(results_dicts, key=lambda k: k['start_time'])
  results = []
  for entry in sorted_dict:
    results.append(entry['dt'])
  return results
123,954
Verifies a result and returns its timing.

Uses submodule mlp_compliance (https://github.com/bitfort/mlp_compliance)

Args:
  log_file: Absolute path to result file.
  division: open, closed
  result_name: name of the benchmark, ncf, ssd, etc

Returns:
  Time for the result or `INFINITE_TIME` if not a success

Raises:
  Exception: If expected compliance level is not hit or cannot figure out
    expected compliance level.
def verify_and_extract_time(self, log_file, division, result_name):
  expected_level = constants.DIVISION_COMPLIANCE_CHECK_LEVEL.get(
      division, None)
  print(result_name)
  if expected_level is None:
    raise Exception('Unknown division: {}'.format(division))

  start_time, level, dt, _, success = self.get_compliance(log_file)
  print(float(start_time))

  if int(level) != expected_level:
    raise Exception('Error Level {} does not match needed level {}:{}'.format(
        level, expected_level, log_file))

  # Sets failure to converge to "infinite time" per the rules
  if success and dt:
    return dt, start_time
  else:
    print('Result was not a success set to INFINITE_TIME({})'.format(
        INFINITE_TIME))
    return INFINITE_TIME, start_time
123,959
Performs non-maximum suppression on a boxlist, with scores specified
in a boxlist field via score_field.

Arguments:
    boxlist (BoxList)
    nms_thresh (float)
    max_proposals (int): if > 0, then only the top max_proposals are kept
        after non-maximum suppression
    score_field (str)
def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores"):
    if nms_thresh <= 0:
        return boxlist
    mode = boxlist.mode
    boxlist = boxlist.convert("xyxy")
    boxes = boxlist.bbox
    score = boxlist.get_field(score_field)
    keep = _box_nms(boxes, score, nms_thresh)
    if max_proposals > 0:
        keep = keep[: max_proposals]
    boxlist = boxlist[keep]
    return boxlist.convert(mode)
123,991
Only keep boxes with both sides >= min_size

Arguments:
    boxlist (Boxlist)
    min_size (int)
def remove_small_boxes(boxlist, min_size):
    # TODO maybe add an API for querying the ws / hs
    xywh_boxes = boxlist.convert("xywh").bbox
    _, _, ws, hs = xywh_boxes.unbind(dim=1)
    keep = (
        (ws >= min_size) & (hs >= min_size)
    ).nonzero().squeeze(1)
    return boxlist[keep]
123,992
Compute the intersection over union of two sets of boxes.

The box order must be (xmin, ymin, xmax, ymax).

Arguments:
    boxlist1: (BoxList) bounding boxes, sized [N,4].
    boxlist2: (BoxList) bounding boxes, sized [M,4].

Returns:
    (tensor) iou, sized [N,M].

Reference:
    https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
def boxlist_iou(boxlist1, boxlist2):
    if boxlist1.size != boxlist2.size:
        raise RuntimeError(
            "boxlists should have same image size, got {}, {}".format(
                boxlist1, boxlist2))

    N = len(boxlist1)
    M = len(boxlist2)

    area1 = boxlist1.area()
    area2 = boxlist2.area()

    box1, box2 = boxlist1.bbox, boxlist2.bbox

    lt = torch.max(box1[:, None, :2], box2[:, :2])  # [N,M,2]
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])  # [N,M,2]

    TO_REMOVE = 1

    wh = (rb - lt + TO_REMOVE).clamp(min=0)  # [N,M,2]
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]

    iou = inter / (area1[:, None] + area2 - inter)
    return iou
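The computation is the usual pairwise IoU, inter / (area1 + area2 - inter), with the +1 inclusive-pixel convention for widths and heights used throughout this codebase. A standalone numeric check with plain tensors (hypothetical boxes, bypassing the BoxList wrapper):

import torch

# Two boxes in (xmin, ymin, xmax, ymax) form, each 10x10 under the +1 convention.
a = torch.tensor([[0., 0., 9., 9.]])     # area 100
b = torch.tensor([[5., 5., 14., 14.]])   # area 100

lt = torch.max(a[:, None, :2], b[:, :2])
rb = torch.min(a[:, None, 2:], b[:, 2:])
wh = (rb - lt + 1).clamp(min=0)
inter = wh[..., 0] * wh[..., 1]      # 5x5 overlap -> 25
iou = inter / (100 + 100 - inter)    # 25 / 175 ~= 0.143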
123,993
Concatenates a list of BoxList (having the same image size) into a
single BoxList

Arguments:
    bboxes (list[BoxList])
def cat_boxlist(bboxes):
    assert isinstance(bboxes, (list, tuple))
    assert all(isinstance(bbox, BoxList) for bbox in bboxes)

    size = bboxes[0].size
    assert all(bbox.size == size for bbox in bboxes)

    mode = bboxes[0].mode
    assert all(bbox.mode == mode for bbox in bboxes)

    fields = set(bboxes[0].fields())
    assert all(set(bbox.fields()) == fields for bbox in bboxes)

    cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode)

    for field in fields:
        data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0)
        cat_boxes.add_field(field, data)

    return cat_boxes
123,994
Create min and max boundary lists up to max_length.

For example, when max_length=24, min_boundary=4 and boundary_scale=2, the
returned values will be:
  buckets_min = [0, 4, 8, 16]
  buckets_max = [4, 8, 16, 25]

Args:
  max_length: The maximum length of example in dataset.
  min_boundary: Minimum length in boundary.
  boundary_scale: Amount to scale consecutive boundaries in the list.

Returns:
  min and max boundary lists
def _create_min_max_boundaries(
    max_length, min_boundary=_MIN_BOUNDARY, boundary_scale=_BOUNDARY_SCALE):
  # Create bucket boundaries list by scaling the previous boundary or adding 1
  # (to ensure increasing boundary sizes).
  bucket_boundaries = []
  x = min_boundary
  while x < max_length:
    bucket_boundaries.append(x)
    x = max(x + 1, int(x * boundary_scale))

  # Create min and max boundary lists from the initial list.
  buckets_min = [0] + bucket_boundaries
  buckets_max = bucket_boundaries + [max_length + 1]
  return buckets_min, buckets_max
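A quick standalone trace of the docstring example (the loop re-implemented verbatim; the batching code that consumes these lists typically assigns an example of length L to bucket i when buckets_min[i] <= L < buckets_max[i]):

def trace_boundaries(max_length, min_boundary, boundary_scale):
    bucket_boundaries = []
    x = min_boundary
    while x < max_length:
        bucket_boundaries.append(x)
        x = max(x + 1, int(x * boundary_scale))
    return [0] + bucket_boundaries, bucket_boundaries + [max_length + 1]

print(trace_boundaries(24, min_boundary=4, boundary_scale=2))
# -> ([0, 4, 8, 16], [4, 8, 16, 25])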
124,025
Convert dtype string to tf dtype, and set loss_scale default as needed.

Args:
  flags: namespace object returned by arg parser.

Raises:
  ValueError: If an invalid dtype is provided.
def parse_dtype_info(flags):
  if flags.dtype in (i[0] for i in DTYPE_MAP.values()):
    return  # Make function idempotent

  try:
    flags.dtype, default_loss_scale = DTYPE_MAP[flags.dtype]
  except KeyError:
    raise ValueError("Invalid dtype: {}".format(flags.dtype))

  flags.loss_scale = flags.loss_scale or default_loss_scale
124,045
Propagate a virtual loss up to the root node.

Args:
    up_to: The node to propagate until. (Keep track of this! You'll need it to
        reverse the virtual loss later.)
def add_virtual_loss(self, up_to):
    self.losses_applied += 1
    # This is a "win" for the current node; hence a loss for its parent node
    # who will be deciding whether to investigate this node again.
    loss = self.position.to_play
    self.W += loss
    if self.parent is None or self is up_to:
        return
    self.parent.add_virtual_loss(up_to)
124,102
Propagates a value estimation up to the root node.

Args:
    value: the value to be propagated (1 = black wins, -1 = white wins)
    up_to: the node to propagate until.
def backup_value(self, value, up_to):
    self.N += 1
    self.W += value
    if self.parent is None or self is up_to:
        return
    self.parent.backup_value(value, up_to)
124,105
Convert a list of command arguments to types specified by the handler.

Args:
    handler: a command handler function.
    args: the list of string arguments to pass to handler.

Returns:
    A new list containing `args` that have been converted to the expected type
    for `handler`. For each function parameter of `handler` that has either an
    explicit type annotation or a non-None default value, the corresponding
    element in `args` is converted to that type.
def _convert_args(handler, args):
    args = list(args)
    params = inspect.signature(handler).parameters
    for i, (arg, name) in enumerate(zip(args, params)):
        default = params[name].default
        annotation = params[name].annotation
        if annotation != inspect.Parameter.empty:
            if isinstance(annotation, type) and annotation != str:
                # The parameter is annotated with a type that isn't str:
                # convert the arg to that type.
                args[i] = annotation(arg)
        elif default != inspect.Parameter.empty:
            if default is not None and not isinstance(default, str):
                # The parameter has a default value that isn't None or a str:
                # convert the arg to the default value's type.
                args[i] = type(default)(arg)
    return args
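A small usage sketch with a hypothetical GTP-style handler (assuming the _convert_args above is in scope): the string "200" is coerced to int because the default value of visits is an int, while the str-annotated parameters pass through unchanged.

def cmd_play(color: str, vertex: str, visits=100):
    return color, vertex, visits

print(_convert_args(cmd_play, ["b", "d4", "200"]))
# -> ['b', 'd4', 200]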
124,120
Registers a new command handler object.

All methods on `handler_obj` whose name starts with "cmd_" are registered as
a GTP command. For example, the method cmd_genmove will be invoked when the
engine receives a genmove command.

Args:
    handler_obj: the handler object to register.
def add_cmd_handler(self, handler_obj):
    for field in dir(handler_obj):
        if field.startswith("cmd_"):
            cmd = field[4:]
            fn = getattr(handler_obj, field)
            if cmd in self.cmds:
                print('Replacing {} with {}'.format(
                    _handler_name(self.cmds[cmd]), _handler_name(fn)),
                    file=sys.stderr)
            self.cmds[cmd] = fn
124,121
Calculate cross entropy loss while ignoring padding.

Args:
  logits: Tensor of size [batch_size, length_logits, vocab_size]
  labels: Tensor of size [batch_size, length_labels]
  smoothing: Label smoothing constant, used to determine the on and off values
  vocab_size: int size of the vocabulary

Returns:
  Cross entropy loss and weight tensors, each a float32 tensor with shape
  [batch_size, max(length_logits, length_labels)]
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
  with tf.name_scope("loss", [logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)

    # Calculate smoothing cross entropy
    with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
      confidence = 1.0 - smoothing
      low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
      soft_targets = tf.one_hot(
          tf.cast(labels, tf.int32),
          depth=vocab_size,
          on_value=confidence,
          off_value=low_confidence)
      xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
          logits=logits, labels=soft_targets)

      # Calculate the best (lowest) possible value of cross entropy, and
      # subtract from the cross entropy loss.
      normalizing_constant = -(
          confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
          low_confidence * tf.log(low_confidence + 1e-20))
      xentropy -= normalizing_constant

    weights = tf.to_float(tf.not_equal(labels, 0))
    return xentropy * weights, weights
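For reference, the soft targets built above put probability 1 - ε on the gold token and ε/(V-1) on every other token (ε = smoothing, V = vocab_size); the constant subtracted afterwards is the entropy of that target distribution, i.e. the lowest achievable cross entropy:

\[
q(k) = \begin{cases} 1-\varepsilon & k = y \\ \varepsilon/(V-1) & k \ne y \end{cases},
\qquad
\text{normalizing\_constant} = -\Big[(1-\varepsilon)\log(1-\varepsilon) + \varepsilon \log\tfrac{\varepsilon}{V-1}\Big]
\]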
124,133
Wrap a metric fn that returns scores and weights as an eval metric fn.

The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.

Args:
  metric_fn: function that returns scores and weights for the current batch's
    logits and predicted labels.

Returns:
  function that aggregates the scores and weights from metric_fn.
def _convert_to_eval_metric(metric_fn):
  def problem_metric_fn(*args):
    (scores, weights) = metric_fn(*args)

    # The tf.metrics.mean function assures correct aggregation.
    return tf.metrics.mean(scores, weights)
  return problem_metric_fn
124,134
Approximate BLEU score computation between labels and predictions.

An approximate BLEU scoring method since we do not glue word pieces or decode
the ids and tokenize the output. By default, we use ngram order of 4 and use
brevity penalty. Also, this does not have beam search.

Args:
  logits: Tensor of size [batch_size, length_logits, vocab_size]
  labels: Tensor of size [batch_size, length_labels]

Returns:
  bleu: approx bleu score (float32 scalar)
def bleu_score(logits, labels):
  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func
  bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
  return bleu, tf.constant(1.0)
124,140
Extracts all n-grams up to a given maximum order from an input segment.

Args:
  segment: text segment from which n-grams will be extracted.
  max_order: maximum length in tokens of the n-grams returned by this method.

Returns:
  The Counter containing all n-grams up to max_order in segment
  with a count of how many times each n-gram occurred.
def _get_ngrams_with_counter(segment, max_order):
  ngram_counts = collections.Counter()
  for order in xrange(1, max_order + 1):
    for i in xrange(0, len(segment) - order + 1):
      ngram = tuple(segment[i:i + order])
      ngram_counts[ngram] += 1
  return ngram_counts
124,141
Computes BLEU score of translated segments against one or more references.

Args:
  reference_corpus: list of references for each translation. Each reference
    should be tokenized into a list of tokens.
  translation_corpus: list of translations to score. Each translation should
    be tokenized into a list of tokens.
  max_order: Maximum n-gram order to use when computing BLEU score.
  use_bp: boolean, whether to apply brevity penalty.

Returns:
  BLEU score.
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
                 use_bp=True):
  reference_length = 0
  translation_length = 0
  bp = 1.0
  geo_mean = 0

  matches_by_order = [0] * max_order
  possible_matches_by_order = [0] * max_order
  precisions = []

  for (references, translations) in zip(reference_corpus, translation_corpus):
    reference_length += len(references)
    translation_length += len(translations)
    ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
    translation_ngram_counts = _get_ngrams_with_counter(translations,
                                                        max_order)

    overlap = dict((ngram, min(count, translation_ngram_counts[ngram]))
                   for ngram, count in ref_ngram_counts.items())

    for ngram in overlap:
      matches_by_order[len(ngram) - 1] += overlap[ngram]
    for ngram in translation_ngram_counts:
      possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
          ngram]

  precisions = [0] * max_order
  smooth = 1.0

  for i in xrange(0, max_order):
    if possible_matches_by_order[i] > 0:
      precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
      if matches_by_order[i] > 0:
        precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
            i]
      else:
        smooth *= 2
        precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
    else:
      precisions[i] = 0.0

  if max(precisions) > 0:
    p_log_sum = sum(math.log(p) for p in precisions if p)
    geo_mean = math.exp(p_log_sum / max_order)

  if use_bp:
    ratio = translation_length / reference_length
    bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
  bleu = geo_mean * bp
  return np.float32(bleu)
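For orientation, this follows the standard corpus-level BLEU definition, where p_n are the (smoothed) modified n-gram precisions accumulated above, N = max_order, c the total translation length and r the total reference length:

\[
\mathrm{BLEU} = \mathrm{BP}\cdot\exp\Big(\tfrac{1}{N}\sum_{n=1}^{N}\log p_n\Big),
\qquad
\mathrm{BP} = \begin{cases}1 & c \ge r\\ e^{\,1-r/c} & c < r\end{cases}
\]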
124,142
ROUGE-2 F1 score computation between labels and predictions.

This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.

Args:
  logits: tensor, model predictions
  labels: tensor, gold output.

Returns:
  rouge2_fscore: approx rouge-2 f1 score.
def rouge_2_fscore(logits, labels):
  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func
  rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
  return rouge_2_f_score, tf.constant(1.0)
124,143
Computes ROUGE-N f1 score of two text collections of sentences.

Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/

Args:
  eval_sentences: Predicted sentences.
  ref_sentences: Sentences from the reference set
  n: Size of ngram. Defaults to 2.

Returns:
  f1 score for ROUGE-N
def rouge_n(eval_sentences, ref_sentences, n=2):
  f1_scores = []
  for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
    eval_ngrams = _get_ngrams(n, eval_sentence)
    ref_ngrams = _get_ngrams(n, ref_sentence)
    ref_count = len(ref_ngrams)
    eval_count = len(eval_ngrams)

    # Count the overlapping ngrams between evaluated and reference
    overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
    overlapping_count = len(overlapping_ngrams)

    # Handle edge case. This isn't mathematically correct, but it's good enough
    if eval_count == 0:
      precision = 0.0
    else:
      precision = float(overlapping_count) / eval_count
    if ref_count == 0:
      recall = 0.0
    else:
      recall = float(overlapping_count) / ref_count
    f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))

  # return overlapping_count / reference_count
  return np.mean(f1_scores, dtype=np.float32)
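The per-sentence score computed in the loop is the usual F1 over distinct n-gram overlap (with a small epsilon in the denominator to avoid division by zero), and the returned value is the mean over sentence pairs:

\[
P = \frac{|G_n(\text{eval}) \cap G_n(\text{ref})|}{|G_n(\text{eval})|},\quad
R = \frac{|G_n(\text{eval}) \cap G_n(\text{ref})|}{|G_n(\text{ref})|},\quad
F_1 = \frac{2PR}{P+R+10^{-8}}
\]

where G_n(.) denotes the set of distinct n-grams in a sentence.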
124,144
ROUGE scores computation between labels and predictions.

This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.

Args:
  predictions: tensor, model predictions
  labels: tensor, gold output.

Returns:
  rouge_l_fscore: approx rouge-l f1 score.
def rouge_l_fscore(predictions, labels):
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                               tf.float32)
  return rouge_l_f_score, tf.constant(1.0)
124,145
Extract files from downloaded compressed archive file.

Args:
  path: string directory where the files will be downloaded
  url: url containing the compressed input and target files
  input_filename: name of file containing data in source language
  target_filename: name of file containing data in target language

Returns:
  Full paths to extracted input and target files.

Raises:
  OSError: if the download/extraction fails.
def download_and_extract(path, url, input_filename, target_filename):
  logging.info('Downloading and extracting data to: %s' % path)
  # Check if extracted files already exist in path
  input_file = find_file(path, input_filename)
  target_file = find_file(path, target_filename)
  if input_file and target_file:
    logging.info("Already downloaded and extracted %s." % url)
    return input_file, target_file

  # Download archive file if it doesn't already exist.
  compressed_file = download_from_url(path, url)

  # Extract compressed files
  logging.info("Extracting %s." % compressed_file)
  with tarfile.open(compressed_file, "r:gz") as corpus_tar:
    corpus_tar.extractall(path)

  # Return filepaths of the requested files.
  input_file = find_file(path, input_filename)
  target_file = find_file(path, target_filename)

  if input_file and target_file:
    return input_file, target_file

  raise OSError("Download/extraction failed for url %s to path %s" %
                (url, path))
124,151
This method performs the positive/negative sampling, and returns
the sampled proposals.

Note: this function keeps a state.

Arguments:
    proposals (list[BoxList])
    targets (list[BoxList])
def subsample(self, proposals, targets):
    labels, regression_targets = self.prepare_targets(proposals, targets)
    sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)

    proposals = list(proposals)
    # add corresponding label and regression_targets information to the bounding boxes
    for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
        labels, regression_targets, proposals
    ):
        proposals_per_image.add_field("labels", labels_per_image)
        proposals_per_image.add_field(
            "regression_targets", regression_targets_per_image
        )

    # distributed sampled proposals, that were obtained on all feature maps
    # concatenated via the fg_bg_sampler, into individual feature map levels
    for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
        zip(sampled_pos_inds, sampled_neg_inds)
    ):
        img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
        proposals_per_image = proposals[img_idx][img_sampled_inds]
        proposals[img_idx] = proposals_per_image

    self._proposals = proposals
    return proposals
124,164
Computes the loss for Faster R-CNN.

This requires that the subsample method has been called beforehand.

Arguments:
    class_logits (list[Tensor])
    box_regression (list[Tensor])

Returns:
    classification_loss (Tensor)
    box_loss (Tensor)
def __call__(self, class_logits, box_regression):
    class_logits = cat(class_logits, dim=0)
    box_regression = cat(box_regression, dim=0)
    device = class_logits.device

    if not hasattr(self, "_proposals"):
        raise RuntimeError("subsample needs to be called before")

    proposals = self._proposals

    labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
    regression_targets = cat(
        [proposal.get_field("regression_targets") for proposal in proposals], dim=0
    )

    classification_loss = F.cross_entropy(class_logits, labels)

    # get indices that correspond to the regression targets for
    # the corresponding ground truth labels, to be used with
    # advanced indexing
    sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
    labels_pos = labels[sampled_pos_inds_subset]
    if self.cls_agnostic_bbox_reg:
        map_inds = torch.tensor([4, 5, 6, 7], device=device)
    else:
        map_inds = 4 * labels_pos[:, None] + torch.tensor(
            [0, 1, 2, 3], device=device)

    box_loss = smooth_l1_loss(
        box_regression[sampled_pos_inds_subset[:, None], map_inds],
        regression_targets[sampled_pos_inds_subset],
        size_average=False,
        beta=1,
    )
    box_loss = box_loss / labels.numel()

    return classification_loss, box_loss
124,165
Run the given subprocess command in a coroutine.

Args:
    *cmd: the command to run and its arguments.

Returns:
    The output that the command wrote to stdout.

Raises:
    RuntimeError: if the command returns a non-zero result.
async def checked_run(*cmd):
    # Start the subprocess.
    logging.info('Running: %s', expand_cmd_str(cmd))
    with logged_timer('{} finished'.format(get_cmd_name(cmd))):
        p = await asyncio.create_subprocess_exec(
            *cmd, stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT)

        # Stream output from the process stdout.
        chunks = []
        while True:
            chunk = await p.stdout.read(16 * 1024)
            if not chunk:
                break
            chunks.append(chunk)

        # Wait for the process to finish, check it was successful & build stdout.
        await p.wait()
        stdout = b''.join(chunks).decode()[:-1]
        if p.returncode:
            raise RuntimeError('Return code {} from process: {}\n{}'.format(
                p.returncode, expand_cmd_str(cmd), stdout))

    return stdout
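A minimal driver sketch for the coroutine (hypothetical command; assumes the logging/timer helpers referenced above are importable from the same module):

import asyncio

async def main():
    out = await checked_run('echo', 'hello')  # stand-in command for illustration
    print(out)

asyncio.get_event_loop().run_until_complete(main())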
124,169
Get frame by index.

Args:
    frame_id (int): Index of the expected frame, 0-based.

Returns:
    ndarray or None: Return the frame if successful, otherwise None.
def get_frame(self, frame_id):
    if frame_id < 0 or frame_id >= self._frame_cnt:
        raise IndexError(
            '"frame_id" must be between 0 and {}'.format(self._frame_cnt - 1))
    if frame_id == self._position:
        return self.read()
    if self._cache:
        img = self._cache.get(frame_id)
        if img is not None:
            self._position = frame_id + 1
            return img
    self._set_real_position(frame_id)
    ret, img = self._vcap.read()
    if ret:
        if self._cache:
            self._cache.put(self._position, img)
        self._position += 1
    return img
126,334
Convert a video to frame images.

Args:
    frame_dir (str): Output directory to store all the frame images.
    file_start (int): Filenames will start from the specified number.
    filename_tmpl (str): Filename template with the index as the placeholder.
    start (int): The starting frame index.
    max_num (int): Maximum number of frames to be written.
    show_progress (bool): Whether to show a progress bar.
def cvt2frames(self,
               frame_dir,
               file_start=0,
               filename_tmpl='{:06d}.jpg',
               start=0,
               max_num=0,
               show_progress=True):
    mkdir_or_exist(frame_dir)
    if max_num == 0:
        task_num = self.frame_cnt - start
    else:
        task_num = min(self.frame_cnt - start, max_num)
    if task_num <= 0:
        raise ValueError('start must be less than total frame number')
    if start > 0:
        self._set_real_position(start)

    def write_frame(file_idx):
        img = self.read()
        filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
        cv2.imwrite(filename, img)

    if show_progress:
        track_progress(write_frame, range(file_start, file_start + task_num))
    else:
        for i in range(task_num):
            img = self.read()
            if img is None:
                break
            filename = osp.join(frame_dir,
                                filename_tmpl.format(i + file_start))
            cv2.imwrite(filename, img)
126,335
Track the progress of tasks execution with a progress bar.

Tasks are done with a simple for-loop.

Args:
    func (callable): The function to be applied to each task.
    tasks (list or tuple[Iterable, int]): A list of tasks or
        (tasks, total num).
    bar_width (int): Width of progress bar.

Returns:
    list: The task results.
def track_progress(func, tasks, bar_width=50, **kwargs):
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        assert isinstance(tasks[0], collections_abc.Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        tasks = tasks[0]
    elif isinstance(tasks, collections_abc.Iterable):
        task_num = len(tasks)
    else:
        raise TypeError(
            '"tasks" must be an iterable object or a (iterator, int) tuple')
    prog_bar = ProgressBar(task_num, bar_width)
    results = []
    for task in tasks:
        results.append(func(task, **kwargs))
        prog_bar.update()
    sys.stdout.write('\n')
    return results
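A minimal usage sketch (hypothetical task function; assumes track_progress and its ProgressBar dependency are importable):

def square(x):
    return x * x

results = track_progress(square, [1, 2, 3, 4])  # draws a progress bar while mapping
# results == [1, 4, 9, 16]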
126,341