Please provide a description of the function:

def rlmb_long_stochastic_discrete_simulation_deterministic_starts():
  hparams = rlmb_base_stochastic_discrete()
  hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long"
  hparams.ppo_epochs_num = 1000
  hparams.simulation_random_starts = False
  return hparams
[ "Long setting with stochastic discrete model & deterministic sim starts." ]
Please provide a description of the function:

def rlmb_long_stochastic_discrete_100steps():
  hparams = rlmb_long_stochastic_discrete()
  hparams.ppo_epoch_length = 100
  hparams.simulated_rollout_length = 100
  hparams.simulated_batch_size = 8
  return hparams
[ "Long setting with stochastic discrete model, changed ppo steps." ]
Please provide a description of the function:

def rlmb_long_stochastic_discrete_25steps():
  hparams = rlmb_long_stochastic_discrete()
  hparams.ppo_epoch_length = 25
  hparams.simulated_rollout_length = 25
  hparams.simulated_batch_size = 32
  return hparams
[ "Long setting with stochastic discrete model, changed ppo steps." ]
Please provide a description of the function:

def rlmb_base_stochastic_discrete_noresize():
  hparams = rlmb_base()
  hparams.generative_model = "next_frame_basic_stochastic_discrete"
  hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
  hparams.resize_height_factor = 1
  hparams.resize_width_factor = 1
  return hparams
[ "Base setting with stochastic discrete model." ]
Please provide a description of the function:

def rlmb_base_sv2p():
  hparams = rlmb_base()
  hparams.learning_rate_bump = 1.0
  hparams.generative_model = "next_frame_sv2p"
  hparams.generative_model_params = "next_frame_sv2p_atari"
  return hparams
[ "Base setting with sv2p as world model." ]
Please provide a description of the function:

def _rlmb_tiny_overrides():
  return dict(
      epochs=1,
      num_real_env_frames=128,
      model_train_steps=2,
      max_num_noops=1,
      eval_max_num_noops=1,
      generative_model_params="next_frame_tiny",
      stop_loop_early=True,
      resize_height_factor=2,
      resize_width_factor=2,
      wm_eval_rollout_ratios=[1],
      rl_env_max_episode_steps=7,
      eval_rl_env_max_episode_steps=7,
      simulated_rollout_length=2,
      eval_sampling_temps=[0.0, 1.0],
  )
[ "Parameters to override for tiny setting excluding agent-related hparams." ]
Please provide a description of the function:

def rlmb_ppo_tiny():
  hparams = rlmb_ppo_base()
  hparams = hparams.override_from_dict(_rlmb_tiny_overrides())
  update_hparams(hparams, dict(
      ppo_epochs_num=2,
      ppo_epoch_length=10,
      real_ppo_epoch_length=36,
      real_ppo_effective_num_agents=2,
      real_batch_size=1,
      eval_batch_size=1,
  ))
  return hparams
[ "Tiny set for testing." ]
Please provide a description of the function:

def rlmb_dqn_tiny():
  hparams = rlmb_dqn_base()
  hparams = hparams.override_from_dict(_rlmb_tiny_overrides())
  update_hparams(hparams, dict(
      simulated_rollout_length=2,
      dqn_time_limit=2,
      dqn_num_frames=128,
      real_dqn_replay_buffer_replay_capacity=100,
      dqn_replay_buffer_replay_capacity=100,
      real_dqn_agent_min_replay_history=10,
      dqn_agent_min_replay_history=10,
  ))
  return hparams
[ "Tiny set for testing." ]
Please provide a description of the function:

def rlmb_tiny_stochastic():
  hparams = rlmb_ppo_tiny()
  hparams.epochs = 1  # Too slow with 2 for regular runs.
  hparams.generative_model = "next_frame_basic_stochastic"
  hparams.generative_model_params = "next_frame_basic_stochastic"
  return hparams
[ "Tiny setting with a stochastic next-frame model." ]
Please provide a description of the function:

def rlmb_tiny_recurrent():
  hparams = rlmb_ppo_tiny()
  hparams.epochs = 1  # Too slow with 2 for regular runs.
  hparams.generative_model = "next_frame_basic_recurrent"
  hparams.generative_model_params = "next_frame_basic_recurrent"
  return hparams
[ "Tiny setting with a recurrent next-frame model." ]
Please provide a description of the function:

def rlmb_tiny_sv2p():
  hparams = rlmb_ppo_tiny()
  hparams.generative_model = "next_frame_sv2p"
  hparams.generative_model_params = "next_frame_sv2p_tiny"
  hparams.grayscale = False
  return hparams
[ "Tiny setting with a tiny sv2p model." ]
Please provide a description of the function:

def rlmb_grid(rhp):
  rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
  base = 100000
  medium = base // 2
  small = medium // 2
  rhp.set_discrete("loop.num_real_env_frames", [base, medium, small])

  # Dummy parameter to get 5 runs for each configuration
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
[ "Grid over games and frames, and 5 runs each for variance." ]
Please provide a description of the function:

def merge_unscoped_hparams(scopes_and_hparams):
  merged_values = {}
  for (scope, hparams) in scopes_and_hparams:
    for key, value in six.iteritems(hparams.values()):
      scoped_key = "%s.%s" % (scope, key)
      merged_values[scoped_key] = value
  return hparam.HParams(**merged_values)
[ "Merge multiple HParams into one with scopes." ]
Please provide a description of the function:

def split_scoped_hparams(scopes, merged_hparams):
  split_values = {scope: {} for scope in scopes}
  merged_values = merged_hparams.values()
  for scoped_key, value in six.iteritems(merged_values):
    scope = scoped_key.split(".")[0]
    key = scoped_key[len(scope) + 1:]
    split_values[scope][key] = value
  return [
      hparam.HParams(**split_values[scope]) for scope in scopes
  ]
[ "Split single HParams with scoped keys into multiple." ]
Please provide a description of the function:

def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):
  trial_hp_overrides = scoped_overrides.values()

  # Create loop, model, and ppo base HParams
  loop_hp = create_loop_hparams()
  model_hp_name = trial_hp_overrides.get(
      "loop.generative_model_params", loop_hp.generative_model_params)
  model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams)
  base_algo_params_name = trial_hp_overrides.get(
      "loop.base_algo_params", loop_hp.base_algo_params)
  algo_hp = registry.hparams(base_algo_params_name)

  # Merge them and then override with the scoped overrides
  combined_hp = merge_unscoped_hparams(
      zip(HP_SCOPES, [loop_hp, model_hp, algo_hp]))
  combined_hp.override_from_dict(trial_hp_overrides)

  # Split out the component hparams
  loop_hp, model_hp, algo_hp = (
      split_scoped_hparams(HP_SCOPES, combined_hp))

  # Dynamic register the model hp and set the new name in loop_hp
  model_hp_name = "model_hp_%s" % str(trial_id)
  dynamic_register_hparams(model_hp_name, model_hp)
  loop_hp.generative_model_params = model_hp_name

  # Dynamic register the algo hp and set the new name in loop_hp
  algo_hp_name = "algo_hp_%s" % str(trial_id)
  dynamic_register_hparams(algo_hp_name, algo_hp)
  loop_hp.base_algo_params = algo_hp_name

  return loop_hp
[ "Create HParams suitable for training loop from scoped HParams.\n\n Args:\n scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These\n parameters are overrides for the base HParams created by\n create_loop_hparams.\n trial_id: str, trial identifier. This is used to register unique HParams\n names for the underlying model and ppo HParams.\n\n Returns:\n HParams suitable for passing to training_loop.\n " ]
Please provide a description of the function:

def get_keys_to_action(self):
  # Based on gym AtariEnv.get_keys_to_action()
  keyword_to_key = {
      "UP": ord("w"),
      "DOWN": ord("s"),
      "LEFT": ord("a"),
      "RIGHT": ord("d"),
      "FIRE": ord(" "),
  }

  keys_to_action = {}

  for action_id, action_meaning in enumerate(self.action_meanings):
    keys_tuple = tuple(sorted([
        key for keyword, key in keyword_to_key.items()
        if keyword in action_meaning]))
    assert keys_tuple not in keys_to_action
    keys_to_action[keys_tuple] = action_id

  # Special actions:
  keys_to_action[(ord("r"),)] = self.RETURN_DONE_ACTION
  keys_to_action[(ord("c"),)] = self.TOGGLE_WAIT_ACTION
  keys_to_action[(ord("n"),)] = self.WAIT_MODE_NOOP_ACTION

  return keys_to_action
[ "Get mapping from keyboard keys to actions.\n\n Required by gym.utils.play in environment or top level wrapper.\n\n Returns:\n {\n Unicode code point for keyboard key: action (formatted for step()),\n ...\n }\n " ]
Please provide a description of the function:

def step(self, action):
  # Special codes
  if action in self._player_actions():
    envs_step_tuples = self._player_actions()[action]()
  elif self._wait and action == self.name_to_action_num["NOOP"]:
    # Ignore no-op, do not pass to environment.
    envs_step_tuples = self._last_step_tuples
  else:
    # Run action on environment(s).
    if action == self.WAIT_MODE_NOOP_ACTION:
      action = self.name_to_action_num["NOOP"]
    # Perform action on underlying environment(s).
    envs_step_tuples = self._step_envs(action)
    self._update_statistics(envs_step_tuples)

  self._last_step_tuples = envs_step_tuples
  ob, reward, done, info = self._player_step_tuple(envs_step_tuples)
  return ob, reward, done, info
[ "Pass action to underlying environment(s) or perform special action." ]
Please provide a description of the function:

def _augment_observation(self, ob, reward, cumulative_reward):
  img = PIL_Image().new("RGB", (ob.shape[1], self.HEADER_HEIGHT,))
  draw = PIL_ImageDraw().Draw(img)
  draw.text(
      (1, 0), "c:{:3}, r:{:3}".format(int(cumulative_reward), int(reward)),
      fill=(255, 0, 0)
  )
  draw.text(
      (1, 15), "fc:{:3}".format(int(self._frame_counter)),
      fill=(255, 0, 0)
  )
  header = np.asarray(img)
  del img
  header.setflags(write=1)
  # Top row color indicates if WAIT MODE is on.
  if self._wait:
    pixel_fill = (0, 255, 0)
  else:
    pixel_fill = (255, 0, 0)
  header[0, :, :] = pixel_fill
  return np.concatenate([header, ob], axis=0)
[ "Expand observation array with additional information header (top rows).\n\n Args:\n ob: observation\n reward: reward to be included in header.\n cumulative_reward: total cumulative reward to be included in header.\n\n Returns:\n Expanded observation array.\n " ]
Please provide a description of the function:

def _player_step_tuple(self, envs_step_tuples):
  ob_real, reward_real, _, _ = envs_step_tuples["real_env"]
  ob_sim, reward_sim, _, _ = envs_step_tuples["sim_env"]
  ob_err = absolute_hinge_difference(ob_sim, ob_real)

  ob_real_aug = self._augment_observation(ob_real, reward_real,
                                          self.cumulative_real_reward)
  ob_sim_aug = self._augment_observation(ob_sim, reward_sim,
                                         self.cumulative_sim_reward)
  ob_err_aug = self._augment_observation(
      ob_err, reward_sim - reward_real,
      self.cumulative_sim_reward - self.cumulative_real_reward
  )
  ob = np.concatenate([ob_sim_aug, ob_real_aug, ob_err_aug], axis=1)
  _, reward, done, info = envs_step_tuples["real_env"]
  return ob, reward, done, info
[ "Construct observation, return usual step tuple.\n\n Args:\n envs_step_tuples: tuples.\n\n Returns:\n Step tuple: ob, reward, done, info\n ob: concatenated images [simulated observation, real observation,\n difference], with additional information in header.\n reward: real environment reward\n done: True iff envs_step_tuples['real_env'][2] is True\n info: real environment info\n " ]
Please provide a description of the function:

def reset(self):
  self._frame_counter = 0
  ob_real = self.real_env.reset()
  # Initialize simulated environment with frames from real one.
  self.sim_env.add_to_initial_stack(ob_real)
  for _ in range(3):
    ob_real, _, _, _ = self.real_env.step(self.name_to_action_num["NOOP"])
    self.sim_env.add_to_initial_stack(ob_real)
  ob_sim = self.sim_env.reset()
  assert np.all(ob_real == ob_sim)
  self._last_step_tuples = self._pack_step_tuples((ob_real, 0, False, {}),
                                                  (ob_sim, 0, False, {}))
  self.set_zero_cumulative_rewards()
  ob, _, _, _ = self._player_step_tuple(self._last_step_tuples)
  return ob
[ "Reset simulated and real environments." ]
Please provide a description of the function:

def _step_envs(self, action):
  self._frame_counter += 1
  real_env_step_tuple = self.real_env.step(action)
  sim_env_step_tuple = self.sim_env.step(action)
  self.sim_env.add_to_initial_stack(real_env_step_tuple[0])
  return self._pack_step_tuples(real_env_step_tuple, sim_env_step_tuple)
[ "Perform step(action) on environments and update initial_frame_stack." ]
Please provide a description of the function:

def _player_step_tuple(self, envs_step_tuples):
  ob, reward, done, info = envs_step_tuples["env"]
  ob = self._augment_observation(ob, reward, self.cumulative_reward)
  return ob, reward, done, info
[ "Augment observation, return usual step tuple." ]
Please provide a description of the function:

def add_delta_deltas(filterbanks, name=None):
  delta_filter = np.array([2, 1, 0, -1, -2])
  delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter,
                                             "full")

  delta_filter_stack = np.array(
      [[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2,
       list(delta_delta_filter)],
      dtype=np.float32).T[:, None, None, :]

  delta_filter_stack /= np.sqrt(
      np.sum(delta_filter_stack**2, axis=0, keepdims=True))

  filterbanks = tf.nn.conv2d(
      filterbanks, delta_filter_stack, [1, 1, 1, 1], "SAME",
      data_format="NHWC", name=name)
  return filterbanks
[ "Compute first- and second-order time-derivative channels.\n\n Args:\n filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]\n name: scope name\n\n Returns:\n float32 tensor with shape [batch_size, len, num_bins, 3]\n " ]
Please provide a description of the function:

def compute_mel_filterbank_features(
    waveforms,
    sample_rate=16000, dither=1.0 / np.iinfo(np.int16).max, preemphasis=0.97,
    frame_length=25, frame_step=10, fft_length=None,
    window_fn=functools.partial(tf.contrib.signal.hann_window, periodic=True),
    lower_edge_hertz=80.0, upper_edge_hertz=7600.0, num_mel_bins=80,
    log_noise_floor=1e-3, apply_mask=True):
  # `stfts` is a complex64 Tensor representing the short-time Fourier
  # Transform of each signal in `signals`. Its shape is
  # [batch_size, ?, fft_unique_bins]
  # where fft_unique_bins = fft_length // 2 + 1

  # Find the wave length: the largest index for which the value is !=0
  # note that waveforms samples that are exactly 0.0 are quite common, so
  # simply doing sum(waveforms != 0, axis=-1) will not work correctly.
  wav_lens = tf.reduce_max(
      tf.expand_dims(tf.range(tf.shape(waveforms)[1]), 0) *
      tf.to_int32(tf.not_equal(waveforms, 0.0)),
      axis=-1) + 1
  if dither > 0:
    waveforms += tf.random_normal(tf.shape(waveforms), stddev=dither)
  if preemphasis > 0:
    waveforms = waveforms[:, 1:] - preemphasis * waveforms[:, :-1]
    wav_lens -= 1
  frame_length = int(frame_length * sample_rate / 1e3)
  frame_step = int(frame_step * sample_rate / 1e3)
  if fft_length is None:
    fft_length = int(2**(np.ceil(np.log2(frame_length))))

  stfts = tf.contrib.signal.stft(
      waveforms,
      frame_length=frame_length,
      frame_step=frame_step,
      fft_length=fft_length,
      window_fn=window_fn,
      pad_end=True)

  stft_lens = (wav_lens + (frame_step - 1)) // frame_step
  masks = tf.to_float(tf.less_equal(
      tf.expand_dims(tf.range(tf.shape(stfts)[1]), 0),
      tf.expand_dims(stft_lens, 1)))

  # An energy spectrogram is the magnitude of the complex-valued STFT.
  # A float32 Tensor of shape [batch_size, ?, 257].
  magnitude_spectrograms = tf.abs(stfts)

  # Warp the linear-scale, magnitude spectrograms into the mel-scale.
  num_spectrogram_bins = magnitude_spectrograms.shape[-1].value
  linear_to_mel_weight_matrix = (
      tf.contrib.signal.linear_to_mel_weight_matrix(
          num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
          upper_edge_hertz))
  mel_spectrograms = tf.tensordot(
      magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
  # Note: Shape inference for tensordot does not currently handle this case.
  mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate(
      linear_to_mel_weight_matrix.shape[-1:]))

  log_mel_sgram = tf.log(tf.maximum(log_noise_floor, mel_spectrograms))

  if apply_mask:
    log_mel_sgram *= tf.expand_dims(tf.to_float(masks), -1)

  return tf.expand_dims(log_mel_sgram, -1, name="mel_sgrams")
[ "Implement mel-filterbank extraction using tf ops.\n\n Args:\n waveforms: float32 tensor with shape [batch_size, max_len]\n sample_rate: sampling rate of the waveform\n dither: stddev of Gaussian noise added to waveform to prevent quantization\n artefacts\n preemphasis: waveform high-pass filtering constant\n frame_length: frame length in ms\n frame_step: frame_step in ms\n fft_length: number of fft bins\n window_fn: windowing function\n lower_edge_hertz: lowest frequency of the filterbank\n upper_edge_hertz: highest frequency of the filterbank\n num_mel_bins: filterbank size\n log_noise_floor: clip small values to prevent numeric overflow in log\n apply_mask: When working on a batch of samples, set padding frames to zero\n Returns:\n filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1]\n " ]
Please provide a description of the function:

def play_env_problem_randomly(env_problem, num_steps):
  # Reset all environments.
  env_problem.reset()

  # Play all environments, sampling random actions each time.
  for _ in range(num_steps):
    # Sample batch_size actions from the action space and stack them.
    actions = np.stack([env_problem.action_space.sample() for _ in range(
        env_problem.batch_size)])

    # Execute actions, observations are stored in `env_problem`.
    _, _, dones, _ = env_problem.step(actions)

    # Get the indices where we are done and reset those.
    env_problem.reset(indices=done_indices(dones))
[ "Plays the env problem by randomly sampling actions for `num_steps`." ]
Please provide a description of the function:

def generate_plaintext_random(plain_vocab, distribution, train_samples,
                              length):
  if distribution is not None:
    assert len(distribution) == len(plain_vocab)

  train_indices = np.random.choice(
      range(len(plain_vocab)), (train_samples, length), p=distribution)

  return train_indices
[ "Generates samples of text from the provided vocabulary.\n\n Args:\n plain_vocab: vocabulary.\n distribution: optional sampling probability for each vocabulary item.\n train_samples: number of samples to generate for training.\n length: length of each sample.\n\n Returns:\n train_indices (np.array of Integers): random integers for training.\n shape = [train_samples, length]\n " ]
Please provide a description of the function:

def encipher_shift(plaintext, plain_vocab, shift):
  ciphertext = []
  cipher = ShiftEncryptionLayer(plain_vocab, shift)

  for _, sentence in enumerate(plaintext):
    cipher_sentence = []
    for _, character in enumerate(sentence):
      encrypted_char = cipher.encrypt_character(character)
      cipher_sentence.append(encrypted_char)
    ciphertext.append(cipher_sentence)

  return ciphertext
[ "Encrypt plain text with a single shift layer.\n\n Args:\n plaintext (list of list of Strings): a list of plain text to encrypt.\n plain_vocab (list of Integer): unique vocabularies being used.\n shift (Integer): number of positions to shift; shifts to the right if\n positive.\n Returns:\n ciphertext (list of Strings): encrypted plain text.\n " ]
Please provide a description of the function:

def encipher_vigenere(plaintext, plain_vocab, key):
  ciphertext = []
  # generate Vigenere table
  layers = [
      ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab))
  ]

  for i, sentence in enumerate(plaintext):
    cipher_sentence = []
    for j, character in enumerate(sentence):
      key_idx = key[j % len(key)]
      encrypted_char = layers[key_idx].encrypt_character(character)
      cipher_sentence.append(encrypted_char)
    ciphertext.append(cipher_sentence)

  return ciphertext
[ "Encrypt plain text with given key.\n\n Args:\n plaintext (list of list of Strings): a list of plain text to encrypt.\n plain_vocab (list of Integer): unique vocabularies being used.\n key (list of Integer): key to encrypt cipher using Vigenere table.\n\n Returns:\n ciphertext (list of Strings): encrypted plain text.\n " ]
Please provide a description of the function:

def _super_stack(inputs, attention_bias, hparams, mp, padding="LEFT"):
  layers = hparams.layers.strip(",").split(",")
  moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
  if hparams.diet_experts:
    hsize, = moe_hidden_sizes

    def _diet_expert(x):
      return diet.diet_expert(x, hsize, diet.diet_adam_optimizer_params())

    expert_fn = _diet_expert
  else:
    expert_fn = expert_utils.ffn_expert_fn(
        hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)

  # scaled_dot_product_attention_with_projections uses a 3d attention bias
  # (no heads), where multihead_attention uses 4d attention bias.
  attention_bias_3d = mp(tf.squeeze, attention_bias, 1)
  mix_size = int(hparams.mix_fraction * hparams.hidden_size)
  accumulator = inputs
  x = inputs
  extra_losses = []
  for layer_num, layer_type in enumerate(layers):
    with tf.variable_scope("%s_%d" % (layer_type, layer_num)):
      tf.logging.info("%s_%d" % (layer_type, layer_num))
      if layer_type == "a":  # accumulate
        accumulator = mp(tf.add, x, accumulator)
        x = accumulator
      elif layer_type == "n":  # normalize
        x = mp(common_layers.apply_norm,
               x, hparams.norm_type, hparams.hidden_size,
               hparams.norm_epsilon)
      elif layer_type == "d":  # dropout
        x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)
      elif layer_type == "m":  # mix across shards
        def _split(t):
          return tuple(tf.split(
              t, [mix_size, hparams.hidden_size - mix_size], 2))
        to_mix, to_keep = mp(_split, x)
        mixed = expert_utils.all_reduce_ring(to_mix, mp)
        mixed = mp(tf.multiply, mixed, mp.n ** -0.5)
        x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep)
      elif layer_type == "att":  # single-head attention
        q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
               name="q_transform")
        x = mp(
            common_attention.scaled_dot_product_attention_simple,
            q, x, x, attention_bias_3d)
        x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
               name="o_transform")
      elif layer_type == "multihead-att":  # multi-head attention
        x = mp(
            common_attention.multihead_attention,
            x,
            None,
            attention_bias,  # bias
            hparams.multihead_attention_key_channels or hparams.hidden_size,
            hparams.multihead_attention_value_channels or hparams.hidden_size,
            hparams.hidden_size,
            hparams.multihead_attention_num_heads,
            hparams.attention_dropout)
      elif layer_type == "ffn":
        x = mp(
            common_layers.dense_relu_dense, x,
            hparams.filter_size, hparams.hidden_size)
      elif layer_type == "conv":  # convolution
        x = mp(
            common_layers.conv1d,
            x,
            hparams.hidden_size,
            hparams.kernel_height,
            activation=tf.nn.relu,
            padding=padding,
        )
      elif layer_type == "moe":
        # mixture of experts - each model shard has its own local MoE.
        x, loss = mp(
            expert_utils.local_moe,
            x,
            train=hparams.mode == tf.estimator.ModeKeys.TRAIN,
            expert_fn=expert_fn,
            num_experts=hparams.moe_num_experts,
            k=hparams.moe_k,
            loss_coef=hparams.moe_loss_coef)
        extra_losses.extend(loss)
      else:
        assert False, "unknown sublayer %s" % layer_type
  if extra_losses:
    extra_loss = tf.add_n(extra_losses)
  else:
    extra_loss = None
  return x, extra_loss
[ "A stack of super_lm layers.\n\n Args:\n inputs: a list of Tensors\n attention_bias: list of bias Tensor for self-attention\n (see common_attention.attention_bias())\n hparams: hyperparameters for model\n mp: a Parallelism object\n padding: a string\n\n Returns:\n y: a list of Tensors\n extra_loss: an optional scalar\n " ]
Please provide a description of the function:

def super_lm_base():
  hparams = common_hparams.basic_params1()
  hparams.hidden_size = 512
  hparams.moe_hidden_sizes = "512"
  hparams.batch_size = 16384
  hparams.max_length = 0
  # All hyperparameters ending in "dropout" are automatically set to 0.0
  # when not in training mode.
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.symbol_dropout = 0.1
  hparams.add_hparam("attention_dropout", 0.0)
  hparams.label_smoothing = 0.0
  hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_decay_scheme = "noam"
  hparams.learning_rate = 0.1
  hparams.learning_rate_warmup_steps = 8000
  hparams.initializer_gain = 1.0
  hparams.initializer = "uniform_unit_scaling"
  hparams.weight_decay = 0.0
  hparams.shared_embedding_and_softmax_weights = False
  hparams.layer_preprocess_sequence = "n"
  hparams.layer_postprocess_sequence = "da"
  # we only want one data shard.
  hparams.no_data_parallelism = True
  # bypass the symbol modality so that we can use model parallelism.
  hparams.bottom = {
      "inputs": modalities.identity_bottom,
      "targets": modalities.identity_bottom,
  }
  hparams.top = {
      "targets": modalities.identity_top,
  }
  hparams.add_hparam("filter_size", 512)
  hparams.add_hparam("mix_fraction", 0.5)
  # attention-related flags
  hparams.add_hparam("multihead_attention_num_heads", 4)
  hparams.add_hparam("multihead_attention_key_channels", 0)
  hparams.add_hparam("multihead_attention_value_channels", 0)
  hparams.add_hparam("pos", "timing")  # timing, none
  hparams.add_hparam(
      "layers", ("n,att,m,d,a," "n,ffn,m,d,a,") * 4 + "n,ffn,d")
  # Number of model shards - each one has separate parameters.
  # Changing this number invalidates checkpoints.
  hparams.add_hparam("num_model_shards", 8)
  hparams.add_hparam("diet_experts", False)
  return hparams
[ "Set of hyperparameters." ]
Please provide a description of the function:

def super_lm_moe():
  hparams = super_lm_base()
  hparams.layers = (
      ("n,att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d")
  hparams.moe_num_experts = 32
  hparams.moe_hidden_sizes = "1024"
  return hparams
[ "Add mixture of experts with ~1B params." ]
Please provide a description of the function:

def xmoe_tr_dense_2k():
  hparams = mtf_transformer2.mtf_bitransformer_base()
  hparams.encoder_layers = ["self_att", "drd"] * 4
  hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4
  hparams.batch_size = 64
  hparams.shared_embedding_and_softmax_weights = True
  hparams.mesh_shape = "batch:8"
  return hparams
[ "Series of architectural experiments on Translation.\n\n # run on 8-core setup\n\n 119M params, einsum=0.95e13\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:

def xmoe_tr_1d():
  hparams = xmoe_tr_dense_2k()
  hparams.encoder_layers = ["self_att", "moe_1d"] * 4
  hparams.decoder_layers = ["self_att", "enc_att", "moe_1d"] * 4
  hparams.layout = "batch:batch;experts:batch"
  hparams.moe_hidden_size = 2048
  hparams.moe_num_experts = 16
  return hparams
[ "Mixture of experts (16 experts).\n\n\n 623M Params, einsum=1.09e13\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:

def xmoe_tr_2d():
  hparams = xmoe_tr_dense_2k()
  hparams.mesh_shape = "b0:2;b1:4"
  hparams.outer_batch_size = 4
  hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
  hparams.encoder_layers = ["self_att", "moe_2d"] * 4
  hparams.decoder_layers = ["self_att", "enc_att", "moe_2d"] * 4
  hparams.moe_hidden_size = 2048
  hparams.moe_experts_x = 4
  hparams.moe_experts_y = 4
  return hparams
[ "Mixture of experts (16 experts).\n\n 623M Params, einsum=1.09e13\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:

def xmoe_dense_4k():
  hparams = mtf_transformer.mtf_transformer_base_lm()
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0

  # The following hparams are constant across all these experiments.
  hparams.batch_size = 128
  hparams.d_model = 512
  hparams.d_kv = 128
  hparams.num_heads = 4
  hparams.decoder_layers = ["att", "drd"] * 4
  hparams.shared_embedding_and_softmax_weights = False
  hparams.learning_rate_schedule = "rsqrt_decay"

  # We will vary the following parameters related to the ffn/moe layers.
  hparams.d_ff = 4096
  hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
  hparams.mesh_shape = "batch:8"
  return hparams
[ "Series of architectural experiments on cheap language models.\n\n For all of these architectures, we run on languagemodel_lm1b8k_packed\n for 32000 steps.\n\n All log-perplexities are per-token - multiply by 1.298 for per-word\n\n Results:\n model params(M) einsum alltoall mxu-util log-ppl\n xmoe_dense_4k 30 3.0e12 0 45% 3.31\n xmoe_dense_8k 46 4.7e12 0 49% 3.24\n xmoe_dense_64k 282 2.8e13 0 3.06\n xmoe_top_2 282 4.0e12 3.4e8 36% 3.07\n xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07\n xmoe_2d 282 5.3e12 7.6e8 34% 3.06\n\n Trained at 4x the batch size:\n xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07\n\n Note: configurations and code are likely to change without notice.\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:

def xmoe_top_2():
  hparams = xmoe_dense_4k()
  moe.set_default_moe_hparams(hparams)
  hparams.mesh_shape = "all:8"
  hparams.layout = "batch:all;experts:all"
  return hparams
[ "Mixture of experts (16 experts)." ]
Please provide a description of the function:

def xmoe_2d():
  hparams = xmoe_top_2()
  hparams.decoder_layers = ["att", "hmoe"] * 4
  hparams.mesh_shape = "b0:2;b1:4"
  hparams.outer_batch_size = 4
  hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
  hparams.moe_num_experts = [4, 4]
  return hparams
[ "Two-dimensional hierarchical mixture of 16 experts." ]
Please provide a description of the function:

def xmoe2_dense(sz):
  hparams = mtf_transformer.mtf_transformer_paper_lm(sz)
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.max_length = 1024
  hparams.batch_size = 128
  hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
  hparams.learning_rate_decay_steps = 65536
  hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
  hparams.mesh_shape = "batch:32"
  return hparams
[ "Series of architectural experiments on language modeling.\n\n Larger models than the ones above.\n\n All models are trained on sequences of 1024 tokens.\n\n We assume infinite training data, so no dropout necessary.\n We process 2^36 tokens in training = 524288 steps at batch size 128\n\n TODO(noam): find a large enough dataset for these experiments.\n\n You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,\n (1 epoch = ~46000 steps) so training will cover about 11 epochs.\n\n Note: configurations and code are likely to change without notice.\n\n Run on TPU 4x4 for 524288 steps unless otherwise indicated.\n\n Args:\n sz: an integer\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:

def xmoe2_v1():
  hparams = xmoe2_dense(0)
  moe.set_default_moe_hparams(hparams)
  hparams.decoder_layers = (
      ["local_att", "local_att", "drd",
       "att", "drd", "local_att", "local_att", "hmoe"] * 4)[:-1]
  hparams.d_ff = 2048
  hparams.d_kv = 128
  hparams.moe_hidden_size = 32768
  hparams.mesh_shape = "b0:4;b1:8"
  hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
  hparams.outer_batch_size = 4
  hparams.moe_num_experts = [8, 4]
  hparams.num_heads = 4
  return hparams
[ "Model incorporating mixture-of-experts and local-attention.\n\n ~6B parameters\n\n 32 experts in 3 hierarchical moe layers.\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:

def xmoe2_v1_x128():
  hparams = xmoe2_v1()
  hparams.moe_num_experts = [16, 8]
  hparams.outer_batch_size = 8
  hparams.mesh_shape = "b0:8;b1:16"
  hparams.batch_size = 512
  hparams.learning_rate_decay_steps = 16384
  return hparams
[ "128 experts, ~25B params - Train for 131072 steps on 8x8." ]
Please provide a description of the function:

def xmoe2_tiny():
  hparams = xmoe2_v1()
  hparams.decoder_layers = [
      "local_att", "att", "compressed_att", "drd", "hmoe"]
  hparams.d_model = 128
  hparams.moe_hidden_size = 512
  hparams.outer_batch_size = 0
  hparams.batch_size = 2
  hparams.mesh_shape = ""
  hparams.activation_dtype = "float32"
  return hparams
[ "Test on local cpu." ]
Please provide a description of the function:

def xmoe2_v1_l4k():
  hparams = xmoe2_v1()
  hparams.batch_size = 32
  hparams.max_length = 4096
  hparams.split_to_length = 4096
  hparams.reshape_logits_hack = True
  return hparams
[ "With sequence length 4096." ]
Please provide a description of the function:

def xmoe2_v1_l4k_local_only():
  hparams = xmoe2_v1_l4k()
  hparams.decoder_layers = [
      "local_att" if l == "att" else l for l in hparams.decoder_layers]
  return hparams
[ "With sequence length 4096; local attention only." ]
Please provide a description of the function:

def xmoe2_v1_l4k_global_only():
  hparams = xmoe2_v1_l4k()
  hparams.decoder_layers = [
      "att" if l == "local_att" else l for l in hparams.decoder_layers]
  return hparams
[ "With sequence length 4096; global attention only." ]
Please provide a description of the function:

def xmoe2_v1_l4k_compressed_c4():
  hparams = xmoe2_v1_l4k()
  hparams.decoder_layers = [
      "compressed_att" if l == "att" else l for l in hparams.decoder_layers]
  hparams.compression_factor = 4
  return hparams
[ "With compressed attention." ]
Please provide a description of the function:

def wiki_2x2_base():
  hparams = mtf_transformer.mtf_transformer_base_lm()
  hparams.shared_embedding_and_softmax_weights = False
  # no dropout - dataset is big enough to avoid overfitting.
  hparams.attention_dropout = 0.0
  hparams.relu_dropout = 0.0
  hparams.layer_prepostprocess_dropout = 0.0
  hparams.max_length = 1024
  # 4 sequences per core
  hparams.batch_size = 32
  # We don't use linear decay in these experiments, since we don't want
  # a sharp jump in quality at the end of the training schedule.
  # You can insert this once you find the right architecture.
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.mesh_shape = "all:8"
  hparams.layout = "batch:all;experts:all"

  # parameters for mixture-of-experts
  moe.set_default_moe_hparams(hparams)
  hparams.moe_num_experts = 16
  hparams.moe_hidden_size = 8192

  hparams.decoder_layers = ["att", "drd"] * 6
  hparams.d_model = 1024
  hparams.d_ff = 2048
  hparams.d_kv = 128
  hparams.num_heads = 4
  return hparams
[ "Set of architectural experiments - language model on wikipedia on a 2x2.\n\n 1 epoch = ~180k steps at batch size 32 - we may never finish an epoch!\n\n Returns:\n a hparams\n " ]
Please provide a description of the function:

def denoise_z15():
  hparams = xmoe2_dense_0()
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15}
  hparams.noising_use_eval_during_train = 0.25
  return hparams
[ "Replace tokens instead of masking." ]
Please provide a description of the function:

def denoise_v1_m15():
  hparams = xmoe2_v1()
  # no local attention
  # TODO(noam): non-masked version of local-attention
  hparams.decoder_layers = [
      "att" if l == "local_att" else l for l in hparams.decoder_layers]
  hparams.decoder_type = "denoising"
  hparams.noising_spec_train = {"type": "mask", "prob": 0.15}
  return hparams
[ "Denoising experiment." ]
Please provide a description of the function:

def _download_mlu_data(tmp_dir, data_dir):
  if not tf.gfile.Exists(data_dir):
    tf.gfile.MakeDirs(data_dir)

  filename = os.path.basename(_URL)
  file_path = os.path.join(tmp_dir, filename)
  headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) "
             "AppleWebKit/537.36 (KHTML, like Gecko) "
             "Chrome/63.0.3239.132 Safari/537.36"}
  resp = requests.get(_URL, headers=headers)
  with open(file_path, "wb") as f:
    f.write(resp.content)

  with tarfile.open(file_path, "r:gz") as tar:
    tar.extractall(tmp_dir)

  return tmp_dir
[ "Downloads and extracts the dataset.\n\n Args:\n tmp_dir: temp directory to download and extract the dataset\n data_dir: The base directory where data and vocab files are stored.\n\n Returns:\n tmp_dir: temp directory containing the raw data.\n " ]
Please provide a description of the function:

def _get_ngram_counter(ids, n):
  # Remove zero IDs used to pad the sequence.
  ids = [token_id for token_id in ids if token_id != 0]
  ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)]
  ngrams = set(ngram_list)
  counts = collections.Counter()
  for ngram in ngrams:
    counts[ngram] = 1
  return counts
[ "Get a Counter with the ngrams of the given ID list.\n\n Args:\n ids: np.array or a list corresponding to a single sentence\n n: n-gram size\n\n Returns:\n collections.Counter with ID tuples as keys and 1s as values.\n " ]
Please provide a description of the function:

def _get_fbeta_score(true_positives, selected, relevant, beta=1):
  precision = 1
  if selected > 0:
    precision = true_positives / selected
  if beta == 0:
    return precision
  recall = 1
  if relevant > 0:
    recall = true_positives / relevant
  if precision > 0 and recall > 0:
    beta2 = beta * beta
    return (1 + beta2) * precision * recall / (beta2 * precision + recall)
  else:
    return 0
[ "Compute Fbeta score.\n\n Args:\n true_positives: Number of true positive ngrams.\n selected: Number of selected ngrams.\n relevant: Number of relevant ngrams.\n beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only.\n\n Returns:\n Fbeta score.\n " ]
Please provide a description of the function:

def get_addition_score(source_counts, prediction_counts, target_counts):
  added_to_prediction_counts = prediction_counts - source_counts
  true_positives = sum((added_to_prediction_counts & target_counts).values())
  selected = sum(added_to_prediction_counts.values())
  # Note that in the paper the summation is done over all the ngrams in the
  # output rather than the ngrams in the following set difference. Since the
  # former does not make as much sense we compute the latter, which is also
  # done in the GitHub implementation.
  relevant = sum((target_counts - source_counts).values())
  return _get_fbeta_score(true_positives, selected, relevant)
[ "Compute the addition score (Equation 4 in the paper)." ]
Please provide a description of the function:

def get_keep_score(source_counts, prediction_counts, target_counts):
  source_and_prediction_counts = source_counts & prediction_counts
  source_and_target_counts = source_counts & target_counts
  true_positives = sum((source_and_prediction_counts &
                        source_and_target_counts).values())
  selected = sum(source_and_prediction_counts.values())
  relevant = sum(source_and_target_counts.values())
  return _get_fbeta_score(true_positives, selected, relevant)
[ "Compute the keep score (Equation 5 in the paper)." ]
Please provide a description of the function:

def get_deletion_score(source_counts, prediction_counts, target_counts,
                       beta=0):
  source_not_prediction_counts = source_counts - prediction_counts
  source_not_target_counts = source_counts - target_counts
  true_positives = sum((source_not_prediction_counts &
                        source_not_target_counts).values())
  selected = sum(source_not_prediction_counts.values())
  relevant = sum(source_not_target_counts.values())
  return _get_fbeta_score(true_positives, selected, relevant, beta=beta)
[ "Compute the deletion score (Equation 6 in the paper)." ]
Please provide a description of the function:

def get_sari_score(source_ids, prediction_ids, list_of_targets,
                   max_gram_size=4, beta_for_deletion=0):
  addition_scores = []
  keep_scores = []
  deletion_scores = []
  for n in range(1, max_gram_size + 1):
    source_counts = _get_ngram_counter(source_ids, n)
    prediction_counts = _get_ngram_counter(prediction_ids, n)
    # All ngrams in the targets with count 1.
    target_counts = collections.Counter()
    # All ngrams in the targets with count r/num_targets, where r is the
    # number of targets where the ngram occurs.
    weighted_target_counts = collections.Counter()
    num_nonempty_targets = 0
    for target_ids_i in list_of_targets:
      target_counts_i = _get_ngram_counter(target_ids_i, n)
      if target_counts_i:
        weighted_target_counts += target_counts_i
        num_nonempty_targets += 1
    for gram in weighted_target_counts.keys():
      weighted_target_counts[gram] /= num_nonempty_targets
      target_counts[gram] = 1
    keep_scores.append(get_keep_score(source_counts, prediction_counts,
                                      weighted_target_counts))
    deletion_scores.append(get_deletion_score(source_counts,
                                              prediction_counts,
                                              weighted_target_counts,
                                              beta_for_deletion))
    addition_scores.append(get_addition_score(source_counts,
                                              prediction_counts,
                                              target_counts))

  avg_keep_score = sum(keep_scores) / max_gram_size
  avg_addition_score = sum(addition_scores) / max_gram_size
  avg_deletion_score = sum(deletion_scores) / max_gram_size
  sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0
  return sari, avg_keep_score, avg_addition_score, avg_deletion_score
[ "Compute the SARI score for a single prediction and one or more targets.\n\n Args:\n source_ids: a list / np.array of SentencePiece IDs\n prediction_ids: a list / np.array of SentencePiece IDs\n list_of_targets: a list of target ID lists / np.arrays\n max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,\n bigrams, and trigrams)\n beta_for_deletion: beta for deletion F score.\n\n Returns:\n the SARI score and its three components: add, keep, and deletion scores\n " ]
Please provide a description of the function:

def get_sari(source_ids, prediction_ids, target_ids, max_gram_size=4):
  def get_sari_numpy(source_ids, prediction_ids, target_ids):
    sari_scores = []
    keep_scores = []
    add_scores = []
    deletion_scores = []
    # Iterate over elements in the batch.
    for source_ids_i, prediction_ids_i, target_ids_i in zip(
        source_ids, prediction_ids, target_ids):
      sari, keep, add, deletion = get_sari_score(
          source_ids_i, prediction_ids_i, target_ids_i, max_gram_size,
          BETA_FOR_SARI_DELETION_F_MEASURE)
      sari_scores.append(sari)
      keep_scores.append(keep)
      add_scores.append(add)
      deletion_scores.append(deletion)
    return (np.asarray(sari_scores), np.asarray(keep_scores),
            np.asarray(add_scores), np.asarray(deletion_scores))

  sari, keep, add, deletion = tf.py_func(
      get_sari_numpy,
      [source_ids, prediction_ids, target_ids],
      [tf.float64, tf.float64, tf.float64, tf.float64])
  return sari, keep, add, deletion
[ "Computes the SARI scores from the given source, prediction and targets.\n\n Args:\n source_ids: A 2D tf.Tensor of size (batch_size , sequence_length)\n prediction_ids: A 2D tf.Tensor of size (batch_size, sequence_length)\n target_ids: A 3D tf.Tensor of size (batch_size, number_of_targets,\n sequence_length)\n max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams,\n bigrams, and trigrams)\n\n Returns:\n A 4-tuple of 1D float Tensors of size (batch_size) for the SARI score and\n the keep, addition and deletion scores.\n ", "Iterate over elements in the batch and call the SARI function." ]
Please provide a description of the function:

def sari_score(predictions, labels, features, **unused_kwargs):
  if "inputs" not in features:
    raise ValueError("sari_score requires inputs feature")

  # Convert the inputs and outputs to a [batch_size, sequence_length] tensor.
  inputs = tf.squeeze(features["inputs"], axis=[-1, -2])
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  outputs = tf.squeeze(outputs, axis=[-1, -2])

  # Convert the labels to a [batch_size, 1, sequence_length] tensor.
  labels = tf.squeeze(labels, axis=[-1, -2])
  labels = tf.expand_dims(labels, axis=1)

  score, _, _, _ = get_sari(inputs, outputs, labels)
  return score, tf.constant(1.0)
[ "Computes the SARI scores from the given source, prediction and targets.\n\n An approximate SARI scoring method since we do not glue word pieces or\n decode the ids and tokenize the output. By default, we use ngram order of 4.\n Also, this does not have beam search.\n\n Args:\n predictions: tensor, model predictions.\n labels: tensor, gold output.\n features: dict, containing inputs.\n\n Returns:\n sari: float, approximate SARI score\n " ]
Please provide a description of the function:

def _get_mnist(directory):
  for filename in [
      _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
      _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME
  ]:
    generator_utils.maybe_download(directory, filename, _MNIST_URL + filename)
[ "Download all MNIST files to directory unless they are there." ]
Please provide a description of the function:

def _extract_mnist_images(filename, num_images):
  with gzip.open(filename) as bytestream:
    bytestream.read(16)
    buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images)
    data = np.frombuffer(buf, dtype=np.uint8)
    data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1)
  return data
[ "Extract images from an MNIST file into a numpy array.\n\n Args:\n filename: The path to an MNIST images file.\n num_images: The number of images in the file.\n\n Returns:\n A numpy array of shape [number_of_images, height, width, channels].\n " ]
Please provide a description of the function:

def _extract_mnist_labels(filename, num_labels):
  with gzip.open(filename) as bytestream:
    bytestream.read(8)
    buf = bytestream.read(num_labels)
    labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
  return labels
[ "Extract labels from an MNIST file into integers.\n\n Args:\n filename: The path to an MNIST labels file.\n num_labels: The number of labels in the file.\n\n Returns:\n A int64 numpy array of shape [num_labels]\n " ]
Please provide a description of the function:

def mnist_common_generator(tmp_dir,
                           training,
                           how_many,
                           data_filename,
                           label_filename,
                           start_from=0):
  data_path = os.path.join(tmp_dir, data_filename)
  labels_path = os.path.join(tmp_dir, label_filename)
  images = _extract_mnist_images(data_path, 60000 if training else 10000)
  labels = _extract_mnist_labels(labels_path, 60000 if training else 10000)
  # Shuffle the data to make sure classes are well distributed.
  data = list(zip(images, labels))
  random.shuffle(data)
  images, labels = list(zip(*data))
  return image_utils.image_generator(images[start_from:start_from + how_many],
                                     labels[start_from:start_from + how_many])
[ "Image generator for MNIST.\n\n Args:\n tmp_dir: path to temporary storage directory.\n training: a Boolean; if true, we use the train set, otherwise the test set.\n how_many: how many images and labels to generate.\n data_filename: file that contains features data.\n label_filename: file that contains labels.\n start_from: from which image to start.\n\n Returns:\n An instance of image_generator that produces MNIST images.\n " ]
Please provide a description of the function:

def mnist_generator(tmp_dir, training, how_many, start_from=0):
  _get_mnist(tmp_dir)
  d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME
  l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME
  return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)
[ "Image generator for MNIST.\n\n Args:\n tmp_dir: path to temporary storage directory.\n training: a Boolean; if true, we use the train set, otherwise the test set.\n how_many: how many images and labels to generate.\n start_from: from which image to start.\n\n Returns:\n An instance of image_generator that produces MNIST images.\n " ]
Please provide a description of the function:

def _get_fashion_mnist(directory):
  # Fashion mnist files have the same names as MNIST.
  # We must choose a separate name (by adding 'fashion-' prefix) in the
  # tmp_dir.
  for filename in [
      _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
      _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME
  ]:
    generator_utils.maybe_download(directory,
                                   _FASHION_MNIST_LOCAL_FILE_PREFIX + filename,
                                   _FASHION_MNIST_URL + filename)
[ "Download all FashionMNIST files to directory unless they are there." ]
Please provide a description of the function:

def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):
  _get_fashion_mnist(tmp_dir)
  d = _FASHION_MNIST_LOCAL_FILE_PREFIX + (
      _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME)
  l = _FASHION_MNIST_LOCAL_FILE_PREFIX + (
      _MNIST_TRAIN_LABELS_FILENAME
      if training else _MNIST_TEST_LABELS_FILENAME)
  return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from)
[ "Image generator for FashionMNIST.\n\n Args:\n tmp_dir: path to temporary storage directory.\n training: a Boolean; if true, we use the train set, otherwise the test set.\n how_many: how many images and labels to generate.\n start_from: from which image to start.\n\n Returns:\n An instance of image_generator that produces MNIST images.\n " ]
Please provide a description of the function:

def generate_data(timeseries_length, timeseries_params):
  x = range(timeseries_length)
  multi_timeseries = []
  for p in timeseries_params:
    # Trend
    y1 = [p["m"] * i + p["b"] for i in x]
    # Period
    y2 = [p["A"] * p["fn"](i / p["freqcoeff"]) for i in x]
    # Noise
    y3 = np.random.normal(0, p["rndA"], timeseries_length).tolist()
    # Sum of Trend, Period and Noise. Replace negative values with zero.
    y = [max(a + b + c, 0) for a, b, c in zip(y1, y2, y3)]
    multi_timeseries.append(y)
  return multi_timeseries
[ "Generates synthetic timeseries using input parameters.\n\n Each generated timeseries has timeseries_length data points.\n Parameters for each timeseries are specified by timeseries_params.\n\n Args:\n timeseries_length: Number of data points to generate for each timeseries.\n timeseries_params: Parameters used to generate the timeseries. The following\n parameters need to be specified for each timeseries:\n m = Slope of the timeseries used to compute the timeseries trend.\n b = y-intercept of the timeseries used to compute the timeseries trend.\n A = Timeseries amplitude used to compute timeseries period.\n freqcoeff = Frequency coefficient used to compute timeseries period.\n rndA = Random amplitude used to inject noise into the timeseries.\n fn = Base timeseries function (np.cos or np.sin).\n Example params for two timeseries.\n [{\"m\": 0.006, \"b\": 300.0, \"A\":50.0, \"freqcoeff\":1500.0, \"rndA\":15.0,\n \"fn\": np.sin},\n {\"m\": 0.000, \"b\": 500.0, \"A\":35.0, \"freqcoeff\":3500.0, \"rndA\":25.0,\n \"fn\": np.cos}]\n\n Returns:\n Multi-timeseries (list of list).\n " ]
Please provide a description of the function:

def next_frame_basic_stochastic():
  hparams = basic_deterministic_params.next_frame_basic_deterministic()
  hparams.stochastic_model = True
  hparams.add_hparam("latent_channels", 1)
  hparams.add_hparam("latent_std_min", -5.0)
  hparams.add_hparam("num_iterations_1st_stage", 15000)
  hparams.add_hparam("num_iterations_2nd_stage", 15000)
  hparams.add_hparam("latent_loss_multiplier", 1e-3)
  hparams.add_hparam("latent_loss_multiplier_dynamic", False)
  hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5)
  hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0)
  hparams.add_hparam("latent_loss_multiplier_schedule", "constant")
  hparams.add_hparam("latent_num_frames", 0)  # 0 means use all frames.
  hparams.add_hparam("anneal_end", 50000)
  hparams.add_hparam("information_capacity", 0.0)
  return hparams
[ "Basic 2-frame conv model with stochastic tower." ]
Please provide a description of the function:

def next_frame_sampling_stochastic():
  hparams = basic_deterministic_params.next_frame_sampling()
  hparams.stochastic_model = True
  hparams.add_hparam("latent_channels", 1)
  hparams.add_hparam("latent_std_min", -5.0)
  hparams.add_hparam("num_iterations_1st_stage", 15000)
  hparams.add_hparam("num_iterations_2nd_stage", 15000)
  hparams.add_hparam("latent_loss_multiplier", 1e-3)
  hparams.add_hparam("latent_loss_multiplier_dynamic", False)
  hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5)
  hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0)
  hparams.add_hparam("latent_loss_multiplier_schedule", "constant")
  hparams.add_hparam("latent_num_frames", 0)  # 0 means use all frames.
  hparams.add_hparam("anneal_end", 40000)
  hparams.add_hparam("information_capacity", 0.0)
  return hparams
[ "Basic 2-frame conv model with stochastic tower." ]
Please provide a description of the function:

def next_frame_basic_stochastic_discrete():
  hparams = basic_deterministic_params.next_frame_sampling()
  hparams.batch_size = 4
  hparams.video_num_target_frames = 6
  hparams.scheduled_sampling_mode = "prob_inverse_lin"
  hparams.scheduled_sampling_decay_steps = 40000
  hparams.scheduled_sampling_max_prob = 1.0
  hparams.dropout = 0.15
  hparams.filter_double_steps = 3
  hparams.hidden_size = 96
  hparams.learning_rate_constant = 0.002
  hparams.learning_rate_warmup_steps = 2000
  hparams.learning_rate_schedule = "linear_warmup * constant"
  hparams.concat_internal_states = True
  hparams.video_modality_loss_cutoff = 0.03
  hparams.add_hparam("bottleneck_bits", 128)
  hparams.add_hparam("bottleneck_noise", 0.1)
  hparams.add_hparam("discretize_warmup_steps", 40000)
  hparams.add_hparam("latent_rnn_warmup_steps", 40000)
  hparams.add_hparam("latent_rnn_max_sampling", 0.5)
  hparams.add_hparam("latent_use_max_probability", 0.8)
  hparams.add_hparam("full_latent_tower", False)
  hparams.add_hparam("latent_predictor_state_size", 128)
  hparams.add_hparam("latent_predictor_temperature", 1.0)
  hparams.add_hparam("complex_addn", True)
  hparams.add_hparam("recurrent_state_size", 64)
  return hparams
[ "Basic 2-frame conv model with stochastic discrete latent." ]
Please provide a description of the function:

def next_frame_stochastic_discrete_range(rhp):
  rhp.set_float("learning_rate_constant", 0.001, 0.01)
  rhp.set_float("dropout", 0.2, 0.6)
  rhp.set_int("filter_double_steps", 3, 5)
  rhp.set_discrete("hidden_size", [64, 96, 128])
  rhp.set_discrete("bottleneck_bits", [32, 64, 128, 256])
  rhp.set_discrete("video_num_target_frames", [4])
  rhp.set_float("bottleneck_noise", 0.0, 0.2)
[ "Next frame stochastic discrete tuning grid." ]
Please provide a description of the function:

def nested_map(x, f):
  if isinstance(x, list):
    return [nested_map(y, f) for y in x]
  if isinstance(x, tuple):
    return tuple([nested_map(y, f) for y in x])
  if isinstance(x, dict):
    return {k: nested_map(x[k], f) for k in x}
  return f(x)
[ "Map the function f to the nested structure x (dicts, tuples, lists)." ]
Please provide a description of the function:

def shapes(x):
  def shape(x):
    try:
      return x.shape
    except Exception:  # pylint: disable=broad-except
      return []
  return nested_map(x, shape)
[ "Get a structure of shapes for a structure of nested arrays." ]
Please provide a description of the function:

def sizes(x):
  def size(x):
    try:
      return x.size
    except Exception:  # pylint: disable=broad-except
      return 0
  return nested_map(x, size)
[ "Get a structure of sizes for a structure of nested arrays." ]
Please provide a description of the function:

def _find_frame(stack, start=0):
  # We want to find the first place where the layer was called
  # that is *not* an __init__ function of an inheriting layer.
  frame = inspect.getframeinfo(stack[start][0])
  # If we are in an init, move on.
  if frame.function == '__init__':
    return _find_frame(stack, start + 1)
  return frame
[ "Find the frame with the caller on the stack." ]
Please provide a description of the function:

def _shorten_file_path(line):
  start = line.lower().find('file')
  if start < 0:
    return line
  first_quote = line.find('"', start)
  if first_quote < 0:
    return line
  second_quote = line.find('"', first_quote + 1)
  if second_quote < 0:
    return line
  path = line[first_quote + 1:second_quote]
  new_path = '/'.join(path.split('/')[-3:])
  return line[:first_quote] + '[...]/' + new_path + line[second_quote + 1:]
[ "Shorten file path in error lines for more readable tracebacks." ]
Please provide a description of the function:

def _short_traceback(skip=3):
  counter, res = 0, []
  # Skipping 3 lines by default: the top (useless) and self-call.
  lines = traceback.format_exc().splitlines()[skip:]
  for l in lines:
    res.append(_shorten_file_path(l))
    if counter % 2 == 1:
      res.append('')
    counter += 1
    # If we see a LayerError, the traceback has already been processed.
    if l.startswith('LayerError'):
      # Skip 4 back except last as these are internal base-layer calls.
      res = res[:-4] + [res[-1]]
      res += lines[counter:]
      break
  return '\n'.join(res)
[ "Cleaned-up form of traceback." ]
Please provide a description of the function:

def layer(output_shape=None, new_parameters=None):
  def layer_decorator(call):
    def output_shape_fun(self, input_shape):
      if output_shape is None:
        return input_shape
      kwargs = self._init_kwargs  # pylint: disable=protected-access
      return output_shape(input_shape, **kwargs)

    def new_parameters_fun(self, input_shape, rng):
      if new_parameters is None:
        return ()
      kwargs = self._init_kwargs  # pylint: disable=protected-access
      return new_parameters(input_shape, rng, **kwargs)

    def call_fun(self, x, params=(), **kwargs):
      # Merge on-call kwargs with class-kwargs.
      call_kwargs = kwargs.copy()
      call_kwargs.update(self._init_kwargs)  # pylint: disable=protected-access
      # Call with the merged kwargs.
      return call(x, params=params, **call_kwargs)

    # Set doc for python help; only copy docs from functions that were given.
    call_fun.__doc__ = call.__doc__
    if output_shape is not None:
      output_shape_fun.__doc__ = output_shape.__doc__
    if new_parameters is not None:
      new_parameters_fun.__doc__ = new_parameters.__doc__

    # Create the class.
    cls = type(call.__name__, (Layer,),
               {'call': call_fun,
                'output_shape': output_shape_fun,
                'new_parameters': new_parameters_fun})
    return cls
  return layer_decorator
[ "Create a layer class from a function.", "Decorating the call function.", "The call function of the created class, derived from call." ]
Please provide a description of the function:

def initialize(self, input_shape, rng):
  try:
    # Re-using this layer, no new parameters.
    if not self._first_init:
      return ()

    # First call of this layer, create parameters.
    self._first_init = False
    self._params = self.new_parameters(input_shape, rng)
    return self._params
  except Exception:
    name, trace = self.__class__.__name__, _short_traceback()
    raise LayerError(name, 'initialize', self._caller, input_shape, trace)
[ "Initialize the layer given an input shape and rng.\n\n Returns new_parameters(input_shape, rng) on the first call and () on any\n subsequent call, as the layer is already initialized. This is used for\n networks that share parameters, so the layer only produces them once.\n\n Note that all arguments and return values can be tuples or dictionaries\n or arbitrary nested structures composed of tuples and dictionaries.\n\n Args:\n input_shape: a tuple representing the shape of the input.\n rng: random number generator.\n\n Returns:\n Newly created parameters on the first call and () on all subsequent calls.\n " ]
Please provide a description of the function:

def _references_content(ref_files):
  example_spec = {
      "url": tf.FixedLenFeature([], tf.string),
      "content": tf.FixedLenFeature([], tf.string),
  }
  data = {}
  for ex in generator_utils.tfrecord_iterator(
      ref_files, gzipped=True, example_spec=example_spec):
    data[ex["url"]] = text_encoder.to_unicode(ex["content"])
  return data
[ "Returns dict<str ref_url, str ref_content>." ]
Please provide a description of the function:def _wiki_urls_for_shard(shard_id, urls_dir=None): urls_dir = urls_dir or WIKI_URLS_DIR urls_filepath = os.path.join(urls_dir, WIKI_URLS_FILE % shard_id) with tf.gfile.GFile(urls_filepath) as f: return json.loads(f.read())
[ "Urls for chunk: dict<str wiki_url, list<str> ref_urls>." ]
Please provide a description of the function:def _wiki_articles(shard_id, wikis_dir=None): if not wikis_dir: wikis_dir = WIKI_CONTENT_DIR with tf.Graph().as_default(): dataset = tf.data.TFRecordDataset( cc_utils.readahead( os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)), buffer_size=16 * 1000 * 1000) def _parse_example(ex_ser): features = { "url": tf.VarLenFeature(tf.string), "title": tf.VarLenFeature(tf.string), "section_titles": tf.VarLenFeature(tf.string), "section_texts": tf.VarLenFeature(tf.string), } ex = tf.parse_single_example(ex_ser, features) for k in ex.keys(): ex[k] = ex[k].values ex["url"] = ex["url"][0] ex["title"] = ex["title"][0] return ex dataset = dataset.map(_parse_example, num_parallel_calls=32) dataset = dataset.prefetch(100) record_it = dataset.make_one_shot_iterator().get_next() with tf.Session() as sess: while True: try: ex = sess.run(record_it) except tf.errors.OutOfRangeError: break sections = [ WikipediaSection(title=text_encoder.to_unicode(title), text=text_encoder.to_unicode(text)) for title, text in zip(ex["section_titles"], ex["section_texts"]) ] yield WikipediaArticle( url=text_encoder.to_unicode(ex["url"]), title=text_encoder.to_unicode(ex["title"]), sections=sections)
[ "Generates WikipediaArticles from GCS that are part of shard shard_id.", "Parse serialized Example containing Wikipedia article content." ]
Please provide a description of the function:def rank_reference_paragraphs(wiki_title, references_content, normalize=True): normalized_title = _normalize_text(wiki_title) title_tokens = _tokens_to_score( set(tokenizer.encode(text_encoder.native_to_unicode(normalized_title)))) ref_paragraph_info = [] doc_counts = collections.defaultdict(int) for ref in references_content: for paragraph in ref.split("\n"): normalized_paragraph = _normalize_text(paragraph) if cc_utils.filter_paragraph(normalized_paragraph): # Skip paragraph continue counts = _token_counts(normalized_paragraph, title_tokens) for token in title_tokens: if counts[token]: doc_counts[token] += 1 content = normalized_paragraph if normalize else paragraph info = {"content": content, "counts": counts} ref_paragraph_info.append(info) for info in ref_paragraph_info: score = 0. for token in title_tokens: term_frequency = info["counts"][token] inv_doc_frequency = ( float(len(ref_paragraph_info)) / max(doc_counts[token], 1)) score += term_frequency * math.log(inv_doc_frequency) info["score"] = score ref_paragraph_info.sort(key=lambda el: el["score"], reverse=True) return [info["content"] for info in ref_paragraph_info]
[ "Rank and return reference paragraphs by tf-idf score on title tokens." ]
Please provide a description of the function:def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path, out_filepaths): # * Join the Wikipedia articles with their references # * Run Tf-idf to sort reference paragraphs # * Encode the Wikipedia and reference text with the vocabulary # * Write out TFRecords of tensorflow.Example tf.logging.info("Processing %d input shards into %d output files.", len(shard_ids), len(out_filepaths)) vocab = text_encoder.SubwordTextEncoder(vocab_path) eot_ids = vocab.encode(EOT) def example_generator(): stats = dict(total_original_wikis=0, total_original_refs=0, total_found_refs=0, ref_lengths=[], wiki_original_refs=[], wiki_found_refs=[], wikis_skipped_no_refs=0, wikis_skipped_short_lead=0, num_wikis_written=0) ref_files_by_shard = _references_files_by_shard(refs_dir) for shard_id in shard_ids: tf.logging.info("Processing shard %d", shard_id) wiki_urls = _wiki_urls_for_shard(shard_id, urls_dir) tf.logging.info("Loaded wiki URLs for shard") refs_content = _references_content(ref_files_by_shard[shard_id]) tf.logging.info("Loaded reference content for shard") for i, wiki in enumerate(_wiki_articles(shard_id, wikis_dir)): if not i % 1000: tf.logging.info("Processing wiki index %d for shard %d", i, shard_id) stats["total_original_wikis"] += 1 # Get reference content wiki_ref_content = [] ref_urls = wiki_urls[wiki.url]["refs"] stats["total_original_refs"] += len(ref_urls) stats_wiki_original_refs = len(ref_urls) stats_wiki_found_refs = 0 for ref_url in ref_urls: ref_content = refs_content.get(ref_url) if not ref_content: continue stats["total_found_refs"] += 1 stats["ref_lengths"].append(len(ref_content)) stats_wiki_found_refs += 1 wiki_ref_content.append(ref_content) stats["wiki_original_refs"].append(stats_wiki_original_refs) stats["wiki_found_refs"].append(stats_wiki_found_refs) if not wiki_ref_content or len(wiki_ref_content) < _MIN_REFS: # No/few refs were found stats["wikis_skipped_no_refs"] += 1 continue # Rank reference paragraphs with TFIDF wiki_title = _normalize_text(wiki.title) ranked_paragraphs = rank_reference_paragraphs(wiki_title, wiki_ref_content) # Construct inputs from Wiki title and references inputs = [] inputs.extend(vocab.encode(wiki_title)) inputs.extend(eot_ids) for paragraph in ranked_paragraphs: if len(inputs) >= 1e6: break paragraph += " " inputs.extend(vocab.encode(paragraph)) # Construct targets from article sections targets, section_boundaries = _encode_wiki_sections( wiki.sections, vocab) # Skip if lead section is too short if (not section_boundaries or section_boundaries[0] < _MIN_LEADSECTION_TOKENS): stats["wikis_skipped_short_lead"] += 1 continue inputs.append(text_encoder.EOS_ID) targets.append(text_encoder.EOS_ID) stats["num_wikis_written"] += 1 yield { "inputs": inputs, "targets": targets, "section_boundaries": section_boundaries, } tf.logging.info("Total: %d, Skipped: %d", stats["num_wikis_written"], stats["total_original_wikis"] - stats["num_wikis_written"]) tf.logging.info("Total refs: %d, Skipped refs: %d", stats["total_found_refs"], stats["total_original_refs"] - stats["total_found_refs"]) stats_fname = os.path.join(os.path.split(out_filepaths[0])[0], "stats.%d.json" % shard_ids[0]) with tf.gfile.Open(stats_fname, "w") as f: f.write(json.dumps(stats)) generator_utils.generate_files(example_generator(), out_filepaths)
[ "Produce examples from shard_ids to out_filepaths.", "Generate Example dicts." ]
Please provide a description of the function:def _encode_wiki_sections(sections, vocab): ids = [] section_boundaries = [] for i, section in enumerate(sections): if i > 0: # Skip including article title ids.extend(vocab.encode(_format_title(_normalize_text(section.title)))) ids.extend(vocab.encode(_normalize_text(section.text))) section_boundaries.append(len(ids)) return ids, section_boundaries
[ "Encodes sections with vocab. Returns ids and section boundaries." ]
Please provide a description of the function:def extract_references_from_wets(wet_files, metadata_dir, out_dir, tmp_dir=None): # Setup output files shard_files = make_ref_shard_files(out_dir) num_refs = 0 for i, wet_file in enumerate(wet_files): num_refs_in_wet = 0 tf.logging.info("Processing file %d", i) # Read metadata file metadata_fname = os.path.join( metadata_dir, os.path.basename(wet_file)) + cc_utils.METADTA_SUFFIX with tf.gfile.Open(cc_utils.readahead(metadata_fname)) as f: wet_metadata = json.loads(f.read()) if not wet_metadata: # No references in this WET file continue if wet_file.startswith("http"): # download if not tmp_dir: tmp_dir = tempfile.gettempdir() record_gen = cc_utils.wet_records_from_url(wet_file, tmp_dir) else: # local record_gen = cc_utils.wet_records_from_file_obj( cc_utils.gzip_memfile(wet_file), take_ownership=True) for wet_record in record_gen: shard_ids = wet_metadata.get(wet_record.url) if not shard_ids: # URL not in dataset continue # Serialize and write out ex = _make_example_from_record(wet_record) ex_str = ex.SerializeToString() for shard_id in shard_ids: shard_files[shard_id].write(ex_str) num_refs += 1 num_refs_in_wet += 1 tf.logging.info("Wrote out %d references for this WET", num_refs_in_wet) tf.logging.info("Wrote out %d references total", num_refs) # Cleanup for shard_file in shard_files: shard_file.close()
[ "Extract references from WET files into sharded output files." ]
Please provide a description of the function:def _dump_to_pages(dump): pos = 0 ret = [] start_tag = u"<page>\n" end_tag = u"</page>\n" while True: start_pos = dump.find(start_tag, pos) if start_pos == -1: break start_pos += len(start_tag) end_pos = dump.find(end_tag, start_pos) if end_pos == -1: break ret.append(dump[start_pos:end_pos]) pos = end_pos + len(end_tag) return ret
[ "Extract pages from an xml dump.\n\n Args:\n dump: a unicode string\n Returns:\n a list of unicode strings\n " ]
Please provide a description of the function:def _page_to_title(page): start_tag = u"<title>" end_tag = u"</title>" start_pos = page.find(start_tag) end_pos = page.find(end_tag) assert start_pos != -1 assert end_pos != -1 start_pos += len(start_tag) return page[start_pos:end_pos]
[ "Extract the title from a page.\n\n Args:\n page: a unicode string\n Returns:\n a unicode string\n " ]
Please provide a description of the function:def _page_to_text(page): # text start tag looks like "<text ..otherstuff>" start_pos = page.find(u"<text") assert start_pos != -1 end_tag_pos = page.find(u">", start_pos) assert end_tag_pos != -1 end_tag_pos += len(u">") end_pos = page.find(u"</text>") if end_pos == -1: return u"" return page[end_tag_pos:end_pos]
[ "Extract the text from a page.\n\n Args:\n page: a unicode string\n Returns:\n a unicode string\n " ]
Please provide a description of the function:def _find_and_replace(text, start_string, end_string, replace_fn): ret = u"" current_pos = 0 while True: start_pos = text.find(start_string, current_pos) if start_pos == -1: ret += text[current_pos:] break ret += text[current_pos:start_pos] end_pos = text.find(end_string, start_pos + len(start_string)) if end_pos == -1: break ret += replace_fn(text[start_pos + len(start_string):end_pos]) current_pos = end_pos + len(end_string) return ret
[ "Remove everything found between instances of start_string and end_string.\n\n Replace each such instance with replace_fn(removed_text)\n\n e.g. _find_and_replace(u\"the [[fat]] cat [[sat]]\", u\"[[\", u\"]]\", lambda x: x)\n = u\"the fat cat sat\"\n\n Args:\n text: a unicode string\n start_string: a unicode string\n end_string: a unicode string\n replace_fn: a unary function from unicode string to unicode string\n\n Returns:\n a string\n " ]
Please provide a description of the function:def _remove_double_brackets(text): def replacement_fn(s): if u":" in s: # this is probably a category or something like that. return "" # keep the part after the bar. bar_pos = s.find(u"|") if bar_pos == -1: return s return s[bar_pos + 1:] return _find_and_replace(text, u"[[", u"]]", replacement_fn)
[ "Remove double brackets (internal links) but leave the viewable text.\n\n Args:\n text: a unicode string\n Returns:\n a unicode string\n " ]
Please provide a description of the function:def image_encoder(image_feat, hparams, name="image_encoder", save_weights_to=None, make_image_summary=True): x = image_feat image_hidden_size = hparams.image_hidden_size or hparams.hidden_size image_filter_size = hparams.image_filter_size or hparams.filter_size with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, None, hparams.attention_key_channels or image_hidden_size, hparams.attention_value_channels or image_hidden_size, image_hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.image_self_attention_type, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "image_feat_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "image_feat_self_attention_postprocess_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), image_filter_size, image_hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs( "norms", "image_feat_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "image_feat_ffn_postprocess_%d"%(layer), tf.norm(x, axis=-1)) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. return common_layers.layer_preprocess(x, hparams)
[ "A stack of self attention layers." ]
Please provide a description of the function:def prepare_question_encoder(inputs, hparams): encoder_input = inputs # Usual case - not a packed dataset. encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( encoder_padding) encoder_self_attention_bias = ignore_padding if hparams.pos == "timing": encoder_input = common_attention.add_timing_signal_1d(encoder_input) elif hparams.pos == "emb": encoder_input = common_attention.add_positional_embedding( encoder_input, hparams.max_length, "inputs_positional_embedding", None) return (encoder_input, encoder_self_attention_bias)
[ "Prepare question encoder.\n\n Args:\n inputs: a Tensor.\n hparams: run hyperparameters\n\n Returns:\n encoder_input: a Tensor, bottom of encoder stack\n encoder_self_attention_bias: a bias tensor for use in encoder self-attention\n " ]
Please provide a description of the function:def question_encoder(question, question_self_attention_bias, hparams, name="question_encoder", save_weights_to=None, make_image_summary=True): x = question with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, question_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.question_self_attention_type, block_length=hparams.block_length, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "query_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "query_self_attention_postprocess_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs( "norms", "query_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "query_ffn_postprocess_%d"%(layer), tf.norm(x, axis=-1)) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. return common_layers.layer_preprocess(x, hparams)
[ "A stack of self attention layers." ]
Please provide a description of the function:def attn(image_feat, query, hparams, name="attn", save_weights_to=None, make_image_summary=True): with tf.variable_scope(name, "attn", values=[image_feat, query]): total_key_depth = hparams.attention_key_channels or hparams.hidden_size total_value_depth = hparams.attention_value_channels or hparams.hidden_size num_heads = hparams.num_heads query = tf.expand_dims(query, 1) q, k, v = common_attention.compute_qkv( query, image_feat, total_key_depth, total_value_depth, ) q = common_attention.split_heads(q, num_heads) k = common_attention.split_heads(k, num_heads) v = common_attention.split_heads(v, num_heads) if hparams.scale_dotproduct: key_depth_per_head = total_key_depth // num_heads q *= key_depth_per_head**-0.5 # image_feat is input as v x = common_attention.dot_product_attention( q, k, v, None, dropout_rate=hparams.attention_dropout, image_shapes=None, save_weights_to=save_weights_to, make_image_summary=make_image_summary) x = common_attention.combine_heads(x) return tf.squeeze(x, axis=1)
[ "Attention on image feature with question as query." ]
Please provide a description of the function:def mlp(feature, hparams, name="mlp"): with tf.variable_scope(name, "mlp", values=[feature]): num_mlp_layers = hparams.num_mlp_layers mlp_size = hparams.mlp_size for _ in range(num_mlp_layers): feature = common_layers.dense(feature, mlp_size, activation=None) utils.collect_named_outputs("norms", "mlp_feature", tf.norm(feature, axis=-1)) feature = common_layers.layer_norm(feature) feature = tf.nn.relu(feature) feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout) return feature
[ "Multi layer perceptron with dropout and relu activation." ]
Please provide a description of the function:def prepare_image_question_encoder(image_feat, question, hparams): encoder_input = tf.concat([image_feat, question], axis=1) encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( encoder_padding) encoder_self_attention_bias = ignore_padding encoder_decoder_attention_bias = ignore_padding # Usual case - not a packed dataset. if hparams.pos == "timing": question = common_attention.add_timing_signal_1d(question) elif hparams.pos == "emb": question = common_attention.add_positional_embedding( question, hparams.max_length, "inputs_positional_embedding", None) encoder_input = tf.concat([image_feat, question], axis=1) return (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias)
[ "Prepare encoder.\n\n Args:\n image_feat: a Tensor.\n question: a Tensor.\n hparams: run hyperparameters\n\n Returns:\n encoder_input: a Tensor, bottom of encoder stack\n encoder_self_attention_bias: a bias tensor for use in encoder self-attention\n " ]
Please provide a description of the function:def image_question_encoder(encoder_inputs, encoder_self_attention_bias, hparams, query=None, name="image_question_encoder", save_weights_to=None, make_image_summary=True): x = encoder_inputs with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, encoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, block_length=hparams.block_length, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "encoder_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "encoder_self_attention_postprocess_%d"%(layer), tf.norm(x, axis=-1)) if query is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), query, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, block_length=hparams.block_length, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "encoder_decoder_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "encoder_decoder_attention_post_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs( "norms", "encoder_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "encoder_ffn_postprocess_%d"%(layer), tf.norm(x, axis=-1)) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. return common_layers.layer_preprocess(x, hparams)
[ "A stack of self attention layers." ]
Please provide a description of the function:def decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, name="decoder", save_weights_to=None, make_image_summary=True,): x = decoder_input with tf.variable_scope(name): for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): layer_name = "layer_%d" % layer with tf.variable_scope(layer_name): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, make_image_summary=make_image_summary, ) utils.collect_named_outputs("norms", "decoder_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs("norms", "decoder_self_attention_post_%d"%(layer), tf.norm(x, axis=-1)) if encoder_output is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, save_weights_to=save_weights_to, make_image_summary=make_image_summary, ) utils.collect_named_outputs( "norms", "decoder_encoder_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "decoder_encoder_attention_post_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs("norms", "decoder_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs("norms", "decoder_ffn_post_%d"%(layer), tf.norm(x, axis=-1)) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. return common_layers.layer_preprocess(x, hparams)
[ "A stack of transformer layers.\n\n Args:\n decoder_input: a Tensor\n encoder_output: a Tensor\n decoder_self_attention_bias: bias Tensor for self-attention\n (see common_attention.attention_bias())\n encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention\n (see common_attention.attention_bias())\n hparams: hyperparameters for model\n name: a string\n save_weights_to: an optional dictionary to capture attention weights\n for visualization; the weights tensor will be appended there under\n a string key created from the variable scope (including name).\n make_image_summary: Whether to make an attention image summary.\n\n Returns:\n y: a Tensors\n " ]
Please provide a description of the function:def iterative_encoder_decoder(encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias, query, hparams): for _ in range(hparams.num_rec_steps): with tf.variable_scope("step", reuse=tf.AUTO_REUSE): encoder_output = image_question_encoder( encoder_input, encoder_self_attention_bias, hparams, query) decoder_output = decoder( query, encoder_output, None, encoder_decoder_attention_bias, hparams) encoder_input = encoder_output query = decoder_output return decoder_output
[ "Iterative encoder decoder." ]
Please provide a description of the function:def vqa_self_attention_base(): hparams = common_hparams.basic_params1() hparams.batch_size = 128 hparams.use_fixed_batch_size = True hparams.optimizer = "adam" hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.997 hparams.optimizer_adam_epsilon = 1e-9 hparams.weight_decay = 0. hparams.clip_grad_norm = 0. hparams.initializer = "xavier" hparams.learning_rate_schedule = ( "constant*linear_warmup*rsqrt_normalized_decay") hparams.learning_rate_warmup_steps = 8000 hparams.learning_rate_constant = 1e-3 hparams.learning_rate_decay_rate = 0.5 hparams.learning_rate_decay_steps = 50000 hparams.dropout = 0.5 hparams.summarize_grads = True hparams.summarize_vars = True # unused hparams hparams.label_smoothing = 0. hparams.multiply_embedding_mode = "sqrt_depth" # add new hparams # use raw image as input hparams.add_hparam("image_input_type", "image") hparams.add_hparam("image_model_fn", "resnet_v1_152") hparams.add_hparam("resize_side", 512) hparams.add_hparam("height", 448) hparams.add_hparam("width", 448) hparams.add_hparam("distort", True) hparams.add_hparam("train_resnet", False) # image parts hparams.add_hparam("image_feat_preprocess_proj", True) hparams.add_hparam("image_feat_preprocess_layernorm", True) hparams.add_hparam("image_feat_encode", True) hparams.add_hparam("image_hidden_size", 0) # defaults to hidden_size hparams.add_hparam("image_filter_size", 0) # defaults to filter_size # question hidden size hparams.hidden_size = 512 hparams.filter_size = 1024 hparams.num_hidden_layers = 4 hparams.add_hparam("multimodal_combine", "concat") hparams.add_hparam("num_mlp_layers", 1) hparams.add_hparam("mlp_size", 1024) # self attention parts hparams.norm_type = "layer" hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.layer_prepostprocess_dropout = 0.1 hparams.attention_dropout = 0.1 hparams.relu_dropout = 0.1 hparams.add_hparam("pos", "timing") hparams.add_hparam("num_encoder_layers", 0) hparams.add_hparam("num_decoder_layers", 0) hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("self_attention_type", "dot_product") hparams.add_hparam("image_self_attention_type", "dot_product") hparams.add_hparam("question_self_attention_type", "dot_product") hparams.add_hparam("block_length", 1) hparams.add_hparam("scale_dotproduct", True) # iterative part hparams.add_hparam("num_rec_steps", 3) return hparams
[ "VQA attention baseline hparams." ]
Please provide a description of the function:def vqa_self_attention_feature_batch1024_big(): hparams = vqa_self_attention_feature_batch1024() hparams.learning_rate_constant = 7e-4 hparams.batch_size = 256 hparams.hidden_size = 1024 hparams.filter_size = 4096 hparams.num_heads = 16 hparams.layer_prepostprocess_dropout = 0.3 hparams.attention_dropout = 0.3 hparams.relu_dropout = 0.3 return hparams
[ "Big model." ]