Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32` and shapes `[expert_batch_size_i]`
def expert_to_gates(self): """Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32` and shapes `[expert_batch_size_i]` """ return tf.split( self._nonzero_gates, self._part_sizes_tensor, 0, num=self._num_experts)
Batch indices corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64` and shapes `[expert_batch_size_i]`
def expert_to_batch_indices(self): """Batch indices corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64` and shapes `[expert_batch_size_i]` """ return tf.split( self._batch_index, self._part_sizes_tensor, 0, num=self._num_experts)
Create one input Tensor for each expert. Args: inp: a list of length num_datashards `Tensor`s with shapes `[batch_size[d], <extra_input_dims>]`. Returns: a list of `num_experts` `Tensor`s with shapes `[num_examples[i], <extra_input_dims>]`.
def dispatch(self, inp): """Create one input Tensor for each expert. Args: inp: a list of length num_datashards `Tensor`s with shapes `[batch_size[d], <extra_input_dims>]`. Returns: a list of `num_experts` `Tensor`s with shapes `[num_examples[i], <extra_input_dims>]`. """ dispatched = self._dp(lambda a, b: a.dispatch(b), self._dispatchers, inp) ret = self._ep(tf.concat, transpose_list_of_lists(dispatched), 0) if ret[0].dtype == tf.float32: # see comments on common_layers.convert_gradient_to_tensor ret = self._ep(common_layers.convert_gradient_to_tensor, ret) return ret
Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`.
def combine(self, expert_out, multiply_by_gates=True): """Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`. """ expert_part_sizes = tf.unstack( tf.stack([d.part_sizes for d in self._dispatchers]), num=self._ep.n, axis=1) # list of lists of shape [num_experts][num_datashards] expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes) expert_output_parts_t = transpose_list_of_lists(expert_output_parts) def my_combine(dispatcher, parts): return dispatcher.combine( common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)), multiply_by_gates=multiply_by_gates) return self._dp(my_combine, self._dispatchers, expert_output_parts_t)
Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s of type `tf.float32`.
def expert_to_gates(self): """Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s of type `tf.float32`. """ return self._ep( tf.concat, transpose_list_of_lists( self._dp(lambda d: d.expert_to_gates(), self._dispatchers)), 0)
Send the inputs to the experts. Args: inp: a `Tensor` of shape `[batch, length, depth]` Returns: a tensor with shape `[batch, num_experts, expert_capacity, depth]`
def dispatch(self, inp): """Send the inputs to the experts. Args: inp: a `Tensor` of shape "[batch, length, depth]` Returns: a tensor with shape [batch, num_experts, expert_capacity, depth] """ inp = tf.reshape(inp, [self._batch * self._length, -1]) # [batch, num_experts, expert_capacity, depth] ret = tf.gather(inp, self._flat_indices) return ret
Return the output from the experts. When one example goes to multiple experts, the outputs are summed. Args: x: a `Tensor` with shape `[batch, num_experts, expert_capacity, depth]` Returns: a `Tensor` with shape `[batch, length, depth]`
def combine(self, x): """Return the output from the experts. When one example goes to multiple experts, the outputs are summed. Args: x: a Tensor with shape [batch, num_experts, expert_capacity, depth] Returns: a `Tensor` with shape `[batch, length, depth] """ depth = tf.shape(x)[-1] x *= tf.expand_dims(self._nonpadding, -1) ret = tf.unsorted_segment_sum( x, self._flat_indices, num_segments=self._batch * self._length) ret = tf.reshape(ret, [self._batch, self._length, depth]) return ret
Factory function for envs.
def make_env(env_type, real_env, sim_env_kwargs): """Factory function for envs.""" return { "real": lambda: real_env.new_like( # pylint: disable=g-long-lambda batch_size=sim_env_kwargs["batch_size"], store_rollouts=False, ), "simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda **sim_env_kwargs ), }[env_type]()
Factory function for Agents.
def make_agent( agent_type, env, policy_hparams, policy_dir, sampling_temp, sim_env_kwargs_fn=None, frame_stack_size=None, rollout_agent_type=None, batch_size=None, inner_batch_size=None, env_type=None, **planner_kwargs ): """Factory function for Agents.""" if batch_size is None: batch_size = env.batch_size return { "random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda batch_size, env.observation_space, env.action_space ), "policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda batch_size, env.observation_space, env.action_space, policy_hparams, policy_dir, sampling_temp ), "planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda batch_size, make_agent( rollout_agent_type, env, policy_hparams, policy_dir, sampling_temp, batch_size=inner_batch_size ), make_env(env_type, env.env, sim_env_kwargs_fn()), lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size), discount_factor=policy_hparams.gae_gamma, **planner_kwargs ), }[agent_type]()
Collects frames from real env for random starts of simulated env.
def collect_frames_for_random_starts( storage_env, stacked_env, agent, frame_stack_size, random_starts_step_limit, log_every_steps=None ): """Collects frames from real env for random starts of simulated env.""" del frame_stack_size storage_env.start_new_epoch(0) tf.logging.info( "Collecting %d frames for random starts.", random_starts_step_limit ) rl_utils.run_rollouts( stacked_env, agent, stacked_env.reset(), step_limit=random_starts_step_limit, many_rollouts_from_each_env=True, log_every_steps=log_every_steps, ) # Save unfinished rollouts to history. stacked_env.reset()
Creates an Agent from hparams.
def make_agent_from_hparams( agent_type, base_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, sampling_temp, video_writers=() ): """Creates an Agent from hparams.""" def sim_env_kwargs_fn(): return rl.make_simulated_env_kwargs( base_env, loop_hparams, batch_size=planner_hparams.batch_size, model_dir=model_dir ) planner_kwargs = planner_hparams.values() planner_kwargs.pop("batch_size") planner_kwargs.pop("rollout_agent_type") planner_kwargs.pop("env_type") return make_agent( agent_type, stacked_env, policy_hparams, policy_dir, sampling_temp, sim_env_kwargs_fn, loop_hparams.frame_stack_size, planner_hparams.rollout_agent_type, inner_batch_size=planner_hparams.batch_size, env_type=planner_hparams.env_type, video_writers=video_writers, **planner_kwargs )
Returns an out-of-graph eval_fn using the Agent API.
def make_eval_fn_with_agent( agent_type, eval_mode, planner_hparams, model_dir, log_every_steps=None, video_writers=(), random_starts_step_limit=None ): """Returns an out-of-graph eval_fn using the Agent API.""" def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp): """Eval function.""" base_env = env env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size) agent = make_agent_from_hparams( agent_type, base_env, env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, sampling_temp, video_writers ) if eval_mode == "agent_simulated": real_env = base_env.new_like(batch_size=1) stacked_env = rl_utils.BatchStackWrapper( real_env, loop_hparams.frame_stack_size ) collect_frames_for_random_starts( real_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) initial_frame_chooser = rl_utils.make_initial_frame_chooser( real_env, loop_hparams.frame_stack_size, simulation_random_starts=True, simulation_flip_first_random_for_beginning=False, split=None, ) env_fn = rl.make_simulated_env_fn_from_hparams( real_env, loop_hparams, batch_size=loop_hparams.eval_batch_size, initial_frame_chooser=initial_frame_chooser, model_dir=model_dir ) sim_env = env_fn(in_graph=False) env = rl_utils.BatchStackWrapper(sim_env, loop_hparams.frame_stack_size) kwargs = {} if not agent.records_own_videos: kwargs["video_writers"] = video_writers step_limit = base_env.rl_env_max_episode_steps if step_limit == -1: step_limit = None rl_utils.run_rollouts( env, agent, env.reset(), log_every_steps=log_every_steps, step_limit=step_limit, **kwargs ) if eval_mode == "agent_real": assert len(base_env.current_epoch_rollouts()) == env.batch_size return eval_fn
Evaluates the world model.
def evaluate_world_model( agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, random_starts_step_limit, debug_video_path, log_every_steps ): """Evaluates the world model.""" if debug_video_path: debug_video_path = os.path.join(debug_video_path, "0.avi") storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0) stacked_env = rl_utils.BatchStackWrapper( storage_env, loop_hparams.frame_stack_size ) policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params) agent = make_agent_from_hparams( agent_type, storage_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, # TODO(koz4k): Loop over eval_sampling_temps? sampling_temp=loop_hparams.eval_sampling_temps[0], ) collect_frames_for_random_starts( storage_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) return rl_utils.evaluate_world_model( storage_env, loop_hparams, model_dir, debug_video_path, split=None )
Evaluate.
def evaluate( loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir, agent_type, eval_mode, eval_with_learner, log_every_steps, debug_video_path, num_debug_videos=1, random_starts_step_limit=None, report_fn=None, report_metric=None ): """Evaluate.""" if eval_with_learner: assert agent_type == "policy" if report_fn: assert report_metric is not None eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir) video_writers = () kwargs = {} if eval_mode in ["agent_real", "agent_simulated"]: if not eval_with_learner: if debug_video_path: tf.gfile.MakeDirs(debug_video_path) video_writers = [ common_video.WholeVideoWriter( # pylint: disable=g-complex-comprehension fps=10, output_path=os.path.join(debug_video_path, "{}.avi".format(i)), file_format="avi", ) for i in range(num_debug_videos) ] kwargs["eval_fn"] = make_eval_fn_with_agent( agent_type, eval_mode, planner_hparams, model_dir, log_every_steps=log_every_steps, video_writers=video_writers, random_starts_step_limit=random_starts_step_limit ) eval_metrics = rl_utils.evaluate_all_configs( loop_hparams, policy_dir, **kwargs ) else: eval_metrics = evaluate_world_model( agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, random_starts_step_limit, debug_video_path, log_every_steps ) rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0) for video_writer in video_writers: video_writer.finish_to_disk() # Report metrics if report_fn: if report_metric == "mean_reward": metric_name = rl_utils.get_metric_name( sampling_temp=loop_hparams.eval_sampling_temps[0], max_num_noops=loop_hparams.eval_max_num_noops, clipped=False ) report_fn(eval_metrics[metric_name], 0) else: report_fn(eval_metrics[report_metric], 0) return eval_metrics
Get game for the given worker (directory) id.
def get_game_for_worker(map_name, directory_id): """Get game for the given worker (directory) id.""" if map_name == "v100unfriendly": games = ["chopper_command", "boxing", "asterix", "seaquest"] worker_per_game = 5 elif map_name == "human_nice": games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE worker_per_game = 5 else: raise ValueError("Unknown worker to game map name: %s" % map_name) games.sort() game_id = (directory_id - 1) // worker_per_game tf.logging.info("Getting game %d from %s." % (game_id, games)) return games[game_id]
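A worked example of the mapping arithmetic above, assuming 5 workers per game: worker directories 1-5 run the first game (after sorting), 6-10 the second, and so on.
games = sorted(["chopper_command", "boxing", "asterix", "seaquest"])
worker_per_game = 5
for directory_id in (1, 5, 6, 11):
  game_id = (directory_id - 1) // worker_per_game
  print(directory_id, games[game_id])
# 1 asterix / 5 asterix / 6 boxing / 11 chopper_command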
Given a representation of the board, returns a list of open spaces.
def get_open_spaces(board): """Given a representation of the board, returns a list of open spaces.""" open_spaces = [] for i in range(3): for j in range(3): if board[i][j] == 0: open_spaces.append(encode_pos(i, j)) return open_spaces
Given a representation of the board, returns reward and done.
def get_reward_and_done(board): """Given a representation of the board, returns reward and done.""" # Returns (reward, done) where: # reward: -1 means lost, +1 means win, 0 means draw or continuing. # done: True if the game is over, i.e. someone won or it is a draw. # Sum all rows ... all_sums = [np.sum(board[i, :]) for i in range(3)] # ... all columns all_sums.extend([np.sum(board[:, i]) for i in range(3)]) # and both diagonals. all_sums.append(np.sum([board[i, i] for i in range(3)])) all_sums.append(np.sum([board[i, 2 - i] for i in range(3)])) if -3 in all_sums: return -1, True if 3 in all_sums: return 1, True done = True if get_open_spaces(board): done = False return 0, done
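A quick numpy check of the win-detection logic above (board cells are -1, 0, +1): any row, column, or diagonal summing to +3 or -3 ends the game.
import numpy as np

board = np.array([[1, 1, 1],
                  [0, -1, 0],
                  [-1, 0, 0]])
all_sums = [board[i, :].sum() for i in range(3)]           # rows
all_sums += [board[:, i].sum() for i in range(3)]          # columns
all_sums += [np.trace(board), np.trace(np.fliplr(board))]  # diagonals
print(3 in all_sums, -3 in all_sums)  # True False -> reward +1, done True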
Hyperparameters for decoding.
def decode_hparams(overrides=""): """Hyperparameters for decoding.""" hp = hparam.HParams( save_images=False, log_results=True, extra_length=100, min_length_ratio=0.0, batch_size=0, beam_size=4, alpha=0.6, eos_penalty=0.0, block_size=0, guess_and_check_top_k=0, guess_and_check_epsilon=-1, insertion_parallel=False, return_beams=False, write_beam_scores=False, max_input_size=-1, identity_output=False, num_samples=-1, # Number of examples to decode. delimiter="\n", decode_to_file="", # str. Prefix for filename to write decodings to. decode_reference="", # str. Filename to read references from. decode_in_memory=False, # How much decode should wait for the next checkpoint decode_timeout_mins=240, summaries_log_dir="decode", # Directory to write hook summaries. shards=1, # How many shards of data to decode (treating 1 as None). shard_id=0, # Which shard are we decoding if more than 1 above. shards_start_offset=0, # Number of the first shard to decode. shard_google_format=False, # If True use Google shard naming format. num_decodes=1, # Number of times to go over the dataset. force_decode_length=False, display_decoded_images=False, # Multi-problem decoding task id. multiproblem_task_id=-1, # Used for video decoding. frames_per_second=10, skip_eos_postprocess=False, # Creates a blue/red border covering border_percent of the frame. border_percent=2, # Maximum number of videos displayed. # number of videos displayed = max_display_outputs * max_display_decodes max_display_outputs=10, max_display_decodes=5, # Used in computation of VGG feature based video metrics. # Set this to be the path to a trained VGG ckpt to output # useful metrics. vgg_ckpt_path="", # Used for MLPerf compliance logging. mlperf_decode_step=0.0, mlperf_threshold=25.0, mlperf_success=False) hp.parse(overrides) return hp
Log inference results.
def log_decode_results(inputs, outputs, problem_name, prediction_idx, inputs_vocab, targets_vocab, targets=None, save_images=False, output_dir=None, identity_output=False, log_results=True, skip_eos_postprocess=False): """Log inference results.""" # TODO(lukaszkaiser) refactor this into feature_encoder is_video = "video" in problem_name or "gym" in problem_name if is_video: def fix_and_save_video(vid, prefix): save_path_template = os.path.join( output_dir, "%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx)) # this is only required for predictions if vid.shape[-1] == 1: vid = np.squeeze(vid, axis=-1) save_video(vid, save_path_template) tf.logging.info("Saving video: {}".format(prediction_idx)) fix_and_save_video(inputs, "inputs") fix_and_save_video(outputs, "outputs") fix_and_save_video(targets, "targets") is_image = "image" in problem_name is_text2class = isinstance(registry.problem(problem_name), text_problems.Text2ClassProblem) skip_eos_postprocess = is_image or is_text2class or skip_eos_postprocess decoded_inputs = None if is_image and save_images: save_path = os.path.join( output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx)) show_and_save_image(inputs / 255., save_path) elif inputs is not None and inputs_vocab: if identity_output: decoded_inputs = " ".join(map(str, inputs.flatten())) else: decoded_inputs = inputs_vocab.decode(_save_until_eos( inputs, skip_eos_postprocess)) if log_results and not is_video: tf.logging.info("Inference results INPUT: %s" % decoded_inputs) decoded_targets = None decoded_outputs = None if identity_output: decoded_outputs = " ".join(map(str, outputs.flatten())) if targets is not None: decoded_targets = " ".join(map(str, targets.flatten())) else: decoded_outputs = targets_vocab.decode(_save_until_eos( outputs, skip_eos_postprocess)) if targets is not None and log_results: decoded_targets = targets_vocab.decode(_save_until_eos( targets, skip_eos_postprocess)) if log_results and not is_video: tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs) if targets is not None and log_results and not is_video: tf.logging.info("Inference results TARGET: %s" % decoded_targets) return decoded_inputs, decoded_outputs, decoded_targets
Perform decoding from dataset.
def decode_from_dataset(estimator, problem_name, hparams, decode_hp, decode_to_file=None, dataset_split=None, checkpoint_path=None): """Perform decoding from dataset.""" tf.logging.info("Performing local inference from dataset for %s.", str(problem_name)) # We assume that worker_id corresponds to shard number. shard = decode_hp.shard_id if decode_hp.shards > 1 else None # Setup output directory for any artifacts that may be written out. output_dir = os.path.join(estimator.model_dir, "decode") tf.gfile.MakeDirs(output_dir) # If decode_hp.batch_size is specified, use a fixed batch size if decode_hp.batch_size: hparams.batch_size = decode_hp.batch_size hparams.use_fixed_batch_size = True dataset_kwargs = { "shard": shard, "dataset_split": dataset_split, "max_records": decode_hp.num_samples } # Build the inference input function problem = hparams.problem infer_input_fn = problem.make_estimator_input_fn( tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs) predictions, output_dirs = [], [] for decode_id in range(decode_hp.num_decodes): tf.logging.info("Decoding {}".format(decode_id)) # Create decode directory if not in-memory decoding. if not decode_hp.decode_in_memory: output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id) tf.gfile.MakeDirs(output_dir) output_dirs.append(output_dir) result = decode_once(estimator, problem_name, hparams, infer_input_fn, decode_hp, decode_to_file, output_dir, log_results=decode_hp.log_results, checkpoint_path=checkpoint_path) if decode_hp.decode_in_memory: output_dirs = [output_dir] predictions.append(result) if decode_hp.decode_to_file: decode_hp.decode_to_file = _decode_filename( decode_hp.decode_to_file, problem_name, decode_hp) run_postdecode_hooks(DecodeHookArgs( estimator=estimator, problem=problem, output_dirs=output_dirs, hparams=hparams, decode_hparams=decode_hp, predictions=predictions ), dataset_split) return predictions
Decodes once. Args: estimator: tf.estimator.Estimator instance. Used to generate encoded predictions. problem_name: str. Name of problem. hparams: HParams instance. HParams for model training. infer_input_fn: zero-arg function. Input function for estimator. decode_hp: HParams instance. See decode_hparams() above. decode_to_file: str. Prefix for filenames. Used to generate filenames to which decoded predictions are written. output_dir: str. Output directory. Only used for writing images. log_results: bool. If False, return encoded predictions without any further processing. checkpoint_path: str. Path to load model checkpoint from. If unspecified, Estimator's default is used. Returns: If decode_hp.decode_in_memory is True: List of dicts, one per example. Values are either numpy arrays or decoded strings. If decode_hp.decode_in_memory is False: An empty list.
def decode_once(estimator, problem_name, hparams, infer_input_fn, decode_hp, decode_to_file, output_dir, log_results=True, checkpoint_path=None): """Decodes once. Args: estimator: tf.estimator.Estimator instance. Used to generate encoded predictions. problem_name: str. Name of problem. hparams: HParams instance. HParams for model training. infer_input_fn: zero-arg function. Input function for estimator. decode_hp: HParams instance. See decode_hparams() above. decode_to_file: str. Prefix for filenames. Used to generated filenames to which decoded predictions are written. output_dir: str. Output directory. Only used for writing images. log_results: bool. If False, return encoded predictions without any further processing. checkpoint_path: str. Path to load model checkpoint from. If unspecified, Estimator's default is used. Returns: If decode_hp.decode_in_memory is True: List of dicts, one per example. Values are either numpy arrays or decoded strings. If decode_hp.decode_in_memory is False: An empty list. """ # Get the predictions as an iterable predictions = estimator.predict(infer_input_fn, checkpoint_path=checkpoint_path) if not log_results: return list(predictions) # Prepare output file writers if decode_to_file passed decode_to_file = decode_to_file or decode_hp.decode_to_file if decode_to_file: output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp) parts = output_filepath.split(".") parts[-1] = "targets" target_filepath = ".".join(parts) parts[-1] = "inputs" input_filepath = ".".join(parts) output_file = tf.gfile.Open(output_filepath, "w") target_file = tf.gfile.Open(target_filepath, "w") input_file = tf.gfile.Open(input_filepath, "w") problem_hparams = hparams.problem_hparams # Inputs vocabulary is set to targets if there are no inputs in the problem, # e.g., for language models where the inputs are just a prefix of targets. has_input = "inputs" in problem_hparams.vocabulary inputs_vocab_key = "inputs" if has_input else "targets" inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key] targets_vocab = problem_hparams.vocabulary["targets"] num_eval_samples = 0 # all_outputs[i][j] = (input: str, output: str, target: str). Input, # decoded output, and target strings for example i, beam rank j. all_outputs = [] for num_predictions, prediction in enumerate(predictions): num_eval_samples += 1 num_predictions += 1 inputs = prediction.get("inputs") targets = prediction.get("targets") outputs = prediction.get("outputs") # Log predictions decoded_outputs = [] # [(str, str, str)]. See all_outputs above. 
if decode_hp.decode_in_memory: all_outputs.append(decoded_outputs) decoded_scores = [] if decode_hp.return_beams: output_beams = np.split(outputs, decode_hp.beam_size, axis=0) scores = None if "scores" in prediction: scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0) for i, beam in enumerate(output_beams): tf.logging.info("BEAM %d:" % i) score = scores and scores[i] decoded = log_decode_results( inputs, beam, problem_name, num_predictions, inputs_vocab, targets_vocab, save_images=decode_hp.save_images, output_dir=output_dir, identity_output=decode_hp.identity_output, targets=targets, log_results=log_results) decoded_outputs.append(decoded) if decode_hp.write_beam_scores: decoded_scores.append(score) else: decoded = log_decode_results( inputs, outputs, problem_name, num_predictions, inputs_vocab, targets_vocab, save_images=decode_hp.save_images, output_dir=output_dir, identity_output=decode_hp.identity_output, targets=targets, log_results=log_results, skip_eos_postprocess=decode_hp.skip_eos_postprocess) decoded_outputs.append(decoded) # Write out predictions if decode_to_file passed if decode_to_file: for i, (d_input, d_output, d_target) in enumerate(decoded_outputs): # Skip if all padding if d_input and re.match("^({})+$".format(text_encoder.PAD), d_input): continue beam_score_str = "" if decode_hp.write_beam_scores: beam_score_str = "\t%.2f" % decoded_scores[i] output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter) target_file.write(str(d_target) + decode_hp.delimiter) input_file.write(str(d_input) + decode_hp.delimiter) if (decode_hp.num_samples >= 0 and num_predictions >= decode_hp.num_samples): break mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE, value=num_eval_samples, hparams=hparams) if decode_to_file: output_file.close() target_file.close() input_file.close() return all_outputs
Compute predictions on entries in filename and write them out.
def decode_from_file(estimator, filename, hparams, decode_hp, decode_to_file=None, checkpoint_path=None): """Compute predictions on entries in filename and write them out.""" if not decode_hp.batch_size: decode_hp.batch_size = 32 tf.logging.info( "decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size) # Inputs vocabulary is set to targets if there are no inputs in the problem, # e.g., for language models where the inputs are just a prefix of targets. p_hp = hparams.problem_hparams has_input = "inputs" in p_hp.vocabulary inputs_vocab_key = "inputs" if has_input else "targets" inputs_vocab = p_hp.vocabulary[inputs_vocab_key] targets_vocab = p_hp.vocabulary["targets"] problem_name = FLAGS.problem filename = _add_shard_to_filename(filename, decode_hp) tf.logging.info("Performing decoding from file (%s)." % filename) if has_input: sorted_inputs, sorted_keys = _get_sorted_inputs( filename, decode_hp.delimiter) else: sorted_inputs = _get_language_modeling_inputs( filename, decode_hp.delimiter, repeat=decode_hp.num_decodes) sorted_keys = range(len(sorted_inputs)) num_sentences = len(sorted_inputs) num_decode_batches = (num_sentences - 1) // decode_hp.batch_size + 1 if estimator.config.use_tpu: length = getattr(hparams, "length", 0) or hparams.max_length batch_ids = [] for line in sorted_inputs: if has_input: ids = inputs_vocab.encode(line.strip()) + [1] else: ids = targets_vocab.encode(line) if len(ids) < length: ids.extend([0] * (length - len(ids))) else: ids = ids[:length] batch_ids.append(ids) np_ids = np.array(batch_ids, dtype=np.int32) def input_fn(params): batch_size = params["batch_size"] dataset = tf.data.Dataset.from_tensor_slices({"inputs": np_ids}) dataset = dataset.map( lambda ex: {"inputs": tf.reshape(ex["inputs"], (length, 1, 1))}) dataset = dataset.batch(batch_size) return dataset else: def input_fn(): input_gen = _decode_batch_input_fn( num_decode_batches, sorted_inputs, inputs_vocab, decode_hp.batch_size, decode_hp.max_input_size, task_id=decode_hp.multiproblem_task_id, has_input=has_input) gen_fn = make_input_fn_from_generator(input_gen) example = gen_fn() return _decode_input_tensor_to_features_dict(example, hparams) decodes = [] result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) start_time = time.time() total_time_per_step = 0 total_cnt = 0 def timer(gen): while True: try: start_time = time.time() item = next(gen) elapsed_time = time.time() - start_time yield elapsed_time, item except StopIteration: break for elapsed_time, result in timer(result_iter): if decode_hp.return_beams: beam_decodes = [] beam_scores = [] output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0) scores = None if "scores" in result: if np.isscalar(result["scores"]): result["scores"] = result["scores"].reshape(1) scores = np.split(result["scores"], decode_hp.beam_size, axis=0) for k, beam in enumerate(output_beams): tf.logging.info("BEAM %d:" % k) score = scores and scores[k] _, decoded_outputs, _ = log_decode_results( result["inputs"], beam, problem_name, None, inputs_vocab, targets_vocab, log_results=decode_hp.log_results, skip_eos_postprocess=decode_hp.skip_eos_postprocess) beam_decodes.append(decoded_outputs) if decode_hp.write_beam_scores: beam_scores.append(score) if decode_hp.write_beam_scores: decodes.append("\t".join([ "\t".join([d, "%.2f" % s]) for d, s in zip(beam_decodes, beam_scores) ])) else: decodes.append("\t".join(beam_decodes)) else: _, decoded_outputs, _ = log_decode_results( result["inputs"], result["outputs"], problem_name, None, 
inputs_vocab, targets_vocab, log_results=decode_hp.log_results, skip_eos_postprocess=decode_hp.skip_eos_postprocess) decodes.append(decoded_outputs) total_time_per_step += elapsed_time total_cnt += result["outputs"].shape[-1] duration = time.time() - start_time tf.logging.info("Elapsed Time: %5.5f" % duration) tf.logging.info("Averaged Single Token Generation Time: %5.7f " "(time %5.7f count %d)" % (total_time_per_step / total_cnt, total_time_per_step, total_cnt)) if decode_hp.batch_size == 1: tf.logging.info("Inference time %.4f seconds " "(Latency = %.4f ms/setences)" % (duration, 1000.0*duration/num_sentences)) else: tf.logging.info("Inference time %.4f seconds " "(Throughput = %.4f sentences/second)" % (duration, num_sentences/duration)) # If decode_to_file was provided use it as the output filename without change # (except for adding shard_id if using more shards for decoding). # Otherwise, use the input filename plus model, hp, problem, beam, alpha. decode_filename = decode_to_file if decode_to_file else filename if not decode_to_file: decode_filename = _decode_filename(decode_filename, problem_name, decode_hp) else: decode_filename = _add_shard_to_filename(decode_filename, decode_hp) tf.logging.info("Writing decodes into %s" % decode_filename) outfile = tf.gfile.Open(decode_filename, "w") for index in range(len(sorted_inputs)): outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter)) outfile.flush() outfile.close() output_dir = os.path.join(estimator.model_dir, "decode") tf.gfile.MakeDirs(output_dir) run_postdecode_hooks(DecodeHookArgs( estimator=estimator, problem=hparams.problem, output_dirs=[output_dir], hparams=hparams, decode_hparams=decode_hp, predictions=list(result_iter) ), None)
Generates decode filename. Args: base_filename: A string, base of the decode filename. problem_name: A string, name of the problem. decode_hp: HParams for decoding. Returns: A string, produced decode filename.
def _decode_filename(base_filename, problem_name, decode_hp): """Generates decode filename. Args: base_filename: A string, base of the decode filename. problem_name: A string, name of the problem. decode_hp: HParams for decoding. Returns: A string, produced decode filename. """ if decode_hp.shards > 1: base_filename = _add_shard_to_filename(base_filename, decode_hp) if ("beam{beam}.alpha{alpha}.decodes".format( beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)) in base_filename): return base_filename else: return ( "{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format( base=base_filename, model=FLAGS.model, hp=FLAGS.hparams_set, problem=problem_name, beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)))
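A worked example of the filename this produces; the model and hparams-set names below are illustrative placeholders for what is normally read from FLAGS.
template = "{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes"
print(template.format(base="out", model="transformer",
                      hp="transformer_base", problem="translate_ende_wmt32k",
                      beam="4", alpha="0.6"))
# out.transformer.transformer_base.translate_ende_wmt32k.beam4.alpha0.6.decodes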
Use py_func to yield elements from the given generator.
def make_input_fn_from_generator(gen): """Use py_func to yield elements from the given generator.""" first_ex = six.next(gen) flattened = tf.contrib.framework.nest.flatten(first_ex) types = [t.dtype for t in flattened] shapes = [[None] * len(t.shape) for t in flattened] first_ex_list = [first_ex] def py_func(): if first_ex_list: example = first_ex_list.pop() else: example = six.next(gen) return tf.contrib.framework.nest.flatten(example) def input_fn(): flat_example = tf.py_func(py_func, [], types) _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)] example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example) return example return input_fn
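The core trick, sketched in plain Python without TensorFlow (not the library code): the first element is pulled eagerly so dtypes and shapes can be inspected, cached in a one-element list, and replayed once before the generator supplies the remaining elements.
def make_callable_from_generator(gen):
  first = next(gen)     # inspect dtypes/shapes of `first` here if needed
  cache = [first]
  def fetch():
    return cache.pop() if cache else next(gen)
  return fetch

fetch = make_callable_from_generator(iter([1, 2, 3]))
print(fetch(), fetch(), fetch())  # 1 2 3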
Interactive decoding.
def decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None): """Interactive decoding.""" is_image = "image" in hparams.problem.name is_text2class = isinstance(hparams.problem, text_problems.Text2ClassProblem) skip_eos_postprocess = ( is_image or is_text2class or decode_hp.skip_eos_postprocess) def input_fn(): gen_fn = make_input_fn_from_generator( _interactive_input_fn(hparams, decode_hp)) example = gen_fn() example = _interactive_input_tensor_to_features_dict(example, hparams) return example result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) for result in result_iter: targets_vocab = hparams.problem_hparams.vocabulary["targets"] if decode_hp.return_beams: beams = np.split(result["outputs"], decode_hp.beam_size, axis=0) scores = None if "scores" in result: if np.isscalar(result["scores"]): result["scores"] = result["scores"].reshape(1) scores = np.split(result["scores"], decode_hp.beam_size, axis=0) for k, beam in enumerate(beams): tf.logging.info("BEAM %d:" % k) beam_string = targets_vocab.decode(_save_until_eos( beam, skip_eos_postprocess)) if scores is not None: tf.logging.info("\"%s\"\tScore:%f" % (beam_string, scores[k])) else: tf.logging.info("\"%s\"" % beam_string) else: if decode_hp.identity_output: tf.logging.info(" ".join(map(str, result["outputs"].flatten()))) else: tf.logging.info( targets_vocab.decode(_save_until_eos( result["outputs"], skip_eos_postprocess)))
Generator to produce batches of inputs.
def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary, batch_size, max_input_size, task_id=-1, has_input=True): """Generator to produce batches of inputs.""" tf.logging.info(" batch %d" % num_decode_batches) for b in range(num_decode_batches): tf.logging.info("Decoding batch %d" % b) batch_length = 0 batch_inputs = [] for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]: input_ids = vocabulary.encode(inputs) if max_input_size > 0: # Subtract 1 for the EOS_ID. input_ids = input_ids[:max_input_size - 1] if has_input or task_id > -1: # Do not append EOS for pure LM tasks. final_id = text_encoder.EOS_ID if task_id < 0 else task_id input_ids.append(final_id) batch_inputs.append(input_ids) if len(input_ids) > batch_length: batch_length = len(input_ids) final_batch_inputs = [] for input_ids in batch_inputs: assert len(input_ids) <= batch_length x = input_ids + [0] * (batch_length - len(input_ids)) final_batch_inputs.append(x) yield { "inputs": np.array(final_batch_inputs).astype(np.int32), }
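A numpy sketch of the padding step above: every example in a batch is right-padded with zeros up to the longest sequence in that batch.
import numpy as np

batch = [[5, 7, 1], [9, 1], [3, 4, 6, 2, 1]]
batch_length = max(len(ids) for ids in batch)
padded = np.array([ids + [0] * (batch_length - len(ids)) for ids in batch],
                  dtype=np.int32)
print(padded)
# [[5 7 1 0 0]
#  [9 1 0 0 0]
#  [3 4 6 2 1]]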
Generator that reads from the terminal and yields "interactive inputs". Due to temporary limitations in tf.learn, if we don't want to reload the whole graph, then we are stuck encoding all of the input as one fixed-size numpy array. We yield int32 arrays with shape [const_array_size]. The format is: [num_samples, decode_length, len(input ids), <input ids>, <padding>] Args: hparams: model hparams decode_hp: decode hparams Yields: numpy arrays Raises: Exception: when `input_type` is invalid.
def _interactive_input_fn(hparams, decode_hp): """Generator that reads from the terminal and yields "interactive inputs". Due to temporary limitations in tf.learn, if we don't want to reload the whole graph, then we are stuck encoding all of the input as one fixed-size numpy array. We yield int32 arrays with shape [const_array_size]. The format is: [num_samples, decode_length, len(input ids), <input ids>, <padding>] Args: hparams: model hparams decode_hp: decode hparams Yields: numpy arrays Raises: Exception: when `input_type` is invalid. """ num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1 decode_length = decode_hp.extra_length input_type = "text" p_hparams = hparams.problem_hparams has_input = "inputs" in p_hparams.modality vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"] # This should be longer than the longest input. const_array_size = 10000 # Import readline if available for command line editing and recall. try: import readline # pylint: disable=g-import-not-at-top,unused-variable except ImportError: pass while True: prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n" " it=<input_type> ('text' or 'image' or 'label', default: " "text)\n" " ns=<num_samples> (changes number of samples, default: 1)\n" " dl=<decode_length> (changes decode length, default: 100)\n" " <%s> (decode)\n" " q (quit)\n" ">" % (num_samples, decode_length, "source_string" if has_input else "target_prefix")) input_string = input(prompt) if input_string == "q": return elif input_string[:3] == "ns=": num_samples = int(input_string[3:]) elif input_string[:3] == "dl=": decode_length = int(input_string[3:]) elif input_string[:3] == "it=": input_type = input_string[3:] else: if input_type == "text": input_ids = vocabulary.encode(input_string) if has_input: input_ids.append(text_encoder.EOS_ID) x = [num_samples, decode_length, len(input_ids)] + input_ids assert len(x) < const_array_size x += [0] * (const_array_size - len(x)) features = { "inputs": np.array(x).astype(np.int32), } elif input_type == "image": input_path = input_string img = vocabulary.encode(input_path) features = { "inputs": img.astype(np.int32), } elif input_type == "label": input_ids = [int(input_string)] x = [num_samples, decode_length, len(input_ids)] + input_ids features = { "inputs": np.array(x).astype(np.int32), } else: raise Exception("Unsupported input type.") for k, v in six.iteritems( problem_lib.problem_hparams_to_features(p_hparams)): features[k] = np.array(v).astype(np.int32) yield features
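A worked example of the fixed-size packed format described above, [num_samples, decode_length, len(input_ids), <input_ids>, <padding>], with const_array_size shortened for readability.
import numpy as np

num_samples, decode_length = 1, 100
input_ids = [17, 42, 8, 1]        # ends with EOS id 1
const_array_size = 16             # 10000 in the real code
x = [num_samples, decode_length, len(input_ids)] + input_ids
x += [0] * (const_array_size - len(x))
packed = np.array(x, dtype=np.int32)
print(packed[:8])                 # [  1 100   4  17  42   8   1   0]
print(packed[3:3 + packed[2]])    # [17 42  8  1] -- the input ids again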
Save frames of the videos into files.
def save_video(video, save_path_template): """Save frames of the videos into files.""" try: from PIL import Image # pylint: disable=g-import-not-at-top except ImportError as e: tf.logging.warning( "Showing and saving an image requires PIL library to be " "installed: %s", e) raise NotImplementedError("Image display and save not implemented.") for i, frame in enumerate(video): save_path = save_path_template.format(i) with tf.gfile.Open(save_path, "wb") as sp: Image.fromarray(np.uint8(frame)).save(sp)
Shows an image using matplotlib and saves it.
def show_and_save_image(img, save_path): """Shows an image using matplotlib and saves it.""" try: import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top except ImportError as e: tf.logging.warning( "Showing and saving an image requires matplotlib to be " "installed: %s", e) raise NotImplementedError("Image display and save not implemented.") plt.imshow(img) with tf.gfile.Open(save_path, "wb") as sp: plt.savefig(sp)
Read a file of partial texts to continue. The purpose of append_space_to_final_punctionation is that SubwordTokenizer groups punctuation and the ensuing space in the same token. Adding a space causes the token to be completed. Args: filename: a string delimiter: a string repeat: an integer - we repeat the entire file that many times. append_space_to_final_punctionation: a boolean Returns: a list of strings
def _get_language_modeling_inputs(filename, delimiter="\n", repeat=1, append_space_to_final_punctionation=True): """Read a file of partial texts to continue. The purpose of append_space_to_final_punctionation is that SubwordTokenizer groups punctuation and the ensuing space in the same token. Adding a space causes the token to be completed. Args: filename: a string delimiter: a string repeat: an integer - we repeat the entire file that many times. append_space_to_final_punctionation: a boolean Returns: a list of strings """ with tf.gfile.Open(filename) as f: text = f.read() inputs = text.split(delimiter) if not inputs[-1]: inputs.pop() inputs *= repeat if append_space_to_final_punctionation: inputs = [ s + " " if s and s[-1] in string.punctuation else s for s in inputs] return inputs
Returns inputs sorted according to decreasing length. This causes inputs of similar lengths to be processed in the same batch, facilitating early stopping for short sequences. Longer sequences are sorted first so that if you're going to get OOMs, you'll see it in the first batch. Args: filename: path to file with inputs, 1 per line. delimiter: str, delimits records in the file. Returns: a sorted list of inputs, and a dict mapping each original index to its position in the sorted list.
def _get_sorted_inputs(filename, delimiter="\n"): """Returning inputs sorted according to decreasing length. This causes inputs of similar lengths to be processed in the same batch, facilitating early stopping for short sequences. Longer sequences are sorted first so that if you're going to get OOMs, you'll see it in the first batch. Args: filename: path to file with inputs, 1 per line. delimiter: str, delimits records in the file. Returns: a sorted list of inputs """ tf.logging.info("Getting sorted inputs") with tf.gfile.Open(filename) as f: text = f.read() records = text.split(delimiter) inputs = [record.strip() for record in records] # Strip the last empty line. if not inputs[-1]: inputs.pop() input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)] sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1)) # We'll need the keys to rearrange the inputs back into their original order sorted_keys = {} sorted_inputs = [] for i, (index, _) in enumerate(sorted_input_lens): sorted_inputs.append(inputs[index]) sorted_keys[index] = i return sorted_inputs, sorted_keys
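A small sketch of the sort-and-restore bookkeeping: inputs are decoded in decreasing-length order, and sorted_keys maps each original index back to its position in the sorted list so outputs can later be written in the original order.
inputs = ["a b", "c d e f", "g"]
order = sorted(range(len(inputs)), key=lambda i: -len(inputs[i].split()))
sorted_inputs = [inputs[i] for i in order]
sorted_keys = {orig: pos for pos, orig in enumerate(order)}
decodes = ["OUT:" + s for s in sorted_inputs]   # stand-in for model outputs
restored = [decodes[sorted_keys[i]] for i in range(len(inputs))]
print(sorted_inputs)  # ['c d e f', 'a b', 'g']
print(restored)       # ['OUT:a b', 'OUT:c d e f', 'OUT:g']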
Strips everything after the first <EOS> token, which is normally 1.
def _save_until_eos(ids, skip=False): """Strips everything after the first <EOS> token, which is normally 1.""" ids = ids.flatten() if skip: return ids try: index = list(ids).index(text_encoder.EOS_ID) return ids[0:index] except ValueError: # No EOS_ID: return the array as-is. return ids
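A tiny check of the truncation behaviour, assuming EOS_ID == 1 as in text_encoder.
import numpy as np

ids = np.array([[5, 8, 3, 1, 0, 0]]).flatten()
eos_index = list(ids).index(1)    # a ValueError here would mean "no EOS"
print(ids[:eos_index])            # [5 8 3]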
Convert the interactive input format (see above) to a dictionary. Args: feature_map: dict with inputs. hparams: model hyperparameters Returns: a features dictionary, as expected by the decoder.
def _interactive_input_tensor_to_features_dict(feature_map, hparams): """Convert the interactive input format (see above) to a dictionary. Args: feature_map: dict with inputs. hparams: model hyperparameters Returns: a features dictionary, as expected by the decoder. """ inputs = tf.convert_to_tensor(feature_map["inputs"]) input_is_image = False if len(inputs.get_shape()) < 3 else True x = inputs if input_is_image: x = tf.image.resize_images(x, [299, 299]) x = tf.reshape(x, [1, 299, 299, -1]) x = tf.to_int32(x) else: # Remove the batch dimension. num_samples = x[0] length = x[2] x = tf.slice(x, [3], tf.to_int32([length])) x = tf.reshape(x, [1, -1, 1, 1]) # Transform into a batch of size num_samples to get that many random # decodes. x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1])) p_hparams = hparams.problem_hparams input_space_id = tf.constant(p_hparams.input_space_id) target_space_id = tf.constant(p_hparams.target_space_id) features = {} features["input_space_id"] = input_space_id features["target_space_id"] = target_space_id features["decode_length"] = ( IMAGE_DECODE_LENGTH if input_is_image else inputs[1]) features["inputs"] = x return features
Convert the interactive input format (see above) to a dictionary. Args: feature_map: dict with inputs. hparams: model hyperparameters Returns: a features dictionary, as expected by the decoder.
def _decode_input_tensor_to_features_dict(feature_map, hparams): """Convert the interactive input format (see above) to a dictionary. Args: feature_map: dict with inputs. hparams: model hyperparameters Returns: a features dictionary, as expected by the decoder. """ inputs = tf.convert_to_tensor(feature_map["inputs"]) input_is_image = False x = inputs p_hparams = hparams.problem_hparams # Add a third empty dimension x = tf.expand_dims(x, axis=[2]) x = tf.to_int32(x) input_space_id = tf.constant(p_hparams.input_space_id) target_space_id = tf.constant(p_hparams.target_space_id) features = {} features["input_space_id"] = input_space_id features["target_space_id"] = target_space_id features["decode_length"] = ( IMAGE_DECODE_LENGTH if input_is_image else tf.shape(x)[1] + 50) features["inputs"] = x return features
Run hooks after decodes have run.
def run_postdecode_hooks(decode_hook_args, dataset_split): """Run hooks after decodes have run.""" hooks = decode_hook_args.problem.decode_hooks if not hooks: return global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir) if global_step is None: tf.logging.info( "Skipping decode hooks because no checkpoint yet available.") return tf.logging.info("Running decode hooks.") parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir) child_dir = decode_hook_args.decode_hparams.summaries_log_dir if dataset_split is not None: child_dir += "_{}".format(dataset_split) final_dir = os.path.join(parent_dir, child_dir) summary_writer = tf.summary.FileWriter(final_dir) for hook in hooks: # Isolate each hook in case it creates TF ops with tf.Graph().as_default(): summaries = hook(decode_hook_args) if summaries: summary = tf.Summary(value=list(summaries)) summary_writer.add_summary(summary, global_step) summary_writer.close() tf.logging.info("Decode hooks done.")
Splits of data to produce and number of output shards for each.
def dataset_splits(self): """Splits of data to produce and number of output shards for each.""" return [{ "split": problem.DatasetSplit.TRAIN, "shards": _TRAIN_SHARDS, }, { "split": problem.DatasetSplit.EVAL, "shards": _DEV_SHARDS, }]
Image Transformer decoder with local1D spatial layers.
def local_attention1d_spatial_decoder(x, kv_dim, heads_dim, feedforward_dim, hparams): """Image Transformer decoder with local1D spatial layers.""" batch_dim, length_dim, model_dim = x.shape.dims blocks_w_dim = mtf.Dimension("blocksw", hparams.block_length) num_w_blocks_dim = mtf.Dimension("num_wblocks", length_dim.size // blocks_w_dim.size) x = mtf.reshape( x, mtf.Shape([batch_dim, num_w_blocks_dim, blocks_w_dim, model_dim])) # [ self attention - ffn - residual + dropout] x n for layer in range(hparams.num_decoder_layers): layer_name = "decoder_layer_%d" % layer with tf.variable_scope(layer_name): # Self attention layer x += layer_prepostprocess_dropout( mtf.layers.local_self_attention_spatial_blocks( mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"), kv_dim, heads_dim, memory_w_dim=blocks_w_dim, mask_right=True, name="self_att"), hparams) # ffn layer x += layer_prepostprocess_dropout( mtf.layers.dense_relu_dense( mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"), feedforward_dim, hparams.dropout, dropout_broadcast_dims=[length_dim]), hparams) output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm") return output
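A numpy sketch (shapes only, no Mesh TensorFlow) of the blocking reshape at the top of this decoder: a [batch, length, d_model] activation is split into [batch, num_blocks, block_length, d_model], assuming length is a multiple of block_length.
import numpy as np

batch, length, d_model, block_length = 2, 8, 4, 4
x = np.arange(batch * length * d_model).reshape(batch, length, d_model)
blocked = x.reshape(batch, length // block_length, block_length, d_model)
print(blocked.shape)  # (2, 2, 4, 4) == [batch, num_wblocks, blocksw, d_model]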
Image Transformer decoder with local2D spatial layers.
def local_attention2d_spatial_decoder(x, kv_dim, heads_dim, feedforward_dim, hparams): """Image Transformer decoder with local2D spatial layers.""" batch_dim, length_dim, model_dim = x.shape.dims blocks_h_dim = mtf.Dimension("blocksh", hparams.block_height) blocks_w_dim = mtf.Dimension("blocksw", hparams.block_width) num_h_blocks_dim = mtf.Dimension("num_h_blocks", hparams.img_len // hparams.block_height) num_w_blocks_dim = mtf.Dimension( "num_w_blocks", hparams.img_len * hparams.num_channels // hparams.block_width) x = mtf.transpose( mtf.reshape( x, mtf.Shape([ batch_dim, num_h_blocks_dim, blocks_h_dim, num_w_blocks_dim, blocks_w_dim, model_dim ])), mtf.Shape([ batch_dim, num_h_blocks_dim, num_w_blocks_dim, blocks_h_dim, blocks_w_dim, model_dim ])) # Image Transformer Decoder # [ self attention - ffn - residual + dropout] x n for layer in range(hparams.num_decoder_layers): layer_name = "decoder_layer_%d" % layer with tf.variable_scope(layer_name): # Self attention layer x += layer_prepostprocess_dropout( mtf.layers.local_2d_self_attention_spatial_blocks( mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"), kv_dim, heads_dim, memory_h_dim=num_h_blocks_dim, memory_w_dim=num_w_blocks_dim, name="self_att"), hparams) # ffn layer x += layer_prepostprocess_dropout( mtf.layers.dense_relu_dense( mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"), feedforward_dim, hparams.dropout, dropout_broadcast_dims=[length_dim]), hparams) output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm") return output
Image Transformer decoder with local1D masked layers.
def local_attention1d_masked_decoder(x, kv_dim, heads_dim, feedforward_dim, hparams): """Image Transformer decoder with local1D masked layers.""" print(x) _, length_dim, model_dim = x.shape.dims for layer in range(hparams.num_decoder_layers): layer_name = "decoder_layer_%d" % layer with tf.variable_scope(layer_name): # Self attention layer length_per_split = mtf.tensor_dim_to_size_per_split( hparams.layout, hparams.mesh_shape, length_dim) x += layer_prepostprocess_dropout( mtf.layers.masked_local_attention_1d( mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"), kv_dim, heads_dim, window_size=hparams.block_length, length_per_split=length_per_split, name="self_att"), hparams) # ffn layer x += layer_prepostprocess_dropout( mtf.layers.dense_relu_dense( mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"), feedforward_dim, hparams.dropout, dropout_broadcast_dims=[length_dim]), hparams) output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm") return output
Set of hyperparameters.
def mtf_image_transformer_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.no_data_parallelism = True hparams.use_fixed_batch_size = True hparams.batch_size = 1 hparams.max_length = 3072 hparams.hidden_size = 256 hparams.label_smoothing = 0.0 # 8-way model-parallelism hparams.add_hparam("mesh_shape", "batch:8") hparams.add_hparam("layout", "batch:batch") hparams.add_hparam("mtf_mode", True) hparams.add_hparam("num_heads", 8) hparams.add_hparam("filter_size", 1024) hparams.add_hparam("num_encoder_layers", 0) hparams.add_hparam("num_decoder_layers", 6) hparams.add_hparam("attention_key_size", 256) hparams.add_hparam("attention_value_size", 256) # Share weights between input and target embeddings hparams.shared_embedding = True # mixture of experts hparams hparams.add_hparam("ffn_layer", "dense_relu_dense") hparams.add_hparam("moe_overhead_train", 1.0) hparams.add_hparam("moe_overhead_eval", 2.0) hparams.moe_num_experts = 16 hparams.moe_loss_coef = 1e-3 hparams.shared_embedding_and_softmax_weights = True hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 hparams.add_hparam("d_kv", 64) hparams.add_hparam("d_ff", 2048) # Image related hparams hparams.add_hparam("img_len", 32) hparams.add_hparam("num_channels", 3) hparams.add_hparam("unconditional", True) # Local Attention related params hparams.add_hparam("block_length", 128) hparams.add_hparam("block_height", 16) hparams.add_hparam("block_width", 16) hparams.add_hparam("attention_type", "local1d") return hparams
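A quick arithmetic check on these defaults: the max_length of 3072 is exactly a 32x32 RGB image flattened to one position per channel.
img_len, num_channels = 32, 3
print(img_len * img_len * num_channels)  # 3072 == hparams.max_length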
Catch bugs locally...
def mtf_image_transformer_tiny(): """Catch bugs locally...""" hparams = mtf_image_transformer_base() hparams.hidden_size = 128 hparams.d_ff = 256 hparams.batch_size = 4 hparams.num_encoder_layers = 1 hparams.num_decoder_layers = 4 hparams.num_heads = 4 hparams.attention_key_size = 128 hparams.attention_value_size = 128 hparams.block_length = 32 # data parallelism and model-parallelism hparams.mesh_shape = "batch:2" hparams.layout = "batch:batch" return hparams
Small single parameters.
def mtf_image_transformer_single(): """Small single parameters.""" hparams = mtf_image_transformer_tiny() hparams.mesh_shape = "" hparams.layout = "" hparams.hidden_size = 32 hparams.filter_size = 32 hparams.batch_size = 1 hparams.num_encoder_layers = 1 hparams.num_decoder_layers = 1 hparams.num_heads = 2 hparams.attention_key_size = 32 hparams.attention_value_size = 32 hparams.block_length = 16 return hparams
Small single parameters.
def mtf_image_transformer_base_single(): """Small single parameters.""" hparams = mtf_image_transformer_base() hparams.num_decoder_layers = 6 hparams.filter_size = 256 hparams.block_length = 128 hparams.mesh_shape = "" hparams.layout = "" return hparams
Small single parameters.
def mtf_image_transformer_tiny_spatial1d(): """Small single parameters.""" hparams = mtf_image_transformer_tiny() hparams.num_decoder_layers = 6 hparams.filter_size = 128 hparams.block_height = 8 hparams.block_width = 8 hparams.attention_type = "local1d_spatial" hparams.mesh_shape = "" hparams.layout = "" return hparams
Data parallel CIFAR parameters.
def mtf_image_transformer_base_cifar(): """Data parallel CIFAR parameters.""" hparams = mtf_image_transformer_base() hparams.mesh_shape = "batch:8" hparams.layout = "batch:batch" hparams.learning_rate_decay_steps = 13600 # one epoch hparams.batch_size = 32 hparams.num_heads = 4 hparams.num_decoder_layers = 12 hparams.block_length = 256 hparams.hidden_size = 512 hparams.d_ff = 2048 hparams.learning_rate = 0.5 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.3 hparams.unconditional = True return hparams
Data parallel CIFAR parameters.
def mtf_image_transformer_cifar_4x(): """Data parallel CIFAR parameters.""" hparams = mtf_image_transformer_base_cifar() hparams.mesh_shape = "batch:32" hparams.layout = "batch:batch" hparams.batch_size = 128 return hparams
Model parallel CIFAR parameters.
def mtf_image_transformer_cifar_mp_4x(): """Data parallel CIFAR parameters.""" hparams = mtf_image_transformer_base_cifar() hparams.mesh_shape = "model:4;batch:8" hparams.layout = "batch:batch;d_ff:model;heads:model" hparams.batch_size = 32 hparams.num_heads = 8 hparams.d_ff = 8192 return hparams
Data parallel ImageNet parameters.
def mtf_image_transformer_base_imagenet(): """Data parallel CIFAR parameters.""" hparams = mtf_image_transformer_base_cifar() hparams.mesh_shape = "batch:32" hparams.layout = "batch:batch" hparams.batch_size = 128 hparams.d_ff = 2048 hparams.hidden_size = 512 hparams.num_decoder_layers = 12 hparams.learning_rate = 0.5 hparams.learning_rate_warmup_steps = 31250 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.1 hparams.unconditional = True return hparams
Model parallel ImageNet parameters.
def mtf_image_transformer_base_imagenet_mp(): """Model parallel ImageNet parameters.""" hparams = mtf_image_transformer_base_imagenet() hparams.mesh_shape = "model:4;batch:8" hparams.layout = "batch:batch;d_ff:model;heads:model" hparams.batch_size = 32 hparams.num_heads = 8 hparams.d_ff = 8192 hparams.learning_rate_warmup_steps = 31250 hparams.unconditional = True return hparams
Model parallel ImageNet parameters.
def mtf_image_transformer_base_imagenet_mp128(): """Model parallel ImageNet parameters.""" hparams = mtf_image_transformer_base_imagenet() hparams.mesh_shape = "model:8;batch:4" hparams.layout = "batch:batch;d_ff:model;heads:model" hparams.batch_size = 8 hparams.img_len = 128 hparams.block_length = 128 hparams.num_heads = 8 hparams.num_decoder_layers = 4 hparams.d_ff = 4096 hparams.learning_rate_warmup_steps = 31250 hparams.unconditional = True hparams.max_length = 256*256*3 return hparams
Model parallel ImageNet parameters.
def mtf_image_transformer_base_imagenet_mp_sp(): """Model parallel ImageNet parameters.""" hparams = mtf_image_transformer_base_imagenet_mp128() hparams.mesh_shape = "model:8;batch:4" hparams.layout = "batch:batch;d_ff:model;num_wblocks:model" hparams.batch_size = 8 hparams.img_len = 128 hparams.block_length = 128 hparams.attention_type = "local1d_spatial" return hparams
Model parallel ImageNet parameters.
def mtf_image_transformer_base_imagenet_mp64(): """Model parallel ImageNet parameters.""" hparams = mtf_image_transformer_base_imagenet() hparams.mesh_shape = "model:8;batch:4" hparams.layout = "batch:batch;d_ff:model;heads:model" hparams.batch_size = 8 hparams.img_len = 64 hparams.num_decoder_layers = 8 return hparams
Returns a list of degree vectors, one for each input and hidden layer. A unit with degree d can only receive input from units with degree < d. Output units always have the same degree as their associated input unit. Args: input_dim: Number of inputs. hidden_dims: list with the number of hidden units per layer. It does not include the output layer. Each hidden unit size must be at least the size of length (otherwise autoregressivity is not possible). input_order: Order of degrees to the input units: 'random', 'left-to-right', 'right-to-left', or an array of an explicit order. For example, 'left-to-right' builds an autoregressive model p(x) = p(x1) p(x2 | x1) ... p(xD | x<D). hidden_order: Order of degrees to the hidden units: 'random', 'left-to-right'. If 'left-to-right', hidden units are allocated equally (up to a remainder term) to each degree.
def create_degrees(input_dim,
                   hidden_dims,
                   input_order='left-to-right',
                   hidden_order='left-to-right'):
  """Returns a list of degree vectors, one for each input and hidden layer.

  A unit with degree d can only receive input from units with degree < d.
  Output units always have the same degree as their associated input unit.

  Args:
    input_dim: Number of inputs.
    hidden_dims: list with the number of hidden units per layer. It does not
      include the output layer. Each hidden unit size must be at least the size
      of length (otherwise autoregressivity is not possible).
    input_order: Order of degrees to the input units: 'random', 'left-to-right',
      'right-to-left', or an array of an explicit order. For example,
      'left-to-right' builds an autoregressive model
      p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
    hidden_order: Order of degrees to the hidden units: 'random',
      'left-to-right'. If 'left-to-right', hidden units are allocated equally
      (up to a remainder term) to each degree.
  """
  if (isinstance(input_order, str) and
      input_order not in ('random', 'left-to-right', 'right-to-left')):
    raise ValueError('Input order is not valid.')
  if hidden_order not in ('random', 'left-to-right'):
    raise ValueError('Hidden order is not valid.')

  degrees = []
  if isinstance(input_order, str):
    input_degrees = np.arange(1, input_dim + 1)
    if input_order == 'right-to-left':
      input_degrees = np.flip(input_degrees, 0)
    elif input_order == 'random':
      np.random.shuffle(input_degrees)
  else:
    input_order = np.array(input_order)
    # Raise if the explicit order is not a permutation of 1..input_dim.
    if np.any(np.sort(input_order) != np.arange(1, input_dim + 1)):
      raise ValueError('invalid input order')
    input_degrees = input_order
  degrees.append(input_degrees)

  for units in hidden_dims:
    if hidden_order == 'random':
      min_prev_degree = min(np.min(degrees[-1]), input_dim - 1)
      hidden_degrees = np.random.randint(
          low=min_prev_degree, high=input_dim, size=units)
    elif hidden_order == 'left-to-right':
      hidden_degrees = (np.arange(units) % max(1, input_dim - 1) +
                        min(1, input_dim - 1))
    degrees.append(hidden_degrees)
  return degrees
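For intuition, here is a minimal NumPy-only sketch (separate from the library function above; the helper name is illustrative) of the 'left-to-right' degree allocation for a toy case. The arithmetic mirrors the expression used for hidden degrees in create_degrees.

import numpy as np

def toy_left_to_right_degrees(input_dim, hidden_dims):
  """Degrees for input and hidden layers under 'left-to-right' allocation."""
  degrees = [np.arange(1, input_dim + 1)]  # input units get degrees 1..D
  for units in hidden_dims:
    # Hidden degrees cycle through 1..D-1, so every degree receives roughly
    # the same number of hidden units.
    degrees.append(np.arange(units) % max(1, input_dim - 1) +
                   min(1, input_dim - 1))
  return degrees

print(toy_left_to_right_degrees(input_dim=4, hidden_dims=[6]))
# [array([1, 2, 3, 4]), array([1, 2, 3, 1, 2, 3])]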
Returns a list of binary mask matrices respecting autoregressive ordering. Args: input_dim: Number of inputs. hidden_dims: list with the number of hidden units per layer. It does not include the output layer; those number of units will always be set to input_dim downstream. Each hidden unit size must be at least the size of length (otherwise autoregressivity is not possible). input_order: Order of degrees to the input units: 'random', 'left-to-right', 'right-to-left', or an array of an explicit order. For example, 'left-to-right' builds an autoregressive model p(x) = p(x1) p(x2 | x1) ... p(xD | x<D). hidden_order: Order of degrees to the hidden units: 'random', 'left-to-right'. If 'left-to-right', hidden units are allocated equally (up to a remainder term) to each degree.
def create_masks(input_dim, hidden_dims, input_order='left-to-right', hidden_order='left-to-right'): """Returns a list of binary mask matrices respecting autoregressive ordering. Args: input_dim: Number of inputs. hidden_dims: list with the number of hidden units per layer. It does not include the output layer; those number of units will always be set to input_dim downstream. Each hidden unit size must be at least the size of length (otherwise autoregressivity is not possible). input_order: Order of degrees to the input units: 'random', 'left-to-right', 'right-to-left', or an array of an explicit order. For example, 'left-to-right' builds an autoregressive model p(x) = p(x1) p(x2 | x1) ... p(xD | x<D). hidden_order: Order of degrees to the hidden units: 'random', 'left-to-right'. If 'left-to-right', hidden units are allocated equally (up to a remainder term) to each degree. """ degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order) masks = [] # Create input-to-hidden and hidden-to-hidden masks. for input_degrees, output_degrees in zip(degrees[:-1], degrees[1:]): mask = tf.cast(input_degrees[:, np.newaxis] <= output_degrees, tf.float32) masks.append(mask) # Create hidden-to-output mask. mask = tf.cast(degrees[-1][:, np.newaxis] < degrees[0], tf.float32) masks.append(mask) return masks
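As a sanity check, the following NumPy sketch (independent of the TF function above) builds the same `<=` and `<` masks from hand-picked degree vectors and verifies that the composed connectivity is autoregressive.

import numpy as np

# Degrees for a 3-input model with one hidden layer of 4 units,
# following the same convention as create_degrees above.
input_degrees = np.array([1, 2, 3])
hidden_degrees = np.array([1, 2, 1, 2])

# Input-to-hidden mask: hidden unit j may see input i iff deg(i) <= deg(j).
m_in = (input_degrees[:, np.newaxis] <= hidden_degrees).astype(np.float32)
# Hidden-to-output mask: output i may see hidden j iff deg(j) < deg(i).
m_out = (hidden_degrees[:, np.newaxis] < input_degrees).astype(np.float32)

# Composed connectivity: entry (i, j) > 0 means output j depends on input i.
# Autoregressivity requires output j to depend only on inputs with a strictly
# smaller degree, so the diagonal and lower triangle must be zero.
connectivity = m_in @ m_out
print(connectivity)
assert np.all(np.tril(connectivity) == 0)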
Performs incomplete Sinkhorn normalization to inputs. By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved matrix with positive entries can be turned into a doubly-stochastic matrix (i.e. its rows and columns add up to one) via successive row and column normalization. -To ensure positivity, the effective input to sinkhorn has to be exp(inputs) (elementwise). -However, for stability, sinkhorn works in the log-space. It is only at return time that entries are exponentiated. Code is adapted from Mena et al. [2]. [1] Richard Sinkhorn and Paul Knopp. Concerning nonnegative matrices and doubly stochastic matrices. Pacific Journal of Mathematics, 1967. [2] Gonzalo Mena, David Belanger, Scott Linderman, Jasper Snoek. Learning latent permutations with Gumbel-Sinkhorn networks. International Conference on Learning Representations, 2018. Args: inputs: A `Tensor` with shape `[..., vocab_size, vocab_size]`. n_iters: Number of sinkhorn iterations (in practice, as few as 20 iterations are needed to achieve decent convergence for `vocab_size` ~100). Returns: outputs: A `Tensor` of close-to-doubly-stochastic matrices with shape `[-1, vocab_size, vocab_size]`.
def sinkhorn(inputs, n_iters=20):
  """Performs incomplete Sinkhorn normalization to inputs.

  By a theorem by Sinkhorn and Knopp [1], a sufficiently well-behaved matrix
  with positive entries can be turned into a doubly-stochastic matrix (i.e.
  its rows and columns add up to one) via successive row and column
  normalization.

  -To ensure positivity, the effective input to sinkhorn has to be
   exp(inputs) (elementwise).
  -However, for stability, sinkhorn works in the log-space. It is only at
   return time that entries are exponentiated.

  Code is adapted from Mena et al. [2].

  [1] Richard Sinkhorn and Paul Knopp. Concerning nonnegative matrices and
      doubly stochastic matrices. Pacific Journal of Mathematics, 1967.
  [2] Gonzalo Mena, David Belanger, Scott Linderman, Jasper Snoek. Learning
      latent permutations with Gumbel-Sinkhorn networks. International
      Conference on Learning Representations, 2018.

  Args:
    inputs: A `Tensor` with shape `[..., vocab_size, vocab_size]`.
    n_iters: Number of sinkhorn iterations (in practice, as few as 20
      iterations are needed to achieve decent convergence for
      `vocab_size` ~100).

  Returns:
    outputs: A `Tensor` of close-to-doubly-stochastic matrices with shape
      `[-1, vocab_size, vocab_size]`.
  """
  vocab_size = tf.shape(inputs)[-1]
  log_alpha = tf.reshape(inputs, [-1, vocab_size, vocab_size])
  for _ in range(n_iters):
    # Row normalization in log space.
    log_alpha -= tf.reshape(
        tf.reduce_logsumexp(log_alpha, axis=2), [-1, vocab_size, 1])
    # Column normalization in log space.
    log_alpha -= tf.reshape(
        tf.reduce_logsumexp(log_alpha, axis=1), [-1, 1, vocab_size])
  outputs = tf.exp(log_alpha)
  return outputs
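The same log-space iteration is easy to replicate in plain NumPy. This is a standalone sketch (helper names are illustrative, not part of the library) that alternates row and column log-normalization and then checks the result is close to doubly stochastic.

import numpy as np

def _logsumexp(x, axis):
  m = np.max(x, axis=axis, keepdims=True)
  return m + np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True))

def sinkhorn_np(log_alpha, n_iters=20):
  """Incomplete Sinkhorn normalization of one [n, n] matrix, in log space."""
  for _ in range(n_iters):
    log_alpha = log_alpha - _logsumexp(log_alpha, axis=1)  # rows
    log_alpha = log_alpha - _logsumexp(log_alpha, axis=0)  # columns
  return np.exp(log_alpha)

rng = np.random.RandomState(0)
p = sinkhorn_np(rng.randn(5, 5))
# Columns sum to 1 exactly after the last step; rows are close to 1.
print(np.abs(p.sum(axis=0) - 1.0).max(), np.abs(p.sum(axis=1) - 1.0).max())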
Random variable for f(x), where x ~ p(x) and f is reversible.
def TransformedRandomVariable(random_variable, # pylint: disable=invalid-name reversible_layer, name=None, sample_shape=(), value=None): """Random variable for f(x), where x ~ p(x) and f is reversible.""" return ed.RandomVariable( distribution=TransformedDistribution(random_variable.distribution, reversible_layer, name=name), sample_shape=sample_shape, value=value)
Returns log det | dx / dy | = num_events * sum log | scale |.
def log_det_jacobian(self, inputs): """Returns log det | dx / dy | = num_events * sum log | scale |.""" del inputs # unused # Number of events is number of all elements excluding the batch and # channel dimensions. num_events = tf.reduce_prod(tf.shape(inputs)[1:-1]) log_det_jacobian = num_events * tf.reduce_sum(self.log_scale) return log_det_jacobian
Slice encoder hidden state into block_dim. Args: x: Encoder hidden state of shape [-1, hidden_size]. Returns: Sliced states of shape [-1, num_blocks, block_dim].
def slice_hidden(self, x): """Slice encoder hidden state into block_dim. Args: x: Encoder hidden state of shape [-1, hidden_size]. Returns: Sliced states of shape [-1, num_blocks, block_dim]. """ x_sliced = tf.reshape( x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim]) return x_sliced
Find the nearest element in means to elements in x. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means of shape. Returns: Tensor with nearest element in mean encoded in one-hot notation.
def nearest_neighbor(self, x, means): """Find the nearest element in means to elements in x. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means of shape. Returns: Tensor with nearest element in mean encoded in one-hot notation. """ x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True) scalar_prod = tf.matmul( tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1])) scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2]) dist = x_norm_sq + tf.transpose( means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod if self.hparams.soft_em: nearest_idx = tf.stack( [ tf.multinomial( -dist[:, i, :], num_samples=self.hparams.num_samples) for i in range(self.hparams.num_blocks) ], axis=1) nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size) nearest_hot = tf.reduce_mean(nearest_hot, axis=-2) else: if self.hparams.random_top_k > 1: _, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k) nearest_idx = tf.gather( top_k_idx, tf.random_uniform( [1], minval=0, maxval=self.hparams.random_top_k - 1, dtype=tf.int32), axis=-1) else: if self.hparams.use_scales: dist /= tf.reshape(self.hparams.scales, [1, 1, self.hparams.moe_num_experts]) nearest_idx = tf.argmax(-dist, axis=-1) nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size) return nearest_hot
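The distance computation above uses the identity ||x - m||^2 = ||x||^2 + ||m||^2 - 2<x, m>, evaluated per block. Below is a NumPy sketch of just that step with illustrative shapes; the sampling, top-k, and scaling options of the method are omitted.

import numpy as np

# Toy shapes: 6 examples, 2 blocks, block_dim 4, and 5 codebook means per block.
rng = np.random.RandomState(0)
x = rng.randn(6, 2, 4)        # [batch, num_blocks, block_dim]
means = rng.randn(2, 5, 4)    # [num_blocks, block_v_size, block_dim]

# ||x - m||^2 = ||x||^2 + ||m||^2 - 2 <x, m>, computed blockwise.
x_sq = np.sum(x ** 2, axis=-1, keepdims=True)       # [batch, blocks, 1]
m_sq = np.sum(means ** 2, axis=-1)                   # [blocks, v]
dot = np.einsum('bkd,kvd->bkv', x, means)            # [batch, blocks, v]
dist = x_sq + m_sq[np.newaxis] - 2.0 * dot           # [batch, blocks, v]

# Check against brute-force squared distances, then take the argmin.
brute = np.sum((x[:, :, np.newaxis, :] - means[np.newaxis]) ** 2, axis=-1)
assert np.allclose(dist, brute)
nearest_idx = np.argmin(dist, axis=-1)               # [batch, blocks]
print(nearest_idx.shape)  # (6, 2)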
Compute nearest neighbors and loss for training the embeddings. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means. Returns: The nearest neighbor in one hot form, the nearest neighbor itself, the commitment loss, embedding training loss.
def embedding_lookup(self, x, means): """Compute nearest neighbors and loss for training the embeddings. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means. Returns: The nearest neighbor in one hot form, the nearest neighbor itself, the commitment loss, embedding training loss. """ x_means_hot = self.nearest_neighbor(x, means) x_means_hot_flat = tf.reshape( x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size]) x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) x_means = tf.transpose(x_means, [1, 0, 2]) q_loss = tf.reduce_mean( tf.squared_difference(tf.stop_gradient(x), x_means)) e_loss = tf.reduce_mean( tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, x_means, q_loss, e_loss
Turn x_int representing numbers into a bitwise (lower-endian) tensor. Args: x_int: Tensor containing integer to be converted into base notation. num_bits: Number of bits in the representation. base: Base of the representation. Returns: Corresponding number expressed in base.
def int_to_bit(self, x_int, num_bits, base=2): """Turn x_int representing numbers into a bitwise (lower-endian) tensor. Args: x_int: Tensor containing integer to be converted into base notation. num_bits: Number of bits in the representation. base: Base of the representation. Returns: Corresponding number expressed in base. """ x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1)) # pylint: disable=g-complex-comprehension x_labels = [ tf.floormod( tf.floordiv(tf.to_int32(x_l), tf.to_int32(base)**i), tf.to_int32(base)) for i in range(num_bits)] res = tf.concat(x_labels, axis=-1) return tf.to_float(res)
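A NumPy round trip of the same lower-endian digit expansion, together with its inverse (analogous to the bit_to_int helper used elsewhere in this class), makes the convention concrete; the function names here are illustrative.

import numpy as np

def int_to_bits(x_int, num_bits, base=2):
  """Lower-endian digits of x_int in the given base, stacked on a new axis."""
  x_int = np.asarray(x_int)
  digits = [(x_int // base ** i) % base for i in range(num_bits)]
  return np.stack(digits, axis=-1)

def bits_to_int(x_bits, base=2):
  """Inverse of int_to_bits."""
  weights = base ** np.arange(x_bits.shape[-1])
  return np.sum(x_bits * weights, axis=-1)

x = np.array([0, 1, 6, 13])
bits = int_to_bits(x, num_bits=4)
print(bits[2])  # 6 -> [0 1 1 0] (lower-endian)
assert np.array_equal(bits_to_int(bits), x)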
Embedding function that takes discrete latent and returns embedding. Args: x: Input to the discretization bottleneck. Returns: Continuous embedding to be passed on to the decoder. Raises: ValueError: For unknown or missing arguments.
def embed(self, x): """Embedding function that takes discrete latent and returns embedding. Args: x: Input to the discretization bottleneck. Returns: Continuous embedding to be passed on to the decoder. Raises: ValueError: For unknown or missing arguments. """ shape_x = common_layers.shape_list(x) x_flat = tf.reshape(x, [-1, 1]) c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2) shape = common_layers.shape_list(c) new_shape = shape new_shape.append(self.hparams.num_blocks) new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks)) c = tf.to_int32(tf.reshape(c, shape=new_shape)) h1_shape = shape_x h1_shape.append(self.hparams.hidden_size) h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) c_int = self.bit_to_int( c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2) c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1) c_hot_flat = tf.reshape( c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]) h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means) h1 = tf.transpose(h1, perm=[1, 0, 2]) h1 = tf.reshape(h1, shape=h1_shape) h1_shape[0] = self.hparams.batch_size h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") res = tf.layers.dense( tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") return res
Discretization bottleneck for latent variables. Args: x: Input to the discretization bottleneck. Returns: Embedding to pass to the decoder, discrete latent, loss, and the embedding function. Raises: ValueError: If projection_tensors is None for reshape_method project, or ema_count or ema_means is None if we are using ema, or unknown args.
def discrete_bottleneck(self, x): """Discretization bottleneck for latent variables. Args: x: Input to the discretization bottleneck. Returns: Embedding to pass to the decoder, discrete latent, loss, and the embedding function. Raises: ValueError: If projection_tensors is None for reshape_method project, or ema_count or ema_means is None if we are using ema, or unknown args. """ x_reshaped = self.slice_hidden(x) x_means_hot = [] x_means = 0 loss = 0 x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup( x_reshaped, self.means) if self.hparams.ema: tf.logging.info("Using EMA with beta = {}".format(self.hparams.beta)) updated_ema_count = \ moving_averages.assign_moving_average( self.ema_count, tf.reduce_sum( tf.reshape( x_means_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]), axis=0), self.hparams.decay, zero_debias=False) dw = tf.matmul( tf.transpose(x_means_hot, perm=[1, 2, 0]), tf.transpose(x_reshaped, perm=[1, 0, 2])) updated_ema_means = \ moving_averages.assign_moving_average( self.ema_means, dw, self.hparams.decay, zero_debias=False) n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True) updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / ( n + 2**self.hparams.z_size * self.hparams.epsilon) * n) updated_ema_means = updated_ema_means / tf.expand_dims( updated_ema_count, axis=-1) with tf.control_dependencies([e_loss]): update_means = tf.assign(self.means, updated_ema_means) with tf.control_dependencies([update_means]): loss += self.hparams.beta * e_loss else: # Use a gradient based loss for learning the cluster centers loss += q_loss + self.hparams.beta * e_loss # Get the discrete latent representation x_means_idx = tf.argmax(x_means_hot, axis=-1) # Get the binary representation num_bits = int(self.hparams.z_size // self.hparams.num_blocks) x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2) x_discrete = self.bit_to_int( tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2) # Reshape x_discrete shape_x = common_layers.shape_list(x) shape_discrete = shape_x[:-1] x_discrete = tf.reshape(x_discrete, shape_discrete) x_means = tf.reshape(x_means, shape=shape_x) h1 = x + tf.stop_gradient(x_means - x) h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") res = tf.layers.dense( tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") embed_fn = partial(self.embed) return { "dense": res, "discrete": x_discrete, "loss": loss, "embed": embed_fn }
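The EMA branch above keeps a running count of code assignments and a running sum of assigned vectors, then smooths the counts before dividing. A single-block NumPy sketch of one such update step (variable names are illustrative, and the multi-block reshapes and transposes of the TF code are omitted):

import numpy as np

def ema_codebook_update(x, assign_one_hot, ema_count, ema_sum,
                        decay=0.99, epsilon=1e-5):
  """One EMA update of a VQ codebook (single block).

  x:              [batch, dim] encoder outputs.
  assign_one_hot: [batch, codes] one-hot nearest-code assignments.
  ema_count:      [codes] running count of assignments per code.
  ema_sum:        [codes, dim] running sum of vectors assigned to each code.
  """
  # Moving averages of assignment counts and per-code vector sums.
  ema_count = decay * ema_count + (1 - decay) * assign_one_hot.sum(axis=0)
  ema_sum = decay * ema_sum + (1 - decay) * assign_one_hot.T @ x
  # Laplace-smooth the counts so rarely used codes do not divide by ~zero.
  n = ema_count.sum()
  smoothed = (ema_count + epsilon) / (n + len(ema_count) * epsilon) * n
  # Updated code vectors are the smoothed average of their assigned inputs.
  means = ema_sum / smoothed[:, np.newaxis]
  return means, ema_count, ema_sum

rng = np.random.RandomState(0)
x = rng.randn(8, 3)
assign = np.eye(4)[rng.randint(0, 4, size=8)]
means, _, _ = ema_codebook_update(x, assign, np.ones(4), rng.randn(4, 3))
print(means.shape)  # (4, 3)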
Switch from Adam to Adafactor, approximating the behavior of Adam. Some minor things may be different, like epsilon and beta1 correction. Args: hparams: model hyperparameters where "adam" in hparams.optimizer
def mimic_adam_with_adafactor(hparams): """Switch from Adam to Adafactor, approximating the behavior of Adam. Some minor things may be different, like epsilon and beta1 correction. Args: hparams: model hyperparameters where "adam" in hparams.optimizer """ assert "adam" in hparams.optimizer hparams.optimizer = "adafactor" hparams.optimizer_adafactor_beta1 = hparams.optimizer_adam_beta1 hparams.optimizer_adafactor_beta2 = hparams.optimizer_adam_beta2 hparams.optimizer_adafactor_multiply_by_parameter_scale = False hparams.optimizer_adafactor_factored = False hparams.optimizer_adafactor_clipping_threshold = None hparams.optimizer_adafactor_decay_type = "adam"
Old version - Adam.
def afx_adam(): """Old version - Adam.""" hparams = transformer.transformer_base_v2() hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.999 hparams.symbol_modality_num_shards = 1 hparams.batch_size = 2048 hparams.optimizer = "adam" hparams.learning_rate_schedule = ( "constant*rsqrt_decay*linear_warmup*rsqrt_hidden_size") hparams.learning_rate_constant = 2.0 return hparams
Adafactor with recommended learning rate schedule.
def afx_adafactor(): """Adafactor with recommended learning rate schedule.""" hparams = afx_adam() hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 return hparams
Small transformer model with small batch size for fast step times.
def afx_small(): """Small transformer model with small batch size for fast step times.""" hparams = transformer.transformer_tpu() hparams.filter_size = 1024 hparams.num_heads = 4 hparams.num_hidden_layers = 3 hparams.batch_size = 512 return hparams
Emily's model hparams.
def next_frame_emily(): """Emily's model hparams.""" hparams = sv2p_params.next_frame_sv2p() hparams.video_num_input_frames = 2 hparams.video_num_target_frames = 10 hparams.learning_rate_constant = 1e-4 seq_length = hparams.video_num_input_frames + hparams.video_num_target_frames # The latent_loss_multiplier is divided by the number of frames because # the image sequence loss in t2t is averaged instead of added through # time as they do in the SVG-LP paper hparams.latent_loss_multiplier = 1e-4 / seq_length hparams.reward_prediction = False hparams.num_iterations_1st_stage = -1 hparams.num_iterations_2nd_stage = -1 hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.999 hparams.optimizer_adam_epsilon = 1e-08 hparams.anneal_end = -1 hparams.clip_grad_norm = 5.0 hparams.add_hparam("learned_prior", True) hparams.add_hparam("z_dim", 64) hparams.add_hparam("g_dim", 128) hparams.add_hparam("rnn_size", 256) hparams.add_hparam("prior_rnn_layers", 1) hparams.add_hparam("posterior_rnn_layers", 1) hparams.add_hparam("predictor_rnn_layers", 2) hparams.add_hparam("has_skips", True) hparams.add_hparam("has_batchnorm", True) return hparams
Convert a file to examples.
def main(_): """Convert a file to examples.""" if FLAGS.subword_text_encoder_filename: encoder = text_encoder.SubwordTextEncoder( FLAGS.subword_text_encoder_filename) elif FLAGS.token_text_encoder_filename: encoder = text_encoder.TokenTextEncoder(FLAGS.token_text_encoder_filename) elif FLAGS.byte_text_encoder: encoder = text_encoder.ByteTextEncoder() else: encoder = None reader = tf.python_io.tf_record_iterator(FLAGS.input_filename) total_sequences = 0 total_input_tokens = 0 total_target_tokens = 0 nonpadding_input_tokens = 0 nonpadding_target_tokens = 0 max_input_length = 0 max_target_length = 0 for record in reader: x = tf.train.Example() x.ParseFromString(record) inputs = [int(i) for i in x.features.feature["inputs"].int64_list.value] targets = [int(i) for i in x.features.feature["targets"].int64_list.value] if FLAGS.print_inputs: print("INPUTS:\n" + encoder.decode(inputs) if encoder else inputs) if FLAGS.print_targets: print("TARGETS:\n" + encoder.decode(targets) if encoder else targets) nonpadding_input_tokens += len(inputs) - inputs.count(0) nonpadding_target_tokens += len(targets) - targets.count(0) total_input_tokens += len(inputs) total_target_tokens += len(targets) total_sequences += 1 max_input_length = max(max_input_length, len(inputs)) max_target_length = max(max_target_length, len(targets)) if FLAGS.print_all: for k, v in six.iteritems(x.features.feature): print("%s: %s" % (k, v.int64_list.value)) print("total_sequences: %d" % total_sequences) print("total_input_tokens: %d" % total_input_tokens) print("total_target_tokens: %d" % total_target_tokens) print("nonpadding_input_tokens: %d" % nonpadding_input_tokens) print("nonpadding_target_tokens: %d" % nonpadding_target_tokens) print("max_input_length: %d" % max_input_length) print("max_target_length: %d" % max_target_length)
Return a mix of env and video data fields and decoders.
def example_reading_spec(self): """Return a mix of env and video data fields and decoders.""" video_fields, video_decoders = ( video_utils.VideoProblem.example_reading_spec(self)) env_fields, env_decoders = env_problem.EnvProblem.example_reading_spec(self) # Remove raw observations field since we want to capture them as videos. env_fields.pop(env_problem.OBSERVATION_FIELD) env_decoders.pop(env_problem.OBSERVATION_FIELD) # Add frame number spec and decoder. env_fields[_FRAME_NUMBER_FIELD] = tf.FixedLenFeature((1,), tf.int64) env_decoders[ _FRAME_NUMBER_FIELD] = tf.contrib.slim.tfexample_decoder.Tensor( _FRAME_NUMBER_FIELD) # Add video fields and decoders env_fields.update(video_fields) env_decoders.update(video_decoders) return env_fields, env_decoders
Transforms time step observations to frames of a video.
def _generate_time_steps(self, trajectory_list): """Transforms time step observations to frames of a video.""" for time_step in env_problem.EnvProblem._generate_time_steps( self, trajectory_list): # Convert the rendered observations from numpy to png format. frame_np = np.array(time_step.pop(env_problem.OBSERVATION_FIELD)) frame_np = frame_np.reshape( [self.frame_height, self.frame_width, self.num_channels]) # TODO(msaffar) Add support for non RGB rendered environments frame = png.from_array(frame_np, "RGB", info={"bitdepth": 8}) frame_buffer = six.BytesIO() frame.save(frame_buffer) # Put the encoded frame back. time_step[_IMAGE_ENCODED_FIELD] = [frame_buffer.getvalue()] time_step[_IMAGE_FORMAT_FIELD] = [_FORMAT] time_step[_IMAGE_HEIGHT_FIELD] = [self.frame_height] time_step[_IMAGE_WIDTH_FIELD] = [self.frame_width] # Add the frame number time_step[_FRAME_NUMBER_FIELD] = time_step[env_problem.TIMESTEP_FIELD] yield time_step
Iterate through lines of file.
def txt_line_iterator(txt_path): """Iterate through lines of file.""" with tf.gfile.Open(txt_path) as f: for line in f: yield line.strip()
Yield dicts for Text2TextProblem.generate_samples from lines of files.
def text2text_txt_iterator(source_txt_path, target_txt_path): """Yield dicts for Text2TextProblem.generate_samples from lines of files.""" for inputs, targets in zip( txt_line_iterator(source_txt_path), txt_line_iterator(target_txt_path)): yield {"inputs": inputs, "targets": targets}
Yield dicts for Text2TextProblem.generate_samples from lines of files.
def text2text_distill_iterator(source_txt_path, target_txt_path, distill_txt_path): """Yield dicts for Text2TextProblem.generate_samples from lines of files.""" for inputs, targets, dist_targets in zip( txt_line_iterator(source_txt_path), txt_line_iterator(target_txt_path), txt_line_iterator(distill_txt_path)): yield {"inputs": inputs, "targets": targets, "dist_targets": dist_targets}
Yield dicts for Text2ClassProblem.generate_samples from lines of files. Args: source_txt_path: txt file with record per line. label_txt_path: txt file with label per line, either as int or str. If string, must provide class_strs. class_strs: list<str> of class label names. Must be in correct order (i.e. ["a", "b", "c"] means that "a" will get class ID 0, "b" ID 1, etc.). Yields: {"inputs": inputs, "label": label}
def text2class_txt_iterator(source_txt_path, label_txt_path, class_strs=None): """Yield dicts for Text2ClassProblem.generate_samples from lines of files. Args: source_txt_path: txt file with record per line. label_txt_path: txt file with label per line, either as int or str. If string, must provide class_strs. class_strs: list<str> of class label names. Must be in correct order (i.e. ["a", "b", "c"] means that "a" will get class ID 0, "b" ID 1, etc.). Yields: {"inputs": inputs, "label": label} """ if class_strs: class_strs = dict([(s, i) for i, s in enumerate(class_strs)]) for inputs, label in zip( txt_line_iterator(source_txt_path), txt_line_iterator(label_txt_path)): label = label.strip() if class_strs: label = class_strs[label] else: label = int(label) yield {"inputs": inputs, "label": label}
Yield dicts for Text2TextProblem.generate_samples from lines of txt_path. Args: txt_path: path to txt file with a record per line, source and target are tab-separated. Yields: {"inputs": inputs, "targets": targets}
def text2text_txt_tab_iterator(txt_path): """Yield dicts for Text2TextProblem.generate_samples from lines of txt_path. Args: txt_path: path to txt file with a record per line, source and target are tab-separated. Yields: {"inputs": inputs, "targets": targets} """ for line in txt_line_iterator(txt_path): if line and "\t" in line: parts = line.split("\t", 1) inputs, targets = parts[:2] yield {"inputs": inputs.strip(), "targets": targets.strip()}
Encode Text2Text samples from the generator with the vocab.
def text2text_generate_encoded(sample_generator, vocab, targets_vocab=None, has_inputs=True, inputs_prefix="", targets_prefix=""): """Encode Text2Text samples from the generator with the vocab.""" targets_vocab = targets_vocab or vocab for sample in sample_generator: if has_inputs: sample["inputs"] = vocab.encode(inputs_prefix + sample["inputs"]) sample["inputs"].append(text_encoder.EOS_ID) sample["targets"] = targets_vocab.encode(targets_prefix + sample["targets"]) sample["targets"].append(text_encoder.EOS_ID) yield sample
For packed datasets, returns a function to pack examples. Returns: None or a function from list of TFRecords to list of TFRecords
def _pack_fn(self): """For packed datasets, returns a function to pack examples. Returns: None or a function from list of TFRecords to list of TFRecords """ if not self.packed_length: return None def my_fn(records): """Function from list of TFRecords to list of TFRecords.""" examples = [] for record in records: x = tf.train.Example() x.ParseFromString(record) example_dict = {} if self.has_inputs: example_dict["inputs"] = [ int(i) for i in x.features.feature["inputs"].int64_list.value] example_dict["targets"] = [ int(i) for i in x.features.feature["targets"].int64_list.value] examples.append(example_dict) examples = list(self._maybe_pack_examples(examples)) return [ generator_utils.to_example(x).SerializeToString() for x in examples] return my_fn
Wraps generator with packer if self.packed_length.
def _maybe_pack_examples(self, generator): """Wraps generator with packer if self.packed_length.""" if not self.packed_length: return generator return generator_utils.pack_examples( generator, self.has_inputs, self.packed_length, spacing=self.packed_spacing, chop_long_sequences=not self.has_inputs)
List of input filepaths for a particular training or dev shard. Args: tmp_dir: a string task_id: an integer less than self.num_train_shards + self.num_dev_shards Returns: a list of filepaths
def text_filepaths_for_task(self, tmp_dir, task_id):
  """List of input filepaths for a particular training or dev shard.

  Args:
    tmp_dir: a string
    task_id: an integer less than self.num_train_shards + self.num_dev_shards

  Returns:
    a list of filepaths
  """
  assert task_id >= 0
  assert task_id < self.num_train_shards + self.num_dev_shards
  if task_id < self.num_train_shards:
    return [
        f for i, f in enumerate(self.train_text_filepaths(tmp_dir))
        if i % self.num_train_shards == task_id
    ]
  else:
    return [
        f for i, f in enumerate(self.dev_text_filepaths(tmp_dir))
        if i % self.num_dev_shards == task_id - self.num_train_shards
    ]
Read text out of an input file. The default just reads the text, converts to unicode and yields one unicode string. Subclasses can override this function in order to preprocess, and can yield any number of strings. Args: filepath: a string Yields: unicode strings.
def filepath_to_unicode_strings(self, filepath): """Read text out of an input file. The default just reads the text, converts to unicode and yields one unicode string. Subclasses can override this function in order to preprocess, and can yield any number of strings. Args: filepath: a string Yields: unicode strings. """ f = tf.gfile.Open(filepath) b = f.read() yield text_encoder.to_unicode_ignore_errors(b)
Read complete text of input files and yield unicode strings. By default, one unicode string is produced per file, but this is not guaranteed, since subclasses can override filepath_to_unicode_strings(). max_chars_per_file and max_chars_total can also be specified, in which case some strings may be truncated or dropped to limit the total amount of output. Args: filepaths: a list of strings max_chars_per_file: an optional integer max_chars_total: an optional integer Yields: unicode strings
def file_generator(self, filepaths, max_chars_per_file=None, max_chars_total=None): """Read complete text of input files and yield unicode strings. By default, one unicode string is produced per file, but this is not guaranteed, since subclasses can override filepath_to_unicode_strings(). max_chars_per_file and max_chars_total can also be specified, in which case some strings may be truncated or dropped to limit the total amount of output. Args: filepaths: a list of strings max_chars_per_file: an optional integer max_chars_total: an optional integer Yields: unicode strings """ chars_total = 0 for fname in filepaths: chars_this_file = 0 tf.logging.info("reading file %s" % fname) for text in self.filepath_to_unicode_strings(fname): if (max_chars_per_file and chars_this_file + len(text) > max_chars_per_file): text = text[:max_chars_per_file - chars_this_file] if max_chars_total and chars_total + len(text) > max_chars_total: text = text[:max_chars_total - chars_total] chars_total += len(text) chars_this_file += len(text) if text: yield text if max_chars_total and chars_total >= max_chars_total: return if max_chars_per_file and chars_this_file >= max_chars_per_file: break
Generator for examples. Args: encoder: a TextEncoder tmp_dir: a string task_id: an integer Yields: feature dictionaries
def example_generator(self, encoder, tmp_dir, task_id): """Generator for examples. Args: encoder: a TextEncoder tmp_dir: a string task_id: an integer Yields: feature dictionaries """ filepaths = self.text_filepaths_for_task(tmp_dir, task_id) if task_id >= self.num_train_shards: # this is dev data - limit the total length. max_chars_per_file = self.max_dev_chars // ( self.num_dev_shards * len(filepaths)) else: max_chars_per_file = None tokens = [] for ftext in self.file_generator( filepaths, max_chars_per_file=max_chars_per_file): tokens.extend(encoder.encode(ftext)) pos = 0 while pos + self.sequence_length <= len(tokens): yield {"targets": tokens[pos:pos + self.sequence_length]} pos += self.sequence_length if pos > 0: tokens = tokens[pos:] if self.remainder_policy == "pad": if tokens: targets = tokens + [0] * (self.sequence_length - len(tokens)) yield {"targets": targets} else: assert self.remainder_policy == "drop"
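The chunking loop above slices a flat token stream into fixed-length "targets" examples and either pads or drops the remainder. A plain-Python sketch of that slicing policy (simplified to a single token list; the library version also carries leftover tokens across input files):

def chunk_tokens(tokens, sequence_length, remainder_policy='pad'):
  """Yield fixed-length 'targets' chunks from a flat token list."""
  pos = 0
  while pos + sequence_length <= len(tokens):
    yield {'targets': tokens[pos:pos + sequence_length]}
    pos += sequence_length
  remainder = tokens[pos:]
  if remainder and remainder_policy == 'pad':
    # Pad the final partial chunk with zeros instead of dropping it.
    yield {'targets': remainder + [0] * (sequence_length - len(remainder))}

print(list(chunk_tokens([5, 6, 7, 8, 9], sequence_length=2)))
# [{'targets': [5, 6]}, {'targets': [7, 8]}, {'targets': [9, 0]}]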
Make sure that the data is prepared and the vocab is generated.
def prepare_to_generate(self, data_dir, tmp_dir): """Make sure that the data is prepared and the vocab is generated.""" self.get_or_create_vocab(data_dir, tmp_dir) self.train_text_filepaths(tmp_dir) self.dev_text_filepaths(tmp_dir)
Generates training/dev data. Args: data_dir: a string tmp_dir: a string task_id: an optional integer Returns: shard or shards for which data was generated.
def generate_data(self, data_dir, tmp_dir, task_id=-1): """Generates training/dev data. Args: data_dir: a string tmp_dir: a string task_id: an optional integer Returns: shard or shards for which data was generated. """ tf.logging.info("generate_data task_id=%s" % task_id) encoder = self.get_or_create_vocab(data_dir, tmp_dir) assert task_id >= 0 and task_id < self.num_generate_tasks if task_id < self.num_train_shards: out_file = self.training_filepaths( data_dir, self.num_train_shards, shuffled=False)[task_id] else: out_file = self.dev_filepaths( data_dir, self.num_dev_shards, shuffled=False)[task_id - self.num_train_shards] generator_utils.generate_files( self.example_generator(encoder, tmp_dir, task_id), [out_file]) generator_utils.shuffle_dataset([out_file])
ResNet convolutional striding block.
def ConvBlock(kernel_size, filters, strides): """ResNet convolutional striding block.""" ks = kernel_size filters1, filters2, filters3 = filters main = layers.Serial( layers.Conv(filters1, (1, 1), strides), layers.BatchNorm(), layers.Relu(), layers.Conv(filters2, (ks, ks), padding='SAME'), layers.BatchNorm(), layers.Relu(), layers.Conv(filters3, (1, 1)), layers.BatchNorm() ) shortcut = layers.Serial( layers.Conv(filters3, (1, 1), strides), layers.BatchNorm() ) return layers.Serial( layers.Branch(), layers.Parallel(main, shortcut), layers.SumBranches(), layers.Relu() )
ResNet identical size block.
def IdentityBlock(kernel_size, filters): """ResNet identical size block.""" ks = kernel_size filters1, filters2, filters3 = filters main = layers.Serial( layers.Conv(filters1, (1, 1)), layers.BatchNorm(), layers.Relu(), layers.Conv(filters2, (ks, ks), padding='SAME'), layers.BatchNorm(), layers.Relu(), layers.Conv(filters3, (1, 1)), layers.BatchNorm() ) return layers.Serial( layers.Branch(), layers.Parallel(main, layers.Identity()), layers.SumBranches(), layers.Relu() )
ResNet. Args: hidden_size: the size of the first hidden layer (multiplied later). num_output_classes: how many classes to distinguish. mode: whether we are training or evaluating or doing inference. Returns: The ResNet model with the given layer and output sizes.
def Resnet50(hidden_size=64, num_output_classes=1001, mode='train'): """ResNet. Args: hidden_size: the size of the first hidden layer (multiplied later). num_output_classes: how many classes to distinguish. mode: whether we are training or evaluating or doing inference. Returns: The ResNet model with the given layer and output sizes. """ del mode return layers.Serial( layers.Conv(hidden_size, (7, 7), (2, 2), 'SAME'), layers.BatchNorm(), layers.Relu(), layers.MaxPool(pool_size=(3, 3), strides=(2, 2)), ConvBlock(3, [hidden_size, hidden_size, 4 * hidden_size], (1, 1)), IdentityBlock(3, [hidden_size, hidden_size, 4 * hidden_size]), IdentityBlock(3, [hidden_size, hidden_size, 4 * hidden_size]), ConvBlock(3, [2 * hidden_size, 2 * hidden_size, 8 * hidden_size], (2, 2)), IdentityBlock(3, [2 * hidden_size, 2 * hidden_size, 8 * hidden_size]), IdentityBlock(3, [2 * hidden_size, 2 * hidden_size, 8 * hidden_size]), IdentityBlock(3, [2 * hidden_size, 2 * hidden_size, 8 * hidden_size]), ConvBlock(3, [4 * hidden_size, 4 * hidden_size, 16*hidden_size], (2, 2)), IdentityBlock(3, [4 * hidden_size, 4 * hidden_size, 16 * hidden_size]), IdentityBlock(3, [4 * hidden_size, 4 * hidden_size, 16 * hidden_size]), IdentityBlock(3, [4 * hidden_size, 4 * hidden_size, 16 * hidden_size]), IdentityBlock(3, [4 * hidden_size, 4 * hidden_size, 16 * hidden_size]), IdentityBlock(3, [4 * hidden_size, 4 * hidden_size, 16 * hidden_size]), ConvBlock(3, [8 * hidden_size, 8 * hidden_size, 32*hidden_size], (2, 2)), IdentityBlock(3, [8 * hidden_size, 8 * hidden_size, 32 * hidden_size]), IdentityBlock(3, [8 * hidden_size, 8 * hidden_size, 32 * hidden_size]), layers.AvgPool(pool_size=(7, 7)), layers.Flatten(), layers.Dense(num_output_classes), layers.LogSoftmax())
WideResnet convolutional block.
def WideResnetBlock(channels, strides=(1, 1), channel_mismatch=False):
  """WideResnet convolutional block."""
  main = layers.Serial(
      layers.BatchNorm(), layers.Relu(),
      layers.Conv(channels, (3, 3), strides, padding='SAME'),
      layers.BatchNorm(), layers.Relu(),
      layers.Conv(channels, (3, 3), padding='SAME'))
  shortcut = layers.Identity() if not channel_mismatch else layers.Conv(
      channels, (3, 3), strides, padding='SAME')
  return layers.Serial(
      layers.Branch(), layers.Parallel(main, shortcut),
      layers.SumBranches())
WideResnet from https://arxiv.org/pdf/1605.07146.pdf. Args: num_blocks: int, number of blocks in a group. hidden_size: the size of the first hidden layer (multiplied later). num_output_classes: int, number of classes to distinguish. mode: is it training or eval. Returns: The WideResnet model with given layer and output sizes.
def WideResnet(num_blocks=3, hidden_size=64, num_output_classes=10, mode='train'): """WideResnet from https://arxiv.org/pdf/1605.07146.pdf. Args: num_blocks: int, number of blocks in a group. hidden_size: the size of the first hidden layer (multiplied later). num_output_classes: int, number of classes to distinguish. mode: is it training or eval. Returns: The WideResnet model with given layer and output sizes. """ del mode return layers.Serial( layers.Conv(hidden_size, (3, 3), padding='SAME'), WideResnetGroup(num_blocks, hidden_size), WideResnetGroup(num_blocks, hidden_size * 2, (2, 2)), WideResnetGroup(num_blocks, hidden_size * 4, (2, 2)), layers.BatchNorm(), layers.Relu(), layers.AvgPool(pool_size=(8, 8)), layers.Flatten(), layers.Dense(num_output_classes), layers.LogSoftmax())
Builds a traditional GRU cell with dense internal transformations. Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555 Args: units: Number of hidden units. Returns: A Stax model representing a traditional GRU RNN cell.
def GRUCell(units): """Builds a traditional GRU cell with dense internal transformations. Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555 Args: units: Number of hidden units. Returns: A Stax model representing a traditional GRU RNN cell. """ return GeneralGRUCell( candidate_transform=lambda: core.Dense(units=units), memory_transform=combinators.Identity, gate_nonlinearity=core.Sigmoid, candidate_nonlinearity=core.Tanh)
Builds a convolutional GRU. Paper: https://arxiv.org/abs/1511.06432. Args: units: Number of hidden units kernel_size: Kernel size for convolution Returns: A Stax model representing a GRU cell with convolution transforms.
def ConvGRUCell(units, kernel_size=(3, 3)): """Builds a convolutional GRU. Paper: https://arxiv.org/abs/1511.06432. Args: units: Number of hidden units kernel_size: Kernel size for convolution Returns: A Stax model representing a GRU cell with convolution transforms. """ def BuildConv(): return core.Conv(filters=units, kernel_size=kernel_size, padding='SAME') return GeneralGRUCell( candidate_transform=BuildConv, memory_transform=combinators.Identity, gate_nonlinearity=core.Sigmoid, candidate_nonlinearity=core.Tanh)
r"""Parametrized Gated Recurrent Unit (GRU) cell construction. GRU update equations: $$ Update gate: u_t = \sigmoid(U' * s_{t-1} + B') $$ $$ Reset gate: r_t = \sigmoid(U'' * s_{t-1} + B'') $$ $$ Candidate memory: c_t = \tanh(U * (r_t \odot s_{t-1}) + B) $$ $$ New State: s_t = u_t \odot s_{t-1} + (1 - u_t) \odot c_t $$ See combinators.GateBranches for details on the gating function. Args: candidate_transform: Transform to apply inside the Candidate branch. Applied before nonlinearities. memory_transform: Optional transformation on the memory before gating. gate_nonlinearity: Function to use as gate activation. Allows trying alternatives to Sigmoid, such as HardSigmoid. candidate_nonlinearity: Nonlinearity to apply after candidate branch. Allows trying alternatives to traditional Tanh, such as HardTanh dropout_rate_c: Amount of dropout on the transform (c) gate. Dropout works best in a GRU when applied exclusively to this branch. sigmoid_bias: Constant to add before sigmoid gates. Generally want to start off with a positive bias. Returns: A model representing a GRU cell with specified transforms.
def GeneralGRUCell(candidate_transform, memory_transform=combinators.Identity, gate_nonlinearity=core.Sigmoid, candidate_nonlinearity=core.Tanh, dropout_rate_c=0.1, sigmoid_bias=0.5): r"""Parametrized Gated Recurrent Unit (GRU) cell construction. GRU update equations: $$ Update gate: u_t = \sigmoid(U' * s_{t-1} + B') $$ $$ Reset gate: r_t = \sigmoid(U'' * s_{t-1} + B'') $$ $$ Candidate memory: c_t = \tanh(U * (r_t \odot s_{t-1}) + B) $$ $$ New State: s_t = u_t \odot s_{t-1} + (1 - u_t) \odot c_t $$ See combinators.GateBranches for details on the gating function. Args: candidate_transform: Transform to apply inside the Candidate branch. Applied before nonlinearities. memory_transform: Optional transformation on the memory before gating. gate_nonlinearity: Function to use as gate activation. Allows trying alternatives to Sigmoid, such as HardSigmoid. candidate_nonlinearity: Nonlinearity to apply after candidate branch. Allows trying alternatives to traditional Tanh, such as HardTanh dropout_rate_c: Amount of dropout on the transform (c) gate. Dropout works best in a GRU when applied exclusively to this branch. sigmoid_bias: Constant to add before sigmoid gates. Generally want to start off with a positive bias. Returns: A model representing a GRU cell with specified transforms. """ return combinators.Serial( combinators.Branch(num_branches=3), combinators.Parallel( # s_{t-1} branch - optionally transform # Typically is an identity. memory_transform(), # u_t (Update gate) branch combinators.Serial( candidate_transform(), # Want bias to start out positive before sigmoids. core.AddConstant(constant=sigmoid_bias), gate_nonlinearity()), # c_t (Candidate) branch combinators.Serial( combinators.Branch(num_branches=2), combinators.Parallel( combinators.Identity(), # r_t (Reset) Branch combinators.Serial( candidate_transform(), # Want bias to start out positive before sigmoids. core.AddConstant(constant=sigmoid_bias), gate_nonlinearity())), ## Gate S{t-1} with sigmoid(candidate_transform(S{t-1})) combinators.MultiplyBranches(), # Final projection + tanh to get Ct candidate_transform(), candidate_nonlinearity()), # Candidate gate # Only apply dropout on the C gate. # Paper reports that 0.1 is a good default. core.Dropout(rate=dropout_rate_c)), # Gate memory and candidate combinators.GateBranches())
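The update equations in the docstring are easy to write out for a single dense GRU step. A NumPy sketch that follows them directly (weight and bias names are hypothetical; the trax layer composes the same computation from Branch/Parallel/gating combinators):

import numpy as np

def sigmoid(x):
  return 1.0 / (1.0 + np.exp(-x))

def gru_step(s_prev, U_u, b_u, U_r, b_r, U_c, b_c):
  """One GRU state update following the equations above."""
  u = sigmoid(s_prev @ U_u + b_u)          # update gate
  r = sigmoid(s_prev @ U_r + b_r)          # reset gate
  c = np.tanh((r * s_prev) @ U_c + b_c)    # candidate memory
  return u * s_prev + (1.0 - u) * c        # new state

d = 4
rng = np.random.RandomState(0)
weights = [rng.randn(d, d) * 0.1 for _ in range(3)]
biases = [np.full(d, 0.5), np.full(d, 0.5), np.zeros(d)]  # positive gate bias
s_new = gru_step(np.ones(d), weights[0], biases[0], weights[1], biases[1],
                 weights[2], biases[2])
print(s_new.shape)  # (4,)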
Create an attention mask to hide padding and future words.
def MakeTargetMask(target, pad=0): """Create an attention mask to hide padding and future words.""" target_mask = (target != pad)[ :, np.newaxis, :] target_dtype = target_mask.dtype causal_mask = onp.tril(onp.ones((1, target.shape[-1], target.shape[-1]), dtype=target_dtype), k=0) target_mask = target_mask & causal_mask return np.expand_dims(target_mask, axis=1)
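A quick NumPy example of the resulting mask for a toy batch (pad id 0), showing the padding and causal components and the final shape with the added heads axis:

import numpy as np

target = np.array([[3, 7, 0],    # last position of the first row is padding
                   [5, 2, 9]])
pad = 0

padding_mask = (target != pad)[:, np.newaxis, :]            # [batch, 1, len]
causal_mask = np.tril(np.ones((1, 3, 3), dtype=bool))       # [1, len, len]
mask = np.expand_dims(padding_mask & causal_mask, axis=1)   # add heads axis
print(mask.shape)             # (2, 1, 3, 3)
print(mask[0, 0].astype(int))
# [[1 0 0]
#  [1 1 0]
#  [1 1 0]]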
Build masks for this batch. Args: source: (batch, source_len) array of integer-coded symbols for inputs target_in: (batch, batch_len) array of integer-coded symbols for targets pad: int: the padding symbol used to pad the above Returns: Prepared batch of tuple of arrays: source, input-target, shifted-target, source mask, target mask, source-target "memory" mask, minibatch token count
def PreparePairedSequenceBatch(source, target_in, pad=0): """Build masks for this batch. Args: source: (batch, source_len) array of integer-coded symbols for inputs target_in: (batch, batch_len) array of integer-coded symbols for targets pad: int: the padding symbol used to pad the above Returns: Prepared batch of tuple of arrays: source, input-target, shifted-target, source mask, target mask, source-target "memory" mask, minibatch token count """ target = target_in[:, :-1] target_y = target_in[:, 1:] source_mask = np.reshape(source != pad, (source.shape[0], 1, 1, source.shape[-1])) target_mask = MakeTargetMask(target, pad) memory_mask = ( np.reshape(np.arange(target.shape[-1]) < source.shape[-1], [-1, 1])) ntokens = np.sum(target_y != pad) return (source, target, target_y, source_mask, target_mask, memory_mask, ntokens)
Helper: create layer norm parameters.
def _layer_norm_new_params(input_shape, rng, epsilon=1e-6): # pylint: disable=invalid-name """Helper: create layer norm parameters.""" del rng, epsilon features = input_shape[-1] scale = np.ones(features) bias = np.zeros(features) return (scale, bias)
Helper: create positional encoding parameters.
def _positional_encoding_new_params(input_shape, rng, max_len=2048): # pylint: disable=invalid-name """Helper: create positional encoding parameters.""" del rng # Check if we are operating on chunked inputs by checking if the first # shape is a list/tuple of shapes (otherwise it's an int or numpy array). is_chunked = isinstance(input_shape[0], (list, tuple)) feature_depth = input_shape[0][-1] if is_chunked else input_shape[-1] pe = onp.zeros((max_len, feature_depth), dtype=onp.float32) position = onp.arange(0, max_len)[:, onp.newaxis] div_term = onp.exp( onp.arange(0, feature_depth, 2) * -(onp.log(10000.0) / feature_depth)) pe[:, 0::2] = onp.sin(position * div_term) pe[:, 1::2] = onp.cos(position * div_term) pe = pe[onp.newaxis, :, :] # [1, max_len, feature_depth] return np.array(pe)
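The sinusoidal table itself is self-contained; here is a NumPy sketch of the same construction (illustrative helper name) and of adding it to an activations array of shape [batch, time, depth]:

import numpy as np

def sinusoidal_positions(max_len, depth):
  """[1, max_len, depth] sinusoidal positional-encoding table."""
  pe = np.zeros((max_len, depth), dtype=np.float32)
  position = np.arange(max_len)[:, np.newaxis]
  div_term = np.exp(np.arange(0, depth, 2) * -(np.log(10000.0) / depth))
  pe[:, 0::2] = np.sin(position * div_term)   # even feature channels
  pe[:, 1::2] = np.cos(position * div_term)   # odd feature channels
  return pe[np.newaxis, :, :]

pe = sinusoidal_positions(max_len=16, depth=8)
x = np.zeros((2, 10, 8))                      # [batch, time, depth]
print((x + pe[:, :10, :]).shape)              # (2, 10, 8)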
Implements bare positional encoding.
def PositionalEncoding(x, params, **unused_kwargs): """Implements bare positional encoding.""" if not isinstance(x, (list, tuple)): # non-chunked inputs symbol_size = np.shape(x)[1] return x + params[:, :symbol_size, :] # Chunked case: apply to all chunks selecting as much as needed. offset = 0 results = [] for chunk in x: symbol_size = np.shape(chunk)[1] results.append(chunk + params[:, offset:offset + symbol_size, :]) offset += symbol_size return results
Core dot product self-attention. Args: query: array of representations key: array of representations value: array of representations mask: attention-mask, gates attention dropout: float: dropout rate mode: 'eval' or 'train': whether to use dropout rng: JAX PRNGKey: subkey for disposable use Returns: Self attention for q, k, v arrays.
def DotProductAttention(query, key, value, mask, dropout, mode, rng): """Core dot product self-attention. Args: query: array of representations key: array of representations value: array of representations mask: attention-mask, gates attention dropout: float: dropout rate mode: 'eval' or 'train': whether to use dropout rng: JAX PRNGKey: subkey for disposable use Returns: Self attention for q, k, v arrays. """ depth = np.shape(query)[-1] dots = np.matmul(query, np.swapaxes(key, -1, -2)) / np.sqrt(depth) if mask is not None: dots = np.where(mask, dots, -1e9) # Softmax. dots = np.exp(dots - backend.logsumexp(dots, axis=-1, keepdims=True)) if dropout >= 1.0: raise ValueError('Dropout rates must be lower than 1.') if dropout is not None and dropout > 0.0 and mode == 'train': keep = backend.random.bernoulli(rng, 1.0 - dropout, dots.shape) dots = np.where(keep, dots / (1.0 - dropout), 0) out = np.matmul(dots, value) return out
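Dropout aside, the computation reduces to scale, mask, softmax, and a weighted sum over the values. A compact NumPy version for reference (not the JAX layer above):

import numpy as np

def dot_product_attention_np(query, key, value, mask=None):
  """Scaled dot-product attention without dropout; shapes [..., len, depth]."""
  depth = query.shape[-1]
  dots = query @ np.swapaxes(key, -1, -2) / np.sqrt(depth)
  if mask is not None:
    dots = np.where(mask, dots, -1e9)  # block disallowed positions
  # Numerically stable softmax over the last axis.
  dots = np.exp(dots - dots.max(axis=-1, keepdims=True))
  dots = dots / dots.sum(axis=-1, keepdims=True)
  return dots @ value

rng = np.random.RandomState(0)
q = k = v = rng.randn(2, 5, 8)                    # [batch, len, depth]
causal = np.tril(np.ones((1, 5, 5), dtype=bool))  # mask out future positions
print(dot_product_attention_np(q, k, v, mask=causal).shape)  # (2, 5, 8)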
Pure single-headed self-attention. Args: dropout: float: dropout rate mode: str: 'train' or 'eval' Returns: Pure single-headed attention layer. (No Dense transforms on input.)
def PureDotProductAttention(dropout=0.0, mode='train'): """Pure single-headed self-attention. Args: dropout: float: dropout rate mode: str: 'train' or 'eval' Returns: Pure single-headed attention layer. (No Dense transforms on input.) """ def init_fun(_, input_shapes): # pylint: disable=invalid-name q_shape, _, v_shape, _ = input_shapes output_shape = q_shape[:-1] + (v_shape[-1],) return output_shape, () def apply_fun(params, inputs, **kwargs): # pylint: disable=invalid-name del params q, k, v, mask = inputs rng = kwargs.get('rng', None) return DotProductAttention(q, k, v, mask, dropout=dropout, mode=mode, rng=rng) return init_fun, apply_fun