'Calculate entropy of distribution.'
def entropy(self, logits, sampling_dim, act_dim, act_type):
  if self.env_spec.is_discrete(act_type):
    entropy = tf.reduce_sum(
        -tf.nn.softmax(logits) * tf.nn.log_softmax(logits), -1)
  elif self.env_spec.is_box(act_type):
    means = logits[:, :sampling_dim // 2]
    std = logits[:, sampling_dim // 2:]
    entropy = tf.reduce_sum(
        0.5 * (1 + tf.log(2 * np.pi * tf.square(std))), -1)
  else:
    assert False
  return entropy
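The continuous branch above is the closed-form entropy of a diagonal Gaussian, sum_i 0.5 * (1 + log(2 * pi * std_i^2)). A minimal NumPy sanity check of that formula, using made-up std values not tied to the code above:

import numpy as np

std = np.array([0.5, 1.0, 2.0])
entropy = np.sum(0.5 * (1.0 + np.log(2.0 * np.pi * std ** 2)))
# Equivalent closed form: 0.5 * d * (1 + log(2*pi)) + sum_i log(std_i).
assert np.isclose(entropy,
                  0.5 * len(std) * (1 + np.log(2 * np.pi)) + np.sum(np.log(std)))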
'Calculate KL of distribution with itself. Used later only for the gradients.'
def self_kl(self, logits, sampling_dim, act_dim, act_type):
  if self.env_spec.is_discrete(act_type):
    probs = tf.nn.softmax(logits)
    log_probs = tf.nn.log_softmax(logits)
    self_kl = tf.reduce_sum(
        tf.stop_gradient(probs) * (tf.stop_gradient(log_probs) - log_probs), -1)
  elif self.env_spec.is_box(act_type):
    means = logits[:, :sampling_dim // 2]
    std = logits[:, sampling_dim // 2:]
    my_means = tf.stop_gradient(means)
    my_std = tf.stop_gradient(std)
    self_kl = tf.reduce_sum(
        tf.log(std / my_std) +
        (tf.square(my_std) + tf.square(my_means - means)) /
        (2.0 * tf.square(std)) - 0.5, -1)
  else:
    assert False
  return self_kl
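For the continuous case this is the standard KL between diagonal Gaussians, KL(p || q) = sum_i [log(sigma_q / sigma_p) + (sigma_p^2 + (mu_p - mu_q)^2) / (2 sigma_q^2) - 0.5], with p frozen by stop_gradient so the value is zero but gradients flow only through the live copy. A small NumPy check with made-up numbers that the expression vanishes when the two distributions coincide:

import numpy as np

mu_p, std_p = np.array([0.1, -0.3]), np.array([0.5, 1.5])
mu_q, std_q = mu_p.copy(), std_p.copy()
kl = np.sum(np.log(std_q / std_p)
            + (std_p ** 2 + (mu_p - mu_q) ** 2) / (2.0 * std_q ** 2) - 0.5)
assert np.isclose(kl, 0.0)  # identical distributions => zero KL; only gradients matter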
'Calculate log-prob of action sampled from distribution.'
def log_prob_action(self, action, logits, sampling_dim, act_dim, act_type):
  if self.env_spec.is_discrete(act_type):
    act_log_prob = tf.reduce_sum(
        tf.one_hot(action, act_dim) * tf.nn.log_softmax(logits), -1)
  elif self.env_spec.is_box(act_type):
    means = logits[:, :sampling_dim // 2]
    std = logits[:, sampling_dim // 2:]
    act_log_prob = (-0.5 * tf.log(2 * np.pi * tf.square(std))
                    - 0.5 * tf.square(action - means) / tf.square(std))
    act_log_prob = tf.reduce_sum(act_log_prob, -1)
  else:
    assert False
  return act_log_prob
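The box branch is the diagonal-Gaussian log-density log N(a; mu, sigma) = -0.5 log(2 pi sigma^2) - (a - mu)^2 / (2 sigma^2), summed over action dimensions. A quick standalone check against scipy, with illustrative values only:

import numpy as np
from scipy.stats import norm

a, mu, std = 0.3, 0.0, 0.5
log_prob = -0.5 * np.log(2 * np.pi * std ** 2) - 0.5 * (a - mu) ** 2 / std ** 2
assert np.isclose(log_prob, norm.logpdf(a, loc=mu, scale=std))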
'Sample all actions given output of core network.'
def sample_actions(self, output, actions=None, greedy=False):
  sampled_actions = []
  logits = []
  log_probs = []
  entropy = []
  self_kl = []

  start_idx = 0
  for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
    sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)

    if self.fixed_std and self.env_spec.is_box(act_type):
      act_logits = output[:, start_idx:start_idx + act_dim]
      log_std = tf.get_variable('std%d' % i, [1, sampling_dim // 2])
      act_logits = tf.concat(
          [act_logits, 1e-6 + tf.exp(log_std) + 0 * act_logits], 1)
    else:
      act_logits = output[:, start_idx:start_idx + sampling_dim]

    if actions is None:
      act = self.sample_action(act_logits, sampling_dim, act_dim, act_type,
                               greedy=greedy)
    else:
      act = actions[i]

    ent = self.entropy(act_logits, sampling_dim, act_dim, act_type)
    kl = self.self_kl(act_logits, sampling_dim, act_dim, act_type)
    act_log_prob = self.log_prob_action(act, act_logits, sampling_dim,
                                        act_dim, act_type)

    sampled_actions.append(act)
    logits.append(act_logits)
    log_probs.append(act_log_prob)
    entropy.append(ent)
    self_kl.append(kl)

    start_idx += sampling_dim

  assert start_idx == self.env_spec.total_sampling_act_dim
  return sampled_actions, logits, log_probs, entropy, self_kl
'Calculate KL between one policy output and another.'
def get_kl(self, my_logits, other_logits):
  kl = []
  for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
    sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)
    single_my_logits = my_logits[i]
    single_other_logits = other_logits[i]
    if self.env_spec.is_discrete(act_type):
      my_probs = tf.nn.softmax(single_my_logits)
      my_log_probs = tf.nn.log_softmax(single_my_logits)
      other_log_probs = tf.nn.log_softmax(single_other_logits)
      my_kl = tf.reduce_sum(my_probs * (my_log_probs - other_log_probs), -1)
    elif self.env_spec.is_box(act_type):
      my_means = single_my_logits[:, :sampling_dim // 2]
      my_std = single_my_logits[:, sampling_dim // 2:]
      other_means = single_other_logits[:, :sampling_dim // 2]
      other_std = single_other_logits[:, sampling_dim // 2:]
      my_kl = tf.reduce_sum(
          tf.log(other_std / my_std) +
          (tf.square(my_std) + tf.square(my_means - other_means)) /
          (2.0 * tf.square(other_std)) - 0.5, -1)
    else:
      assert False
    kl.append(my_kl)
  return kl
'Single RNN step. Equivalently, single-time-step sampled actions.'
def single_step(self, prev, cur, greedy=False):
  prev_internal_state, prev_actions, _, _, _, _ = prev
  obs, actions = cur

  output, next_state = self.core(obs, prev_internal_state, prev_actions)
  actions, logits, log_probs, entropy, self_kl = self.sample_actions(
      output, actions=actions, greedy=greedy)

  return (next_state, tuple(actions), tuple(logits), tuple(log_probs),
          tuple(entropy), tuple(self_kl))
'Sample single step from policy.'
def sample_step(self, obs, prev_internal_state, prev_actions, greedy=False):
  (next_state, sampled_actions, logits, log_probs,
   entropies, self_kls) = self.single_step(
       (prev_internal_state, prev_actions, None, None, None, None),
       (obs, None), greedy=greedy)
  return next_state, sampled_actions
'Calculate log-probs and other calculations on batch of episodes.'
def multi_step(self, all_obs, initial_state, all_actions):
  batch_size = tf.shape(initial_state)[0]
  time_length = tf.shape(all_obs[0])[0]
  initial_actions = [act[0] for act in all_actions]
  all_actions = [tf.concat([act[1:], act[0:1]], 0) for act in all_actions]

  internal_states, _, logits, log_probs, entropies, self_kls = tf.scan(
      self.single_step, (all_obs, all_actions),
      initializer=self.get_initializer(batch_size, initial_state,
                                       initial_actions))

  log_probs = [log_prob[:-1] for log_prob in log_probs]
  entropies = [entropy[:-1] for entropy in entropies]
  self_kls = [self_kl[:-1] for self_kl in self_kls]

  return internal_states, logits, log_probs, entropies, self_kls
'Get initializer for RNN.'
def get_initializer(self, batch_size, initial_state, initial_actions):
  logits_init = []
  log_probs_init = []
  for act_dim, act_type in self.env_spec.act_dims_and_types:
    sampling_dim = self.env_spec.sampling_dim(act_dim, act_type)
    logits_init.append(tf.zeros([batch_size, sampling_dim]))
    log_probs_init.append(tf.zeros([batch_size]))
  entropy_init = [tf.zeros([batch_size]) for _ in self.env_spec.act_dims]
  self_kl_init = [tf.zeros([batch_size]) for _ in self.env_spec.act_dims]

  return (initial_state, tuple(initial_actions), tuple(logits_init),
          tuple(log_probs_init), tuple(entropy_init), tuple(self_kl_init))
'Calculate KL between one policy and another on batch of episodes.'
def calculate_kl(self, my_logits, other_logits):
  batch_size = tf.shape(my_logits[0])[1]
  time_length = tf.shape(my_logits[0])[0]

  reshaped_my_logits = [
      tf.reshape(my_logit, [batch_size * time_length, -1])
      for my_logit in my_logits]
  reshaped_other_logits = [
      tf.reshape(other_logit, [batch_size * time_length, -1])
      for other_logit in other_logits]

  kl = self.get_kl(reshaped_my_logits, reshaped_other_logits)
  kl = [tf.reshape(kkl, [time_length, batch_size]) for kkl in kl]
  return kl
'Single step.'
def single_step(self, obs, actions, prev_actions, greedy=False):
  batch_size = tf.shape(obs[0])[0]
  prev_internal_state = tf.zeros([batch_size, self.internal_dim])

  output, next_state = self.core(obs, prev_internal_state, prev_actions)
  actions, logits, log_probs, entropy, self_kl = self.sample_actions(
      output, actions=actions, greedy=greedy)

  return (next_state, tuple(actions), tuple(logits), tuple(log_probs),
          tuple(entropy), tuple(self_kl))
'Sample single step from policy.'
def sample_step(self, obs, prev_internal_state, prev_actions, greedy=False):
  (next_state, sampled_actions, logits, log_probs,
   entropies, self_kls) = self.single_step(obs, None, prev_actions,
                                           greedy=greedy)
  return next_state, sampled_actions
'Calculate log-probs and other calculations on batch of episodes.'
def multi_step(self, all_obs, initial_state, all_actions):
  batch_size = tf.shape(initial_state)[0]
  time_length = tf.shape(all_obs[0])[0]

  reshaped_obs = []
  for obs, (obs_dim, obs_type) in zip(all_obs, self.env_spec.obs_dims_and_types):
    if self.env_spec.is_discrete(obs_type):
      reshaped_obs.append(tf.reshape(obs, [time_length * batch_size]))
    elif self.env_spec.is_box(obs_type):
      reshaped_obs.append(tf.reshape(obs, [time_length * batch_size, obs_dim]))

  reshaped_act = []
  reshaped_prev_act = []
  for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
    act = tf.concat([all_actions[i][1:], all_actions[i][0:1]], 0)
    prev_act = all_actions[i]
    if self.env_spec.is_discrete(act_type):
      reshaped_act.append(tf.reshape(act, [time_length * batch_size]))
      reshaped_prev_act.append(
          tf.reshape(prev_act, [time_length * batch_size]))
    elif self.env_spec.is_box(act_type):
      reshaped_act.append(tf.reshape(act, [time_length * batch_size, act_dim]))
      reshaped_prev_act.append(
          tf.reshape(prev_act, [time_length * batch_size, act_dim]))

  internal_states, _, logits, log_probs, entropies, self_kls = self.single_step(
      reshaped_obs, reshaped_act, reshaped_prev_act)

  internal_states = tf.reshape(internal_states, [time_length, batch_size, -1])
  logits = [tf.reshape(logit, [time_length, batch_size, -1])
            for logit in logits]
  log_probs = [tf.reshape(log_prob, [time_length, batch_size])[:-1]
               for log_prob in log_probs]
  entropies = [tf.reshape(ent, [time_length, batch_size])[:-1]
               for ent in entropies]
  self_kls = [tf.reshape(self_kl, [time_length, batch_size])[:-1]
              for self_kl in self_kls]

  return internal_states, logits, log_probs, entropies, self_kls
'Create the Tensorflow placeholders.'
def setup_placeholders(self):
  self.avg_episode_reward = tf.placeholder(tf.float32, [], 'avg_episode_reward')

  self.internal_state = tf.placeholder(
      tf.float32, [None, self.policy.rnn_state_dim], 'internal_state')

  self.single_observation = []
  for i, (obs_dim, obs_type) in enumerate(self.env_spec.obs_dims_and_types):
    if self.env_spec.is_discrete(obs_type):
      self.single_observation.append(
          tf.placeholder(tf.int32, [None], 'obs%d' % i))
    elif self.env_spec.is_box(obs_type):
      self.single_observation.append(
          tf.placeholder(tf.float32, [None, obs_dim], 'obs%d' % i))
    else:
      assert False

  self.single_action = []
  for i, (action_dim, action_type) in enumerate(self.env_spec.act_dims_and_types):
    if self.env_spec.is_discrete(action_type):
      self.single_action.append(
          tf.placeholder(tf.int32, [None], 'act%d' % i))
    elif self.env_spec.is_box(action_type):
      self.single_action.append(
          tf.placeholder(tf.float32, [None, action_dim], 'act%d' % i))
    else:
      assert False

  self.observations = []
  for i, (obs_dim, obs_type) in enumerate(self.env_spec.obs_dims_and_types):
    if self.env_spec.is_discrete(obs_type):
      self.observations.append(
          tf.placeholder(tf.int32, [None, None], 'all_obs%d' % i))
    else:
      self.observations.append(
          tf.placeholder(tf.float32, [None, None, obs_dim], 'all_obs%d' % i))

  self.actions = []
  self.other_logits = []
  for i, (action_dim, action_type) in enumerate(self.env_spec.act_dims_and_types):
    if self.env_spec.is_discrete(action_type):
      self.actions.append(
          tf.placeholder(tf.int32, [None, None], 'all_act%d' % i))
    if self.env_spec.is_box(action_type):
      self.actions.append(
          tf.placeholder(tf.float32, [None, None, action_dim], 'all_act%d' % i))
    self.other_logits.append(
        tf.placeholder(tf.float32, [None, None, None], 'other_logits%d' % i))

  self.rewards = tf.placeholder(tf.float32, [None, None], 'rewards')
  self.terminated = tf.placeholder(tf.float32, [None], 'terminated')
  self.pads = tf.placeholder(tf.float32, [None, None], 'pads')
  self.prev_log_probs = tf.placeholder(tf.float32, [None, None], 'prev_log_probs')
'Setup Tensorflow Graph.'
def setup(self):
  self.setup_placeholders()
  tf.summary.scalar('avg_episode_reward', self.avg_episode_reward)

  with tf.variable_scope('model', reuse=None):
    with tf.variable_scope('policy_net'):
      (self.policy_internal_states, self.logits, self.log_probs,
       self.entropies, self.self_kls) = self.policy.multi_step(
           self.observations, self.internal_state, self.actions)
      self.out_log_probs = sum(self.log_probs)
      self.kl = self.policy.calculate_kl(self.other_logits, self.logits)
      self.avg_kl = (tf.reduce_sum(sum(self.kl)[:-1] * (1 - self.pads)) /
                     tf.reduce_sum(1 - self.pads))
    with tf.variable_scope('value_net'):
      (self.values, self.regression_input,
       self.regression_weight) = self.baseline.get_values(
           self.observations, self.actions,
           self.policy_internal_states, self.logits)
    with tf.variable_scope('target_policy_net'):
      (self.target_policy_internal_states, self.target_logits,
       self.target_log_probs, _, _) = self.policy.multi_step(
           self.observations, self.internal_state, self.actions)
    with tf.variable_scope('target_value_net'):
      (self.target_values, _, _) = self.baseline.get_values(
          self.observations, self.actions,
          self.target_policy_internal_states, self.target_logits)

    all_vars = tf.trainable_variables()
    online_vars = [p for p in all_vars
                   if '/policy_net' in p.name or '/value_net' in p.name]
    target_vars = [p for p in all_vars
                   if 'target_policy_net' in p.name or
                   'target_value_net' in p.name]
    online_vars.sort(key=lambda p: p.name)
    target_vars.sort(key=lambda p: p.name)
    aa = self.target_network_lag
    self.copy_op = tf.group(*[
        target_p.assign(aa * target_p + (1 - aa) * online_p)
        for online_p, target_p in zip(online_vars, target_vars)])

    (self.loss, self.raw_loss, self.regression_target,
     self.gradient_ops, self.summary) = self.objective.get(
         self.rewards, self.pads,
         self.values[:-1, :],
         self.values[-1, :] * (1 - self.terminated),
         self.log_probs, self.prev_log_probs, self.target_log_probs,
         self.entropies, self.logits)
    self.regression_target = tf.reshape(self.regression_target, [-1])

    self.policy_vars = [v for v in tf.trainable_variables()
                        if '/policy_net' in v.name]
    self.value_vars = [v for v in tf.trainable_variables()
                       if '/value_net' in v.name]

    if self.trust_region_policy_opt is not None:
      with tf.variable_scope('trust_region_policy', reuse=None):
        avg_self_kl = (tf.reduce_sum(sum(self.self_kls) * (1 - self.pads)) /
                       tf.reduce_sum(1 - self.pads))
        self.trust_region_policy_opt.setup(
            self.policy_vars, self.raw_loss, avg_self_kl, self.avg_kl)

    if self.value_opt is not None:
      with tf.variable_scope('trust_region_value', reuse=None):
        self.value_opt.setup(
            self.value_vars,
            tf.reshape(self.values[:-1, :], [-1]),
            self.regression_target,
            tf.reshape(self.pads, [-1]),
            self.regression_input, self.regression_weight)

  with tf.variable_scope('model', reuse=True):
    scope = 'target_policy_net' if self.sample_from == 'target' else 'policy_net'
    with tf.variable_scope(scope):
      (self.next_internal_state,
       self.sampled_actions) = self.policy.sample_step(
           self.single_observation, self.internal_state, self.single_action)
      (self.greedy_next_internal_state,
       self.greedy_sampled_actions) = self.policy.sample_step(
           self.single_observation, self.internal_state, self.single_action,
           greedy=True)
'Sample batch of steps from policy.'
def sample_step(self, sess, single_observation, internal_state, single_action, greedy=False):
  if greedy:
    outputs = [self.greedy_next_internal_state, self.greedy_sampled_actions]
  else:
    outputs = [self.next_internal_state, self.sampled_actions]

  feed_dict = {self.internal_state: internal_state}
  for action_place, action in zip(self.single_action, single_action):
    feed_dict[action_place] = action
  for obs_place, obs in zip(self.single_observation, single_observation):
    feed_dict[obs_place] = obs

  return sess.run(outputs, feed_dict=feed_dict)
'Train network using standard gradient descent.'
def train_step(self, sess, observations, internal_state, actions, rewards, terminated, pads, avg_episode_reward=0):
  outputs = [self.raw_loss, self.gradient_ops, self.summary]
  feed_dict = {self.internal_state: internal_state,
               self.rewards: rewards,
               self.terminated: terminated,
               self.pads: pads,
               self.avg_episode_reward: avg_episode_reward}
  for action_place, action in zip(self.actions, actions):
    feed_dict[action_place] = action
  for obs_place, obs in zip(self.observations, observations):
    feed_dict[obs_place] = obs

  return sess.run(outputs, feed_dict=feed_dict)
'Train policy using trust region step.'
def trust_region_step(self, sess, observations, internal_state, actions, rewards, terminated, pads, avg_episode_reward=0):
  feed_dict = {self.internal_state: internal_state,
               self.rewards: rewards,
               self.terminated: terminated,
               self.pads: pads,
               self.avg_episode_reward: avg_episode_reward}
  for action_place, action in zip(self.actions, actions):
    feed_dict[action_place] = action
  for obs_place, obs in zip(self.observations, observations):
    feed_dict[obs_place] = obs

  prev_log_probs, prev_logits = sess.run(
      [self.out_log_probs, self.logits], feed_dict=feed_dict)

  feed_dict[self.prev_log_probs] = prev_log_probs
  for other_logit, prev_logit in zip(self.other_logits, prev_logits):
    feed_dict[other_logit] = prev_logit

  self.trust_region_policy_opt.optimize(sess, feed_dict)

  ret = sess.run([self.raw_loss, self.summary], feed_dict=feed_dict)
  ret = [ret[0], None, ret[1]]
  return ret
'Train value network using value-specific optimizer.'
def fit_values(self, sess, observations, internal_state, actions, rewards, terminated, pads):
  feed_dict = {self.internal_state: internal_state,
               self.rewards: rewards,
               self.terminated: terminated,
               self.pads: pads}
  for action_place, action in zip(self.actions, actions):
    feed_dict[action_place] = action
  for obs_place, obs in zip(self.observations, observations):
    feed_dict[obs_place] = obs

  if self.value_opt is None:
    raise ValueError('Specific value optimizer does not exist')

  self.value_opt.optimize(sess, feed_dict)
'Get controller.'
def get_controller(self):
  cls = controller.Controller
  return cls(self.env, self.env_spec, self.internal_dim,
             use_online_batch=self.use_online_batch,
             batch_by_steps=self.batch_by_steps,
             unify_episodes=self.unify_episodes,
             replay_batch_size=self.replay_batch_size,
             max_step=self.max_step,
             cutoff_agent=self.cutoff_agent,
             save_trajectories_file=self.save_trajectories_file,
             use_trust_region=self.trust_region_p,
             use_value_opt=self.value_opt is not None,
             update_eps_lambda=self.update_eps_lambda,
             prioritize_by=self.prioritize_by,
             get_model=self.get_model,
             get_replay_buffer=self.get_replay_buffer,
             get_buffer_seeds=self.get_buffer_seeds)
'Run training.'
def run(self):
  is_chief = FLAGS.task_id == 0 or not FLAGS.supervisor
  sv = None

  def init_fn(sess, saver):
    ckpt = None
    if FLAGS.save_dir and sv is None:
      load_dir = FLAGS.save_dir
      ckpt = tf.train.get_checkpoint_state(load_dir)
    if ckpt and ckpt.model_checkpoint_path:
      logging.info('restoring from %s', ckpt.model_checkpoint_path)
      saver.restore(sess, ckpt.model_checkpoint_path)
    elif FLAGS.load_path:
      logging.info('restoring from %s', FLAGS.load_path)
      with gfile.AsUser('distbelief-brain-gpu'):
        saver.restore(sess, FLAGS.load_path)

  if FLAGS.supervisor:
    with tf.device(tf.ReplicaDeviceSetter(FLAGS.ps_tasks, merge_devices=True)):
      self.global_step = tf.contrib.framework.get_or_create_global_step()
      tf.set_random_seed(FLAGS.tf_seed)
      self.controller = self.get_controller()
      self.model = self.controller.model
      self.controller.setup()
      saver = tf.train.Saver(max_to_keep=10)
      step = self.model.global_step
      sv = tf.Supervisor(logdir=FLAGS.save_dir,
                         is_chief=is_chief,
                         saver=saver,
                         save_model_secs=600,
                         summary_op=None,
                         save_summaries_secs=60,
                         global_step=step,
                         init_fn=lambda sess: init_fn(sess, saver))
      sess = sv.PrepareSession(FLAGS.master)
  else:
    tf.set_random_seed(FLAGS.tf_seed)
    self.global_step = tf.contrib.framework.get_or_create_global_step()
    self.controller = self.get_controller()
    self.model = self.controller.model
    self.controller.setup()
    saver = tf.train.Saver(max_to_keep=10)
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    init_fn(sess, saver)

  self.sv = sv
  self.sess = sess

  logging.info('hparams:\n%s', self.hparams_string())

  model_step = sess.run(self.model.global_step)
  if model_step >= self.num_steps:
    logging.info('training has reached final step')
    return

  losses = []
  rewards = []
  all_ep_rewards = []
  for step in xrange(1 + self.num_steps):
    if sv is not None and sv.ShouldStop():
      logging.info('stopping supervisor')
      break

    self.do_before_step(step)

    loss, summary, total_rewards, episode_rewards = self.controller.train(sess)
    losses.append(loss)
    rewards.append(total_rewards)
    all_ep_rewards.extend(episode_rewards)

    if random.random() < 1 and is_chief and sv and sv._summary_writer:
      sv.summary_computed(sess, summary)

    model_step = sess.run(self.model.global_step)
    if is_chief and step % self.validation_frequency == 0:
      logging.info('at training step %d, model step %d: '
                   'avg loss %f, avg reward %f, episode rewards: %f',
                   step, model_step, np.mean(losses), np.mean(rewards),
                   np.mean(all_ep_rewards))
      losses = []
      rewards = []
      all_ep_rewards = []

    if model_step >= self.num_steps:
      logging.info('training has reached final step')
      break

  if is_chief and sv is not None:
    logging.info('saving final model to %s', sv.save_path)
    sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
'Get inputs to network as single tensor.'
def get_inputs(self, time_step, obs, prev_actions, internal_policy_states):
  inputs = [tf.ones_like(time_step)]
  input_dim = 1

  if not self.input_policy_state:
    for i, (obs_dim, obs_type) in enumerate(self.env_spec.obs_dims_and_types):
      if self.env_spec.is_discrete(obs_type):
        inputs.append(tf.one_hot(obs[i], obs_dim))
        input_dim += obs_dim
      elif self.env_spec.is_box(obs_type):
        cur_obs = obs[i]
        inputs.append(cur_obs)
        inputs.append(cur_obs ** 2)
        input_dim += obs_dim * 2
      else:
        assert False

    if self.input_prev_actions:
      for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
        if self.env_spec.is_discrete(act_type):
          inputs.append(tf.one_hot(prev_actions[i], act_dim))
          input_dim += act_dim
        elif self.env_spec.is_box(act_type):
          inputs.append(prev_actions[i])
          input_dim += act_dim
        else:
          assert False

  if self.input_policy_state:
    inputs.append(internal_policy_states)
    input_dim += self.internal_policy_dim

  if self.input_time_step:
    scaled_time = 0.01 * time_step
    inputs.extend([scaled_time, scaled_time ** 2, scaled_time ** 3])
    input_dim += 3

  return input_dim, tf.concat(inputs, 1)
'Reshape inputs from [time_length, batch_size, ...] to [time_length * batch_size, ...]. This allows for computing the value estimate in one go.'
def reshape_batched_inputs(self, all_obs, all_actions, internal_policy_states, policy_logits):
  batch_size = tf.shape(all_obs[0])[1]
  time_length = tf.shape(all_obs[0])[0]

  reshaped_obs = []
  for obs, (obs_dim, obs_type) in zip(all_obs, self.env_spec.obs_dims_and_types):
    if self.env_spec.is_discrete(obs_type):
      reshaped_obs.append(tf.reshape(obs, [time_length * batch_size]))
    elif self.env_spec.is_box(obs_type):
      reshaped_obs.append(tf.reshape(obs, [time_length * batch_size, obs_dim]))

  reshaped_prev_act = []
  reshaped_policy_logits = []
  for i, (act_dim, act_type) in enumerate(self.env_spec.act_dims_and_types):
    prev_act = all_actions[i]
    if self.env_spec.is_discrete(act_type):
      reshaped_prev_act.append(
          tf.reshape(prev_act, [time_length * batch_size]))
    elif self.env_spec.is_box(act_type):
      reshaped_prev_act.append(
          tf.reshape(prev_act, [time_length * batch_size, act_dim]))
    reshaped_policy_logits.append(
        tf.reshape(policy_logits[i], [time_length * batch_size, -1]))

  reshaped_internal_policy_states = tf.reshape(
      internal_policy_states,
      [time_length * batch_size, self.internal_policy_dim])

  time_step = (float(self.input_time_step) *
               tf.expand_dims(tf.to_float(
                   tf.range(time_length * batch_size) / batch_size), -1))

  return (time_step, reshaped_obs, reshaped_prev_act,
          reshaped_internal_policy_states, reshaped_policy_logits)
'Get value estimates given input.'
def get_values(self, all_obs, all_actions, internal_policy_states, policy_logits):
  batch_size = tf.shape(all_obs[0])[1]
  time_length = tf.shape(all_obs[0])[0]

  (time_step, reshaped_obs, reshaped_prev_act,
   reshaped_internal_policy_states,
   reshaped_policy_logits) = self.reshape_batched_inputs(
       all_obs, all_actions, internal_policy_states, policy_logits)

  input_dim, inputs = self.get_inputs(
      time_step, reshaped_obs, reshaped_prev_act,
      reshaped_internal_policy_states)

  for depth in xrange(self.n_hidden_layers):
    with tf.variable_scope('value_layer%d' % depth):
      w = tf.get_variable('w', [input_dim, self.hidden_dim])
      inputs = tf.nn.tanh(tf.matmul(inputs, w))
      input_dim = self.hidden_dim

  w_v = tf.get_variable('w_v', [input_dim, 1], initializer=self.matrix_init)
  values = tf.matmul(inputs, w_v)
  values = tf.reshape(values, [time_length, batch_size])

  inputs = inputs[:-batch_size]

  return values, inputs, w_v
'Sample episodes from environment using model.'
def _sample_episodes(self, sess, greedy=False):
  obs_after_reset = self.env.reset_if(self.start_episode)

  for i, obs in enumerate(obs_after_reset):
    if obs is not None:
      self.step_count[i] = 0
      self.internal_state[i] = self.initial_internal_state()
      for j in xrange(len(self.env_spec.obs_dims)):
        self.last_obs[j][i] = obs[j]
      for j in xrange(len(self.env_spec.act_dims)):
        self.last_act[j][i] = -1
      self.last_pad[i] = 0

  if self.unify_episodes:
    assert len(obs_after_reset) == 1
    new_ep = obs_after_reset[0] is not None
  else:
    new_ep = True

  self.start_id = 0 if new_ep else len(self.all_obs[:])

  initial_state = self.internal_state
  all_obs = [] if new_ep else self.all_obs[:]
  all_act = [self.last_act] if new_ep else self.all_act[:]
  all_pad = [] if new_ep else self.all_pad[:]
  rewards = [] if new_ep else self.rewards[:]

  step = 0
  while not self.env.all_done():
    self.step_count += 1 - np.array(self.env.dones)

    next_internal_state, sampled_actions = self.model.sample_step(
        sess, self.last_obs, self.internal_state, self.last_act,
        greedy=greedy)

    env_actions = self.env_spec.convert_actions_to_env(sampled_actions)
    next_obs, reward, next_dones, _ = self.env.step(env_actions)

    all_obs.append(self.last_obs)
    all_act.append(sampled_actions)
    all_pad.append(self.last_pad)
    rewards.append(reward)

    self.internal_state = next_internal_state
    self.last_obs = next_obs
    self.last_act = sampled_actions
    self.last_pad = np.array(next_dones).astype('float32')

    step += 1
    if self.max_step and step >= self.max_step:
      break

  self.all_obs = all_obs[:]
  self.all_act = all_act[:]
  self.all_pad = all_pad[:]
  self.rewards = rewards[:]

  all_obs.append(self.last_obs)

  return initial_state, all_obs, all_act, rewards, all_pad
'Sample steps from the environment until we have enough for a batch.'
def sample_episodes(self, sess):
  if self.unify_episodes:
    self.all_new_ep = self.start_episode[0]

  episodes = []
  total_steps = 0
  while total_steps < self.max_step * len(self.env):
    (initial_state, observations, actions,
     rewards, pads) = self._sample_episodes(sess)

    observations = zip(*observations)
    actions = zip(*actions)

    terminated = np.array(self.env.dones)

    self.total_rewards = np.sum(
        np.array(rewards[self.start_id:]) *
        (1 - np.array(pads[self.start_id:])), axis=0)
    self.episode_running_rewards *= 1 - self.start_episode
    self.episode_running_lengths *= 1 - self.start_episode
    self.episode_running_rewards += self.total_rewards
    self.episode_running_lengths += np.sum(
        1 - np.array(pads[self.start_id:]), axis=0)

    episodes.extend(self.convert_from_batched_episodes(
        initial_state, observations, actions, rewards, terminated, pads))
    total_steps += np.sum(1 - np.array(pads))

    self.start_episode = np.logical_or(
        terminated, self.step_count >= self.cutoff_agent)
    episode_rewards = self.episode_running_rewards[self.start_episode].tolist()
    self.episode_rewards.extend(episode_rewards)
    self.episode_lengths.extend(
        self.episode_running_lengths[self.start_episode].tolist())
    self.episode_rewards = self.episode_rewards[-100:]
    self.episode_lengths = self.episode_lengths[-100:]

    if (self.save_trajectories_file is not None and
        (self.best_batch_rewards is None or
         np.mean(self.total_rewards) > self.best_batch_rewards)):
      self.best_batch_rewards = np.mean(self.total_rewards)
      my_episodes = self.convert_from_batched_episodes(
          initial_state, observations, actions, rewards, terminated, pads)
      with gfile.GFile(self.save_trajectories_file, 'w') as f:
        pickle.dump(my_episodes, f)

    if not self.batch_by_steps:
      return (initial_state, observations, actions,
              rewards, terminated, pads)

  return self.convert_to_batched_episodes(episodes)
'Train model using batch.'
def _train(self, sess, observations, initial_state, actions, rewards, terminated, pads):
  if self.use_trust_region:
    loss, _, summary = self.model.trust_region_step(
        sess, observations, initial_state, actions, rewards, terminated, pads,
        avg_episode_reward=np.mean(self.episode_rewards))
  else:
    loss, _, summary = self.model.train_step(
        sess, observations, initial_state, actions, rewards, terminated, pads,
        avg_episode_reward=np.mean(self.episode_rewards))

  if self.use_value_opt:
    self.model.fit_values(
        sess, observations, initial_state, actions, rewards, terminated, pads)

  return loss, summary
'Sample some episodes and train on some episodes.'
def train(self, sess):
  cur_step = sess.run(self.model.inc_global_step)
  self.cur_step = cur_step

  if self.cur_step == 0:
    for _ in xrange(100):
      sess.run(self.model.copy_op)
  sess.run(self.model.copy_op)

  (initial_state, observations, actions,
   rewards, terminated, pads) = self.sample_episodes(sess)

  self.add_to_replay_buffer(
      initial_state, observations, actions, rewards, terminated, pads)

  loss, summary = 0, None

  if self.use_online_batch:
    loss, summary = self._train(
        sess, observations, initial_state, actions, rewards, terminated, pads)

  if self.update_eps_lambda:
    episode_rewards = np.array(self.episode_rewards)
    episode_lengths = np.array(self.episode_lengths)
    eps_lambda = find_best_eps_lambda(episode_rewards, episode_lengths)
    sess.run(self.model.objective.assign_eps_lambda,
             feed_dict={self.model.objective.new_eps_lambda: eps_lambda})

  replay_batch, replay_probs = self.get_from_replay_buffer(
      self.replay_batch_size)
  if replay_batch:
    (initial_state, observations, actions,
     rewards, terminated, pads) = replay_batch
    loss, summary = self._train(
        sess, observations, initial_state, actions, rewards, terminated, pads)

  return loss, summary, self.total_rewards, self.episode_rewards
'Use greedy sampling.'
def eval(self, sess):
  initial_state, observations, actions, rewards, pads = self._sample_episodes(
      sess, greedy=True)
  total_rewards = np.sum(np.array(rewards) * (1 - np.array(pads)), axis=0)
  return np.mean(total_rewards)
'Convert time-major batch of episodes to batch-major list of episodes.'
def convert_from_batched_episodes(self, initial_state, observations, actions, rewards, terminated, pads):
  rewards = np.array(rewards)
  pads = np.array(pads)
  observations = [np.array(obs) for obs in observations]
  actions = [np.array(act) for act in actions]

  total_rewards = np.sum(rewards * (1 - pads), axis=0)
  total_length = np.sum(1 - pads, axis=0).astype('int32')

  episodes = []
  num_episodes = rewards.shape[1]
  for i in xrange(num_episodes):
    length = total_length[i]
    ep_initial = initial_state[i]
    ep_obs = [obs[:length, i, ...] for obs in observations]
    ep_act = [act[:length + 1, i, ...] for act in actions]
    ep_rewards = rewards[:length, i]

    episodes.append([ep_initial, ep_obs, ep_act, ep_rewards, terminated[i]])

  return episodes
'Convert batch-major list of episodes to time-major batch of episodes.'
def convert_to_batched_episodes(self, episodes, max_length=None):
  lengths = [len(ep[-2]) for ep in episodes]
  max_length = max_length or max(lengths)

  new_episodes = []
  for ep, length in zip(episodes, lengths):
    initial, observations, actions, rewards, terminated = ep
    observations = [np.resize(obs, [max_length + 1] + list(obs.shape)[1:])
                    for obs in observations]
    actions = [np.resize(act, [max_length + 1] + list(act.shape)[1:])
               for act in actions]
    pads = np.array([0] * length + [1] * (max_length - length))
    rewards = np.resize(rewards, [max_length]) * (1 - pads)
    new_episodes.append([initial, observations, actions, rewards,
                         terminated, pads])

  (initial, observations, actions, rewards,
   terminated, pads) = zip(*new_episodes)
  observations = [np.swapaxes(obs, 0, 1) for obs in zip(*observations)]
  actions = [np.swapaxes(act, 0, 1) for act in zip(*actions)]
  rewards = np.transpose(rewards)
  pads = np.transpose(pads)

  return initial, observations, actions, rewards, terminated, pads
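To make the padding convention concrete: pads is 0 for real steps and 1 for filler beyond an episode's true length, and the batched layout is time-major. A small NumPy illustration with hypothetical episode lengths:

import numpy as np

lengths = [3, 5]
max_length = max(lengths)
pads = np.array([[0] * l + [1] * (max_length - l) for l in lengths])  # [num_episodes, max_length]
pads_time_major = pads.T  # [max_length, num_episodes], as used by the model
assert pads_time_major.shape == (5, 2)
assert list(np.sum(1 - pads_time_major, axis=0)) == lengths  # recovers true lengths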
'Add batch of episodes to replay buffer.'
def add_to_replay_buffer(self, initial_state, observations, actions, rewards, terminated, pads):
  if self.replay_buffer is None:
    return

  rewards = np.array(rewards)
  pads = np.array(pads)
  total_rewards = np.sum(rewards * (1 - pads), axis=0)

  episodes = self.convert_from_batched_episodes(
      initial_state, observations, actions, rewards, terminated, pads)

  priorities = (total_rewards if self.prioritize_by == 'reward'
                else self.cur_step)

  if not self.unify_episodes or self.all_new_ep:
    self.last_idxs = self.replay_buffer.add(episodes, priorities)
  else:
    self.replay_buffer.add(episodes[:1], priorities, self.last_idxs[-1:])
    if len(episodes) > 1:
      self.replay_buffer.add(episodes[1:], priorities)
'Sample a batch of episodes from the replay buffer.'
def get_from_replay_buffer(self, batch_size):
  if (self.replay_buffer is None or
      len(self.replay_buffer) < 1 * batch_size):
    return None, None

  desired_count = batch_size * self.max_step
  while True:
    if batch_size > len(self.replay_buffer):
      batch_size = len(self.replay_buffer)
    episodes, probs = self.replay_buffer.get_batch(batch_size)
    count = sum(len(ep[-2]) for ep in episodes)
    if count >= desired_count or not self.batch_by_steps:
      break
    if batch_size == len(self.replay_buffer):
      return None, None
    batch_size *= 1.2

  return self.convert_to_batched_episodes(episodes), probs
'Seed the replay buffer with some episodes.'
def seed_replay_buffer(self, episodes):
  if self.replay_buffer is None:
    return

  for i in xrange(len(episodes)):
    episodes[i] = [self.initial_internal_state()] + episodes[i]

  self.replay_buffer.seed_buffer(episodes)
'Update the dictionary with a comma separated list.'
def update_config(self, in_string):
  pairs = in_string.split(',')
  pairs = [pair.split('=') for pair in pairs]
  for key, val in pairs:
    self.dict_[key] = type(self.dict_[key])(val)
  self.__dict__.update(self.dict_)
  return self
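Each comma-separated pair is cast to the type of the existing entry, so a string like '32' becomes an int when the default was an int. A standalone illustration of the parsing with a made-up config dict:

dict_ = {'batch_size': 16, 'lr': 0.01, 'optimizer': 'adam'}
for key, val in (pair.split('=') for pair in 'batch_size=32,lr=0.001'.split(',')):
  dict_[key] = type(dict_[key])(val)
assert dict_ == {'batch_size': 32, 'lr': 0.001, 'optimizer': 'adam'}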
'Evaluate bits/dim.'
def eval_epoch(self, hps):
  n_eval_dict = {'imnet': 50000, 'lsun': 300, 'celeba': 19962, 'svhn': 26032}
  if FLAGS.eval_set_size == 0:
    num_examples_eval = n_eval_dict[FLAGS.dataset]
  else:
    num_examples_eval = FLAGS.eval_set_size
  n_epoch = num_examples_eval / hps.batch_size
  eval_costs = []
  bar_len = 70
  for epoch_idx in xrange(n_epoch):
    n_equal = epoch_idx * bar_len * 1.0 / n_epoch
    n_equal = numpy.ceil(n_equal)
    n_equal = int(n_equal)
    n_dash = bar_len - n_equal
    progress_bar = '[' + '=' * n_equal + '-' * n_dash + ']\r'
    print progress_bar,
    cost = self.bit_per_dim.eval()
    eval_costs.append(cost)
  print ''
  return float(numpy.mean(eval_costs))
'Initializes the vectors from a text vocabulary and binary data.'
def __init__(self, vocab_filename, rows_filename, cols_filename=None):
  with open(vocab_filename, 'r') as lines:
    self.vocab = [line.split()[0] for line in lines]
    self.word_to_idx = {word: idx for idx, word in enumerate(self.vocab)}

  n = len(self.vocab)
  with open(rows_filename, 'r') as rows_fh:
    rows_fh.seek(0, os.SEEK_END)
    size = rows_fh.tell()
    if size % (4 * n) != 0:
      raise IOError(
          'unexpected file size for binary vector file %s' % rows_filename)

    dim = size / (4 * n)
    rows_mm = mmap.mmap(rows_fh.fileno(), 0, prot=mmap.PROT_READ)
    rows = np.matrix(
        np.frombuffer(rows_mm, dtype=np.float32).reshape(n, dim))

    if cols_filename:
      with open(cols_filename, 'r') as cols_fh:
        cols_mm = mmap.mmap(cols_fh.fileno(), 0, prot=mmap.PROT_READ)
        cols_fh.seek(0, os.SEEK_END)
        if cols_fh.tell() != size:
          raise IOError('row and column vector files have different sizes')

        cols = np.matrix(
            np.frombuffer(cols_mm, dtype=np.float32).reshape(n, dim))

        rows += cols
        cols_mm.close()

    self.vecs = rows / np.linalg.norm(rows, axis=1).reshape(n, 1)
    rows_mm.close()
'Computes the similarity of two tokens.'
def similarity(self, word1, word2):
  idx1 = self.word_to_idx.get(word1)
  idx2 = self.word_to_idx.get(word2)
  # Check against None explicitly so the word at index 0 is not treated as missing.
  if idx1 is None or idx2 is None:
    return None

  return float(self.vecs[idx1] * self.vecs[idx2].transpose())
'Returns the nearest neighbors to the query (a word or vector).'
def neighbors(self, query):
  if isinstance(query, basestring):
    idx = self.word_to_idx.get(query)
    if idx is None:
      return None
    query = self.vecs[idx]

  neighbors = self.vecs * query.transpose()

  return sorted(
      zip(self.vocab, neighbors.flat),
      key=lambda kv: kv[1], reverse=True)
'Returns the embedding for a token, or None if no embedding exists.'
def lookup(self, word):
  idx = self.word_to_idx.get(word)
  return None if idx is None else self.vecs[idx]
'Creates a new Swivel model.'
def __init__(self, input_base_path, hparams):
  self.row_ix_to_word, self.row_word_to_ix = self._read_vocab(
      os.path.join(input_base_path, 'row_vocab.txt'))
  self.col_ix_to_word, self.col_word_to_ix = self._read_vocab(
      os.path.join(input_base_path, 'col_vocab.txt'))

  row_sums = self._read_marginals_file(
      os.path.join(input_base_path, 'row_sums.txt'))
  col_sums = self._read_marginals_file(
      os.path.join(input_base_path, 'col_sums.txt'))

  count_matrix_files = glob.glob(
      os.path.join(input_base_path, 'shard-*.pb'))

  global_rows, global_cols, counts = self._count_matrix_input(
      count_matrix_files, hparams.submatrix_rows, hparams.submatrix_cols)

  sigma = 1.0 / np.sqrt(hparams.dim)
  self.row_embedding = tf.get_variable(
      'row_embedding', shape=[len(row_sums), hparams.dim],
      initializer=tf.random_normal_initializer(0, sigma), dtype=tf.float32)
  self.col_embedding = tf.get_variable(
      'col_embedding', shape=[len(col_sums), hparams.dim],
      initializer=tf.random_normal_initializer(0, sigma), dtype=tf.float32)

  matrix_log_sum = np.log(np.sum(row_sums) + 1)
  row_bias = tf.constant([np.log(x + 1) for x in row_sums], dtype=tf.float32)
  col_bias = tf.constant([np.log(x + 1) for x in col_sums], dtype=tf.float32)

  selected_rows = tf.nn.embedding_lookup(self.row_embedding, global_rows)
  selected_cols = tf.nn.embedding_lookup(self.col_embedding, global_cols)
  selected_row_bias = tf.gather(row_bias, global_rows)
  selected_col_bias = tf.gather(col_bias, global_cols)

  predictions = tf.matmul(selected_rows, selected_cols, transpose_b=True)

  count_is_nonzero = tf.to_float(tf.cast(counts, tf.bool))
  count_is_zero = 1 - count_is_nonzero

  objectives = count_is_nonzero * tf.log(counts + 1e-30)
  objectives -= tf.reshape(selected_row_bias, [-1, 1])
  objectives -= selected_col_bias
  objectives += matrix_log_sum

  err = predictions - objectives

  l2_confidence = (hparams.confidence_base +
                   hparams.confidence_scale * tf.pow(
                       counts, hparams.confidence_exponent))

  loss_multiplier = 1 / np.sqrt(
      hparams.submatrix_rows * hparams.submatrix_cols)

  l2_loss = loss_multiplier * tf.reduce_sum(
      0.5 * l2_confidence * tf.square(err))
  sigmoid_loss = loss_multiplier * tf.reduce_sum(
      tf.nn.softplus(err) * count_is_zero)

  self.loss_op = l2_loss + sigmoid_loss

  if hparams.optimizer == 'adagrad':
    opt = tf.train.AdagradOptimizer(hparams.learning_rate)
  elif hparams.optimizer == 'rmsprop':
    opt = tf.train.RMSPropOptimizer(hparams.learning_rate, hparams.momentum)
  else:
    raise ValueError('unknown optimizer "%s"' % hparams.optimizer)

  self.global_step = tf.get_variable(
      'global_step', initializer=0, trainable=False)
  self.train_op = opt.minimize(self.loss_op, global_step=self.global_step)

  self.steps_per_epoch = (
      (len(row_sums) / hparams.submatrix_rows) *
      (len(col_sums) / hparams.submatrix_cols))
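The regression target for observed counts is a smoothed PMI: log(count) - log(row_sum + 1) - log(col_sum + 1) + log(total + 1), which the dot product of the row and column embeddings is trained to match; unobserved cells instead pay the softplus penalty. A small NumPy sketch of that per-cell target, using made-up counts:

import numpy as np

count, row_sum, col_sum, total = 12.0, 400.0, 250.0, 1.0e6
target = np.log(count) - np.log(row_sum + 1) - np.log(col_sum + 1) + np.log(total + 1)
# The L2 term then penalizes 0.5 * confidence * (row_vec . col_vec - target) ** 2.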
'Reads the vocabulary file.'
def _read_vocab(self, filename):
  with open(filename) as lines:
    ix_to_word = [line.strip() for line in lines]
    word_to_ix = {word: ix for ix, word in enumerate(ix_to_word)}
  return ix_to_word, word_to_ix
'Reads text file with one number per line to an array.'
def _read_marginals_file(self, filename):
  with open(filename) as lines:
    return [float(line.strip()) for line in lines]
'Creates ops that read submatrix shards from disk.'
def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols):
  random.shuffle(filenames)
  filename_queue = tf.train.string_input_producer(filenames)
  reader = tf.WholeFileReader()
  _, serialized_example = reader.read(filename_queue)

  features = tf.parse_single_example(
      serialized_example,
      features={
          'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
          'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
          'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
          'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
          'sparse_value': tf.VarLenFeature(dtype=tf.float32),
      })

  global_row = features['global_row']
  global_col = features['global_col']

  sparse_local_row = features['sparse_local_row'].values
  sparse_local_col = features['sparse_local_col'].values
  sparse_count = features['sparse_value'].values

  sparse_indices = tf.concat(
      axis=1, values=[tf.expand_dims(sparse_local_row, 1),
                      tf.expand_dims(sparse_local_col, 1)])

  count = tf.sparse_to_dense(
      sparse_indices, [submatrix_rows, submatrix_cols], sparse_count)

  return global_row, global_col, count
'Returns an op that runs an eval on a word similarity dataset. The eval dataset is assumed to be tab-separated, one scored word pair per line. The resulting value is Spearman\'s rho of the human judgements with the cosine similarity of the word embeddings. Args: filename: the filename containing the word similarity data. Returns: An operator that will compute Spearman\'s rho of the current row embeddings.'
def wordsim_eval_op(self, filename):
  with open(filename, 'r') as fh:
    # The eval file is tab-separated: word1 <tab> word2 <tab> score.
    tuples = (line.strip().split('\t') for line in fh.read().splitlines())
    word1s, word2s, sims = zip(*tuples)
    actuals = map(float, sims)

  v1s_t = tf.nn.embedding_lookup(
      self.row_embedding, [self.row_word_to_ix.get(w, 0) for w in word1s])
  v2s_t = tf.nn.embedding_lookup(
      self.row_embedding, [self.row_word_to_ix.get(w, 0) for w in word2s])

  preds_t = tf.reduce_sum(
      tf.nn.l2_normalize(v1s_t, dim=1) * tf.nn.l2_normalize(v2s_t, dim=1),
      axis=1)

  def _op(preds):
    rho, _ = scipy.stats.spearmanr(preds, actuals)
    return rho

  return tf.py_func(_op, [preds_t], tf.float64)
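The py_func simply rank-correlates the model's cosine similarities with the human scores. A standalone illustration of the metric with made-up values; identical rankings yield rho = 1:

import numpy as np
import scipy.stats

cosine_preds = np.array([0.9, 0.1, 0.5, 0.7])
human_scores = np.array([9.0, 2.0, 4.5, 7.5])
rho, _ = scipy.stats.spearmanr(cosine_preds, human_scores)
assert np.isclose(rho, 1.0)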
'Returns an op that runs an eval on an analogy dataset. The eval dataset is assumed to be tab-separated, with four tokens per line. The first three tokens are query terms, the last is the expected answer. For each line (e.g., "man king woman queen"), the vectors corresponding to the query terms are added ("king - man + woman") to produce a query vector. If the expected answer\'s vector is the nearest neighbor to the query vector (not counting any of the query vectors themselves), then the line is scored as correct. The reported accuracy is the number of correct rows divided by the total number of rows. Missing terms are replaced with an arbitrary vector and will almost certainly result in incorrect answers. Note that the results are approximate: for efficiency\'s sake, only the first `max_vocab_size` terms are included in the nearest neighbor search. Args: filename: the filename containing the analogy data. max_vocab_size: the maximum number of tokens to include in the nearest neighbor search. By default, 20000. Returns: The accuracy on the analogy task.'
def analogy_eval_op(self, filename, max_vocab_size=20000):
  analogy_ixs = []
  with open(filename, 'r') as lines:
    for line in lines:
      # The eval file is tab-separated, four tokens per line.
      parts = line.strip().split('\t')
      if len(parts) == 4:
        analogy_ixs.append([self.row_word_to_ix.get(w, 0) for w in parts])

  ix1s, ix2s, ix3s, _ = zip(*analogy_ixs)
  v1s_t, v2s_t, v3s_t = (
      tf.nn.l2_normalize(tf.nn.embedding_lookup(self.row_embedding, ixs), dim=1)
      for ixs in (ix1s, ix2s, ix3s))

  preds_t = v2s_t - v1s_t + v3s_t
  sims_t = tf.matmul(
      preds_t,
      tf.nn.l2_normalize(self.row_embedding[:max_vocab_size], dim=1),
      transpose_b=True)

  _, preds_ixs_t = tf.nn.top_k(sims_t, 4)

  def _op(preds_ixs):
    correct, total = 0, 0
    for pred_ixs, actual_ixs in itertools.izip(preds_ixs, analogy_ixs):
      pred_ixs = [ix for ix in pred_ixs if ix not in actual_ixs[:3]]
      correct += pred_ixs[0] == actual_ixs[3]
      total += 1
    return float(correct) / total

  return tf.py_func(_op, [preds_ixs_t], tf.float64)
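The query vector is formed by the usual analogy arithmetic and scored by cosine similarity against the vocabulary. A toy NumPy illustration with hand-picked 2-d vectors (not real embeddings):

import numpy as np

emb = {'man': np.array([1.0, 0.0]), 'king': np.array([1.0, 1.0]),
       'woman': np.array([0.0, 0.0]), 'queen': np.array([0.0, 1.0]),
       'apple': np.array([5.0, 0.1])}
query = emb['king'] - emb['man'] + emb['woman']
candidates = [w for w in emb if w not in ('man', 'king', 'woman')]
best = max(candidates,
           key=lambda w: np.dot(query, emb[w]) /
           (np.linalg.norm(query) * np.linalg.norm(emb[w])))
assert best == 'queen'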
'Writes tensor to output_path as tsv.'
def _write_tensor(self, vocab_path, output_path, session, embedding):
  embeddings = session.run(embedding)
  with open(output_path, 'w') as out_f:
    with open(vocab_path) as vocab_f:
      for index, word in enumerate(vocab_f):
        word = word.strip()
        embedding = embeddings[index]
        # Tab-separated output: word followed by its embedding values.
        print('\t'.join([word.strip()] + [str(x) for x in embedding]),
              file=out_f)
'Writes row and column embeddings to disk.'
def write_embeddings(self, config, session):
  self._write_tensor(
      os.path.join(config.input_base_path, 'row_vocab.txt'),
      os.path.join(config.output_base_path, 'row_embedding.tsv'),
      session, self.row_embedding)
  self._write_tensor(
      os.path.join(config.input_base_path, 'col_vocab.txt'),
      os.path.join(config.output_base_path, 'col_embedding.tsv'),
      session, self.col_embedding)
'An alternative implementation of the coordinate encoding. Args: net: a tensor of shape=[batch_size, height, width, num_features] Returns: a list of tensors with encoded image coordinates in them.'
def encode_coordinates_alt(self, net):
  batch_size, h, w, _ = net.shape.as_list()
  h_loc = [
      tf.tile(
          tf.reshape(
              tf.contrib.layers.one_hot_encoding(
                  tf.constant([i]), num_classes=h), [h, 1]), [1, w])
      for i in xrange(h)
  ]
  h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2)
  w_loc = [
      tf.tile(
          tf.contrib.layers.one_hot_encoding(tf.constant([i]), num_classes=w),
          [h, 1])
      for i in xrange(w)
  ]
  w_loc = tf.concat([tf.expand_dims(t, 2) for t in w_loc], 2)
  loc = tf.concat([h_loc, w_loc], 2)
  loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
  return tf.concat([net, loc], 3)
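A NumPy sketch of the same encoding for a tiny hypothetical 2x3 feature map: every spatial position gets a one-hot row index (length h) and a one-hot column index (length w) appended to its features.

import numpy as np

h, w = 2, 3
h_loc = np.repeat(np.eye(h)[:, None, :], w, axis=1)   # [h, w, h]
w_loc = np.repeat(np.eye(w)[None, :, :], h, axis=0)   # [h, w, w]
loc = np.concatenate([h_loc, w_loc], axis=2)          # [h, w, h + w]
assert loc.shape == (2, 3, 5)
assert loc[1, 2].tolist() == [0, 1, 0, 0, 1]           # row 1, column 2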
'Stores argument in member variable for further use. Args: net: A tensor with shape [batch_size, num_features, feature_size] which contains some extracted image features. labels_one_hot: An optional (can be None) ground truth labels for the input features. Is a tensor with shape [batch_size, seq_length, num_char_classes] model_params: A namedtuple with model parameters (model.ModelParams). method_params: A SequenceLayerParams instance.'
def __init__(self, net, labels_one_hot, model_params, method_params):
  self._params = model_params
  self._mparams = method_params
  self._net = net
  self._labels_one_hot = labels_one_hot
  self._batch_size = net.get_shape().dims[0].value
  self._char_logits = {}
  regularizer = slim.l2_regularizer(self._mparams.weight_decay)
  self._softmax_w = slim.model_variable(
      'softmax_w',
      [self._mparams.num_lstm_units, self._params.num_char_classes],
      initializer=orthogonal_initializer,
      regularizer=regularizer)
  self._softmax_b = slim.model_variable(
      'softmax_b', [self._params.num_char_classes],
      initializer=tf.zeros_initializer(),
      regularizer=regularizer)
'Returns a sample to be used to predict a character during training. This function is used as a loop_function for an RNN decoder. Args: prev: output tensor from previous step of the RNN. A tensor with shape: [batch_size, num_char_classes]. i: index of a character in the output sequence. Returns: A tensor with shape [batch_size, ?] - depth depends on implementation details.'
@abc.abstractmethod
def get_train_input(self, prev, i):
pass
'Returns a sample to be used to predict a character during inference. This function is used as a loop_function for an RNN decoder. Args: prev: output tensor from previous step of the RNN. A tensor with shape: [batch_size, num_char_classes]. i: index of a character in the output sequence. Returns: A tensor with shape [batch_size, ?] - depth depends on implementation details.'
@abc.abstractmethod
def get_eval_input(self, prev, i):
raise AssertionError('Not implemented')
'Unrolls an RNN cell for all inputs. This is a placeholder to call some RNN decoder. It has an interface similar to tf.seq2seq.rnn_decoder. Args: decoder_inputs: A list of 2D Tensors [batch_size x input_size]. In fact, in the presence of a loop_function most existing decoders use only the first element to determine batch_size and the length of the list to determine the number of steps. initial_state: 2D Tensor with shape [batch_size x cell.state_size]. loop_function: a function applied to the i-th output in order to generate the (i+1)-st input (see self.get_input). cell: rnn_cell.RNNCell defining the cell function and size. Returns: A tuple of the form (outputs, state), where: outputs: A list of character logits of the same length as decoder_inputs, of 2D Tensors with shape [batch_size x num_characters]. state: The state of each cell at the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size].'
@abc.abstractmethod
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
pass
'Returns True if the layer is created for training stage.'
def is_training(self):
return (self._labels_one_hot is not None)
'Creates logits for a character if required. Args: inputs: A tensor with shape [batch_size, ?] (depth is implementation dependent). char_index: A integer index of a character in the output sequence. Returns: A tensor with shape [batch_size, num_char_classes]'
def char_logit(self, inputs, char_index):
  if char_index not in self._char_logits:
    self._char_logits[char_index] = tf.nn.xw_plus_b(
        inputs, self._softmax_w, self._softmax_b)
  return self._char_logits[char_index]
'Creates one hot encoding for a logit of a character. Args: logit: A tensor with shape [batch_size, num_char_classes]. Returns: A tensor with shape [batch_size, num_char_classes]'
def char_one_hot(self, logit):
  prediction = tf.argmax(logit, dimension=1)
  return slim.one_hot_encoding(prediction, self._params.num_char_classes)
'A wrapper for get_train_input and get_eval_input. Args: prev: output tensor from previous step of the RNN. A tensor with shape: [batch_size, num_char_classes]. i: index of a character in the output sequence. Returns: A tensor with shape [batch_size, ?] - depth depends on implementation details.'
def get_input(self, prev, i):
  if self.is_training():
    return self.get_train_input(prev, i)
  else:
    return self.get_eval_input(prev, i)
'Creates character sequence logits for a net specified in the constructor. A "main" method for the sequence layer which glues together all pieces. Returns: A tensor with shape [batch_size, seq_length, num_char_classes].'
def create_logits(self):
  with tf.variable_scope('LSTM'):
    first_label = self.get_input(prev=None, i=0)
    decoder_inputs = [first_label] + [None] * (self._params.seq_length - 1)
    lstm_cell = tf.contrib.rnn.LSTMCell(
        self._mparams.num_lstm_units,
        use_peepholes=False,
        cell_clip=self._mparams.lstm_state_clip_value,
        state_is_tuple=True,
        initializer=orthogonal_initializer)
    lstm_outputs, _ = self.unroll_cell(
        decoder_inputs=decoder_inputs,
        initial_state=lstm_cell.zero_state(self._batch_size, tf.float32),
        loop_function=self.get_input,
        cell=lstm_cell)

  with tf.variable_scope('logits'):
    logits_list = [
        tf.expand_dims(self.char_logit(logit, i), dim=1)
        for i, logit in enumerate(lstm_outputs)
    ]

  return tf.concat(logits_list, 1)
'Returns a subset of image features for a character. Args: char_index: an index of a character. Returns: A tensor with shape [batch_size, ?]. The output depth depends on the depth of input net.'
def get_image_feature(self, char_index):
  batch_size, features_num, _ = [d.value for d in self._net.get_shape()]
  slice_len = int(features_num / self._params.seq_length)
  net_slice = self._net[:, char_index:char_index + slice_len, :]
  feature = tf.reshape(net_slice, [batch_size, -1])
  logging.debug('Image feature: %s', feature)
  return feature
'See SequenceLayerBase.get_eval_input for details.'
def get_eval_input(self, prev, i):
  del prev
  return self.get_image_feature(i)
'See SequenceLayerBase.get_train_input for details.'
def get_train_input(self, prev, i):
return self.get_eval_input(prev, i)
'See SequenceLayerBase.unroll_cell for details.'
def unroll_cell(self, decoder_inputs, initial_state, loop_function, cell):
  return tf.contrib.legacy_seq2seq.rnn_decoder(
      decoder_inputs=decoder_inputs,
      initial_state=initial_state,
      cell=cell,
      loop_function=self.get_input)
'See SequenceLayerBase.get_eval_input for details.'
def get_eval_input(self, prev, i):
  if i == 0:
    prev = self._zero_label
  else:
    logit = self.char_logit(prev, char_index=i - 1)
    prev = self.char_one_hot(logit)
  image_feature = self.get_image_feature(char_index=i)
  return tf.concat([image_feature, prev], 1)
'See SequenceLayerBase.get_train_input for details.'
def get_train_input(self, prev, i):
  if i == 0:
    prev = self._zero_label
  else:
    prev = self._labels_one_hot[:, i - 1, :]
  image_feature = self.get_image_feature(i)
  return tf.concat([image_feature, prev], 1)
'See SequenceLayerBase.get_eval_input for details.'
def get_eval_input(self, prev, i):
  del prev, i
  return self._zero_label
'See SequenceLayerBase.get_train_input for details.'
def get_train_input(self, prev, i):
return self.get_eval_input(prev, i)
'See SequenceLayerBase.get_train_input for details.'
def get_train_input(self, prev, i):
  if i == 0:
    return self._zero_label
  else:
    return self._labels_one_hot[:, i - 1, :]
'See SequenceLayerBase.get_eval_input for details.'
def get_eval_input(self, prev, i):
  if i == 0:
    return self._zero_label
  else:
    logit = self.char_logit(prev, char_index=i - 1)
    return self.char_one_hot(logit)
'Creates a lookup table. Args: charset: a dictionary with id-to-character mapping.'
def __init__(self, charset, default_character='?'):
  mapping_strings = tf.constant(_dict_to_array(charset, default_character))
  self.table = tf.contrib.lookup.index_to_string_table_from_tensor(
      mapping=mapping_strings, default_value=default_character)
'Returns a string corresponding to a sequence of character ids. Args: ids: a tensor with shape [batch_size, max_sequence_length]'
def get_text(self, ids):
return tf.reduce_join(self.table.lookup(tf.to_int64(ids)), reduction_indices=1)
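Hypothetical usage of the mapper (the enclosing class name is not shown above, so it is assumed here): with a made-up charset such as {0: '', 1: 'h', 2: 'i'}, looking up ids [[1, 2, 0]] and joining along the sequence axis yields 'hi'. Since this needs a TF session and table initialization, it is sketched as comments only:

# charset = {0: '', 1: 'h', 2: 'i'}        # made-up id-to-character mapping
# mapper = CharsetMapper(charset)          # class name assumed for illustration
# text_op = mapper.get_text(tf.constant([[1, 2, 0]], dtype=tf.int64))
# with tf.Session() as sess:
#   sess.run(tf.tables_initializer())
#   print(sess.run(text_op))               # -> [b'hi']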
'Initializes model parameters. Args: num_char_classes: size of character set. seq_length: number of characters in a sequence. num_views: Number of views (conv towers) to use. null_code: A character code corresponding to a character which indicates end of a sequence. mparams: a dictionary with hyper parameters for methods, keys - function names, values - corresponding namedtuples.'
def __init__(self, num_char_classes, seq_length, num_views, null_code, mparams=None):
  super(Model, self).__init__()
  self._params = ModelParams(
      num_char_classes=num_char_classes,
      seq_length=seq_length,
      num_views=num_views,
      null_code=null_code)
  self._mparams = self.default_mparams()
  if mparams:
    self._mparams.update(mparams)
'Computes convolutional features using the InceptionV3 model. Args: images: A tensor of shape [batch_size, height, width, channels]. is_training: whether is training or not. reuse: whether or not the network and its variables should be reused. To be able to reuse \'scope\' must be given. Returns: A tensor of shape [batch_size, OH, OW, N], where OWxOH is resolution of output feature map and N is number of output features (depends on the network architecture).'
def conv_tower_fn(self, images, is_training=True, reuse=None):
  mparams = self._mparams['conv_tower_fn']
  logging.debug('Using final_endpoint=%s', mparams.final_endpoint)
  with tf.variable_scope('conv_tower_fn/INCE'):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      net, _ = inception.inception_v3_base(
          images, final_endpoint=mparams.final_endpoint)
  return net
'Splits an input tensor into a list of tensors (features). Args: net: A feature map of shape [batch_size, num_features, feature_size]. Raises: AssertionError: if num_features is less than seq_length. Returns: A list with seq_length tensors of shape [batch_size, feature_size]'
def _create_lstm_inputs(self, net):
  num_features = net.get_shape().dims[1].value
  if num_features < self._params.seq_length:
    raise AssertionError(
        'Incorrect dimension #1 of input tensor %d should be bigger than %d '
        '(shape=%s)' % (num_features, self._params.seq_length,
                        net.get_shape()))
  elif num_features > self._params.seq_length:
    logging.warning('Ignoring some features: use %d of %d (shape=%s)',
                    self._params.seq_length, num_features, net.get_shape())
    net = tf.slice(net, [0, 0, 0], [-1, self._params.seq_length, -1])

  return tf.unstack(net, axis=1)
'Max pool across all nets in spatial dimensions. Args: nets_list: A list of 4D tensors with identical size. Returns: A tensor with the same size as any input tensors.'
def max_pool_views(self, nets_list):
  batch_size, height, width, num_features = [
      d.value for d in nets_list[0].get_shape().dims]
  xy_flat_shape = (batch_size, 1, height * width, num_features)
  nets_for_merge = []
  with tf.variable_scope('max_pool_views', values=nets_list):
    for net in nets_list:
      nets_for_merge.append(tf.reshape(net, xy_flat_shape))
    merged_net = tf.concat(nets_for_merge, 1)
    net = slim.max_pool2d(
        merged_net, kernel_size=[len(nets_list), 1], stride=1)
    net = tf.reshape(net, (batch_size, height, width, num_features))
  return net
'Combines output of multiple convolutional towers into a single tensor. It stacks towers one on top of another (in the height dimension) in a 4x1 grid. The order is an arbitrary design choice and shouldn\'t matter much. Args: nets: list of tensors of shape=[batch_size, height, width, num_features]. Returns: A tensor of shape [batch_size, seq_length, features_size].'
def pool_views_fn(self, nets):
  with tf.variable_scope('pool_views_fn/STCK'):
    net = tf.concat(nets, 1)
    batch_size = net.get_shape().dims[0].value
    feature_size = net.get_shape().dims[3].value
    return tf.reshape(net, [batch_size, -1, feature_size])
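A short numpy sketch of the resulting shapes (the tower sizes below are illustrative assumptions, not taken from the model config): concatenating the views along the height dimension and flattening the spatial grid yields the [batch_size, seq_length, feature_size] tensor consumed by the sequence decoder.

import numpy as np

nets = [np.zeros((2, 1, 5, 8), dtype=np.float32) for _ in range(4)]  # 4 towers
net = np.concatenate(nets, axis=1)          # stacked in height: (2, 4, 5, 8)
batch_size, feature_size = net.shape[0], net.shape[3]
seq = net.reshape(batch_size, -1, feature_size)
print(seq.shape)  # (2, 20, 8): 20 flattened feature slots per image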
'Returns confidence scores (softmax values) for predicted characters. Args: chars_logit: chars logits, a tensor with shape [batch_size x seq_length x num_char_classes] Returns: A tuple (ids, log_prob, scores), where: ids - predicted characters, a int32 tensor with shape [batch_size x seq_length]; log_prob - a log probability of all characters, a float tensor with shape [batch_size, seq_length, num_char_classes]; scores - corresponding confidence scores for characters, a float tensor with shape [batch_size x seq_length].'
def char_predictions(self, chars_logit):
  log_prob = utils.logits_to_log_prob(chars_logit)
  ids = tf.to_int32(tf.argmax(log_prob, dimension=2), name='predicted_chars')
  mask = tf.cast(
      slim.one_hot_encoding(ids, self._params.num_char_classes), tf.bool)
  all_scores = tf.nn.softmax(chars_logit)
  selected_scores = tf.boolean_mask(all_scores, mask, name='char_scores')
  scores = tf.reshape(selected_scores, shape=(-1, self._params.seq_length))
  return ids, log_prob, scores
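A hedged numpy illustration of the score selection (the logits below are made up): for each position the confidence score is simply the softmax probability of the argmax character.

import numpy as np

chars_logit = np.array([[[2.0, 0.5, 0.1],       # batch=1, seq=2, classes=3
                         [0.2, 0.1, 3.0]]])
all_scores = np.exp(chars_logit) / np.exp(chars_logit).sum(-1, keepdims=True)
ids = all_scores.argmax(-1)                                    # [[0, 2]]
scores = np.take_along_axis(all_scores, ids[..., None], -1)[..., 0]
# scores[b, t] is the confidence of the predicted character at position t.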
'Adds one-hot encoding of coordinates to different views in the networks. For each "pixel" of a feature map it adds one-hot encoded x and y coordinates. Args: net: a tensor of shape=[batch_size, height, width, num_features] Returns: a tensor with the same height and width, but altered feature_size.'
def encode_coordinates_fn(self, net):
  mparams = self._mparams['encode_coordinates_fn']
  if mparams.enabled:
    batch_size, h, w, _ = net.shape.as_list()
    x, y = tf.meshgrid(tf.range(w), tf.range(h))
    w_loc = slim.one_hot_encoding(x, num_classes=w)
    h_loc = slim.one_hot_encoding(y, num_classes=h)
    loc = tf.concat([h_loc, w_loc], 2)
    loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
    return tf.concat([net, loc], 3)
  else:
    return net
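A minimal numpy sketch of the coordinate encoding (toy shapes, purely illustrative): the feature depth grows from C to C + H + W because each pixel gets a one-hot row index and a one-hot column index appended to its features.

import numpy as np

b, h, w, c = 1, 2, 3, 4
net = np.zeros((b, h, w, c), dtype=np.float32)
x, y = np.meshgrid(np.arange(w), np.arange(h))      # both shaped [h, w]
w_loc = np.eye(w, dtype=np.float32)[x]              # [h, w, w] one-hot x
h_loc = np.eye(h, dtype=np.float32)[y]              # [h, w, h] one-hot y
loc = np.concatenate([h_loc, w_loc], axis=2)        # [h, w, h + w]
loc = np.tile(loc[None], [b, 1, 1, 1])
out = np.concatenate([net, loc], axis=3)
print(out.shape)  # (1, 2, 3, 9) = depth 4 + 2 + 3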
'Creates a base part of the Model (no gradients, losses or summaries). Args: images: A tensor of shape [batch_size, height, width, channels]. labels_one_hot: Optional (can be None) one-hot encoding for ground truth labels. If provided the function will create a model for training. scope: Optional variable_scope. reuse: whether or not the network and its variables should be reused. To be able to reuse, \'scope\' must be given. Returns: A named tuple OutputEndpoints.'
def create_base(self, images, labels_one_hot, scope='AttentionOcr_v1', reuse=None):
  logging.debug('images: %s', images)
  is_training = labels_one_hot is not None
  with tf.variable_scope(scope, reuse=reuse):
    views = tf.split(
        value=images, num_or_size_splits=self._params.num_views, axis=2)
    logging.debug('Views=%d single view: %s', len(views), views[0])
    nets = [
        self.conv_tower_fn(v, is_training, reuse=(i != 0))
        for i, v in enumerate(views)
    ]
    logging.debug('Conv tower: %s', nets[0])
    nets = [self.encode_coordinates_fn(net) for net in nets]
    logging.debug('Conv tower w/ encoded coordinates: %s', nets[0])
    net = self.pool_views_fn(nets)
    logging.debug('Pooled views: %s', net)
    chars_logit = self.sequence_logit_fn(net, labels_one_hot)
    logging.debug('chars_logit: %s', chars_logit)
    predicted_chars, chars_log_prob, predicted_scores = self.char_predictions(
        chars_logit)
    return OutputEndpoints(
        chars_logit=chars_logit,
        chars_log_prob=chars_log_prob,
        predicted_chars=predicted_chars,
        predicted_scores=predicted_scores)
'Creates all losses required to train the model. Args: data: InputEndpoints namedtuple. endpoints: Model namedtuple. Returns: Total loss.'
def create_loss(self, data, endpoints):
  self.sequence_loss_fn(endpoints.chars_logit, data.labels)
  total_loss = slim.losses.get_total_loss()
  tf.summary.scalar('TotalLoss', total_loss)
  return total_loss
'Applies a label smoothing regularization. Uses the same method as in https://arxiv.org/abs/1512.00567. Args: chars_labels: ground truth ids of characters, shape=[batch_size, seq_length]; weight: label-smoothing regularization weight. Returns: A tensor with the same shape as the input.'
def label_smoothing_regularization(self, chars_labels, weight=0.1):
  one_hot_labels = tf.one_hot(
      chars_labels, depth=self._params.num_char_classes, axis=-1)
  pos_weight = 1.0 - weight
  neg_weight = weight / self._params.num_char_classes
  return one_hot_labels * pos_weight + neg_weight
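A worked arithmetic example of the smoothing (the class count and weight below are illustrative): the true class keeps weight 1 - w + w/K and every other class gets w/K, so the row still sums to 1.

import numpy as np

weight = 0.1
num_char_classes = 4
one_hot = np.array([0., 0., 1., 0.])
smoothed = one_hot * (1.0 - weight) + weight / num_char_classes
print(smoothed)  # [0.025 0.025 0.925 0.025], sums to 1.0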
'Loss function for char sequence. Depending on values of hyper parameters it applies label smoothing and can also ignore all null chars after the first one. Args: chars_logits: logits for predicted characters, shape=[batch_size, seq_length, num_char_classes]; chars_labels: ground truth ids of characters, shape=[batch_size, seq_length]; mparams: method hyper parameters. Returns: A Tensor with shape [batch_size] - the log-perplexity for each sequence.'
def sequence_loss_fn(self, chars_logits, chars_labels):
  mparams = self._mparams['sequence_loss_fn']
  with tf.variable_scope('sequence_loss_fn/SLF'):
    if mparams.label_smoothing > 0:
      smoothed_one_hot_labels = self.label_smoothing_regularization(
          chars_labels, mparams.label_smoothing)
      labels_list = tf.unstack(smoothed_one_hot_labels, axis=1)
    else:
      labels_list = tf.unstack(chars_labels, axis=1)
    batch_size, seq_length, _ = chars_logits.shape.as_list()
    if mparams.ignore_nulls:
      weights = tf.ones((batch_size, seq_length), dtype=tf.float32)
    else:
      reject_char = tf.constant(
          self._params.num_char_classes - 1,
          shape=(batch_size, seq_length),
          dtype=tf.int64)
      known_char = tf.not_equal(chars_labels, reject_char)
      weights = tf.to_float(known_char)
    logits_list = tf.unstack(chars_logits, axis=1)
    weights_list = tf.unstack(weights, axis=1)
    loss = tf.contrib.legacy_seq2seq.sequence_loss(
        logits_list,
        labels_list,
        weights_list,
        softmax_loss_function=get_softmax_loss_fn(mparams.label_smoothing),
        average_across_timesteps=mparams.average_across_timesteps)
    tf.losses.add_loss(loss)
    return loss
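A toy numpy illustration of the per-character weights when nulls are not ignored (labels and class count are made up; the null character is assumed to be id num_char_classes - 1): null positions contribute zero to the sequence loss.

import numpy as np

num_char_classes = 5
chars_labels = np.array([[2, 0, 4, 4],     # 4 is the null character here
                         [1, 3, 2, 4]])
weights = (chars_labels != num_char_classes - 1).astype(np.float32)
print(weights)  # [[1. 1. 0. 0.]
                #  [1. 1. 1. 0.]]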
'Creates all summaries for the model. Args: data: InputEndpoints namedtuple. endpoints: OutputEndpoints namedtuple. charset: A dictionary with mapping between character codes and unicode characters. Use the one provided by a dataset.charset. is_training: If True will create summary prefixes for training job, otherwise - for evaluation. Returns: A list of evaluation ops'
def create_summaries(self, data, endpoints, charset, is_training):
  def sname(label):
    prefix = 'train' if is_training else 'eval'
    return '%s/%s' % (prefix, label)

  max_outputs = 4
  tf.summary.image(sname('image'), data.images, max_outputs=max_outputs)
  if is_training:
    tf.summary.image(
        sname('image/orig'), data.images_orig, max_outputs=max_outputs)
    for var in tf.trainable_variables():
      tf.summary.histogram(var.op.name, var)
    return None
  else:
    names_to_values = {}
    names_to_updates = {}

    def use_metric(name, value_update_tuple):
      names_to_values[name] = value_update_tuple[0]
      names_to_updates[name] = value_update_tuple[1]

    use_metric('CharacterAccuracy',
               metrics.char_accuracy(
                   endpoints.predicted_chars,
                   data.labels,
                   streaming=True,
                   rej_char=self._params.null_code))
    use_metric('SequenceAccuracy',
               metrics.sequence_accuracy(
                   endpoints.predicted_chars,
                   data.labels,
                   streaming=True,
                   rej_char=self._params.null_code))
    for name, value in names_to_values.iteritems():
      summary_name = 'eval/' + name
      tf.summary.scalar(summary_name, tf.Print(value, [value], summary_name))
    return names_to_updates.values()
'Creates init operations to restore weights from various checkpoints. Args: master_checkpoint: path to a checkpoint which contains all weights for the whole model. inception_checkpoint: path to a checkpoint which contains weights for the inception part only. Returns: a function to run initialization ops.'
def create_init_fn_to_restore(self, master_checkpoint, inception_checkpoint):
  all_assign_ops = []
  all_feed_dict = {}

  def assign_from_checkpoint(variables, checkpoint):
    logging.info('Request to restore %d weights from %s', len(variables),
                 checkpoint)
    if not variables:
      logging.error("Can't find any variables to restore.")
      sys.exit(1)
    assign_op, feed_dict = slim.assign_from_checkpoint(checkpoint, variables)
    all_assign_ops.append(assign_op)
    all_feed_dict.update(feed_dict)

  if master_checkpoint:
    assign_from_checkpoint(utils.variables_to_restore(), master_checkpoint)
  if inception_checkpoint:
    variables = utils.variables_to_restore(
        'AttentionOcr_v1/conv_tower_fn/INCE', strip_scope=True)
    assign_from_checkpoint(variables, inception_checkpoint)

  def init_assign_fn(sess):
    logging.info('Restoring checkpoint(s)')
    sess.run(all_assign_ops, all_feed_dict)

  return init_assign_fn
'Wrapper for test session context manager with required initialization. Yields: A session object that should be used as a context manager.'
@contextlib.contextmanager
def initialized_session(self):
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    yield sess
'Returns a RandomUniform Tensor between -param_init and param_init.'
def RandomUniformInit(self, shape):
  param_seed = self.utility.FLAGS.param_seed
  self.init_seed_counter += 1
  return tf.random_uniform(
      shape,
      -1.0 * np.float32(self.utility.FLAGS.param_init).astype(
          self.utility.np_data_type[self.utility.FLAGS.data_type]),
      np.float32(self.utility.FLAGS.param_init).astype(
          self.utility.np_data_type[self.utility.FLAGS.data_type]),
      self.utility.tf_data_type[self.utility.FLAGS.data_type],
      param_seed + self.init_seed_counter)
'Build the graph corresponding to the progressive BRNN model.'
def BuildGraph(self, input_codes):
  layer_depth = self._config['layer_depth']
  layer_count = self._config['layer_count']

  code_shape = input_codes.get_shape()
  code_depth = code_shape[-1].value
  if self._config['coded_layer_count'] > 0:
    prefix_depth = self._config['coded_layer_count'] * layer_depth
    if code_depth < prefix_depth:
      raise ValueError('Invalid prefix depth: {} VS {}'.format(
          prefix_depth, code_depth))
    input_codes = input_codes[:, :, :, :prefix_depth]

  code_shape = input_codes.get_shape()
  code_depth = code_shape[-1].value
  if code_depth % layer_depth != 0:
    raise ValueError(
        'Code depth must be a multiple of the layer depth: {} vs {}'.format(
            code_depth, layer_depth))
  code_layer_count = code_depth // layer_depth
  if code_layer_count > layer_count:
    raise ValueError('Input codes have too many layers: {}, max={}'.format(
        code_layer_count, layer_count))

  layer_prediction = LayerPrediction(layer_count, layer_depth)
  code_length_block = blocks.CodeLength()
  code_length = []
  code_layers = tf.split(
      value=input_codes, num_or_size_splits=code_layer_count, axis=3)
  for k in xrange(code_layer_count):
    x = code_layers[k]
    predicted_x = layer_prediction(x)
    epsilon = 0.001
    predicted_x = tf.clip_by_value(predicted_x, -1 + epsilon, 1 - epsilon)
    code_length.append(
        code_length_block(
            blocks.ConvertSignCodeToZeroOneCode(x),
            blocks.ConvertSignCodeToZeroOneCode(predicted_x)))
    tf.summary.scalar('code_length_layer_{:02d}'.format(k), code_length[-1])
  code_length = tf.stack(code_length)
  self.loss = tf.reduce_mean(code_length)
  tf.summary.scalar('loss', self.loss)

  dummy_x = tf.zeros_like(code_layers[0])
  for _ in xrange(layer_count - code_layer_count):
    dummy_predicted_x = layer_prediction(dummy_x)

  self.average_code_length = tf.reduce_mean(code_length)

  if self._optimizer:
    optim_op = self._optimizer.minimize(
        self.loss, global_step=self._global_step)
    block_updates = blocks.CreateBlockUpdates()
    if block_updates:
      with tf.get_default_graph().control_dependencies([optim_op]):
        self.train_op = tf.group(*block_updates)
    else:
      self.train_op = optim_op
  else:
    self.train_op = None
'Build the Tensorflow graph corresponding to the entropy coder model. Args: input_codes: Tensor of size: batch_size x height x width x bit_depth corresponding to the codes to compress. The input codes are {-1, +1} codes.'
def BuildGraph(self, input_codes):
raise NotImplementedError()
'Returns a default model configuration to be used for unit tests.'
def GetConfigStringForUnitTest(self):
return None
'Context manager that handles graph, namescope, and nested blocks.'
@contextlib.contextmanager
def _BlockScope(self):
  self._stack.append(self)
  try:
    with self._graph.as_default():
      with self._OptionalNameScope(self._scope_str):
        yield self
  finally:
    self._stack.pop()
'Implementation of __call__().'
def _Apply(self, *args, **kwargs):
raise NotImplementedError()
'Creates a new variable. This function creates a variable, then returns a local copy created by Identity operation. To get the Variable class object, use LookupRef() method. Note that each time Variable class object is used as an input to an operation, Tensorflow will create a new Send/Recv pair. This hurts performance. If not for assign operations, use the local copy returned by this method. Args: value: Initialization value of the variable. The shape and the data type of the variable is determined by this initial value. **kwargs: Extra named arguments passed to Variable.__init__(). Returns: A local copy of the new variable.'
def NewVar(self, value, **kwargs):
  v = tf.Variable(value, **kwargs)
  self._variables.append(v)
  return v
'Returns bool if the block is initialized. By default, BlockBase assumes that a block is initialized when __call__() is executed for the first time. If this is an incorrect assumption for some subclasses, override this property in those subclasses. Returns: True if initialized, False otherwise.'
@property
def initialized(self):
return self._called
'Asserts initialized property.'
def AssertInitialized(self):
  if not self.initialized:
    raise RuntimeError('{} has not been initialized.'.format(self))
'Returns the list of all tensorflow variables used inside this block.'
def VariableList(self):
  variables = list(
      itertools.chain(
          itertools.chain.from_iterable(
              t.VariableList() for t in self._subblocks),
          self._VariableList()))
  return variables
'Returns the list of all tensorflow variables owned by this block.'
def _VariableList(self):
  self.AssertInitialized()
  return self._variables
'Returns L2 loss list of (almost) all variables used inside this block. When this method needs to be overridden, there are two choices. 1. Override CreateWeightLoss() to change the weight loss of all variables that belong to this block, both directly and indirectly. 2. Override _CreateWeightLoss() to change the weight loss of all variables that directly belong to this block but not to the sub-blocks. Returns: A Tensor object or None.'
def CreateWeightLoss(self):
  losses = list(
      itertools.chain(
          itertools.chain.from_iterable(
              t.CreateWeightLoss() for t in self._subblocks),
          self._CreateWeightLoss()))
  return losses
'Returns weight loss list of variables that belong to this block.'
def _CreateWeightLoss(self):
  self.AssertInitialized()
  with self._BlockScope():
    return [tf.nn.l2_loss(v) for v in self._variables]
'Creates update operations for this block and its sub-blocks.'
def CreateUpdateOps(self):
  ops = list(
      itertools.chain(
          itertools.chain.from_iterable(
              t.CreateUpdateOps() for t in self._subblocks),
          self._CreateUpdateOps()))
  return ops
'Creates update operations for this block.'
def _CreateUpdateOps(self):
  self.AssertInitialized()
  return []
'Mark all the variables of this block as non-trainable. All the variables owned directly or indirectly (through subblocks) are marked as non trainable. This function along with CheckpointInitOp can be used to load a pretrained model that consists of only one part of the whole graph.'
def MarkAsNonTrainable(self):
  assert self._called
  all_variables = self.VariableList()
  collection = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
  for v in all_variables:
    if v in collection:
      collection.remove(v)
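A minimal TF 1.x sketch of the mechanism used above (variable name and shape are made up): removing a variable from the TRAINABLE_VARIABLES collection keeps it in the graph but makes optimizers built afterwards ignore it.

import tensorflow as tf

v = tf.Variable(tf.zeros([3]), name='frozen_weight')
trainable = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
print(v in trainable)   # True: freshly created variables are trainable
trainable.remove(v)     # what MarkAsNonTrainable does for each owned variable
print(v in trainable)   # False: later optimizer.minimize() calls skip v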