INSTRUCTION | RESPONSE
---|---|
Inform Metrics class that experience collection is done. | def end_experience_collection_timer(self):
"""
Inform Metrics class that experience collection is done.
"""
if self.time_start_experience_collection:
curr_delta = time() - self.time_start_experience_collection
if self.delta_last_experience_collection is None:
self.delta_last_experience_collection = curr_delta
else:
self.delta_last_experience_collection += curr_delta
self.time_start_experience_collection = None |
Inform Metrics class about time to step in environment. | def add_delta_step(self, delta: float):
"""
Inform Metrics class about time to step in environment.
"""
if self.delta_last_experience_collection:
self.delta_last_experience_collection += delta
else:
self.delta_last_experience_collection = delta |
Inform Metrics class that policy update has started.
:int number_experiences: Number of experiences in Buffer at this point.
:float mean_return: Return averaged across all cumulative returns since last policy update | def start_policy_update_timer(self, number_experiences: int, mean_return: float):
"""
Inform Metrics class that policy update has started.
:int number_experiences: Number of experiences in Buffer at this point.
:float mean_return: Return averaged across all cumulative returns since last policy update
"""
self.last_buffer_length = number_experiences
self.last_mean_return = mean_return
self.time_policy_update_start = time() |
Inform Metrics class that policy update has completed. | def end_policy_update(self):
"""
Inform Metrics class that policy update has completed.
"""
if self.time_policy_update_start:
self.delta_policy_update = time() - self.time_policy_update_start
else:
self.delta_policy_update = 0
delta_train_start = time() - self.time_training_start
LOGGER.debug(" Policy Update Training Metrics for {}: "
"\n\t\tTime to update Policy: {:0.3f} s \n"
"\t\tTime elapsed since training: {:0.3f} s \n"
"\t\tTime for experience collection: {:0.3f} s \n"
"\t\tBuffer Length: {} \n"
"\t\tReturns : {:0.3f}\n"
.format(self.brain_name, self.delta_policy_update,
delta_train_start, self.delta_last_experience_collection,
self.last_buffer_length, self.last_mean_return))
self._add_row(delta_train_start) |
Write Training Metrics to CSV | def write_training_metrics(self):
"""
Write Training Metrics to CSV
"""
with open(self.path, 'w') as file:
writer = csv.writer(file)
writer.writerow(FIELD_NAMES)
for row in self.rows:
writer.writerow(row) |
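The timer methods above form a simple lifecycle: start/stop the experience-collection clock, start/stop the policy-update clock, then flush rows to CSV. A rough usage sketch follows; the class name `TrainerMetrics`, its constructor arguments, and the `start_experience_collection_timer` counterpart are assumptions not shown in this excerpt.

# Hypothetical driver code for the metrics helper above (names assumed).
metrics = TrainerMetrics(path="results/ppo_metrics.csv", brain_name="3DBall")
metrics.start_experience_collection_timer()      # assumed counterpart to end_*
# ... step the environment and collect experiences ...
metrics.end_experience_collection_timer()
metrics.start_policy_update_timer(number_experiences=2048, mean_return=1.5)
# ... run the policy update ...
metrics.end_policy_update()                      # logs the deltas and records a CSV row
metrics.write_training_metrics()                 # flush all recorded rows to disk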
Creates TF ops to track and increment recent average cumulative reward. | def create_reward_encoder():
"""Creates TF ops to track and increment recent average cumulative reward."""
last_reward = tf.Variable(0, name="last_reward", trainable=False, dtype=tf.float32)
new_reward = tf.placeholder(shape=[], dtype=tf.float32, name='new_reward')
update_reward = tf.assign(last_reward, new_reward)
return last_reward, new_reward, update_reward |
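A minimal usage sketch of the three ops returned above, using the TF 1.x session API that this code targets (session setup assumed):

import tensorflow as tf

last_reward, new_reward, update_reward = create_reward_encoder()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_reward, feed_dict={new_reward: 1.25})  # store the latest mean reward
    print(sess.run(last_reward))                            # -> 1.25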
Creates state encoders for current and future observations.
Used for implementation of Curiosity-driven Exploration by Self-supervised Prediction
See https://arxiv.org/abs/1705.05363 for more details.
:return: current and future state encoder tensors. | def create_curiosity_encoders(self):
"""
Creates state encoders for current and future observations.
Used for implementation of Curiosity-driven Exploration by Self-supervised Prediction
See https://arxiv.org/abs/1705.05363 for more details.
:return: current and future state encoder tensors.
"""
encoded_state_list = []
encoded_next_state_list = []
if self.vis_obs_size > 0:
self.next_visual_in = []
visual_encoders = []
next_visual_encoders = []
for i in range(self.vis_obs_size):
# Create input ops for next (t+1) visual observations.
next_visual_input = self.create_visual_input(self.brain.camera_resolutions[i],
name="next_visual_observation_" + str(i))
self.next_visual_in.append(next_visual_input)
# Create the encoder ops for current and next visual input. Note that these encoders are siamese.
encoded_visual = self.create_visual_observation_encoder(self.visual_in[i], self.curiosity_enc_size,
self.swish, 1, "stream_{}_visual_obs_encoder"
.format(i), False)
encoded_next_visual = self.create_visual_observation_encoder(self.next_visual_in[i],
self.curiosity_enc_size,
self.swish, 1,
"stream_{}_visual_obs_encoder".format(i),
True)
visual_encoders.append(encoded_visual)
next_visual_encoders.append(encoded_next_visual)
hidden_visual = tf.concat(visual_encoders, axis=1)
hidden_next_visual = tf.concat(next_visual_encoders, axis=1)
encoded_state_list.append(hidden_visual)
encoded_next_state_list.append(hidden_next_visual)
if self.vec_obs_size > 0:
# Create the encoder ops for current and next vector input. Note that these encoders are siamese.
# Create input op for next (t+1) vector observation.
self.next_vector_in = tf.placeholder(shape=[None, self.vec_obs_size], dtype=tf.float32,
name='next_vector_observation')
encoded_vector_obs = self.create_vector_observation_encoder(self.vector_in,
self.curiosity_enc_size,
self.swish, 2, "vector_obs_encoder",
False)
encoded_next_vector_obs = self.create_vector_observation_encoder(self.next_vector_in,
self.curiosity_enc_size,
self.swish, 2,
"vector_obs_encoder",
True)
encoded_state_list.append(encoded_vector_obs)
encoded_next_state_list.append(encoded_next_vector_obs)
encoded_state = tf.concat(encoded_state_list, axis=1)
encoded_next_state = tf.concat(encoded_next_state_list, axis=1)
return encoded_state, encoded_next_state |
Creates inverse model TensorFlow ops for Curiosity module.
Predicts action taken given current and future encoded states.
:param encoded_state: Tensor corresponding to encoded current state.
:param encoded_next_state: Tensor corresponding to encoded next state. | def create_inverse_model(self, encoded_state, encoded_next_state):
"""
Creates inverse model TensorFlow ops for Curiosity module.
Predicts action taken given current and future encoded states.
:param encoded_state: Tensor corresponding to encoded current state.
:param encoded_next_state: Tensor corresponding to encoded next state.
"""
combined_input = tf.concat([encoded_state, encoded_next_state], axis=1)
hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
if self.brain.vector_action_space_type == "continuous":
pred_action = tf.layers.dense(hidden, self.act_size[0], activation=None)
squared_difference = tf.reduce_sum(tf.squared_difference(pred_action, self.selected_actions), axis=1)
self.inverse_loss = tf.reduce_mean(tf.dynamic_partition(squared_difference, self.mask, 2)[1])
else:
pred_action = tf.concat(
[tf.layers.dense(hidden, self.act_size[i], activation=tf.nn.softmax)
for i in range(len(self.act_size))], axis=1)
cross_entropy = tf.reduce_sum(-tf.log(pred_action + 1e-10) * self.selected_actions, axis=1)
self.inverse_loss = tf.reduce_mean(tf.dynamic_partition(cross_entropy, self.mask, 2)[1]) |
Creates forward model TensorFlow ops for Curiosity module.
Predicts encoded future state based on encoded current state and given action.
:param encoded_state: Tensor corresponding to encoded current state.
:param encoded_next_state: Tensor corresponding to encoded next state. | def create_forward_model(self, encoded_state, encoded_next_state):
"""
Creates forward model TensorFlow ops for Curiosity module.
Predicts encoded future state based on encoded current state and given action.
:param encoded_state: Tensor corresponding to encoded current state.
:param encoded_next_state: Tensor corresponding to encoded next state.
"""
combined_input = tf.concat([encoded_state, self.selected_actions], axis=1)
hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
# We compare against the concatenation of all observation streams, hence `self.vis_obs_size + int(self.vec_obs_size > 0)`.
pred_next_state = tf.layers.dense(hidden, self.curiosity_enc_size * (self.vis_obs_size + int(self.vec_obs_size > 0)),
activation=None)
squared_difference = 0.5 * tf.reduce_sum(tf.squared_difference(pred_next_state, encoded_next_state), axis=1)
self.intrinsic_reward = tf.clip_by_value(self.curiosity_strength * squared_difference, 0, 1)
self.forward_loss = tf.reduce_mean(tf.dynamic_partition(squared_difference, self.mask, 2)[1]) |
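The intrinsic reward above is just the clipped, scaled prediction error of the forward model. A plain-NumPy restatement of those two lines, with made-up tensor values, can serve as a sanity check:

import numpy as np

# pred_next_state / encoded_next_state: [batch, curiosity_enc_size * num_streams]
pred_next_state = np.array([[0.1, 0.4], [0.9, -0.2]])
encoded_next_state = np.array([[0.0, 0.5], [1.0, -0.1]])
curiosity_strength = 0.1

squared_difference = 0.5 * np.sum((pred_next_state - encoded_next_state) ** 2, axis=1)
intrinsic_reward = np.clip(curiosity_strength * squared_difference, 0, 1)  # one reward per agent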
Creates training-specific Tensorflow ops for PPO models.
:param probs: Current policy probabilities
:param old_probs: Past policy probabilities
:param value: Current value estimate
:param beta: Entropy regularization strength
:param entropy: Current policy entropy
:param epsilon: Value for policy-divergence threshold
:param lr: Learning rate
:param max_step: Total number of training steps. | def create_ppo_optimizer(self, probs, old_probs, value, entropy, beta, epsilon, lr, max_step):
"""
Creates training-specific Tensorflow ops for PPO models.
:param probs: Current policy probabilities
:param old_probs: Past policy probabilities
:param value: Current value estimate
:param beta: Entropy regularization strength
:param entropy: Current policy entropy
:param epsilon: Value for policy-divergence threshold
:param lr: Learning rate
:param max_step: Total number of training steps.
"""
self.returns_holder = tf.placeholder(shape=[None], dtype=tf.float32, name='discounted_rewards')
self.advantage = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='advantages')
self.learning_rate = tf.train.polynomial_decay(lr, self.global_step, max_step, 1e-10, power=1.0)
self.old_value = tf.placeholder(shape=[None], dtype=tf.float32, name='old_value_estimates')
decay_epsilon = tf.train.polynomial_decay(epsilon, self.global_step, max_step, 0.1, power=1.0)
decay_beta = tf.train.polynomial_decay(beta, self.global_step, max_step, 1e-5, power=1.0)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
clipped_value_estimate = self.old_value + tf.clip_by_value(tf.reduce_sum(value, axis=1) - self.old_value,
- decay_epsilon, decay_epsilon)
v_opt_a = tf.squared_difference(self.returns_holder, tf.reduce_sum(value, axis=1))
v_opt_b = tf.squared_difference(self.returns_holder, clipped_value_estimate)
self.value_loss = tf.reduce_mean(tf.dynamic_partition(tf.maximum(v_opt_a, v_opt_b), self.mask, 2)[1])
# Here we calculate PPO policy loss. In continuous control this is done independently for each action gaussian
# and then averaged together. This provides significantly better performance than treating the probability
# as an average of probabilities, or as a joint probability.
r_theta = tf.exp(probs - old_probs)
p_opt_a = r_theta * self.advantage
p_opt_b = tf.clip_by_value(r_theta, 1.0 - decay_epsilon, 1.0 + decay_epsilon) * self.advantage
self.policy_loss = -tf.reduce_mean(tf.dynamic_partition(tf.minimum(p_opt_a, p_opt_b), self.mask, 2)[1])
self.loss = self.policy_loss + 0.5 * self.value_loss - decay_beta * tf.reduce_mean(
tf.dynamic_partition(entropy, self.mask, 2)[1])
if self.use_curiosity:
self.loss += 10 * (0.2 * self.forward_loss + 0.8 * self.inverse_loss)
self.update_batch = optimizer.minimize(self.loss) |
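For intuition, the policy-loss lines above are the standard PPO clipped surrogate. The same arithmetic in NumPy with toy numbers, ignoring the mask partition and the value/entropy terms (`probs`/`old_probs` in the graph are log-probabilities):

import numpy as np

log_probs_new = np.array([-0.9, -1.2, -0.3])
log_probs_old = np.array([-1.0, -1.0, -1.0])
advantage = np.array([1.0, -0.5, 2.0])
epsilon = 0.2

r_theta = np.exp(log_probs_new - log_probs_old)
p_opt_a = r_theta * advantage
p_opt_b = np.clip(r_theta, 1.0 - epsilon, 1.0 + epsilon) * advantage
policy_loss = -np.mean(np.minimum(p_opt_a, p_opt_b))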
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict. | def evaluate(self, brain_info):
"""
Evaluates policy for the agent experiences provided.
:param brain_info: BrainInfo object containing inputs.
:return: Outputs from network as defined by self.inference_dict.
"""
feed_dict = {self.model.batch_size: len(brain_info.vector_observations),
self.model.sequence_length: 1}
epsilon = None
if self.use_recurrent:
if not self.use_continuous_act:
feed_dict[self.model.prev_action] = brain_info.previous_vector_actions.reshape(
[-1, len(self.model.act_size)])
if brain_info.memories.shape[1] == 0:
brain_info.memories = self.make_empty_memory(len(brain_info.agents))
feed_dict[self.model.memory_in] = brain_info.memories
if self.use_continuous_act:
epsilon = np.random.normal(
size=(len(brain_info.vector_observations), self.model.act_size[0]))
feed_dict[self.model.epsilon] = epsilon
feed_dict = self._fill_eval_dict(feed_dict, brain_info)
run_out = self._execute_model(feed_dict, self.inference_dict)
if self.use_continuous_act:
run_out['random_normal_epsilon'] = epsilon
return run_out |
Updates model using buffer.
:param num_sequences: Number of trajectories in batch.
:param mini_batch: Experience batch.
:return: Output from update process. | def update(self, mini_batch, num_sequences):
"""
Updates model using buffer.
:param num_sequences: Number of trajectories in batch.
:param mini_batch: Experience batch.
:return: Output from update process.
"""
feed_dict = {self.model.batch_size: num_sequences,
self.model.sequence_length: self.sequence_length,
self.model.mask_input: mini_batch['masks'].flatten(),
self.model.returns_holder: mini_batch['discounted_returns'].flatten(),
self.model.old_value: mini_batch['value_estimates'].flatten(),
self.model.advantage: mini_batch['advantages'].reshape([-1, 1]),
self.model.all_old_log_probs: mini_batch['action_probs'].reshape(
[-1, sum(self.model.act_size)])}
if self.use_continuous_act:
feed_dict[self.model.output_pre] = mini_batch['actions_pre'].reshape(
[-1, self.model.act_size[0]])
feed_dict[self.model.epsilon] = mini_batch['random_normal_epsilon'].reshape(
[-1, self.model.act_size[0]])
else:
feed_dict[self.model.action_holder] = mini_batch['actions'].reshape(
[-1, len(self.model.act_size)])
if self.use_recurrent:
feed_dict[self.model.prev_action] = mini_batch['prev_action'].reshape(
[-1, len(self.model.act_size)])
feed_dict[self.model.action_masks] = mini_batch['action_mask'].reshape(
[-1, sum(self.brain.vector_action_space_size)])
if self.use_vec_obs:
feed_dict[self.model.vector_in] = mini_batch['vector_obs'].reshape(
[-1, self.vec_obs_size])
if self.use_curiosity:
feed_dict[self.model.next_vector_in] = mini_batch['next_vector_in'].reshape(
[-1, self.vec_obs_size])
if self.model.vis_obs_size > 0:
for i, _ in enumerate(self.model.visual_in):
_obs = mini_batch['visual_obs%d' % i]
if self.sequence_length > 1 and self.use_recurrent:
(_batch, _seq, _w, _h, _c) = _obs.shape
feed_dict[self.model.visual_in[i]] = _obs.reshape([-1, _w, _h, _c])
else:
feed_dict[self.model.visual_in[i]] = _obs
if self.use_curiosity:
for i, _ in enumerate(self.model.visual_in):
_obs = mini_batch['next_visual_obs%d' % i]
if self.sequence_length > 1 and self.use_recurrent:
(_batch, _seq, _w, _h, _c) = _obs.shape
feed_dict[self.model.next_visual_in[i]] = _obs.reshape([-1, _w, _h, _c])
else:
feed_dict[self.model.next_visual_in[i]] = _obs
if self.use_recurrent:
mem_in = mini_batch['memory'][:, 0, :]
feed_dict[self.model.memory_in] = mem_in
self.has_updated = True
run_out = self._execute_model(feed_dict, self.update_dict)
return run_out |
Generates intrinsic reward used for Curiosity-based training.
:BrainInfo curr_info: Current BrainInfo.
:BrainInfo next_info: Next BrainInfo.
:return: Intrinsic rewards for all agents. | def get_intrinsic_rewards(self, curr_info, next_info):
"""
Generates intrinsic reward used for Curiosity-based training.
:BrainInfo curr_info: Current BrainInfo.
:BrainInfo next_info: Next BrainInfo.
:return: Intrinsic rewards for all agents.
"""
if self.use_curiosity:
if len(curr_info.agents) == 0:
return []
feed_dict = {self.model.batch_size: len(next_info.vector_observations),
self.model.sequence_length: 1}
if self.use_continuous_act:
feed_dict[self.model.selected_actions] = next_info.previous_vector_actions
else:
feed_dict[self.model.action_holder] = next_info.previous_vector_actions
for i in range(self.model.vis_obs_size):
feed_dict[self.model.visual_in[i]] = curr_info.visual_observations[i]
feed_dict[self.model.next_visual_in[i]] = next_info.visual_observations[i]
if self.use_vec_obs:
feed_dict[self.model.vector_in] = curr_info.vector_observations
feed_dict[self.model.next_vector_in] = next_info.vector_observations
if self.use_recurrent:
if curr_info.memories.shape[1] == 0:
curr_info.memories = self.make_empty_memory(len(curr_info.agents))
feed_dict[self.model.memory_in] = curr_info.memories
intrinsic_rewards = self.sess.run(self.model.intrinsic_reward,
feed_dict=feed_dict) * float(self.has_updated)
return intrinsic_rewards
else:
return None |
Generates value estimates for bootstrapping.
:param brain_info: BrainInfo to be used for bootstrapping.
:param idx: Index in BrainInfo of agent.
:return: Value estimate. | def get_value_estimate(self, brain_info, idx):
"""
Generates value estimates for bootstrapping.
:param brain_info: BrainInfo to be used for bootstrapping.
:param idx: Index in BrainInfo of agent.
:return: Value estimate.
"""
feed_dict = {self.model.batch_size: 1, self.model.sequence_length: 1}
for i in range(len(brain_info.visual_observations)):
feed_dict[self.model.visual_in[i]] = [brain_info.visual_observations[i][idx]]
if self.use_vec_obs:
feed_dict[self.model.vector_in] = [brain_info.vector_observations[idx]]
if self.use_recurrent:
if brain_info.memories.shape[1] == 0:
brain_info.memories = self.make_empty_memory(len(brain_info.agents))
feed_dict[self.model.memory_in] = [brain_info.memories[idx]]
if not self.use_continuous_act and self.use_recurrent:
feed_dict[self.model.prev_action] = brain_info.previous_vector_actions[idx].reshape(
[-1, len(self.model.act_size)])
value_estimate = self.sess.run(self.model.value, feed_dict)
return value_estimate |
Updates reward value for policy.
:param new_reward: New reward to save. | def update_reward(self, new_reward):
"""
Updates reward value for policy.
:param new_reward: New reward to save.
"""
self.sess.run(self.model.update_reward,
feed_dict={self.model.new_reward: new_reward}) |
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method. | def add_experiences(self, curr_info: AllBrainInfo, next_info: AllBrainInfo,
take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method.
"""
# Used to collect information about student performance.
info_student = curr_info[self.brain_name]
next_info_student = next_info[self.brain_name]
for agent_id in info_student.agents:
self.evaluation_buffer[agent_id].last_brain_info = info_student
for agent_id in next_info_student.agents:
stored_info_student = self.evaluation_buffer[agent_id].last_brain_info
if stored_info_student is None:
continue
else:
next_idx = next_info_student.agents.index(agent_id)
if agent_id not in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
self.cumulative_rewards[agent_id] += next_info_student.rewards[next_idx]
if not next_info_student.local_done[next_idx]:
if agent_id not in self.episode_steps:
self.episode_steps[agent_id] = 0
self.episode_steps[agent_id] += 1 |
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current AllBrainInfo
:param next_info: Next AllBrainInfo | def process_experiences(self, current_info: AllBrainInfo, next_info: AllBrainInfo):
"""
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current AllBrainInfo
:param next_info: Next AllBrainInfo
"""
info_student = next_info[self.brain_name]
for l in range(len(info_student.agents)):
if info_student.local_done[l]:
agent_id = info_student.agents[l]
self.stats['Environment/Cumulative Reward'].append(
self.cumulative_rewards.get(agent_id, 0))
self.stats['Environment/Episode Length'].append(
self.episode_steps.get(agent_id, 0))
self.cumulative_rewards[agent_id] = 0
self.episode_steps[agent_id] = 0 |
A signal that the episode has ended. The buffer must be reset.
Gets called only when the academy resets. | def end_episode(self):
"""
A signal that the episode has ended. The buffer must be reset.
Gets called only when the academy resets.
"""
self.evaluation_buffer.reset_local_buffers()
for agent_id in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
for agent_id in self.episode_steps:
self.episode_steps[agent_id] = 0 |
Updates the policy. | def update_policy(self):
"""
Updates the policy.
"""
self.demonstration_buffer.update_buffer.shuffle()
batch_losses = []
num_batches = min(len(self.demonstration_buffer.update_buffer['actions']) //
self.n_sequences, self.batches_per_epoch)
for i in range(num_batches):
update_buffer = self.demonstration_buffer.update_buffer
start = i * self.n_sequences
end = (i + 1) * self.n_sequences
mini_batch = update_buffer.make_mini_batch(start, end)
run_out = self.policy.update(mini_batch, self.n_sequences)
loss = run_out['policy_loss']
batch_losses.append(loss)
if len(batch_losses) > 0:
self.stats['Losses/Cloning Loss'].append(np.mean(batch_losses))
else:
self.stats['Losses/Cloning Loss'].append(0) |
Creates TF ops to track and increment global training step. | def create_global_steps():
"""Creates TF ops to track and increment global training step."""
global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int32)
increment_step = tf.assign(global_step, tf.add(global_step, 1))
return global_step, increment_step |
Creates image input op.
:param camera_parameters: Parameters for visual observation from BrainInfo.
:param name: Desired name of input op.
:return: input op. | def create_visual_input(camera_parameters, name):
"""
Creates image input op.
:param camera_parameters: Parameters for visual observation from BrainInfo.
:param name: Desired name of input op.
:return: input op.
"""
o_size_h = camera_parameters['height']
o_size_w = camera_parameters['width']
bw = camera_parameters['blackAndWhite']
if bw:
c_channels = 1
else:
c_channels = 3
visual_in = tf.placeholder(shape=[None, o_size_h, o_size_w, c_channels], dtype=tf.float32,
name=name)
return visual_in |
Creates ops for vector observation input.
:param name: Name of the placeholder op.
:param vec_obs_size: Size of stacked vector observation.
:return: | def create_vector_input(self, name='vector_observation'):
"""
Creates ops for vector observation input.
:param name: Name of the placeholder op.
:param vec_obs_size: Size of stacked vector observation.
:return:
"""
self.vector_in = tf.placeholder(shape=[None, self.vec_obs_size], dtype=tf.float32,
name=name)
if self.normalize:
self.running_mean = tf.get_variable("running_mean", [self.vec_obs_size],
trainable=False, dtype=tf.float32,
initializer=tf.zeros_initializer())
self.running_variance = tf.get_variable("running_variance", [self.vec_obs_size],
trainable=False,
dtype=tf.float32,
initializer=tf.ones_initializer())
self.update_mean, self.update_variance = self.create_normalizer_update(self.vector_in)
self.normalized_state = tf.clip_by_value((self.vector_in - self.running_mean) / tf.sqrt(
self.running_variance / (tf.cast(self.global_step, tf.float32) + 1)), -5, 5,
name="normalized_state")
return self.normalized_state
else:
return self.vector_in |
Builds a set of hidden state encoders.
:param reuse: Whether to re-use the weights within the same scope.
:param scope: Graph scope for the encoder ops.
:param observation_input: Input vector.
:param h_size: Hidden layer size.
:param activation: What type of activation function to use for layers.
:param num_layers: number of hidden layers to create.
:return: List of hidden layer tensors. | def create_vector_observation_encoder(observation_input, h_size, activation, num_layers, scope,
reuse):
"""
Builds a set of hidden state encoders.
:param reuse: Whether to re-use the weights within the same scope.
:param scope: Graph scope for the encoder ops.
:param observation_input: Input vector.
:param h_size: Hidden layer size.
:param activation: What type of activation function to use for layers.
:param num_layers: number of hidden layers to create.
:return: List of hidden layer tensors.
"""
with tf.variable_scope(scope):
hidden = observation_input
for i in range(num_layers):
hidden = tf.layers.dense(hidden, h_size, activation=activation, reuse=reuse,
name="hidden_{}".format(i),
kernel_initializer=c_layers.variance_scaling_initializer(
1.0))
return hidden |
Builds a set of visual (CNN) encoders.
:param reuse: Whether to re-use the weights within the same scope.
:param scope: The scope of the graph within which to create the ops.
:param image_input: The placeholder for the image input to use.
:param h_size: Hidden layer size.
:param activation: What type of activation function to use for layers.
:param num_layers: number of hidden layers to create.
:return: List of hidden layer tensors. | def create_visual_observation_encoder(self, image_input, h_size, activation, num_layers, scope,
reuse):
"""
Builds a set of visual (CNN) encoders.
:param reuse: Whether to re-use the weights within the same scope.
:param scope: The scope of the graph within which to create the ops.
:param image_input: The placeholder for the image input to use.
:param h_size: Hidden layer size.
:param activation: What type of activation function to use for layers.
:param num_layers: number of hidden layers to create.
:return: List of hidden layer tensors.
"""
with tf.variable_scope(scope):
conv1 = tf.layers.conv2d(image_input, 16, kernel_size=[8, 8], strides=[4, 4],
activation=tf.nn.elu, reuse=reuse, name="conv_1")
conv2 = tf.layers.conv2d(conv1, 32, kernel_size=[4, 4], strides=[2, 2],
activation=tf.nn.elu, reuse=reuse, name="conv_2")
hidden = c_layers.flatten(conv2)
with tf.variable_scope(scope + '/' + 'flat_encoding'):
hidden_flat = self.create_vector_observation_encoder(hidden, h_size, activation,
num_layers, scope, reuse)
return hidden_flat |
Creates a masking layer for the discrete actions
:param all_logits: The concatenated unnormalized action probabilities for all branches
:param action_masks: The mask for the logits. Must be of dimension [None x total_number_of_action]
:param action_size: A list containing the number of possible actions for each branch
:return: The action output dimension [batch_size, num_branches] and the concatenated normalized logits | def create_discrete_action_masking_layer(all_logits, action_masks, action_size):
"""
Creates a masking layer for the discrete actions
:param all_logits: The concatenated unnormalized action probabilities for all branches
:param action_masks: The mask for the logits. Must be of dimension [None x total_number_of_action]
:param action_size: A list containing the number of possible actions for each branch
:return: The action output dimension [batch_size, num_branches] and the concatenated normalized logits
"""
action_idx = [0] + list(np.cumsum(action_size))
branches_logits = [all_logits[:, action_idx[i]:action_idx[i + 1]] for i in range(len(action_size))]
branch_masks = [action_masks[:, action_idx[i]:action_idx[i + 1]] for i in range(len(action_size))]
raw_probs = [tf.multiply(tf.nn.softmax(branches_logits[k]) + 1.0e-10, branch_masks[k])
for k in range(len(action_size))]
normalized_probs = [
tf.divide(raw_probs[k], tf.reduce_sum(raw_probs[k], axis=1, keepdims=True))
for k in range(len(action_size))]
output = tf.concat([tf.multinomial(tf.log(normalized_probs[k]), 1) for k in range(len(action_size))], axis=1)
return output, tf.concat([tf.log(normalized_probs[k] + 1.0e-10) for k in range(len(action_size))], axis=1) |
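The masking layer renormalizes each branch's softmax over the allowed actions only. The same arithmetic in NumPy for a single branch, with invented logits and a mask that forbids the middle action:

import numpy as np

logits = np.array([[2.0, 1.0, 0.5]])
mask = np.array([[1.0, 0.0, 1.0]])       # action 1 is disallowed

probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
raw = (probs + 1e-10) * mask
normalized = raw / raw.sum(axis=1, keepdims=True)
# `normalized` sums to 1 over the allowed actions; its log feeds the multinomial sampler.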
Creates encoding stream for observations.
:param num_streams: Number of streams to create.
:param h_size: Size of hidden linear layers in stream.
:param num_layers: Number of hidden linear layers in stream.
:return: List of encoded streams. | def create_observation_streams(self, num_streams, h_size, num_layers):
"""
Creates encoding stream for observations.
:param num_streams: Number of streams to create.
:param h_size: Size of hidden linear layers in stream.
:param num_layers: Number of hidden linear layers in stream.
:return: List of encoded streams.
"""
brain = self.brain
activation_fn = self.swish
self.visual_in = []
for i in range(brain.number_visual_observations):
visual_input = self.create_visual_input(brain.camera_resolutions[i],
name="visual_observation_" + str(i))
self.visual_in.append(visual_input)
vector_observation_input = self.create_vector_input()
final_hiddens = []
for i in range(num_streams):
visual_encoders = []
hidden_state, hidden_visual = None, None
if self.vis_obs_size > 0:
for j in range(brain.number_visual_observations):
encoded_visual = self.create_visual_observation_encoder(self.visual_in[j],
h_size,
activation_fn,
num_layers,
"main_graph_{}_encoder{}"
.format(i, j), False)
visual_encoders.append(encoded_visual)
hidden_visual = tf.concat(visual_encoders, axis=1)
if brain.vector_observation_space_size > 0:
hidden_state = self.create_vector_observation_encoder(vector_observation_input,
h_size, activation_fn,
num_layers,
"main_graph_{}".format(i),
False)
if hidden_state is not None and hidden_visual is not None:
final_hidden = tf.concat([hidden_visual, hidden_state], axis=1)
elif hidden_state is None and hidden_visual is not None:
final_hidden = hidden_visual
elif hidden_state is not None and hidden_visual is None:
final_hidden = hidden_state
else:
raise Exception("No valid network configuration possible. "
"There are no states or observations in this brain")
final_hiddens.append(final_hidden)
return final_hiddens |
Builds a recurrent encoder for either state or observations (LSTM).
:param sequence_length: Length of sequence to unroll.
:param input_state: The input tensor to the LSTM cell.
:param memory_in: The input memory to the LSTM cell.
:param name: The scope of the LSTM cell. | def create_recurrent_encoder(input_state, memory_in, sequence_length, name='lstm'):
"""
Builds a recurrent encoder for either state or observations (LSTM).
:param sequence_length: Length of sequence to unroll.
:param input_state: The input tensor to the LSTM cell.
:param memory_in: The input memory to the LSTM cell.
:param name: The scope of the LSTM cell.
"""
s_size = input_state.get_shape().as_list()[1]
m_size = memory_in.get_shape().as_list()[1]
lstm_input_state = tf.reshape(input_state, shape=[-1, sequence_length, s_size])
memory_in = tf.reshape(memory_in[:, :], [-1, m_size])
_half_point = int(m_size / 2)
with tf.variable_scope(name):
rnn_cell = tf.contrib.rnn.BasicLSTMCell(_half_point)
lstm_vector_in = tf.contrib.rnn.LSTMStateTuple(memory_in[:, :_half_point],
memory_in[:, _half_point:])
recurrent_output, lstm_state_out = tf.nn.dynamic_rnn(rnn_cell, lstm_input_state,
initial_state=lstm_vector_in)
recurrent_output = tf.reshape(recurrent_output, shape=[-1, _half_point])
return recurrent_output, tf.concat([lstm_state_out.c, lstm_state_out.h], axis=1) |
Creates Continuous control actor-critic model.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers. | def create_cc_actor_critic(self, h_size, num_layers):
"""
Creates Continuous control actor-critic model.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
"""
hidden_streams = self.create_observation_streams(2, h_size, num_layers)
if self.use_recurrent:
self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32,
name='recurrent_in')
_half_point = int(self.m_size / 2)
hidden_policy, memory_policy_out = self.create_recurrent_encoder(
hidden_streams[0], self.memory_in[:, :_half_point], self.sequence_length,
name='lstm_policy')
hidden_value, memory_value_out = self.create_recurrent_encoder(
hidden_streams[1], self.memory_in[:, _half_point:], self.sequence_length,
name='lstm_value')
self.memory_out = tf.concat([memory_policy_out, memory_value_out], axis=1,
name='recurrent_out')
else:
hidden_policy = hidden_streams[0]
hidden_value = hidden_streams[1]
mu = tf.layers.dense(hidden_policy, self.act_size[0], activation=None,
kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01))
log_sigma_sq = tf.get_variable("log_sigma_squared", [self.act_size[0]], dtype=tf.float32,
initializer=tf.zeros_initializer())
sigma_sq = tf.exp(log_sigma_sq)
self.epsilon = tf.placeholder(shape=[None, self.act_size[0]], dtype=tf.float32, name='epsilon')
# Clip and scale output to ensure actions are always within [-1, 1] range.
self.output_pre = mu + tf.sqrt(sigma_sq) * self.epsilon
output_post = tf.clip_by_value(self.output_pre, -3, 3) / 3
self.output = tf.identity(output_post, name='action')
self.selected_actions = tf.stop_gradient(output_post)
# Compute probability of model output.
all_probs = - 0.5 * tf.square(tf.stop_gradient(self.output_pre) - mu) / sigma_sq \
- 0.5 * tf.log(2.0 * np.pi) - 0.5 * log_sigma_sq
self.all_log_probs = tf.identity(all_probs, name='action_probs')
self.entropy = 0.5 * tf.reduce_mean(tf.log(2 * np.pi * np.e) + log_sigma_sq)
value = tf.layers.dense(hidden_value, 1, activation=None)
self.value = tf.identity(value, name="value_estimate")
self.all_old_log_probs = tf.placeholder(shape=[None, self.act_size[0]], dtype=tf.float32,
name='old_probabilities')
# We keep these tensors the same name, but use new nodes to keep code parallelism with discrete control.
self.log_probs = tf.reduce_sum((tf.identity(self.all_log_probs)), axis=1, keepdims=True)
self.old_log_probs = tf.reduce_sum((tf.identity(self.all_old_log_probs)), axis=1,
keepdims=True) |
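The `all_probs` expression above is the per-dimension log-density of the Gaussian policy, log N(a; mu, sigma^2). A quick NumPy check of the closed form with toy values:

import numpy as np

mu, log_sigma_sq, action = 0.3, np.log(0.25), 0.5
sigma_sq = np.exp(log_sigma_sq)

log_prob = (-0.5 * (action - mu) ** 2 / sigma_sq
            - 0.5 * np.log(2.0 * np.pi)
            - 0.5 * log_sigma_sq)
# Equivalent to -0.5 * ((action - mu) / sigma)**2 - log(sigma) - 0.5 * log(2 * pi).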
Creates Discrete control actor-critic model.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers. | def create_dc_actor_critic(self, h_size, num_layers):
"""
Creates Discrete control actor-critic model.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
"""
hidden_streams = self.create_observation_streams(1, h_size, num_layers)
hidden = hidden_streams[0]
if self.use_recurrent:
self.prev_action = tf.placeholder(shape=[None, len(self.act_size)], dtype=tf.int32,
name='prev_action')
prev_action_oh = tf.concat([
tf.one_hot(self.prev_action[:, i], self.act_size[i]) for i in
range(len(self.act_size))], axis=1)
hidden = tf.concat([hidden, prev_action_oh], axis=1)
self.memory_in = tf.placeholder(shape=[None, self.m_size], dtype=tf.float32,
name='recurrent_in')
hidden, memory_out = self.create_recurrent_encoder(hidden, self.memory_in,
self.sequence_length)
self.memory_out = tf.identity(memory_out, name='recurrent_out')
policy_branches = []
for size in self.act_size:
policy_branches.append(tf.layers.dense(hidden, size, activation=None, use_bias=False,
kernel_initializer=c_layers.variance_scaling_initializer(factor=0.01)))
self.all_log_probs = tf.concat([branch for branch in policy_branches], axis=1, name="action_probs")
self.action_masks = tf.placeholder(shape=[None, sum(self.act_size)], dtype=tf.float32, name="action_masks")
output, normalized_logits = self.create_discrete_action_masking_layer(
self.all_log_probs, self.action_masks, self.act_size)
self.output = tf.identity(output)
self.normalized_logits = tf.identity(normalized_logits, name='action')
value = tf.layers.dense(hidden, 1, activation=None)
self.value = tf.identity(value, name="value_estimate")
self.action_holder = tf.placeholder(
shape=[None, len(policy_branches)], dtype=tf.int32, name="action_holder")
self.action_oh = tf.concat([
tf.one_hot(self.action_holder[:, i], self.act_size[i]) for i in range(len(self.act_size))], axis=1)
self.selected_actions = tf.stop_gradient(self.action_oh)
self.all_old_log_probs = tf.placeholder(
shape=[None, sum(self.act_size)], dtype=tf.float32, name='old_probabilities')
_, old_normalized_logits = self.create_discrete_action_masking_layer(
self.all_old_log_probs, self.action_masks, self.act_size)
action_idx = [0] + list(np.cumsum(self.act_size))
self.entropy = tf.reduce_sum((tf.stack([
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.nn.softmax(self.all_log_probs[:, action_idx[i]:action_idx[i + 1]]),
logits=self.all_log_probs[:, action_idx[i]:action_idx[i + 1]])
for i in range(len(self.act_size))], axis=1)), axis=1)
self.log_probs = tf.reduce_sum((tf.stack([
-tf.nn.softmax_cross_entropy_with_logits_v2(
labels=self.action_oh[:, action_idx[i]:action_idx[i + 1]],
logits=normalized_logits[:, action_idx[i]:action_idx[i + 1]]
)
for i in range(len(self.act_size))], axis=1)), axis=1, keepdims=True)
self.old_log_probs = tf.reduce_sum((tf.stack([
-tf.nn.softmax_cross_entropy_with_logits_v2(
labels=self.action_oh[:, action_idx[i]:action_idx[i + 1]],
logits=old_normalized_logits[:, action_idx[i]:action_idx[i + 1]]
)
for i in range(len(self.act_size))], axis=1)), axis=1, keepdims=True) |
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method. | def add_experiences(self, curr_info: AllBrainInfo, next_info: AllBrainInfo,
take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo).
:param take_action_outputs: The outputs of the take action method.
"""
# Used to collect teacher experience into training buffer
info_teacher = curr_info[self.brain_to_imitate]
next_info_teacher = next_info[self.brain_to_imitate]
for agent_id in info_teacher.agents:
self.demonstration_buffer[agent_id].last_brain_info = info_teacher
for agent_id in next_info_teacher.agents:
stored_info_teacher = self.demonstration_buffer[agent_id].last_brain_info
if stored_info_teacher is None:
continue
else:
idx = stored_info_teacher.agents.index(agent_id)
next_idx = next_info_teacher.agents.index(agent_id)
if stored_info_teacher.text_observations[idx] != "":
info_teacher_record, info_teacher_reset = \
stored_info_teacher.text_observations[idx].lower().split(",")
next_info_teacher_record, next_info_teacher_reset = \
next_info_teacher.text_observations[idx]. \
lower().split(",")
if next_info_teacher_reset == "true":
self.demonstration_buffer.reset_update_buffer()
else:
info_teacher_record, next_info_teacher_record = "true", "true"
if info_teacher_record == "true" and next_info_teacher_record == "true":
if not stored_info_teacher.local_done[idx]:
for i in range(self.policy.vis_obs_size):
self.demonstration_buffer[agent_id]['visual_obs%d' % i] \
.append(stored_info_teacher.visual_observations[i][idx])
if self.policy.use_vec_obs:
self.demonstration_buffer[agent_id]['vector_obs'] \
.append(stored_info_teacher.vector_observations[idx])
if self.policy.use_recurrent:
if stored_info_teacher.memories.shape[1] == 0:
stored_info_teacher.memories = np.zeros(
(len(stored_info_teacher.agents),
self.policy.m_size))
self.demonstration_buffer[agent_id]['memory'].append(
stored_info_teacher.memories[idx])
self.demonstration_buffer[agent_id]['actions'].append(
next_info_teacher.previous_vector_actions[next_idx])
super(OnlineBCTrainer, self).add_experiences(curr_info, next_info, take_action_outputs) |
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current AllBrainInfo
:param next_info: Next AllBrainInfo | def process_experiences(self, current_info: AllBrainInfo, next_info: AllBrainInfo):
"""
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current AllBrainInfo
:param next_info: Next AllBrainInfo
"""
info_teacher = next_info[self.brain_to_imitate]
for l in range(len(info_teacher.agents)):
teacher_action_list = len(self.demonstration_buffer[info_teacher.agents[l]]['actions'])
horizon_reached = teacher_action_list > self.trainer_parameters['time_horizon']
teacher_filled = len(self.demonstration_buffer[info_teacher.agents[l]]['actions']) > 0
if (info_teacher.local_done[l] or horizon_reached) and teacher_filled:
agent_id = info_teacher.agents[l]
self.demonstration_buffer.append_update_buffer(
agent_id, batch_size=None, training_length=self.policy.sequence_length)
self.demonstration_buffer[agent_id].reset_agent()
super(OnlineBCTrainer, self).process_experiences(current_info, next_info) |
Yield items from any nested iterable; see REF. | def flatten(items,enter=lambda x:isinstance(x, list)):
# http://stackoverflow.com/a/40857703
# https://github.com/ctmakro/canton/blob/master/canton/misc.py
"""Yield items from any nested iterable; see REF."""
for x in items:
if enter(x):
yield from flatten(x)
else:
yield x |
A value in replace_with_strings can be either a single string or a list of strings | def replace_strings_in_list(array_of_strings, replace_with_strings):
"A value in replace_with_strings can be either a single string or a list of strings"
potentially_nested_list = [replace_with_strings.get(s) or s for s in array_of_strings]
return list(flatten(potentially_nested_list)) |
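A small usage example of `flatten` and `replace_strings_in_list` together (mapping and names invented):

mapping = {'relu': ['max', 'const_zero'], 'conv': 'conv2d'}
replace_strings_in_list(['conv', 'relu', 'bias'], mapping)
# -> ['conv2d', 'max', 'const_zero', 'bias']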
Preserves the order of elements in the list | def remove_duplicates_from_list(array):
"Preserves the order of elements in the list"
output = []
unique = set()
for a in array:
if a not in unique:
unique.add(a)
output.append(a)
return output |
Convert from NHWC|NCHW => HW | def pool_to_HW(shape, data_frmt):
""" Convert from NHWC|NCHW => HW
"""
if len(shape) != 4:
return shape # Not NHWC|NCHW, return as is
if data_frmt == 'NCHW':
return [shape[2], shape[3]]
return [shape[1], shape[2]] |
Converts a TensorFlow model into a Barracuda model.
:param source_file: The TensorFlow Model
:param target_file: The name of the file the converted model will be saved to
:param trim_unused_by_output: The regexp to match output nodes to remain in the model. All other unconnected nodes will be removed.
:param verbose: If True, will display debug messages
:param compress_f16: If true, the float values will be converted to f16
:return: | def convert(source_file, target_file, trim_unused_by_output="", verbose=False, compress_f16=False):
"""
Converts a TensorFlow model into a Barracuda model.
:param source_file: The TensorFlow Model
:param target_file: The name of the file the converted model will be saved to
:param trim_unused_by_output: The regexp to match output nodes to remain in the model. All other unconnected nodes will be removed.
:param verbose: If True, will display debug messages
:param compress_f16: If true, the float values will be converted to f16
:return:
"""
if type(verbose) == bool:
args = Struct()
args.verbose = verbose
args.print_layers = verbose
args.print_source_json = verbose
args.print_barracuda_json = verbose
args.print_layer_links = verbose
args.print_patterns = verbose
args.print_tensors = verbose
else:
args = verbose
# Load Tensorflow model
print("Converting %s to %s" % (source_file, target_file))
f = open(source_file, 'rb')
i_model = tf.GraphDef()
i_model.ParseFromString(f.read())
if args.verbose:
print('OP_TYPES:', {layer.op for layer in i_model.node})
if args.print_source_json or args.verbose:
for layer in i_model.node:
if not layer.op == 'Const':
print('MODEL:', MessageToJson(layer) + ",")
# Convert
o_model = barracuda.Model()
o_model.layers, o_input_shapes, o_model.tensors, o_model.memories = \
process_model(i_model, args)
# Cleanup unconnected Identities (they might linger after processing complex node patterns like LSTM)
def cleanup_layers(layers):
all_layers = {l.name for l in layers}
all_inputs = {i for l in layers for i in l.inputs}
def is_unconnected_identity(layer):
if layer.class_name == 'Activation' and layer.activation == 0: # Identity
assert(len(layer.inputs) == 1)
if layer.inputs[0] not in all_layers and layer.name not in all_inputs:
return True
return False
return [l for l in layers if not is_unconnected_identity(l)]
o_model.layers = cleanup_layers(o_model.layers)
all_inputs = {i for l in o_model.layers for i in l.inputs}
embedded_tensors = {t.name for l in o_model.layers for t in l.tensors}
# Find global tensors
def dims_to_barracuda_shape(dims):
shape = list(dims)
while len(shape) < 4:
shape = [1] + shape
return shape
o_model.globals = [t for t in o_model.tensors if t not in all_inputs and t not in embedded_tensors]
#for x in global_tensors:
# shape = dims_to_barracuda_shape(get_tensor_dims(o_model.tensors[x]))
# o_globals += [Struct(
# name = x,
# shape = shape,
# data = np.reshape(get_tensor_data(o_model.tensors[x]), shape).astype(np.float32))]
# Trim
if trim_unused_by_output:
o_model.layers = barracuda.trim(o_model.layers, trim_unused_by_output, args.verbose)
# Create load layers for constants
const_tensors = [i for i in all_inputs if i in o_model.tensors]
const_tensors += o_model.globals
for x in const_tensors:
shape = dims_to_barracuda_shape(get_tensor_dims(o_model.tensors[x]))
o_l = Struct(
type = 255, # Load
class_name = "Const",
name = x,
pads = [0,0,0,0],
strides = [],
pool_size = [],
axis = -1,
alpha = 1,
beta = 0,
activation = 0,
inputs = [],
tensors = [Struct(
name = x,
shape = shape,
data = np.reshape(get_tensor_data(o_model.tensors[x]), shape).astype(np.float32))]
)
o_model.layers.insert(0, o_l)
# Find model inputs & outputs
all_layers = {l.name for l in o_model.layers}
# global inputs => are inputs that are NOT connected to any layer in the network
# global outputs => are outputs that are NOT feeding any layer in the network OR are coming from Identity layers
o_model.inputs = {i:o_input_shapes[i] for l in o_model.layers for i in l.inputs if i not in all_layers and i not in o_model.memories}
def is_output_layer(layer):
if layer.class_name == 'Const': # Constants never count as global output even when unconnected
return False
if layer.name not in all_inputs: # this layer is not inputing to any other layer
return True
if layer.class_name == 'Activation' and layer.activation == 0: # Identity marks global output
return True
return False
o_model.outputs = [l.name for l in o_model.layers if is_output_layer(l)]
# Compress
if compress_f16:
o_model = barracuda.compress(o_model)
# Sort model so that layer inputs are always ready upfront
o_model.layers = barracuda.sort(o_model.layers, o_model.inputs, o_model.memories, args.verbose)
# Summary
barracuda.summary(o_model,
print_layer_links = args.print_layer_links or args.verbose,
print_barracuda_json = args.print_barracuda_json or args.verbose,
print_tensors = args.print_tensors or args.verbose)
# Write to file
barracuda.write(o_model, target_file)
print('DONE: wrote', target_file, 'file.') |
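A hedged example of invoking the converter above; the file names and the output-matching regexp are placeholders:

# Convert a frozen TensorFlow graph to Barracuda format, keeping only nodes
# reachable from outputs whose names match "action".
convert('models/ppo/frozen_graph_def.pb', 'models/ppo/model.nn',
        trim_unused_by_output='action', verbose=True)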
Loads demonstration file and uses it to fill training buffer.
:param file_path: Location of demonstration file (.demo).
:param sequence_length: Length of trajectories to fill buffer.
:return: | def demo_to_buffer(file_path, sequence_length):
"""
Loads demonstration file and uses it to fill training buffer.
:param file_path: Location of demonstration file (.demo).
:param sequence_length: Length of trajectories to fill buffer.
:return:
"""
brain_params, brain_infos, _ = load_demonstration(file_path)
demo_buffer = make_demo_buffer(brain_infos, brain_params, sequence_length)
return brain_params, demo_buffer |
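Loading a recorded demonstration then reduces to a single call (the file path is a placeholder, and the exact contents of the returned buffer depend on `make_demo_buffer`, which is not shown here):

brain_params, demo_buffer = demo_to_buffer('demos/ExpertPush.demo', sequence_length=64)
# demo_buffer now holds the demonstration trajectories, batched into
# sequences of length 64 for a behavioral-cloning trainer.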
Loads and parses a demonstration file.
:param file_path: Location of demonstration file (.demo).
:return: BrainParameter and list of BrainInfos containing demonstration data. | def load_demonstration(file_path):
"""
Loads and parses a demonstration file.
:param file_path: Location of demonstration file (.demo).
:return: BrainParameter and list of BrainInfos containing demonstration data.
"""
# First 32 bytes of file dedicated to meta-data.
INITIAL_POS = 33
if not os.path.isfile(file_path):
raise FileNotFoundError("The demonstration file {} does not exist.".format(file_path))
file_extension = pathlib.Path(file_path).suffix
if file_extension != '.demo':
raise ValueError("The file is not a '.demo' file. Please provide a file with the "
"correct extension.")
brain_params = None
brain_infos = []
data = open(file_path, "rb").read()
next_pos, pos, obs_decoded = 0, 0, 0
total_expected = 0
while pos < len(data):
next_pos, pos = _DecodeVarint32(data, pos)
if obs_decoded == 0:
meta_data_proto = DemonstrationMetaProto()
meta_data_proto.ParseFromString(data[pos:pos + next_pos])
total_expected = meta_data_proto.number_steps
pos = INITIAL_POS
if obs_decoded == 1:
brain_param_proto = BrainParametersProto()
brain_param_proto.ParseFromString(data[pos:pos + next_pos])
brain_params = BrainParameters.from_proto(brain_param_proto)
pos += next_pos
if obs_decoded > 1:
agent_info = AgentInfoProto()
agent_info.ParseFromString(data[pos:pos + next_pos])
brain_info = BrainInfo.from_agent_proto([agent_info], brain_params)
brain_infos.append(brain_info)
if len(brain_infos) == total_expected:
break
pos += next_pos
obs_decoded += 1
return brain_params, brain_infos, total_expected |
Saves current model to checkpoint folder.
:param steps: Current number of steps in training process.
:param saver: Tensorflow saver for session. | def _save_model(self, steps=0):
"""
Saves current model to checkpoint folder.
:param steps: Current number of steps in training process.
:param saver: Tensorflow saver for session.
"""
for brain_name in self.trainers.keys():
self.trainers[brain_name].save_model()
self.logger.info('Saved Model') |
Write all CSV metrics
:return: | def _write_training_metrics(self):
"""
Write all CSV metrics
:return:
"""
for brain_name in self.trainers.keys():
if brain_name in self.trainer_metrics:
self.trainers[brain_name].write_training_metrics() |
Exports latest saved models to .nn format for Unity embedding. | def _export_graph(self):
"""
Exports latest saved models to .nn format for Unity embedding.
"""
for brain_name in self.trainers.keys():
self.trainers[brain_name].export_model() |
Initialization of the trainers
:param trainer_config: The configurations of the trainers | def initialize_trainers(self, trainer_config: Dict[str, Dict[str, str]]):
"""
Initialization of the trainers
:param trainer_config: The configurations of the trainers
"""
trainer_parameters_dict = {}
for brain_name in self.external_brains:
trainer_parameters = trainer_config['default'].copy()
trainer_parameters['summary_path'] = '{basedir}/{name}'.format(
basedir=self.summaries_dir,
name=str(self.run_id) + '_' + brain_name)
trainer_parameters['model_path'] = '{basedir}/{name}'.format(
basedir=self.model_path,
name=brain_name)
trainer_parameters['keep_checkpoints'] = self.keep_checkpoints
if brain_name in trainer_config:
_brain_key = brain_name
while not isinstance(trainer_config[_brain_key], dict):
_brain_key = trainer_config[_brain_key]
for k in trainer_config[_brain_key]:
trainer_parameters[k] = trainer_config[_brain_key][k]
trainer_parameters_dict[brain_name] = trainer_parameters.copy()
for brain_name in self.external_brains:
if trainer_parameters_dict[brain_name]['trainer'] == 'offline_bc':
self.trainers[brain_name] = OfflineBCTrainer(
self.external_brains[brain_name],
trainer_parameters_dict[brain_name], self.train_model,
self.load_model, self.seed, self.run_id)
elif trainer_parameters_dict[brain_name]['trainer'] == 'online_bc':
self.trainers[brain_name] = OnlineBCTrainer(
self.external_brains[brain_name],
trainer_parameters_dict[brain_name], self.train_model,
self.load_model, self.seed, self.run_id)
elif trainer_parameters_dict[brain_name]['trainer'] == 'ppo':
self.trainers[brain_name] = PPOTrainer(
self.external_brains[brain_name],
self.meta_curriculum
.brains_to_curriculums[brain_name]
.min_lesson_length if self.meta_curriculum else 0,
trainer_parameters_dict[brain_name],
self.train_model, self.load_model, self.seed,
self.run_id)
self.trainer_metrics[brain_name] = self.trainers[brain_name].trainer_metrics
else:
raise UnityEnvironmentException('The trainer config contains '
'an unknown trainer type for '
'brain {}'
.format(brain_name)) |
Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment. | def _reset_env(self, env: BaseUnityEnvironment):
"""Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
"""
if self.meta_curriculum is not None:
return env.reset(train_mode=self.fast_simulation, config=self.meta_curriculum.get_config())
else:
return env.reset(train_mode=self.fast_simulation) |
Sends a shutdown signal to the unity environment, and closes the socket connection. | def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the socket connection.
"""
if self._socket is not None and self._conn is not None:
message_input = UnityMessage()
message_input.header.status = 400
self._communicator_send(message_input.SerializeToString())
if self._socket is not None:
self._socket.close()
self._socket = None
if self._socket is not None:
self._conn.close()
self._conn = None |
float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a; | def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon):
# https://github.com/Tencent/ncnn/blob/master/src/layer/batchnorm.cpp
""" float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
"""
scale = gamma / np.sqrt(var + epsilon)
bias = beta - gamma * mean / np.sqrt(var + epsilon)
return [scale, bias] |
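The fused `scale`/`bias` pair is algebraically equivalent to batch normalization with the original parameters: scale * x + bias == gamma * (x - mean) / sqrt(var + eps) + beta. A NumPy check with random values:

import numpy as np

gamma, beta = np.array([1.5, 0.7]), np.array([0.1, -0.2])
mean, var, eps = np.array([0.3, -0.1]), np.array([0.9, 0.4]), 1e-5
x = np.random.randn(4, 2)

scale, bias = fuse_batchnorm_weights(gamma, beta, mean, var, eps)
assert np.allclose(scale * x + bias, gamma * (x - mean) / np.sqrt(var + eps) + beta)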
- Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi) | def rnn(name, input, state, kernel, bias, new_state, number_of_gates = 2):
''' - Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
'''
nn = Build(name)
nn.tanh(
nn.mad(kernel=kernel, bias=bias,
x=nn.concat(input, state)),
out=new_state)
return nn.layers |
- zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz)
- rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr)
- ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh)
- Ht = (1-zt).ht + zt.Ht_1 | def gru(name, input, state, kernel_r, kernel_u, kernel_c, bias_r, bias_u, bias_c, new_state, number_of_gates = 2):
''' - zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz)
- rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr)
- ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh)
- Ht = (1-zt).ht + zt.Ht_1
'''
nn = Build(name)
inputs = nn.concat(input, state)
u = nn.sigmoid(nn.mad(inputs, kernel_u, bias_u))
r = nn.sigmoid(nn.mad(inputs, kernel_r, bias_r))
r_state = nn.mul(r, state)
c = nn.tanh(nn.mad(kernel=kernel_c, bias=bias_c,
x=nn.concat(input, r_state)))
# new_h = u' * state + (1 - u') * c'
# = u' * state + c' - u' * c'
# u' * state + c'
nn.add(nn.mul(u, state), c)
# - u' * c'
nn.sub(nn._, nn.mul(u, c),
out=new_state)
return nn.layers |
Full:
- it = f(Xt*Wi + Ht_1*Ri + Pi . Ct_1 + Wbi + Rbi)
- ft = f(Xt*Wf + Ht_1*Rf + Pf . Ct_1 + Wbf + Rbf)
- ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc)
- Ct = ft . Ct_1 + it . ct
- ot = f(Xt*Wo + Ht_1*Ro + Po . Ct + Wbo + Rbo)
- Ht = ot . h(Ct) | def lstm(name, input, state_c, state_h, kernel_i, kernel_j, kernel_f, kernel_o, bias_i, bias_j, bias_f, bias_o, new_state_c, new_state_h):
''' Full:
- it = f(Xt*Wi + Ht_1*Ri + Pi . Ct_1 + Wbi + Rbi)
- ft = f(Xt*Wf + Ht_1*Rf + Pf . Ct_1 + Wbf + Rbf)
- ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc)
- Ct = ft . Ct_1 + it . ct
- ot = f(Xt*Wo + Ht_1*Ro + Po . Ct + Wbo + Rbo)
- Ht = ot . h(Ct)
'''
''' No peephole:
- it = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi)
- ft = f(Xt*Wf + Ht_1*Rf + Wbf + Rbf)
- ct = g(Xt*Wc + Ht_1*Rc + Wbc + Rbc)
- Ct = ft . Ct_ + it . ct
- ot = f(Xt*Wo + Ht_1*Ro + Wbo + Rbo)
- Ht = ot . h(Ct)
'''
nn = Build(name)
inputs = nn.concat(input, state_h)
i = nn.sigmoid(nn.mad(x=inputs, kernel=kernel_i, bias=bias_i))
j = nn.tanh(nn.mad(inputs, kernel_j, bias_j))
f = nn.sigmoid(nn.mad(inputs, kernel_f, bias_f))
o = nn.sigmoid(nn.mad(inputs, kernel_o, bias_o))
# new_c = state_c * f' + i' * j'
nn.add(
nn.mul(state_c, f), nn.mul(i, j),
out=new_state_c)
# new_h =
nn.mul(o, nn.tanh(new_state_c),
out=new_state_h)
return nn.layers |
Performs update on model.
:param mini_batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update. | def update(self, mini_batch, num_sequences):
"""
Performs update on model.
:param mini_batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
"""
feed_dict = {self.model.dropout_rate: self.update_rate,
self.model.batch_size: num_sequences,
self.model.sequence_length: self.sequence_length}
if self.use_continuous_act:
feed_dict[self.model.true_action] = mini_batch['actions']. \
reshape([-1, self.brain.vector_action_space_size[0]])
else:
feed_dict[self.model.true_action] = mini_batch['actions'].reshape(
[-1, len(self.brain.vector_action_space_size)])
feed_dict[self.model.action_masks] = np.ones(
(num_sequences, sum(self.brain.vector_action_space_size)))
if self.use_vec_obs:
apparent_obs_size = self.brain.vector_observation_space_size * \
self.brain.num_stacked_vector_observations
feed_dict[self.model.vector_in] = mini_batch['vector_obs'] \
.reshape([-1,apparent_obs_size])
for i, _ in enumerate(self.model.visual_in):
visual_obs = mini_batch['visual_obs%d' % i]
feed_dict[self.model.visual_in[i]] = visual_obs
if self.use_recurrent:
feed_dict[self.model.memory_in] = np.zeros([num_sequences, self.m_size])
run_out = self._execute_model(feed_dict, self.update_dict)
return run_out |
Increments the lesson number depending on the progress given.
:param measure_val: Measure of progress (either reward or percentage
steps completed).
:return: Whether the lesson was incremented. | def increment_lesson(self, measure_val):
"""
Increments the lesson number depending on the progress given.
:param measure_val: Measure of progress (either reward or percentage
steps completed).
        :return: Whether the lesson was incremented.
"""
if not self.data or not measure_val or math.isnan(measure_val):
return False
if self.data['signal_smoothing']:
measure_val = self.smoothing_value * 0.25 + 0.75 * measure_val
self.smoothing_value = measure_val
if self.lesson_num < self.max_lesson_num:
if measure_val > self.data['thresholds'][self.lesson_num]:
self.lesson_num += 1
config = {}
parameters = self.data['parameters']
for key in parameters:
config[key] = parameters[key][self.lesson_num]
logger.info('{0} lesson changed. Now in lesson {1}: {2}'
.format(self._brain_name,
self.lesson_num,
', '.join([str(x) + ' -> ' + str(config[x])
for x in config])))
return True
return False |
Returns reset parameters which correspond to the lesson.
:param lesson: The lesson you want to get the config of. If None, the
current lesson is returned.
:return: The configuration of the reset parameters. | def get_config(self, lesson=None):
"""
Returns reset parameters which correspond to the lesson.
:param lesson: The lesson you want to get the config of. If None, the
current lesson is returned.
:return: The configuration of the reset parameters.
"""
if not self.data:
return {}
if lesson is None:
lesson = self.lesson_num
lesson = max(0, min(lesson, self.max_lesson_num))
config = {}
parameters = self.data['parameters']
for key in parameters:
config[key] = parameters[key][lesson]
return config |
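For reference, a minimal sketch of the curriculum data the two methods above read; the keys (signal_smoothing, thresholds, parameters) come from the code, while the concrete parameter names and values are hypothetical:

# Hypothetical curriculum data, shaped after the keys the methods above access.
data = {
    'signal_smoothing': True,
    'thresholds': [0.5, 0.75],            # one threshold per lesson transition
    'parameters': {
        'ball_scale': [3.0, 2.0, 1.0],    # one value per lesson
        'gravity': [9.8, 9.8, 12.0],
    },
}

def config_for_lesson(data, lesson):
    # Mirrors get_config: clamp the lesson index, then pick each parameter value.
    max_lesson = len(data['thresholds'])
    lesson = max(0, min(lesson, max_lesson))
    return {key: values[lesson] for key, values in data['parameters'].items()}

print(config_for_lesson(data, 1))  # {'ball_scale': 2.0, 'gravity': 9.8}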
Computes generalized advantage estimate for use in updating policy.
:param rewards: list of rewards for time-steps t to T.
:param value_next: Value estimate for time-step T+1.
:param value_estimates: list of value estimates for time-steps t to T.
:param gamma: Discount factor.
:param lambd: GAE weighting factor.
:return: list of advantage estimates for time-steps t to T. | def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):
"""
Computes generalized advantage estimate for use in updating policy.
:param rewards: list of rewards for time-steps t to T.
:param value_next: Value estimate for time-step T+1.
:param value_estimates: list of value estimates for time-steps t to T.
:param gamma: Discount factor.
    :param lambd: GAE weighting factor.
:return: list of advantage estimates for time-steps t to T.
"""
value_estimates = np.asarray(value_estimates.tolist() + [value_next])
delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]
advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)
return advantage |
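discount_rewards is not shown here; below is a self-contained sketch of the same computation with an explicit backward recursion (A_t = delta_t + gamma * lambd * A_{t+1}), useful for checking the formula on a small example:

import numpy as np

def gae_reference(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):
    # delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), then discount by gamma * lambd.
    values = np.append(value_estimates, value_next)
    deltas = rewards + gamma * values[1:] - values[:-1]
    advantages = np.zeros_like(deltas)
    running = 0.0
    for t in reversed(range(len(deltas))):
        running = deltas[t] + gamma * lambd * running
        advantages[t] = running
    return advantages

rewards = np.array([0.0, 0.0, 1.0])
values = np.array([0.5, 0.6, 0.7])
print(gae_reference(rewards, values))  # same quantity get_gae returns for these inputs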
Increments the step count of the trainer and updates the last reward. | def increment_step_and_update_last_reward(self):
"""
        Increments the step count of the trainer and updates the last reward.
"""
if len(self.stats['Environment/Cumulative Reward']) > 0:
mean_reward = np.mean(self.stats['Environment/Cumulative Reward'])
self.policy.update_reward(mean_reward)
self.policy.increment_step()
self.step = self.policy.get_current_step() |
Constructs a BrainInfo which contains the most recent previous experiences for all agents info
which correspond to the agents in a provided next_info.
:BrainInfo next_info: A t+1 BrainInfo.
:return: curr_info: Reconstructed BrainInfo to match agents of next_info. | def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:
"""
Constructs a BrainInfo which contains the most recent previous experiences for all agents info
which correspond to the agents in a provided next_info.
:BrainInfo next_info: A t+1 BrainInfo.
:return: curr_info: Reconstructed BrainInfo to match agents of next_info.
"""
        visual_observations = [[] for _ in next_info.visual_observations]  # one list per visual observation stream
vector_observations = []
text_observations = []
memories = []
rewards = []
local_dones = []
max_reacheds = []
agents = []
prev_vector_actions = []
prev_text_actions = []
action_masks = []
for agent_id in next_info.agents:
agent_brain_info = self.training_buffer[agent_id].last_brain_info
if agent_brain_info is None:
agent_brain_info = next_info
agent_index = agent_brain_info.agents.index(agent_id)
for i in range(len(next_info.visual_observations)):
visual_observations[i].append(agent_brain_info.visual_observations[i][agent_index])
vector_observations.append(agent_brain_info.vector_observations[agent_index])
text_observations.append(agent_brain_info.text_observations[agent_index])
if self.policy.use_recurrent:
if len(agent_brain_info.memories) > 0:
memories.append(agent_brain_info.memories[agent_index])
else:
memories.append(self.policy.make_empty_memory(1))
rewards.append(agent_brain_info.rewards[agent_index])
local_dones.append(agent_brain_info.local_done[agent_index])
max_reacheds.append(agent_brain_info.max_reached[agent_index])
agents.append(agent_brain_info.agents[agent_index])
prev_vector_actions.append(agent_brain_info.previous_vector_actions[agent_index])
prev_text_actions.append(agent_brain_info.previous_text_actions[agent_index])
action_masks.append(agent_brain_info.action_masks[agent_index])
if self.policy.use_recurrent:
memories = np.vstack(memories)
curr_info = BrainInfo(visual_observations, vector_observations, text_observations,
memories, rewards, agents, local_dones, prev_vector_actions,
prev_text_actions, max_reacheds, action_masks)
return curr_info |
Adds experiences to each agent's experience history.
:param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.
:param next_all_info: Dictionary of all next brains and corresponding BrainInfo.
:param take_action_outputs: The outputs of the Policy's get_action method. | def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):
"""
Adds experiences to each agent's experience history.
:param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.
        :param next_all_info: Dictionary of all next brains and corresponding BrainInfo.
:param take_action_outputs: The outputs of the Policy's get_action method.
"""
self.trainer_metrics.start_experience_collection_timer()
if take_action_outputs:
self.stats['Policy/Value Estimate'].append(take_action_outputs['value'].mean())
self.stats['Policy/Entropy'].append(take_action_outputs['entropy'].mean())
self.stats['Policy/Learning Rate'].append(take_action_outputs['learning_rate'])
curr_info = curr_all_info[self.brain_name]
next_info = next_all_info[self.brain_name]
for agent_id in curr_info.agents:
self.training_buffer[agent_id].last_brain_info = curr_info
self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs
if curr_info.agents != next_info.agents:
curr_to_use = self.construct_curr_info(next_info)
else:
curr_to_use = curr_info
intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)
for agent_id in next_info.agents:
stored_info = self.training_buffer[agent_id].last_brain_info
stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs
if stored_info is not None:
idx = stored_info.agents.index(agent_id)
next_idx = next_info.agents.index(agent_id)
if not stored_info.local_done[idx]:
for i, _ in enumerate(stored_info.visual_observations):
self.training_buffer[agent_id]['visual_obs%d' % i].append(
stored_info.visual_observations[i][idx])
self.training_buffer[agent_id]['next_visual_obs%d' % i].append(
next_info.visual_observations[i][next_idx])
if self.policy.use_vec_obs:
self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])
self.training_buffer[agent_id]['next_vector_in'].append(
next_info.vector_observations[next_idx])
if self.policy.use_recurrent:
if stored_info.memories.shape[1] == 0:
stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))
self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])
actions = stored_take_action_outputs['action']
if self.policy.use_continuous_act:
actions_pre = stored_take_action_outputs['pre_action']
self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])
epsilons = stored_take_action_outputs['random_normal_epsilon']
self.training_buffer[agent_id]['random_normal_epsilon'].append(
epsilons[idx])
else:
self.training_buffer[agent_id]['action_mask'].append(
stored_info.action_masks[idx], padding_value=1)
a_dist = stored_take_action_outputs['log_probs']
value = stored_take_action_outputs['value']
self.training_buffer[agent_id]['actions'].append(actions[idx])
self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])
self.training_buffer[agent_id]['masks'].append(1.0)
if self.use_curiosity:
self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +
intrinsic_rewards[next_idx])
else:
self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])
self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])
self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])
if agent_id not in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]
if self.use_curiosity:
if agent_id not in self.intrinsic_rewards:
self.intrinsic_rewards[agent_id] = 0
self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]
if not next_info.local_done[next_idx]:
if agent_id not in self.episode_steps:
self.episode_steps[agent_id] = 0
self.episode_steps[agent_id] += 1
self.trainer_metrics.end_experience_collection_timer() |
A signal that the Episode has ended. The buffer must be reset.
Gets called only when the academy resets. | def end_episode(self):
"""
A signal that the Episode has ended. The buffer must be reset.
        Gets called only when the academy resets.
"""
self.training_buffer.reset_local_buffers()
for agent_id in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
for agent_id in self.episode_steps:
self.episode_steps[agent_id] = 0
if self.use_curiosity:
for agent_id in self.intrinsic_rewards:
self.intrinsic_rewards[agent_id] = 0 |
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run | def is_ready_update(self):
"""
Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run
"""
size_of_buffer = len(self.training_buffer.update_buffer['actions'])
return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1) |
Uses training_buffer to update the policy. | def update_policy(self):
"""
        Uses training_buffer to update the policy.
"""
self.trainer_metrics.start_policy_update_timer(
number_experiences=len(self.training_buffer.update_buffer['actions']),
mean_return=float(np.mean(self.cumulative_returns_since_policy_update)))
n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)
value_total, policy_total, forward_total, inverse_total = [], [], [], []
advantages = self.training_buffer.update_buffer['advantages'].get_batch()
self.training_buffer.update_buffer['advantages'].set(
(advantages - advantages.mean()) / (advantages.std() + 1e-10))
num_epoch = self.trainer_parameters['num_epoch']
for _ in range(num_epoch):
self.training_buffer.update_buffer.shuffle()
buffer = self.training_buffer.update_buffer
for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):
start = l * n_sequences
end = (l + 1) * n_sequences
run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)
value_total.append(run_out['value_loss'])
policy_total.append(np.abs(run_out['policy_loss']))
if self.use_curiosity:
inverse_total.append(run_out['inverse_loss'])
forward_total.append(run_out['forward_loss'])
self.stats['Losses/Value Loss'].append(np.mean(value_total))
self.stats['Losses/Policy Loss'].append(np.mean(policy_total))
if self.use_curiosity:
self.stats['Losses/Forward Loss'].append(np.mean(forward_total))
self.stats['Losses/Inverse Loss'].append(np.mean(inverse_total))
self.training_buffer.reset_update_buffer()
self.trainer_metrics.end_policy_update() |
Resets the state of the environment and returns an initial observation.
In the case of multi-agent environments, this is a list.
Returns: observation (object/list): the initial observation of the
space. | def reset(self):
"""Resets the state of the environment and returns an initial observation.
In the case of multi-agent environments, this is a list.
Returns: observation (object/list): the initial observation of the
space.
"""
info = self._env.reset()[self.brain_name]
n_agents = len(info.agents)
self._check_agents(n_agents)
self.game_over = False
if not self._multiagent:
obs, reward, done, info = self._single_step(info)
else:
obs, reward, done, info = self._multi_step(info)
return obs |
Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
In the case of multi-agent environments, these are lists.
Args:
    action (object/list): an action provided by the agent
Returns:
observation (object/list): agent's observation of the current environment
reward (float/list) : amount of reward returned after previous action
done (boolean/list): whether the episode has ended.
info (dict): contains auxiliary diagnostic information, including BrainInfo. | def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
to reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
In the case of multi-agent environments, these are lists.
Args:
            action (object/list): an action provided by the agent
Returns:
observation (object/list): agent's observation of the current environment
reward (float/list) : amount of reward returned after previous action
done (boolean/list): whether the episode has ended.
info (dict): contains auxiliary diagnostic information, including BrainInfo.
"""
# Use random actions for all other agents in environment.
if self._multiagent:
if not isinstance(action, list):
raise UnityGymException("The environment was expecting `action` to be a list.")
if len(action) != self._n_agents:
raise UnityGymException(
"The environment was expecting a list of {} actions.".format(self._n_agents))
else:
if self._flattener is not None:
# Action space is discrete and flattened - we expect a list of scalars
action = [self._flattener.lookup_action(_act) for _act in action]
action = np.array(action)
else:
if self._flattener is not None:
# Translate action into list
action = self._flattener.lookup_action(action)
info = self._env.step(action)[self.brain_name]
n_agents = len(info.agents)
self._check_agents(n_agents)
self._current_state = info
if not self._multiagent:
obs, reward, done, info = self._single_step(info)
self.game_over = done
else:
obs, reward, done, info = self._multi_step(info)
self.game_over = all(done)
return obs, reward, done, info |
Creates the GRPC server. | def create_server(self):
"""
Creates the GRPC server.
"""
self.check_port(self.port)
try:
# Establish communication grpc
self.server = grpc.server(ThreadPoolExecutor(max_workers=10))
self.unity_to_external = UnityToExternalServicerImplementation()
add_UnityToExternalServicer_to_server(self.unity_to_external, self.server)
# Using unspecified address, which means that grpc is communicating on all IPs
# This is so that the docker container can connect.
self.server.add_insecure_port('[::]:' + str(self.port))
self.server.start()
self.is_open = True
except:
raise UnityWorkerInUseException(self.worker_id) |
Creates a Dict that maps discrete actions (scalars) to branched actions (lists).
Each key in the Dict maps to one unique set of branched actions, and each value
contains the List of branched actions. | def _create_lookup(self, branched_action_space):
"""
Creates a Dict that maps discrete actions (scalars) to branched actions (lists).
Each key in the Dict maps to one unique set of branched actions, and each value
contains the List of branched actions.
"""
possible_vals = [range(_num) for _num in branched_action_space]
all_actions = [list(_action) for _action in itertools.product(*possible_vals)]
# Dict should be faster than List for large action spaces
action_lookup = {_scalar: _action for (_scalar, _action) in enumerate(all_actions)}
return action_lookup |
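As an illustration, the lookup built for a hypothetical branched action space of [2, 3] enumerates all six combinations in the same order as itertools.product:

import itertools

branched_action_space = [2, 3]  # hypothetical: two branches with 2 and 3 options
possible_vals = [range(n) for n in branched_action_space]
all_actions = [list(a) for a in itertools.product(*possible_vals)]
action_lookup = {scalar: action for scalar, action in enumerate(all_actions)}
# {0: [0, 0], 1: [0, 1], 2: [0, 2], 3: [1, 0], 4: [1, 1], 5: [1, 2]}
print(action_lookup[4])  # [1, 1]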
Attempts to bind to the requested communicator port, checking if it is already in use. | def check_port(self, port):
"""
Attempts to bind to the requested communicator port, checking if it is already in use.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("localhost", port))
except socket.error:
raise UnityWorkerInUseException(self.worker_id)
finally:
s.close() |
Sends a shutdown signal to the unity environment, and closes the grpc connection. | def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the grpc connection.
"""
if self.is_open:
message_input = UnityMessage()
message_input.header.status = 400
self.unity_to_external.parent_conn.send(message_input)
self.unity_to_external.parent_conn.close()
self.server.stop(False)
self.is_open = False |
Converts byte array observation image into numpy array and optionally
converts it to grayscale
:param gray_scale: Whether to convert the image to grayscale.
:param image_bytes: input byte array corresponding to image
:return: processed numpy array of observation from environment | def process_pixels(image_bytes, gray_scale):
"""
    Converts byte array observation image into numpy array and optionally
    converts it to grayscale
:param gray_scale: Whether to convert the image to grayscale.
:param image_bytes: input byte array corresponding to image
:return: processed numpy array of observation from environment
"""
s = bytearray(image_bytes)
image = Image.open(io.BytesIO(s))
s = np.array(image) / 255.0
if gray_scale:
s = np.mean(s, axis=2)
s = np.reshape(s, [s.shape[0], s.shape[1], 1])
return s |
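A small usage sketch, assuming the process_pixels function above is in scope; the PNG bytes are generated in memory purely to stand in for the observation bytes sent by the environment:

import io
import numpy as np
from PIL import Image

# Build a tiny in-memory PNG to stand in for the bytes from the environment.
rgb = (np.random.rand(8, 8, 3) * 255).astype(np.uint8)
buffer = io.BytesIO()
Image.fromarray(rgb).save(buffer, format='PNG')
image_bytes = buffer.getvalue()

obs = process_pixels(image_bytes, gray_scale=True)
print(obs.shape)  # (8, 8, 1), values scaled to [0, 1]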
Converts list of agent infos to BrainInfo. | def from_agent_proto(agent_info_list, brain_params):
"""
Converts list of agent infos to BrainInfo.
"""
vis_obs = []
for i in range(brain_params.number_visual_observations):
obs = [BrainInfo.process_pixels(x.visual_observations[i],
brain_params.camera_resolutions[i]['blackAndWhite'])
for x in agent_info_list]
vis_obs += [obs]
if len(agent_info_list) == 0:
memory_size = 0
else:
memory_size = max([len(x.memories) for x in agent_info_list])
if memory_size == 0:
memory = np.zeros((0, 0))
else:
[x.memories.extend([0] * (memory_size - len(x.memories))) for x in agent_info_list]
memory = np.array([list(x.memories) for x in agent_info_list])
total_num_actions = sum(brain_params.vector_action_space_size)
mask_actions = np.ones((len(agent_info_list), total_num_actions))
for agent_index, agent_info in enumerate(agent_info_list):
if agent_info.action_mask is not None:
if len(agent_info.action_mask) == total_num_actions:
mask_actions[agent_index, :] = [
0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)]
if any([np.isnan(x.reward) for x in agent_info_list]):
logger.warning("An agent had a NaN reward for brain " + brain_params.brain_name)
if any([np.isnan(x.stacked_vector_observation).any() for x in agent_info_list]):
logger.warning("An agent had a NaN observation for brain " + brain_params.brain_name)
if len(agent_info_list) == 0:
vector_obs = np.zeros(
(0, brain_params.vector_observation_space_size * brain_params.num_stacked_vector_observations)
)
else:
vector_obs = np.nan_to_num(
np.array([x.stacked_vector_observation for x in agent_info_list])
)
brain_info = BrainInfo(
visual_observation=vis_obs,
vector_observation=vector_obs,
text_observations=[x.text_observation for x in agent_info_list],
memory=memory,
reward=[x.reward if not np.isnan(x.reward) else 0 for x in agent_info_list],
agents=[x.id for x in agent_info_list],
local_done=[x.done for x in agent_info_list],
vector_action=np.array([x.stored_vector_actions for x in agent_info_list]),
text_action=[list(x.stored_text_actions) for x in agent_info_list],
max_reached=[x.max_step_reached for x in agent_info_list],
custom_observations=[x.custom_observation for x in agent_info_list],
action_mask=mask_actions
)
return brain_info |
Converts brain parameter proto to BrainParameter object.
:param brain_param_proto: protobuf object.
:return: BrainParameter object. | def from_proto(brain_param_proto):
"""
Converts brain parameter proto to BrainParameter object.
:param brain_param_proto: protobuf object.
:return: BrainParameter object.
"""
resolution = [{
"height": x.height,
"width": x.width,
"blackAndWhite": x.gray_scale
} for x in brain_param_proto.camera_resolutions]
brain_params = BrainParameters(brain_param_proto.brain_name,
brain_param_proto.vector_observation_size,
brain_param_proto.num_stacked_vector_observations,
resolution,
list(brain_param_proto.vector_action_size),
list(brain_param_proto.vector_action_descriptions),
brain_param_proto.vector_action_space_type)
return brain_params |
Creates a new, blank dashboard and redirects to it in edit mode | def new(self):
"""Creates a new, blank dashboard and redirects to it in edit mode"""
new_dashboard = models.Dashboard(
dashboard_title='[ untitled dashboard ]',
owners=[g.user],
)
db.session.add(new_dashboard)
db.session.commit()
return redirect(f'/superset/dashboard/{new_dashboard.id}/?edit=true') |
List all tags a given object has. | def get(self, object_type, object_id):
"""List all tags a given object has."""
if object_id == 0:
return json_success(json.dumps([]))
query = db.session.query(TaggedObject).filter(and_(
TaggedObject.object_type == object_type,
TaggedObject.object_id == object_id))
tags = [{'id': obj.tag.id, 'name': obj.tag.name} for obj in query]
return json_success(json.dumps(tags)) |
Add new tags to an object. | def post(self, object_type, object_id):
"""Add new tags to an object."""
if object_id == 0:
return Response(status=404)
tagged_objects = []
for name in request.get_json(force=True):
if ':' in name:
type_name = name.split(':', 1)[0]
type_ = TagTypes[type_name]
else:
type_ = TagTypes.custom
tag = db.session.query(Tag).filter_by(name=name, type=type_).first()
if not tag:
tag = Tag(name=name, type=type_)
tagged_objects.append(
TaggedObject(
object_id=object_id,
object_type=object_type,
tag=tag,
),
)
db.session.add_all(tagged_objects)
db.session.commit()
return Response(status=201) |
Remove tags from an object. | def delete(self, object_type, object_id):
"""Remove tags from an object."""
tag_names = request.get_json(force=True)
if not tag_names:
return Response(status=403)
db.session.query(TaggedObject).filter(and_(
TaggedObject.object_type == object_type,
TaggedObject.object_id == object_id),
TaggedObject.tag.has(Tag.name.in_(tag_names)),
).delete(synchronize_session=False)
db.session.commit()
return Response(status=204) |
Imports the datasource from the object to the database.
Metrics, columns and the datasource will be overridden if they exist.
This function can be used to import/export dashboards between multiple
superset instances. Audit metadata isn't copied over. | def import_datasource(
session,
i_datasource,
lookup_database,
lookup_datasource,
import_time):
"""Imports the datasource from the object to the database.
    Metrics, columns and the datasource will be overridden if they exist.
    This function can be used to import/export dashboards between multiple
    superset instances. Audit metadata isn't copied over.
"""
make_transient(i_datasource)
logging.info('Started import of the datasource: {}'.format(
i_datasource.to_json()))
i_datasource.id = None
i_datasource.database_id = lookup_database(i_datasource).id
i_datasource.alter_params(import_time=import_time)
# override the datasource
datasource = lookup_datasource(i_datasource)
if datasource:
datasource.override(i_datasource)
session.flush()
else:
datasource = i_datasource.copy()
session.add(datasource)
session.flush()
for m in i_datasource.metrics:
new_m = m.copy()
new_m.table_id = datasource.id
logging.info('Importing metric {} from the datasource: {}'.format(
new_m.to_json(), i_datasource.full_name))
imported_m = i_datasource.metric_class.import_obj(new_m)
if (imported_m.metric_name not in
[m.metric_name for m in datasource.metrics]):
datasource.metrics.append(imported_m)
for c in i_datasource.columns:
new_c = c.copy()
new_c.table_id = datasource.id
logging.info('Importing column {} from the datasource: {}'.format(
new_c.to_json(), i_datasource.full_name))
imported_c = i_datasource.column_class.import_obj(new_c)
if (imported_c.column_name not in
[c.column_name for c in datasource.columns]):
datasource.columns.append(imported_c)
session.flush()
return datasource.id |
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. | def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: https://alembic.sqlalchemy.org/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
engine = engine_from_config(config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
kwargs = {}
if engine.name in ('sqlite', 'mysql'):
kwargs = {
'transaction_per_migration': True,
'transactional_ddl': True,
}
configure_args = current_app.extensions['migrate'].configure_args
if configure_args:
kwargs.update(configure_args)
context.configure(connection=connection,
target_metadata=target_metadata,
# compare_type=True,
process_revision_directives=process_revision_directives,
**kwargs)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close() |
Returns a pandas dataframe based on the query object | def get_df(self, query_obj=None):
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
if not query_obj:
return None
self.error_msg = ''
timestamp_format = None
if self.datasource.type == 'table':
dttm_col = self.datasource.get_col(query_obj['granularity'])
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be different backend but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.error_message = self.results.error_message
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if df is not None and not df.empty:
if DTTM_ALIAS in df.columns:
if timestamp_format in ('epoch_s', 'epoch_ms'):
# Column has already been formatted as a timestamp.
dttm_col = df[DTTM_ALIAS]
one_ts_val = dttm_col[0]
# convert time column to pandas Timestamp, but different
# ways to convert depending on string or int types
try:
int(one_ts_val)
is_integral = True
except ValueError:
is_integral = False
if is_integral:
unit = 's' if timestamp_format == 'epoch_s' else 'ms'
df[DTTM_ALIAS] = pd.to_datetime(dttm_col, utc=False, unit=unit,
origin='unix')
else:
df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += self.time_shift
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df |
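A small sketch of the epoch-seconds branch above, assuming DTTM_ALIAS resolves to '__timestamp' (treated as an assumption here) and using made-up values:

import pandas as pd

# Hypothetical epoch-seconds column as returned by the database.
df = pd.DataFrame({'__timestamp': [1546300800, 1546387200]})
df['__timestamp'] = pd.to_datetime(df['__timestamp'], utc=False, unit='s', origin='unix')
print(df['__timestamp'].tolist())
# [Timestamp('2019-01-01 00:00:00'), Timestamp('2019-01-02 00:00:00')]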
Building a query object | def query_obj(self):
"""Building a query object"""
form_data = self.form_data
self.process_query_filters()
gb = form_data.get('groupby') or []
metrics = self.all_metrics or []
columns = form_data.get('columns') or []
groupby = []
for o in gb + columns:
if o not in groupby:
groupby.append(o)
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
granularity = (
form_data.get('granularity') or
form_data.get('granularity_sqla')
)
limit = int(form_data.get('limit') or 0)
timeseries_limit_metric = form_data.get('timeseries_limit_metric')
row_limit = int(form_data.get('row_limit') or config.get('ROW_LIMIT'))
# default order direction
order_desc = form_data.get('order_desc', True)
since, until = utils.get_since_until(relative_end=relative_end,
time_range=form_data.get('time_range'),
since=form_data.get('since'),
until=form_data.get('until'))
time_shift = form_data.get('time_shift', '')
self.time_shift = utils.parse_human_timedelta(time_shift)
from_dttm = None if since is None else (since - self.time_shift)
to_dttm = None if until is None else (until - self.time_shift)
if from_dttm and to_dttm and from_dttm > to_dttm:
raise Exception(_('From date cannot be larger than to date'))
self.from_dttm = from_dttm
self.to_dttm = to_dttm
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
'where': form_data.get('where', ''),
'having': form_data.get('having', ''),
'having_druid': form_data.get('having_filters', []),
'time_grain_sqla': form_data.get('time_grain_sqla', ''),
'druid_time_origin': form_data.get('druid_time_origin', ''),
}
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': self.form_data.get('filters', []),
'timeseries_limit': limit,
'extras': extras,
'timeseries_limit_metric': timeseries_limit_metric,
'order_desc': order_desc,
'prequeries': [],
'is_prequery': False,
}
return d |
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
The `extra` arguments are currently used by time shift queries, since
different time shifts will differ only in the `from_dttm` and `to_dttm`
values which are stripped. | def cache_key(self, query_obj, **extra):
"""
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
        the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
The `extra` arguments are currently used by time shift queries, since
        different time shifts will differ only in the `from_dttm` and `to_dttm`
values which are stripped.
"""
cache_dict = copy.copy(query_obj)
cache_dict.update(extra)
for k in ['from_dttm', 'to_dttm']:
del cache_dict[k]
cache_dict['time_range'] = self.form_data.get('time_range')
cache_dict['datasource'] = self.datasource.uid
json_data = self.json_dumps(cache_dict, sort_keys=True)
return hashlib.md5(json_data.encode('utf-8')).hexdigest() |
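A standalone sketch of the hashing step; self.json_dumps is replaced here with json.dumps plus a default=str fallback, which is an assumption. Hard datetime bounds are dropped, the stable user inputs are added, and the sorted-keys JSON is hashed so equivalent query objects produce the same key:

import copy
import hashlib
import json

def cache_key_for(query_obj, time_range, datasource_uid, **extra):
    # Strip hard bounds, add the stable inputs, then hash the canonical JSON.
    cache_dict = copy.copy(query_obj)
    cache_dict.update(extra)
    for k in ['from_dttm', 'to_dttm']:
        cache_dict.pop(k, None)
    cache_dict['time_range'] = time_range
    cache_dict['datasource'] = datasource_uid
    json_data = json.dumps(cache_dict, sort_keys=True, default=str)  # assumption: stand-in for self.json_dumps
    return hashlib.md5(json_data.encode('utf-8')).hexdigest()

q = {'metrics': ['count'], 'from_dttm': '2019-01-01', 'to_dttm': '2019-01-08'}
print(cache_key_for(q, 'Last week', 'table__1'))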
This is the data object serialized to the js layer | def data(self):
"""This is the data object serialized to the js layer"""
content = {
'form_data': self.form_data,
'token': self.token,
'viz_name': self.viz_type,
'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content |
Returns the query object for this visualization | def query_obj(self):
"""Returns the query object for this visualization"""
d = super().query_obj()
d['row_limit'] = self.form_data.get(
'row_limit', int(config.get('VIZ_ROW_LIMIT')))
numeric_columns = self.form_data.get('all_columns_x')
if numeric_columns is None:
raise Exception(_('Must have at least one numeric column specified'))
self.columns = numeric_columns
d['columns'] = numeric_columns + self.groupby
# override groupby entry to avoid aggregation
d['groupby'] = []
return d |
Returns the chart data | def get_data(self, df):
"""Returns the chart data"""
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend([{
'key': self.labelify(keys, column),
'values': data[column].tolist()}
for column in self.columns])
return chart_data |
Compute the partition at each `level` from the dataframe. | def levels_for(self, time_op, groups, df):
"""
Compute the partition at each `level` from the dataframe.
"""
levels = {}
for i in range(0, len(groups) + 1):
agg_df = df.groupby(groups[:i]) if i else df
levels[i] = (
agg_df.mean() if time_op == 'agg_mean'
else agg_df.sum(numeric_only=True))
return levels |
Nest values at each level on the back-end with
access and setting, instead of summing from the bottom. | def nest_values(self, levels, level=0, metric=None, dims=()):
"""
Nest values at each level on the back-end with
access and setting, instead of summing from the bottom.
"""
if not level:
return [{
'name': m,
'val': levels[0][m],
'children': self.nest_values(levels, 1, m),
} for m in levels[0].index]
if level == 1:
return [{
'name': i,
'val': levels[1][metric][i],
'children': self.nest_values(levels, 2, metric, (i,)),
} for i in levels[1][metric].index]
if level >= len(levels):
return []
return [{
'name': i,
'val': levels[level][metric][dims][i],
'children': self.nest_values(
levels, level + 1, metric, dims + (i,),
),
} for i in levels[level][metric][dims].index] |
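A small worked example of the level-by-level aggregation that nest_values consumes, using a made-up DataFrame and group columns (sum as the time_op):

import pandas as pd

df = pd.DataFrame({
    'region': ['east', 'east', 'west', 'west'],
    'product': ['a', 'b', 'a', 'b'],
    'sales': [10, 20, 30, 40],
})
groups = ['region', 'product']

# Mirrors levels_for(..., time_op='agg_sum'): one aggregate per prefix of `groups`.
levels = {}
for i in range(0, len(groups) + 1):
    agg_df = df.groupby(groups[:i]) if i else df
    levels[i] = agg_df.sum(numeric_only=True)

print(levels[0]['sales'])                 # 100 (grand total)
print(levels[1]['sales']['east'])         # 30
print(levels[2]['sales'][('east', 'a')])  # 10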
Data representation of the datasource sent to the frontend | def short_data(self):
"""Data representation of the datasource sent to the frontend"""
return {
'edit_url': self.url,
'id': self.id,
'uid': self.uid,
'schema': self.schema,
'name': self.name,
'type': self.type,
'connection': self.connection,
'creator': str(self.created_by),
} |
Data representation of the datasource sent to the frontend | def data(self):
"""Data representation of the datasource sent to the frontend"""
order_by_choices = []
# self.column_names return sorted column_names
for s in self.column_names:
s = str(s or '')
order_by_choices.append((json.dumps([s, True]), s + ' [asc]'))
order_by_choices.append((json.dumps([s, False]), s + ' [desc]'))
verbose_map = {'__timestamp': 'Time'}
verbose_map.update({
o.metric_name: o.verbose_name or o.metric_name
for o in self.metrics
})
verbose_map.update({
o.column_name: o.verbose_name or o.column_name
for o in self.columns
})
return {
# simple fields
'id': self.id,
'column_formats': self.column_formats,
'description': self.description,
'database': self.database.data, # pylint: disable=no-member
'default_endpoint': self.default_endpoint,
'filter_select': self.filter_select_enabled, # TODO deprecate
'filter_select_enabled': self.filter_select_enabled,
'name': self.name,
'datasource_name': self.datasource_name,
'type': self.type,
'schema': self.schema,
'offset': self.offset,
'cache_timeout': self.cache_timeout,
'params': self.params,
'perm': self.perm,
'edit_url': self.url,
# sqla-specific
'sql': self.sql,
# one to many
'columns': [o.data for o in self.columns],
'metrics': [o.data for o in self.metrics],
# TODO deprecate, move logic to JS
'order_by_choices': order_by_choices,
'owners': [owner.id for owner in self.owners],
'verbose_map': verbose_map,
'select_star': self.select_star,
} |
Update ORM one-to-many list from object list
Used for syncing metrics and columns using the same code | def get_fk_many_from_list(
self, object_list, fkmany, fkmany_class, key_attr):
"""Update ORM one-to-many list from object list
Used for syncing metrics and columns using the same code"""
object_dict = {o.get(key_attr): o for o in object_list}
object_keys = [o.get(key_attr) for o in object_list]
# delete fks that have been removed
fkmany = [o for o in fkmany if getattr(o, key_attr) in object_keys]
# sync existing fks
for fk in fkmany:
obj = object_dict.get(getattr(fk, key_attr))
for attr in fkmany_class.update_from_object_fields:
setattr(fk, attr, obj.get(attr))
# create new fks
new_fks = []
orm_keys = [getattr(o, key_attr) for o in fkmany]
for obj in object_list:
key = obj.get(key_attr)
if key not in orm_keys:
del obj['id']
orm_kwargs = {}
for k in obj:
if (
k in fkmany_class.update_from_object_fields and
k in obj
):
orm_kwargs[k] = obj[k]
new_obj = fkmany_class(**orm_kwargs)
new_fks.append(new_obj)
fkmany += new_fks
return fkmany |
Update datasource from a data structure
The UI's table editor crafts a complex data structure that
contains most of the datasource's properties as well as
an array of metrics and columns objects. This method
receives the object from the UI and syncs the datasource to
match it. Since the fields are different for the different
connectors, the implementation uses ``update_from_object_fields``
which can be defined for each connector and
defines which fields should be synced | def update_from_object(self, obj):
"""Update datasource from a data structure
The UI's table editor crafts a complex data structure that
contains most of the datasource's properties as well as
an array of metrics and columns objects. This method
receives the object from the UI and syncs the datasource to
match it. Since the fields are different for the different
connectors, the implementation uses ``update_from_object_fields``
which can be defined for each connector and
defines which fields should be synced"""
for attr in self.update_from_object_fields:
setattr(self, attr, obj.get(attr))
self.owners = obj.get('owners', [])
# Syncing metrics
metrics = self.get_fk_many_from_list(
obj.get('metrics'), self.metrics, self.metric_class, 'metric_name')
self.metrics = metrics
# Syncing columns
self.columns = self.get_fk_many_from_list(
obj.get('columns'), self.columns, self.column_class, 'column_name') |
Returns a pandas dataframe based on the query object | def get_query_result(self, query_object):
"""Returns a pandas dataframe based on the query object"""
        # Here, we assume that all the queries will use the same datasource, which
        # is a valid assumption for the current setting. In the long term, we may or
        # may not support multiple queries from different data sources.
timestamp_format = None
if self.datasource.type == 'table':
dttm_col = self.datasource.get_col(query_object.granularity)
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be different backend but the interface is common
result = self.datasource.query(query_object.to_dict())
df = result.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic
if df is not None and not df.empty:
if DTTM_ALIAS in df.columns:
if timestamp_format in ('epoch_s', 'epoch_ms'):
# Column has already been formatted as a timestamp.
df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += query_object.time_shift
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df, query_object)
        df.replace([np.inf, -np.inf], np.nan, inplace=True)
return {
'query': result.query,
'status': result.status,
'error_message': result.error_message,
'df': df,
} |
Converting metrics to numeric when pandas.read_sql cannot | def df_metrics_to_num(self, df, query_object):
"""Converting metrics to numeric when pandas.read_sql cannot"""
metrics = [metric for metric in query_object.metrics]
for col, dtype in df.dtypes.items():
if dtype.type == np.object_ and col in metrics:
df[col] = pd.to_numeric(df[col], errors='coerce') |
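A quick illustration of the coercion, with a made-up frame where one metric column comes back as strings:

import numpy as np
import pandas as pd

df = pd.DataFrame({'count': ['1', '2', 'oops'], 'name': ['a', 'b', 'c']})
metrics = ['count']

# Mirrors df_metrics_to_num: coerce object-typed metric columns to numeric.
for col, dtype in df.dtypes.items():
    if dtype.type == np.object_ and col in metrics:
        df[col] = pd.to_numeric(df[col], errors='coerce')

print(df['count'].tolist())  # [1.0, 2.0, nan]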
Returns a payload of metadata and data | def get_single_payload(self, query_obj):
"""Returns a payload of metadata and data"""
payload = self.get_df_payload(query_obj)
df = payload.get('df')
status = payload.get('status')
if status != utils.QueryStatus.FAILED:
if df is not None and df.empty:
payload['error'] = 'No data'
else:
payload['data'] = self.get_data(df)
if 'df' in payload:
del payload['df']
return payload |
Handles caching around the df payload retrieval | def get_df_payload(self, query_obj, **kwargs):
"""Handles caching around the df paylod retrieval"""
cache_key = query_obj.cache_key(
datasource=self.datasource.uid, **kwargs) if query_obj else None
logging.info('Cache key: {}'.format(cache_key))
is_loaded = False
stacktrace = None
df = None
cached_dttm = datetime.utcnow().isoformat().split('.')[0]
cache_value = None
status = None
query = ''
error_message = None
if cache_key and cache and not self.force:
cache_value = cache.get(cache_key)
if cache_value:
stats_logger.incr('loaded_from_cache')
try:
cache_value = pkl.loads(cache_value)
df = cache_value['df']
query = cache_value['query']
status = utils.QueryStatus.SUCCESS
is_loaded = True
except Exception as e:
logging.exception(e)
logging.error('Error reading cache: ' +
utils.error_msg_from_exception(e))
logging.info('Serving from cache')
if query_obj and not is_loaded:
try:
query_result = self.get_query_result(query_obj)
status = query_result['status']
query = query_result['query']
error_message = query_result['error_message']
df = query_result['df']
if status != utils.QueryStatus.FAILED:
stats_logger.incr('loaded_from_source')
is_loaded = True
except Exception as e:
logging.exception(e)
if not error_message:
error_message = '{}'.format(e)
status = utils.QueryStatus.FAILED
stacktrace = traceback.format_exc()
if (
is_loaded and
cache_key and
cache and
status != utils.QueryStatus.FAILED):
try:
cache_value = dict(
dttm=cached_dttm,
df=df if df is not None else None,
query=query,
)
cache_value = pkl.dumps(
cache_value, protocol=pkl.HIGHEST_PROTOCOL)
logging.info('Caching {} chars at key {}'.format(
len(cache_value), cache_key))
stats_logger.incr('set_cache_key')
cache.set(
cache_key,
cache_value,
timeout=self.cache_timeout)
except Exception as e:
# cache.set call can fail if the backend is down or if
# the key is too large or whatever other reasons
logging.warning('Could not cache key {}'.format(cache_key))
logging.exception(e)
cache.delete(cache_key)
return {
'cache_key': cache_key,
'cached_dttm': cache_value['dttm'] if cache_value is not None else None,
'cache_timeout': self.cache_timeout,
'df': df,
'error': error_message,
'is_cached': cache_key is not None,
'query': query,
'status': status,
'stacktrace': stacktrace,
'rowcount': len(df.index) if df is not None else 0,
} |
Data used to render slice in templates | def data(self):
"""Data used to render slice in templates"""
d = {}
self.token = ''
try:
d = self.viz.data
self.token = d.get('token')
except Exception as e:
logging.exception(e)
d['error'] = str(e)
return {
'datasource': self.datasource_name,
'description': self.description,
'description_markeddown': self.description_markeddown,
'edit_url': self.edit_url,
'form_data': self.form_data,
'slice_id': self.id,
'slice_name': self.slice_name,
'slice_url': self.slice_url,
'modified': self.modified(),
'changed_on_humanized': self.changed_on_humanized,
'changed_on': self.changed_on.isoformat(),
} |
Creates :py:class:viz.BaseViz object from the url_params_multidict.
:return: object of the 'viz_type' type that is taken from the
url_params_multidict or self.params.
:rtype: :py:class:viz.BaseViz | def get_viz(self, force=False):
"""Creates :py:class:viz.BaseViz object from the url_params_multidict.
:return: object of the 'viz_type' type that is taken from the
url_params_multidict or self.params.
:rtype: :py:class:viz.BaseViz
"""
slice_params = json.loads(self.params)
slice_params['slice_id'] = self.id
slice_params['json'] = 'false'
slice_params['slice_name'] = self.slice_name
slice_params['viz_type'] = self.viz_type if self.viz_type else 'table'
return viz_types[slice_params.get('viz_type')](
self.datasource,
form_data=slice_params,
force=force,
) |
Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int | def import_obj(cls, slc_to_import, slc_to_override, import_time=None):
"""Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int
"""
session = db.session
make_transient(slc_to_import)
slc_to_import.dashboards = []
slc_to_import.alter_params(
remote_id=slc_to_import.id, import_time=import_time)
slc_to_import = slc_to_import.copy()
params = slc_to_import.params_dict
slc_to_import.datasource_id = ConnectorRegistry.get_datasource_by_name(
session, slc_to_import.datasource_type, params['datasource_name'],
params['schema'], params['database_name']).id
if slc_to_override:
slc_to_override.override(slc_to_import)
session.flush()
return slc_to_override.id
session.add(slc_to_import)
logging.info('Final slice: {}'.format(slc_to_import.to_json()))
session.flush()
return slc_to_import.id |
Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
be overridden or just copied over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over. | def import_obj(cls, dashboard_to_import, import_time=None):
"""Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
        be overridden or just copied over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
"""
def alter_positions(dashboard, old_to_new_slc_id_dict):
""" Updates slice_ids in the position json.
Sample position_json data:
{
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_ROOT_ID": {
"type": "DASHBOARD_ROOT_TYPE",
"id": "DASHBOARD_ROOT_ID",
"children": ["DASHBOARD_GRID_ID"]
},
"DASHBOARD_GRID_ID": {
"type": "DASHBOARD_GRID_TYPE",
"id": "DASHBOARD_GRID_ID",
"children": ["DASHBOARD_CHART_TYPE-2"]
},
"DASHBOARD_CHART_TYPE-2": {
"type": "DASHBOARD_CHART_TYPE",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {
"width": 4,
"height": 50,
"chartId": 118
}
},
}
"""
position_data = json.loads(dashboard.position_json)
position_json = position_data.values()
for value in position_json:
if (isinstance(value, dict) and value.get('meta') and
value.get('meta').get('chartId')):
old_slice_id = value.get('meta').get('chartId')
if old_slice_id in old_to_new_slc_id_dict:
value['meta']['chartId'] = (
old_to_new_slc_id_dict[old_slice_id]
)
dashboard.position_json = json.dumps(position_data)
logging.info('Started import of the dashboard: {}'
.format(dashboard_to_import.to_json()))
session = db.session
logging.info('Dashboard has {} slices'
.format(len(dashboard_to_import.slices)))
# copy slices object as Slice.import_slice will mutate the slice
# and will remove the existing dashboard - slice association
slices = copy(dashboard_to_import.slices)
old_to_new_slc_id_dict = {}
new_filter_immune_slices = []
new_timed_refresh_immune_slices = []
new_expanded_slices = {}
i_params_dict = dashboard_to_import.params_dict
remote_id_slice_map = {
slc.params_dict['remote_id']: slc
for slc in session.query(Slice).all()
if 'remote_id' in slc.params_dict
}
for slc in slices:
logging.info('Importing slice {} from the dashboard: {}'.format(
slc.to_json(), dashboard_to_import.dashboard_title))
remote_slc = remote_id_slice_map.get(slc.id)
new_slc_id = Slice.import_obj(slc, remote_slc, import_time=import_time)
old_to_new_slc_id_dict[slc.id] = new_slc_id
# update json metadata that deals with slice ids
new_slc_id_str = '{}'.format(new_slc_id)
old_slc_id_str = '{}'.format(slc.id)
if ('filter_immune_slices' in i_params_dict and
old_slc_id_str in i_params_dict['filter_immune_slices']):
new_filter_immune_slices.append(new_slc_id_str)
if ('timed_refresh_immune_slices' in i_params_dict and
old_slc_id_str in
i_params_dict['timed_refresh_immune_slices']):
new_timed_refresh_immune_slices.append(new_slc_id_str)
if ('expanded_slices' in i_params_dict and
old_slc_id_str in i_params_dict['expanded_slices']):
new_expanded_slices[new_slc_id_str] = (
i_params_dict['expanded_slices'][old_slc_id_str])
# override the dashboard
existing_dashboard = None
for dash in session.query(Dashboard).all():
if ('remote_id' in dash.params_dict and
dash.params_dict['remote_id'] ==
dashboard_to_import.id):
existing_dashboard = dash
dashboard_to_import.id = None
alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
dashboard_to_import.alter_params(import_time=import_time)
if new_expanded_slices:
dashboard_to_import.alter_params(
expanded_slices=new_expanded_slices)
if new_filter_immune_slices:
dashboard_to_import.alter_params(
filter_immune_slices=new_filter_immune_slices)
if new_timed_refresh_immune_slices:
dashboard_to_import.alter_params(
timed_refresh_immune_slices=new_timed_refresh_immune_slices)
new_slices = session.query(Slice).filter(
Slice.id.in_(old_to_new_slc_id_dict.values())).all()
if existing_dashboard:
existing_dashboard.override(dashboard_to_import)
existing_dashboard.slices = new_slices
session.flush()
return existing_dashboard.id
else:
            # session.add(dashboard_to_import) causes SQLAlchemy failures
            # related to the attached users / slices. Creating a new object
            # avoids conflicts in the SQLAlchemy state.
copied_dash = dashboard_to_import.copy()
copied_dash.slices = new_slices
session.add(copied_dash)
session.flush()
return copied_dash.id |
Get the effective user, especially during impersonation.
:param url: SQL Alchemy URL object
:param user_name: Default username
:return: The effective username | def get_effective_user(self, url, user_name=None):
"""
Get the effective user, especially during impersonation.
:param url: SQL Alchemy URL object
:param user_name: Default username
:return: The effective username
"""
effective_username = None
if self.impersonate_user:
effective_username = url.username
if user_name:
effective_username = user_name
elif (
hasattr(g, 'user') and hasattr(g.user, 'username') and
g.user.username is not None
):
effective_username = g.user.username
return effective_username |
Generates a ``select *`` statement in the proper dialect | def select_star(
self, table_name, schema=None, limit=100, show_cols=False,
indent=True, latest_partition=False, cols=None):
"""Generates a ``select *`` statement in the proper dialect"""
eng = self.get_sqla_engine(
schema=schema, source=utils.sources.get('sql_lab', None))
return self.db_engine_spec.select_star(
self, table_name, schema=schema, engine=eng,
limit=limit, show_cols=show_cols,
indent=indent, latest_partition=latest_partition, cols=cols) |
Parameters need to be passed as keyword arguments. | def all_table_names_in_database(self, cache=False,
cache_timeout=None, force=False):
"""Parameters need to be passed as keyword arguments."""
if not self.allow_multi_schema_metadata_fetch:
return []
return self.db_engine_spec.fetch_result_sets(self, 'table') |
Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:type schema: str
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: table list
:rtype: list | def all_table_names_in_schema(self, schema, cache=False,
cache_timeout=None, force=False):
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:type schema: str
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: table list
:rtype: list
"""
tables = []
try:
tables = self.db_engine_spec.get_table_names(
inspector=self.inspector, schema=schema)
except Exception as e:
logging.exception(e)
return tables |
Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:type schema: str
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: view list
:rtype: list | def all_view_names_in_schema(self, schema, cache=False,
cache_timeout=None, force=False):
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param schema: schema name
:type schema: str
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: view list
:rtype: list
"""
views = []
try:
views = self.db_engine_spec.get_view_names(
inspector=self.inspector, schema=schema)
except Exception as e:
logging.exception(e)
return views |
Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: schema list
:rtype: list | def all_schema_names(self, cache=False, cache_timeout=None, force=False):
"""Parameters need to be passed as keyword arguments.
For unused parameters, they are referenced in
cache_util.memoized_func decorator.
:param cache: whether cache is enabled for the function
:type cache: bool
:param cache_timeout: timeout in seconds for the cache
:type cache_timeout: int
:param force: whether to force refresh the cache
:type force: bool
:return: schema list
:rtype: list
"""
return self.db_engine_spec.get_schema_names(self.inspector) |
Allows looking up a grain by either label or duration,
for backward compatibility. | def grains_dict(self):
"""Allowing to lookup grain by either label or duration
For backward compatibility"""
d = {grain.duration: grain for grain in self.grains()}
d.update({grain.label: grain for grain in self.grains()})
return d |