Dataset columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M).
Input layer.

Args:
    names: A list of strings that name the inputs to merge.
    axis: Axis along which to merge the inputs.

def __init__(
    self,
    names,
    aggregation_type='concat',
    axis=1,
    named_tensors=None,
    scope='input',
    summary_labels=()
):
    self.names = names
    self.aggregation_type = aggregation_type
    self.axis = axis
    super(Input, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,897
Output layer.

Args:
    output: A string that names the tensor; it will be added to the available inputs.

def __init__(
    self,
    name,
    named_tensors=None,
    scope='output',
    summary_labels=()
):
    self.name = name
    super(Output, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,899
Creates a new layer instance of a TensorFlow layer.

Args:
    name: The name of the layer, one of 'dense'.
    **kwargs: Additional arguments passed on to the TensorFlow layer constructor.

def __init__(self, layer, named_tensors=None, scope='tf-layer', summary_labels=(), **kwargs):
    self.layer_spec = layer
    self.layer = util.get_object(obj=layer, predefined_objects=TFLayer.tf_layers, kwargs=kwargs)
    self.first_scope = None
    super(TFLayer, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,901
2-dimensional pooling layer.

Args:
    pooling_type: Either 'max' or 'average'.
    window: Pooling window size, either an integer or pair of integers.
    stride: Pooling stride, either an integer or pair of integers.
    padding: Pooling padding, one of 'VALID' or 'SAME'.

def __init__(
    self,
    pooling_type='max',
    window=2,
    stride=2,
    padding='SAME',
    named_tensors=None,
    scope='pool2d',
    summary_labels=()
):
    self.pooling_type = pooling_type
    if isinstance(window, int):
        self.window = (1, window, window, 1)
    elif len(window) == 2:
        self.window = (1, window[0], window[1], 1)
    else:
        raise TensorForceError('Invalid window {} for pool2d layer, must be of size 2'.format(window))
    if isinstance(stride, int):
        self.stride = (1, stride, stride, 1)
    elif len(stride) == 2:
        self.stride = (1, stride[0], stride[1], 1)
    else:
        raise TensorForceError('Invalid stride {} for pool2d layer, must be of size 2'.format(stride))
    self.padding = padding
    super(Pool2d, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,910
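A brief usage sketch of the window/stride normalization performed by the constructor above (values chosen here for illustration; instantiating assumes the usual tensorforce/TensorFlow context):

pool = Pool2d(pooling_type='max', window=2, stride=2, padding='SAME')
assert pool.window == (1, 2, 2, 1)   # int arguments expand to NHWC-style 4-tuples
assert pool.stride == (1, 2, 2, 1)

pool = Pool2d(window=(3, 2), stride=(2, 1))  # (height, width) pairs are also accepted
assert pool.window == (1, 3, 2, 1)
assert pool.stride == (1, 2, 1, 1)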
Embedding layer.

Args:
    indices: Number of embedding indices.
    size: Embedding size.
    l2_regularization: L2 regularization weight.
    l1_regularization: L1 regularization weight.

def __init__(
    self,
    indices,
    size,
    l2_regularization=0.0,
    l1_regularization=0.0,
    named_tensors=None,
    scope='embedding',
    summary_labels=()
):
    self.indices = indices
    self.size = size
    self.l2_regularization = l2_regularization
    self.l1_regularization = l1_regularization
    super(Embedding, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,912
Linear layer.

Args:
    size: Layer size.
    weights: Weight initialization, random if None.
    bias: Bias initialization, random if True, no bias added if False.
    l2_regularization: L2 regularization weight.
    l1_regularization: L1 regularization weight.

def __init__(
    self,
    size,
    weights=None,
    bias=True,
    l2_regularization=0.0,
    l1_regularization=0.0,
    trainable=True,
    named_tensors=None,
    scope='linear',
    summary_labels=()
):
    self.size = size
    self.weights_init = weights
    self.bias_init = bias
    self.l2_regularization = l2_regularization
    self.l1_regularization = l1_regularization
    self.trainable = trainable
    super(Linear, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,915
1D convolutional layer.

Args:
    size: Number of filters.
    window: Convolution window size.
    stride: Convolution stride.
    padding: Convolution padding, one of 'VALID' or 'SAME'.
    bias: If true, a bias is added.
    activation: Type of nonlinearity, or dict with name & arguments.
    l2_regularization: L2 regularization weight.
    l1_regularization: L1 regularization weight.

def __init__(
    self,
    size,
    window=3,
    stride=1,
    padding='SAME',
    bias=True,
    activation='relu',
    l2_regularization=0.0,
    l1_regularization=0.0,
    named_tensors=None,
    scope='conv1d',
    summary_labels=()
):
    self.size = size
    self.window = window
    self.stride = stride
    self.padding = padding
    self.bias = bias
    self.l2_regularization = l2_regularization
    self.l1_regularization = l1_regularization
    self.nonlinearity = Nonlinearity(summary_labels=summary_labels, **util.prepare_kwargs(activation))
    super(Conv1d, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,925
LSTM layer (internal state variant).

Args:
    size: LSTM size.
    dropout: Dropout rate.

def __init__(self, size, dropout=None, lstmcell_args={}, named_tensors=None, scope='internal_lstm', summary_labels=()):
    self.size = size
    self.dropout = dropout
    self.lstmcell_args = lstmcell_args
    super(InternalLstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,930
LSTM layer.

Args:
    size: LSTM size.
    dropout: Dropout rate.

def __init__(self, size, dropout=None, named_tensors=None, scope='lstm', summary_labels=(), return_final_state=True):
    self.size = size
    self.dropout = dropout
    self.return_final_state = return_final_state
    super(Lstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
137,933
Executes this runner by starting all Agents in parallel (each one in one thread).

Args:
    episodes (int): Deprecated; see num_episodes.
    max_timesteps (int): Deprecated; see max_episode_timesteps.

def run(
    self,
    num_episodes=-1,
    max_episode_timesteps=-1,
    episode_finished=None,
    summary_report=None,
    summary_interval=0,
    num_timesteps=None,
    deterministic=False,
    episodes=None,
    max_timesteps=None,
    testing=False,
    sleep=None
):
    # Renamed episodes into num_episodes to match BaseRunner's signature (fully backw. compatible).
    if episodes is not None:
        num_episodes = episodes
        warnings.warn("WARNING: `episodes` parameter is deprecated, use `num_episodes` instead.",
                      category=DeprecationWarning)
    assert isinstance(num_episodes, int)
    # Renamed max_timesteps into max_episode_timesteps to match single Runner's signature (fully backw. compatible).
    if max_timesteps is not None:
        max_episode_timesteps = max_timesteps
        warnings.warn("WARNING: `max_timesteps` parameter is deprecated, use `max_episode_timesteps` instead.",
                      category=DeprecationWarning)
    assert isinstance(max_episode_timesteps, int)

    if summary_report is not None:
        warnings.warn("WARNING: `summary_report` parameter is deprecated, use `episode_finished` callback "
                      "instead to generate summaries every n episodes.",
                      category=DeprecationWarning)

    self.reset()

    # Reset counts/stop-condition for this run.
    self.global_episode = 0
    self.global_timestep = 0
    self.should_stop = False

    # Create threads.
    threads = [
        threading.Thread(
            target=self._run_single,
            args=(t, self.agent[t], self.environment[t],),
            kwargs={
                "deterministic": deterministic,
                "max_episode_timesteps": max_episode_timesteps,
                "episode_finished": episode_finished,
                "testing": testing,
                "sleep": sleep
            }
        )
        for t in range(len(self.agent))
    ]

    # Start threads.
    self.start_time = time.time()
    [t.start() for t in threads]

    # Stay idle until killed by SIGINT or a global stop condition is met.
    try:
        next_summary = 0
        next_save = 0 if self.save_frequency_unit != "s" else time.time()
        while any([t.is_alive() for t in threads]) and self.global_episode < num_episodes or num_episodes == -1:
            self.time = time.time()

            # This is deprecated (but still supported) and should be covered by the `episode_finished` callable.
            if summary_report is not None and self.global_episode > next_summary:
                summary_report(self)
                next_summary += summary_interval

            if self.save_path and self.save_frequency is not None:
                do_save = True
                current = None
                if self.save_frequency_unit == "e" and self.global_episode > next_save:
                    current = self.global_episode
                elif self.save_frequency_unit == "s" and self.time > next_save:
                    current = self.time
                elif self.save_frequency_unit == "t" and self.global_timestep > next_save:
                    current = self.global_timestep
                else:
                    do_save = False

                if do_save:
                    self.agent[0].save_model(self.save_path)
                    # Make sure next save is later than right now.
                    while next_save < current:
                        next_save += self.save_frequency

            time.sleep(1)
    except KeyboardInterrupt:
        print('Keyboard interrupt, sending stop command to threads')
        self.should_stop = True

    # Join threads.
    [t.join() for t in threads]
    print('All threads stopped')
137,975
Initialize a single Runner object (one Agent/one Environment).

Args:
    id_ (int): The ID of this Runner (for distributed TF runs).

def __init__(self, agent, environment, repeat_actions=1, history=None, id_=0):
    super(ParallelRunner, self).__init__(agent, environment, repeat_actions, history)
    self.id = id_  # the worker's ID in a distributed run (default=0)
    self.current_timestep = None  # the time step in the current episode
    self.episode_actions = []
    self.num_parallel = self.agent.execution['num_parallel']
    print('ParallelRunner with {} parallel buffers.'.format(self.num_parallel))
137,977
Replay memory.

Args:
    states (dict): States specification.
    internals (dict): Internal states specification.
    actions (dict): Actions specification.
    include_next_states (bool): Include subsequent state if true.
    capacity (int): Memory capacity (number of state/internals/action/(next-state)? records).

def __init__(self, states, internals, actions, include_next_states, capacity, scope='replay', summary_labels=None):
    super(Replay, self).__init__(
        states=states,
        internals=internals,
        actions=actions,
        include_next_states=include_next_states,
        capacity=capacity,
        scope=scope,
        summary_labels=summary_labels
    )
137,980
Initialize OpenAI universe environment.

Args:
    env_id: String with id/descriptor of the universe environment, e.g. 'HarvestDay-v0'.

def __init__(self, env_id):
    self.env_id = env_id
    self.env = gym.make(env_id)
137,984
Returns x, y from flat_position integer.

Args:
    flat_position: Flattened position integer.

Returns:
    x, y

def _int_to_pos(self, flat_position):
    return flat_position % self.env.action_space.screen_shape[0], \
           flat_position % self.env.action_space.screen_shape[1]
137,988
Applies the given (and already calculated) step deltas to the variable values.

Args:
    variables: List of variables.
    deltas: List of deltas of same length.

Returns:
    The step-applied operation. A tf.group of tf.assign_add ops.

def apply_step(self, variables, deltas):
    if len(variables) != len(deltas):
        raise TensorForceError("Invalid variables and deltas lists.")
    return tf.group(
        *(tf.assign_add(ref=variable, value=delta) for variable, delta in zip(variables, deltas))
    )
137,994
Categorical distribution.

Args:
    shape: Action shape.
    num_actions: Number of discrete action alternatives.
    probabilities: Optional distribution bias.

def __init__(self, shape, num_actions, probabilities=None, scope='categorical', summary_labels=()):
    self.num_actions = num_actions
    action_size = util.prod(shape) * self.num_actions
    if probabilities is None:
        logits = 0.0
    else:
        logits = [log(prob) for _ in range(util.prod(shape)) for prob in probabilities]
    self.logits = Linear(size=action_size, bias=logits, scope='logits', summary_labels=summary_labels)
    super(Categorical, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)
137,997
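A small illustrative computation of the optional probabilities bias above (numbers chosen here): the logits layer's bias is initialized with log-probabilities, tiled once per action component.

from math import log

probabilities = [0.25, 0.75]
shape = (2,)                     # two independent action components
num_actions = len(probabilities)
logits = [log(p) for _ in range(2) for p in probabilities]
# -> [log(0.25), log(0.75), log(0.25), log(0.75)]: one bias entry per
#    (component, action) pair, matching action_size = prod(shape) * num_actions.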
Translates dtype specifications in configurations to numpy data types.

Args:
    dtype: String describing a numerical type (e.g. 'float') or numerical type primitive.

Returns:
    Numpy data type.

def np_dtype(dtype):
    if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:
        return np.float32
    elif dtype == np.float64 or dtype == tf.float64:
        return np.float64
    elif dtype == np.float16 or dtype == tf.float16:
        return np.float16
    elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:
        return np.int32
    elif dtype == np.int64 or dtype == tf.int64:
        return np.int64
    elif dtype == np.int16 or dtype == tf.int16:
        return np.int16
    elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:
        return np.bool_
    else:
        raise TensorForceError("Error: Type conversion from type {} not supported.".format(str(dtype)))
138,007
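A short usage sketch of the conversion above (assumes numpy is importable as np):

import numpy as np

assert np_dtype('float') is np.float32      # config shorthand maps to 32-bit float
assert np_dtype(int) is np.int32            # Python primitives map to 32-bit int
assert np_dtype(np.float64) is np.float64   # wider numpy/TF dtypes pass through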
Utility method to get all dependencies (including placeholders) of a tensor (backwards through the graph).

Args:
    tensor (tf.Tensor): The input tensor.

Returns:
    Set of all dependencies (including needed placeholders) for the input tensor.

def get_tensor_dependencies(tensor):
    dependencies = set()
    dependencies.update(tensor.op.inputs)
    for sub_op in tensor.op.inputs:
        dependencies.update(get_tensor_dependencies(sub_op))
    return dependencies
138,009
Utility method to convert raw string/dict input into a dictionary to pass into a function. Always returns a dictionary.

Args:
    raw: String or dictionary; a string is assumed to be the name of the activation function, and a dictionary is passed through unchanged.

Returns:
    kwargs dictionary for **kwargs.

def prepare_kwargs(raw, string_parameter='name'):
    kwargs = dict()
    if isinstance(raw, dict):
        kwargs.update(raw)
    elif isinstance(raw, str):
        kwargs[string_parameter] = raw
    return kwargs
138,011
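A short usage sketch (values illustrative):

assert prepare_kwargs('relu') == {'name': 'relu'}                                       # bare string becomes the 'name' entry
assert prepare_kwargs({'name': 'elu', 'alpha': 0.5}) == {'name': 'elu', 'alpha': 0.5}   # dicts pass through unchanged
assert prepare_kwargs(None) == {}                                                       # anything else yields an empty dict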
Saves this component's managed variables.

Args:
    sess: The session for which to save the managed variables.
    save_path: The path to save data to.
    timestep: Optional, the timestep to append to the file name.

Returns:
    Checkpoint path where the model was saved.

def save(self, sess, save_path, timestep=None):
    if self._saver is None:
        raise TensorForceError("register_saver_ops should be called before save")
    return self._saver.save(
        sess=sess,
        save_path=save_path,
        global_step=timestep,
        write_meta_graph=False,
        write_state=True,  # Do we need this?
    )
138,014
Restores the values of the managed variables from a disk location.

Args:
    sess: The session in which to restore the managed variables.
    save_path: The path the data was saved to.

def restore(self, sess, save_path):
    if self._saver is None:
        raise TensorForceError("register_saver_ops should be called before restore")
    self._saver.restore(sess=sess, save_path=save_path)
138,015
Process state.

Args:
    tensor: Tensor to process.

Returns:
    Processed state.

def process(self, tensor):
    for processor in self.preprocessors:
        tensor = processor.process(tensor=tensor)
    return tensor
138,018
Shape of preprocessed state given original shape.

Args:
    shape: Original state shape.

Returns:
    Processed state shape.

def processed_shape(self, shape):
    for processor in self.preprocessors:
        shape = processor.processed_shape(shape=shape)
    return shape
138,019
Bernoulli distribution.

Args:
    shape: Action shape.
    probability: Optional distribution bias.

def __init__(self, shape, probability=0.5, scope='bernoulli', summary_labels=()):
    self.shape = shape
    action_size = util.prod(self.shape)
    self.logit = Linear(size=action_size, bias=log(probability), scope='logit', summary_labels=summary_labels)
    super(Bernoulli, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)
138,024
Creates a new iterative solver instance.

Args:
    max_iterations: Maximum number of iterations before termination.
    unroll_loop: Unrolls the TensorFlow while loop if true.

def __init__(self, max_iterations, unroll_loop=False):
    assert max_iterations >= 0
    self.max_iterations = max_iterations

    assert isinstance(unroll_loop, bool)
    self.unroll_loop = unroll_loop

    super(Iterative, self).__init__()

    # TensorFlow functions
    self.initialize = tf.make_template(name_='initialize', func_=self.tf_initialize)
    self.step = tf.make_template(name_='step', func_=self.tf_step)
    self.next_step = tf.make_template(name_='next-step', func_=self.tf_next_step)
138,033
Iteratively solves an equation/optimization for $x$ involving an expression $f(x)$.

Args:
    fn_x: A callable returning an expression $f(x)$ given $x$.
    x_init: Initial solution guess $x_0$.
    *args: Additional solver-specific arguments.

Returns:
    A solution $x$ to the problem as given by the solver.

def tf_solve(self, fn_x, x_init, *args):
    self.fn_x = fn_x

    # Initialization step
    args = self.initialize(x_init, *args)
    # args = util.map_tensors(fn=tf.stop_gradient, tensors=args)

    # Iteration loop with termination condition
    if self.unroll_loop:
        # Unrolled for loop
        for _ in range(self.max_iterations):
            next_step = self.next_step(*args)
            step = (lambda: self.step(*args))
            do_nothing = (lambda: args)
            args = tf.cond(pred=next_step, true_fn=step, false_fn=do_nothing)
    else:
        # TensorFlow while loop
        args = tf.while_loop(cond=self.next_step, body=self.step, loop_vars=args)

    # First argument contains solution
    return args[0]
138,034
Executes action, observes next state and reward.

Args:
    actions: Actions to execute.

Returns:
    Tuple of (next state, bool indicating terminal, reward).

def execute(self, action):
    next_state, rew, done, _ = self.env.step(action)
    return next_state, rew, done
138,036
Beta distribution.

Args:
    shape: Action shape.
    min_value: Minimum value of continuous actions.
    max_value: Maximum value of continuous actions.
    alpha: Optional distribution bias for the alpha value.
    beta: Optional distribution bias for the beta value.

def __init__(self, shape, min_value, max_value, alpha=0.0, beta=0.0, scope='beta', summary_labels=()):
    assert min_value is None or max_value > min_value
    self.shape = shape
    self.min_value = min_value
    self.max_value = max_value
    action_size = util.prod(self.shape)
    self.alpha = Linear(size=action_size, bias=alpha, scope='alpha', summary_labels=summary_labels)
    self.beta = Linear(size=action_size, bias=beta, scope='beta', summary_labels=summary_labels)
    super(Beta, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)
138,039
CNN baseline.

Args:
    conv_sizes: List of convolutional layer sizes.
    dense_sizes: List of dense layer sizes.

def __init__(self, conv_sizes, dense_sizes, scope='cnn-baseline', summary_labels=()):
    network = []
    for size in conv_sizes:
        network.append(dict(type='conv2d', size=size))
    # First layer has a larger window.
    network[0]['window'] = 5
    network.append(dict(type='flatten'))  # TODO: change to max pooling!
    for size in dense_sizes:
        network.append(dict(type='dense', size=size))
    super(CNNBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)
138,047
Creates a new optimized-step meta optimizer instance.

Args:
    optimizer: The optimizer which is modified by this meta optimizer.
    ls_max_iterations: Maximum number of line search iterations.
    ls_accept_ratio: Line search acceptance ratio.
    ls_mode: Line search mode, see LineSearch solver.
    ls_parameter: Line search parameter, see LineSearch solver.
    ls_unroll_loop: Unroll line search loop if true.

def __init__(
    self,
    optimizer,
    ls_max_iterations=10,
    ls_accept_ratio=0.9,
    ls_mode='exponential',
    ls_parameter=0.5,
    ls_unroll_loop=False,
    scope='optimized-step',
    summary_labels=()
):
    self.solver = LineSearch(
        max_iterations=ls_max_iterations,
        accept_ratio=ls_accept_ratio,
        mode=ls_mode,
        parameter=ls_parameter,
        unroll_loop=ls_unroll_loop
    )
    super(OptimizedStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
138,063
Distribution.

Args:
    shape: Action shape.

def __init__(self, shape, scope='distribution', summary_labels=None):
    self.shape = shape
    self.scope = scope
    self.summary_labels = set(summary_labels or ())

    self.variables = dict()
    self.all_variables = dict()

    def custom_getter(getter, name, registered=False, **kwargs):
        variable = getter(name=name, registered=True, **kwargs)
        if registered:
            pass
        elif name in self.all_variables:
            assert variable is self.all_variables[name]
            if kwargs.get('trainable', True):
                assert variable is self.variables[name]
                if 'variables' in self.summary_labels:
                    tf.contrib.summary.histogram(name=name, tensor=variable)
        else:
            self.all_variables[name] = variable
            if kwargs.get('trainable', True):
                self.variables[name] = variable
                if 'variables' in self.summary_labels:
                    tf.contrib.summary.histogram(name=name, tensor=variable)
        return variable

    self.parameterize = tf.make_template(
        name_=(scope + '/parameterize'),
        func_=self.tf_parameterize,
        custom_getter_=custom_getter
    )
    self.sample = tf.make_template(
        name_=(scope + '/sample'),
        func_=self.tf_sample,
        custom_getter_=custom_getter
    )
    self.log_probability = tf.make_template(
        name_=(scope + '/log-probability'),
        func_=self.tf_log_probability,
        custom_getter_=custom_getter
    )
    self.entropy = tf.make_template(
        name_=(scope + '/entropy'),
        func_=self.tf_entropy,
        custom_getter_=custom_getter
    )
    self.kl_divergence = tf.make_template(
        name_=(scope + '/kl-divergence'),
        func_=self.tf_kl_divergence,
        custom_getter_=custom_getter
    )
    self.regularization_loss = tf.make_template(
        name_=(scope + '/regularization-loss'),
        func_=self.tf_regularization_loss,
        custom_getter_=custom_getter
    )
138,065
Creates a new meta optimizer instance.

Args:
    optimizer: The optimizer which is modified by this meta optimizer.

def __init__(self, optimizer, scope='meta-optimizer', summary_labels=(), **kwargs):
    self.optimizer = Optimizer.from_spec(spec=optimizer, kwargs=kwargs)
    super(MetaOptimizer, self).__init__(scope=scope, summary_labels=summary_labels)
138,067
Observe experience from the environment to learn from. Optionally pre-processes rewards.
Child classes should call super to get the processed reward, e.g. terminal, reward = super()...

Args:
    terminal (bool): Boolean indicating if the episode terminated after the observation.
    reward (float): Scalar reward that resulted from executing the action.

def observe(self, terminal, reward, index=0):
    self.current_terminal = terminal
    self.current_reward = reward

    if self.batched_observe:
        # Batched observe for better performance with Python.
        self.observe_terminal[index].append(self.current_terminal)
        self.observe_reward[index].append(self.current_reward)

        if self.current_terminal or len(self.observe_terminal[index]) >= self.batching_capacity:
            self.episode = self.model.observe(
                terminal=self.observe_terminal[index],
                reward=self.observe_reward[index],
                index=index
            )
            self.observe_terminal[index] = list()
            self.observe_reward[index] = list()
    else:
        self.episode = self.model.observe(
            terminal=self.current_terminal,
            reward=self.current_reward
        )
138,071
Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is restored. If no checkpoint directory is given, the model's default saver directory is used (unless file specifies the entire path).

Args:
    directory: Optional checkpoint directory.
    file: Optional checkpoint file, or path if directory not given.

def restore_model(self, directory=None, file=None):
    self.model.restore(directory=directory, file=file)
138,075
Initialize OpenSimulator environment.

Args:
    env_id: Environment id to use ([0: Arm2DEnv, 1: L2RunEnv, 2: ProstheticsEnv]).
    visualize: Render the environment.

def __init__(self, env_id, visualize=False):
    envs = [Arm2DEnv, L2RunEnv, ProstheticsEnv]
    self.env = envs[env_id](visualize=visualize)
    self.state_shape = len(self.env.reset())
    self.num_actions = len(self.env.action_space.sample())
138,077
Single-stack layered network.

Args:
    layers: List of layer specification dicts.

def __init__(self, layers, scope='layered-network', summary_labels=()):
    self.layers_spec = layers
    super(LayeredNetwork, self).__init__(scope=scope, summary_labels=summary_labels)
    self.parse_layer_spec(layer_spec=self.layers_spec, layer_counter=Counter())
138,097
Update internal priority sums when a leaf priority has been changed.

Args:
    index: Leaf node index.
    delta: Change in priority.

def _update_internal_nodes(self, index, delta):
    # Move up the tree toward the root, updating each ancestor's sum.
    while index > 0:
        index = (index - 1) // 2
        self._memory[index] += delta
138,104
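For intuition, a minimal self-contained sketch of the same parent-index arithmetic on a flat array-backed sum tree (array layout assumed here: internal nodes first, leaves last; values illustrative):

# Leaves hold priorities, internal nodes hold sums; with node i stored at
# array index i, the parent of i is (i - 1) // 2.
tree = [10.0, 6.0, 4.0, 1.0, 5.0, 3.0, 1.0]  # indices 3..6 are leaves

def update_leaf(tree, index, new_priority):
    delta = new_priority - tree[index]
    tree[index] = new_priority
    while index > 0:                 # propagate the change up to the root
        index = (index - 1) // 2
        tree[index] += delta

update_leaf(tree, 4, 7.0)            # leaf 4: 5.0 -> 7.0
assert tree[1] == 8.0 and tree[0] == 12.0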
Samples a batch of the specified size according to priority.

Args:
    batch_size: The batch size.
    next_states: A boolean flag indicating whether 'next_states' values should be included.

Returns:
    A dict containing states, actions, rewards, terminals, internal states (and next states).

def get_batch(self, batch_size, next_states=False):
    if batch_size > len(self.observations):
        raise TensorForceError(
            "Requested batch size is larger than observations in memory: increase config.first_update."
        )

    # Init empty states
    states = {
        name: np.zeros((batch_size,) + tuple(state['shape']), dtype=util.np_dtype(state['type']))
        for name, state in self.states_spec.items()
    }
    internals = [np.zeros((batch_size,) + shape, dtype) for shape, dtype in self.internals_spec]
    actions = {
        name: np.zeros((batch_size,) + tuple(action['shape']), dtype=util.np_dtype(action['type']))
        for name, action in self.actions_spec.items()
    }
    terminal = np.zeros((batch_size,), dtype=util.np_dtype('bool'))
    reward = np.zeros((batch_size,), dtype=util.np_dtype('float'))
    if next_states:
        next_states = {
            name: np.zeros((batch_size,) + tuple(state['shape']), dtype=util.np_dtype(state['type']))
            for name, state in self.states_spec.items()
        }
        next_internals = [np.zeros((batch_size,) + shape, dtype) for shape, dtype in self.internals_spec]

    # Start with unseen observations
    unseen_indices = list(xrange(
        self.none_priority_index + self.observations._capacity - 1,
        len(self.observations) + self.observations._capacity - 1
    ))
    self.batch_indices = unseen_indices[:batch_size]

    # Get remaining observations using weighted sampling
    remaining = batch_size - len(self.batch_indices)
    if remaining:
        samples = self.observations.sample_minibatch(remaining)
        sample_indices = [i for i, o in samples]
        self.batch_indices += sample_indices

    # Shuffle
    np.random.shuffle(self.batch_indices)

    # Collect observations
    for n, index in enumerate(self.batch_indices):
        observation, _ = self.observations._memory[index]

        for name, state in states.items():
            state[n] = observation[0][name]
        for k, internal in enumerate(internals):
            internal[n] = observation[1][k]
        for name, action in actions.items():
            action[n] = observation[2][name]
        terminal[n] = observation[3]
        reward[n] = observation[4]
        if next_states:
            for name, next_state in next_states.items():
                next_state[n] = observation[5][name]
            for k, next_internal in enumerate(next_internals):
                next_internal[n] = observation[6][k]

    if next_states:
        return dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward,
            next_states=next_states,
            next_internals=next_internals
        )
    else:
        return dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )
138,111
Computes priorities according to loss.

Args:
    loss_per_instance: Per-instance loss values from the last update.

def update_batch(self, loss_per_instance):
    if self.batch_indices is None:
        raise TensorForceError("Need to call get_batch before each update_batch call.")
    # if len(loss_per_instance) != len(self.batch_indices):
    #     raise TensorForceError("For all instances a loss value has to be provided.")

    for index, loss in zip(self.batch_indices, loss_per_instance):
        # Sampling priority is proportional to the largest absolute temporal difference error.
        new_priority = (np.abs(loss) + self.prioritization_constant) ** self.prioritization_weight
        self.observations._move(index, new_priority)
        self.none_priority_index += 1
138,112
Imports experiences.

Args:
    experiences: Experience dict, or list of per-timestep experience dicts.

def import_experience(self, experiences):
    if isinstance(experiences, dict):
        if self.unique_state:
            experiences['states'] = dict(state=experiences['states'])
        if self.unique_action:
            experiences['actions'] = dict(action=experiences['actions'])

        self.model.import_experience(**experiences)

    else:
        if self.unique_state:
            states = dict(state=list())
        else:
            states = {name: list() for name in experiences[0]['states']}
        internals = [list() for _ in experiences[0]['internals']]
        if self.unique_action:
            actions = dict(action=list())
        else:
            actions = {name: list() for name in experiences[0]['actions']}
        terminal = list()
        reward = list()

        for experience in experiences:
            if self.unique_state:
                states['state'].append(experience['states'])
            else:
                for name in sorted(states):
                    states[name].append(experience['states'][name])
            for n, internal in enumerate(internals):
                internal.append(experience['internals'][n])
            if self.unique_action:
                actions['action'].append(experience['actions'])
            else:
                for name in sorted(actions):
                    actions[name].append(experience['actions'][name])
            terminal.append(experience['terminal'])
            reward.append(experience['reward'])

        self.model.import_experience(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )
138,115
A remote Environment that one can connect to through tcp. Implements a simple msgpack protocol to get the step/reset/etc. commands to the remote server and simply waits (blocks) for a response.

Args:
    host (str): The hostname to connect to.
    port (int): The port to connect to.

def __init__(self, host="localhost", port=6025):
    Environment.__init__(self)
    self.port = int(port) or 6025
    self.host = host or "localhost"
    self.socket = None
    # The size of the response buffer (depends on the Env's observation-space).
    self.buffer_size = 8192
    # Cache the last received observation (through socket) here.
    self.last_observation = None
138,116
Starts the server tcp connection on the given host:port.

Args:
    timeout (int): The time (in seconds) for which we will attempt a connection to the remote (every 5sec). After that (or if timeout is None or 0), an error is raised.

def connect(self, timeout=600):
    # If we are already connected, return error.
    if self.socket:
        raise TensorForceError(
            "Already connected to {}:{}. Only one connection allowed at a time. "
            "Close first by calling `close`!".format(self.host, self.port)
        )
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    if timeout is None or timeout < 5:
        timeout = 5

    err = 0
    start_time = time.time()
    while time.time() - start_time < timeout:
        self.socket.settimeout(5)
        err = self.socket.connect_ex((self.host, self.port))
        if err == 0:
            break
        time.sleep(1)
    if err != 0:
        raise TensorForceError(
            "Error when trying to connect to {}:{}: errno={} errcode='{}' '{}'".format(
                self.host, self.port, err, errno.errorcode[err], os.strerror(err)
            )
        )
138,117
Sends a message (dict) to the socket. The message consists of an 8-byte length header followed by a msgpack-numpy encoded dict.

Args:
    message: The message dict (e.g. {"cmd": "reset"}).
    socket_: The python socket object to use.

def send(self, message, socket_):
    if not socket_:
        raise TensorForceError("No socket given in call to `send`!")
    elif not isinstance(message, dict):
        raise TensorForceError("Message to be sent must be a dict!")
    message = msgpack.packb(message)
    len_ = len(message)
    # prepend 8-byte len field to all our messages
    socket_.send(bytes("{:08d}".format(len_), encoding="ascii") + message)
138,119
Receives a message as a msgpack-numpy encoded byte-string from the given socket object. Blocks until something was received.

Args:
    socket_: The python socket object to use.
    encoding (str): The encoding to use for unpacking messages from the socket.

Returns:
    The decoded (as dict) message received.

def recv(self, socket_, encoding=None):
    unpacker = msgpack.Unpacker(encoding=encoding)

    # Wait for an immediate response.
    response = socket_.recv(8)  # get the length of the message
    if response == b"":
        raise TensorForceError("No data received by socket.recv in call to method `recv` " +
                               "(listener possibly closed)!")
    orig_len = int(response)
    received_len = 0
    while True:
        data = socket_.recv(min(orig_len - received_len, self.max_msg_len))
        # There must be a response.
        if not data:
            raise TensorForceError("No data of len {} received by socket.recv in call to method `recv`!".
                                   format(orig_len - received_len))
        data_len = len(data)
        received_len += data_len
        unpacker.feed(data)

        if received_len == orig_len:
            break

    # Get the data.
    for message in unpacker:
        sts = message.get("status", message.get(b"status"))
        if sts:
            if sts == "ok" or sts == b"ok":
                return message
            else:
                raise TensorForceError("RemoteEnvironment server error: {}".
                                       format(message.get("message", "not specified")))
        else:
            raise TensorForceError("Message without field 'status' received!")
    raise TensorForceError("No message encoded in data stream (data stream had len={})".
                           format(orig_len))
138,120
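To make the framing concrete, a hypothetical client-side round trip using the methods above (the "cmd"/"reset" payload follows the example in the send() docstring; everything else is illustrative):

# Hypothetical usage of the RemoteEnvironment socket protocol sketched above.
env = RemoteEnvironment(host="localhost", port=6025)
env.connect(timeout=60)

# Every message is a msgpack-encoded dict, prefixed with an 8-byte ASCII length field.
env.send({"cmd": "reset"}, env.socket)

# Blocks until a complete, length-delimited response arrives; raises unless status == "ok".
response = env.recv(env.socket)
print(response)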
Init the Game object.

Args:
    state: Shape (4, 4) numpy array to initialize the state with. If None, the state will be initialized with two random tiles (as done in the original game).
    initial_score: Score to initialize the Game with.

def __init__(self, state=None, initial_score=0):
    self._score = initial_score

    if state is None:
        self._state = np.zeros((4, 4), dtype=np.int)
        self.add_random_tile()
        self.add_random_tile()
    else:
        self._state = state
138,122
Allows child models to create the model's component objects, such as optimizer(s), memory(s), etc. Creates all tensorflow functions via tf.make_template calls on all the class' "tf_"-methods.

Args:
    custom_getter: The `custom_getter_` object to use for `tf.make_template` when creating TensorFlow functions. If None, use a default custom_getter_.

Returns:
    The custom_getter passed in (or a default one if custom_getter was None).

def setup_components_and_tf_funcs(self, custom_getter=None):
    if custom_getter is None:
        def custom_getter(getter, name, registered=False, **kwargs):
            if registered:
                self.registered_variables.add(name)
            elif name in self.registered_variables:
                registered = True
            # Top-level, hence no 'registered' argument.
            variable = getter(name=name, **kwargs)
            if registered:
                pass
            elif name in self.all_variables:
                assert variable is self.all_variables[name]
                if kwargs.get('trainable', True):
                    assert variable is self.variables[name]
                    if 'variables' in self.summary_labels:
                        tf.contrib.summary.histogram(name=name, tensor=variable)
            else:
                self.all_variables[name] = variable
                if kwargs.get('trainable', True):
                    self.variables[name] = variable
                    if 'variables' in self.summary_labels:
                        tf.contrib.summary.histogram(name=name, tensor=variable)
            return variable

    self.fn_initialize = tf.make_template(
        name_='initialize',
        func_=self.tf_initialize,
        custom_getter_=custom_getter
    )
    self.fn_preprocess = tf.make_template(
        name_='preprocess',
        func_=self.tf_preprocess,
        custom_getter_=custom_getter
    )
    self.fn_actions_and_internals = tf.make_template(
        name_='actions-and-internals',
        func_=self.tf_actions_and_internals,
        custom_getter_=custom_getter
    )
    self.fn_observe_timestep = tf.make_template(
        name_='observe-timestep',
        func_=self.tf_observe_timestep,
        custom_getter_=custom_getter
    )
    self.fn_action_exploration = tf.make_template(
        name_='action-exploration',
        func_=self.tf_action_exploration,
        custom_getter_=custom_getter
    )

    return custom_getter
138,136
Creates and then enters the session for this model (finalizes the graph).

Args:
    server (tf.train.Server): The tf.train.Server object to connect to (None for single execution).
    hooks (list): A list of (saver, summary, etc.) hooks to be passed to the session.
    graph_default_context: The graph as_default() context that we are currently in.

def setup_session(self, server, hooks, graph_default_context):
    if self.execution_type == "distributed":
        # if self.distributed_spec['task_index'] == 0:
        # TensorFlow chief session creator object
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=self.scaffold,
            master=server.target,
            config=self.session_config,
            checkpoint_dir=None,
            checkpoint_filename_with_path=None
        )
        # else:
        #     # TensorFlow worker session creator object
        #     session_creator = tf.train.WorkerSessionCreator(
        #         scaffold=self.scaffold,
        #         master=server.target,
        #         config=self.execution_spec.get('session_config'),
        #     )

        # TensorFlow monitored session object
        self.monitored_session = tf.train.MonitoredSession(
            session_creator=session_creator,
            hooks=hooks,
            stop_grace_period_secs=120  # Default value.
        )
        # Add debug session.run dumping?
        if self.tf_session_dump_dir != "":
            self.monitored_session = DumpingDebugWrapperSession(self.monitored_session, self.tf_session_dump_dir)
    else:
        # TensorFlow non-distributed monitored session object
        self.monitored_session = tf.train.SingularMonitoredSession(
            hooks=hooks,
            scaffold=self.scaffold,
            master='',  # Default value.
            config=self.session_config,  # self.execution_spec.get('session_config'),
            checkpoint_dir=None
        )

    if graph_default_context:
        graph_default_context.__exit__(None, None, None)
    self.graph.finalize()

    # enter the session to be ready for acting/learning
    self.monitored_session.__enter__()
    self.session = self.monitored_session._tf_sess()
138,140
Applies preprocessing ops to the raw states/action/reward inputs.

Args:
    states (dict): Dict of raw state tensors.
    actions (dict): Dict of raw action tensors.
    reward: 1D (float) raw rewards tensor.

Returns:
    The preprocessed versions of the input tensors.

def tf_preprocess(self, states, actions, reward):
    # States preprocessing
    for name in sorted(self.states_preprocessing):
        states[name] = self.states_preprocessing[name].process(tensor=states[name])

    # Reward preprocessing
    if self.reward_preprocessing is not None:
        reward = self.reward_preprocessing.process(tensor=reward)

    return states, actions, reward
138,143
Applies optional exploration to the action (post-processor for action outputs).

Args:
    action (tf.Tensor): The original output action tensor (to be post-processed).
    exploration (Exploration): The Exploration object to use.
    action_spec (dict): Dict specifying the action space.

Returns:
    The post-processed action output tensor.

def tf_action_exploration(self, action, exploration, action_spec):
    action_shape = tf.shape(input=action)
    exploration_value = exploration.tf_explore(
        episode=self.global_episode,
        timestep=self.global_timestep,
        shape=action_spec['shape']
    )
    exploration_value = tf.expand_dims(input=exploration_value, axis=0)

    if action_spec['type'] == 'bool':
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=(tf.random_uniform(shape=action_shape) < 0.5),
            y=action
        )

    elif action_spec['type'] == 'int':
        action = tf.where(
            condition=(tf.random_uniform(shape=action_shape) < exploration_value),
            x=tf.random_uniform(shape=action_shape, maxval=action_spec['num_actions'], dtype=util.tf_dtype('int')),
            y=action
        )

    elif action_spec['type'] == 'float':
        noise = tf.random_normal(shape=action_shape, dtype=util.tf_dtype('float'))
        action += noise * exploration_value
        if 'min_value' in action_spec:
            action = tf.clip_by_value(
                t=action,
                clip_value_min=action_spec['min_value'],
                clip_value_max=action_spec['max_value']
            )

    return action
138,144
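The 'int' branch above is epsilon-style exploration: with probability equal to the exploration value, the chosen action is replaced by a uniformly random one. A plain-NumPy sketch of the same idea (shapes and values chosen here for illustration, not part of the source):

import numpy as np

rng = np.random.default_rng(0)
actions = np.array([2, 0, 3, 1])      # greedy actions for a batch of 4
epsilon = 0.1                          # current exploration value
num_actions = 4

replace = rng.random(actions.shape) < epsilon          # which entries to re-draw
random_actions = rng.integers(0, num_actions, size=actions.shape)
explored = np.where(replace, random_actions, actions)  # mirrors the tf.where call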
Returns the TensorFlow variables used by the model.

Args:
    include_submodules: Includes variables of submodules (e.g. baseline, target network) if true.
    include_nontrainable: Includes non-trainable variables if true.

Returns:
    List of variables.

def get_variables(self, include_submodules=False, include_nontrainable=False):
    if include_nontrainable:
        model_variables = [self.all_variables[key] for key in sorted(self.all_variables)]

        states_preprocessing_variables = [
            variable for name in sorted(self.states_preprocessing)
            for variable in self.states_preprocessing[name].get_variables()
        ]
        model_variables += states_preprocessing_variables

        actions_exploration_variables = [
            variable for name in sorted(self.actions_exploration)
            for variable in self.actions_exploration[name].get_variables()
        ]
        model_variables += actions_exploration_variables

        if self.reward_preprocessing is not None:
            reward_preprocessing_variables = self.reward_preprocessing.get_variables()
            model_variables += reward_preprocessing_variables
    else:
        model_variables = [self.variables[key] for key in sorted(self.variables)]

    return model_variables
138,149
Adds an observation (reward and is-terminal) to the model without updating its trainable variables.

Args:
    terminal (List[bool]): List of is-terminal signals.
    reward (List[float]): List of reward signals.
    index (int): Parallel episode you want to observe.

Returns:
    The value of the model-internal episode counter.

def observe(self, terminal, reward, index=0):
    fetches = self.episode_output
    feed_dict = self.get_feed_dict(terminal=terminal, reward=reward, index=index)

    episode = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)
    return episode
138,153
Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is restored. If no checkpoint directory is given, the model's default saver directory is used (unless file specifies the entire path).

Args:
    directory: Optional checkpoint directory.
    file: Optional checkpoint file, or path if directory not given.

def restore(self, directory=None, file=None):
    if file is None:
        file = tf.train.latest_checkpoint(
            checkpoint_dir=(self.saver_directory if directory is None else directory),
            # latest_filename=None  # Corresponds to argument of saver.save() in Model.save().
        )
    elif directory is None:
        file = os.path.join(self.saver_directory, file)
    elif not os.path.isfile(file):
        file = os.path.join(directory, file)
    # if not os.path.isfile(file):
    #     raise TensorForceError("Invalid model directory/file.")

    self.saver.restore(sess=self.session, save_path=file)
    self.session.run(fetches=self.list_buffer_index_reset_op)
138,156
Saves a component of this model to the designated location.

Args:
    component_name: The component to save.
    save_path: The location to save to.

Returns:
    Checkpoint path where the component was saved.

def save_component(self, component_name, save_path):
    component = self.get_component(component_name=component_name)
    self._validate_savable(component=component, component_name=component_name)
    return component.save(sess=self.session, save_path=save_path)
138,159
Restores a component's parameters from a save location.

Args:
    component_name: The component to restore.
    save_path: The save location.

def restore_component(self, component_name, save_path):
    component = self.get_component(component_name=component_name)
    self._validate_savable(component=component, component_name=component_name)
    component.restore(sess=self.session, save_path=save_path)
138,160
Looks up a component by its name.

Args:
    component_name: The name of the component to look up.

Returns:
    The component for the provided name, or None if there is no such component.

def get_component(self, component_name):
    mapping = self.get_components()
    return mapping[component_name] if component_name in mapping else None
138,161
Imports demonstrations, i.e. expert observations. Note that for large numbers of observations, set_demonstrations is more appropriate; it directly sets memory contents from an array and expects a different layout.

Args:
    demonstrations: List of observation dicts.

def import_demonstrations(self, demonstrations):
    if isinstance(demonstrations, dict):
        if self.unique_state:
            demonstrations['states'] = dict(state=demonstrations['states'])
        if self.unique_action:
            demonstrations['actions'] = dict(action=demonstrations['actions'])

        self.model.import_demo_experience(**demonstrations)

    else:
        if self.unique_state:
            states = dict(state=list())
        else:
            states = {name: list() for name in demonstrations[0]['states']}
        internals = {name: list() for name in demonstrations[0]['internals']}
        if self.unique_action:
            actions = dict(action=list())
        else:
            actions = {name: list() for name in demonstrations[0]['actions']}
        terminal = list()
        reward = list()

        for demonstration in demonstrations:
            if self.unique_state:
                states['state'].append(demonstration['states'])
            else:
                for name, state in states.items():
                    state.append(demonstration['states'][name])
            for name, internal in internals.items():
                internal.append(demonstration['internals'][name])
            if self.unique_action:
                actions['action'].append(demonstration['actions'])
            else:
                for name, action in actions.items():
                    action.append(demonstration['actions'][name])
            terminal.append(demonstration['terminal'])
            reward.append(demonstration['reward'])

        self.model.import_demo_experience(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )
138,164
Sets the random seed of the environment to the given value (current time, if seed=None). Naturally deterministic Environments (e.g. ALE or some gym Envs) don't have to implement this method.

Args:
    seed (int): The seed to use for initializing the pseudo-random number generator (default=epoch time in sec).

Returns:
    The actual seed (int) used, OR None if the Environment did not override this method (no seeding supported).

def seed(self, seed):  # pylint: disable=E0202
    if seed is None:
        self.env.seed = round(time.time())
    else:
        self.env.seed = seed
    return self.env.seed
138,166
Executes action, observes next state and reward.

Args:
    actions: Action to execute.

Returns:
    (Dict of) next state(s), boolean indicating terminal, and reward signal.

def execute(self, action):
    if self.env.game_over():
        return self.env.getScreenRGB(), True, 0

    action_space = self.env.getActionSet()
    reward = self.env.act(action_space[action])
    new_state = self.env.getScreenRGB()
    done = self.env.game_over()
    return new_state, done, reward
138,167
Sanity checks a states dict, used to define the state space for an MDP. Throws an error or warns if mismatches are found.

Args:
    states_spec (Union[None,dict]): The spec-dict to check (or None).

Returns:
    Tuple of 1) the state space desc and 2) whether there is only one component in the state space.

def sanity_check_states(states_spec):
    # Leave incoming states dict intact.
    states = copy.deepcopy(states_spec)

    # Unique state shortform.
    is_unique = ('shape' in states)
    if is_unique:
        states = dict(state=states)

    # Normalize states.
    for name, state in states.items():
        # Convert int to unary tuple.
        if isinstance(state['shape'], int):
            state['shape'] = (state['shape'],)

        # Set default type to float.
        if 'type' not in state:
            state['type'] = 'float'

    return states, is_unique
138,171
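A small illustrative call showing the shorthand normalization above (values chosen here):

# A single-component shorthand spec ...
spec = dict(shape=4)
states, is_unique = sanity_check_states(spec)
# ... is wrapped under the default name 'state', the int shape becomes a
# unary tuple, and the missing type defaults to 'float'.
assert is_unique
assert states == {'state': {'shape': (4,), 'type': 'float'}}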
Sanity checks an actions dict, used to define the action space for an MDP. Throws an error or warns if mismatches are found.

Args:
    actions_spec (Union[None,dict]): The spec-dict to check (or None).

Returns:
    Tuple of 1) the action space desc and 2) whether there is only one component in the action space.

def sanity_check_actions(actions_spec):
    # Leave incoming spec-dict intact.
    actions = copy.deepcopy(actions_spec)

    # Unique action shortform.
    is_unique = ('type' in actions)
    if is_unique:
        actions = dict(action=actions)

    # Normalize actions.
    for name, action in actions.items():
        # Set default type to int
        if 'type' not in action:
            action['type'] = 'int'

        # Check required values
        if action['type'] == 'int':
            if 'num_actions' not in action:
                raise TensorForceError("Action requires value 'num_actions' set!")
        elif action['type'] == 'float':
            if ('min_value' in action) != ('max_value' in action):
                raise TensorForceError("Action requires both values 'min_value' and 'max_value' set!")

        # Set default shape to empty tuple (single-int, discrete action space)
        if 'shape' not in action:
            action['shape'] = ()

        # Convert int to unary tuple
        if isinstance(action['shape'], int):
            action['shape'] = (action['shape'],)

    return actions, is_unique
138,172
Initialize MazeExplorer.

Args:
    mode_id: Game mode ID. See https://github.com/mryellow/maze_explorer
    visible: Show output window.

def __init__(self, mode_id=0, visible=True):
    self.mode_id = int(mode_id)
    # Might raise gym.error.UnregisteredEnv or gym.error.DeprecatedEnv
    self.engine = mx.MazeExplorer(mode_id, visible)
138,188
Resets the Runner's internal stats counters. If history is empty, use default values in history.get().

Args:
    history (dict): A dictionary containing an already run experiment's results. Keys should be: episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times).

def reset(self, history=None):
    if not history:
        history = dict()

    self.episode_rewards = history.get("episode_rewards", list())
    self.episode_timesteps = history.get("episode_timesteps", list())
    self.episode_times = history.get("episode_times", list())
138,195
Initialize ViZDoom environment.

Args:
    config_file: .cfg file path, which defines how a world works and looks like (maps).

def __init__(self, config_file):
    self.game = DoomGame()

    # load configurations from file
    self.game.load_config(config_file)
    self.game.init()

    self.state_shape = self.featurize(self.game.get_state()).shape
    self.num_actions = len(self.game.get_available_buttons())
138,197
Fetches experiences for given indices by combining entries from the buffer, which have no priorities, and entries from the priority memory.

Args:
    buffer_elements: Number of buffer elements to retrieve.
    priority_indices: Index tensor for the priority memory.

Returns:
    Batch of experiences.

def tf_retrieve_indices(self, buffer_elements, priority_indices):
    states = dict()
    buffer_start = self.buffer_index - buffer_elements
    buffer_end = self.buffer_index

    # Fetch entries from respective memories, concat.
    for name in sorted(self.states_memory):
        buffer_state_memory = self.states_buffer[name]
        # Slicing is more efficient than gathering, and buffer elements are always
        # fetched using contiguous indices.
        buffer_states = buffer_state_memory[buffer_start:buffer_end]
        # Memory indices are obtained via priority sampling, hence require gather.
        memory_states = tf.gather(params=self.states_memory[name], indices=priority_indices)
        states[name] = tf.concat(values=(buffer_states, memory_states), axis=0)

    internals = dict()
    for name in sorted(self.internals_memory):
        internal_buffer_memory = self.internals_buffer[name]
        buffer_internals = internal_buffer_memory[buffer_start:buffer_end]
        memory_internals = tf.gather(params=self.internals_memory[name], indices=priority_indices)
        internals[name] = tf.concat(values=(buffer_internals, memory_internals), axis=0)

    actions = dict()
    for name in sorted(self.actions_memory):
        action_buffer_memory = self.actions_buffer[name]
        buffer_action = action_buffer_memory[buffer_start:buffer_end]
        memory_action = tf.gather(params=self.actions_memory[name], indices=priority_indices)
        actions[name] = tf.concat(values=(buffer_action, memory_action), axis=0)

    buffer_terminal = self.terminal_buffer[buffer_start:buffer_end]
    priority_terminal = tf.gather(params=self.terminal_memory, indices=priority_indices)
    terminal = tf.concat(values=(buffer_terminal, priority_terminal), axis=0)

    buffer_reward = self.reward_buffer[buffer_start:buffer_end]
    priority_reward = tf.gather(params=self.reward_memory, indices=priority_indices)
    reward = tf.concat(values=(buffer_reward, priority_reward), axis=0)

    if self.include_next_states:
        assert util.rank(priority_indices) == 1
        next_priority_indices = (priority_indices + 1) % self.capacity
        next_buffer_start = (buffer_start + 1) % self.buffer_size
        next_buffer_end = (buffer_end + 1) % self.buffer_size

        next_states = dict()
        for name in sorted(self.states_memory):
            buffer_state_memory = self.states_buffer[name]
            buffer_next_states = buffer_state_memory[next_buffer_start:next_buffer_end]
            memory_next_states = tf.gather(params=self.states_memory[name], indices=next_priority_indices)
            next_states[name] = tf.concat(values=(buffer_next_states, memory_next_states), axis=0)

        next_internals = dict()
        for name in sorted(self.internals_memory):
            buffer_internal_memory = self.internals_buffer[name]
            buffer_next_internals = buffer_internal_memory[next_buffer_start:next_buffer_end]
            memory_next_internals = tf.gather(params=self.internals_memory[name], indices=next_priority_indices)
            next_internals[name] = tf.concat(values=(buffer_next_internals, memory_next_internals), axis=0)

        return dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward,
            next_states=next_states,
            next_internals=next_internals
        )
    else:
        return dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )
138,207
Gaussian distribution.

Args:
    shape: Action shape.
    mean: Optional distribution bias for the mean.
    log_stddev: Optional distribution bias for the standard deviation.

def __init__(self, shape, mean=0.0, log_stddev=0.0, scope='gaussian', summary_labels=()):
    self.shape = shape
    action_size = util.prod(self.shape)
    self.mean = Linear(size=action_size, bias=mean, scope='mean', summary_labels=summary_labels)
    self.log_stddev = Linear(size=action_size, bias=log_stddev, scope='log-stddev', summary_labels=summary_labels)
    super(Gaussian, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)
138,209
Creates a new subsampling-step meta optimizer instance.

Args:
    optimizer: The optimizer which is modified by this meta optimizer.
    fraction: The fraction of instances of the batch to subsample.

def __init__(self, optimizer, fraction=0.1, scope='subsampling-step', summary_labels=()):
    assert isinstance(fraction, float) and fraction > 0.0
    self.fraction = fraction
    super(SubsamplingStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
138,232
Creates the TensorFlow operations for performing an optimization step.

Args:
    time: Time tensor.
    variables: List of variables to optimize.
    arguments: Dict of arguments for callables, like fn_loss.
    **kwargs: Additional arguments passed on to the internal optimizer.

Returns:
    List of delta tensors corresponding to the updates for each optimized variable.

def tf_step(
    self,
    time,
    variables,
    arguments,
    **kwargs
):
    # Get some (batched) argument to determine batch size.
    arguments_iter = iter(arguments.values())
    some_argument = next(arguments_iter)

    try:
        while not isinstance(some_argument, tf.Tensor) or util.rank(some_argument) == 0:
            if isinstance(some_argument, dict):
                if some_argument:
                    arguments_iter = iter(some_argument.values())
                some_argument = next(arguments_iter)
            elif isinstance(some_argument, list):
                if some_argument:
                    arguments_iter = iter(some_argument)
                some_argument = next(arguments_iter)
            elif some_argument is None or util.rank(some_argument) == 0:
                # Non-batched argument
                some_argument = next(arguments_iter)
            else:
                raise TensorForceError("Invalid argument type.")
    except StopIteration:
        raise TensorForceError("Invalid argument type.")

    batch_size = tf.shape(input=some_argument)[0]
    num_samples = tf.cast(
        x=(self.fraction * tf.cast(x=batch_size, dtype=util.tf_dtype('float'))),
        dtype=util.tf_dtype('int')
    )
    num_samples = tf.maximum(x=num_samples, y=1)
    indices = tf.random_uniform(shape=(num_samples,), maxval=batch_size, dtype=tf.int32)

    subsampled_arguments = util.map_tensors(
        fn=(lambda arg: arg if util.rank(arg) == 0 else tf.gather(params=arg, indices=indices)),
        tensors=arguments
    )

    return self.optimizer.step(
        time=time,
        variables=variables,
        arguments=subsampled_arguments,
        **kwargs
    )
138,233
Initialize a single Runner object (one Agent/one Environment).

Args:
    id_ (int): The ID of this Runner (for distributed TF runs).

def __init__(self, agent, environment, repeat_actions=1, history=None, id_=0):
    super(Runner, self).__init__(agent, environment, repeat_actions, history)
    self.id = id_  # the worker's ID in a distributed run (default=0)
    self.current_timestep = None
138,243
Queue memory.

Args:
    capacity: Memory capacity.

def __init__(self, states, internals, actions, include_next_states, capacity, scope='queue', summary_labels=None):
    self.capacity = capacity
    self.scope = scope

    # Pieces of the records are stored in different tensors:
    self.states_memory = dict()     # keys=state space components
    self.internals_memory = dict()  # keys=internal state components
    self.actions_memory = dict()    # keys=action space components
    self.terminal_memory = None     # 1D tensor
    self.reward_memory = None       # 1D tensor
    self.memory_index = None        # 0D (int) tensor (points to the next record to be overwritten)
    self.episode_indices = None     # 1D tensor of indexes where episodes start.
    self.episode_count = None       # 0D (int) tensor: How many episodes do we have stored?

    self.retrieve_indices = None

    super(Queue, self).__init__(
        states=states,
        internals=internals,
        actions=actions,
        include_next_states=include_next_states,
        scope=scope,
        summary_labels=summary_labels
    )
138,245
Fetches experiences for given indices.

Args:
    indices: Index tensor.

Returns:
    Batch of experiences.

def tf_retrieve_indices(self, indices):
    states = dict()
    for name in sorted(self.states_memory):
        states[name] = tf.gather(params=self.states_memory[name], indices=indices)

    internals = dict()
    for name in sorted(self.internals_memory):
        internals[name] = tf.gather(params=self.internals_memory[name], indices=indices)

    actions = dict()
    for name in sorted(self.actions_memory):
        actions[name] = tf.gather(params=self.actions_memory[name], indices=indices)

    terminal = tf.gather(params=self.terminal_memory, indices=indices)
    reward = tf.gather(params=self.reward_memory, indices=indices)

    if self.include_next_states:
        assert util.rank(indices) == 1
        next_indices = (indices + 1) % self.capacity

        next_states = dict()
        for name in sorted(self.states_memory):
            next_states[name] = tf.gather(params=self.states_memory[name], indices=next_indices)

        next_internals = dict()
        for name in sorted(self.internals_memory):
            next_internals[name] = tf.gather(params=self.internals_memory[name], indices=next_indices)

        return dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward,
            next_states=next_states,
            next_internals=next_internals
        )
    else:
        return dict(
            states=states,
            internals=internals,
            actions=actions,
            terminal=terminal,
            reward=reward
        )
138,248
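The (indices + 1) % capacity step above is how next states are read from the circular buffer without storing them twice. A plain-NumPy sketch of the same indexing (values illustrative):

import numpy as np

capacity = 5
states = np.arange(capacity) * 10        # records 0, 10, 20, 30, 40 in insertion order
indices = np.array([1, 4])               # sampled records

next_indices = (indices + 1) % capacity  # wraps 4 -> 0 at the buffer boundary
batch = states[indices]                  # [10, 40]
next_batch = states[next_indices]        # [20,  0]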
Aggregated baseline.

Args:
    baselines: Dict of per-state baseline specification dicts.

def __init__(self, baselines, scope='aggregated-baseline', summary_labels=()):
    self.baselines = dict()
    for name in sorted(baselines):
        self.baselines[name] = Baseline.from_spec(
            spec=baselines[name],
            kwargs=dict(summary_labels=summary_labels))

    self.linear = Linear(size=1, bias=0.0, scope='prediction', summary_labels=summary_labels)

    super(AggregatedBaseline, self).__init__(scope, summary_labels)
138,249
Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$. Args: fn_x: A callable returning the value $f(x)$ at $x$. x_init: Initial solution guess $x_0$. base_value: Value $f(x')$ at $x = x'$. target_value: Value $f(x_0)$ at $x = x_0$. estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None. Returns: A solution $x$ to the problem as given by the solver.
def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):
    return super(LineSearch, self).tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)
138,254
Initialization step preparing the arguments for the first iteration of the loop body. Args: x_init: Initial solution guess $x_0$. base_value: Value $f(x')$ at $x = x'$. target_value: Value $f(x_0)$ at $x = x_0$. estimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None. Returns: Initial arguments for tf_step.
def tf_initialize(self, x_init, base_value, target_value, estimated_improvement):
    self.base_value = base_value

    if estimated_improvement is None:
        # TODO: Is this a good alternative?
        estimated_improvement = tf.abs(x=base_value)

    first_step = super(LineSearch, self).tf_initialize(x_init)

    improvement = tf.divide(
        x=(target_value - self.base_value),
        y=tf.maximum(x=estimated_improvement, y=util.epsilon)
    )

    last_improvement = improvement - 1.0

    if self.mode == 'linear':
        deltas = [-t * self.parameter for t in x_init]
        self.estimated_incr = -estimated_improvement * self.parameter

    elif self.mode == 'exponential':
        deltas = [-t * self.parameter for t in x_init]

    return first_step + (deltas, improvement, last_improvement, estimated_improvement)
138,255
Iteration loop body of the line search algorithm. Args: x: Current solution estimate $x_t$. iteration: Current iteration counter $t$. deltas: Current difference $x_t - x'$. improvement: Current improvement $(f(x_t) - f(x')) / v'$. last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$. estimated_improvement: Current estimated value $v'$. Returns: Updated arguments for next iteration.
def tf_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement):
    x, next_iteration, deltas, improvement, last_improvement, estimated_improvement = super(LineSearch, self).tf_step(
        x, iteration, deltas, improvement, last_improvement, estimated_improvement
    )

    next_x = [t + delta for t, delta in zip(x, deltas)]

    if self.mode == 'linear':
        next_deltas = deltas
        next_estimated_improvement = estimated_improvement + self.estimated_incr

    elif self.mode == 'exponential':
        next_deltas = [delta * self.parameter for delta in deltas]
        next_estimated_improvement = estimated_improvement * self.parameter

    target_value = self.fn_x(next_deltas)

    next_improvement = tf.divide(
        x=(target_value - self.base_value),
        y=tf.maximum(x=next_estimated_improvement, y=util.epsilon)
    )

    return next_x, next_iteration, next_deltas, next_improvement, improvement, next_estimated_improvement
138,256
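A plain-Python sketch of the quantities tf_step updates each iteration may help. It mirrors only the improvement ratio and the 'exponential' step shrinkage, not the superclass loop or stopping rule; the epsilon value and the parameter default are placeholders, not the library's own.

def improvement_ratio(target_value, base_value, estimated_improvement, eps=1e-6):
    # Same ratio tf_step computes: (f(x_t) - f(x')) / max(v', eps)
    return (target_value - base_value) / max(estimated_improvement, eps)

def exponential_update(deltas, estimated_improvement, parameter=0.5):
    # One 'exponential'-mode iteration: shrink the step and the estimate together.
    next_deltas = [delta * parameter for delta in deltas]
    next_estimated_improvement = estimated_improvement * parameter
    return next_deltas, next_estimated_improvement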
Initialize Pycolab environment. Args: game: Pycolab Game Engine object. See https://github.com/deepmind/pycolab/tree/master/pycolab/examples ui: Pycolab CursesUI object. See https://github.com/deepmind/pycolab/tree/master/pycolab/examples visualize: If True, visualize the game while training # TODO
def __init__(self, game, ui, visualize=False):
    self.game = game
    self.init_game = copy.deepcopy(self.game)
    self.ui = ui
    self.visualize = visualize

    first_obs, first_reward, _ = self.game.its_showtime()

    self._actions = DMPycolab.get_action_space(self.ui)
    self._states = DMPycolab.get_state_space(first_obs, self.ui._croppers)
138,322
Network baseline. Args: network: Network specification dict
def __init__(self, network, scope='network-baseline', summary_labels=()):
    self.network = Network.from_spec(
        spec=network,
        kwargs=dict(summary_labels=summary_labels)
    )
    assert len(self.network.internals_spec()) == 0

    self.linear = Linear(size=1, bias=0.0, scope='prediction', summary_labels=summary_labels)

    super(NetworkBaseline, self).__init__(scope=scope, summary_labels=summary_labels)
138,327
Init the MetaParameterRecord with "Agent" parameters by passing inspect.currentframe() from the Agent class. The init searches back through the outer frames to find the parent class and captures all passed parameters, storing them in "self.meta_params". NOTE: Currently only optimized for TensorBoard output. TODO: Add JSON export, text export. Args: current_frame: Frame value from the calling class used to obtain the meta parameters [= inspect.currentframe()]
def __init__(self, current_frame):
    self.ignore_unknown_dtypes = False
    self.meta_params = dict()
    self.method_calling = inspect.getframeinfo(current_frame)[2]

    _, _, __, self.vals_current = inspect.getargvalues(current_frame)
    # self is the class name of the frame involved
    if 'self' in self.vals_current:
        self.recorded_class_type = self.vals_current['self']
        # Add explicit AgentName item so class can be deleted
        self.meta_params['AgentName'] = str(self.vals_current['self'])

    frame_list = inspect.getouterframes(current_frame)

    for frame in frame_list:
        # Rather than frame.frame (named tuple), use [0] for python2.
        args, varargs, keywords, vals = inspect.getargvalues(frame[0])
        if 'self' in vals:
            if self.recorded_class_type == vals['self']:
                for i in args:
                    self.meta_params[i] = vals[i]

    # Remove the "CLASS" entry from the dictionary; it has no value. "AgentName" contains the STR of the class.
    del self.meta_params['self']
138,337
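The frame introspection this constructor relies on is standard-library behaviour. A small self-contained sketch (class and helper names are hypothetical) shows how inspect.getargvalues captures a caller's constructor arguments:

import inspect

def record_args(frame):
    # ArgInfo(args, varargs, keywords, locals) for the frame that was passed in
    arg_info = inspect.getargvalues(frame)
    return {name: arg_info.locals[name] for name in arg_info.args if name != 'self'}

class ExampleAgent(object):
    def __init__(self, learning_rate=0.001, batch_size=32):
        self.meta_params = record_args(inspect.currentframe())

print(ExampleAgent().meta_params)   # {'learning_rate': 0.001, 'batch_size': 32}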
Creates a new natural gradient optimizer instance. Args: learning_rate: Learning rate, i.e. KL-divergence of distributions between optimization steps. cg_max_iterations: Conjugate gradient solver max iterations. cg_damping: Conjugate gradient solver damping factor. cg_unroll_loop: Unroll conjugate gradient loop if true.
def __init__(
    self,
    learning_rate,
    cg_max_iterations=20,
    cg_damping=1e-3,
    cg_unroll_loop=False,
    scope='natural-gradient',
    summary_labels=()
):
    assert learning_rate > 0.0
    self.learning_rate = learning_rate

    self.solver = ConjugateGradient(
        max_iterations=cg_max_iterations,
        damping=cg_damping,
        unroll_loop=cg_unroll_loop
    )

    super(NaturalGradient, self).__init__(scope=scope, summary_labels=summary_labels)
138,350
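The ConjugateGradient solver referenced here solves a linear system F x = g (with Fisher-vector products on the left-hand side) without ever materializing F. A generic NumPy sketch of the algorithm, with damping and tolerance values chosen arbitrarily for illustration:

import numpy as np

def conjugate_gradient(A_dot, b, max_iterations=20, damping=1e-3, eps=1e-8):
    # Solve (A + damping*I) x = b; A_dot(v) returns the matrix-vector product A @ v.
    x = np.zeros_like(b)
    r = b.copy()                 # residual b - A x (x starts at zero)
    p = r.copy()                 # search direction
    rs_old = r.dot(r)
    for _ in range(max_iterations):
        Ap = A_dot(p) + damping * p
        alpha = rs_old / (p.dot(Ap) + eps)
        x += alpha * p
        r -= alpha * Ap
        rs_new = r.dot(r)
        if np.sqrt(rs_new) < eps:
            break
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    return x

# For a symmetric positive-definite A, conjugate_gradient(lambda v: A @ v, b)
# approximates np.linalg.solve(A + damping * np.eye(len(b)), b).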
Creates a new multi-step meta optimizer instance. Args: optimizer: The optimizer which is modified by this meta optimizer. num_steps: Number of optimization steps to perform.
def __init__(self, optimizer, num_steps=10, unroll_loop=False, scope='multi-step', summary_labels=()):
    assert isinstance(num_steps, int) and num_steps > 0
    self.num_steps = num_steps

    assert isinstance(unroll_loop, bool)
    self.unroll_loop = unroll_loop

    super(MultiStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
138,352
Creates the TensorFlow operations for calculating the baseline loss of a batch. Args: states: Dict of state tensors. internals: List of prior internal state tensors. reward: Reward tensor. update: Boolean tensor indicating whether this call happens during an update. reference: Optional reference tensor(s), in case of a comparative loss. Returns: Loss tensor.
def tf_baseline_loss(self, states, internals, reward, update, reference=None):
    if self.baseline_mode == 'states':
        loss = self.baseline.loss(
            states=states,
            internals=internals,
            reward=reward,
            update=update,
            reference=reference
        )

    elif self.baseline_mode == 'network':
        loss = self.baseline.loss(
            states=self.network.apply(x=states, internals=internals, update=update),
            internals=internals,
            reward=reward,
            update=update,
            reference=reference
        )

    regularization_loss = self.baseline.regularization_loss()
    if regularization_loss is not None:
        loss += regularization_loss

    return loss
138,363
Returns the baseline optimizer arguments including the time, the list of variables to optimize, and various functions which the optimizer might require to perform an update step. Args: states: Dict of state tensors. internals: List of prior internal state tensors. reward: Reward tensor. Returns: Baseline optimizer arguments as dict.
def baseline_optimizer_arguments(self, states, internals, reward):
    arguments = dict(
        time=self.global_timestep,
        variables=self.baseline.get_variables(),
        arguments=dict(
            states=states,
            internals=internals,
            reward=reward,
            update=tf.constant(value=True),
        ),
        fn_reference=self.baseline.reference,
        fn_loss=self.fn_baseline_loss,
        # source_variables=self.network.get_variables()
    )
    if self.global_model is not None:
        arguments['global_variables'] = self.global_model.baseline.get_variables()
    return arguments
138,364
Creates a new synchronization optimizer instance. Args: sync_frequency: The interval between optimization calls actually performing a synchronization step. update_weight: The update weight, 1.0 meaning a full assignment of the source variables' values.
def __init__(self, sync_frequency=1, update_weight=1.0, scope='synchronization', summary_labels=()):
    assert isinstance(sync_frequency, int) and sync_frequency > 0
    self.sync_frequency = sync_frequency

    assert isinstance(update_weight, float) and update_weight > 0.0
    self.update_weight = update_weight

    super(Synchronization, self).__init__(scope=scope, summary_labels=summary_labels)
138,368
Creates the TensorFlow operations for performing an optimization step. Args: time: Time tensor. variables: List of variables to optimize. source_variables: List of source variables to synchronize with. **kwargs: Additional arguments, not used. Returns: List of delta tensors corresponding to the updates for each optimized variable.
def tf_step(self, time, variables, source_variables, **kwargs):
    assert all(util.shape(source) == util.shape(target) for source, target in zip(source_variables, variables))

    last_sync = tf.get_variable(
        name='last-sync',
        shape=(),
        dtype=tf.int64,
        initializer=tf.constant_initializer(value=(-self.sync_frequency), dtype=tf.int64),
        trainable=False
    )

    def sync():
        deltas = list()
        for source_variable, target_variable in zip(source_variables, variables):
            delta = self.update_weight * (source_variable - target_variable)
            deltas.append(delta)

        applied = self.apply_step(variables=variables, deltas=deltas)
        last_sync_updated = last_sync.assign(value=time)

        with tf.control_dependencies(control_inputs=(applied, last_sync_updated)):
            # Trivial operation to enforce control dependency
            return [delta + 0.0 for delta in deltas]

    def no_sync():
        deltas = list()
        for variable in variables:
            delta = tf.zeros(shape=util.shape(variable))
            deltas.append(delta)
        return deltas

    do_sync = (time - last_sync >= self.sync_frequency)
    return tf.cond(pred=do_sync, true_fn=sync, false_fn=no_sync)
138,369
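The delta computed inside sync() is simply a (possibly partial) move of each target variable toward its source counterpart. A NumPy sketch of that arithmetic, detached from the sync-frequency bookkeeping (variable shapes and the 0.1 weight are made up):

import numpy as np

def sync_deltas(source_variables, variables, update_weight=1.0):
    # update_weight = 1.0 reproduces a hard copy of the source values
    return [update_weight * (source - target) for source, target in zip(source_variables, variables)]

target = [np.zeros(3)]
source = [np.ones(3)]
deltas = sync_deltas(source, target, update_weight=0.1)
target = [t + d for t, d in zip(target, deltas)]   # -> [0.1, 0.1, 0.1], moving toward source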
Creates a new evolutionary optimizer instance. Args: learning_rate: Learning rate. num_samples: Number of sampled perturbations.
def __init__(self, learning_rate, num_samples=1, unroll_loop=False, scope='evolutionary', summary_labels=()):
    assert isinstance(learning_rate, float) and learning_rate > 0.0
    self.learning_rate = learning_rate

    assert isinstance(num_samples, int) and num_samples > 0
    self.num_samples = num_samples

    assert isinstance(unroll_loop, bool)
    self.unroll_loop = unroll_loop

    super(Evolutionary, self).__init__(scope=scope, summary_labels=summary_labels)
138,370
Creates the TensorFlow operations for performing an optimization step. Args: time: Time tensor. variables: List of variables to optimize. arguments: Dict of arguments for callables, like fn_loss. fn_loss: A callable returning the loss of the current model. **kwargs: Additional arguments, not used. Returns: List of delta tensors corresponding to the updates for each optimized variable.
def tf_step(self, time, variables, arguments, fn_loss, **kwargs):
    unperturbed_loss = fn_loss(**arguments)

    # First sample
    perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]
    applied = self.apply_step(variables=variables, deltas=perturbations)

    with tf.control_dependencies(control_inputs=(applied,)):
        perturbed_loss = fn_loss(**arguments)
        direction = tf.sign(x=(unperturbed_loss - perturbed_loss))
        deltas_sum = [direction * perturbation for perturbation in perturbations]

    if self.unroll_loop:
        # Unrolled for loop
        previous_perturbations = perturbations
        for sample in xrange(self.num_samples):
            with tf.control_dependencies(control_inputs=deltas_sum):
                perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]
                perturbation_deltas = [
                    pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations)
                ]
                applied = self.apply_step(variables=variables, deltas=perturbation_deltas)
                previous_perturbations = perturbations

            with tf.control_dependencies(control_inputs=(applied,)):
                perturbed_loss = fn_loss(**arguments)
                direction = tf.sign(x=(unperturbed_loss - perturbed_loss))
                deltas_sum = [delta + direction * perturbation for delta, perturbation in zip(deltas_sum, perturbations)]

    else:
        # TensorFlow while loop
        def body(iteration, deltas_sum, previous_perturbations):
            with tf.control_dependencies(control_inputs=deltas_sum):
                perturbations = [tf.random_normal(shape=util.shape(variable)) * self.learning_rate for variable in variables]
                perturbation_deltas = [
                    pert - prev_pert for pert, prev_pert in zip(perturbations, previous_perturbations)
                ]
                applied = self.apply_step(variables=variables, deltas=perturbation_deltas)

            with tf.control_dependencies(control_inputs=(applied,)):
                perturbed_loss = fn_loss(**arguments)
                direction = tf.sign(x=(unperturbed_loss - perturbed_loss))
                deltas_sum = [delta + direction * perturbation for delta, perturbation in zip(deltas_sum, perturbations)]

            return iteration + 1, deltas_sum, perturbations

        def cond(iteration, deltas_sum, previous_perturbation):
            return iteration < self.num_samples - 1

        _, deltas_sum, perturbations = tf.while_loop(cond=cond, body=body, loop_vars=(0, deltas_sum, perturbations))

    with tf.control_dependencies(control_inputs=deltas_sum):
        deltas = [delta / self.num_samples for delta in deltas_sum]
        perturbation_deltas = [delta - pert for delta, pert in zip(deltas, perturbations)]
        applied = self.apply_step(variables=variables, deltas=perturbation_deltas)

    with tf.control_dependencies(control_inputs=(applied,)):
        # Trivial operation to enforce control dependency
        return [delta + 0.0 for delta in deltas]
138,371
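A simplified NumPy version of the estimator may make the update easier to follow. Unlike the TensorFlow code above, which re-applies perturbations in place, this sketch evaluates every perturbation from the unperturbed point, so it is an approximation of the procedure rather than a line-for-line port; the defaults are illustrative.

import numpy as np

def evolutionary_deltas(fn_loss, variables, learning_rate=0.01, num_samples=5, rng=None):
    rng = rng or np.random.RandomState(0)
    unperturbed_loss = fn_loss(variables)
    deltas_sum = [np.zeros_like(v) for v in variables]
    for _ in range(num_samples):
        perturbations = [rng.normal(size=v.shape) * learning_rate for v in variables]
        perturbed_loss = fn_loss([v + p for v, p in zip(variables, perturbations)])
        direction = np.sign(unperturbed_loss - perturbed_loss)   # +1 if the perturbation improved the loss
        deltas_sum = [d + direction * p for d, p in zip(deltas_sum, perturbations)]
    return [d / num_samples for d in deltas_sum]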
Multi-layer perceptron baseline. Args: sizes: List of dense layer sizes
def __init__(self, sizes, scope='mlp-baseline', summary_labels=()):
    network = []
    for size in sizes:
        network.append(dict(type='dense', size=size))

    super(MLPBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)
138,372
Creates a new global optimizer instance. Args: optimizer: The optimizer which is modified by this meta optimizer.
def __init__(self, optimizer, scope='global-optimizer', summary_labels=()):
    super(GlobalOptimizer, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
138,373
Creates the TensorFlow operations for performing an optimization step on the given variables, including actually changing the values of the variables. Args: time: Time tensor. Not used for this optimizer. variables: List of variables to optimize. **kwargs: fn_loss : loss function tensor to differentiate. Returns: List of delta tensors corresponding to the updates for each optimized variable.
def tf_step(self, time, variables, **kwargs):
    fn_loss = kwargs["fn_loss"]
    if variables is None:
        variables = tf.trainable_variables()
    return tf.gradients(fn_loss, variables)
138,383
Applies the given (and already calculated) step deltas to the variable values. Args: variables: List of variables. deltas: List of deltas of same length. loss_sampled : the sampled loss Returns: The step-applied operation. A tf.group of tf.assign_add ops.
def apply_step(self, variables, deltas, loss_sampled):
    update_stats_op = self.compute_and_apply_stats(loss_sampled, var_list=variables)

    grads = [(a, b) for a, b in zip(deltas, variables)]
    kfacOptim, _ = self.apply_gradients_kfac(grads)
    return kfacOptim
138,384
Performs an optimization step. Args: time: Time tensor. Not used for this optimizer. variables: List of variables to optimize. **kwargs: fn_loss : loss function tensor that is differentiated sampled_loss : the sampled loss from running the model. Returns: The optimization operation.
def minimize(self, time, variables, **kwargs):
    loss = kwargs["fn_loss"]
    sampled_loss = kwargs["sampled_loss"]

    min_op, _ = self.minimize_(loss, sampled_loss, var_list=variables)
    return min_op
138,385
Creates a new optimizer instance of a TensorFlow optimizer. Args: optimizer: The name of the optimizer. Must be one of the keys of the tf_optimizers dict. **kwargs: Arguments passed on to the TensorFlow optimizer constructor as **kwargs.
def __init__(self, optimizer, scope=None, summary_labels=(), **kwargs):
    self.tf_optimizer_type = optimizer
    self.tf_optimizer = TFOptimizer.tf_optimizers[optimizer](**kwargs)

    super(TFOptimizer, self).__init__(scope=(scope or optimizer), summary_labels=summary_labels)
138,399
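The tf_optimizers lookup is a plain name-to-constructor table. A hypothetical miniature of the same pattern (assuming the TensorFlow 1.x tf.train API used by this style of code; the names in the dict are examples, not the library's exact keys):

import tensorflow as tf

tf_optimizers = dict(
    adam=tf.train.AdamOptimizer,
    gradient_descent=tf.train.GradientDescentOptimizer,
    rmsprop=tf.train.RMSPropOptimizer
)

def make_tf_optimizer(name, **kwargs):
    # e.g. make_tf_optimizer('adam', learning_rate=1e-3)
    return tf_optimizers[name](**kwargs)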
Returns either an ion object or composition object given a formula. Args: formula: String formula. E.g. ion: NaOH(aq), Na[+]; e.g. solid: Fe2O3(s), Fe(s), Na2O Returns: Composition/Ion object
def ion_or_solid_comp_object(formula):
    m = re.search(r"\[([^\[\]]+)\]|\(aq\)", formula)
    if m:
        comp_obj = Ion.from_formula(formula)
    elif re.search(r"\(s\)", formula):
        comp_obj = Composition(formula[:-3])
    else:
        comp_obj = Composition(formula)
    return comp_obj
138,513
Generates a label for the pourbaix plotter Args: entry (PourbaixEntry or MultiEntry): entry to get a label for
def generate_entry_label(entry):
    if isinstance(entry, MultiEntry):
        return " + ".join([latexify_ion(e.name) for e in entry.entry_list])
    else:
        return latexify_ion(latexify(entry.name))
138,514
Get free energy for a given pH and V Args: pH (float): pH at which to evaluate free energy V (float): voltage at which to evaluate free energy Returns: free energy at conditions
def energy_at_conditions(self, pH, V):
    return self.energy + self.npH * PREFAC * pH + self.nPhi * V
138,518
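As a quick numeric check of the linear form above (PREFAC is the Nernst prefactor, roughly 0.0591 V per pH unit at room temperature; the entry values below are made up):

PREFAC = 0.0591  # approx. kT*ln(10)/e at 298 K, in volts per pH unit

def energy_at_conditions(energy, npH, nPhi, pH, V):
    return energy + npH * PREFAC * pH + nPhi * V

# A hypothetical entry with energy=-1.0 eV, npH=-2, nPhi=-2, evaluated at pH 7 and V=0.5:
print(energy_at_conditions(-1.0, -2, -2, pH=7.0, V=0.5))   # -1.0 - 0.8274 - 1.0 = -2.8274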
Energy at an electrochemical condition, compatible with numpy arrays for pH/V input Args: pH (float): pH at condition V (float): applied potential at condition Returns: energy normalized by number of non-O/H atoms at condition
def normalized_energy_at_conditions(self, pH, V):
    return self.energy_at_conditions(pH, V) * self.normalization_factor
138,519
Initializes a MultiEntry. Args: entry_list ([PourbaixEntry]): List of component PourbaixEntries weights ([float]): Weights associated with each entry. Default is None
def __init__(self, entry_list, weights=None):
    if weights is None:
        self.weights = [1.0] * len(entry_list)
    else:
        self.weights = weights
    self.entry_list = entry_list
138,524
Finds stable entry at a pH,V condition Args: pH (float): pH to find stable entry V (float): V to find stable entry Returns: the stable entry with the lowest normalized energy at the given pH and V
def find_stable_entry(self, pH, V):
    energies_at_conditions = [e.normalized_energy_at_conditions(pH, V) for e in self.stable_entries]
    return self.stable_entries[np.argmin(energies_at_conditions)]
138,536
Finds decomposition to most stable entry Args: entry (PourbaixEntry): PourbaixEntry corresponding to compound to find the decomposition for pH (float): pH at which to find the decomposition V (float): voltage at which to find the decomposition Returns: reaction corresponding to the decomposition
def get_decomposition_energy(self, entry, pH, V):
    # Find representative multientry
    if self._multielement and not isinstance(entry, MultiEntry):
        possible_entries = self._generate_multielement_entries(
            self._filtered_entries, forced_include=[entry])
        # Filter to only include materials where the entry is only solid
        if entry.phase_type == "solid":
            possible_entries = [e for e in possible_entries
                                if e.phase_type.count("Solid") == 1]
        possible_energies = [e.normalized_energy_at_conditions(pH, V)
                             for e in possible_entries]
    else:
        possible_energies = [entry.normalized_energy_at_conditions(pH, V)]

    min_energy = np.min(possible_energies, axis=0)

    # Find entry and take the difference
    hull = self.get_hull_energy(pH, V)
    return min_energy - hull
138,537
Shows the pourbaix plot Args: *args: args to get_pourbaix_plot **kwargs: kwargs to get_pourbaix_plot Returns: None
def show(self, *args, **kwargs):
    plt = self.get_pourbaix_plot(*args, **kwargs)
    plt.show()
138,541
Plot Pourbaix diagram. Args: limits: 2D list containing limits of the Pourbaix diagram of the form [[xlo, xhi], [ylo, yhi]] title (str): Title to display on plot label_domains (bool): whether to label pourbaix domains plt (pyplot): Pyplot instance for plotting Returns: plt (pyplot) - matplotlib plot object with pourbaix diagram
def get_pourbaix_plot(self, limits=None, title="", label_domains=True, plt=None):
    if limits is None:
        limits = [[-2, 16], [-3, 3]]

    plt = plt or pretty_plot(16)

    xlim = limits[0]
    ylim = limits[1]

    h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                           [xlim[1], -xlim[1] * PREFAC]])
    o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                           [xlim[1], -xlim[1] * PREFAC + 1.23]])
    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])

    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    lw = 3
    plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
    plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
    plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
    plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)

    for entry, vertices in self._pd._stable_domain_vertices.items():
        center = np.average(vertices, axis=0)
        x, y = np.transpose(np.vstack([vertices, vertices[0]]))
        plt.plot(x, y, 'k-', linewidth=lw)

        if label_domains:
            plt.annotate(generate_entry_label(entry), center, ha='center',
                         va='center', fontsize=20, color="b")

    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.title(title, fontsize=20, fontweight='bold')
    return plt
138,542
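A hedged end-to-end usage sketch, assuming these methods come from pymatgen's pourbaix_diagram module; the entries variable is a placeholder for a list of PourbaixEntry objects obtained elsewhere, so this is an illustration of the call sequence rather than a runnable script on its own.

from pymatgen.analysis.pourbaix_diagram import PourbaixDiagram, PourbaixPlotter

# entries: a list of PourbaixEntry objects obtained elsewhere (e.g. from a database query)
pbx = PourbaixDiagram(entries)
plotter = PourbaixPlotter(pbx)
plt = plotter.get_pourbaix_plot(limits=[[-2, 16], [-3, 3]], title="Example Pourbaix diagram")
plt.show()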