# File: CSD-locomotion-master/garaged/src/garage/tf/models/mlp_merge_model.py
"""MLP Merge Model.
A model composed only of a multi-layer perceptron (MLP), which maps
real-valued inputs to real-valued outputs. This model is called an
MLP Merge Model because it takes two inputs and concatenates the second
input with the layer at a specified index. It can be merged with any layer
from the input layer to the last hidden layer.
"""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
class MLPMergeModel(Model):
"""MLP Merge Model.
Args:
output_dim (int): Dimension of the network output.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
        concat_layer (int): The index of the layer at which to concatenate
            input_var2 with the network. The indexing works like standard
            python list indexing: an index of 0 refers to the input layer
            (input_var1), while an index of -1 points to the last hidden
            layer. The default points to the second layer from the end.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
name='MLPMergeModel',
hidden_sizes=(32, 32),
concat_layer=-2,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_sizes = hidden_sizes
self._concat_layer = concat_layer
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
def network_input_spec(self):
"""Network input spec.
Return:
            list[str]: List of key(str) for the network inputs.
"""
return ['input_var1', 'input_var2']
# pylint: disable=arguments-differ
def _build(self, state_input, action_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
action_input (tf.Tensor): Tensor input for action.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return mlp(input_var=state_input,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
input_var2=action_input,
concat_layer=self._concat_layer,
name='mlp_concat',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
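# Minimal usage sketch (illustrative; not part of the original file). It builds
# a Q-function-style network that feeds an action input into the hidden layers
# via concat_layer. Assumes garage is importable and TF1-compat graph mode.
def _mlp_merge_model_example():
    import numpy as np
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session() as sess:
        model = MLPMergeModel(output_dim=1, hidden_sizes=(32, 32))
        obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
        act_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))
        q_value = model.build(obs_ph, act_ph).outputs
        return sess.run(q_value,
                        feed_dict={obs_ph: np.zeros((3, 4), np.float32),
                                   act_ph: np.zeros((3, 2), np.float32)})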
# File: CSD-locomotion-master/garaged/src/garage/tf/models/mlp_model.py
"""MLP Model.
A model composed only of a multi-layer perceptron (MLP), which maps
real-valued inputs to real-valued outputs.
"""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
class MLPModel(Model):
"""MLP Model.
Args:
output_dim (int): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
name (str): Model name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
name='MLPModel',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
# pylint: disable=arguments-differ
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return mlp(input_var=state_input,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
name='mlp',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
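# Minimal usage sketch (illustrative; not part of the original file). Assumes
# garage is importable and TF1-compat graph mode.
def _mlp_model_example():
    import numpy as np
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session() as sess:
        model = MLPModel(output_dim=2, hidden_sizes=(8, 8))
        input_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        outputs = model.build(input_ph).outputs
        return sess.run(outputs,
                        feed_dict={input_ph: np.zeros((3, 5), np.float32)})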
# File: CSD-locomotion-master/garaged/src/garage/tf/models/model.py
"""Base model classes."""
import abc
from collections import namedtuple
import warnings
import tensorflow as tf
class BaseModel(abc.ABC):
"""Interface-only abstract class for models.
A Model contains the structure/configuration of a set of computation
graphs, or can be understood as a set of networks. Using a model
    requires calling `build()` with a given input placeholder, which can be
either tf.compat.v1.placeholder, or the output from another model. This
makes composition of complex models with simple models much easier.
Examples:
model = SimpleModel(output_dim=2)
# To use a model, first create a placeholder.
# In the case of TensorFlow, we create a tf.compat.v1.placeholder.
input_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))
# Building the model
output = model.build(input_ph)
# We can also pass the output of a model to another model.
# Here we pass the output from the above SimpleModel object.
model_2 = ComplexModel(output_dim=2)
output_2 = model_2.build(output)
"""
def build(self, *inputs, name=None):
"""Output of model with the given input placeholder(s).
This function is implemented by subclasses to create their computation
graphs, which will be managed by Model. Generally, subclasses should
implement `build()` directly.
Args:
inputs (object): Input(s) for the model.
name (str): Name of the model.
Return:
list[tf.Tensor]: Output(s) of the model.
"""
@property
@abc.abstractmethod
def name(self):
"""Name for this Model."""
@property
@abc.abstractmethod
def parameters(self):
"""Parameters of the Model.
        The output of a model is determined by its parameters. It could be
the weights of a neural network model or parameters of a loss
function model.
Returns:
list[tf.Tensor]: Parameters.
"""
@parameters.setter
def parameters(self, parameters):
"""Set parameters of the Model.
Args:
parameters (list[tf.Tensor]): Parameters.
"""
class Network:
"""Network class For TensorFlow.
A Network contains connectivity information by inputs/outputs.
When a Network is built, it appears as a subgraph in the computation
graphs, scoped by the Network name. All Networks built with the same
    model share the same parameters, i.e., the same inputs yield the same outputs.
"""
def __init__(self):
self._inputs = None
self._outputs = None
@property
def input(self):
"""Tensor input of the Network.
Returns:
tf.Tensor: Input.
"""
return self._inputs[0]
@property
def inputs(self):
"""Tensor inputs of the Network.
Returns:
list[tf.Tensor]: Inputs.
"""
return self._inputs
@property
def output(self):
"""Tensor output of the Network.
Returns:
tf.Tensor: Output.
"""
return self._outputs[0]
@property
def outputs(self):
"""Tensor outputs of the Network.
Returns:
list[tf.Tensor]: Outputs.
"""
return self._outputs
class Model(BaseModel):
r"""Model class for TensorFlow.
A TfModel only contains the structure/configuration of the underlying
    computation graphs. Connectivity information is kept in the Network class.
    A TfModel contains zero or more Networks.
    When a Network is created, it reuses the parameters from the
    model. If a Network is built without a given name, the name "default" will
    be used.
***
Do not call tf.global_variable_initializers() after building a model as it
will reassign random weights to the model.
The parameters inside a model will be initialized when calling build().
***
    Pickling is handled automatically. The target weights should be assigned to
    self._default_parameters before pickling, so that the newly created model
    can check whether target weights exist. When unpickled, the deserialized
    model will load the weights from self._default_parameters.
The design is illustrated as the following:
input_1 input_2
| |
============== Model (TfModel)===================
| | | |
| | Parameters | |
| ============= / \ ============ |
| | default | / \ | Network2 | |
| | (Network) |/ \|(Network) | |
| ============= ============ |
| | | |
=================================================
| |
| |
(outputs from 'default' networks) |
outputs from ['Network2'] network
Examples are also available in tests/garage/tf/models/test_model.
Args:
name (str): Name of the model. It will also become the variable scope
of the model. Every model should have a unique name.
"""
def __init__(self, name):
super().__init__()
        self._name = name or type(self).__name__  # name defaults to class name
self._networks = {}
self._default_parameters = None
self._variable_scope = None
# pylint: disable=protected-access, assignment-from-no-return
def build(self, *inputs, name=None):
"""Build a Network with the given input(s).
***
Do not call tf.global_variable_initializers() after building a model
as it will reassign random weights to the model.
The parameters inside a model will be initialized when calling build().
***
It uses the same, fixed variable scope for all Networks, to ensure
        parameter sharing. Different Networks must have a unique name.
Args:
inputs (list[tf.Tensor]) : Tensor input(s), recommended to be
positional arguments, for example,
def build(self, state_input, action_input, name=None).
name (str): Name of the model, which is also the name scope of the
model.
Raises:
ValueError: When a Network with the same name is already built.
Returns:
list[tf.Tensor]: Output tensors of the model with the given
inputs.
"""
network_name = name or 'default'
if not self._networks:
# First time building the model, so self._networks are empty
# We store the variable_scope to reenter later when we reuse it
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
with tf.name_scope(name=network_name):
network = Network()
network._inputs = inputs
network._outputs = self._build(*inputs, name)
variables = self._get_variables().values()
tf.compat.v1.get_default_session().run(
tf.compat.v1.variables_initializer(variables))
if self._default_parameters:
self.parameters = self._default_parameters
else:
if network_name in self._networks:
raise ValueError(
'Network {} already exists!'.format(network_name))
with tf.compat.v1.variable_scope(self._variable_scope,
reuse=True,
auxiliary_name_scope=False):
with tf.name_scope(name=network_name):
network = Network()
network._inputs = inputs
network._outputs = self._build(*inputs, name)
custom_in_spec = self.network_input_spec()
custom_out_spec = self.network_output_spec()
in_spec = ['input', 'inputs']
out_spec = ['output', 'outputs']
in_args = [network.input, network.inputs]
out_args = [network.output, network.outputs]
if isinstance(network.inputs, tuple) and len(network.inputs) > 1:
assert len(custom_in_spec) == len(network.inputs), (
'network_input_spec must have same length as inputs!')
in_spec.extend(custom_in_spec)
in_args.extend(network.inputs)
if isinstance(network.outputs, tuple) and len(network.outputs) > 1:
assert len(custom_out_spec) == len(network.outputs), (
'network_output_spec must have same length as outputs!')
out_spec.extend(custom_out_spec)
out_args.extend(network.outputs)
elif len(custom_out_spec) > 0:
if not isinstance(network.outputs, tuple):
assert len(custom_out_spec) == 1, (
                    'network_output_spec must have same length as outputs!')
out_spec.extend(custom_out_spec)
out_args.extend([network.outputs])
else:
assert len(custom_out_spec) == len(network.outputs), (
                    'network_output_spec must have same length as outputs!')
out_spec.extend(custom_out_spec)
out_args.extend(network.outputs)
c = namedtuple(network_name, [*in_spec, *out_spec])
all_args = in_args + out_args
out_network = c(*all_args)
self._networks[network_name] = out_network
return out_network
def _build(self, *inputs, name=None):
"""Output of the model given input placeholder(s).
User should implement _build() inside their subclassed model,
and construct the computation graphs in this function.
Args:
            inputs: Tensor input(s), recommended to be positional arguments, e.g.
def _build(self, state_input, action_input, name=None).
                It would usually be the same as the inputs in build().
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
list[tf.Tensor]: Tensor output(s) of the model.
"""
# pylint: disable=no-self-use
def network_input_spec(self):
"""Network input spec.
Return:
list[str]: List of key(str) for the network inputs.
"""
return []
# pylint: disable=no-self-use
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return []
@property
def parameters(self):
"""Parameters of the model.
Returns:
np.ndarray: Parameters
"""
_variables = self._get_variables()
if _variables:
return tf.compat.v1.get_default_session().run(_variables)
else:
return _variables
@parameters.setter
def parameters(self, parameters):
"""Set model parameters.
Args:
parameters (tf.Tensor): Parameters.
"""
variables = self._get_variables()
for name, var in variables.items():
found = False
# param name without model name
param_name = name[name.find(self.name) + len(self.name) + 1:]
for k, v in parameters.items():
if param_name in k:
var.load(v)
found = True
continue
if not found:
warnings.warn('No value provided for variable {}'.format(name))
@property
def name(self):
"""Name (str) of the model.
This is also the variable scope of the model.
Returns:
str: Name of the model.
"""
return self._name
@property
def input(self):
"""Default input of the model.
When the model is built the first time, by default it
creates the 'default' network. This property creates
a reference to the input of the network.
Returns:
tf.Tensor: Default input of the model.
"""
return self._networks['default'].input
@property
def output(self):
"""Default output of the model.
When the model is built the first time, by default it
creates the 'default' network. This property creates
a reference to the output of the network.
Returns:
tf.Tensor: Default output of the model.
"""
return self._networks['default'].output
@property
def inputs(self):
"""Default inputs of the model.
When the model is built the first time, by default it
creates the 'default' network. This property creates
a reference to the inputs of the network.
Returns:
list[tf.Tensor]: Default inputs of the model.
"""
return self._networks['default'].inputs
@property
def outputs(self):
"""Default outputs of the model.
When the model is built the first time, by default it
creates the 'default' network. This property creates
a reference to the outputs of the network.
Returns:
list[tf.Tensor]: Default outputs of the model.
"""
return self._networks['default'].outputs
def _get_variables(self):
"""Get variables of this model.
Returns:
dict[str: tf.Tensor]: Variables of this model.
"""
if self._variable_scope:
return {v.name: v for v in self._variable_scope.global_variables()}
else:
return dict()
def __getstate__(self):
"""Get the pickle state.
Returns:
dict: The pickled state.
"""
new_dict = self.__dict__.copy()
del new_dict['_networks']
new_dict['_default_parameters'] = self.parameters
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
self.__dict__.update(state)
self._networks = {}
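# Illustrative sketch (not part of the original module): two builds of the same
# Model reuse the same parameters, and pickling stores the current weights via
# __getstate__ / _default_parameters. MLPModel is used only as an example
# subclass; assumes garage is importable and TF1-compat graph mode.
def _model_reuse_and_pickle_example():
    import pickle
    import numpy as np
    from garage.tf.models.mlp_model import MLPModel
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session() as sess:
        model = MLPModel(output_dim=2, hidden_sizes=(4,))
        ph_a = tf.compat.v1.placeholder(tf.float32, shape=(None, 3))
        ph_b = tf.compat.v1.placeholder(tf.float32, shape=(None, 3))
        out_a = model.build(ph_a).outputs
        # The second Network needs a distinct name; it reuses the weights.
        out_b = model.build(ph_b, name='reused').outputs
        x = np.ones((1, 3), np.float32)
        y_a, y_b = sess.run([out_a, out_b], feed_dict={ph_a: x, ph_b: x})
        assert np.allclose(y_a, y_b)  # shared parameters, identical outputs
        return pickle.dumps(model)    # weights are saved by __getstate__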
# File: CSD-locomotion-master/garaged/src/garage/tf/models/module.py
"""Interface for primitives which build on top of models."""
import abc
import tensorflow as tf
from garage.misc.tensor_utils import flatten_tensors, unflatten_tensors
class Module(abc.ABC):
"""A module that builds on top of model.
Args:
name (str): Module name, also the variable scope.
"""
def __init__(self, name):
self._name = name
self._variable_scope = None
self._cached_params = None
self._cached_param_shapes = None
@property
def name(self):
"""str: Name of this module."""
return self._name
@property
@abc.abstractmethod
def vectorized(self):
"""bool: If this module supports vectorization input."""
def reset(self, do_resets=None):
"""Reset the module.
        This is effective only for recurrent modules. do_resets is effective
        only for vectorized modules.
        For a vectorized module, do_resets is an array of booleans indicating
which internal states to be reset. The length of do_resets should be
equal to the length of inputs.
Args:
do_resets (numpy.ndarray): Bool array indicating which states
to be reset.
"""
@property
def state_info_specs(self):
"""State info specification.
Returns:
List[str]: keys and shapes for the information related to the
module's state when taking an action.
"""
return list()
@property
def state_info_keys(self):
"""State info keys.
Returns:
List[str]: keys for the information related to the module's state
when taking an input.
"""
return [k for k, _ in self.state_info_specs]
def terminate(self):
"""Clean up operation."""
def get_trainable_vars(self):
"""Get trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._variable_scope.trainable_variables()
def get_global_vars(self):
"""Get global variables.
Returns:
List[tf.Variable]: A list of global variables in the current
variable scope.
"""
return self._variable_scope.global_variables()
def get_params(self):
"""Get the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
if self._cached_params is None:
self._cached_params = self.get_trainable_vars()
return self._cached_params
def get_param_shapes(self):
"""Get parameter shapes.
Returns:
List[tuple]: A list of variable shapes.
"""
if self._cached_param_shapes is None:
params = self.get_params()
param_values = tf.compat.v1.get_default_session().run(params)
self._cached_param_shapes = [val.shape for val in param_values]
return self._cached_param_shapes
def get_param_values(self):
"""Get param values.
Returns:
np.ndarray: Values of the parameters evaluated in
the current session
"""
params = self.get_params()
param_values = tf.compat.v1.get_default_session().run(params)
return flatten_tensors(param_values)
def set_param_values(self, param_values):
"""Set param values.
Args:
param_values (np.ndarray): A numpy array of parameter values.
"""
param_values = unflatten_tensors(param_values, self.get_param_shapes())
for param, value in zip(self.get_params(), param_values):
param.load(value)
def flat_to_params(self, flattened_params):
"""Unflatten tensors according to their respective shapes.
Args:
flattened_params (np.ndarray): A numpy array of flattened params.
Returns:
List[np.ndarray]: A list of parameters reshaped to the
shapes specified.
"""
return unflatten_tensors(flattened_params, self.get_param_shapes())
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_cached_params']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
self._cached_params = None
self.__dict__.update(state)
class StochasticModule(Module):
"""Stochastic Module."""
@property
@abc.abstractmethod
def distribution(self):
"""Distribution."""
# File: CSD-locomotion-master/garaged/src/garage/tf/models/normalized_input_mlp_model.py
"""NormalizedInputMLPModel."""
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.mlp_model import MLPModel
class NormalizedInputMLPModel(MLPModel):
"""NormalizedInputMLPModel based on garage.tf.models.Model class.
    This class normalizes the inputs and passes the normalized input to an
    MLP model, which can be used to perform linear regression on the outputs.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_shape,
output_dim,
name='NormalizedInputMLPModel',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(output_dim=output_dim,
name=name,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._input_shape = input_shape
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return ['y_hat', 'x_mean', 'x_std']
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
with tf.compat.v1.variable_scope('normalized_vars'):
x_mean_var = tf.compat.v1.get_variable(
name='x_mean',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
x_std_var = tf.compat.v1.get_variable(
name='x_std_var',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
normalized_xs_var = (state_input - x_mean_var) / x_std_var
y_hat = super()._build(normalized_xs_var)
return y_hat, x_mean_var, x_std_var
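# Minimal usage sketch (illustrative; not part of the original file). Because
# network_output_spec() names the outputs, the namedtuple returned by build()
# exposes them as fields. Assumes garage is importable and TF1-compat graph
# mode.
def _normalized_input_mlp_example():
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session():
        model = NormalizedInputMLPModel(input_shape=(5, ), output_dim=2)
        input_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        network = model.build(input_ph)
        # y_hat is the prediction; x_mean / x_std are the non-trainable
        # normalization variables that a regressor can assign from data.
        return network.y_hat, network.x_mean, network.x_std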
# File: CSD-locomotion-master/garaged/src/garage/tf/models/parameter.py
"""Parameter layer in TensorFlow."""
import tensorflow as tf
def parameter(input_var,
length,
initializer=tf.zeros_initializer(),
dtype=tf.float32,
trainable=True,
name='parameter'):
"""Parameter layer.
    Used as a layer that can be broadcast to a certain shape to
    match the input variable during training.
    For recurrent usage, use garage.tf.models.recurrent_parameter().
    Example: a trainable parameter variable with shape (2,) needs to be
    broadcast to (32, 2) when applied to a batch of size 32.
Args:
input_var (tf.Tensor): Input tf.Tensor.
length (int): Integer dimension of the variable.
initializer (callable): Initializer of the variable. The function
should return a tf.Tensor.
dtype: Data type of the variable (default is tf.float32).
trainable (bool): Whether the variable is trainable.
name (str): Variable scope of the variable.
Return:
A tensor of the broadcasted variables.
"""
with tf.compat.v1.variable_scope(name):
p = tf.compat.v1.get_variable('parameter',
shape=(length, ),
dtype=dtype,
initializer=initializer,
trainable=trainable)
batch_dim = tf.shape(input_var)[0]
broadcast_shape = tf.concat(axis=0, values=[[batch_dim], [length]])
p_broadcast = tf.broadcast_to(p, shape=broadcast_shape)
return p_broadcast
def recurrent_parameter(input_var,
step_input_var,
length,
initializer=tf.zeros_initializer(),
dtype=tf.float32,
trainable=True,
name='recurrent_parameter'):
"""Parameter layer for recurrent networks.
    Used as a layer that can be broadcast to a certain shape to
    match the input variable during training.
    Example: a trainable parameter variable with shape (2,) needs to be
    broadcast to (32, 4, 2) when applied to a batch of size 32 and
    time-length 4.
Args:
input_var (tf.Tensor): Input tf.Tensor for full time-series inputs.
step_input_var (tf.Tensor): Input tf.Tensor for step inputs.
length (int): Integer dimension of the variable.
initializer (callable): Initializer of the variable. The function
should return a tf.Tensor.
dtype: Data type of the variable (default is tf.float32).
trainable (bool): Whether the variable is trainable.
name (str): Variable scope of the variable.
Return:
A tensor of the two broadcasted variables: one for full time-series
inputs, one for step inputs.
"""
with tf.compat.v1.variable_scope(name):
p = tf.compat.v1.get_variable('parameter',
shape=(length, ),
dtype=dtype,
initializer=initializer,
trainable=trainable)
batch_dim = tf.shape(input_var)[:2]
step_batch_dim = tf.shape(step_input_var)[:1]
broadcast_shape = tf.concat(axis=0, values=[batch_dim, [length]])
step_broadcast_shape = tf.concat(axis=0,
values=[step_batch_dim, [length]])
p_broadcast = tf.broadcast_to(p, shape=broadcast_shape)
step_p_broadcast = tf.broadcast_to(p, shape=step_broadcast_shape)
return p_broadcast, step_p_broadcast
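# Minimal usage sketch (illustrative; not part of the original file): broadcast
# a learnable vector (for example, a per-dimension log-std) across the batch
# dimension of an input. Assumes TF1-compat graph mode.
def _parameter_example():
    import numpy as np
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session() as sess:
        input_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 3))
        log_std = parameter(input_ph, length=2, name='log_std')
        sess.run(tf.compat.v1.global_variables_initializer())
        value = sess.run(log_std,
                         feed_dict={input_ph: np.zeros((4, 3), np.float32)})
        return value.shape  # (4, 2)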
# File: CSD-locomotion-master/garaged/src/garage/tf/models/sequential.py
"""Sequential Model.
A model composed of one or more models which are connected sequentially,
according to the insertion order.
"""
from garage.tf.models.model import Model
class Sequential(Model):
"""Sequential Model.
Args:
name (str): Model name, also the variable scope.
models (list[garage.tf.models.Model]): The models to be connected
in sequential order.
"""
def __init__(self, *models, name=None):
super().__init__(name)
self._models = models
self._first_network = None
self._last_network = None
# pylint: disable=arguments-differ
def _build(self, input_var, name=None):
"""Build model given input placeholder(s).
Args:
input_var (tf.Tensor): Tensor input.
name (str): Inner model name, also the variable scope of the
inner model.
Return:
tf.Tensor: Tensor output of the model.
"""
out = input_var
for model in self._models:
self._last_network = model.build(out, name=name)
if self._first_network is None:
self._first_network = self._last_network
out = self._last_network.outputs
return out
@property
def input(self):
"""tf.Tensor: input of the model by default."""
return self._first_network.input
@property
def output(self):
"""tf.Tensor: output of the model by default."""
return self._last_network.output
@property
def inputs(self):
"""tf.Tensor: inputs of the model by default."""
return self._first_network.inputs
@property
def outputs(self):
"""tf.Tensor: outputs of the model by default."""
return self._last_network.outputs
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_first_network']
del new_dict['_last_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._first_network = None
self._last_network = None
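# Minimal usage sketch (illustrative; not part of the original file): chain two
# MLPModels so the output of the first feeds the input of the second. Assumes
# garage is importable and TF1-compat graph mode.
def _sequential_example():
    import tensorflow as tf
    from garage.tf.models.mlp_model import MLPModel
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session():
        encoder = MLPModel(output_dim=8, name='encoder')
        head = MLPModel(output_dim=2, name='head')
        model = Sequential(encoder, head)
        input_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        return model.build(input_ph).outputs  # tf.Tensor of shape (None, 2)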
# File: CSD-locomotion-master/garaged/src/garage/tf/optimizers/__init__.py
from garage.tf.optimizers.conjugate_gradient_optimizer import (
ConjugateGradientOptimizer)
from garage.tf.optimizers.conjugate_gradient_optimizer import (
FiniteDifferenceHvp)
from garage.tf.optimizers.first_order_optimizer import FirstOrderOptimizer
from garage.tf.optimizers.lbfgs_optimizer import LbfgsOptimizer
from garage.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
__all__ = [
'ConjugateGradientOptimizer', 'FiniteDifferenceHvp', 'FirstOrderOptimizer',
'LbfgsOptimizer', 'PenaltyLbfgsOptimizer'
]
# File: CSD-locomotion-master/garaged/src/garage/tf/optimizers/conjugate_gradient_optimizer.py
"""Conjugate Gradient Optimizer.
Computes the descent direction using the conjugate gradient method, and then
computes the optimal step size that will satisfy the KL divergence constraint.
Finally, it performs a backtracking line search to optimize the objective.
"""
import abc
from dowel import logger
import numpy as np
import tensorflow as tf
from garage.tf.misc import tensor_utils
from garage.tf.optimizers.utils import LazyDict, sliced_fun
class HessianVectorProduct(abc.ABC):
"""Base class for computing Hessian-vector product.
Args:
num_slices (int): Hessian-vector product function's inputs will be
divided into num_slices and then averaged together to improve
performance.
"""
def __init__(self, num_slices=1):
self._target = None
self._reg_coeff = None
self._hvp_fun = None
self._num_slices = num_slices
@abc.abstractmethod
def update_hvp(self, f, target, inputs, reg_coeff, name=None):
"""Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
"""
def build_eval(self, inputs):
"""Build the evaluation function. # noqa: D202, E501 # https://github.com/PyCQA/pydocstyle/pull/395.
Args:
inputs (tuple[numpy.ndarray]): Function f will be evaluated on
these inputs.
Returns:
function: It can be called to get the final result.
"""
def _eval(v):
"""The evaluation function.
Args:
v (numpy.ndarray): The vector to be multiplied with Hessian.
Returns:
numpy.ndarray: The product of Hessian of function f and v.
"""
xs = tuple(self._target.flat_to_params(v))
ret = sliced_fun(self._hvp_fun['f_hx_plain'], self._num_slices)(
inputs, xs) + self._reg_coeff * v
return ret
return _eval
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_hvp_fun']
return new_dict
class PearlmutterHvp(HessianVectorProduct):
"""Computes Hessian-vector product using Pearlmutter's algorithm.
`Pearlmutter, Barak A. "Fast exact multiplication by the Hessian." Neural
computation 6.1 (1994): 147-160.`
"""
def update_hvp(self, f, target, inputs, reg_coeff, name='PearlmutterHvp'):
"""Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
"""
self._target = target
self._reg_coeff = reg_coeff
params = target.get_params()
with tf.name_scope(name):
constraint_grads = tf.gradients(f,
xs=params,
name='gradients_constraint')
for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
if grad is None:
constraint_grads[idx] = tf.zeros_like(param)
xs = tuple([
tensor_utils.new_tensor_like(p.name.split(':')[0], p)
for p in params
])
def hx_plain():
"""Computes product of Hessian(f) and vector v.
Returns:
tf.Tensor: Symbolic result.
"""
with tf.name_scope('hx_plain'):
with tf.name_scope('hx_function'):
hx_f = tf.reduce_sum(
tf.stack([
tf.reduce_sum(g * x)
for g, x in zip(constraint_grads, xs)
])),
hx_plain_splits = tf.gradients(hx_f,
params,
name='gradients_hx_plain')
for idx, (hx,
param) in enumerate(zip(hx_plain_splits,
params)):
if hx is None:
hx_plain_splits[idx] = tf.zeros_like(param)
return tensor_utils.flatten_tensor_variables(
hx_plain_splits)
self._hvp_fun = LazyDict(
f_hx_plain=lambda: tensor_utils.compile_function(
inputs=inputs + xs,
outputs=hx_plain(),
log_name='f_hx_plain',
), )
class FiniteDifferenceHvp(HessianVectorProduct):
"""Computes Hessian-vector product using finite difference method.
Args:
base_eps (float): Base epsilon value.
symmetric (bool): Symmetric or not.
num_slices (int): Hessian-vector product function's inputs will be
divided into num_slices and then averaged together to improve
performance.
"""
def __init__(self, base_eps=1e-8, symmetric=True, num_slices=1):
super().__init__(num_slices=num_slices)
self.base_eps = base_eps
self.symmetric = symmetric
def update_hvp(self, f, target, inputs, reg_coeff, name='FiniteDifferenceHvp'):
"""Build the symbolic graph to compute the Hessian-vector product.
Args:
f (tf.Tensor): The function whose Hessian needs to be computed.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
inputs (tuple[tf.Tensor]): The inputs for function f.
reg_coeff (float): A small value so that A -> A + reg*I.
name (str): Name to be used in tf.name_scope.
"""
self._target = target
self._reg_coeff = reg_coeff
params = target.get_params()
with tf.name_scope(name):
constraint_grads = tf.gradients(f,
xs=params,
name='gradients_constraint')
for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
if grad is None:
constraint_grads[idx] = tf.zeros_like(param)
flat_grad = tensor_utils.flatten_tensor_variables(constraint_grads)
def f_hx_plain(*args):
"""Computes product of Hessian(f) and vector v.
Args:
args (tuple[numpy.ndarray]): Contains inputs of function f
, and vector v.
Returns:
tf.Tensor: Symbolic result.
"""
with tf.name_scope('f_hx_plain'):
inputs_ = args[:len(inputs)]
xs = args[len(inputs):]
flat_xs = np.concatenate(
[np.reshape(x, (-1, )) for x in xs])
param_val = self._target.get_param_values()
eps = np.cast['float32'](
self.base_eps / (np.linalg.norm(param_val) + 1e-8))
self._target.set_param_values(param_val + eps * flat_xs)
flat_grad_dvplus = self._hvp_fun['f_grad'](*inputs_)
self._target.set_param_values(param_val)
if self.symmetric:
self._target.set_param_values(param_val -
eps * flat_xs)
flat_grad_dvminus = self._hvp_fun['f_grad'](*inputs_)
hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
self._target.set_param_values(param_val)
else:
flat_grad = self._hvp_fun['f_grad'](*inputs_)
hx = (flat_grad_dvplus - flat_grad) / eps
return hx
self._hvp_fun = LazyDict(
f_grad=lambda: tensor_utils.compile_function(
inputs=inputs,
outputs=flat_grad,
log_name='f_grad',
),
f_hx_plain=lambda: f_hx_plain,
)
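# Illustrative sketch (not part of the original file): for f(x) = 0.5 * x.T A x
# the exact Hessian-vector product is A v, and the symmetric finite-difference
# formula used above, (g(x + eps*v) - g(x - eps*v)) / (2*eps), recovers it.
def _finite_difference_hvp_demo():
    A = np.array([[2.0, 0.5], [0.5, 1.0]])

    def grad(x):
        # gradient of 0.5 * x.T A x
        return A.dot(x)

    x0 = np.array([1.0, -1.0])
    v = np.array([0.3, 0.7])
    eps = 1e-5
    hv = (grad(x0 + eps * v) - grad(x0 - eps * v)) / (2 * eps)
    return np.allclose(hv, A.dot(v))  # True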
class ConjugateGradientOptimizer:
"""Performs constrained optimization via line search.
The search direction is computed using a conjugate gradient algorithm,
which gives x = A^{-1}g, where A is a second order approximation of the
constraint and g is the gradient of the loss function.
Args:
cg_iters (int): The number of CG iterations used to calculate A^-1 g
reg_coeff (float): A small value so that A -> A + reg*I
subsample_factor (float): Subsampling factor to reduce samples when
using "conjugate gradient. Since the computation time for the
descent direction dominates, this can greatly reduce the overall
computation time.
backtrack_ratio (float): backtrack ratio for backtracking line search.
max_backtracks (int): Max number of iterations for backtrack
linesearch.
accept_violation (bool): whether to accept the descent step if it
violates the line search condition after exhausting all
backtracking budgets.
hvp_approach (HessianVectorProduct): A class that computes
Hessian-vector products.
num_slices (int): Hessian-vector product function's inputs will be
divided into num_slices and then averaged together to improve
performance.
"""
def __init__(self,
cg_iters=10,
reg_coeff=1e-5,
subsample_factor=1.,
backtrack_ratio=0.8,
max_backtracks=15,
accept_violation=False,
hvp_approach=None,
num_slices=1):
self._cg_iters = cg_iters
self._reg_coeff = reg_coeff
self._subsample_factor = subsample_factor
self._backtrack_ratio = backtrack_ratio
self._max_backtracks = max_backtracks
self._num_slices = num_slices
self._opt_fun = None
self._target = None
self._max_constraint_val = None
self._constraint_name = None
self._accept_violation = accept_violation
if hvp_approach is None:
hvp_approach = PearlmutterHvp(num_slices)
self._hvp_approach = hvp_approach
def update_opt(
self,
loss,
target,
leq_constraint,
inputs,
extra_inputs=None,
name='ConjugateGradientOptimizer',
constraint_name='constraint',
):
"""Update the optimizer.
Build the functions for computing loss, gradient, and
the constraint value.
Args:
loss (tf.Tensor): Symbolic expression for the loss function.
target (garage.tf.policies.Policy): A parameterized object to
optimize over.
leq_constraint (tuple[tf.Tensor, float]): A constraint provided
as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
            inputs (list[tf.Tensor]): A list of symbolic variables as inputs,
which could be subsampled if needed. It is assumed that the
first dimension of these inputs should correspond to the
number of data points.
            extra_inputs (list[tf.Tensor]): A list of symbolic variables as
extra inputs which should not be subsampled.
name (str): Name to be passed to tf.name_scope.
            constraint_name (str): A constraint name for the purpose of logging
and variable names.
"""
params = target.get_params()
ns_vals = [loss, target, leq_constraint, inputs, extra_inputs, params]
with tf.name_scope(name):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
else:
extra_inputs = tuple(extra_inputs)
constraint_term, constraint_value = leq_constraint
with tf.name_scope('loss_gradients'):
grads = tf.gradients(loss, xs=params)
for idx, (grad, param) in enumerate(zip(grads, params)):
if grad is None:
grads[idx] = tf.zeros_like(param)
flat_grad = tensor_utils.flatten_tensor_variables(grads)
self._hvp_approach.update_hvp(f=constraint_term,
target=target,
inputs=inputs + extra_inputs,
reg_coeff=self._reg_coeff,
name='update_opt_' + constraint_name)
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
self._opt_fun = LazyDict(
f_loss=lambda: tensor_utils.compile_function(
inputs=inputs + extra_inputs,
outputs=loss,
log_name='f_loss',
),
f_grad=lambda: tensor_utils.compile_function(
inputs=inputs + extra_inputs,
outputs=flat_grad,
log_name='f_grad',
),
f_constraint=lambda: tensor_utils.compile_function(
inputs=inputs + extra_inputs,
outputs=constraint_term,
log_name='constraint',
),
f_loss_constraint=lambda: tensor_utils.compile_function(
inputs=inputs + extra_inputs,
outputs=[loss, constraint_term],
log_name='f_loss_constraint',
),
)
def loss(self, inputs, extra_inputs=None):
"""Compute the loss value.
Args:
            inputs (list[numpy.ndarray]): A list of inputs, which could be
subsampled if needed. It is assumed that the first dimension
of these inputs should correspond to the number of data points
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
Returns:
float: Loss value.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return sliced_fun(self._opt_fun['f_loss'],
self._num_slices)(inputs, extra_inputs)
def constraint_val(self, inputs, extra_inputs=None):
"""Constraint value.
Args:
            inputs (list[numpy.ndarray]): A list of inputs, which could be
subsampled if needed. It is assumed that the first dimension
of these inputs should correspond to the number of data points
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
Returns:
float: Constraint value.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return sliced_fun(self._opt_fun['f_constraint'],
self._num_slices)(inputs, extra_inputs)
def optimize(self,
inputs,
extra_inputs=None,
subsample_grouped_inputs=None,
name='optimize'):
"""Optimize the function.
Args:
            inputs (list[numpy.ndarray]): A list of inputs, which could be
subsampled if needed. It is assumed that the first dimension
of these inputs should correspond to the number of data points
extra_inputs (list[numpy.ndarray]): A list of extra inputs which
should not be subsampled.
subsample_grouped_inputs (list[numpy.ndarray]): Subsampled inputs
to be used when subsample_factor is less than one.
name (str): The name argument for tf.name_scope.
"""
with tf.name_scope(name):
prev_param = np.copy(self._target.get_param_values())
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
subsample_inputs = inputs
if self._subsample_factor < 1:
if subsample_grouped_inputs is None:
subsample_grouped_inputs = [inputs]
subsample_inputs = tuple()
for inputs_grouped in subsample_grouped_inputs:
n_samples = len(inputs_grouped[0])
inds = np.random.choice(n_samples,
int(n_samples *
self._subsample_factor),
replace=False)
subsample_inputs += tuple(
[x[inds] for x in inputs_grouped])
logger.log(
('Start CG optimization: '
'#parameters: %d, #inputs: %d, #subsample_inputs: %d') %
(len(prev_param), len(inputs[0]), len(subsample_inputs[0])))
logger.log('computing loss before')
loss_before = sliced_fun(self._opt_fun['f_loss'],
self._num_slices)(inputs, extra_inputs)
logger.log('computing gradient')
flat_g = sliced_fun(self._opt_fun['f_grad'],
self._num_slices)(inputs, extra_inputs)
logger.log('gradient computed')
logger.log('computing descent direction')
hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
descent_direction = cg(hx, flat_g, cg_iters=self._cg_iters)
initial_step_size = np.sqrt(
2.0 * self._max_constraint_val *
(1. / (descent_direction.dot(hx(descent_direction)) + 1e-8)))
if np.isnan(initial_step_size):
initial_step_size = 1.
flat_descent_step = initial_step_size * descent_direction
logger.log('descent direction computed')
n_iter = 0
for n_iter, ratio in enumerate(self._backtrack_ratio**np.arange(
self._max_backtracks)): # yapf: disable
cur_step = ratio * flat_descent_step
cur_param = prev_param - cur_step
self._target.set_param_values(cur_param)
loss, constraint_val = sliced_fun(
self._opt_fun['f_loss_constraint'],
self._num_slices)(inputs, extra_inputs)
if (loss < loss_before
and constraint_val <= self._max_constraint_val):
break
if (np.isnan(loss) or np.isnan(constraint_val)
or loss >= loss_before or constraint_val >=
self._max_constraint_val) and not self._accept_violation:
logger.log(
'Line search condition violated. Rejecting the step!')
if np.isnan(loss):
logger.log('Violated because loss is NaN')
if np.isnan(constraint_val):
logger.log('Violated because constraint %s is NaN' %
self._constraint_name)
if loss >= loss_before:
logger.log('Violated because loss not improving')
if constraint_val >= self._max_constraint_val:
logger.log('Violated because constraint %s is violated' %
self._constraint_name)
self._target.set_param_values(prev_param)
logger.log('backtrack iters: %d' % n_iter)
logger.log('optimization finished')
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_opt_fun']
return new_dict
def cg(f_Ax, b, cg_iters=10, residual_tol=1e-10):
"""Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312.
Args:
f_Ax (function): A function to compute Hessian vector product.
b (numpy.ndarray): Right hand side of the equation to solve.
cg_iters (int): Number of iterations to run conjugate gradient
algorithm.
        residual_tol (float): Tolerance for convergence.
Returns:
numpy.ndarray: Solution x* for equation Ax = b.
"""
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
for _ in range(cg_iters):
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v * p
r -= v * z
newrdotr = r.dot(r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
return x
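# Illustrative sketch (not part of the original file): check cg() on a small
# symmetric positive-definite system by passing a matrix-vector product.
def _cg_demo():
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([1.0, 2.0])
    x = cg(lambda v: A.dot(v), b, cg_iters=10)
    return np.allclose(A.dot(x), b, atol=1e-6)  # True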
# File: CSD-locomotion-master/garaged/src/garage/tf/optimizers/first_order_optimizer.py
"""First order optimizer."""
import time
import click
from dowel import logger
import tensorflow as tf
from garage import _Default, make_optimizer
from garage.np.optimizers import BatchDataset
from garage.tf.misc import tensor_utils
from garage.tf.optimizers.utils import LazyDict
class FirstOrderOptimizer:
"""First order optimier.
Performs (stochastic) gradient descent, possibly using fancier methods like
ADAM etc.
Args:
optimizer (tf.Optimizer): Optimizer to be used.
learning_rate (dict): learning rate arguments.
            Learning rates are the main parameters of interest when tuning optimizers.
max_epochs (int): Maximum number of epochs for update.
tolerance (float): Tolerance for difference in loss during update.
batch_size (int): Batch size for optimization.
callback (callable): Function to call during each epoch. Default is
None.
verbose (bool): If true, intermediate log message will be printed.
name (str): Name scope of the optimizer.
"""
def __init__(self,
optimizer=None,
learning_rate=None,
max_epochs=1000,
tolerance=1e-6,
batch_size=32,
callback=None,
verbose=False,
name='FirstOrderOptimizer'):
self._opt_fun = None
self._target = None
self._callback = callback
if optimizer is None:
optimizer = tf.compat.v1.train.AdamOptimizer
learning_rate = learning_rate or dict(learning_rate=_Default(1e-3))
if not isinstance(learning_rate, dict):
learning_rate = dict(learning_rate=learning_rate)
self._tf_optimizer = optimizer
self._learning_rate = learning_rate
self._max_epochs = max_epochs
self._tolerance = tolerance
self._batch_size = batch_size
self._verbose = verbose
self._input_vars = None
self._train_op = None
self._name = name
def update_opt(self, loss, target, inputs, extra_inputs=None, **kwargs):
"""Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
                implement `get_params()` and `get_param_values()`.
inputs (list[tf.Tensor]): List of input placeholders.
extra_inputs (list[tf.Tensor]): List of extra input placeholders.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
"""
del kwargs
with tf.name_scope(self._name):
self._target = target
tf_optimizer = make_optimizer(self._tf_optimizer,
**self._learning_rate)
self._train_op = tf_optimizer.minimize(
loss, var_list=target.get_params())
if extra_inputs is None:
extra_inputs = list()
self._input_vars = inputs + extra_inputs
self._opt_fun = LazyDict(
f_loss=lambda: tensor_utils.compile_function(
inputs + extra_inputs, loss), )
def loss(self, inputs, extra_inputs=None):
"""The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
if extra_inputs is None:
extra_inputs = tuple()
return self._opt_fun['f_loss'](*(tuple(inputs) + extra_inputs))
# pylint: disable=too-many-branches
def optimize(self, inputs, extra_inputs=None, callback=None):
"""Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
callback (callable): Function to call during each epoch. Default
is None.
Raises:
NotImplementedError: If inputs are invalid.
Exception: If loss function is None, i.e. not defined.
"""
if not inputs:
# Assumes that we should always sample mini-batches
raise NotImplementedError('No inputs are fed to optimizer.')
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
f_loss = self._opt_fun['f_loss']
if extra_inputs is None:
extra_inputs = tuple()
last_loss = f_loss(*(tuple(inputs) + extra_inputs))
start_time = time.time()
dataset = BatchDataset(inputs,
self._batch_size,
extra_inputs=extra_inputs)
sess = tf.compat.v1.get_default_session()
for epoch in range(self._max_epochs):
if self._verbose:
logger.log('Epoch {}'.format(epoch))
with click.progressbar(length=len(inputs[0]),
label='Optimizing minibatches') as pbar:
for batch in dataset.iterate(update=True):
sess.run(self._train_op,
dict(list(zip(self._input_vars, batch))))
pbar.update(len(batch[0]))
new_loss = f_loss(*(tuple(inputs) + extra_inputs))
if self._verbose:
logger.log('Epoch: {} | Loss: {}'.format(epoch, new_loss))
if self._callback or callback:
elapsed = time.time() - start_time
callback_args = dict(
loss=new_loss,
params=self._target.get_param_values()
if self._target else None,
itr=epoch,
elapsed=elapsed,
)
if self._callback:
self._callback(callback_args)
if callback:
callback(**callback_args)
if abs(last_loss - new_loss) < self._tolerance:
break
last_loss = new_loss
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_opt_fun']
del new_dict['_tf_optimizer']
del new_dict['_train_op']
del new_dict['_input_vars']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
obj = type(self)()
self.__dict__.update(obj.__dict__)
self.__dict__.update(state)
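# Illustrative sketch (not part of the original file): fit a least-squares
# problem with FirstOrderOptimizer. The _Target class below is a hypothetical
# stand-in for the `target` interface (get_params / get_param_values) that
# garage primitives normally provide. Assumes TF1-compat graph mode.
def _first_order_optimizer_example():
    import numpy as np

    class _Target:

        def __init__(self, variables):
            self._variables = variables

        def get_params(self):
            return self._variables

        def get_param_values(self):
            return tf.compat.v1.get_default_session().run(self._variables)

    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session() as sess:
        x_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 3))
        y_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))
        w = tf.compat.v1.get_variable('w', shape=(3, 1))
        loss = tf.reduce_mean(tf.square(tf.matmul(x_ph, w) - y_ph))
        optimizer = FirstOrderOptimizer(max_epochs=50, batch_size=16)
        optimizer.update_opt(loss, _Target([w]), [x_ph, y_ph])
        sess.run(tf.compat.v1.global_variables_initializer())
        x = np.random.randn(128, 3).astype(np.float32)
        y = x @ np.array([[1.0], [-2.0], [0.5]], np.float32)
        optimizer.optimize([x, y])
        return optimizer.loss([x, y])  # close to zero after training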
# File: CSD-locomotion-master/garaged/src/garage/tf/optimizers/lbfgs_optimizer.py
"""Limited-memory BFGS (L-BFGS) optimizer."""
import time
import scipy.optimize
import tensorflow as tf
from garage.tf.misc import tensor_utils
from garage.tf.optimizers.utils import LazyDict
class LbfgsOptimizer:
"""Limited-memory BFGS (L-BFGS) optimizer.
Performs unconstrained optimization via L-BFGS.
Args:
max_opt_itr (int): Maximum iteration for update.
callback (callable): Function to call during optimization.
"""
def __init__(self, max_opt_itr=20, callback=None):
self._max_opt_itr = max_opt_itr
self._opt_fun = None
self._target = None
self._callback = callback
def update_opt(self,
loss,
target,
inputs,
extra_inputs=None,
name='LbfgsOptimizer',
**kwargs):
"""Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
                implement `get_params()` and `get_param_values()`.
inputs (list[tf.Tensor]): List of input placeholders.
extra_inputs (list[tf.Tensor]): List of extra input placeholders.
name (str): Name scope.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
"""
del kwargs
self._target = target
params = target.get_params()
with tf.name_scope(name):
def get_opt_output():
"""Helper function to construct graph.
Returns:
list[tf.Tensor]: Loss and gradient tensor.
"""
with tf.name_scope('get_opt_output'):
flat_grad = tensor_utils.flatten_tensor_variables(
tf.gradients(loss, params))
return [
tf.cast(loss, tf.float64),
tf.cast(flat_grad, tf.float64)
]
if extra_inputs is None:
extra_inputs = list()
self._opt_fun = LazyDict(
f_loss=lambda: tensor_utils.compile_function(
inputs + extra_inputs, loss),
f_opt=lambda: tensor_utils.compile_function(
inputs=inputs + extra_inputs,
outputs=get_opt_output(),
))
def loss(self, inputs, extra_inputs=None):
"""The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
if extra_inputs is None:
extra_inputs = list()
return self._opt_fun['f_loss'](*(list(inputs) + list(extra_inputs)))
def optimize(self, inputs, extra_inputs=None, name='optimize'):
"""Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
extra_inputs (list[numpy.ndarray]): List of extra input values.
name (str): Name scope.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
with tf.name_scope(name):
f_opt = self._opt_fun['f_opt']
if extra_inputs is None:
extra_inputs = list()
def f_opt_wrapper(flat_params):
"""Helper function to set parameters values.
Args:
flat_params (numpy.ndarray): Flattened parameter values.
Returns:
list[tf.Tensor]: Loss and gradient tensor.
"""
self._target.set_param_values(flat_params)
ret = f_opt(*inputs)
return ret
itr = [0]
start_time = time.time()
if self._callback:
def opt_callback(params):
"""Callback function wrapper.
Args:
params (numpy.ndarray): Parameters.
"""
loss = self._opt_fun['f_loss'](*(inputs + extra_inputs))
elapsed = time.time() - start_time
self._callback(
dict(
loss=loss,
params=params,
itr=itr[0],
elapsed=elapsed,
))
itr[0] += 1
else:
opt_callback = None
scipy.optimize.fmin_l_bfgs_b(
func=f_opt_wrapper,
x0=self._target.get_param_values(),
maxiter=self._max_opt_itr,
callback=opt_callback,
)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_opt_fun']
return new_dict
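# Illustrative sketch (not part of the original file): the scipy routine used
# above expects func to return (loss, gradient) as float64, which is why
# get_opt_output() casts both to tf.float64. A standalone quadratic example:
def _fmin_l_bfgs_b_demo():
    import numpy as np

    def f_and_grad(x):
        loss = np.sum((x - 3.0) ** 2)
        grad = 2.0 * (x - 3.0)
        return loss, grad

    x_opt, f_opt, _ = scipy.optimize.fmin_l_bfgs_b(func=f_and_grad,
                                                   x0=np.zeros(5),
                                                   maxiter=20)
    return x_opt, f_opt  # x_opt is close to [3, 3, 3, 3, 3]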
# File: CSD-locomotion-master/garaged/src/garage/tf/optimizers/penalty_lbfgs_optimizer.py
"""Penalized Limited-memory BFGS (L-BFGS) optimizer."""
from dowel import logger
import numpy as np
import scipy.optimize
import tensorflow as tf
from garage.tf.misc import tensor_utils
from garage.tf.optimizers.utils import LazyDict
class PenaltyLbfgsOptimizer:
"""Penalized Limited-memory BFGS (L-BFGS) optimizer.
Performs constrained optimization via penalized L-BFGS. The penalty term is
adaptively adjusted to make sure that the constraint is satisfied.
Args:
max_opt_itr (int): Maximum iteration for update.
initial_penalty (float): Initial penalty.
min_penalty (float): Minimum penalty allowed. Penalty will be clipped
if lower than this value.
max_penalty (float): Maximum penalty allowed. Penalty will be clipped
if higher than this value.
increase_penalty_factor (float): Factor to increase penalty in each
penalty iteration.
decrease_penalty_factor (float): Factor to decrease penalty in each
penalty iteration.
max_penalty_itr (int): Maximum penalty iterations to perform.
adapt_penalty (bool): Whether the penalty is adaptive or not. If false,
penalty will not change.
"""
def __init__(self,
max_opt_itr=20,
initial_penalty=1.0,
min_penalty=1e-2,
max_penalty=1e6,
increase_penalty_factor=2,
decrease_penalty_factor=0.5,
max_penalty_itr=10,
adapt_penalty=True):
self._max_opt_itr = max_opt_itr
self._penalty = initial_penalty
self._initial_penalty = initial_penalty
self._min_penalty = min_penalty
self._max_penalty = max_penalty
self._increase_penalty_factor = increase_penalty_factor
self._decrease_penalty_factor = decrease_penalty_factor
self._max_penalty_itr = max_penalty_itr
self._adapt_penalty = adapt_penalty
self._opt_fun = None
self._target = None
self._max_constraint_val = None
self._constraint_name = None
def update_opt(self,
loss,
target,
leq_constraint,
inputs,
constraint_name='constraint',
name='PenaltyLbfgsOptimizer',
**kwargs):
"""Construct operation graph for the optimizer.
Args:
loss (tf.Tensor): Loss objective to minimize.
target (object): Target object to optimize. The object should
                implement `get_params()` and `get_param_values()`.
leq_constraint (tuple): It contains a tf.Tensor and a float value.
The tf.Tensor represents the constraint term, and the float
value is the constraint value.
inputs (list[tf.Tensor]): List of input placeholders.
constraint_name (str): Constraint name for logging.
name (str): Name scope.
kwargs (dict): Extra unused keyword arguments. Some optimizers
have extra input, e.g. KL constraint.
"""
del kwargs
params = target.get_params()
with tf.name_scope(name):
constraint_term, constraint_value = leq_constraint
penalty_var = tf.compat.v1.placeholder(tf.float32,
tuple(),
name='penalty')
penalized_loss = loss + penalty_var * constraint_term
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
def get_opt_output():
"""Helper function to construct graph.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
"""
with tf.name_scope('get_opt_output'):
grads = tf.gradients(penalized_loss, params)
for idx, (grad, param) in enumerate(zip(grads, params)):
if grad is None:
grads[idx] = tf.zeros_like(param)
flat_grad = tensor_utils.flatten_tensor_variables(grads)
return [
tf.cast(penalized_loss, tf.float64),
tf.cast(flat_grad, tf.float64),
]
self._opt_fun = LazyDict(
f_loss=lambda: tensor_utils.compile_function(
inputs, loss, log_name='f_loss'),
f_constraint=lambda: tensor_utils.compile_function(
inputs, constraint_term, log_name='f_constraint'),
f_penalized_loss=lambda: tensor_utils.compile_function(
inputs=inputs + [penalty_var],
outputs=[penalized_loss, loss, constraint_term],
log_name='f_penalized_loss',
),
f_opt=lambda: tensor_utils.compile_function(
inputs=inputs + [penalty_var],
outputs=get_opt_output(),
))
def loss(self, inputs):
"""The loss.
Args:
inputs (list[numpy.ndarray]): List of input values.
Returns:
float: Loss.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
return self._opt_fun['f_loss'](*inputs)
def constraint_val(self, inputs):
"""The constraint value.
Args:
inputs (list[numpy.ndarray]): List of input values.
Returns:
float: Constraint value.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
return self._opt_fun['f_constraint'](*inputs)
def optimize(self, inputs, name='optimize'):
"""Perform optimization.
Args:
inputs (list[numpy.ndarray]): List of input values.
name (str): Name scope.
Raises:
Exception: If loss function is None, i.e. not defined.
"""
if self._opt_fun is None:
raise Exception(
'Use update_opt() to setup the loss function first.')
with tf.name_scope(name):
inputs = tuple(inputs)
try_penalty = np.clip(self._penalty, self._min_penalty,
self._max_penalty)
penalty_scale_factor = None
f_opt = self._opt_fun['f_opt']
f_penalized_loss = self._opt_fun['f_penalized_loss']
def gen_f_opt(penalty): # noqa: D202
"""Return a function that set parameters values.
Args:
penalty (float): Penalty.
Returns:
callable: Function that set parameters values.
"""
def f(flat_params):
"""Helper function to set parameters values.
Args:
                        flat_params (numpy.ndarray): Flattened parameter values.
Returns:
list[tf.Tensor]: Penalized loss and gradient tensor.
"""
self._target.set_param_values(flat_params)
return f_opt(*(inputs + (penalty, )))
return f
cur_params = self._target.get_param_values().astype('float64')
opt_params = cur_params
for penalty_itr in range(self._max_penalty_itr):
logger.log('trying penalty=%.3f...' % try_penalty)
itr_opt_params, _, _ = scipy.optimize.fmin_l_bfgs_b(
func=gen_f_opt(try_penalty),
x0=cur_params,
maxiter=self._max_opt_itr)
_, try_loss, try_constraint_val = f_penalized_loss(*(
inputs + (try_penalty, )))
logger.log('penalty %f => loss %f, %s %f' %
(try_penalty, try_loss, self._constraint_name,
try_constraint_val))
# Either constraint satisfied, or we are at the last iteration
# already and no alternative parameter satisfies the constraint
if try_constraint_val < self._max_constraint_val or \
(penalty_itr == self._max_penalty_itr - 1 and
opt_params is None):
opt_params = itr_opt_params
if not self._adapt_penalty:
break
# Decide scale factor on the first iteration, or if constraint
# violation yields numerical error
if (penalty_scale_factor is None
or np.isnan(try_constraint_val)):
# Increase penalty if constraint violated, or if constraint
# term is NAN
if (try_constraint_val > self._max_constraint_val
or np.isnan(try_constraint_val)):
penalty_scale_factor = self._increase_penalty_factor
else:
# Otherwise (i.e. constraint satisfied), shrink penalty
penalty_scale_factor = self._decrease_penalty_factor
opt_params = itr_opt_params
else:
if (penalty_scale_factor > 1 and
try_constraint_val <= self._max_constraint_val):
break
if (penalty_scale_factor < 1 and
try_constraint_val >= self._max_constraint_val):
break
try_penalty *= penalty_scale_factor
try_penalty = np.clip(try_penalty, self._min_penalty,
self._max_penalty)
self._penalty = try_penalty
self._target.set_param_values(opt_params)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_opt_fun']
return new_dict
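# Standalone sketch (illustration only, not garage code) of the penalty
# adaptation schedule implemented in optimize() above, stripped of the TF and
# scipy machinery. `constraint_val_for` is a hypothetical callable mapping a
# penalty coefficient to the constraint value reached after optimizing the
# penalized loss with that coefficient.
def _penalty_schedule_sketch(constraint_val_for, max_constraint_val,
                             penalty=1.0, increase=2.0, decrease=0.5,
                             min_penalty=1e-2, max_penalty=1e6, max_itr=10):
    penalty = np.clip(penalty, min_penalty, max_penalty)
    scale = None
    for _ in range(max_itr):
        constraint_val = constraint_val_for(penalty)
        if scale is None or np.isnan(constraint_val):
            # First iteration (or numerical trouble): choose a direction.
            if constraint_val > max_constraint_val or np.isnan(constraint_val):
                scale = increase  # constraint violated -> penalize harder
            else:
                scale = decrease  # constraint satisfied -> relax the penalty
        else:
            # Stop once scaling in the chosen direction crosses the threshold.
            if scale > 1 and constraint_val <= max_constraint_val:
                break
            if scale < 1 and constraint_val >= max_constraint_val:
                break
        penalty = np.clip(penalty * scale, min_penalty, max_penalty)
    return penalty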
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/optimizers/utils.py
"""Utilities for TensorFlow optimizers."""
import numpy as np
def sliced_fun(f, n_slices):
"""Divide function f's inputs into several slices.
Evaluate f on those slices, and then average the result. It is useful when
memory is not enough to process all data at once.
Assume:
1. each of f's inputs is iterable and composed of multiple "samples"
2. outputs can be averaged over "samples"
"""
def _sliced_f(sliced_inputs, non_sliced_inputs=None): # yapf: disable
if non_sliced_inputs is None:
non_sliced_inputs = []
if isinstance(non_sliced_inputs, tuple):
non_sliced_inputs = list(non_sliced_inputs)
n_paths = len(sliced_inputs[0])
slice_size = max(1, n_paths // n_slices)
ret_vals = None
for start in range(0, n_paths, slice_size):
inputs_slice = [v[start:start + slice_size] for v in sliced_inputs]
slice_ret_vals = f(*(inputs_slice + non_sliced_inputs))
if not isinstance(slice_ret_vals, (tuple, list)):
slice_ret_vals_as_list = [slice_ret_vals]
else:
slice_ret_vals_as_list = slice_ret_vals
scaled_ret_vals = [
np.asarray(v) * len(inputs_slice[0])
for v in slice_ret_vals_as_list
]
if ret_vals is None:
ret_vals = scaled_ret_vals
else:
ret_vals = [x + y for x, y in zip(ret_vals, scaled_ret_vals)]
ret_vals = [v / n_paths for v in ret_vals]
if not isinstance(slice_ret_vals, (tuple, list)):
ret_vals = ret_vals[0]
elif isinstance(slice_ret_vals, tuple):
ret_vals = tuple(ret_vals)
return ret_vals
return _sliced_f
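# Minimal usage sketch for sliced_fun (the averaging function below is a
# made-up example, not garage code). Because `mean_of_products` returns a
# per-sample average, evaluating it slice by slice and re-averaging recovers
# the full-batch result.
def _sliced_fun_example():
    def mean_of_products(xs, ys, scale):
        return np.mean(np.asarray(xs) * np.asarray(ys)) * scale
    xs, ys = np.arange(8.0), np.ones(8)
    full = mean_of_products(xs, ys, 2.0)
    sliced = sliced_fun(mean_of_products, n_slices=4)([xs, ys], [2.0])
    assert np.isclose(full, sliced)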
class LazyDict:
"""An immutable, lazily-evaluated dict."""
def __init__(self, **kwargs):
self._lazy_dict = kwargs
self._dict = {}
def __getitem__(self, key):
"""Implement `object.__getitem__`."""
if key not in self._dict:
self._dict[key] = self._lazy_dict[key]()
return self._dict[key]
def __setitem__(self, i, y):
"""Implement `object.__setitem__`."""
self.set(i, y)
def get(self, key, default=None):
"""Implement `dict.get`."""
if key in self._lazy_dict:
return self[key]
return default
def set(self, key, value):
"""Implement `dict.set`."""
self._lazy_dict[key] = value
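# Minimal LazyDict usage sketch (hypothetical entries, not garage code):
# values are zero-argument callables, evaluated on first access and cached.
def _lazy_dict_example():
    calls = []
    lazy = LazyDict(answer=lambda: calls.append('evaluated') or 42)
    assert calls == []             # nothing is computed at construction time
    assert lazy['answer'] == 42    # the factory runs on first access
    assert lazy['answer'] == 42    # the cached value is reused
    assert calls == ['evaluated']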
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/plotter/__init__.py
from garage.tf.plotter.plotter import Plotter
__all__ = ['Plotter']
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/plotter/plotter.py
"""Renders rollouts of the policy as it trains."""
import atexit
from collections import namedtuple
from enum import Enum
import platform
from queue import Queue
from threading import Thread
import numpy as np
import tensorflow as tf
from garage.sampler.utils import rollout as default_rollout
__all__ = ['Plotter']
class Op(Enum):
"""Message types."""
STOP = 0
UPDATE = 1
DEMO = 2
Message = namedtuple('Message', ['op', 'args', 'kwargs'])
class Plotter:
"""Renders rollouts of the policy as it trains.
Usually, this class is used by sending plot=True to LocalRunner.train().
Args:
env (gym.Env): The environment to perform rollouts in. This will be
used without copying in the current process but in a separate thread,
so it should be given a unique copy (in particular, do not pass the
environment here, then try to pickle it, or you will occasionally get
crashes).
policy (garage.tf.Policy): The policy to do the rollouts with.
sess (tf.Session): The TensorFlow session to use.
graph (tf.Graph): The TensorFlow graph to use.
rollout (callable): The rollout function to call.
"""
# List containing all plotters instantiated in the process
__plotters = []
def __init__(self,
env,
policy,
sess=None,
graph=None,
rollout=default_rollout):
Plotter.__plotters.append(self)
self._env = env
self.sess = tf.compat.v1.Session() if sess is None else sess
self.graph = tf.compat.v1.get_default_graph(
) if graph is None else graph
with self.sess.as_default(), self.graph.as_default():
self._policy = policy.clone('plotter_policy')
self.rollout = rollout
self.worker_thread = Thread(target=self._start_worker, daemon=True)
self.queue = Queue()
# Needed in order to draw glfw window on the main thread
if 'Darwin' in platform.platform():
self.rollout(self._env,
self._policy,
max_path_length=np.inf,
animated=True,
speedup=5)
def _start_worker(self):
max_length = None
initial_rollout = True
try:
with self.sess.as_default(), self.sess.graph.as_default():
# Each iteration will process ALL messages currently in the
# queue
while True:
msgs = {}
# If true, block and yield processor
if initial_rollout:
msg = self.queue.get()
msgs[msg.op] = msg
# Only fetch the last message of each type
while not self.queue.empty():
msg = self.queue.get()
msgs[msg.op] = msg
else:
# Only fetch the last message of each type
while not self.queue.empty():
msg = self.queue.get_nowait()
msgs[msg.op] = msg
if Op.STOP in msgs:
self.queue.task_done()
break
if Op.UPDATE in msgs:
self._env, self._policy = msgs[Op.UPDATE].args
self.queue.task_done()
if Op.DEMO in msgs:
param_values, max_length = msgs[Op.DEMO].args
self._policy.set_param_values(param_values)
initial_rollout = False
self.rollout(self._env,
self._policy,
max_path_length=max_length,
animated=True,
speedup=5)
self.queue.task_done()
else:
if max_length:
self.rollout(self._env,
self._policy,
max_path_length=max_length,
animated=True,
speedup=5)
except KeyboardInterrupt:
pass
def close(self):
"""Stop the Plotter's worker thread."""
if self.worker_thread.is_alive():
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))
self.queue.join()
self.worker_thread.join()
@staticmethod
def get_plotters():
"""Return all garage.tf.Plotter's.
Returns:
list[garage.tf.Plotter]: All the garage.tf.Plotter's
"""
return Plotter.__plotters
def start(self):
"""Start the Plotter's worker thread."""
if not self.worker_thread.is_alive():
tf.compat.v1.get_variable_scope().reuse_variables()
self.worker_thread.start()
self.queue.put(
Message(op=Op.UPDATE,
args=(self._env, self._policy),
kwargs=None))
atexit.register(self.close)
def update_plot(self, policy, max_length=np.inf):
"""Update the policy being plotted.
Args:
policy (garage.tf.Policy): The policy to rollout.
max_length (int or float): The maximum length to allow a rollout to
be. Defaults to infinity.
"""
if self.worker_thread.is_alive():
self.queue.put(
Message(op=Op.DEMO,
args=(policy.get_param_values(), max_length),
kwargs=None))
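# Hedged usage sketch (not part of garage): LocalRunner drives this class
# automatically when plot=True is passed to train(), but the manual lifecycle
# looks roughly like the following. `env` and `policy` are assumed to be a
# private environment copy and a garage.tf policy created by the caller under
# an active session.
def _plotter_usage_sketch(env, policy):
    plotter = Plotter(env, policy)
    plotter.start()                              # spawn the worker thread
    plotter.update_plot(policy, max_length=500)  # queue a demo rollout
    plotter.close()                              # stop the worker thread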
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/__init__.py
"""Policies for TensorFlow-based algorithms."""
from garage.tf.policies.categorical_cnn_policy import CategoricalCNNPolicy
from garage.tf.policies.categorical_gru_policy import CategoricalGRUPolicy
from garage.tf.policies.categorical_lstm_policy import CategoricalLSTMPolicy
from garage.tf.policies.categorical_mlp_policy import CategoricalMLPPolicy
from garage.tf.policies.continuous_mlp_policy import ContinuousMLPPolicy
from garage.tf.policies.discrete_qf_derived_policy import (
DiscreteQfDerivedPolicy)
from garage.tf.policies.gaussian_gru_policy import GaussianGRUPolicy
from garage.tf.policies.gaussian_lstm_policy import GaussianLSTMPolicy
from garage.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from garage.tf.policies.gaussian_mlp_task_embedding_policy import (
GaussianMLPTaskEmbeddingPolicy)
from garage.tf.policies.policy import Policy, StochasticPolicy
from garage.tf.policies.task_embedding_policy import TaskEmbeddingPolicy
__all__ = [
'Policy', 'StochasticPolicy', 'CategoricalCNNPolicy',
'CategoricalGRUPolicy', 'CategoricalLSTMPolicy', 'CategoricalMLPPolicy',
'ContinuousMLPPolicy', 'DiscreteQfDerivedPolicy', 'GaussianGRUPolicy',
'GaussianLSTMPolicy', 'GaussianMLPPolicy',
'GaussianMLPTaskEmbeddingPolicy', 'TaskEmbeddingPolicy'
]
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/categorical_cnn_policy.py
"""Categorical CNN Policy.
A policy represented by a Categorical distribution
which is parameterized by a convolutional neural network (CNN)
followed by a multilayer perceptron (MLP).
"""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import CategoricalCNNModel
from garage.tf.policies.policy import StochasticPolicy
class CategoricalCNNPolicy(StochasticPolicy):
"""CategoricalCNNPolicy.
    A policy that contains a CNN and an MLP to make predictions based on
a categorical distribution.
It only works with akro.Discrete action space.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For
example, (1, 2) means there are two convolutional layers. The
stride of the filter for first layer is 1 and that of the second
layer is 2.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
name (str): Policy name, also the variable scope of the policy.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this policy consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
filters,
strides,
padding,
name='CategoricalCNNPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
assert isinstance(env_spec.action_space, akro.Discrete), (
'CategoricalCNNPolicy only works with akro.Discrete action '
'space.')
super().__init__(name, env_spec)
if isinstance(env_spec.observation_space, akro.Dict):
            raise ValueError('CNN policies do not support '
                             'akro.Dict observation spaces.')
self._env_spec = env_spec
self._obs_dim = env_spec.observation_space.shape
self._action_dim = env_spec.action_space.n
self._filters = filters
self._strides = strides
self._padding = padding
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._f_prob = None
self._dist = None
self.model = CategoricalCNNModel(
output_dim=self._action_dim,
filters=filters,
strides=strides,
padding=padding,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._initialize()
def _initialize(self):
"""Initialize policy."""
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, None) +
self._obs_dim)
if isinstance(self.env_spec.observation_space, akro.Image):
augmented_state_input = tf.cast(state_input, tf.float32)
augmented_state_input /= 255.0
else:
augmented_state_input = state_input
self._dist = self.model.build(augmented_state_input).dist
self._f_prob = tf.compat.v1.get_default_session().make_callable(
[
tf.argmax(
self._dist.sample(
seed=deterministic.get_tf_seed_stream()), -1),
self._dist.probs
],
feed_list=[state_input])
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor) : State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
if isinstance(self.env_spec.observation_space, akro.Image):
augmented_state_input = tf.cast(state_input, tf.float32)
augmented_state_input /= 255.0
else:
augmented_state_input = state_input
return self.model.build(augmented_state_input, name=name)
@property
def input_dim(self):
"""int: Dimension of the policy input."""
return self._obs_dim
@property
def distribution(self):
"""Policy distribution.
Returns:
tfp.Distribution.OneHotCategorical: Policy distribution.
"""
return self._dist
@property
def vectorized(self):
"""Vectorized or not.
Returns:
bool: True if primitive supports vectorized operations.
"""
return True
def get_action(self, observation):
"""Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
"""
sample, prob = self.get_actions([observation])
return sample, {k: v[0] for k, v in prob.items()}
def get_actions(self, observations):
"""Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
"""
if isinstance(self.env_spec.observation_space, akro.Image) and \
len(observations[0].shape) < \
len(self.env_spec.observation_space.shape):
observations = self.env_spec.observation_space.unflatten_n(
observations)
samples, probs = self._f_prob(np.expand_dims(observations, 1))
return np.squeeze(samples), dict(prob=np.squeeze(probs, axis=1))
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy. It has to be
different from source policy if cloned under the same
computational graph.
Returns:
garage.tf.policies.CategoricalCNNPolicy: Newly cloned policy.
"""
new_policy = self.__class__(
name=name,
env_spec=self._env_spec,
filters=self._filters,
strides=self._strides,
padding=self._padding,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_prob']
del new_dict['_dist']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
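# Hedged construction sketch (shapes and hyper-parameters below are arbitrary
# examples, and the EnvSpec construction is an assumption about its
# observation_space/action_space keyword arguments). It assumes graph-mode TF
# with a default session already entered, since __init__ calls
# tf.compat.v1.get_default_session(). `filters` follows the
# ((channels, (height, width)), ...) format described in the class docstring.
def _build_example_cnn_policy():
    from garage.envs.env_spec import EnvSpec
    env_spec = EnvSpec(
        observation_space=akro.Box(low=0.0, high=1.0, shape=(32, 32, 3)),
        action_space=akro.Discrete(4))
    return CategoricalCNNPolicy(
        env_spec=env_spec,
        filters=((32, (3, 3)), (64, (3, 3))),  # two convolutional layers
        strides=(1, 2),
        padding='SAME',
        hidden_sizes=(64,))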
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/categorical_gru_policy.py
"""Categorical GRU Policy.
A policy represented by a Categorical distribution
which is parameterized by a Gated Recurrent Unit (GRU).
"""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import CategoricalGRUModel
from garage.tf.policies.policy import StochasticPolicy
class CategoricalGRUPolicy(StochasticPolicy):
"""Categorical GRU Policy.
A policy represented by a Categorical distribution
which is parameterized by a Gated Recurrent Unit (GRU).
It only works with akro.Discrete action space.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name, also the variable scope.
        hidden_dim (int): Hidden dimension for the GRU cell.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
state_include_action (bool): Whether the state includes action.
If True, input dimension will be
(observation dimension + action dimension).
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
name='CategoricalGRUPolicy',
hidden_dim=32,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
state_include_action=True,
layer_normalization=False):
if not isinstance(env_spec.action_space, akro.Discrete):
            raise ValueError('CategoricalGRUPolicy only works '
                             'with akro.Discrete action space.')
super().__init__(name, env_spec)
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.n
self._hidden_dim = hidden_dim
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._layer_normalization = layer_normalization
self._state_include_action = state_include_action
if state_include_action:
self._input_dim = self._obs_dim + self._action_dim
else:
self._input_dim = self._obs_dim
self._f_step_prob = None
self.model = CategoricalGRUModel(
output_dim=self._action_dim,
hidden_dim=self._hidden_dim,
name='prob_network',
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
recurrent_nonlinearity=recurrent_nonlinearity,
recurrent_w_init=recurrent_w_init,
hidden_state_init=hidden_state_init,
hidden_state_init_trainable=hidden_state_init_trainable,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._prev_actions = None
self._prev_hiddens = None
self._dist = None
self._init_hidden = None
self._initialize()
def _initialize(self):
"""Initialize policy."""
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
state_input = tf.compat.v1.placeholder(shape=(None, None,
self._input_dim),
name='state_input',
dtype=tf.float32)
step_input_var = tf.compat.v1.placeholder(shape=(None,
self._input_dim),
name='step_input',
dtype=tf.float32)
step_hidden_var = tf.compat.v1.placeholder(
shape=(None, self._hidden_dim),
name='step_hidden_input',
dtype=tf.float32)
(self._dist, step_out, step_hidden,
self._init_hidden) = self.model.build(state_input, step_input_var,
step_hidden_var).outputs
self._f_step_prob = tf.compat.v1.get_default_session().make_callable(
[step_out, step_hidden],
feed_list=[step_input_var, step_hidden_var])
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor) : State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state , used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
_, step_input_var, step_hidden_var = self.model.inputs
return self.model.build(state_input,
step_input_var,
step_hidden_var,
name=name)
@property
def input_dim(self):
"""int: Dimension of the policy input."""
return self._input_dim
@property
def vectorized(self):
"""Vectorized or not.
Returns:
Bool: True if primitive supports vectorized operations.
"""
return True
def reset(self, do_resets=None):
"""Reset the policy.
Note:
            If `do_resets` is None, it defaults to np.array([True]), which
            implies the policy will not be "vectorized", i.e. the number of
            parallel environments for training data sampling is 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
"""
if do_resets is None:
do_resets = [True]
do_resets = np.asarray(do_resets)
if self._prev_actions is None or len(do_resets) != len(
self._prev_actions):
self._prev_actions = np.zeros(
(len(do_resets), self.action_space.flat_dim))
self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
self._prev_actions[do_resets] = 0.
self._prev_hiddens[do_resets] = self._init_hidden.eval()
def get_action(self, observation):
"""Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
"""
observations = self.observation_space.flatten_n(observations)
if self._state_include_action:
assert self._prev_actions is not None
all_input = np.concatenate([observations, self._prev_actions],
axis=-1)
else:
all_input = observations
probs, hidden_vec = self._f_step_prob(all_input, self._prev_hiddens)
actions = list(map(self.action_space.weighted_sample, probs))
prev_actions = self._prev_actions
self._prev_actions = self.action_space.flatten_n(actions)
self._prev_hiddens = hidden_vec
agent_info = dict(prob=probs)
if self._state_include_action:
agent_info['prev_action'] = np.copy(prev_actions)
return actions, agent_info
@property
def distribution(self):
"""Policy distribution.
Returns:
tfp.Distribution.OneHotCategorical: Policy distribution.
"""
return self._dist
@property
def state_info_specs(self):
"""State info specifcation.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
"""
if self._state_include_action:
return [
('prev_action', (self._action_dim, )),
]
return []
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy. It has to be
different from source policy if cloned under the same
computational graph.
Returns:
garage.tf.policies.CategoricalGRUPolicy: Newly cloned policy.
"""
new_policy = self.__class__(
name=name,
env_spec=self._env_spec,
hidden_dim=self._hidden_dim,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
recurrent_nonlinearity=self._recurrent_nonlinearity,
recurrent_w_init=self._recurrent_w_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self._hidden_state_init_trainable,
state_include_action=self._state_include_action,
layer_normalization=self._layer_normalization)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_step_prob']
del new_dict['_dist']
del new_dict['_init_hidden']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
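# Hedged rollout sketch (not part of garage): recurrent policies are stepped
# one observation at a time. reset() clears the GRU hidden state and, when
# state_include_action=True, the previous action that is fed back as input;
# get_action() then advances the recurrent state by one step per call. `env`
# is assumed to follow the gym reset()/step() convention and `policy` to be
# an already-constructed CategoricalGRUPolicy.
def _recurrent_rollout_sketch(env, policy, max_steps=100):
    policy.reset()
    obs = env.reset()
    for _ in range(max_steps):
        action, agent_info = policy.get_action(obs)
        obs, reward, done, _ = env.step(action)
        if done:
            break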
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/categorical_lstm_policy.py
"""Categorical LSTM Policy.
A policy represented by a Categorical distribution
which is parameterized by a Long short-term memory (LSTM).
"""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import CategoricalLSTMModel
from garage.tf.policies.policy import StochasticPolicy
class CategoricalLSTMPolicy(StochasticPolicy):
"""Categorical LSTM Policy.
A policy represented by a Categorical distribution
which is parameterized by a Long short-term memory (LSTM).
It only works with akro.Discrete action space.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name, also the variable scope.
hidden_dim (int): Hidden dimension for LSTM cell.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
cell_state_init (callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
cell_state_init_trainable (bool): Bool for whether the initial
cell state is trainable.
state_include_action (bool): Whether the state includes action.
If True, input dimension will be
(observation dimension + action dimension).
forget_bias (bool): If True, add 1 to the bias of the forget gate
at initialization. It's used to reduce the scale of forgetting at
the beginning of the training.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
name='CategoricalLSTMPolicy',
hidden_dim=32,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
cell_state_init=tf.zeros_initializer(),
cell_state_init_trainable=False,
state_include_action=True,
forget_bias=True,
layer_normalization=False):
if not isinstance(env_spec.action_space, akro.Discrete):
            raise ValueError('CategoricalLSTMPolicy only works '
                             'with akro.Discrete action space.')
super().__init__(name, env_spec)
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.n
self._hidden_dim = hidden_dim
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._cell_state_init = cell_state_init
self._cell_state_init_trainable = cell_state_init_trainable
self._forget_bias = forget_bias
self._layer_normalization = layer_normalization
self._state_include_action = state_include_action
if state_include_action:
self._input_dim = self._obs_dim + self._action_dim
else:
self._input_dim = self._obs_dim
self._f_step_prob = None
self.model = CategoricalLSTMModel(
output_dim=self._action_dim,
hidden_dim=self._hidden_dim,
name='prob_network',
forget_bias=forget_bias,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
recurrent_nonlinearity=recurrent_nonlinearity,
recurrent_w_init=recurrent_w_init,
hidden_state_init=hidden_state_init,
hidden_state_init_trainable=hidden_state_init_trainable,
cell_state_init=cell_state_init,
cell_state_init_trainable=cell_state_init_trainable,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._prev_actions = None
self._prev_hiddens = None
self._prev_cells = None
self._dist = None
self._init_hidden = None
self._init_cell = None
self._initialize()
def _initialize(self):
"""Initialize policy."""
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
state_input = tf.compat.v1.placeholder(shape=(None, None,
self._input_dim),
name='state_input',
dtype=tf.float32)
step_input_var = tf.compat.v1.placeholder(shape=(None,
self._input_dim),
name='step_input',
dtype=tf.float32)
step_hidden_var = tf.compat.v1.placeholder(
shape=(None, self._hidden_dim),
name='step_hidden_input',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(None,
self._hidden_dim),
name='step_cell_input',
dtype=tf.float32)
(self._dist, step_out, step_hidden, step_cell, self._init_hidden,
self._init_cell) = self.model.build(state_input, step_input_var,
step_hidden_var,
step_cell_var).outputs
self._f_step_prob = tf.compat.v1.get_default_session().make_callable(
[step_out, step_hidden, step_cell],
feed_list=[step_input_var, step_hidden_var, step_cell_var])
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor) : State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`
tf.Tensor: Initial hidden state, used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`
tf.Tensor: Initial cell state, used to reset the cell state
when policy resets. Shape: :math:`(S^*)`
"""
with tf.compat.v1.variable_scope(self._variable_scope):
_, step_input, step_hidden, step_cell = self.model.inputs
return self.model.build(state_input,
step_input,
step_hidden,
step_cell,
name=name)
@property
def input_dim(self):
"""int: Dimension of the policy input."""
return self._input_dim
@property
def vectorized(self):
"""Vectorized or not.
Returns:
Bool: True if primitive supports vectorized operations.
"""
return True
def reset(self, do_resets=None):
"""Reset the policy.
Note:
            If `do_resets` is None, it defaults to np.array([True]), which
            implies the policy will not be "vectorized", i.e. the number of
            parallel environments for training data sampling is 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
"""
if do_resets is None:
do_resets = [True]
do_resets = np.asarray(do_resets)
if self._prev_actions is None or len(do_resets) != len(
self._prev_actions):
self._prev_actions = np.zeros(
(len(do_resets), self.action_space.flat_dim))
self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
self._prev_cells = np.zeros((len(do_resets), self._hidden_dim))
self._prev_actions[do_resets] = 0.
self._prev_hiddens[do_resets] = self._init_hidden.eval()
self._prev_cells[do_resets] = self._init_cell.eval()
def get_action(self, observation):
"""Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
"""
observations = self.observation_space.flatten_n(observations)
if self._state_include_action:
assert self._prev_actions is not None
all_input = np.concatenate([observations, self._prev_actions],
axis=-1)
else:
all_input = observations
probs, hidden_vec, cell_vec = self._f_step_prob(
all_input, self._prev_hiddens, self._prev_cells)
actions = list(map(self.action_space.weighted_sample, probs))
prev_actions = self._prev_actions
self._prev_actions = self.action_space.flatten_n(actions)
self._prev_hiddens = hidden_vec
self._prev_cells = cell_vec
agent_info = dict(prob=probs)
if self._state_include_action:
agent_info['prev_action'] = np.copy(prev_actions)
return actions, agent_info
@property
def distribution(self):
"""Policy distribution.
Returns:
tfp.Distribution.OneHotCategorical: Policy distribution.
"""
return self._dist
@property
def state_info_specs(self):
"""State info specifcation.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
"""
if self._state_include_action:
return [
('prev_action', (self._action_dim, )),
]
return []
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy. It has to be
different from source policy if cloned under the same
computational graph.
Returns:
garage.tf.policies.CategoricalLSTMPolicy: Newly cloned policy.
"""
new_policy = self.__class__(
name=name,
env_spec=self._env_spec,
hidden_dim=self._hidden_dim,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
recurrent_nonlinearity=self._recurrent_nonlinearity,
recurrent_w_init=self._recurrent_w_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self._hidden_state_init_trainable,
cell_state_init=self._cell_state_init,
cell_state_init_trainable=self._cell_state_init_trainable,
state_include_action=self._state_include_action,
forget_bias=self._forget_bias,
layer_normalization=self._layer_normalization)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_step_prob']
del new_dict['_dist']
del new_dict['_init_hidden']
del new_dict['_init_cell']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/categorical_mlp_policy.py
"""Categorical MLP Policy.
A policy represented by a Categorical distribution
which is parameterized by a multilayer perceptron (MLP).
"""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import CategoricalMLPModel
from garage.tf.policies.policy import StochasticPolicy
class CategoricalMLPPolicy(StochasticPolicy):
"""Categorical MLP Policy.
A policy represented by a Categorical distribution
which is parameterized by a multilayer perceptron (MLP).
It only works with akro.Discrete action space.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this policy consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
name='CategoricalMLPPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
if not isinstance(env_spec.action_space, akro.Discrete):
            raise ValueError('CategoricalMLPPolicy only works '
                             'with akro.Discrete action space.')
super().__init__(name, env_spec)
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.n
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._f_prob = None
self._dist = None
self.model = CategoricalMLPModel(
output_dim=self._action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization,
name='CategoricalMLPModel')
self._initialize()
def _initialize(self):
"""Initialize policy."""
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self._obs_dim))
self._dist = self.model.build(state_input).dist
self._f_prob = tf.compat.v1.get_default_session().make_callable(
[
tf.argmax(
self._dist.sample(
seed=deterministic.get_tf_seed_stream()), -1),
self._dist.probs
],
feed_list=[state_input])
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor) : State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
return self.model.build(state_input, name=name)
@property
def input_dim(self):
"""int: Dimension of the policy input."""
return self._obs_dim
@property
def distribution(self):
"""Policy distribution.
Returns:
tfp.Distribution.OneHotCategorical: Policy distribution.
"""
return self._dist
@property
def vectorized(self):
"""Vectorized or not.
Returns:
Bool: True if primitive supports vectorized operations.
"""
return True
def get_action(self, observation):
"""Return a single action.
Args:
observation (numpy.ndarray): Observations.
Returns:
int: Action given input observation.
dict(numpy.ndarray): Distribution parameters.
"""
actions, agent_infos = self.get_actions([observation])
return actions, {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Return multiple actions.
Args:
observations (numpy.ndarray): Observations.
Returns:
list[int]: Actions given input observations.
dict(numpy.ndarray): Distribution parameters.
"""
observations = self.observation_space.flatten_n(observations)
samples, probs = self._f_prob(np.expand_dims(observations, 1))
return np.squeeze(samples), dict(prob=np.squeeze(probs, axis=1))
def get_regularizable_vars(self):
"""Get regularizable weight variables under the Policy scope.
Returns:
list[tf.Tensor]: Trainable variables.
"""
trainable = self.get_trainable_vars()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy. It has to be
different from source policy if cloned under the same
computational graph.
Returns:
garage.tf.policies.Policy: Newly cloned policy.
"""
new_policy = self.__class__(
name=name,
env_spec=self._env_spec,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: State dictionary.
"""
new_dict = super().__getstate__()
del new_dict['_f_prob']
del new_dict['_dist']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
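# Hedged sketch (not garage code) of how an algorithm typically re-builds this
# policy on its own placeholders to obtain a distribution for a surrogate
# loss. The (None, None, obs_dim) shape mirrors the placeholder used in
# _initialize() above; `policy` is assumed to be an already-constructed
# CategoricalMLPPolicy living under an active graph-mode session.
def _attach_policy_to_loss_graph(policy):
    obs_ph = tf.compat.v1.placeholder(
        tf.float32, shape=(None, None, policy.input_dim), name='obs')
    act_ph = tf.compat.v1.placeholder(
        tf.float32, shape=(None, None, policy.action_space.n), name='act')
    dist = policy.build(obs_ph, name='loss_policy').dist
    # Negative log-likelihood of the (one-hot) actions under the policy.
    nll = -dist.log_prob(act_ph)
    return obs_ph, act_ph, nll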
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/continuous_mlp_policy.py
"""This module creates a continuous MLP policy network.
A continuous MLP network can be used as a policy in different RL
algorithms. It accepts an observation of the environment and predicts a
continuous action.
"""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import MLPModel
from garage.tf.policies.policy import Policy
class ContinuousMLPPolicy(Policy):
"""Continuous MLP Policy Network.
    The policy network selects actions based on the state of the environment.
    It uses a neural network to fit the deterministic policy function pi(s).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this policy consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
name='ContinuousMLPPolicy',
hidden_sizes=(64, 64),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=tf.nn.tanh,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name, env_spec)
action_dim = env_spec.action_space.flat_dim
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._obs_dim = env_spec.observation_space.flat_dim
self.model = MLPModel(output_dim=action_dim,
name='MLPModel',
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._initialize()
def _initialize(self):
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, self._obs_dim))
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
outputs = self.model.build(state_input).outputs
self._f_prob = tf.compat.v1.get_default_session().make_callable(
outputs, feed_list=[state_input])
@property
def input_dim(self):
"""int: Dimension of the policy input."""
return self._obs_dim
def get_action_sym(self, obs_var, name=None):
"""Symbolic graph of the action.
Args:
obs_var (tf.Tensor): Tensor input for symbolic graph.
name (str): Name for symbolic graph.
Returns:
tf.Tensor: symbolic graph of the action.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
return self.model.build(obs_var, name=name).outputs
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Predicted action.
dict: Empty dict since this policy does not model a distribution.
"""
actions, agent_infos = self.get_actions([observation])
action = actions[0]
return action, {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Predicted actions.
dict: Empty dict since this policy does not model a distribution.
"""
observations = self.observation_space.flatten_n(observations)
actions = self._f_prob(observations)
actions = self.action_space.unflatten_n(actions)
return actions, dict()
def get_regularizable_vars(self):
"""Get regularizable weight variables under the Policy scope.
Returns:
list(tf.Variable): List of regularizable variables.
"""
trainable = self.get_trainable_vars()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
@property
def vectorized(self):
"""Vectorized or not.
Returns:
bool: vectorized or not.
"""
return True
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy.
Returns:
garage.tf.policies.ContinuousMLPPolicy: Clone of this object
"""
new_policy = self.__class__(
name=name,
env_spec=self._env_spec,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled as the contents for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_prob']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
super().__setstate__(state)
self._initialize()
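# Hedged sketch (not garage code) of the get_action_sym() use case: the actor
# half of a DDPG-style graph re-builds the deterministic action pi(s) on an
# observation placeholder so a critic Q(s, pi(s)) can be stacked on top.
# `policy` is assumed to be an already-constructed ContinuousMLPPolicy under
# an active graph-mode session; the critic itself is left abstract here.
def _actor_output_sketch(policy):
    obs_ph = tf.compat.v1.placeholder(
        tf.float32, shape=(None, policy.input_dim), name='obs')
    action_sym = policy.get_action_sym(obs_ph, name='actor_action')
    return obs_ph, action_sym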
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/discrete_qf_derived_policy.py
"""A Discrete QFunction-derived policy.
This policy chooses the action that yields the largest Q-value.
"""
import akro
import numpy as np
import tensorflow as tf
from garage.tf.policies.policy import Policy
class DiscreteQfDerivedPolicy(Policy):
"""DiscreteQfDerived policy.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
qf (garage.q_functions.QFunction): The q-function used.
name (str): Name of the policy.
"""
def __init__(self, env_spec, qf, name='DiscreteQfDerivedPolicy'):
assert isinstance(env_spec.action_space, akro.Discrete), (
'DiscreteQfDerivedPolicy only supports akro.Discrete action spaces'
)
if isinstance(env_spec.observation_space, akro.Dict):
            raise ValueError('DiscreteQfDerivedPolicy does not support '
                             'akro.Dict observation spaces.')
super().__init__(name, env_spec)
self._env_spec = env_spec
self._qf = qf
self._initialize()
def _initialize(self):
self._f_qval = tf.compat.v1.get_default_session().make_callable(
self._qf.q_vals, feed_list=[self._qf.model.input])
@property
def vectorized(self):
"""Vectorized or not.
Returns:
Bool: True if primitive supports vectorized operations.
"""
return True
def get_action(self, observation):
"""Get action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Single optimal action from this policy.
dict: Predicted action and agent information. It returns an empty
dict since there is no parameterization.
"""
opt_actions, agent_infos = self.get_actions([observation])
return opt_actions[0], {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Get actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Optimal actions from this policy.
dict: Predicted action and agent information. It returns an empty
dict since there is no parameterization.
"""
if isinstance(self.env_spec.observation_space, akro.Image) and \
len(observations[0].shape) < \
len(self.env_spec.observation_space.shape):
observations = self.env_spec.observation_space.unflatten_n(
observations)
q_vals = self._f_qval(observations)
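        # Greedy selection: for each observation, take the action index with
        # the largest predicted Q-value.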
opt_actions = np.argmax(q_vals, axis=1)
return opt_actions, dict()
def get_trainable_vars(self):
"""Get trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._qf.get_trainable_vars()
def get_global_vars(self):
"""Get global variables.
Returns:
List[tf.Variable]: A list of global variables in the current
variable scope.
"""
return self._qf.get_global_vars()
def get_regularizable_vars(self):
"""Get all network weight variables in the current scope.
Returns:
List[tf.Variable]: A list of network weight variables in the
current variable scope.
"""
return self._qf.get_regularizable_vars()
def get_params(self):
"""Get the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._qf.get_params()
def get_param_shapes(self):
"""Get parameter shapes.
Returns:
List[tuple]: A list of variable shapes.
"""
return self._qf.get_param_shapes()
def get_param_values(self):
"""Get param values.
Returns:
np.ndarray: Values of the parameters evaluated in
the current session
"""
return self._qf.get_param_values()
def set_param_values(self, param_values):
"""Set param values.
Args:
param_values (np.ndarray): A numpy array of parameter values.
"""
self._qf.set_param_values(param_values)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_f_qval']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
self.__dict__.update(state)
self._initialize()
| 4,923 | 27.298851 | 79 | py |
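# Standalone NumPy sketch (not part of the garage source): the greedy
# selection rule used by DiscreteQfDerivedPolicy.get_actions() above.
# The Q-values below are made-up numbers; in the policy they come from
# the wrapped q-function's network.
import numpy as np

q_vals = np.array([[0.1, 0.7, 0.2],    # Q(s_0, a) for 3 discrete actions
                   [0.5, 0.4, 0.9]])   # Q(s_1, a)
opt_actions = np.argmax(q_vals, axis=1)
print(opt_actions)  # [1 2] -> the argmax action per observation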
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/gaussian_gru_policy.py | """Gaussian GRU Policy.
A policy represented by a Gaussian distribution
which is parameterized by a Gated Recurrent Unit (GRU).
"""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import GaussianGRUModel
from garage.tf.policies.policy import StochasticPolicy
class GaussianGRUPolicy(StochasticPolicy):
"""Gaussian GRU Policy.
A policy represented by a Gaussian distribution
which is parameterized by a Gated Recurrent Unit (GRU).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Model name, also the variable scope.
hidden_dim (int): Hidden dimension for GRU cell for mean.
hidden_nonlinearity (Callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (Callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (Callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (Callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (Callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (Callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (Callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (Callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (Callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
learn_std (bool): Is std trainable.
std_share_network (bool): Boolean for whether mean and std share
the same network.
init_std (float): Initial value for std.
layer_normalization (bool): Bool for using layer normalization or not.
state_include_action (bool): Whether the state includes action.
If True, input dimension will be
(observation dimension + action dimension).
"""
def __init__(self,
env_spec,
hidden_dim=32,
name='GaussianGRUPolicy',
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
learn_std=True,
std_share_network=False,
init_std=1.0,
layer_normalization=False,
state_include_action=True):
if not isinstance(env_spec.action_space, akro.Box):
raise ValueError('GaussianGRUPolicy only works with '
'akro.Box action space, but not {}'.format(
env_spec.action_space))
super().__init__(name, env_spec)
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self._hidden_dim = hidden_dim
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._learn_std = learn_std
self._std_share_network = std_share_network
self._init_std = init_std
self._layer_normalization = layer_normalization
self._state_include_action = state_include_action
if state_include_action:
self._input_dim = self._obs_dim + self._action_dim
else:
self._input_dim = self._obs_dim
self._f_step_mean_std = None
self.model = GaussianGRUModel(
output_dim=self._action_dim,
hidden_dim=hidden_dim,
name='GaussianGRUModel',
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
recurrent_nonlinearity=recurrent_nonlinearity,
recurrent_w_init=recurrent_w_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
hidden_state_init=hidden_state_init,
hidden_state_init_trainable=hidden_state_init_trainable,
layer_normalization=layer_normalization,
learn_std=learn_std,
std_share_network=std_share_network,
init_std=init_std)
self._prev_actions = None
self._prev_hiddens = None
self._dist = None
self._init_hidden = None
self._initialize()
def _initialize(self):
"""Initialize policy."""
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
state_input = tf.compat.v1.placeholder(shape=(None, None,
self._input_dim),
name='state_input',
dtype=tf.float32)
step_input_var = tf.compat.v1.placeholder(shape=(None,
self._input_dim),
name='step_input',
dtype=tf.float32)
step_hidden_var = tf.compat.v1.placeholder(
shape=(None, self._hidden_dim),
name='step_hidden_input',
dtype=tf.float32)
(self._dist, step_mean, step_log_std, step_hidden,
self._init_hidden) = self.model.build(state_input, step_input_var,
step_hidden_var).outputs
self._f_step_mean_std = (
tf.compat.v1.get_default_session().make_callable(
[step_mean, step_log_std, step_hidden],
feed_list=[step_input_var, step_hidden_var]))
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor) : State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
_, step_input_var, step_hidden_var = self.model.inputs
return self.model.build(state_input,
step_input_var,
step_hidden_var,
name=name)
@property
def input_dim(self):
"""int: Dimension of the policy input."""
return self._input_dim
@property
def vectorized(self):
"""Vectorized or not.
Returns:
Bool: True if primitive supports vectorized operations.
"""
return True
def reset(self, do_resets=None):
"""Reset the policy.
Note:
If `do_resets` is None, it will be by default `np.array([True])`
which implies the policy will not be "vectorized", i.e. number of
parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
"""
if do_resets is None:
do_resets = np.array([True])
if self._prev_actions is None or len(do_resets) != len(
self._prev_actions):
self._prev_actions = np.zeros(
(len(do_resets), self.action_space.flat_dim))
self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
self._prev_actions[do_resets] = 0.
self._prev_hiddens[do_resets] = self._init_hidden.eval()
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
"""
observations = self.observation_space.flatten_n(observations)
if self._state_include_action:
assert self._prev_actions is not None
all_input = np.concatenate([observations, self._prev_actions],
axis=-1)
else:
all_input = observations
means, log_stds, hidden_vec = self._f_step_mean_std(
all_input, self._prev_hiddens)
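        # Reparameterized sampling: draw standard-normal noise and map it
        # through the predicted distribution (action = mean + exp(log_std) * noise).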
rnd = np.random.normal(size=means.shape)
samples = rnd * np.exp(log_stds) + means
samples = self.action_space.unflatten_n(samples)
prev_actions = self._prev_actions
self._prev_actions = samples
self._prev_hiddens = hidden_vec
agent_infos = dict(mean=means, log_std=log_stds)
if self._state_include_action:
agent_infos['prev_action'] = np.copy(prev_actions)
return samples, agent_infos
@property
def distribution(self):
"""Policy distribution.
Returns:
tfp.Distribution.MultivariateNormalDiag: Policy distribution.
"""
return self._dist
@property
def state_info_specs(self):
"""State info specifcation.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
"""
if self._state_include_action:
return [
('prev_action', (self._action_dim, )),
]
return []
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy. It has to be
different from source policy if cloned under the same
computational graph.
Returns:
garage.tf.policies.GaussianGRUPolicy: Newly cloned policy.
"""
new_policy = self.__class__(
name=name,
env_spec=self._env_spec,
hidden_dim=self._hidden_dim,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
recurrent_nonlinearity=self._recurrent_nonlinearity,
recurrent_w_init=self._recurrent_w_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self._hidden_state_init_trainable,
learn_std=self._learn_std,
std_share_network=self._std_share_network,
init_std=self._init_std,
layer_normalization=self._layer_normalization,
state_include_action=self._state_include_action)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_step_mean_std']
del new_dict['_dist']
del new_dict['_init_hidden']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
| 14,761 | 38.365333 | 79 | py |
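# Standalone NumPy sketch (not part of the garage source): the recurrent
# step bookkeeping that GaussianGRUPolicy.get_actions() above performs when
# state_include_action=True. `fake_step` stands in for the compiled
# _f_step_mean_std callable and is an assumption for illustration only.
import numpy as np

obs_dim, action_dim, hidden_dim, n_envs = 4, 2, 8, 3

def fake_step(step_input, hidden):
    # Pretend network: zero mean, unit std, unchanged hidden state.
    mean = np.zeros((len(step_input), action_dim))
    log_std = np.zeros((len(step_input), action_dim))
    return mean, log_std, hidden

prev_actions = np.zeros((n_envs, action_dim))
prev_hiddens = np.zeros((n_envs, hidden_dim))
for _ in range(5):
    obs = np.random.randn(n_envs, obs_dim)
    step_input = np.concatenate([obs, prev_actions], axis=-1)
    mean, log_std, prev_hiddens = fake_step(step_input, prev_hiddens)
    prev_actions = mean + np.exp(log_std) * np.random.normal(size=mean.shape)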
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/gaussian_lstm_policy.py | """Gaussian LSTM Policy.
A policy represented by a Gaussian distribution
which is parameterized by a Long short-term memory (LSTM).
"""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import GaussianLSTMModel
from garage.tf.policies.policy import StochasticPolicy
class GaussianLSTMPolicy(StochasticPolicy):
"""Gaussian LSTM Policy.
A policy represented by a Gaussian distribution
which is parameterized by a Long short-term memory (LSTM).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Model name, also the variable scope.
hidden_dim (int): Hidden dimension for LSTM cell for mean.
hidden_nonlinearity (Callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (Callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (Callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (Callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (Callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (Callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (Callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (Callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (Callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
cell_state_init (Callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
cell_state_init_trainable (bool): Bool for whether the initial
cell state is trainable.
forget_bias (bool): If True, add 1 to the bias of the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
learn_std (bool): Is std trainable.
std_share_network (bool): Boolean for whether mean and std share
the same network.
init_std (float): Initial value for std.
layer_normalization (bool): Bool for using layer normalization or not.
state_include_action (bool): Whether the state includes action.
If True, input dimension will be
(observation dimension + action dimension).
"""
def __init__(self,
env_spec,
hidden_dim=32,
name='GaussianLSTMPolicy',
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
cell_state_init=tf.zeros_initializer(),
cell_state_init_trainable=False,
forget_bias=True,
learn_std=True,
std_share_network=False,
init_std=1.0,
layer_normalization=False,
state_include_action=True):
if not isinstance(env_spec.action_space, akro.Box):
raise ValueError('GaussianLSTMPolicy only works with '
'akro.Box action space, but not {}'.format(
env_spec.action_space))
super().__init__(name, env_spec)
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self._hidden_dim = hidden_dim
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._cell_state_init = cell_state_init
self._cell_state_init_trainable = cell_state_init_trainable
self._forget_bias = forget_bias
self._learn_std = learn_std
self._std_share_network = std_share_network
self._init_std = init_std
self._layer_normalization = layer_normalization
self._state_include_action = state_include_action
self._f_step_mean_std = None
if state_include_action:
self._input_dim = self._obs_dim + self._action_dim
else:
self._input_dim = self._obs_dim
self.model = GaussianLSTMModel(
output_dim=self._action_dim,
hidden_dim=hidden_dim,
name='GaussianLSTMModel',
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
recurrent_nonlinearity=recurrent_nonlinearity,
recurrent_w_init=recurrent_w_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
hidden_state_init=hidden_state_init,
hidden_state_init_trainable=hidden_state_init_trainable,
cell_state_init=cell_state_init,
cell_state_init_trainable=cell_state_init_trainable,
forget_bias=forget_bias,
layer_normalization=layer_normalization,
learn_std=learn_std,
std_share_network=std_share_network,
init_std=init_std)
self._prev_actions = None
self._prev_hiddens = None
self._prev_cells = None
self._dist = None
self._init_hidden = None
self._init_cell = None
self._initialize()
def _initialize(self):
"""Initialize policy."""
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
state_input = tf.compat.v1.placeholder(shape=(None, None,
self._input_dim),
name='state_input',
dtype=tf.float32)
step_input_var = tf.compat.v1.placeholder(shape=(None,
self._input_dim),
name='step_input',
dtype=tf.float32)
step_hidden_var = tf.compat.v1.placeholder(
shape=(None, self._hidden_dim),
name='step_hidden_input',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(None,
self._hidden_dim),
name='step_cell_input',
dtype=tf.float32)
(self._dist, step_mean, step_log_std, step_hidden, step_cell,
self._init_hidden,
self._init_cell) = self.model.build(state_input, step_input_var,
step_hidden_var,
step_cell_var).outputs
self._f_step_mean_std = tf.compat.v1.get_default_session(
).make_callable(
[step_mean, step_log_std, step_hidden, step_cell],
feed_list=[step_input_var, step_hidden_var, step_cell_var])
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor) : State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
            tf.Tensor: Initial cell state, with shape :math:`(S^*)`.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
_, step_input, step_hidden, step_cell = self.model.inputs
return self.model.build(state_input,
step_input,
step_hidden,
step_cell,
name=name)
@property
def input_dim(self):
"""int: Dimension of the policy input."""
return self._input_dim
@property
def vectorized(self):
"""Vectorized or not.
Returns:
Bool: True if primitive supports vectorized operations.
"""
return True
def reset(self, do_resets=None):
"""Reset the policy.
Note:
If `do_resets` is None, it will be by default np.array([True]),
which implies the policy will not be "vectorized", i.e. number of
            parallel environments for training data sampling = 1.
Args:
do_resets (numpy.ndarray): Bool that indicates terminal state(s).
"""
if do_resets is None:
do_resets = np.array([True])
if self._prev_actions is None or len(do_resets) != len(
self._prev_actions):
self._prev_actions = np.zeros(
(len(do_resets), self.action_space.flat_dim))
self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
self._prev_cells = np.zeros((len(do_resets), self._hidden_dim))
self._prev_actions[do_resets] = 0.
self._prev_hiddens[do_resets] = self._init_hidden.eval()
self._prev_cells[do_resets] = self._init_cell.eval()
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
- prev_action (numpy.ndarray): Previous action, only present if
self._state_include_action is True.
"""
observations = self.observation_space.flatten_n(observations)
if self._state_include_action:
assert self._prev_actions is not None
all_input = np.concatenate([observations, self._prev_actions],
axis=-1)
else:
all_input = observations
means, log_stds, hidden_vec, cell_vec = self._f_step_mean_std(
all_input, self._prev_hiddens, self._prev_cells)
rnd = np.random.normal(size=means.shape)
samples = rnd * np.exp(log_stds) + means
samples = self.action_space.unflatten_n(samples)
prev_actions = self._prev_actions
self._prev_actions = samples
self._prev_hiddens = hidden_vec
self._prev_cells = cell_vec
agent_infos = dict(mean=means, log_std=log_stds)
if self._state_include_action:
agent_infos['prev_action'] = np.copy(prev_actions)
return samples, agent_infos
@property
def distribution(self):
"""Policy distribution.
Returns:
tfp.Distribution.MultivariateNormalDiag: Policy distribution.
"""
return self._dist
@property
def state_info_specs(self):
"""State info specifcation.
Returns:
List[str]: keys and shapes for the information related to the
policy's state when taking an action.
"""
if self._state_include_action:
return [
('prev_action', (self._action_dim, )),
]
return []
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy. It has to be
different from source policy if cloned under the same
computational graph.
Returns:
garage.tf.policies.GaussianLSTMPolicy: Newly cloned policy.
"""
new_policy = self.__class__(
name=name,
env_spec=self._env_spec,
hidden_dim=self._hidden_dim,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
recurrent_nonlinearity=self._recurrent_nonlinearity,
recurrent_w_init=self._recurrent_w_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self._hidden_state_init_trainable,
cell_state_init=self._cell_state_init,
cell_state_init_trainable=self._cell_state_init_trainable,
forget_bias=self._forget_bias,
learn_std=self._learn_std,
std_share_network=self._std_share_network,
init_std=self._init_std,
layer_normalization=self._layer_normalization,
state_include_action=self._state_include_action)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_step_mean_std']
del new_dict['_dist']
del new_dict['_init_hidden']
del new_dict['_init_cell']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
| 16,711 | 39.860636 | 78 | py |
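# Standalone NumPy sketch (not part of the garage source): how the
# `do_resets` mask in GaussianLSTMPolicy.reset() above clears
# per-environment recurrent state for a vectorized policy. Shapes follow
# the docstrings; the values are made up for illustration.
import numpy as np

n_envs, action_dim, hidden_dim = 4, 2, 8
prev_actions = np.random.randn(n_envs, action_dim)
prev_hiddens = np.random.randn(n_envs, hidden_dim)
prev_cells = np.random.randn(n_envs, hidden_dim)

do_resets = np.array([True, False, False, True])  # envs 0 and 3 just terminated
prev_actions[do_resets] = 0.
prev_hiddens[do_resets] = 0.  # in the policy this is the learned initial state
prev_cells[do_resets] = 0.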
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/gaussian_mlp_policy.py | """Gaussian MLP Policy.
A policy represented by a Gaussian distribution
which is parameterized by a multilayer perceptron (MLP).
"""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import GaussianMLPModel
from garage.tf.policies.policy import StochasticPolicy
class GaussianMLPPolicy(StochasticPolicy):
"""Gaussian MLP Policy.
A policy represented by a Gaussian distribution
which is parameterized by a multilayer perceptron (MLP).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
init_std (float): Initial value for std.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network. The function should return a tf.Tensor.
std_output_nonlinearity (callable): Nonlinearity for output layer in
the std network. The function should return a
tf.Tensor.
std_parameterization (str): How the std should be parametrized. There
are a few options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
name='GaussianMLPPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=None,
std_parameterization='exp',
layer_normalization=False):
if not isinstance(env_spec.action_space, akro.Box):
raise ValueError('GaussianMLPPolicy only works with '
'akro.Box action space, but not {}'.format(
env_spec.action_space))
super().__init__(name, env_spec)
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._learn_std = learn_std
self._adaptive_std = adaptive_std
self._std_share_network = std_share_network
self._init_std = init_std
self._min_std = min_std
self._max_std = max_std
self._std_hidden_sizes = std_hidden_sizes
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_output_nonlinearity = std_output_nonlinearity
self._std_parameterization = std_parameterization
self._layer_normalization = layer_normalization
self._f_dist = None
self._dist = None
self.model = GaussianMLPModel(
output_dim=self._action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_output_nonlinearity=std_output_nonlinearity,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
name='GaussianMLPModel')
self._initialize()
def _initialize(self):
"""Initialize policy."""
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self._obs_dim))
self._dist, mean, log_std = self.model.build(state_input).outputs
self._f_dist = tf.compat.v1.get_default_session().make_callable(
[
self._dist.sample(seed=deterministic.get_tf_seed_stream()),
mean, log_std
],
feed_list=[state_input])
@property
def input_dim(self):
"""int: Dimension of the policy input."""
return self._obs_dim
def build(self, state_input, name=None):
"""Build policy.
Args:
state_input (tf.Tensor) : State input.
name (str): Name of the policy, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Distribution.
            tf.Tensor: Mean.
tf.Tensor: Log of standard deviation.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
return self.model.build(state_input, name=name)
@property
def vectorized(self):
"""Vectorized or not.
Returns:
Bool: True if primitive supports vectorized operations.
"""
return True
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns an action and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
"""
actions, agent_infos = self.get_actions([observation])
return actions[0], {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information.
Note:
It returns actions and a dict, with keys
- mean (numpy.ndarray): Means of the distribution.
- log_std (numpy.ndarray): Log standard deviations of the
distribution.
"""
observations = self.observation_space.flatten_n(observations)
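        # The compiled model expects a time axis, so add a singleton time
        # dimension before the forward pass and squeeze it out afterwards.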
samples, means, log_stds = self._f_dist(np.expand_dims(
observations, 1))
samples = self.action_space.unflatten_n(np.squeeze(samples, 1))
means = self.action_space.unflatten_n(np.squeeze(means, 1))
log_stds = self.action_space.unflatten_n(np.squeeze(log_stds, 1))
return samples, dict(mean=means, log_std=log_stds)
@property
def distribution(self):
"""Policy distribution.
Returns:
tfp.Distribution.MultivariateNormalDiag: Policy distribution.
"""
return self._dist
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy. It has to be
different from source policy if cloned under the same
computational graph.
Returns:
garage.tf.policies.GaussianMLPPolicy: Newly cloned policy.
"""
new_policy = self.__class__(
name=name,
env_spec=self._env_spec,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
learn_std=self._learn_std,
adaptive_std=self._adaptive_std,
std_share_network=self._std_share_network,
init_std=self._init_std,
min_std=self._min_std,
max_std=self._max_std,
std_hidden_sizes=self._std_hidden_sizes,
std_hidden_nonlinearity=self._std_hidden_nonlinearity,
std_output_nonlinearity=self._std_output_nonlinearity,
std_parameterization=self._std_parameterization,
layer_normalization=self._layer_normalization)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_dist']
del new_dict['_dist']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
| 12,090 | 38.003226 | 79 | py |
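# Standalone NumPy sketch (not part of the garage source): the two std
# parameterizations described in the GaussianMLPPolicy docstring above.
# `x` stands for the unconstrained network output of the std head and is
# an assumption for illustration only.
import numpy as np

x = np.array([-2.0, 0.0, 2.0])
std_exp = np.exp(x)                  # 'exp': the network outputs log(std)
std_softplus = np.log1p(np.exp(x))   # 'softplus': std = log(1 + exp(x))
print(std_exp, std_softplus)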
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/gaussian_mlp_task_embedding_policy.py | """GaussianMLPTaskEmbeddingPolicy."""
# pylint: disable=wrong-import-order
import akro
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import GaussianMLPModel
from garage.tf.policies.task_embedding_policy import TaskEmbeddingPolicy
class GaussianMLPTaskEmbeddingPolicy(TaskEmbeddingPolicy):
"""GaussianMLPTaskEmbeddingPolicy.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
encoder (garage.tf.embeddings.StochasticEncoder): Embedding network.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
init_std (float): Initial value for std.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
std_output_nonlinearity (callable): Nonlinearity for output layer in
the std network. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
std_parameterization (str): How the std should be parametrized. There
are a few options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
encoder,
name='GaussianMLPTaskEmbeddingPolicy',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=None,
std_parameterization='exp',
layer_normalization=False):
if not isinstance(env_spec.action_space, akro.Box):
            raise ValueError('This task embedding policy does not support '
                             'non-akro.Box action spaces.')
if not isinstance(env_spec.observation_space, akro.Box):
            raise ValueError('This task embedding policy does not support '
                             'non-akro.Box observation spaces.')
super().__init__(name, env_spec, encoder)
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._learn_std = learn_std
self._adaptive_std = adaptive_std
self._std_share_network = std_share_network
self._init_std = init_std
self._min_std = min_std
self._max_std = max_std
self._std_hidden_sizes = std_hidden_sizes
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_output_nonlinearity = std_output_nonlinearity
self._std_parameterization = std_parameterization
self._layer_normalization = layer_normalization
self.obs_dim = env_spec.observation_space.flat_dim
self.action_dim = env_spec.action_space.flat_dim
self._dist = None
self.model = GaussianMLPModel(
output_dim=self.action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_output_nonlinearity=std_output_nonlinearity,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
name='GaussianMLPModel')
self._initialize()
def _initialize(self):
"""Build policy to support sampling.
After build, get_action_*() methods will be available.
"""
obs_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, self.obs_dim))
latent_input = tf.compat.v1.placeholder(
tf.float32, shape=(None, None, self._encoder.output_dim))
# Encoder should be outside policy scope
with tf.compat.v1.variable_scope(self._encoder.name):
latent_var = self._encoder.distribution.sample(
seed=deterministic.get_tf_seed_stream())
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
with tf.compat.v1.variable_scope('concat_obs_latent'):
obs_latent_input = tf.concat([obs_input, latent_input], -1)
self._dist, mean_var, log_std_var = self.model.build(
obs_latent_input,
                # Must be named 'default' to be compatible with
                # the TF default worker.
name='default').outputs
embed_state_input = tf.concat([obs_input, latent_var], -1)
dist_given_task, mean_g_t, log_std_g_t = self.model.build(
embed_state_input, name='given_task').outputs
self._f_dist_obs_latent = tf.compat.v1.get_default_session(
).make_callable([
self._dist.sample(seed=deterministic.get_tf_seed_stream()),
mean_var, log_std_var
],
feed_list=[obs_input, latent_input])
self._f_dist_obs_task = tf.compat.v1.get_default_session(
).make_callable([
dist_given_task.sample(seed=deterministic.get_tf_seed_stream()),
mean_g_t, log_std_g_t
],
feed_list=[obs_input, self._encoder.input])
def build(self, obs_input, task_input, name=None):
"""Build policy.
Args:
obs_input (tf.Tensor): Observation input.
task_input (tf.Tensor): One-hot task id input.
name (str): Name of the model, which is also the name scope.
Returns:
namedtuple: Policy network.
namedtuple: Encoder network.
"""
name = name or 'additional'
# Encoder should be outside policy scope
with tf.compat.v1.variable_scope(self._encoder.name):
enc_net = self._encoder.build(task_input, name=name)
latent_var = enc_net.dist.loc
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
embed_state_input = tf.concat([obs_input, latent_var], -1)
return self.model.build(embed_state_input, name=name), enc_net
@property
def distribution(self):
"""Policy action distribution.
Returns:
tfp.Distribution.MultivariateNormalDiag: Policy distribution.
"""
return self._dist
def get_action(self, observation):
"""Get action sampled from the policy.
Args:
observation (np.ndarray): Augmented observation from the
environment, with shape :math:`(O+N, )`. O is the dimension
of observation, N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of
action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`.
A is the dimension of action.
"""
obs, task = self.split_augmented_observation(observation)
return self.get_action_given_task(obs, task)
def get_actions(self, observations):
"""Get actions sampled from the policy.
Args:
observations (np.ndarray): Augmented observation from the
environment, with shape :math:`(T, O+N)`. T is the number of
environment steps, O is the dimension of observation, N is the
number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
                environment steps, A is the dimension of action.
"""
obses, tasks = zip(*[
self.split_augmented_observation(aug_obs)
for aug_obs in observations
])
return self.get_actions_given_tasks(np.array(obses), np.array(tasks))
def get_action_given_latent(self, observation, latent):
"""Sample an action given observation and latent.
Args:
observation (np.ndarray): Observation from the environment,
with shape :math:`(O, )`. O is the dimension of observation.
latent (np.ndarray): Latent, with shape :math:`(Z, )`. Z is the
dimension of the latent embedding.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`. A is the dimension
of action.
"""
flat_obs = self.observation_space.flatten(observation)
flat_obs = np.expand_dims([flat_obs], 1)
flat_latent = self.latent_space.flatten(latent)
flat_latent = np.expand_dims([flat_latent], 1)
sample, mean, log_std = self._f_dist_obs_latent(flat_obs, flat_latent)
sample = self.action_space.unflatten(np.squeeze(sample, 1)[0])
mean = self.action_space.unflatten(np.squeeze(mean, 1)[0])
log_std = self.action_space.unflatten(np.squeeze(log_std, 1)[0])
return sample, dict(mean=mean, log_std=log_std)
def get_actions_given_latents(self, observations, latents):
"""Sample a batch of actions given observations and latents.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps, O
is the dimension of observation.
latents (np.ndarray): Latents, with shape :math:`(T, Z)`. T is the
number of environment steps, Z is the dimension of
latent embedding.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
            dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
"""
flat_obses = self.observation_space.flatten_n(observations)
flat_obses = np.expand_dims(flat_obses, 1)
flat_latents = self.latent_space.flatten_n(latents)
flat_latents = np.expand_dims(flat_latents, 1)
samples, means, log_stds = self._f_dist_obs_latent(
flat_obses, flat_latents)
samples = self.action_space.unflatten_n(np.squeeze(samples, 1))
means = self.action_space.unflatten_n(np.squeeze(means, 1))
log_stds = self.action_space.unflatten_n(np.squeeze(log_stds, 1))
return samples, dict(mean=means, log_std=log_stds)
def get_action_given_task(self, observation, task_id):
"""Sample an action given observation and task id.
Args:
observation (np.ndarray): Observation from the environment, with
shape :math:`(O, )`. O is the dimension of the observation.
            task_id (np.ndarray): One-hot task id, with shape :math:`(N, )`.
N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy, with shape
:math:`(A, )`. A is the dimension of action.
dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(A, )`. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(A, )`. A is the dimension
of action.
"""
flat_obs = self.observation_space.flatten(observation)
flat_obs = np.expand_dims([flat_obs], 1)
task_id = np.expand_dims([task_id], 1)
sample, mean, log_std = self._f_dist_obs_task(flat_obs, task_id)
sample = self.action_space.unflatten(np.squeeze(sample, 1)[0])
mean = self.action_space.unflatten(np.squeeze(mean, 1)[0])
log_std = self.action_space.unflatten(np.squeeze(log_std, 1)[0])
return sample, dict(mean=mean, log_std=log_std)
def get_actions_given_tasks(self, observations, task_ids):
"""Sample a batch of actions given observations and task ids.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps,
O is the dimension of observation.
task_ids (np.ndarry): One-hot task ids, with shape :math:`(T, N)`.
T is the number of environment steps, N is the number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
            dict: Action distribution information, with keys:
- mean (numpy.ndarray): Mean of the distribution,
with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
- log_std (numpy.ndarray): Log standard deviation of the
distribution, with shape :math:`(T, A)`. T is the number of
environment steps. A is the dimension of action.
"""
flat_obses = self.observation_space.flatten_n(observations)
flat_obses = np.expand_dims(flat_obses, 1)
task_ids = np.expand_dims(task_ids, 1)
samples, means, log_stds = self._f_dist_obs_task(flat_obses, task_ids)
samples = self.action_space.unflatten_n(np.squeeze(samples, 1))
means = self.action_space.unflatten_n(np.squeeze(means, 1))
log_stds = self.action_space.unflatten_n(np.squeeze(log_stds, 1))
return samples, dict(mean=means, log_std=log_stds)
def clone(self, name):
"""Return a clone of the policy.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created policy. It has to be
different from source policy if cloned under the same
computational graph.
Returns:
garage.tf.policies.GaussianMLPTaskEmbeddingPolicy: Cloned policy.
"""
new_policy = self.__class__(
env_spec=self.env_spec,
encoder=self.encoder.clone('{}_encoder'.format(name)),
name=name,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
learn_std=self._learn_std,
adaptive_std=self._adaptive_std,
std_share_network=self._std_share_network,
init_std=self._init_std,
min_std=self._min_std,
max_std=self._max_std,
std_hidden_sizes=self._std_hidden_sizes,
std_hidden_nonlinearity=self._std_hidden_nonlinearity,
std_output_nonlinearity=self._std_output_nonlinearity,
std_parameterization=self._std_parameterization,
layer_normalization=self._layer_normalization)
new_policy.model.parameters = self.model.parameters
return new_policy
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_dist_obs_latent']
del new_dict['_f_dist_obs_task']
del new_dict['_dist']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
| 20,755 | 43.445396 | 79 | py |
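# Standalone NumPy sketch (not part of the garage source): the augmented
# observation layout that GaussianMLPTaskEmbeddingPolicy.get_action() above
# expects, i.e. the flat observation concatenated with a one-hot task id.
# The dimensions below are made up for illustration.
import numpy as np

obs_dim, n_tasks = 6, 3
obs = np.random.randn(obs_dim)
task_onehot = np.eye(n_tasks)[1]            # task id 1
aug_obs = np.concatenate([obs, task_onehot])
# split_augmented_observation() recovers the two pieces by slicing:
obs_back, task_back = aug_obs[:obs_dim], aug_obs[obs_dim:]
assert np.allclose(obs, obs_back) and np.allclose(task_onehot, task_back)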
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/policy.py | """Base class for policies in TensorFlow."""
import abc
from garage.tf.models import Module, StochasticModule
class Policy(Module):
"""Base class for policies in TensorFlow.
Args:
name (str): Policy name, also the variable scope.
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
"""
def __init__(self, name, env_spec):
super().__init__(name)
self._env_spec = env_spec
@abc.abstractmethod
def get_action(self, observation):
"""Get action sampled from the policy.
Args:
observation (np.ndarray): Observation from the environment.
Returns:
(np.ndarray): Action sampled from the policy.
"""
@abc.abstractmethod
def get_actions(self, observations):
"""Get action sampled from the policy.
Args:
observations (list[np.ndarray]): Observations from the environment.
Returns:
(np.ndarray): Actions sampled from the policy.
"""
@property
def vectorized(self):
"""Boolean for vectorized.
Returns:
bool: Indicates whether the policy is vectorized. If True, it
should implement get_actions(), and support resetting with
multiple simultaneous states.
"""
return False
@property
def observation_space(self):
"""Observation space.
Returns:
akro.Space: The observation space of the environment.
"""
return self._env_spec.observation_space
@property
def action_space(self):
"""Action space.
Returns:
akro.Space: The action space of the environment.
"""
return self._env_spec.action_space
@property
def env_spec(self):
"""Policy environment specification.
Returns:
garage.EnvSpec: Environment specification.
"""
return self._env_spec
def log_diagnostics(self, paths):
"""Log extra information per iteration based on the collected paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
"""
# pylint: disable=abstract-method
class StochasticPolicy(Policy, StochasticModule):
"""Stochastic Policy."""
| 2,289 | 22.367347 | 79 | py |
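# Standalone sketch (not part of the garage source): the contract a concrete
# Policy subclass has to satisfy - batch sampling plus a per-observation
# convenience wrapper. A plain class is used here instead of the real base
# to avoid needing an EnvSpec or an active TF session; it is an assumption
# for illustration only.
import numpy as np

class UniformToyPolicy:
    """Toy policy that ignores observations and samples uniform actions."""

    def __init__(self, action_dim):
        self._action_dim = action_dim

    def get_actions(self, observations):
        n = len(observations)
        actions = np.random.uniform(-1., 1., size=(n, self._action_dim))
        return actions, dict()

    def get_action(self, observation):
        actions, infos = self.get_actions([observation])
        return actions[0], {k: v[0] for k, v in infos.items()}

    @property
    def vectorized(self):
        return True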
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/policies/task_embedding_policy.py | """Policy class for Task Embedding envs."""
import abc
import akro
from garage.tf.policies.policy import StochasticPolicy
class TaskEmbeddingPolicy(StochasticPolicy):
"""Base class for Task Embedding policies in TensorFlow.
This policy needs a task id in addition to observation to sample an action.
Args:
name (str): Policy name, also the variable scope.
env_spec (garage.envs.EnvSpec): Environment specification.
encoder (garage.tf.embeddings.StochasticEncoder):
A encoder that embeds a task id to a latent.
"""
# pylint: disable=too-many-public-methods
def __init__(self, name, env_spec, encoder):
super().__init__(name, env_spec)
self._encoder = encoder
self._augmented_observation_space = akro.concat(
self._env_spec.observation_space, self.task_space)
@property
def encoder(self):
"""garage.tf.embeddings.encoder.Encoder: Encoder."""
return self._encoder
def get_latent(self, task_id):
"""Get embedded task id in latent space.
Args:
task_id (np.ndarray): One-hot task id, with shape :math:`(N, )`. N
is the number of tasks.
Returns:
np.ndarray: An embedding sampled from embedding distribution, with
shape :math:`(Z, )`. Z is the dimension of the latent
embedding.
dict: Embedding distribution information.
"""
return self.encoder.get_latent(task_id)
@property
def latent_space(self):
"""akro.Box: Space of latent."""
return self.encoder.spec.output_space
@property
def task_space(self):
"""akro.Box: One-hot space of task id."""
return self.encoder.spec.input_space
@property
def augmented_observation_space(self):
"""akro.Box: Concatenated observation space and one-hot task id."""
return self._augmented_observation_space
@property
def encoder_distribution(self):
"""garage.tf.distributions.DiagonalGaussian: Encoder distribution."""
return self.encoder.distribution
@abc.abstractmethod
def get_action(self, observation):
"""Get action sampled from the policy.
Args:
observation (np.ndarray): Augmented observation from the
environment, with shape :math:`(O+N, )`. O is the dimension of
observation, N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
"""
@abc.abstractmethod
def get_actions(self, observations):
"""Get actions sampled from the policy.
Args:
observations (np.ndarray): Augmented observation from the
environment, with shape :math:`(T, O+N)`. T is the number of
environment steps, O is the dimension of observation, N is the
number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
"""
@abc.abstractmethod
def get_action_given_task(self, observation, task_id):
"""Sample an action given observation and task id.
Args:
observation (np.ndarray): Observation from the environment, with
shape :math:`(O, )`. O is the dimension of the observation.
            task_id (np.ndarray): One-hot task id, with shape :math:`(N, )`.
N is the number of tasks.
Returns:
np.ndarray: Action sampled from the policy, with shape
:math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
"""
@abc.abstractmethod
def get_actions_given_tasks(self, observations, task_ids):
"""Sample a batch of actions given observations and task ids.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps,
O is the dimension of observation.
            task_ids (np.ndarray): One-hot task ids, with shape :math:`(T, N)`.
T is the number of environment steps, N is the number of tasks.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
"""
@abc.abstractmethod
def get_action_given_latent(self, observation, latent):
"""Sample an action given observation and latent.
Args:
observation (np.ndarray): Observation from the environment,
with shape :math:`(O, )`. O is the dimension of observation.
latent (np.ndarray): Latent, with shape :math:`(Z, )`. Z is the
dimension of latent embedding.
Returns:
np.ndarray: Action sampled from the policy,
with shape :math:`(A, )`. A is the dimension of action.
dict: Action distribution information.
"""
@abc.abstractmethod
def get_actions_given_latents(self, observations, latents):
"""Sample a batch of actions given observations and latents.
Args:
observations (np.ndarray): Observations from the environment, with
shape :math:`(T, O)`. T is the number of environment steps, O
is the dimension of observation.
latents (np.ndarray): Latents, with shape :math:`(T, Z)`. T is the
number of environment steps, Z is the dimension of
latent embedding.
Returns:
np.ndarray: Actions sampled from the policy,
with shape :math:`(T, A)`. T is the number of environment
steps, A is the dimension of action.
dict: Action distribution information.
"""
def get_trainable_vars(self):
"""Get trainable variables.
The trainable vars of a multitask policy should be the trainable vars
of its model and the trainable vars of its embedding model.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return (self._variable_scope.trainable_variables() +
self.encoder.get_trainable_vars())
def get_global_vars(self):
"""Get global variables.
The global vars of a multitask policy should be the global vars
of its model and the trainable vars of its embedding model.
Returns:
List[tf.Variable]: A list of global variables in the current
variable scope.
"""
return (self._variable_scope.global_variables() +
self.encoder.get_global_vars())
def split_augmented_observation(self, collated):
"""Splits up observation into one-hot task and environment observation.
Args:
collated (np.ndarray): Environment observation concatenated with
task one-hot, with shape :math:`(O+N, )`. O is the dimension of
observation, N is the number of tasks.
Returns:
np.ndarray: Vanilla environment observation,
with shape :math:`(O, )`. O is the dimension of observation.
np.ndarray: Task one-hot, with shape :math:`(N, )`. N is the number
of tasks.
"""
task_dim = self.task_space.flat_dim
return collated[:-task_dim], collated[-task_dim:]
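# A usage sketch for a hypothetical concrete subclass instance `policy`, given
# an augmented observation `aug_obs` of shape (O+N, ) from the environment:
#
#     obs, task_onehot = policy.split_augmented_observation(aug_obs)
#     latent, _ = policy.get_latent(task_onehot)
#     action, agent_info = policy.get_action_given_latent(obs, latent)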
# CSD-locomotion-master/garaged/src/garage/tf/policies/uniform_control_policy.py
"""Uniform control policy."""
from garage.tf.policies.policy import Policy
class UniformControlPolicy(Policy):
"""Policy that output random action uniformly.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
"""
    def __init__(
            self,
            env_spec,
            name='UniformControlPolicy',
    ):
        # Policy.__init__ requires both a name and an env spec.
        super().__init__(name=name, env_spec=env_spec)
@property
def vectorized(self):
"""Vectorized or not.
Returns:
Bool: True if primitive supports vectorized operations.
"""
return True
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Action
dict: Predicted action and agent information. It returns an empty
dict since there is no parameterization.
"""
return self.action_space.sample(), dict()
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Actions
dict: Predicted action and agent information. It returns an empty
dict since there is no parameterization.
"""
return self.action_space.sample_n(len(observations)), dict()
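# A minimal usage sketch, assuming `env` is a garage-wrapped environment:
#
#     policy = UniformControlPolicy(env_spec=env.spec)
#     obs = env.reset()
#     action, _ = policy.get_action(obs)
#     actions, _ = policy.get_actions([obs, obs])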
# CSD-locomotion-master/garaged/src/garage/tf/q_functions/__init__.py
"""Q-Functions for TensorFlow-based algorithms."""
# noqa: I100
from garage.tf.q_functions.q_function import QFunction
from garage.tf.q_functions.continuous_cnn_q_function import ( # noqa: I100
ContinuousCNNQFunction)
from garage.tf.q_functions.continuous_mlp_q_function import (
ContinuousMLPQFunction)
from garage.tf.q_functions.discrete_cnn_q_function import DiscreteCNNQFunction
from garage.tf.q_functions.discrete_mlp_q_function import DiscreteMLPQFunction
__all__ = [
'QFunction', 'ContinuousMLPQFunction', 'DiscreteCNNQFunction',
'DiscreteMLPQFunction', 'ContinuousCNNQFunction'
]
# CSD-locomotion-master/garaged/src/garage/tf/q_functions/continuous_cnn_q_function.py
"""Continuous CNN QFunction with CNN-MLP structure."""
import akro
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import CNNMLPMergeModel
from garage.tf.q_functions import QFunction
class ContinuousCNNQFunction(QFunction):
"""Q function based on a CNN-MLP structure for continuous action space.
This class implements a Q value network to predict Q based on the
input state and action. It uses an CNN and a MLP to fit the function
of Q(s, a).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
are two convolutional layers. The filter for the first layer have 3
channels and its shape is (3 x 5), while the filter for the second
layer have 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
hidden_sizes (tuple[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this q-function consists of
two hidden layers, each with 32 hidden units.
action_merge_layer (int): The index of layers at which to concatenate
action inputs with the network. The indexing works like standard
python list indexing. Index of 0 refers to the input layer
(observation input) while an index of -1 points to the last
hidden layer. Default parameter points to second layer from the
end.
name (str): Variable scope of the cnn.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
max_pooling (bool): Boolean for using max pooling layer or not.
pool_shapes (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
shape (2, 2).
pool_strides (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
cnn_hidden_nonlinearity (callable): Activation function for
intermediate dense layer(s) in the CNN. It should return a
tf.Tensor. Set it to None to maintain a linear activation.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s) in the MLP. It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the MLP. The function should
return a tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the MLP. The function should
return a tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer in the MLP. It should return a tf.Tensor. Set it to None
to maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the MLP. The function should return
a tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s) in the MLP. The function should return
a tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
filters,
strides,
hidden_sizes=(256, ),
action_merge_layer=-2,
name=None,
padding='SAME',
max_pooling=False,
pool_strides=(2, 2),
pool_shapes=(2, 2),
cnn_hidden_nonlinearity=tf.nn.relu,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
if (not isinstance(env_spec.observation_space, akro.Box)
or not len(env_spec.observation_space.shape) in (2, 3)):
raise ValueError(
'{} can only process 2D, 3D akro.Image or'
' akro.Box observations, but received an env_spec with '
'observation_space of type {} and shape {}'.format(
type(self).__name__,
type(env_spec.observation_space).__name__,
env_spec.observation_space.shape))
super().__init__(name)
self._env_spec = env_spec
self._filters = filters
self._strides = strides
self._hidden_sizes = hidden_sizes
self._action_merge_layer = action_merge_layer
self._padding = padding
self._max_pooling = max_pooling
self._pool_strides = pool_strides
self._pool_shapes = pool_shapes
self._cnn_hidden_nonlinearity = cnn_hidden_nonlinearity
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._obs_dim = self._env_spec.observation_space.shape
self._action_dim = self._env_spec.action_space.shape
self.model = CNNMLPMergeModel(
filters=self._filters,
strides=self._strides,
hidden_sizes=self._hidden_sizes,
action_merge_layer=self._action_merge_layer,
padding=self._padding,
max_pooling=self._max_pooling,
pool_strides=self._pool_strides,
pool_shapes=self._pool_shapes,
cnn_hidden_nonlinearity=self._cnn_hidden_nonlinearity,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
self._initialize()
def _initialize(self):
action_ph = tf.compat.v1.placeholder(tf.float32,
(None, ) + self._action_dim,
name='action')
if isinstance(self._env_spec.observation_space, akro.Image):
obs_ph = tf.compat.v1.placeholder(tf.uint8,
(None, ) + self._obs_dim,
name='state')
augmented_obs_ph = tf.cast(obs_ph, tf.float32) / 255.0
else:
obs_ph = tf.compat.v1.placeholder(tf.float32,
(None, ) + self._obs_dim,
name='state')
augmented_obs_ph = obs_ph
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
outputs = self.model.build(augmented_obs_ph, action_ph).outputs
self._f_qval = tf.compat.v1.get_default_session().make_callable(
outputs, feed_list=[obs_ph, action_ph])
self._obs_input = obs_ph
self._act_input = action_ph
@property
def inputs(self):
"""tuple[tf.Tensor]: The observation and action input tensors.
The returned tuple contains two tensors. The first is the observation
tensor with shape :math:`(N, O*)`, and the second is the action tensor
with shape :math:`(N, A*)`.
"""
return self._obs_input, self._act_input
def get_qval(self, observation, action):
"""Q Value of the network.
Args:
observation (np.ndarray): Observation input of shape
:math:`(N, O*)`.
action (np.ndarray): Action input of shape :math:`(N, A*)`.
Returns:
np.ndarray: Array of shape :math:`(N, )` containing Q values
corresponding to each (obs, act) pair.
"""
if len(observation[0].shape) < len(self._obs_dim):
observation = self._env_spec.observation_space.unflatten_n(
observation)
return self._f_qval(observation, action)
# pylint: disable=arguments-differ
def get_qval_sym(self, state_input, action_input, name):
"""Symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor of shape
:math:`(N, O*)`.
action_input (tf.Tensor): The action input tf.Tensor of shape
:math:`(N, A*)`.
name (str): Network variable scope.
Return:
tf.Tensor: The output Q value tensor of shape :math:`(N, )`.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
augmented_state_input = state_input
if isinstance(self._env_spec.observation_space, akro.Image):
augmented_state_input = tf.cast(state_input,
tf.float32) / 255.0
return self.model.build(augmented_state_input,
action_input,
name=name).outputs
def clone(self, name):
"""Return a clone of the Q-function.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created q-function.
Return:
ContinuousCNNQFunction: Cloned Q function.
"""
new_qf = self.__class__(
name=name,
env_spec=self._env_spec,
filters=self._filters,
strides=self._strides,
hidden_sizes=self._hidden_sizes,
action_merge_layer=self._action_merge_layer,
padding=self._padding,
max_pooling=self._max_pooling,
pool_shapes=self._pool_shapes,
pool_strides=self._pool_strides,
cnn_hidden_nonlinearity=self._cnn_hidden_nonlinearity,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
new_qf.model.parameters = self.model.parameters
return new_qf
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state.
"""
new_dict = self.__dict__.copy()
del new_dict['_f_qval']
del new_dict['_obs_input']
del new_dict['_act_input']
return new_dict
def __setstate__(self, state):
"""See `Object.__setstate__.
Args:
state (dict): Unpickled state of this object.
"""
self.__dict__.update(state)
self._initialize()
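# A minimal construction sketch, assuming `env` is a garage environment with
# image observations and a continuous action space, and that a default TF
# session is active (e.g. inside a LocalTFRunner):
#
#     qf = ContinuousCNNQFunction(env_spec=env.spec,
#                                 filters=((32, (8, 8)), (64, (4, 4))),
#                                 strides=(4, 2),
#                                 hidden_sizes=(256, ))
#     q_values = qf.get_qval(observations, actions)
#     target_qf = qf.clone(name='target_qf')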
# CSD-locomotion-master/garaged/src/garage/tf/q_functions/continuous_mlp_q_function.py
"""Continuous MLP QFunction."""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import MLPMergeModel
from garage.tf.q_functions.q_function import QFunction
class ContinuousMLPQFunction(QFunction):
"""Continuous MLP QFunction.
This class implements a q value network to predict q based on the input
state and action. It uses an MLP to fit the function of Q(s, a).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Name of the q-function, also serves as the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this q-function consists of
two hidden layers, each with 32 hidden units.
action_merge_layer (int): The index of layers at which to concatenate
action inputs with the network. The indexing works like standard
python list indexing. Index of 0 refers to the input layer
(observation input) while an index of -1 points to the last
hidden layer. Default parameter points to second layer from the
end.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization.
"""
def __init__(self,
env_spec,
name='ContinuousMLPQFunction',
hidden_sizes=(32, 32),
action_merge_layer=-2,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name)
self._env_spec = env_spec
self._hidden_sizes = hidden_sizes
self._action_merge_layer = action_merge_layer
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self.model = MLPMergeModel(output_dim=1,
hidden_sizes=hidden_sizes,
concat_layer=self._action_merge_layer,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._network = None
self._initialize()
def _initialize(self):
obs_ph = tf.compat.v1.placeholder(tf.float32, (None, self._obs_dim),
name='obs')
action_ph = tf.compat.v1.placeholder(tf.float32,
(None, self._action_dim),
name='act')
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
self._network = self.model.build(obs_ph, action_ph)
self._f_qval = tf.compat.v1.get_default_session().make_callable(
self._network.outputs, feed_list=[obs_ph, action_ph])
def get_qval(self, observation, action):
"""Q Value of the network.
Args:
observation (np.ndarray): Observation input.
action (np.ndarray): Action input.
Returns:
np.ndarray: Q values.
"""
return self._f_qval(observation, action)
@property
def inputs(self):
"""Return the input tensor.
Returns:
tf.Tensor: The input tensors of the model.
"""
return self._network.inputs
# pylint: disable=arguments-differ
def get_qval_sym(self, state_input, action_input, name):
"""Symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor to the network.
action_input (tf.Tensor): The action input tf.Tensor to the
network.
name (str): Network variable scope.
Return:
tf.Tensor: The output of Continuous MLP QFunction.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
return self.model.build(state_input, action_input,
name=name).outputs
def clone(self, name):
"""Return a clone of the Q-function.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created q-function.
Returns:
ContinuousMLPQFunction: A new instance with same arguments.
"""
new_qf = self.__class__(name=name,
env_spec=self._env_spec,
hidden_sizes=self._hidden_sizes,
action_merge_layer=self._action_merge_layer,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
new_qf.model.parameters = self.model.parameters
return new_qf
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state.
"""
new_dict = self.__dict__.copy()
del new_dict['_f_qval']
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""See `Object.__setstate__.
Args:
state (dict): Unpickled state of this object.
"""
self.__dict__.update(state)
self._initialize()
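# A minimal usage sketch (e.g. as a critic for DDPG/TD3-style training),
# assuming `env` is a garage environment with a continuous action space and
# that a default TF session is active:
#
#     qf = ContinuousMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
#     q_values = qf.get_qval(observations, actions)
#     target_qf = qf.clone(name='target_qf')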
# CSD-locomotion-master/garaged/src/garage/tf/q_functions/discrete_cnn_q_function.py
"""Discrete CNN QFunction with CNN-MLP structure."""
import akro
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import CNNModel
from garage.tf.models import CNNModelWithMaxPooling
from garage.tf.models import MLPDuelingModel
from garage.tf.models import MLPModel
from garage.tf.models import Sequential
from garage.tf.q_functions.q_function import QFunction
class DiscreteCNNQFunction(QFunction):
"""Q function based on a CNN-MLP structure for discrete action space.
This class implements a Q value network to predict Q based on the
input state and action. It uses an CNN and a MLP to fit the function
of Q(s, a).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
are two convolutional layers. The filter for the first layer have 3
channels and its shape is (3 x 5), while the filter for the second
layer have 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this q-function consists of
two hidden layers, each with 32 hidden units.
name (str): Variable scope of the cnn.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
max_pooling (bool): Boolean for using max pooling layer or not.
pool_shapes (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
shape (2, 2).
pool_strides (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
cnn_hidden_nonlinearity (callable): Activation function for
intermediate dense layer(s) in the CNN. It should return a
tf.Tensor. Set it to None to maintain a linear activation.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s) in the MLP. It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the MLP. The function should
return a tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the MLP. The function should
return a tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer in the MLP. It should return a tf.Tensor. Set it to None
to maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the MLP. The function should return
a tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s) in the MLP. The function should return
a tf.Tensor.
dueling (bool): Bool for using dueling network or not.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
filters,
strides,
hidden_sizes=(256, ),
name=None,
padding='SAME',
max_pooling=False,
pool_strides=(2, 2),
pool_shapes=(2, 2),
cnn_hidden_nonlinearity=tf.nn.relu,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
dueling=False,
layer_normalization=False):
if not isinstance(env_spec.observation_space, akro.Box) or \
not len(env_spec.observation_space.shape) in (2, 3):
raise ValueError(
'{} can only process 2D, 3D akro.Image or'
' akro.Box observations, but received an env_spec with '
'observation_space of type {} and shape {}'.format(
type(self).__name__,
type(env_spec.observation_space).__name__,
env_spec.observation_space.shape))
super().__init__(name)
self._env_spec = env_spec
self._action_dim = env_spec.action_space.n
self._filters = filters
self._strides = strides
self._hidden_sizes = hidden_sizes
self._padding = padding
self._max_pooling = max_pooling
self._pool_strides = pool_strides
self._pool_shapes = pool_shapes
self._cnn_hidden_nonlinearity = cnn_hidden_nonlinearity
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._dueling = dueling
self.obs_dim = self._env_spec.observation_space.shape
action_dim = self._env_spec.action_space.flat_dim
if not max_pooling:
cnn_model = CNNModel(filters=filters,
strides=strides,
padding=padding,
hidden_nonlinearity=cnn_hidden_nonlinearity)
else:
cnn_model = CNNModelWithMaxPooling(
filters=filters,
strides=strides,
padding=padding,
pool_strides=pool_strides,
pool_shapes=pool_shapes,
hidden_nonlinearity=cnn_hidden_nonlinearity)
if not dueling:
output_model = MLPModel(output_dim=action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
else:
output_model = MLPDuelingModel(
output_dim=action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self.model = Sequential(cnn_model, output_model)
self._network = None
self._initialize()
def _initialize(self):
"""Initialize QFunction."""
if isinstance(self._env_spec.observation_space, akro.Image):
obs_ph = tf.compat.v1.placeholder(tf.uint8,
(None, ) + self.obs_dim,
name='obs')
augmented_obs_ph = tf.cast(obs_ph, tf.float32) / 255.0
else:
obs_ph = tf.compat.v1.placeholder(tf.float32,
(None, ) + self.obs_dim,
name='obs')
augmented_obs_ph = obs_ph
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
self._network = self.model.build(augmented_obs_ph)
self._obs_input = obs_ph
@property
def q_vals(self):
"""Return the Q values, the output of the network.
Return:
list[tf.Tensor]: Q values.
"""
return self._network.outputs
@property
def input(self):
"""Get input.
Return:
tf.Tensor: QFunction Input.
"""
return self._obs_input
# pylint: disable=arguments-differ
def get_qval_sym(self, state_input, name):
"""Symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor to the network.
name (str): Network variable scope.
Return:
tf.Tensor: The tf.Tensor output of Discrete CNN QFunction.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
augmented_state_input = state_input
if isinstance(self._env_spec.observation_space, akro.Image):
augmented_state_input = tf.cast(state_input,
tf.float32) / 255.0
return self.model.build(augmented_state_input, name=name).outputs
def clone(self, name):
"""Return a clone of the Q-function.
It copies the configuration of the primitive and also the parameters.
Args:
name(str) : Name of the newly created q-function.
Returns:
garage.tf.q_functions.DiscreteCNNQFunction: Clone of this object
"""
new_qf = self.__class__(name=name,
env_spec=self._env_spec,
filters=self._filters,
strides=self._strides,
hidden_sizes=self._hidden_sizes,
padding=self._padding,
max_pooling=self._max_pooling,
pool_shapes=self._pool_shapes,
pool_strides=self._pool_strides,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
dueling=self._dueling,
layer_normalization=self._layer_normalization)
new_qf.model.parameters = self.model.parameters
return new_qf
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
self.__dict__.update(state)
self._initialize()
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state.
"""
new_dict = self.__dict__.copy()
del new_dict['_obs_input']
del new_dict['_network']
return new_dict
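# A minimal construction sketch (e.g. as the Q-network for DQN on image
# observations), assuming `env` is a garage environment with a discrete
# action space and an active default TF session:
#
#     qf = DiscreteCNNQFunction(env_spec=env.spec,
#                               filters=((32, (8, 8)), (64, (4, 4))),
#                               strides=(4, 2),
#                               hidden_sizes=(256, ),
#                               dueling=True)
#     q_sym = qf.q_vals                 # Q-values for every action
#     target_qf = qf.clone(name='target_qf')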
# CSD-locomotion-master/garaged/src/garage/tf/q_functions/discrete_mlp_q_function.py
"""Discrete MLP QFunction."""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import MLPDuelingModel
from garage.tf.models import MLPModel
from garage.tf.q_functions.q_function import QFunction
class DiscreteMLPQFunction(QFunction):
"""Discrete MLP Q Function.
This class implements a Q-value network. It predicts Q-value based on the
input state and action. It uses an MLP to fit the function Q(s, a).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Name of the q-function, also serves as the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this q-function consists of
two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
dueling (bool): Use dueling model or not.
layer_normalization (bool): Bool for using layer normalization.
"""
def __init__(self,
env_spec,
name=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
dueling=False,
layer_normalization=False):
super().__init__(name)
self._env_spec = env_spec
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._dueling = dueling
self._layer_normalization = layer_normalization
self.obs_dim = env_spec.observation_space.shape
action_dim = env_spec.action_space.flat_dim
if not dueling:
self.model = MLPModel(output_dim=action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
else:
self.model = MLPDuelingModel(
output_dim=action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._network = None
self._initialize()
def _initialize(self):
"""Initialize QFunction."""
obs_ph = tf.compat.v1.placeholder(tf.float32, (None, ) + self.obs_dim,
name='obs')
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
self._network = self.model.build(obs_ph)
@property
def q_vals(self):
"""Return the Q values, the output of the network.
Return:
list[tf.Tensor]: Q values.
"""
return self._network.outputs
@property
def input(self):
"""Get input.
Return:
tf.Tensor: QFunction Input.
"""
return self._network.input
# pylint: disable=arguments-differ
def get_qval_sym(self, state_input, name):
"""Symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor to the network.
name (str): Network variable scope.
Return:
tf.Tensor: The tf.Tensor output of Discrete MLP QFunction.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
return self.model.build(state_input, name=name).outputs
def clone(self, name):
"""Return a clone of the Q-function.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created q-function.
Returns:
garage.tf.q_functions.DiscreteMLPQFunction: Clone of this object
"""
new_qf = self.__class__(name=name,
env_spec=self._env_spec,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
new_qf.model.parameters = self.model.parameters
return new_qf
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state.
"""
new_dict = self.__dict__.copy()
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
self.__dict__.update(state)
self._initialize()
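# A minimal usage sketch, assuming `env` is a garage environment with a
# discrete action space and that a default TF session is active:
#
#     qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
#     q_sym = qf.q_vals                 # symbolic Q-values, one per action
#     target_qf = qf.clone(name='target_qf')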
# CSD-locomotion-master/garaged/src/garage/tf/q_functions/q_function.py
"""Q-function base classes without Parameterized."""
import abc
class QFunction(abc.ABC):
"""Q-function base class without Parameterzied.
Args:
name (str): Name of the Q-fucntion, also the variable scope.
"""
def __init__(self, name):
self.name = name or type(self).__name__
self._variable_scope = None
def get_qval_sym(self, *input_phs):
"""Symbolic graph for q-network.
All derived classes should implement this function.
Args:
input_phs (list[tf.Tensor]): Recommended to be positional
arguments, e.g. def get_qval_sym(self, state_input,
action_input).
"""
def clone(self, name):
"""Return a clone of the Q-function.
It should only copy the configuration of the Q-function,
not the parameters.
Args:
name (str): Name of the newly created q-function.
"""
def get_trainable_vars(self):
"""Get all trainable variables under the QFunction scope."""
return self._variable_scope.trainable_variables()
def get_global_vars(self):
"""Get all global variables under the QFunction scope."""
return self._variable_scope.global_variables()
def get_regularizable_vars(self):
"""Get all network weight variables under the QFunction scope."""
trainable = self._variable_scope.global_variables()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
def log_diagnostics(self, paths):
"""Log extra information per iteration based on the collected paths."""
# CSD-locomotion-master/garaged/src/garage/tf/regressors/__init__.py
"""Regressors for TensorFlow-based algorithms."""
from garage.tf.regressors.bernoulli_mlp_regressor import BernoulliMLPRegressor
from garage.tf.regressors.categorical_mlp_regressor import (
CategoricalMLPRegressor)
from garage.tf.regressors.continuous_mlp_regressor import (
ContinuousMLPRegressor)
from garage.tf.regressors.gaussian_cnn_regressor import GaussianCNNRegressor
from garage.tf.regressors.gaussian_cnn_regressor_model import (
GaussianCNNRegressorModel)
from garage.tf.regressors.gaussian_mlp_regressor import GaussianMLPRegressor
from garage.tf.regressors.regressor import Regressor, StochasticRegressor
__all__ = [
'BernoulliMLPRegressor', 'CategoricalMLPRegressor',
'ContinuousMLPRegressor', 'GaussianCNNRegressor',
'GaussianCNNRegressorModel', 'GaussianMLPRegressor', 'Regressor',
'StochasticRegressor'
]
# CSD-locomotion-master/garaged/src/garage/tf/regressors/bernoulli_mlp_regressor.py
"""Bernoulli MLP Regressor based on MLP with Normalized Inputs."""
from dowel import tabular
import numpy as np
import tensorflow as tf
from garage import make_optimizer
from garage.experiment import deterministic
from garage.tf.distributions import Bernoulli
from garage.tf.misc import tensor_utils
from garage.tf.models import NormalizedInputMLPModel
from garage.tf.optimizers import ConjugateGradientOptimizer, LbfgsOptimizer
from garage.tf.regressors.regressor import StochasticRegressor
class BernoulliMLPRegressor(StochasticRegressor):
"""Fits data to a Bernoulli distribution, parameterized by an MLP.
Args:
input_shape (tuple[int]): Input shape of the training data. Since an
MLP model is used, implementation assumes flattened inputs. The
input shape of each data point should thus be of shape (x, ).
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for the network. For example, (32, 32) means the MLP
consists of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (Callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (Callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor. Default is Glorot uniform initializer.
hidden_b_init (Callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor. Default is zero initializer.
output_nonlinearity (Callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (Callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor. Default is Glorot uniform initializer.
output_b_init (Callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor. Default is zero initializer.
optimizer (garage.tf.Optimizer): Optimizer for minimizing the negative
            log-likelihood. Defaults to LbfgsOptimizer.
optimizer_args (dict): Arguments for the optimizer. Default is None,
which means no arguments.
tr_optimizer (garage.tf.Optimizer): Optimizer for trust region
approximation. Defaults to ConjugateGradientOptimizer.
tr_optimizer_args (dict): Arguments for the trust region optimizer.
Default is None, which means no arguments.
use_trust_region (bool): Whether to use trust region constraint.
max_kl_step (float): KL divergence constraint for each iteration.
normalize_inputs (bool): Bool for normalizing inputs or not.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_shape,
output_dim,
name='BernoulliMLPRegressor',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=tf.nn.sigmoid,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
optimizer=None,
optimizer_args=None,
tr_optimizer=None,
tr_optimizer_args=None,
use_trust_region=True,
max_kl_step=0.01,
normalize_inputs=True,
layer_normalization=False):
super().__init__(input_shape, output_dim, name)
self._use_trust_region = use_trust_region
self._max_kl_step = max_kl_step
self._normalize_inputs = normalize_inputs
with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
self._variable_scope = vs
optimizer_args = optimizer_args or dict()
tr_optimizer_args = tr_optimizer_args or dict()
if optimizer is None:
self._optimizer = make_optimizer(LbfgsOptimizer,
**optimizer_args)
else:
self._optimizer = make_optimizer(optimizer, **optimizer_args)
if tr_optimizer is None:
self._tr_optimizer = make_optimizer(ConjugateGradientOptimizer,
**tr_optimizer_args)
else:
self._tr_optimizer = make_optimizer(tr_optimizer,
**tr_optimizer_args)
self._first_optimized = False
self.model = NormalizedInputMLPModel(
input_shape,
output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._dist = Bernoulli(output_dim)
self._network = None
self._initialize()
def _initialize(self):
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
self._input_shape)
with tf.compat.v1.variable_scope(self._variable_scope):
self._network = self.model.build(input_var)
ys_var = tf.compat.v1.placeholder(dtype=tf.float32,
name='ys',
shape=(None, self._output_dim))
old_prob_var = tf.compat.v1.placeholder(dtype=tf.float32,
name='old_prob',
shape=(None,
self._output_dim))
y_hat = self._network.y_hat
old_info_vars = dict(p=old_prob_var)
info_vars = dict(p=y_hat)
mean_kl = tf.reduce_mean(
self._dist.kl_sym(old_info_vars, info_vars))
loss = -tf.reduce_mean(
self._dist.log_likelihood_sym(ys_var, info_vars))
predicted = y_hat >= 0.5
self._f_predict = tensor_utils.compile_function([input_var],
predicted)
self._f_prob = tensor_utils.compile_function([input_var], y_hat)
self._optimizer.update_opt(loss=loss,
target=self,
inputs=[input_var, ys_var])
self._tr_optimizer.update_opt(
loss=loss,
target=self,
inputs=[input_var, ys_var, old_prob_var],
leq_constraint=(mean_kl, self._max_kl_step))
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Label of input data.
"""
if self._normalize_inputs:
# recompute normalizing constants for inputs
self._network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._network.x_std.load(np.std(xs, axis=0, keepdims=True) + 1e-8)
if self._use_trust_region and self._first_optimized:
# To use trust region constraint and optimizer
old_prob = self._f_prob(xs)
inputs = [xs, ys, old_prob]
optimizer = self._tr_optimizer
else:
inputs = [xs, ys]
optimizer = self._optimizer
loss_before = optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
optimizer.optimize(inputs)
loss_after = optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
self._first_optimized = True
def predict(self, xs):
"""Predict ys based on input xs.
Args:
xs (numpy.ndarray): Input data of shape (samples, input_dim)
Return:
numpy.ndarray: The deterministic predicted ys (one hot vectors)
of shape (samples, output_dim)
"""
return self._f_predict(xs)
def sample_predict(self, xs):
"""Do a Bernoulli sampling given input xs.
Args:
xs (numpy.ndarray): Input data of shape (samples, input_dim)
Returns:
numpy.ndarray: The stochastic sampled ys
of shape (samples, output_dim)
"""
p = self._f_prob(xs)
return self._dist.sample(dict(p=p))
def predict_log_likelihood(self, xs, ys):
"""Log likelihood of ys given input xs.
Args:
xs (numpy.ndarray): Input data of shape (samples, input_dim)
ys (numpy.ndarray): Output data of shape (samples, output_dim)
Returns:
numpy.ndarray: The log likelihood of shape (samples, )
"""
p = self._f_prob(xs)
return self._dist.log_likelihood(ys, dict(p=p))
def log_likelihood_sym(self, x_var, y_var, name=None):
"""Build a symbolic graph of the log-likelihood.
Args:
x_var (tf.Tensor): Input tf.Tensor for the input data.
y_var (tf.Tensor): Input tf.Tensor for the one hot label of data.
name (str): Name of the new graph.
Return:
tf.Tensor: Output of the symbolic log-likelihood graph.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
prob, _, _ = self.model.build(x_var, name=name).outputs
return self._dist.log_likelihood_sym(y_var, dict(p=prob))
# pylint: disable=unused-argument
def dist_info_sym(self, input_var, state_info_vars=None, name=None):
"""Build a symbolic graph of the distribution parameters.
Args:
input_var (tf.Tensor): Input tf.Tensor for the input data.
state_info_vars (dict): a dictionary whose values should contain
information about the state of the regressor at the time it
received the input.
name (str): Name of the new graph.
Return:
dict[tf.Tensor]: Output of the symbolic graph of the distribution
parameters.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
prob, _, _ = self.model.build(input_var, name=name).outputs
return dict(prob=prob)
@property
def recurrent(self):
"""bool: If this module has a hidden state."""
return False
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
@property
def distribution(self):
"""garage.tf.distributions.DiagonalGaussian: Distribution."""
return self._dist
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_f_prob']
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
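# A minimal usage sketch for binary targets, assuming `xs` has shape
# (N, input_dim), `ys` holds 0/1 labels of shape (N, output_dim), and that a
# default TF session is active:
#
#     regressor = BernoulliMLPRegressor(input_shape=(input_dim, ),
#                                       output_dim=output_dim)
#     regressor.fit(xs, ys)
#     y_pred = regressor.predict(xs)           # thresholded predictions
#     y_sample = regressor.sample_predict(xs)  # Bernoulli samples
#     ll = regressor.predict_log_likelihood(xs, ys)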
# CSD-locomotion-master/garaged/src/garage/tf/regressors/categorical_mlp_regressor.py
"""A regressor based on MLP with Normalized Inputs."""
from dowel import tabular
import numpy as np
import tensorflow as tf
from garage import make_optimizer
from garage.experiment import deterministic
from garage.tf.misc import tensor_utils
from garage.tf.optimizers import ConjugateGradientOptimizer, LbfgsOptimizer
from garage.tf.regressors.categorical_mlp_regressor_model import (
CategoricalMLPRegressorModel)
from garage.tf.regressors.regressor import StochasticRegressor
class CategoricalMLPRegressor(StochasticRegressor):
"""Fits data to a Categorical with parameters are the output of an MLP.
A class for performing regression (or classification, really) by fitting
a Categorical distribution to the outputs. Assumes that the output will
    always be a one-hot vector.
Args:
input_shape (tuple[int]): Input shape of the training data. Since an
MLP model is used, implementation assumes flattened inputs. The
input shape of each data point should thus be of shape (x, ).
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for the network. For example, (32, 32) means the MLP
consists of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (Callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
            None to maintain a linear activation.
hidden_w_init (Callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor. Default is Glorot uniform initializer.
hidden_b_init (Callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor. Default is zero initializer.
output_nonlinearity (Callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
output_w_init (Callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor. Default is Glorot uniform initializer.
output_b_init (Callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor. Default is zero initializer.
optimizer (garage.tf.Optimizer): Optimizer for minimizing the negative
            log-likelihood. Defaults to LbfgsOptimizer.
optimizer_args (dict): Arguments for the optimizer. Default is None,
which means no arguments.
tr_optimizer (garage.tf.Optimizer): Optimizer for trust region
approximation. Defaults to ConjugateGradientOptimizer.
tr_optimizer_args (dict): Arguments for the trust region optimizer.
Default is None, which means no arguments.
use_trust_region (bool): Whether to use trust region constraint.
max_kl_step (float): KL divergence constraint for each iteration.
normalize_inputs (bool): Bool for normalizing inputs or not.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_shape,
output_dim,
name='CategoricalMLPRegressor',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=tf.nn.softmax,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
optimizer=None,
optimizer_args=None,
tr_optimizer=None,
tr_optimizer_args=None,
use_trust_region=True,
max_kl_step=0.01,
normalize_inputs=True,
layer_normalization=False):
super().__init__(input_shape, output_dim, name)
self._use_trust_region = use_trust_region
self._max_kl_step = max_kl_step
self._normalize_inputs = normalize_inputs
with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
self._variable_scope = vs
if optimizer_args is None:
optimizer_args = dict()
if tr_optimizer_args is None:
tr_optimizer_args = dict()
if optimizer is None:
self._optimizer = make_optimizer(LbfgsOptimizer,
**optimizer_args)
else:
self._optimizer = make_optimizer(optimizer, **optimizer_args)
if tr_optimizer is None:
self._tr_optimizer = make_optimizer(ConjugateGradientOptimizer,
**tr_optimizer_args)
else:
self._tr_optimizer = make_optimizer(tr_optimizer,
**tr_optimizer_args)
self._first_optimized = False
self.model = CategoricalMLPRegressorModel(
input_shape,
output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
# model for old distribution, used when trusted region is on
self._old_model = self.model.clone(name='model_for_old_dist')
self._network = None
self._old_network = None
self._initialize()
def _initialize(self):
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
self._input_shape)
self._old_network = self._old_model.build(input_var)
with tf.compat.v1.variable_scope(self._variable_scope):
self._network = self.model.build(input_var)
self._old_model.parameters = self.model.parameters
ys_var = tf.compat.v1.placeholder(dtype=tf.float32,
name='ys',
shape=(None, self._output_dim))
y_hat = self._network.y_hat
dist = self._network.dist
old_dist = self._old_network.dist
mean_kl = tf.reduce_mean(old_dist.kl_divergence(dist))
loss = -tf.reduce_mean(dist.log_prob(ys_var))
# pylint: disable=no-value-for-parameter
predicted = tf.one_hot(tf.argmax(y_hat, axis=1),
depth=self._output_dim)
self._f_predict = tensor_utils.compile_function([input_var],
predicted)
self._optimizer.update_opt(loss=loss,
target=self,
inputs=[input_var, ys_var])
self._tr_optimizer.update_opt(loss=loss,
target=self,
inputs=[input_var, ys_var],
leq_constraint=(mean_kl,
self._max_kl_step))
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Label of input data.
"""
if self._normalize_inputs:
# recompute normalizing constants for inputs
self._network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._network.x_std.load(np.std(xs, axis=0, keepdims=True))
self._old_network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._old_network.x_std.load(np.std(xs, axis=0, keepdims=True))
inputs = [xs, ys]
if self._use_trust_region:
# To use trust region constraint and optimizer
optimizer = self._tr_optimizer
else:
optimizer = self._optimizer
loss_before = optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
optimizer.optimize(inputs)
loss_after = optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
self._first_optimized = True
self._old_model.parameters = self.model.parameters
def predict(self, xs):
"""Predict ys based on input xs.
Args:
xs (numpy.ndarray): Input data.
Return:
numpy.ndarray: The predicted ys (one hot vectors).
"""
return self._f_predict(xs)
@property
def recurrent(self):
"""bool: If this module has a hidden state."""
return False
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
@property
def distribution(self):
"""garage.tf.distributions.DiagonalGaussian: Distribution."""
return self._network.dist
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_network']
del new_dict['_old_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
super().__setstate__(state)
self._initialize()
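# A minimal usage sketch, assuming `xs` has shape (N, input_dim), `ys` holds
# one-hot labels of shape (N, n_classes), and that a default TF session is
# active:
#
#     regressor = CategoricalMLPRegressor(input_shape=(input_dim, ),
#                                         output_dim=n_classes)
#     regressor.fit(xs, ys)
#     y_pred = regressor.predict(xs)  # one-hot predictions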
# CSD-locomotion-master/garaged/src/garage/tf/regressors/categorical_mlp_regressor_model.py
"""CategoricalMLPRegressorModel."""
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models import NormalizedInputMLPModel
class CategoricalMLPRegressorModel(NormalizedInputMLPModel):
"""CategoricalMLPRegressorModel based on garage.tf.models.Model class.
This class can be used to perform regression by fitting a Categorical
distribution to the outputs.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_shape,
output_dim,
name='CategoricalMLPRegressorModel',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(input_shape=input_shape,
output_dim=output_dim,
name=name,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return ['y_hat', 'x_mean', 'x_std', 'dist']
def _build(self, state_input, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Observation inputs.
name (str): Inner model name, also the variable scope of the
                inner model, if it exists. One example is
                garage.tf.models.Sequential.
        Returns:
            tf.Tensor: Tensor output of the model.
            tf.Tensor: Mean of the input data (x_mean).
            tf.Tensor: Std of the input data (x_std).
tfp.distributions.OneHotCategorical: Categorical distribution.
"""
y_hat, x_mean_var, x_std_var = super()._build(state_input, name=name)
dist = tfp.distributions.OneHotCategorical(probs=y_hat)
return y_hat, x_mean_var, x_std_var, dist
def clone(self, name):
"""Return a clone of the model.
It only copies the configuration of the primitive,
not the parameters.
Args:
name (str): Name of the newly created model. It has to be
different from source model if cloned under the same
computational graph.
Returns:
garage.tf.regressors.CategoricalMLPRegressorModel: Newly cloned
model.
"""
return self.__class__(name=name,
input_shape=self._input_shape,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
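# A small sketch of what the 'dist' output above provides (illustrative). The
# OneHotCategorical built from the network's output probabilities scores
# one-hot labels directly, which is what makes this model usable for
# categorical regression losses; the probabilities below are arbitrary:
#
#     import tensorflow_probability as tfp
#
#     dist = tfp.distributions.OneHotCategorical(probs=[[0.7, 0.2, 0.1]])
#     log_p = dist.log_prob([[1, 0, 0]])   # equals log(0.7)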
| 5,386 | 41.753968 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/regressors/continuous_mlp_regressor.py | """A regressor based on a MLP model."""
from dowel import tabular
import numpy as np
import tensorflow as tf
from garage import make_optimizer
from garage.experiment import deterministic
from garage.tf.misc import tensor_utils
from garage.tf.models import NormalizedInputMLPModel
from garage.tf.optimizers import LbfgsOptimizer
from garage.tf.regressors.regressor import Regressor
class ContinuousMLPRegressor(Regressor):
"""Fits continuously-valued data to an MLP model.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (Callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (Callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (Callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (Callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (Callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (Callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
optimizer (garage.tf.Optimizer): Optimizer for minimizing the negative
log-likelihood.
optimizer_args (dict): Arguments for the optimizer. Default is None,
which means no arguments.
normalize_inputs (bool): Bool for normalizing inputs or not.
"""
def __init__(self,
input_shape,
output_dim,
name='ContinuousMLPRegressor',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
optimizer=None,
optimizer_args=None,
normalize_inputs=True):
super().__init__(input_shape, output_dim, name)
self._normalize_inputs = normalize_inputs
with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
self._variable_scope = vs
if optimizer_args is None:
optimizer_args = dict()
if optimizer is None:
self._optimizer = make_optimizer(LbfgsOptimizer,
**optimizer_args)
else:
self._optimizer = make_optimizer(optimizer, **optimizer_args)
self.model = NormalizedInputMLPModel(
input_shape=input_shape,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init)
self._network = None
self._initialize()
def _initialize(self):
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
self._input_shape)
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
self._network = self.model.build(input_var)
ys_var = tf.compat.v1.placeholder(dtype=tf.float32,
name='ys',
shape=(None, self._output_dim))
y_hat = self._network.y_hat
loss = tf.reduce_mean(tf.square(y_hat - ys_var))
self._f_predict = tensor_utils.compile_function([input_var], y_hat)
optimizer_args = dict(
loss=loss,
target=self,
network_outputs=[ys_var],
)
optimizer_args['inputs'] = [input_var, ys_var]
with tf.name_scope('update_opt'):
self._optimizer.update_opt(**optimizer_args)
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Output labels.
"""
if self._normalize_inputs:
# recompute normalizing constants for inputs
self._network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._network.x_std.load(np.std(xs, axis=0, keepdims=True) + 1e-8)
inputs = [xs, ys]
loss_before = self._optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
self._optimizer.optimize(inputs)
loss_after = self._optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
def predict(self, xs):
"""Predict y based on input xs.
Args:
xs (numpy.ndarray): Input data.
Return:
numpy.ndarray: The predicted ys.
"""
return self._f_predict(xs)
def predict_sym(self, xs, name=None):
"""Build a symbolic graph of the model prediction.
Args:
xs (tf.Tensor): Input tf.Tensor for the input data.
name (str): Name of the new graph.
Return:
tf.Tensor: Output of the symbolic prediction graph.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
y_hat, _, _ = self.model.build(xs, name=name).outputs
return y_hat
@property
def recurrent(self):
"""bool: If this module has a hidden state."""
return False
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
super().__setstate__(state)
self._initialize()
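# Illustrative usage sketch. It assumes a plain TF1-style session with manual
# variable initialization (inside garage a LocalTFRunner would normally take
# care of both), and the data shapes are arbitrary:
#
#     import numpy as np
#     import tensorflow as tf
#
#     xs = np.random.randn(64, 3).astype(np.float32)
#     ys = xs.sum(axis=1, keepdims=True).astype(np.float32)
#     with tf.compat.v1.Session() as sess:
#         regressor = ContinuousMLPRegressor(input_shape=(3, ),
#                                            output_dim=1,
#                                            hidden_sizes=(8, 8))
#         sess.run(tf.compat.v1.global_variables_initializer())
#         regressor.fit(xs, ys)            # minimizes mean squared error
#         y_pred = regressor.predict(xs)   # shape (64, 1)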
| 7,327 | 35.457711 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/regressors/gaussian_cnn_regressor.py | """A regressor based on a GaussianMLP model."""
from dowel import tabular
import numpy as np
import tensorflow as tf
from garage import make_optimizer
from garage.experiment import deterministic
from garage.tf.misc import tensor_utils
from garage.tf.optimizers import LbfgsOptimizer, PenaltyLbfgsOptimizer
from garage.tf.regressors.gaussian_cnn_regressor_model import (
GaussianCNNRegressorModel)
from garage.tf.regressors.regressor import StochasticRegressor
class GaussianCNNRegressor(StochasticRegressor):
"""Fits a Gaussian distribution to the outputs of a CNN.
Args:
input_shape(tuple[int]): Input shape of the model (without the batch
dimension).
output_dim (int): Output dimension of the model.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has
            3 channels and its shape is (3 x 5), while the filter for the
            second layer has 32 channels and its shape is (3 x 3).
strides(tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the Convolutional model for mean. For example, (32, 32) means the
network consists of two dense layers, each with 32 hidden units.
hidden_nonlinearity (Callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (Callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (Callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (Callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (Callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (Callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Whether to train the standard deviation parameter of
the Gaussian distribution.
init_std (float): Initial standard deviation for the Gaussian
distribution.
adaptive_std (bool): Whether to use a neural network to learn the
standard deviation of the Gaussian distribution. Unless True, the
standard deviation is learned as a parameter which is not
conditioned on the inputs.
std_share_network (bool): Boolean for whether the mean and standard
deviation models share a CNN network. If True, each is a head from
a single body network. Otherwise, the parameters are estimated
            using the outputs of two independent networks.
std_filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and
dimension of filters. For example, ((3, (3, 5)), (32, (3, 3)))
            means there are two convolutional layers. The filter for the
            first layer has 3 channels and its shape is (3 x 5), while the
            filter for the second layer has 32 channels and its shape is
            (3 x 3).
std_strides(tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
std_padding (str): The type of padding algorithm to use in std network,
either 'SAME' or 'VALID'.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the Conv for std. For example, (32, 32) means the Conv consists
of two hidden layers, each with 32 hidden units.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_output_nonlinearity (Callable): Activation function for output
dense layer in the std network. It should return a tf.Tensor. Set
it to None to maintain a linear activation.
layer_normalization (bool): Bool for using layer normalization or not.
normalize_inputs (bool): Bool for normalizing inputs or not.
normalize_outputs (bool): Bool for normalizing outputs or not.
subsample_factor (float): The factor to subsample the data. By default
it is 1.0, which means using all the data.
optimizer (garage.tf.Optimizer): Optimizer used for fitting the model.
optimizer_args (dict): Arguments for the optimizer. Default is None,
which means no arguments.
use_trust_region (bool): Whether to use a KL-divergence constraint.
max_kl_step (float): KL divergence constraint for each iteration, if
`use_trust_region` is active.
"""
def __init__(self,
input_shape,
output_dim,
filters,
strides,
padding,
hidden_sizes,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
name='GaussianCNNRegressor',
learn_std=True,
init_std=1.0,
adaptive_std=False,
std_share_network=False,
std_filters=(),
std_strides=(),
std_padding='SAME',
std_hidden_sizes=(),
std_hidden_nonlinearity=None,
std_output_nonlinearity=None,
layer_normalization=False,
normalize_inputs=True,
normalize_outputs=True,
subsample_factor=1.,
optimizer=None,
optimizer_args=None,
use_trust_region=True,
max_kl_step=0.01):
super().__init__(input_shape, output_dim, name)
self._use_trust_region = use_trust_region
self._subsample_factor = subsample_factor
self._max_kl_step = max_kl_step
self._normalize_inputs = normalize_inputs
self._normalize_outputs = normalize_outputs
with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
self._variable_scope = vs
if optimizer_args is None:
optimizer_args = dict()
if optimizer is None:
if use_trust_region:
self._optimizer = make_optimizer(PenaltyLbfgsOptimizer,
**optimizer_args)
else:
self._optimizer = make_optimizer(LbfgsOptimizer,
**optimizer_args)
else:
self._optimizer = make_optimizer(optimizer, **optimizer_args)
self.model = GaussianCNNRegressorModel(
input_shape=input_shape,
output_dim=output_dim,
filters=filters,
strides=strides,
padding=padding,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=None,
max_std=None,
std_filters=std_filters,
std_strides=std_strides,
std_padding=std_padding,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_output_nonlinearity=std_output_nonlinearity,
std_parameterization='exp',
layer_normalization=layer_normalization)
self._network = None
self._initialize()
def _initialize(self):
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
self._input_shape)
with tf.compat.v1.variable_scope(self._variable_scope):
self._network = self.model.build(input_var)
ys_var = tf.compat.v1.placeholder(dtype=tf.float32,
name='ys',
shape=(None, self._output_dim))
old_means_var = tf.compat.v1.placeholder(dtype=tf.float32,
name='old_means',
shape=(None,
self._output_dim))
old_log_stds_var = tf.compat.v1.placeholder(
dtype=tf.float32,
name='old_log_stds',
shape=(None, self._output_dim))
y_mean_var = self._network.y_mean
y_std_var = self._network.y_std
means_var = self._network.means
log_stds_var = self._network.log_stds
normalized_means_var = self._network.normalized_means
normalized_log_stds_var = self._network.normalized_log_stds
normalized_ys_var = (ys_var - y_mean_var) / y_std_var
normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
normalized_old_log_stds_var = (old_log_stds_var -
tf.math.log(y_std_var))
normalized_dist_info_vars = dict(mean=normalized_means_var,
log_std=normalized_log_stds_var)
mean_kl = tf.reduce_mean(
self._network.dist.kl_sym(
dict(mean=normalized_old_means_var,
log_std=normalized_old_log_stds_var),
normalized_dist_info_vars,
))
loss = -tf.reduce_mean(
self._network.dist.log_likelihood_sym(
normalized_ys_var, normalized_dist_info_vars))
self._f_predict = tensor_utils.compile_function([input_var],
means_var)
self._f_pdists = tensor_utils.compile_function(
[input_var], [means_var, log_stds_var])
optimizer_args = dict(
loss=loss,
target=self,
network_outputs=[
normalized_means_var, normalized_log_stds_var
],
)
if self._use_trust_region:
optimizer_args['leq_constraint'] = (mean_kl, self._max_kl_step)
optimizer_args['inputs'] = [
input_var, ys_var, old_means_var, old_log_stds_var
]
else:
optimizer_args['inputs'] = [input_var, ys_var]
with tf.name_scope('update_opt'):
self._optimizer.update_opt(**optimizer_args)
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Label of input data.
"""
if self._subsample_factor < 1:
num_samples_tot = xs.shape[0]
idx = np.random.randint(
0, num_samples_tot,
int(num_samples_tot * self._subsample_factor))
xs, ys = xs[idx], ys[idx]
if self._normalize_inputs:
# recompute normalizing constants for inputs
self._network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._network.x_std.load(np.std(xs, axis=0, keepdims=True) + 1e-8)
if self._normalize_outputs:
# recompute normalizing constants for outputs
self._network.y_mean.load(np.mean(ys, axis=0, keepdims=True))
self._network.y_std.load(np.std(ys, axis=0, keepdims=True) + 1e-8)
if self._use_trust_region:
old_means, old_log_stds = self._f_pdists(xs)
inputs = [xs, ys, old_means, old_log_stds]
else:
inputs = [xs, ys]
loss_before = self._optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
self._optimizer.optimize(inputs)
loss_after = self._optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
if self._use_trust_region:
tabular.record('{}/MeanKL'.format(self._name),
self._optimizer.constraint_val(inputs))
tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
def predict(self, xs):
"""Predict ys based on input xs.
Args:
xs (numpy.ndarray): Input data.
Return:
numpy.ndarray: The predicted ys.
"""
return self._f_predict(xs)
def log_likelihood_sym(self, x_var, y_var, name=None):
"""Create a symbolic graph of the log likelihood.
Args:
x_var (tf.Tensor): Input tf.Tensor for the input data.
y_var (tf.Tensor): Input tf.Tensor for the label of data.
name (str): Name of the new graph.
Return:
tf.Tensor: Output of the symbolic log-likelihood graph.
"""
params = self.dist_info_sym(x_var, name=name)
means_var = params['mean']
log_stds_var = params['log_std']
return self._network.dist.log_likelihood_sym(
y_var, dict(mean=means_var, log_std=log_stds_var))
def dist_info_sym(self, input_var, state_info_vars=None, name=None):
"""Create a symbolic graph of the distribution parameters.
Args:
input_var (tf.Tensor): tf.Tensor of the input data.
state_info_vars (dict): a dictionary whose values should contain
information about the state of the regressor at the time it
received the input.
name (str): Name of the new graph.
Return:
dict[tf.Tensor]: Outputs of the symbolic distribution parameter
graph.
"""
del state_info_vars
with tf.compat.v1.variable_scope(self._variable_scope):
network = self.model.build(input_var, name=name)
means_var = network.means
log_stds_var = network.log_stds
return dict(mean=means_var, log_std=log_stds_var)
@property
def recurrent(self):
"""bool: If this module has a hidden state."""
return False
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
@property
def distribution(self):
"""garage.tf.distributions.DiagonalGaussian: Distribution."""
return self._network.dist
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_f_pdists']
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
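# Construction sketch (illustrative) showing how the filter/stride
# specification from the docstring maps to constructor arguments. The
# (height, width, channels) input shape is an assumption of this sketch:
#
#     regressor = GaussianCNNRegressor(input_shape=(16, 16, 3),
#                                      output_dim=2,
#                                      filters=((3, (3, 5)), (32, (3, 3))),
#                                      strides=(1, 2),
#                                      padding='SAME',
#                                      hidden_sizes=(32, 32))
#     # fit(xs, ys) then expects xs of shape (N, 16, 16, 3) and ys of
#     # shape (N, 2); with the default use_trust_region=True, fit() also
#     # records '<name>/MeanKL'.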
| 16,558 | 41.898964 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/regressors/gaussian_cnn_regressor_model.py | """GaussianCNNRegressorModel."""
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models import GaussianCNNModel
class GaussianCNNRegressorModel(GaussianCNNModel):
"""GaussianCNNRegressor based on garage.tf.models.Model class.
This class can be used to perform regression by fitting a Gaussian
distribution to the outputs.
Args:
input_shape(tuple[int]): Input shape of the model (without the batch
dimension).
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has
            3 channels and its shape is (3 x 5), while the filter for the
            second layer has 32 channels and its shape is (3 x 3).
strides(tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the Convolutional model for mean. For example, (32, 32) means the
network consists of two dense layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
std_filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and
dimension of filters. For example, ((3, (3, 5)), (32, (3, 3)))
            means there are two convolutional layers. The filter for the
            first layer has 3 channels and its shape is (3 x 5), while the
            filter for the second layer has 32 channels and its shape is
            (3 x 3).
std_strides(tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
std_padding (str): The type of padding algorithm to use in std network,
either 'SAME' or 'VALID'.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the Conv for std. For example, (32, 32) means the Conv consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the std network.
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the std network.
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a tf.Tensor. Set
it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_shape,
output_dim,
filters,
strides,
padding,
hidden_sizes,
name='GaussianCNNRegressorModel',
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_filters=(),
std_strides=(),
std_padding='SAME',
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_hidden_b_init=tf.zeros_initializer(),
std_output_nonlinearity=None,
std_output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_parameterization='exp',
layer_normalization=False):
super().__init__(output_dim=output_dim,
filters=filters,
strides=strides,
padding=padding,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_filters=std_filters,
std_strides=std_strides,
std_padding=std_padding,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_hidden_w_init=std_hidden_w_init,
std_hidden_b_init=std_hidden_b_init,
std_output_nonlinearity=std_output_nonlinearity,
std_output_w_init=std_output_w_init,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization)
self._input_shape = input_shape
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'sample', 'means', 'log_stds', 'std_param', 'normalized_means',
'normalized_log_stds', 'x_mean', 'x_std', 'y_mean', 'y_std', 'dist'
]
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
            state_input (tf.Tensor): Placeholder for state input.
            name (str): Inner model name, also the variable scope of the
                inner model, if it exists. One example is
                garage.tf.models.Sequential.
        Return:
            tf.Tensor: Sampled output.
            tf.Tensor: Mean of the outputs.
            tf.Tensor: log_std of the outputs.
            tf.Tensor: Raw std parameter.
            tf.Tensor: Normalized mean.
            tf.Tensor: Normalized log_std.
            tf.Tensor: Mean for input data (x_mean).
            tf.Tensor: Std for input data (x_std).
            tf.Tensor: Mean for label (y_mean).
            tf.Tensor: Std for label (y_std).
            garage.tf.distributions.DiagonalGaussian: Output distribution.
        """
with tf.compat.v1.variable_scope('normalized_vars'):
x_mean_var = tf.compat.v1.get_variable(
name='x_mean',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
x_std_var = tf.compat.v1.get_variable(
name='x_std_var',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
y_mean_var = tf.compat.v1.get_variable(
name='y_mean_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
y_std_var = tf.compat.v1.get_variable(
name='y_std_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
normalized_xs_var = (state_input - x_mean_var) / x_std_var
sample, normalized_mean, normalized_log_std, std_param, dist = super(
)._build(normalized_xs_var)
with tf.name_scope('mean_network'):
means_var = normalized_mean * y_std_var + y_mean_var
with tf.name_scope('std_network'):
log_stds_var = normalized_log_std + tf.math.log(y_std_var)
return (sample, means_var, log_stds_var, std_param, normalized_mean,
normalized_log_std, x_mean_var, x_std_var, y_mean_var,
y_std_var, dist)
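# A quick numeric check of the de-normalization above (illustrative): the
# network predicts in normalized output space and _build maps it back via
# mean = normalized_mean * y_std + y_mean and
# log_std = normalized_log_std + log(y_std):
#
#     import numpy as np
#
#     y_mean, y_std = 2.0, 4.0
#     normalized_mean, normalized_log_std = 0.5, np.log(0.25)
#     mean = normalized_mean * y_std + y_mean        # -> 4.0
#     log_std = normalized_log_std + np.log(y_std)   # -> log(1.0) == 0.0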
| 10,901 | 46.815789 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/regressors/gaussian_mlp_regressor.py | """A regressor based on a GaussianMLP model."""
from dowel import tabular
import numpy as np
import tensorflow as tf
from garage import make_optimizer
from garage.experiment import deterministic
from garage.tf.misc import tensor_utils
from garage.tf.optimizers import LbfgsOptimizer, PenaltyLbfgsOptimizer
from garage.tf.regressors.gaussian_mlp_regressor_model import (
GaussianMLPRegressorModel)
from garage.tf.regressors.regressor import StochasticRegressor
class GaussianMLPRegressor(StochasticRegressor):
"""Fits data to a Gaussian whose parameters are estimated by an MLP.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (Callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (Callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (Callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (Callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (Callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (Callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
optimizer (garage.tf.Optimizer): Optimizer for minimizing the negative
log-likelihood.
optimizer_args (dict): Arguments for the optimizer. Default is None,
which means no arguments.
use_trust_region (bool): Whether to use trust region constraint.
max_kl_step (float): KL divergence constraint for each iteration.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
std_nonlinearity (Callable): Nonlinearity for each hidden layer in
the std network.
layer_normalization (bool): Bool for using layer normalization or not.
normalize_inputs (bool): Bool for normalizing inputs or not.
normalize_outputs (bool): Bool for normalizing outputs or not.
subsample_factor (float): The factor to subsample the data. By default
it is 1.0, which means using all the data.
"""
def __init__(self,
input_shape,
output_dim,
name='GaussianMLPRegressor',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
optimizer=None,
optimizer_args=None,
use_trust_region=True,
max_kl_step=0.01,
learn_std=True,
init_std=1.0,
adaptive_std=False,
std_share_network=False,
std_hidden_sizes=(32, 32),
std_nonlinearity=None,
layer_normalization=False,
normalize_inputs=True,
normalize_outputs=True,
subsample_factor=1.0):
super().__init__(input_shape, output_dim, name)
self._use_trust_region = use_trust_region
self._subsample_factor = subsample_factor
self._max_kl_step = max_kl_step
self._normalize_inputs = normalize_inputs
self._normalize_outputs = normalize_outputs
with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
self._variable_scope = vs
if optimizer_args is None:
optimizer_args = dict()
if optimizer is None:
if use_trust_region:
self._optimizer = make_optimizer(PenaltyLbfgsOptimizer,
**optimizer_args)
else:
self._optimizer = make_optimizer(LbfgsOptimizer,
**optimizer_args)
else:
self._optimizer = make_optimizer(optimizer, **optimizer_args)
self.model = GaussianMLPRegressorModel(
input_shape=input_shape,
output_dim=self._output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=None,
max_std=None,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_nonlinearity,
std_output_nonlinearity=None,
std_parameterization='exp',
layer_normalization=layer_normalization)
        # model for the old distribution, used when the trust region is on
self._old_model = self.model.clone(name='model_for_old_dist')
self._network = None
self._old_network = None
self._initialize()
def _initialize(self):
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
self._input_shape)
self._old_network = self._old_model.build(input_var)
with tf.compat.v1.variable_scope(self._variable_scope):
self._network = self.model.build(input_var)
self._old_model.parameters = self.model.parameters
ys_var = tf.compat.v1.placeholder(dtype=tf.float32,
name='ys',
shape=(None, self._output_dim))
y_mean_var = self._network.y_mean
y_std_var = self._network.y_std
means_var = self._network.mean
normalized_means_var = self._network.normalized_mean
normalized_log_stds_var = self._network.normalized_log_std
normalized_ys_var = (ys_var - y_mean_var) / y_std_var
old_normalized_dist = self._old_network.normalized_dist
normalized_dist = self._network.normalized_dist
mean_kl = tf.reduce_mean(
old_normalized_dist.kl_divergence(normalized_dist))
loss = -tf.reduce_mean(normalized_dist.log_prob(normalized_ys_var))
self._f_predict = tensor_utils.compile_function([input_var],
means_var)
optimizer_args = dict(
loss=loss,
target=self,
network_outputs=[
normalized_means_var, normalized_log_stds_var
],
)
if self._use_trust_region:
optimizer_args['leq_constraint'] = (mean_kl, self._max_kl_step)
optimizer_args['inputs'] = [input_var, ys_var]
with tf.name_scope('update_opt'):
self._optimizer.update_opt(**optimizer_args)
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Label of input data.
"""
if self._subsample_factor < 1:
num_samples_tot = xs.shape[0]
idx = np.random.randint(
0, num_samples_tot,
int(num_samples_tot * self._subsample_factor))
xs, ys = xs[idx], ys[idx]
if self._normalize_inputs:
# recompute normalizing constants for inputs
self._network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._network.x_std.load(np.std(xs, axis=0, keepdims=True) + 1e-8)
self._old_network.x_mean.load(np.mean(xs, axis=0, keepdims=True))
self._old_network.x_std.load(
np.std(xs, axis=0, keepdims=True) + 1e-8)
if self._normalize_outputs:
# recompute normalizing constants for outputs
self._network.y_mean.load(np.mean(ys, axis=0, keepdims=True))
self._network.y_std.load(np.std(ys, axis=0, keepdims=True) + 1e-8)
self._old_network.y_mean.load(np.mean(ys, axis=0, keepdims=True))
self._old_network.y_std.load(
np.std(ys, axis=0, keepdims=True) + 1e-8)
inputs = [xs, ys]
loss_before = self._optimizer.loss(inputs)
tabular.record('{}/LossBefore'.format(self._name), loss_before)
self._optimizer.optimize(inputs)
loss_after = self._optimizer.loss(inputs)
tabular.record('{}/LossAfter'.format(self._name), loss_after)
if self._use_trust_region:
tabular.record('{}/MeanKL'.format(self._name),
self._optimizer.constraint_val(inputs))
tabular.record('{}/dLoss'.format(self._name), loss_before - loss_after)
self._old_model.parameters = self.model.parameters
def predict(self, xs):
"""Predict ys based on input xs.
Args:
xs (numpy.ndarray): Input data.
Return:
np.ndarray: The predicted ys.
"""
return self._f_predict(xs)
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
@property
def distribution(self):
"""garage.tf.distributions.DiagonalGaussian: Distribution."""
return self._network.dist
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_predict']
del new_dict['_network']
del new_dict['_old_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
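# Usage sketch (illustrative); session and variable initialization are
# assumed to be handled as in the ContinuousMLPRegressor sketch earlier.
# With the default use_trust_region=True, fit() optimizes the negative
# log-likelihood subject to a KL constraint against the old model and
# records '<name>/LossBefore', '<name>/LossAfter', '<name>/MeanKL' and
# '<name>/dLoss':
#
#     regressor = GaussianMLPRegressor(input_shape=(5, ),
#                                      output_dim=2,
#                                      hidden_sizes=(32, 32))
#     regressor.fit(xs, ys)            # xs: (N, 5), ys: (N, 2)
#     y_pred = regressor.predict(xs)   # shape (N, 2)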
| 11,511 | 40.261649 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/regressors/gaussian_mlp_regressor_model.py | """GaussianMLPRegressorModel."""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models import GaussianMLPModel
class GaussianMLPRegressorModel(GaussianMLPModel):
"""GaussianMLPRegressor based on garage.tf.models.Model class.
This class can be used to perform regression by fitting a Gaussian
distribution to the outputs.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the std network.
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the std network.
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a tf.Tensor. Set
it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_shape,
output_dim,
name='GaussianMLPRegressorModel',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_hidden_b_init=tf.zeros_initializer(),
std_output_nonlinearity=None,
std_output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_parameterization='exp',
layer_normalization=False):
super().__init__(output_dim=output_dim,
name=name,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_output_nonlinearity=std_output_nonlinearity,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization)
self._input_shape = input_shape
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'normalized_dist', 'normalized_mean', 'normalized_log_std', 'dist',
'mean', 'log_std', 'x_mean', 'x_std', 'y_mean', 'y_std'
]
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
            state_input (tf.Tensor): Placeholder for state input.
            name (str): Inner model name, also the variable scope of the
                inner model, if it exists. One example is
                garage.tf.models.Sequential.
        Return:
            tfp.distributions.MultivariateNormalDiag: Normalized distribution.
            tf.Tensor: Normalized mean.
            tf.Tensor: Normalized log_std.
            tfp.distributions.MultivariateNormalDiag: Vanilla distribution.
            tf.Tensor: Vanilla mean.
            tf.Tensor: Vanilla log_std.
            tf.Tensor: Mean for input data (x_mean).
            tf.Tensor: Std for input data (x_std).
            tf.Tensor: Mean for label (y_mean).
            tf.Tensor: Std for label (y_std).
"""
with tf.compat.v1.variable_scope('normalized_vars'):
x_mean_var = tf.compat.v1.get_variable(
name='x_mean',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
x_std_var = tf.compat.v1.get_variable(
name='x_std_var',
shape=(1, ) + self._input_shape,
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
y_mean_var = tf.compat.v1.get_variable(
name='y_mean_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.zeros_initializer(),
trainable=False)
y_std_var = tf.compat.v1.get_variable(
name='y_std_var',
shape=(1, self._output_dim),
dtype=np.float32,
initializer=tf.ones_initializer(),
trainable=False)
normalized_xs_var = (state_input - x_mean_var) / x_std_var
_, normalized_dist_mean, normalized_dist_log_std = super()._build(
normalized_xs_var)
# Since regressor expects [N, *dims], we need to squeeze the extra
# dimension
normalized_dist_log_std = tf.squeeze(normalized_dist_log_std, 1)
with tf.name_scope('mean_network'):
means_var = normalized_dist_mean * y_std_var + y_mean_var
with tf.name_scope('std_network'):
log_stds_var = normalized_dist_log_std + tf.math.log(y_std_var)
normalized_dist = tfp.distributions.MultivariateNormalDiag(
loc=normalized_dist_mean,
scale_diag=tf.exp(normalized_dist_log_std))
vanilla_dist = tfp.distributions.MultivariateNormalDiag(
loc=means_var, scale_diag=tf.exp(log_stds_var))
return (normalized_dist, normalized_dist_mean, normalized_dist_log_std,
vanilla_dist, means_var, log_stds_var, x_mean_var, x_std_var,
y_mean_var, y_std_var)
def clone(self, name):
"""Return a clone of the model.
It copies the configuration and parameters of the primitive.
Args:
name (str): Name of the newly created model. It has to be
different from source model if cloned under the same
computational graph.
Returns:
garage.tf.policies.GaussianMLPModel: Newly cloned model.
"""
new_regressor = self.__class__(
name=name,
input_shape=self._input_shape,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
learn_std=self._learn_std,
adaptive_std=self._adaptive_std,
std_share_network=self._std_share_network,
init_std=self._init_std,
min_std=self._min_std,
max_std=self._max_std,
std_hidden_sizes=self._std_hidden_sizes,
std_hidden_nonlinearity=self._std_hidden_nonlinearity,
std_hidden_w_init=self._std_hidden_w_init,
std_hidden_b_init=self._std_hidden_b_init,
std_output_nonlinearity=self._std_output_nonlinearity,
std_output_w_init=self._std_output_w_init,
std_parameterization=self._std_parameterization,
layer_normalization=self._layer_normalization)
new_regressor.parameters = self.parameters
return new_regressor
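# Clone sketch (illustrative): unlike CategoricalMLPRegressorModel.clone(),
# which copies only the configuration, this clone() also copies parameters.
# GaussianMLPRegressor relies on exactly this to maintain its old-distribution
# model, re-syncing the parameters after every fit():
#
#     old_model = model.clone(name='model_for_old_dist')
#     ...
#     old_model.parameters = model.parameters   # after an optimization step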
| 11,294 | 43.821429 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/regressors/regressor.py | """Regressor base classes without Parameterized."""
from garage.tf.models import Module, StochasticModule
class Regressor(Module):
"""Regressor base class.
Args:
input_shape (tuple[int]): Input shape.
output_dim (int): Output dimension.
name (str): Name of the regressor.
"""
# pylint: disable=abstract-method
def __init__(self, input_shape, output_dim, name):
super().__init__(name)
self._input_shape = input_shape
self._output_dim = output_dim
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Label of input data.
"""
def predict(self, xs):
"""Predict ys based on input xs.
Args:
xs (numpy.ndarray): Input data.
Return:
The predicted ys.
"""
class StochasticRegressor(Regressor, StochasticModule):
"""StochasticRegressor base class."""
# pylint: disable=abstract-method
def log_likelihood_sym(self, x_var, y_var, name=None):
"""Symbolic graph of the log likelihood.
Args:
x_var (tf.Tensor): Input tf.Tensor for the input data.
y_var (tf.Tensor): Input tf.Tensor for the label of data.
name (str): Name of the new graph.
Return:
tf.Tensor output of the symbolic log likelihood.
"""
| 1,447 | 23.133333 | 69 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/samplers/__init__.py | """Samplers which run agents that use Tensorflow in environments."""
from garage.tf.samplers.batch_sampler import BatchSampler
from garage.tf.samplers.worker import TFWorkerClassWrapper, TFWorkerWrapper
__all__ = ['BatchSampler', 'TFWorkerClassWrapper', 'TFWorkerWrapper']
| 275 | 38.428571 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/samplers/batch_sampler.py | """Collects samples in parallel using a stateful pool of workers."""
import tensorflow as tf
from garage.sampler import parallel_sampler
from garage.sampler.sampler_deprecated import BaseSampler
from garage.sampler.stateful_pool import singleton_pool
from garage.sampler.utils import truncate_paths
def worker_init_tf(g):
"""Initialize the tf.Session on a worker.
Args:
g (object): Global state object.
"""
g.sess = tf.compat.v1.Session()
g.sess.__enter__()
def worker_init_tf_vars(g):
"""Initialize the policy parameters on a worker.
Args:
g (object): Global state object.
"""
g.sess.run(tf.compat.v1.global_variables_initializer())
class BatchSampler(BaseSampler):
"""Collects samples in parallel using a stateful pool of workers.
Args:
algo (garage.np.algos.RLAlgorithm): The algorithm.
env (gym.Env): The environment.
n_envs (int): Number of environments.
"""
def __init__(self, algo, env, n_envs):
super().__init__(algo, env)
self.n_envs = n_envs
def start_worker(self):
"""Initialize the sampler."""
assert singleton_pool.initialized, (
'Use singleton_pool.initialize(n_parallel) to setup workers.')
if singleton_pool.n_parallel > 1:
singleton_pool.run_each(worker_init_tf)
parallel_sampler.populate_task(self.env, self.algo.policy)
if singleton_pool.n_parallel > 1:
singleton_pool.run_each(worker_init_tf_vars)
def shutdown_worker(self):
"""Terminate workers if necessary."""
parallel_sampler.terminate_task(scope=self.algo.scope)
def obtain_samples(self, itr, batch_size=None, whole_paths=True):
"""Collect samples for the given iteration number.
Args:
itr (int): Number of iteration.
batch_size (int): Number of environment steps in one batch.
whole_paths (bool): Whether to use whole path or truncated.
Returns:
list[dict]: A list of paths.
"""
if not batch_size:
batch_size = self.algo.max_path_length * self.n_envs
cur_policy_params = self.algo.policy.get_param_values()
paths = parallel_sampler.sample_paths(
policy_params=cur_policy_params,
max_samples=batch_size,
max_path_length=self.algo.max_path_length,
scope=self.algo.scope,
)
if whole_paths:
return paths
else:
paths_truncated = truncate_paths(paths, batch_size)
return paths_truncated
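# Lifecycle sketch (illustrative). `algo` and `env` are assumed to come from
# the surrounding garage setup, and start_worker() requires that
# singleton_pool.initialize(n_parallel) has already been called (see the
# assert above):
#
#     sampler = BatchSampler(algo, env, n_envs=4)
#     sampler.start_worker()                  # TF session + policy on workers
#     paths = sampler.obtain_samples(itr=0)   # defaults to
#                                             # max_path_length * n_envs steps
#     sampler.shutdown_worker()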
| 2,615 | 29.068966 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/samplers/worker.py | """Default TensorFlow sampler Worker."""
import tensorflow as tf
from garage.sampler import Worker
class TFWorkerClassWrapper:
"""Acts like a Worker class, but is actually an object.
When called, constructs the wrapped class and wraps it in a
TFWorkerWrapper.
Args:
wrapped_class (type): The class to wrap. Should be a subclass of
garage.sampler.Worker.
"""
# pylint: disable=too-few-public-methods
def __init__(self, wrapped_class):
self._wrapped_class = wrapped_class
def __call__(self, *args, **kwargs):
"""Construct the inner class and wrap it.
Args:
*args: Passed on to inner worker class.
**kwargs: Passed on to inner worker class.
Returns:
TFWorkerWrapper: The wrapped worker.
"""
wrapper = TFWorkerWrapper()
# Need to construct the wrapped class after we've entered the Session.
wrapper._inner_worker = self._wrapped_class(*args, **kwargs)
return wrapper
class TFWorkerWrapper(Worker):
"""Wrapper around another workers that initializes a TensorFlow Session."""
def __init__(self):
# pylint: disable=super-init-not-called
self._inner_worker = None
self._sess = None
self._sess_entered = None
self.worker_init()
def worker_init(self):
"""Initialize a worker."""
self._sess = tf.compat.v1.get_default_session()
if not self._sess:
# create a tf session for all
# sampler worker processes in
# order to execute the policy.
self._sess = tf.compat.v1.Session()
self._sess_entered = True
self._sess.__enter__()
def shutdown(self):
"""Perform shutdown processes for TF."""
self._inner_worker.shutdown()
if tf.compat.v1.get_default_session() and self._sess_entered:
self._sess_entered = False
self._sess.__exit__(None, None, None)
@property
def agent(self):
"""Returns the worker's agent.
Returns:
garage.Policy: the worker's agent.
"""
return self._inner_worker.agent
@agent.setter
def agent(self, agent):
"""Sets the worker's agent.
Args:
agent (garage.Policy): The agent.
"""
self._inner_worker.agent = agent
@property
def env(self):
"""Returns the worker's environment.
Returns:
gym.Env: the worker's environment.
"""
return self._inner_worker.env
@env.setter
def env(self, env):
"""Sets the worker's environment.
Args:
env (gym.Env): The environment.
"""
self._inner_worker.env = env
def update_agent(self, agent_update):
"""Update the worker's agent, using agent_update.
Args:
agent_update(object): An agent update. The exact type of this
argument depends on the `Worker` implementation.
"""
self._inner_worker.update_agent(agent_update)
def update_env(self, env_update):
"""Update the worker's env, using env_update.
Args:
env_update(object): An environment update. The exact type of this
argument depends on the `Worker` implementation.
"""
self._inner_worker.update_env(env_update)
def rollout(self):
"""Sample a single rollout of the agent in the environment.
Returns:
garage.TrajectoryBatch: Batch of sampled trajectories. May be
truncated if max_path_length is set.
"""
return self._inner_worker.rollout()
def start_rollout(self):
"""Begin a new rollout."""
self._inner_worker.start_rollout()
def step_rollout(self):
"""Take a single time-step in the current rollout.
Returns:
            bool: True iff the path is done, either due to the environment
                indicating termination or due to reaching `max_path_length`.
"""
return self._inner_worker.step_rollout()
def collect_rollout(self):
"""Collect the current rollout, clearing the internal buffer.
Returns:
garage.TrajectoryBatch: Batch of sampled trajectories. May be
truncated if the rollouts haven't completed yet.
"""
return self._inner_worker.collect_rollout()
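# Usage sketch (illustrative). The wrapper is applied to a worker *class*, so
# that a sampler constructs per-process workers which each enter a TF session
# before building the policy. `DefaultWorker` is garage's plain sampler
# worker and is an assumption of this sketch, not something defined here:
#
#     from garage.sampler import DefaultWorker
#
#     worker_class = TFWorkerClassWrapper(DefaultWorker)
#     # A sampler then calls worker_class(...) with its usual worker
#     # arguments and receives a TFWorkerWrapper around a DefaultWorker.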
| 4,467 | 26.580247 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/__init__.py | """PyTorch-backed modules and algorithms."""
from garage.torch._functions import compute_advantages
from garage.torch._functions import dict_np_to_torch
from garage.torch._functions import filter_valids
from garage.torch._functions import flatten_batch
from garage.torch._functions import global_device
from garage.torch._functions import pad_to_last
from garage.torch._functions import product_of_gaussians
from garage.torch._functions import set_gpu_mode
from garage.torch._functions import torch_to_np
from garage.torch._functions import update_module_params
__all__ = [
'compute_advantages', 'dict_np_to_torch', 'filter_valids', 'flatten_batch',
'global_device', 'pad_to_last', 'product_of_gaussians', 'set_gpu_mode',
'torch_to_np', 'update_module_params'
]
| 775 | 42.111111 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/_functions.py | """Utility functions for PyTorch algorithms.
A collection of common functions that are used by Pytorch algos.
This collection of functions can be used to manage the following:
- Pytorch GPU usage
- setting the default Pytorch GPU
- converting Tensors to GPU Tensors
- Converting Tensors into `numpy.ndarray` format and vice versa
- Updating model parameters
"""
import torch
import torch.nn.functional as F
_USE_GPU = False
_DEVICE = None
_GPU_ID = 0
def compute_advantages(discount, gae_lambda, max_path_length, baselines,
rewards):
"""Calculate advantages.
Advantages are a discounted cumulative sum.
Calculate advantages using a baseline according to Generalized Advantage
Estimation (GAE)
The discounted cumulative sum can be computed using conv2d with filter.
filter:
[1, (discount * gae_lambda), (discount * gae_lambda) ^ 2, ...]
    where the length is the same as max_path_length.
    baselines and rewards also have the same shape.
baselines:
[ [b_11, b_12, b_13, ... b_1n],
[b_21, b_22, b_23, ... b_2n],
...
[b_m1, b_m2, b_m3, ... b_mn] ]
rewards:
[ [r_11, r_12, r_13, ... r_1n],
[r_21, r_22, r_23, ... r_2n],
...
[r_m1, r_m2, r_m3, ... r_mn] ]
Args:
discount (float): RL discount factor (i.e. gamma).
gae_lambda (float): Lambda, as used for Generalized Advantage
Estimation (GAE).
max_path_length (int): Maximum length of a single rollout.
baselines (torch.Tensor): A 2D vector of value function estimates with
shape (N, T), where N is the batch dimension (number of episodes)
and T is the maximum path length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining
elements in that episode should be set to 0.
rewards (torch.Tensor): A 2D vector of per-step rewards with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent. If an episode
terminates in fewer than T time steps, the remaining elements in
that episode should be set to 0.
Returns:
torch.Tensor: A 2D vector of calculated advantage values with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent. If an episode
terminates in fewer than T time steps, the remaining values in that
episode should be set to 0.
"""
adv_filter = torch.full((1, 1, 1, max_path_length - 1),
discount * gae_lambda,
dtype=torch.float)
adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)
deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)
deltas = F.pad(deltas, (0, max_path_length - 1)).unsqueeze(0).unsqueeze(0)
advantages = F.conv2d(deltas, adv_filter, stride=1).reshape(rewards.shape)
return advantages
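
# A minimal usage sketch (illustrative only, not part of the garage API). It
# assumes two rollouts of max_path_length 4 with unit rewards and zero
# baselines, so every delta equals 1 and the advantages reduce to plain
# discounted cumulative sums. The helper is defined but never called.
def _example_compute_advantages():
    rewards = torch.ones(2, 4)
    baselines = torch.zeros(2, 4)
    advantages = compute_advantages(discount=0.99,
                                    gae_lambda=0.95,
                                    max_path_length=4,
                                    baselines=baselines,
                                    rewards=rewards)
    # advantages keeps the (N, T) = (2, 4) shape; its last column equals the
    # final reward because there is nothing left to bootstrap from.
    return advantages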
def pad_to_last(nums, total_length, axis=-1, val=0):
"""Pad val to last in nums in given axis.
    The length of the result along the given axis should be total_length.
Raises:
IndexError: If the input axis value is out of range of the nums array
Args:
nums (numpy.ndarray): The array to pad.
        total_length (int): The final width of the array.
        axis (int): The axis along which to pad.
        val (int): The value to pad with.
Returns:
torch.Tensor: Padded array
"""
tensor = torch.Tensor(nums)
axis = (axis + len(tensor.shape)) if axis < 0 else axis
if len(tensor.shape) <= axis:
raise IndexError('axis {} is out of range {}'.format(
axis, tensor.shape))
padding_config = [0, 0] * len(tensor.shape)
padding_idx = abs(axis - len(tensor.shape)) * 2 - 1
    padding_config[padding_idx] = max(total_length - tensor.shape[axis], 0)
    return F.pad(tensor, padding_config, value=val)
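
# A minimal usage sketch (illustrative only, not part of the garage API):
# pad a length-3 sequence with zeros up to a width of 5 along the last axis.
def _example_pad_to_last():
    padded = pad_to_last([1., 2., 3.], total_length=5)
    # padded is tensor([1., 2., 3., 0., 0.])
    return padded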
def filter_valids(tensor, valids):
"""Filter out tensor using valids (last index of valid tensors).
    valids contains the number of valid entries in each row.
Args:
tensor (torch.Tensor): The tensor to filter
valids (list[int]): Array of length of the valid values
Returns:
torch.Tensor: Filtered Tensor
"""
return [tensor[i][:valid] for i, valid in enumerate(valids)]
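
# A minimal usage sketch (illustrative only, not part of the garage API):
# keep only the first `valid` entries of each row of an (N, T) tensor.
def _example_filter_valids():
    batch = torch.arange(6.).reshape(2, 3)
    filtered = filter_valids(batch, valids=[2, 3])
    # filtered is [tensor([0., 1.]), tensor([3., 4., 5.])]
    return filtered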
def dict_np_to_torch(array_dict):
"""Convert a dict whose values are numpy arrays to PyTorch tensors.
Modifies array_dict in place.
Args:
array_dict (dict): Dictionary of data in numpy arrays
Returns:
dict: Dictionary of data in PyTorch tensors
"""
for key, value in array_dict.items():
array_dict[key] = torch.from_numpy(value).float().to(global_device())
return array_dict
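
# A minimal usage sketch (illustrative only, not part of the garage API).
# numpy is imported locally because this module does not import it at the top
# level, and set_gpu_mode(False) is called first so that global_device()
# returns a valid CPU device.
def _example_dict_np_to_torch():
    import numpy as np
    set_gpu_mode(False)
    batch = {'observation': np.zeros((4, 3)), 'reward': np.ones(4)}
    batch = dict_np_to_torch(batch)
    # every value is now a float torch.Tensor placed on the CPU device
    return batch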
def torch_to_np(tensors):
"""Convert PyTorch tensors to numpy arrays.
Args:
tensors (tuple): Tuple of data in PyTorch tensors.
Returns:
tuple[numpy.ndarray]: Tuple of data in numpy arrays.
Note: This method is deprecated and now replaced by
`garage.torch._functions.to_numpy`.
"""
value_out = tuple(v.numpy() for v in tensors)
return value_out
def flatten_batch(tensor):
"""Flatten a batch of observations.
Reshape a tensor of size (X, Y, Z) into (X*Y, Z)
Args:
tensor (torch.Tensor): Tensor to flatten.
Returns:
torch.Tensor: Flattened tensor.
"""
return tensor.reshape((-1, ) + tensor.shape[2:])
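
# A minimal usage sketch (illustrative only, not part of the garage API):
# a batch of 2 trajectories, each with 3 steps of 4-dimensional observations,
# is flattened into a single (6, 4) matrix of time steps.
def _example_flatten_batch():
    observations = torch.zeros(2, 3, 4)
    flat = flatten_batch(observations)
    # flat.shape is torch.Size([6, 4])
    return flat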
def update_module_params(module, new_params): # noqa: D202
"""Load parameters to a module.
This function acts like `torch.nn.Module._load_from_state_dict()`, but
it replaces the tensors in module with those in new_params, while
`_load_from_state_dict()` loads only the value. Use this function so
    that the `grad` and `grad_fn` of `new_params` can be restored.
Args:
module (torch.nn.Module): A torch module.
new_params (dict): A dict of torch tensor used as the new
parameters of this module. This parameters dict should be
generated by `torch.nn.Module.named_parameters()`
"""
# pylint: disable=protected-access
def update(m, name, param):
del m._parameters[name] # noqa: E501
setattr(m, name, param)
m._parameters[name] = param # noqa: E501
named_modules = dict(module.named_modules())
for name, new_param in new_params.items():
if '.' in name:
module_name, param_name = tuple(name.rsplit('.', 1))
if module_name in named_modules:
update(named_modules[module_name], param_name, new_param)
else:
update(module, name, new_param)
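
# A minimal usage sketch (illustrative only, not part of the garage API):
# replace a Linear layer's parameters with tensors derived from the originals.
# Because the new tensors keep their grad_fn, losses computed through the
# module afterwards can still backpropagate into the original parameters,
# which is what MAML-style algorithms rely on.
def _example_update_module_params():
    module = torch.nn.Linear(3, 2)
    adapted = {
        name: param - 0.1 * torch.ones_like(param)
        for name, param in module.named_parameters()
    }
    update_module_params(module, adapted)
    # module.weight and module.bias now refer to the adapted, non-leaf tensors
    return module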
def set_gpu_mode(mode, gpu_id=0):
"""Set GPU mode and device ID.
Args:
mode (bool): Whether or not to use GPU
gpu_id (int): GPU ID
"""
# pylint: disable=global-statement
global _GPU_ID
global _USE_GPU
global _DEVICE
_GPU_ID = gpu_id
_USE_GPU = mode
_DEVICE = torch.device(('cuda:' + str(_GPU_ID)) if _USE_GPU else 'cpu')
def global_device():
"""Returns the global device that torch.Tensors should be placed on.
Note: The global device is set by using the function
`garage.torch._functions.set_gpu_mode.`
    If this function is never called,
    `garage.torch._functions.global_device()` returns None.
Returns:
`torch.Device`: The global device that newly created torch.Tensors
should be placed on.
"""
# pylint: disable=global-statement
global _DEVICE
return _DEVICE
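
# A minimal usage sketch (illustrative only, not part of the garage API):
# pick the device once at startup, then place new tensors on it everywhere.
def _example_device_usage():
    set_gpu_mode(torch.cuda.is_available(), gpu_id=0)
    return torch.zeros(3).to(global_device())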
def product_of_gaussians(mus, sigmas_squared):
"""Compute mu, sigma of product of gaussians.
Args:
mus (torch.Tensor): Means, with shape :math:`(N, M)`. M is the number
of mean values.
sigmas_squared (torch.Tensor): Variances, with shape :math:`(N, V)`. V
is the number of variance values.
Returns:
torch.Tensor: Mu of product of gaussians, with shape :math:`(N, 1)`.
torch.Tensor: Sigma of product of gaussians, with shape :math:`(N, 1)`.
"""
sigmas_squared = torch.clamp(sigmas_squared, min=1e-7)
sigma_squared = 1. / torch.sum(torch.reciprocal(sigmas_squared), dim=0)
mu = sigma_squared * torch.sum(mus / sigmas_squared, dim=0)
return mu, sigma_squared
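
# A minimal numeric sketch (illustrative only, not part of the garage API):
# fusing two unit-variance Gaussian estimates of the same quantity halves the
# variance and averages the means. This is the operation PEARL's
# context-conditioned policy uses to combine per-transition posteriors over
# the latent context.
def _example_product_of_gaussians():
    mus = torch.tensor([[0.0], [2.0]])
    sigmas_squared = torch.ones(2, 1)
    mu, sigma_squared = product_of_gaussians(mus, sigmas_squared)
    # mu is tensor([1.]) and sigma_squared is tensor([0.5000])
    return mu, sigma_squared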

CSD-locomotion-master/garaged/src/garage/torch/algos/__init__.py

"""PyTorch algorithms."""
from garage.torch.algos.ddpg import DDPG
# VPG has to be imported first because PPO and TRPO depend on it.
from garage.torch.algos.vpg import VPG
from garage.torch.algos.ppo import PPO # noqa: I100
from garage.torch.algos.trpo import TRPO
from garage.torch.algos.maml_ppo import MAMLPPO # noqa: I100
from garage.torch.algos.maml_trpo import MAMLTRPO
from garage.torch.algos.maml_vpg import MAMLVPG
from garage.torch.algos.pearl import PEARL
from garage.torch.algos.sac import SAC
from garage.torch.algos.mtsac import MTSAC # noqa: I100
__all__ = [
'DDPG', 'VPG', 'PPO', 'TRPO', 'MAMLPPO', 'MAMLTRPO', 'MAMLVPG', 'MTSAC',
'PEARL', 'SAC'
]

CSD-locomotion-master/garaged/src/garage/torch/algos/ddpg.py

"""This module creates a DDPG model in PyTorch."""
from collections import deque
import copy
from dowel import logger, tabular
import numpy as np
import torch
from garage import _Default, make_optimizer
from garage import log_performance
from garage.np import obtain_evaluation_samples
from garage.np import samples_to_tensors
from garage.np.algos import RLAlgorithm
from garage.sampler import OffPolicyVectorizedSampler
from garage.torch import dict_np_to_torch, torch_to_np
class DDPG(RLAlgorithm):
"""A DDPG model implemented with PyTorch.
    DDPG, also known as Deep Deterministic Policy Gradient, uses an
    actor-critic method to optimize the policy and the Q-function prediction.
    It uses a supervised method to update the critic network and the policy
    gradient to update the actor network. An exploration strategy, a replay
    buffer, and target networks are used to stabilize the training process.
Args:
env_spec (EnvSpec): Environment specification.
policy (garage.torch.policies.Policy): Policy.
qf (object): Q-value network.
replay_buffer (garage.replay_buffer.ReplayBuffer): Replay buffer.
steps_per_epoch (int): Number of train_once calls per epoch.
n_train_steps (int): Training steps.
max_path_length (int): Maximum path length. The episode will
terminate when length of trajectory reaches max_path_length.
max_eval_path_length (int or None): Maximum length of paths used for
off-policy evaluation. If None, defaults to `max_path_length`.
buffer_batch_size (int): Batch size of replay buffer.
min_buffer_size (int): The minimum buffer size for replay buffer.
rollout_batch_size (int): Roll out batch size.
exploration_policy (garage.np.exploration_policies.ExplorationPolicy): # noqa: E501
Exploration strategy.
target_update_tau (float): Interpolation parameter for doing the
soft target update.
discount(float): Discount factor for the cumulative return.
policy_weight_decay (float): L2 weight decay factor for parameters
of the policy network.
qf_weight_decay (float): L2 weight decay factor for parameters
of the q value network.
policy_optimizer (Union[type, tuple[type, dict]]): Type of optimizer
for training policy network. This can be an optimizer type such as
`torch.optim.Adam` or a tuple of type and dictionary, where
dictionary contains arguments to initialize the optimizer
e.g. `(torch.optim.Adam, {'lr' : 1e-3})`.
qf_optimizer (Union[type, tuple[type, dict]]): Type of optimizer
for training Q-value network. This can be an optimizer type such
as `torch.optim.Adam` or a tuple of type and dictionary, where
dictionary contains arguments to initialize the optimizer
e.g. `(torch.optim.Adam, {'lr' : 1e-3})`.
policy_lr (float): Learning rate for policy network parameters.
qf_lr (float): Learning rate for Q-value network parameters.
        clip_pos_returns (bool): Whether or not to clip positive returns.
clip_return (float): Clip return to be in [-clip_return,
clip_return].
max_action (float): Maximum action magnitude.
reward_scale (float): Reward scale.
smooth_return (bool): Whether to smooth the return for logging.
"""
def __init__(
self,
env_spec,
policy,
qf,
replay_buffer,
*, # Everything after this is numbers.
steps_per_epoch=20,
n_train_steps=50,
max_path_length=None,
max_eval_path_length=None,
buffer_batch_size=64,
min_buffer_size=int(1e4),
rollout_batch_size=1,
exploration_policy=None,
target_update_tau=0.01,
discount=0.99,
policy_weight_decay=0,
qf_weight_decay=0,
policy_optimizer=torch.optim.Adam,
qf_optimizer=torch.optim.Adam,
policy_lr=_Default(1e-4),
qf_lr=_Default(1e-3),
clip_pos_returns=False,
clip_return=np.inf,
max_action=None,
reward_scale=1.,
smooth_return=True):
action_bound = env_spec.action_space.high
self._tau = target_update_tau
self._policy_weight_decay = policy_weight_decay
self._qf_weight_decay = qf_weight_decay
self._clip_pos_returns = clip_pos_returns
self._clip_return = clip_return
self._max_action = action_bound if max_action is None else max_action
self._steps_per_epoch = steps_per_epoch
self._success_history = deque(maxlen=100)
self._episode_rewards = []
self._episode_policy_losses = []
self._episode_qf_losses = []
self._epoch_ys = []
self._epoch_qs = []
self._policy = policy
self._qf = qf
self._n_train_steps = n_train_steps
self._min_buffer_size = min_buffer_size
self._buffer_batch_size = buffer_batch_size
self._discount = discount
self._reward_scale = reward_scale
self._smooth_return = smooth_return
self.max_path_length = max_path_length
self._max_eval_path_length = max_eval_path_length
# used by OffPolicyVectorizedSampler
self.env_spec = env_spec
self.rollout_batch_size = rollout_batch_size
self.replay_buffer = replay_buffer
self.policy = policy
self.exploration_policy = exploration_policy
self._target_policy = copy.deepcopy(self.policy)
self._target_qf = copy.deepcopy(self._qf)
self._policy_optimizer = make_optimizer(policy_optimizer,
module=self.policy,
lr=policy_lr)
self._qf_optimizer = make_optimizer(qf_optimizer,
module=self._qf,
lr=qf_lr)
self.sampler_cls = OffPolicyVectorizedSampler
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
runner.enable_logging = False
for _ in runner.step_epochs():
for cycle in range(self._steps_per_epoch):
runner.step_path = runner.obtain_samples(runner.step_itr)
for path in runner.step_path:
path['rewards'] *= self._reward_scale
last_return = self.train_once(runner.step_itr,
runner.step_path)
if (cycle == 0 and self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
runner.enable_logging = True
log_performance(runner.step_itr,
obtain_evaluation_samples(
self.policy, runner.get_env_copy()),
discount=self._discount)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one iteration of training.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths
Returns:
float: Average return.
"""
paths = samples_to_tensors(paths)
epoch = itr / self._steps_per_epoch
self._episode_rewards.extend([
path for path, complete in zip(paths['undiscounted_returns'],
paths['complete']) if complete
])
self._success_history.extend([
path for path, complete in zip(paths['success_history'],
paths['complete']) if complete
])
# Avoid calculating the mean of an empty list in cases where
# all paths were non-terminal.
last_average_return = np.NaN
avg_success_rate = 0
if self._episode_rewards:
last_average_return = np.mean(self._episode_rewards)
if self._success_history:
if (itr % self._steps_per_epoch == 0
and (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size)):
avg_success_rate = np.mean(self._success_history)
for _ in range(self._n_train_steps):
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
samples = self.replay_buffer.sample_transitions(
self._buffer_batch_size)
qf_loss, y, q, policy_loss = torch_to_np(
self.optimize_policy(samples))
self._episode_policy_losses.append(policy_loss)
self._episode_qf_losses.append(qf_loss)
self._epoch_ys.append(y)
self._epoch_qs.append(q)
if itr % self._steps_per_epoch == 0:
logger.log('Training finished')
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
tabular.record('Epoch', epoch)
tabular.record('Policy/AveragePolicyLoss',
np.mean(self._episode_policy_losses))
tabular.record('QFunction/AverageQFunctionLoss',
np.mean(self._episode_qf_losses))
tabular.record('QFunction/AverageQ', np.mean(self._epoch_qs))
tabular.record('QFunction/MaxQ', np.max(self._epoch_qs))
tabular.record('QFunction/AverageAbsQ',
np.mean(np.abs(self._epoch_qs)))
tabular.record('QFunction/AverageY', np.mean(self._epoch_ys))
tabular.record('QFunction/MaxY', np.max(self._epoch_ys))
tabular.record('QFunction/AverageAbsY',
np.mean(np.abs(self._epoch_ys)))
tabular.record('AverageSuccessRate', avg_success_rate)
if not self._smooth_return:
self._episode_rewards = []
self._episode_policy_losses = []
self._episode_qf_losses = []
self._epoch_ys = []
self._epoch_qs = []
self._success_history.clear()
return last_average_return
def optimize_policy(self, samples_data):
"""Perform algorithm optimizing.
Args:
samples_data (dict): Processed batch data.
Returns:
action_loss: Loss of action predicted by the policy network.
qval_loss: Loss of Q-value predicted by the Q-network.
            ys: Bootstrapped target Q-values y.
qval: Q-value predicted by the Q-network.
"""
transitions = dict_np_to_torch(samples_data)
observations = transitions['observations']
rewards = transitions['rewards'].reshape(-1, 1)
actions = transitions['actions']
next_observations = transitions['next_observations']
terminals = transitions['terminals'].reshape(-1, 1)
next_inputs = next_observations
inputs = observations
with torch.no_grad():
next_actions = self._target_policy(next_inputs)
target_qvals = self._target_qf(next_inputs, next_actions)
clip_range = (-self._clip_return,
0. if self._clip_pos_returns else self._clip_return)
y_target = rewards + (1.0 - terminals) * self._discount * target_qvals
y_target = torch.clamp(y_target, clip_range[0], clip_range[1])
# optimize critic
qval = self._qf(inputs, actions)
qf_loss = torch.nn.MSELoss()
qval_loss = qf_loss(qval, y_target)
self._qf_optimizer.zero_grad()
qval_loss.backward()
self._qf_optimizer.step()
# optimize actor
actions = self.policy(inputs)
action_loss = -1 * self._qf(inputs, actions).mean()
self._policy_optimizer.zero_grad()
action_loss.backward()
self._policy_optimizer.step()
# update target networks
self.update_target()
return (qval_loss.detach(), y_target, qval.detach(),
action_loss.detach())
def update_target(self):
"""Update parameters in the target policy and Q-value network."""
for t_param, param in zip(self._target_qf.parameters(),
self._qf.parameters()):
t_param.data.copy_(t_param.data * (1.0 - self._tau) +
param.data * self._tau)
for t_param, param in zip(self._target_policy.parameters(),
self.policy.parameters()):
t_param.data.copy_(t_param.data * (1.0 - self._tau) +
param.data * self._tau)
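
# A minimal numeric sketch (illustrative only, not part of the garage API) of
# the critic target computed in DDPG.optimize_policy above:
#     y = r + (1 - terminal) * discount * Q_target(s', mu_target(s'))
# The tensors stand in for a batch of two transitions; the second transition
# ends the episode, so it does not bootstrap from the target Q-value.
def _example_critic_target():
    rewards = torch.tensor([[1.0], [1.0]])
    terminals = torch.tensor([[0.0], [1.0]])
    target_qvals = torch.tensor([[10.0], [10.0]])  # stand-in for Q_target
    discount = 0.99
    y_target = rewards + (1.0 - terminals) * discount * target_qvals
    # y_target is tensor([[10.9000], [1.0000]])
    return y_target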

CSD-locomotion-master/garaged/src/garage/torch/algos/maml.py

"""Model-Agnostic Meta-Learning (MAML) algorithm implementation for RL."""
import collections
import copy
from dowel import tabular
import numpy as np
import torch
from garage import _Default, make_optimizer
from garage import log_multitask_performance
from garage import TrajectoryBatch
from garage.misc import tensor_utils
from garage.sampler import OnPolicyVectorizedSampler
from garage.torch import update_module_params
from garage.torch.optimizers import ConjugateGradientOptimizer
from garage.torch.optimizers import DifferentiableSGD
class MAML:
"""Model-Agnostic Meta-Learning (MAML).
Args:
inner_algo (garage.torch.algos.VPG): The inner algorithm used for
computing loss.
env (garage.envs.GarageEnv): A gym environment.
policy (garage.torch.policies.Policy): Policy.
meta_optimizer (Union[torch.optim.Optimizer, tuple]):
Type of optimizer.
This can be an optimizer type such as `torch.optim.Adam` or a tuple
of type and dictionary, where dictionary contains arguments to
initialize the optimizer e.g. `(torch.optim.Adam, {'lr' : 1e-3})`.
meta_batch_size (int): Number of tasks sampled per batch.
inner_lr (float): Adaptation learning rate.
outer_lr (float): Meta policy learning rate.
num_grad_updates (int): Number of adaptation gradient steps.
meta_evaluator (garage.experiment.MetaEvaluator): A meta evaluator for
meta-testing. If None, don't do meta-testing.
        evaluate_every_n_epochs (int): Do meta-testing every this many epochs.
"""
def __init__(self,
inner_algo,
env,
policy,
meta_optimizer,
meta_batch_size=40,
inner_lr=0.1,
outer_lr=1e-3,
num_grad_updates=1,
meta_evaluator=None,
evaluate_every_n_epochs=1):
self.sampler_cls = OnPolicyVectorizedSampler
self.max_path_length = inner_algo.max_path_length
self._meta_evaluator = meta_evaluator
self._policy = policy
self._env = env
self._value_function = copy.deepcopy(inner_algo._value_function)
self._initial_vf_state = self._value_function.state_dict()
self._num_grad_updates = num_grad_updates
self._meta_batch_size = meta_batch_size
self._inner_algo = inner_algo
self._inner_optimizer = DifferentiableSGD(self._policy, lr=inner_lr)
self._meta_optimizer = make_optimizer(meta_optimizer,
module=policy,
lr=_Default(outer_lr),
eps=_Default(1e-5))
self._evaluate_every_n_epochs = evaluate_every_n_epochs
def train(self, runner):
"""Obtain samples and start training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in runner.step_epochs():
all_samples, all_params = self._obtain_samples(runner)
last_return = self.train_once(runner, all_samples, all_params)
runner.step_itr += 1
return last_return
def train_once(self, runner, all_samples, all_params):
"""Train the algorithm once.
Args:
runner (garage.experiment.LocalRunner): The experiment runner.
all_samples (list[list[MAMLTrajectoryBatch]]): A two
dimensional list of MAMLTrajectoryBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
Returns:
float: Average return.
"""
itr = runner.step_itr
old_theta = dict(self._policy.named_parameters())
kl_before = self._compute_kl_constraint(all_samples,
all_params,
set_grad=False)
meta_objective = self._compute_meta_loss(all_samples, all_params)
self._meta_optimizer.zero_grad()
meta_objective.backward()
self._meta_optimize(all_samples, all_params)
# Log
loss_after = self._compute_meta_loss(all_samples,
all_params,
set_grad=False)
kl_after = self._compute_kl_constraint(all_samples,
all_params,
set_grad=False)
with torch.no_grad():
policy_entropy = self._compute_policy_entropy(
[task_samples[0] for task_samples in all_samples])
average_return = self.log_performance(itr, all_samples,
meta_objective.item(),
loss_after.item(),
kl_before.item(),
kl_after.item(),
policy_entropy.mean().item())
if self._meta_evaluator and itr % self._evaluate_every_n_epochs == 0:
self._meta_evaluator.evaluate(self)
update_module_params(self._old_policy, old_theta)
return average_return
def _train_value_function(self, paths):
"""Train the value function.
Args:
paths (list[dict]): A list of collected paths.
Returns:
torch.Tensor: Calculated mean scalar value of value function loss
(float).
"""
# MAML resets a value function to its initial state before training.
self._value_function.load_state_dict(self._initial_vf_state)
obs = np.concatenate([path['observations'] for path in paths], axis=0)
returns = np.concatenate([path['returns'] for path in paths])
obs = torch.Tensor(obs)
returns = torch.Tensor(returns)
vf_loss = self._value_function.compute_loss(obs, returns)
# pylint: disable=protected-access
self._inner_algo._vf_optimizer.zero_grad()
vf_loss.backward()
# pylint: disable=protected-access
self._inner_algo._vf_optimizer.step()
return vf_loss
def _obtain_samples(self, runner):
"""Obtain samples for each task before and after the fast-adaptation.
Args:
runner (LocalRunner): A local runner instance to obtain samples.
Returns:
tuple: Tuple of (all_samples, all_params).
all_samples (list[MAMLTrajectoryBatch]): A list of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter
dictionaries.
"""
tasks = self._env.sample_tasks(self._meta_batch_size)
all_samples = [[] for _ in range(len(tasks))]
all_params = []
theta = dict(self._policy.named_parameters())
for i, task in enumerate(tasks):
self._set_task(runner, task)
for j in range(self._num_grad_updates + 1):
paths = runner.obtain_samples(runner.step_itr)
batch_samples = self._process_samples(paths)
all_samples[i].append(batch_samples)
                # The last iteration only collects samples and does not adapt
if j < self._num_grad_updates:
                    # A grad needs to be kept for the next grad update,
                    # except for the last grad update
require_grad = j < self._num_grad_updates - 1
self._adapt(batch_samples, set_grad=require_grad)
all_params.append(dict(self._policy.named_parameters()))
# Restore to pre-updated policy
update_module_params(self._policy, theta)
return all_samples, all_params
def _adapt(self, batch_samples, set_grad=True):
"""Performs one MAML inner step to update the policy.
Args:
batch_samples (MAMLTrajectoryBatch): Samples data for one
task and one gradient step.
set_grad (bool): if False, update policy parameters in-place.
Else, allow taking gradient of functions of updated parameters
with respect to pre-updated parameters.
"""
# pylint: disable=protected-access
loss = self._inner_algo._compute_loss(*batch_samples[1:])
# Update policy parameters with one SGD step
self._inner_optimizer.zero_grad()
loss.backward(create_graph=set_grad)
with torch.set_grad_enabled(set_grad):
self._inner_optimizer.step()
def _meta_optimize(self, all_samples, all_params):
if isinstance(self._meta_optimizer, ConjugateGradientOptimizer):
self._meta_optimizer.step(
f_loss=lambda: self._compute_meta_loss(
all_samples, all_params, set_grad=False),
f_constraint=lambda: self._compute_kl_constraint(
all_samples, all_params))
else:
self._meta_optimizer.step(lambda: self._compute_meta_loss(
all_samples, all_params, set_grad=False))
def _compute_meta_loss(self, all_samples, all_params, set_grad=True):
"""Compute loss to meta-optimize.
Args:
all_samples (list[list[MAMLTrajectoryBatch]]): A two
dimensional list of MAMLTrajectoryBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
set_grad (bool): Whether to enable gradient calculation or not.
Returns:
torch.Tensor: Calculated mean value of loss.
"""
theta = dict(self._policy.named_parameters())
old_theta = dict(self._old_policy.named_parameters())
losses = []
for task_samples, task_params in zip(all_samples, all_params):
for i in range(self._num_grad_updates):
require_grad = i < self._num_grad_updates - 1 or set_grad
self._adapt(task_samples[i], set_grad=require_grad)
update_module_params(self._old_policy, task_params)
with torch.set_grad_enabled(set_grad):
# pylint: disable=protected-access
last_update = task_samples[-1]
loss = self._inner_algo._compute_loss(*last_update[1:])
losses.append(loss)
update_module_params(self._policy, theta)
update_module_params(self._old_policy, old_theta)
return torch.stack(losses).mean()
def _compute_kl_constraint(self, all_samples, all_params, set_grad=True):
"""Compute KL divergence.
For each task, compute the KL divergence between the old policy
distribution and current policy distribution.
Args:
all_samples (list[list[MAMLTrajectoryBatch]]): Two
dimensional list of MAMLTrajectoryBatch of size
[meta_batch_size * (num_grad_updates + 1)]
all_params (list[dict]): A list of named parameter dictionaries.
Each dictionary contains key value pair of names (str) and
parameters (torch.Tensor).
set_grad (bool): Whether to enable gradient calculation or not.
Returns:
torch.Tensor: Calculated mean value of KL divergence.
"""
theta = dict(self._policy.named_parameters())
old_theta = dict(self._old_policy.named_parameters())
kls = []
for task_samples, task_params in zip(all_samples, all_params):
for i in range(self._num_grad_updates):
require_grad = i < self._num_grad_updates - 1 or set_grad
self._adapt(task_samples[i], set_grad=require_grad)
update_module_params(self._old_policy, task_params)
with torch.set_grad_enabled(set_grad):
# pylint: disable=protected-access
kl = self._inner_algo._compute_kl_constraint(
task_samples[-1].observations)
kls.append(kl)
update_module_params(self._policy, theta)
update_module_params(self._old_policy, old_theta)
return torch.stack(kls).mean()
def _compute_policy_entropy(self, task_samples):
"""Compute policy entropy.
Args:
task_samples (list[MAMLTrajectoryBatch]): Samples data for
one task.
Returns:
torch.Tensor: Computed entropy value.
"""
obs = torch.stack([samples.observations for samples in task_samples])
# pylint: disable=protected-access
entropies = self._inner_algo._compute_policy_entropy(obs)
return entropies.mean()
def _set_task(self, runner, task):
# pylint: disable=protected-access, no-self-use
for env in runner._sampler._vec_env.envs:
env.set_task(task)
@property
def policy(self):
"""Current policy of the inner algorithm.
Returns:
garage.torch.policies.Policy: Current policy of the inner
algorithm.
"""
return self._policy
@property
def _old_policy(self):
"""Old policy of the inner algorithm.
Returns:
garage.torch.policies.Policy: Old policy of the inner algorithm.
"""
# pylint: disable=protected-access
return self._inner_algo._old_policy
def _process_samples(self, paths):
"""Process sample data based on the collected paths.
Args:
paths (list[dict]): A list of collected paths.
Returns:
MAMLTrajectoryBatch: Processed samples data.
"""
for path in paths:
path['returns'] = tensor_utils.discount_cumsum(
path['rewards'], self._inner_algo.discount).copy()
self._train_value_function(paths)
obs, actions, rewards, _, valids, baselines \
= self._inner_algo.process_samples(paths)
return MAMLTrajectoryBatch(paths, obs, actions, rewards, valids,
baselines)
def log_performance(self, itr, all_samples, loss_before, loss_after,
kl_before, kl, policy_entropy):
"""Evaluate performance of this batch.
Args:
itr (int): Iteration number.
all_samples (list[list[MAMLTrajectoryBatch]]): Two
dimensional list of MAMLTrajectoryBatch of size
[meta_batch_size * (num_grad_updates + 1)]
loss_before (float): Loss before optimization step.
loss_after (float): Loss after optimization step.
kl_before (float): KL divergence before optimization step.
kl (float): KL divergence after optimization step.
policy_entropy (float): Policy entropy.
Returns:
float: The average return in last epoch cycle.
"""
tabular.record('Iteration', itr)
name_map = None
if hasattr(self._env, 'all_task_names'):
names = self._env.all_task_names
name_map = dict(zip(names, names))
rtns = log_multitask_performance(
itr,
TrajectoryBatch.from_trajectory_list(
env_spec=self._env.spec,
paths=[
path for task_paths in all_samples
for path in task_paths[self._num_grad_updates].paths
]),
discount=self._inner_algo.discount,
name_map=name_map)
with tabular.prefix(self._policy.name + '/'):
tabular.record('LossBefore', loss_before)
tabular.record('LossAfter', loss_after)
tabular.record('dLoss', loss_before - loss_after)
tabular.record('KLBefore', kl_before)
tabular.record('KLAfter', kl)
tabular.record('Entropy', policy_entropy)
return np.mean(rtns)
def get_exploration_policy(self):
"""Return a policy used before adaptation to a specific task.
Each time it is retrieved, this policy should only be evaluated in one
task.
Returns:
garage.Policy: The policy used to obtain samples that are later
used for meta-RL adaptation.
"""
return copy.deepcopy(self._policy)
def adapt_policy(self, exploration_policy, exploration_trajectories):
"""Adapt the policy by one gradient steps for a task.
Args:
exploration_policy (garage.Policy): A policy which was returned
from get_exploration_policy(), and which generated
exploration_trajectories by interacting with an environment.
The caller may not use this object after passing it into this
method.
exploration_trajectories (garage.TrajectoryBatch): Trajectories to
adapt to, generated by exploration_policy exploring the
environment.
Returns:
garage.Policy: A policy adapted to the task represented by the
exploration_trajectories.
"""
old_policy, self._policy = self._policy, exploration_policy
self._inner_algo.policy = exploration_policy
self._inner_optimizer.module = exploration_policy
paths = exploration_trajectories.to_trajectory_list()
batch_samples = self._process_samples(paths)
self._adapt(batch_samples, set_grad=False)
self._policy = old_policy
self._inner_algo.policy = self._inner_optimizer.module = old_policy
return exploration_policy
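
# A minimal sketch (illustrative only, not the garage implementation) of the
# inner/outer structure that the MAML class above realizes with
# DifferentiableSGD and an inner VPG algorithm. `loss_fn(policy, task)` is a
# hypothetical callable returning a scalar loss for one task, and
# `meta_optimizer` is assumed to be a plain torch optimizer over
# policy.parameters() (garage may instead use a conjugate gradient optimizer).
def _example_maml_outer_step(policy, tasks, loss_fn, meta_optimizer,
                             inner_lr=0.1):
    theta = dict(policy.named_parameters())
    meta_loss = 0.
    for task in tasks:
        # One differentiable inner step: theta' = theta - lr * grad.
        grads = torch.autograd.grad(loss_fn(policy, task),
                                    list(theta.values()),
                                    create_graph=True)
        adapted = {
            name: param - inner_lr * grad
            for (name, param), grad in zip(theta.items(), grads)
        }
        update_module_params(policy, adapted)  # differentiable parameter swap
        meta_loss = meta_loss + loss_fn(policy, task)  # post-update loss
        update_module_params(policy, theta)  # restore pre-update parameters
    meta_optimizer.zero_grad()
    meta_loss.backward()
    meta_optimizer.step()
    return meta_loss.detach()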
class MAMLTrajectoryBatch(
collections.namedtuple('MAMLTrajectoryBatch', [
'paths', 'observations', 'actions', 'rewards', 'valids',
'baselines'
])):
r"""A tuple representing a batch of whole trajectories in MAML.
A :class:`MAMLTrajectoryBatch` represents a batch of whole trajectories
produced from one environment.
+-----------------------+-------------------------------------------------+
| Symbol | Description |
+=======================+=================================================+
| :math:`N` | Trajectory index dimension |
+-----------------------+-------------------------------------------------+
| :math:`T` | Maximum length of a trajectory |
+-----------------------+-------------------------------------------------+
| :math:`S^*` | Single-step shape of a time-series tensor |
+-----------------------+-------------------------------------------------+
Attributes:
paths (list[dict[str, np.ndarray or dict[str, np.ndarray]]]):
            Non-flattened original paths from the sampler.
observations (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T, O^*)` containing the (possibly
multi-dimensional) observations for all time steps in this batch.
These must conform to :obj:`env_spec.observation_space`.
actions (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T, A^*)` containing the (possibly
multi-dimensional) actions for all time steps in this batch. These
must conform to :obj:`env_spec.action_space`.
rewards (torch.Tensor): A torch tensor of shape
:math:`(N \bullet T)` containing the rewards for all time
steps in this batch.
valids (numpy.ndarray): An integer numpy array of shape :math:`(N, )`
containing the length of each trajectory in this batch. This may be
used to reconstruct the individual trajectories.
baselines (numpy.ndarray): An numpy array of shape
:math:`(N \bullet T, )` containing the value function estimation
at all time steps in this batch.
Raises:
ValueError: If any of the above attributes do not conform to their
prescribed types and shapes.
"""

CSD-locomotion-master/garaged/src/garage/torch/algos/maml_ppo.py

"""Model-Agnostic Meta-Learning (MAML) algorithm applied to PPO."""
import torch
from garage import _Default
from garage.torch.algos import PPO
from garage.torch.algos.maml import MAML
from garage.torch.optimizers import OptimizerWrapper
class MAMLPPO(MAML):
"""Model-Agnostic Meta-Learning (MAML) applied to PPO.
Args:
env (garage.envs.GarageEnv): A multi-task environment.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.np.baselines.Baseline): The value function.
inner_lr (float): Adaptation learning rate.
outer_lr (float): Meta policy learning rate.
lr_clip_range (float): The limit on the likelihood ratio between
policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
meta_batch_size (int): Number of tasks sampled per batch.
num_grad_updates (int): Number of adaptation gradient steps.
meta_evaluator (garage.experiment.MetaEvaluator): A meta evaluator for
meta-testing. If None, don't do meta-testing.
        evaluate_every_n_epochs (int): Do meta-testing every this many epochs.
"""
def __init__(self,
env,
policy,
value_function,
inner_lr=_Default(1e-1),
outer_lr=1e-3,
lr_clip_range=5e-1,
max_path_length=100,
discount=0.99,
gae_lambda=1.0,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
meta_batch_size=20,
num_grad_updates=1,
meta_evaluator=None,
evaluate_every_n_epochs=1):
policy_optimizer = OptimizerWrapper(
(torch.optim.Adam, dict(lr=inner_lr)), policy)
vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=inner_lr)),
value_function)
inner_algo = PPO(env.spec,
policy,
value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
lr_clip_range=lr_clip_range,
max_path_length=max_path_length,
num_train_per_epoch=1,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method)
super().__init__(inner_algo=inner_algo,
env=env,
policy=policy,
meta_optimizer=torch.optim.Adam,
meta_batch_size=meta_batch_size,
inner_lr=inner_lr,
outer_lr=outer_lr,
num_grad_updates=num_grad_updates,
meta_evaluator=meta_evaluator,
evaluate_every_n_epochs=evaluate_every_n_epochs)

CSD-locomotion-master/garaged/src/garage/torch/algos/maml_trpo.py

"""Model-Agnostic Meta-Learning (MAML) algorithm applied to TRPO."""
import torch
from garage import _Default
from garage.torch.algos import VPG
from garage.torch.algos.maml import MAML
from garage.torch.optimizers import (ConjugateGradientOptimizer,
OptimizerWrapper)
class MAMLTRPO(MAML):
"""Model-Agnostic Meta-Learning (MAML) applied to TRPO.
Args:
env (garage.envs.GarageEnv): A multi-task environment.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.np.baselines.Baseline): The value function.
inner_lr (float): Adaptation learning rate.
outer_lr (float): Meta policy learning rate.
max_kl_step (float): The maximum KL divergence between old and new
policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
meta_batch_size (int): Number of tasks sampled per batch.
num_grad_updates (int): Number of adaptation gradient steps.
meta_evaluator (garage.experiment.MetaEvaluator): A meta evaluator for
meta-testing. If None, don't do meta-testing.
        evaluate_every_n_epochs (int): Do meta-testing every this many epochs.
"""
def __init__(self,
env,
policy,
value_function,
inner_lr=_Default(1e-2),
outer_lr=1e-3,
max_kl_step=0.01,
max_path_length=500,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
meta_batch_size=40,
num_grad_updates=1,
meta_evaluator=None,
evaluate_every_n_epochs=1):
policy_optimizer = OptimizerWrapper(
(torch.optim.Adam, dict(lr=inner_lr)), policy)
vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=inner_lr)),
value_function)
inner_algo = VPG(env.spec,
policy,
value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
max_path_length=max_path_length,
num_train_per_epoch=1,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method)
meta_optimizer = (ConjugateGradientOptimizer,
dict(max_constraint_value=max_kl_step))
super().__init__(inner_algo=inner_algo,
env=env,
policy=policy,
meta_optimizer=meta_optimizer,
meta_batch_size=meta_batch_size,
inner_lr=inner_lr,
outer_lr=outer_lr,
num_grad_updates=num_grad_updates,
meta_evaluator=meta_evaluator,
evaluate_every_n_epochs=evaluate_every_n_epochs)

CSD-locomotion-master/garaged/src/garage/torch/algos/maml_vpg.py

"""Model-Agnostic Meta-Learning (MAML) algorithm applied to VPG."""
import torch
from garage import _Default
from garage.torch.algos import VPG
from garage.torch.algos.maml import MAML
from garage.torch.optimizers import OptimizerWrapper
class MAMLVPG(MAML):
"""Model-Agnostic Meta-Learning (MAML) applied to VPG.
Args:
env (garage.envs.GarageEnv): A multi-task environment.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.np.baselines.Baseline): The value function.
inner_lr (float): Adaptation learning rate.
outer_lr (float): Meta policy learning rate.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
meta_batch_size (int): Number of tasks sampled per batch.
num_grad_updates (int): Number of adaptation gradient steps.
meta_evaluator (garage.experiment.MetaEvaluator): A meta evaluator for
meta-testing. If None, don't do meta-testing.
        evaluate_every_n_epochs (int): Do meta-testing every this many epochs.
"""
def __init__(self,
env,
policy,
value_function,
inner_lr=_Default(1e-1),
outer_lr=1e-3,
max_path_length=100,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
meta_batch_size=20,
num_grad_updates=1,
meta_evaluator=None,
evaluate_every_n_epochs=1):
policy_optimizer = OptimizerWrapper(
(torch.optim.Adam, dict(lr=inner_lr)), policy)
vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=inner_lr)),
value_function)
inner_algo = VPG(env.spec,
policy,
value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
max_path_length=max_path_length,
num_train_per_epoch=1,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method)
super().__init__(inner_algo=inner_algo,
env=env,
policy=policy,
meta_optimizer=torch.optim.Adam,
meta_batch_size=meta_batch_size,
inner_lr=inner_lr,
outer_lr=outer_lr,
num_grad_updates=num_grad_updates,
meta_evaluator=meta_evaluator,
evaluate_every_n_epochs=evaluate_every_n_epochs)

CSD-locomotion-master/garaged/src/garage/torch/algos/mtsac.py

"""This module creates an MTSAC model in PyTorch."""
import numpy as np
import torch
from garage import log_multitask_performance, TrajectoryBatch
from garage.np import obtain_evaluation_samples
from garage.torch import global_device
from garage.torch.algos import SAC
class MTSAC(SAC):
"""A MTSAC Model in Torch.
    This MTSAC implementation is the same as SAC except for a small change
    called "disentangled alphas". Alpha is the entropy coefficient that is
    used to control exploration of the agent/policy. Disentangling alphas
    refers to having a separate alpha coefficient for every task learned by
    the policy. The alphas are accessed using the one-hot encoding of an id
    that is assigned to each task.
Args:
policy (garage.torch.policy.Policy): Policy/Actor/Agent that is being
optimized by SAC.
qf1 (garage.torch.q_function.ContinuousMLPQFunction): QFunction/Critic
used for actor/policy optimization. See Soft Actor-Critic and
Applications.
qf2 (garage.torch.q_function.ContinuousMLPQFunction): QFunction/Critic
used for actor/policy optimization. See Soft Actor-Critic and
Applications.
replay_buffer (garage.replay_buffer.ReplayBuffer): Stores transitions
that are previously collected by the sampler.
env_spec (garage.envs.env_spec.EnvSpec): The env_spec attribute of the
            environment that the agent is being trained in. Usually accessible
by calling env.spec.
num_tasks (int): The number of tasks being learned.
max_path_length (int): The max path length of the algorithm.
max_eval_path_length (int or None): Maximum length of paths used for
off-policy evaluation. If None, defaults to `max_path_length`.
eval_env (garage.envs.GarageEnv): The environment used for collecting
evaluation trajectories.
gradient_steps_per_itr (int): Number of optimization steps that should
occur before the training step is over and a new batch of
transitions is collected by the sampler.
fixed_alpha (float): The entropy/temperature to be used if temperature
is not supposed to be learned.
target_entropy (float): target entropy to be used during
entropy/temperature optimization. If None, the default heuristic
from Soft Actor-Critic Algorithms and Applications is used.
initial_log_entropy (float): initial entropy/temperature coefficient
to be used if a fixed_alpha is not being used (fixed_alpha=None),
and the entropy/temperature coefficient is being learned.
discount (float): The discount factor to be used during sampling and
critic/q_function optimization.
buffer_batch_size (int): The number of transitions sampled from the
replay buffer that are used during a single optimization step.
min_buffer_size (int): The minimum number of transitions that need to
be in the replay buffer before training can begin.
target_update_tau (float): A coefficient that controls the rate at
which the target q_functions update over optimization iterations.
policy_lr (float): Learning rate for policy optimizers.
qf_lr (float): Learning rate for q_function optimizers.
reward_scale (float): Reward multiplier. Changing this hyperparameter
changes the effect that the reward from a transition will have
during optimization.
optimizer (torch.optim.Optimizer): Optimizer to be used for
policy/actor, q_functions/critics, and temperature/entropy
optimizations.
steps_per_epoch (int): Number of train_once calls per epoch.
num_evaluation_trajectories (int): The number of evaluation
trajectories used for computing eval stats at the end of every
epoch.
"""
def __init__(
self,
policy,
qf1,
qf2,
replay_buffer,
env_spec,
num_tasks,
*, # Everything after this is numbers.
max_path_length,
max_eval_path_length=None,
eval_env,
gradient_steps_per_itr,
fixed_alpha=None,
target_entropy=None,
initial_log_entropy=0.,
discount=0.99,
buffer_batch_size=64,
min_buffer_size=int(1e4),
target_update_tau=5e-3,
policy_lr=3e-4,
qf_lr=3e-4,
reward_scale=1.0,
optimizer=torch.optim.Adam,
steps_per_epoch=1,
# yapf: disable
num_evaluation_trajectories=5):
# yapf: enable
super().__init__(
policy=policy,
qf1=qf1,
qf2=qf2,
replay_buffer=replay_buffer,
env_spec=env_spec,
max_path_length=max_path_length,
max_eval_path_length=max_eval_path_length,
gradient_steps_per_itr=gradient_steps_per_itr,
fixed_alpha=fixed_alpha,
target_entropy=target_entropy,
initial_log_entropy=initial_log_entropy,
discount=discount,
buffer_batch_size=buffer_batch_size,
min_buffer_size=min_buffer_size,
target_update_tau=target_update_tau,
policy_lr=policy_lr,
qf_lr=qf_lr,
reward_scale=reward_scale,
optimizer=optimizer,
steps_per_epoch=steps_per_epoch,
num_evaluation_trajectories=num_evaluation_trajectories,
eval_env=eval_env)
self._num_tasks = num_tasks
self._eval_env = eval_env
self._use_automatic_entropy_tuning = fixed_alpha is None
self._fixed_alpha = fixed_alpha
if self._use_automatic_entropy_tuning:
if target_entropy:
self._target_entropy = target_entropy
else:
self._target_entropy = -np.prod(
self.env_spec.action_space.shape).item()
self._log_alpha = torch.Tensor([self._initial_log_entropy] *
self._num_tasks).requires_grad_()
            self._alpha_optimizer = optimizer([self._log_alpha],
                                              lr=self._policy_lr)
else:
self._log_alpha = torch.Tensor([self._fixed_alpha] *
self._num_tasks).log()
self._epoch_mean_success_rate = []
self._epoch_median_success_rate = []
def _get_log_alpha(self, samples_data):
"""Return the value of log_alpha.
Args:
samples_data (dict): Transitions(S,A,R,S') that are sampled from
the replay buffer. It should have the keys 'observation',
'action', 'reward', 'terminal', and 'next_observations'.
Raises:
ValueError: If the number of tasks, num_tasks passed to
this algorithm doesn't match the length of the task
one-hot id in the observation vector.
Note:
samples_data's entries should be torch.Tensor's with the following
shapes:
observation: :math:`(N, O^*)`
action: :math:`(N, A^*)`
reward: :math:`(N, 1)`
terminal: :math:`(N, 1)`
next_observation: :math:`(N, O^*)`
Returns:
torch.Tensor: log_alpha. shape is (1, self.buffer_batch_size)
"""
obs = samples_data['observation']
log_alpha = self._log_alpha
one_hots = obs[:, -self._num_tasks:]
if (log_alpha.shape[0] != one_hots.shape[1]
or one_hots.shape[1] != self._num_tasks
or log_alpha.shape[0] != self._num_tasks):
raise ValueError(
'The number of tasks in the environment does '
'not match self._num_tasks. Are you sure that you passed '
                'the correct number of tasks?')
ret = torch.mm(one_hots, log_alpha.unsqueeze(0).t()).squeeze()
return ret
def _evaluate_policy(self, epoch):
"""Evaluate the performance of the policy via deterministic rollouts.
Statistics such as (average) discounted return and success rate are
recorded.
Args:
epoch (int): The current training epoch.
Returns:
float: The average return across self._num_evaluation_trajectories
trajectories
"""
eval_trajs = []
for _ in range(self._num_tasks):
eval_trajs.append(
obtain_evaluation_samples(
self.policy,
self._eval_env,
max_path_length=self._max_eval_path_length,
num_trajs=self._num_evaluation_trajectories))
eval_trajs = TrajectoryBatch.concatenate(*eval_trajs)
last_return = log_multitask_performance(epoch, eval_trajs,
self._discount)
return last_return
def to(self, device=None):
"""Put all the networks within the model on device.
Args:
device (str): ID of GPU or CPU.
"""
super().to(device)
if device is None:
device = global_device()
if not self._use_automatic_entropy_tuning:
self._log_alpha = torch.Tensor([self._fixed_alpha] *
self._num_tasks).log().to(device)
else:
self._log_alpha = torch.Tensor(
[self._initial_log_entropy] *
self._num_tasks).to(device).requires_grad_()
self._alpha_optimizer = self._optimizer([self._log_alpha],
lr=self._policy_lr)
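
# A minimal sketch (illustrative only, not part of the garage API) of the
# disentangled-alpha lookup performed in MTSAC._get_log_alpha above: each
# sample's one-hot task id selects that task's temperature from the per-task
# log_alpha vector.
def _example_disentangled_alpha_lookup():
    log_alpha = torch.tensor([0.1, 0.2, 0.3])  # one log-temperature per task
    one_hots = torch.tensor([[0., 1., 0.],  # a sample from task 1
                             [0., 0., 1.]])  # a sample from task 2
    selected = torch.mm(one_hots, log_alpha.unsqueeze(0).t()).squeeze()
    # selected is tensor([0.2000, 0.3000])
    return selected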

CSD-locomotion-master/garaged/src/garage/torch/algos/pearl.py

"""PEARL and PEARLWorker in PyTorch.
Code is adapted from https://github.com/katerakelly/oyster.
"""
import copy
import akro
from dowel import logger
import numpy as np
import torch
from garage import InOutSpec, TimeStep
from garage.envs import EnvSpec
from garage.experiment import MetaEvaluator
from garage.np.algos import MetaRLAlgorithm
from garage.replay_buffer import PathBuffer
from garage.sampler import DefaultWorker
from garage.torch import global_device
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import ContextConditionedPolicy
class PEARL(MetaRLAlgorithm):
r"""A PEARL model based on https://arxiv.org/abs/1903.08254.
    PEARL, which stands for Probabilistic Embeddings for Actor-Critic
    Reinforcement Learning, is an off-policy meta-RL algorithm. It is built
    on top of SAC using two Q-functions and a value function, with the
    addition of an inference network that estimates the posterior
    :math:`q(z \| c)`. The policy is conditioned on the latent variable Z in
    order to adapt its behavior to specific tasks.
Args:
        env (list[GarageEnv]): Batch of sampled environment updates
            (EnvUpdate), which, when invoked on environments, will configure
            them with new tasks.
policy_class (garage.torch.policies.Policy): Context-conditioned policy
class.
encoder_class (garage.torch.embeddings.ContextEncoder): Encoder class
for the encoder in context-conditioned policy.
inner_policy (garage.torch.policies.Policy): Policy.
qf (torch.nn.Module): Q-function.
vf (torch.nn.Module): Value function.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks for testing.
latent_dim (int): Size of latent context vector.
encoder_hidden_sizes (list[int]): Output dimension of dense layer(s) of
the context encoder.
test_env_sampler (garage.experiment.SetTaskSampler): Sampler for test
tasks.
policy_lr (float): Policy learning rate.
qf_lr (float): Q-function learning rate.
vf_lr (float): Value function learning rate.
context_lr (float): Inference network learning rate.
policy_mean_reg_coeff (float): Policy mean regulation weight.
policy_std_reg_coeff (float): Policy std regulation weight.
policy_pre_activation_coeff (float): Policy pre-activation weight.
soft_target_tau (float): Interpolation parameter for doing the
soft target update.
kl_lambda (float): KL lambda value.
optimizer_class (callable): Type of optimizer for training networks.
use_information_bottleneck (bool): False means latent context is
deterministic.
use_next_obs_in_context (bool): Whether or not to use next observation
in distinguishing between tasks.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_steps_posterior (int): Number of transitions to obtain per task
with z ~ posterior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_path_length (int): Maximum path length.
discount (float): RL discount factor.
replay_buffer_size (int): Maximum samples in replay buffer.
reward_scale (int): Reward scale.
update_post_train (int): How often to resample context when obtaining
data during training (in trajectories).
"""
# pylint: disable=too-many-statements
def __init__(self,
env,
inner_policy,
qf,
vf,
num_train_tasks,
num_test_tasks,
latent_dim,
encoder_hidden_sizes,
test_env_sampler,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
policy_lr=3E-4,
qf_lr=3E-4,
vf_lr=3E-4,
context_lr=3E-4,
policy_mean_reg_coeff=1E-3,
policy_std_reg_coeff=1E-3,
policy_pre_activation_coeff=0.,
soft_target_tau=0.005,
kl_lambda=.1,
optimizer_class=torch.optim.Adam,
use_information_bottleneck=True,
use_next_obs_in_context=False,
meta_batch_size=64,
num_steps_per_epoch=1000,
num_initial_steps=100,
num_tasks_sample=100,
num_steps_prior=100,
num_steps_posterior=0,
num_extra_rl_steps_posterior=100,
batch_size=1024,
embedding_batch_size=1024,
embedding_mini_batch_size=1024,
max_path_length=1000,
discount=0.99,
replay_buffer_size=1000000,
reward_scale=1,
update_post_train=1):
self._env = env
self._qf1 = qf
self._qf2 = copy.deepcopy(qf)
self._vf = vf
self._num_train_tasks = num_train_tasks
self._num_test_tasks = num_test_tasks
self._latent_dim = latent_dim
self._policy_mean_reg_coeff = policy_mean_reg_coeff
self._policy_std_reg_coeff = policy_std_reg_coeff
self._policy_pre_activation_coeff = policy_pre_activation_coeff
self._soft_target_tau = soft_target_tau
self._kl_lambda = kl_lambda
self._use_information_bottleneck = use_information_bottleneck
self._use_next_obs_in_context = use_next_obs_in_context
self._meta_batch_size = meta_batch_size
self._num_steps_per_epoch = num_steps_per_epoch
self._num_initial_steps = num_initial_steps
self._num_tasks_sample = num_tasks_sample
self._num_steps_prior = num_steps_prior
self._num_steps_posterior = num_steps_posterior
self._num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
self._batch_size = batch_size
self._embedding_batch_size = embedding_batch_size
self._embedding_mini_batch_size = embedding_mini_batch_size
self.max_path_length = max_path_length
self._discount = discount
self._replay_buffer_size = replay_buffer_size
self._reward_scale = reward_scale
self._update_post_train = update_post_train
self._task_idx = None
self._is_resuming = False
worker_args = dict(deterministic=True, accum_context=True)
self._evaluator = MetaEvaluator(test_task_sampler=test_env_sampler,
max_path_length=max_path_length,
worker_class=PEARLWorker,
worker_args=worker_args,
n_test_tasks=num_test_tasks)
encoder_spec = self.get_env_spec(env[0](), latent_dim, 'encoder')
encoder_in_dim = int(np.prod(encoder_spec.input_space.shape))
encoder_out_dim = int(np.prod(encoder_spec.output_space.shape))
context_encoder = encoder_class(input_dim=encoder_in_dim,
output_dim=encoder_out_dim,
hidden_sizes=encoder_hidden_sizes)
self._policy = policy_class(
latent_dim=latent_dim,
context_encoder=context_encoder,
policy=inner_policy,
use_information_bottleneck=use_information_bottleneck,
use_next_obs=use_next_obs_in_context)
# buffer for training RL update
self._replay_buffers = {
i: PathBuffer(replay_buffer_size)
for i in range(num_train_tasks)
}
self._context_replay_buffers = {
i: PathBuffer(replay_buffer_size)
for i in range(num_train_tasks)
}
self.target_vf = copy.deepcopy(self._vf)
self.vf_criterion = torch.nn.MSELoss()
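        # Optimizer targets below assume the ordering exposed by the
        # context-conditioned policy's `networks` property:
        # networks[0] = context encoder, networks[1] = inner policy.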
self._policy_optimizer = optimizer_class(
self._policy.networks[1].parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self._qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self._qf2.parameters(),
lr=qf_lr,
)
self.vf_optimizer = optimizer_class(
self._vf.parameters(),
lr=vf_lr,
)
self.context_optimizer = optimizer_class(
self._policy.networks[0].parameters(),
lr=context_lr,
)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
data = self.__dict__.copy()
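        # Replay buffers can be very large and are cheap to recreate, so
        # they are dropped from the pickled state and rebuilt in
        # __setstate__.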
del data['_replay_buffers']
del data['_context_replay_buffers']
return data
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
self.__dict__.update(state)
self._replay_buffers = {
i: PathBuffer(self._replay_buffer_size)
for i in range(self._num_train_tasks)
}
self._context_replay_buffers = {
i: PathBuffer(self._replay_buffer_size)
for i in range(self._num_train_tasks)
}
self._is_resuming = True
def train(self, runner):
"""Obtain samples, train, and evaluate for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
"""
for _ in runner.step_epochs():
epoch = runner.step_itr / self._num_steps_per_epoch
# obtain initial set of samples from all train tasks
if epoch == 0 or self._is_resuming:
for idx in range(self._num_train_tasks):
self._task_idx = idx
self._obtain_samples(runner, epoch,
self._num_initial_steps, np.inf)
self._is_resuming = False
# obtain samples from random tasks
for _ in range(self._num_tasks_sample):
idx = np.random.randint(self._num_train_tasks)
self._task_idx = idx
self._context_replay_buffers[idx].clear()
# obtain samples with z ~ prior
if self._num_steps_prior > 0:
self._obtain_samples(runner, epoch, self._num_steps_prior,
np.inf)
# obtain samples with z ~ posterior
if self._num_steps_posterior > 0:
self._obtain_samples(runner, epoch,
self._num_steps_posterior,
self._update_post_train)
                # obtain extra samples for RL training but not the encoder
if self._num_extra_rl_steps_posterior > 0:
self._obtain_samples(runner,
epoch,
self._num_extra_rl_steps_posterior,
self._update_post_train,
add_to_enc_buffer=False)
logger.log('Training...')
# sample train tasks and optimize networks
self._train_once()
runner.step_itr += 1
logger.log('Evaluating...')
# evaluate
self._policy.reset_belief()
self._evaluator.evaluate(self)
def _train_once(self):
"""Perform one iteration of training."""
for _ in range(self._num_steps_per_epoch):
indices = np.random.choice(range(self._num_train_tasks),
self._meta_batch_size)
self._optimize_policy(indices)
def _optimize_policy(self, indices):
"""Perform algorithm optimizing.
Args:
indices (list): Tasks used for training.
"""
num_tasks = len(indices)
context = self._sample_context(indices)
# clear context and reset belief of policy
self._policy.reset_belief(num_tasks=num_tasks)
# data shape is (task, batch, feat)
obs, actions, rewards, next_obs, terms = self._sample_data(indices)
policy_outputs, task_z = self._policy(obs, context)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
# flatten out the task dimension
t, b, _ = obs.size()
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
# optimize qf and encoder networks
q1_pred = self._qf1(torch.cat([obs, actions], dim=1), task_z)
q2_pred = self._qf2(torch.cat([obs, actions], dim=1), task_z)
v_pred = self._vf(obs, task_z.detach())
with torch.no_grad():
target_v_values = self.target_vf(next_obs, task_z)
# KL constraint on z if probabilistic
self.context_optimizer.zero_grad()
if self._use_information_bottleneck:
kl_div = self._policy.compute_kl_div()
kl_loss = self._kl_lambda * kl_div
kl_loss.backward(retain_graph=True)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
rewards_flat = rewards.view(self._batch_size * num_tasks, -1)
rewards_flat = rewards_flat * self._reward_scale
terms_flat = terms.view(self._batch_size * num_tasks, -1)
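        # Bellman target for both critics:
        #   y = scaled_reward + (1 - done) * discount * V_target(s')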
q_target = rewards_flat + (
1. - terms_flat) * self._discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target)**2) + torch.mean(
(q2_pred - q_target)**2)
qf_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.step()
self.context_optimizer.step()
# compute min Q on the new actions
q1 = self._qf1(torch.cat([obs, new_actions], dim=1), task_z.detach())
q2 = self._qf2(torch.cat([obs, new_actions], dim=1), task_z.detach())
min_q = torch.min(q1, q2)
# optimize vf
v_target = min_q - log_pi
vf_loss = self.vf_criterion(v_pred, v_target.detach())
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
self._update_target_network()
# optimize policy
log_policy_target = min_q
policy_loss = (log_pi - log_policy_target).mean()
mean_reg_loss = self._policy_mean_reg_coeff * (policy_mean**2).mean()
std_reg_loss = self._policy_std_reg_coeff * (policy_log_std**2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self._policy_pre_activation_coeff * (
(pre_tanh_value**2).sum(dim=1).mean())
policy_reg_loss = (mean_reg_loss + std_reg_loss +
pre_activation_reg_loss)
policy_loss = policy_loss + policy_reg_loss
self._policy_optimizer.zero_grad()
policy_loss.backward()
self._policy_optimizer.step()
def _obtain_samples(self,
runner,
itr,
num_samples,
update_posterior_rate,
add_to_enc_buffer=True):
"""Obtain samples.
Args:
runner (LocalRunner): LocalRunner.
itr (int): Index of iteration (epoch).
num_samples (int): Number of samples to obtain.
update_posterior_rate (int): How often (in trajectories) to infer
posterior of policy.
add_to_enc_buffer (bool): Whether or not to add samples to encoder
buffer.
"""
self._policy.reset_belief()
total_samples = 0
if update_posterior_rate != np.inf:
num_samples_per_batch = (update_posterior_rate *
self.max_path_length)
else:
num_samples_per_batch = num_samples
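        # Collect whole trajectories until at least num_samples transitions
        # have been gathered, optionally re-inferring the posterior over z
        # every num_samples_per_batch steps.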
while total_samples < num_samples:
paths = runner.obtain_samples(itr, num_samples_per_batch,
self._policy,
self._env[self._task_idx])
total_samples += sum([len(path['rewards']) for path in paths])
for path in paths:
p = {
'observations': path['observations'],
'actions': path['actions'],
'rewards': path['rewards'].reshape(-1, 1),
'next_observations': path['next_observations'],
'dones': path['dones'].reshape(-1, 1)
}
self._replay_buffers[self._task_idx].add_path(p)
if add_to_enc_buffer:
self._context_replay_buffers[self._task_idx].add_path(p)
if update_posterior_rate != np.inf:
context = self._sample_context(self._task_idx)
self._policy.infer_posterior(context)
def _sample_data(self, indices):
"""Sample batch of training data from a list of tasks.
Args:
indices (list): List of task indices to sample from.
Returns:
            torch.Tensor: Observations, with shape :math:`(X, N, O^*)` where X
is the number of tasks. N is batch size.
torch.Tensor: Actions, with shape :math:`(X, N, A^*)`.
torch.Tensor: Rewards, with shape :math:`(X, N, 1)`.
            torch.Tensor: Next observations, with shape :math:`(X, N, O^*)`.
torch.Tensor: Dones, with shape :math:`(X, N, 1)`.
"""
# transitions sampled randomly from replay buffer
initialized = False
for idx in indices:
batch = self._replay_buffers[idx].sample_transitions(
self._batch_size)
if not initialized:
o = batch['observations'][np.newaxis]
a = batch['actions'][np.newaxis]
r = batch['rewards'][np.newaxis]
no = batch['next_observations'][np.newaxis]
d = batch['dones'][np.newaxis]
initialized = True
else:
o = np.vstack((o, batch['observations'][np.newaxis]))
a = np.vstack((a, batch['actions'][np.newaxis]))
r = np.vstack((r, batch['rewards'][np.newaxis]))
no = np.vstack((no, batch['next_observations'][np.newaxis]))
d = np.vstack((d, batch['dones'][np.newaxis]))
o = torch.as_tensor(o, device=global_device()).float()
a = torch.as_tensor(a, device=global_device()).float()
r = torch.as_tensor(r, device=global_device()).float()
no = torch.as_tensor(no, device=global_device()).float()
d = torch.as_tensor(d, device=global_device()).float()
return o, a, r, no, d
def _sample_context(self, indices):
"""Sample batch of context from a list of tasks.
Args:
indices (list): List of task indices to sample from.
Returns:
torch.Tensor: Context data, with shape :math:`(X, N, C)`. X is the
number of tasks. N is batch size. C is the combined size of
observation, action, reward, and next observation if next
observation is used in context. Otherwise, C is the combined
size of observation, action, and reward.
"""
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
initialized = False
for idx in indices:
batch = self._context_replay_buffers[idx].sample_transitions(
self._embedding_batch_size)
o = batch['observations']
a = batch['actions']
r = batch['rewards']
context = np.hstack((np.hstack((o, a)), r))
if self._use_next_obs_in_context:
context = np.hstack((context, batch['next_observations']))
if not initialized:
final_context = context[np.newaxis]
initialized = True
else:
final_context = np.vstack((final_context, context[np.newaxis]))
final_context = torch.as_tensor(final_context,
device=global_device()).float()
if len(indices) == 1:
final_context = final_context.unsqueeze(0)
return final_context
def _update_target_network(self):
"""Update parameters in the target vf network."""
for target_param, param in zip(self.target_vf.parameters(),
self._vf.parameters()):
target_param.data.copy_(target_param.data *
(1.0 - self._soft_target_tau) +
param.data * self._soft_target_tau)
@property
def policy(self):
"""Return all the policy within the model.
Returns:
garage.torch.policies.Policy: Policy within the model.
"""
return self._policy
@property
def networks(self):
"""Return all the networks within the model.
Returns:
list: A list of networks.
"""
return self._policy.networks + [self._policy] + [
self._qf1, self._qf2, self._vf, self.target_vf
]
def get_exploration_policy(self):
"""Return a policy used before adaptation to a specific task.
Each time it is retrieved, this policy should only be evaluated in one
task.
Returns:
garage.Policy: The policy used to obtain samples that are later
used for meta-RL adaptation.
"""
return self._policy
def adapt_policy(self, exploration_policy, exploration_trajectories):
"""Produce a policy adapted for a task.
Args:
exploration_policy (garage.Policy): A policy which was returned
from get_exploration_policy(), and which generated
exploration_trajectories by interacting with an environment.
The caller may not use this object after passing it into this
method.
exploration_trajectories (garage.TrajectoryBatch): Trajectories to
adapt to, generated by exploration_policy exploring the
environment.
Returns:
garage.Policy: A policy adapted to the task represented by the
exploration_trajectories.
"""
total_steps = sum(exploration_trajectories.lengths)
o = exploration_trajectories.observations
a = exploration_trajectories.actions
r = exploration_trajectories.rewards.reshape(total_steps, 1)
ctxt = np.hstack((o, a, r)).reshape(1, total_steps, -1)
context = torch.as_tensor(ctxt, device=global_device()).float()
self._policy.infer_posterior(context)
return self._policy
def to(self, device=None):
"""Put all the networks within the model on device.
Args:
device (str): ID of GPU or CPU.
"""
device = device or global_device()
for net in self.networks:
net.to(device)
@classmethod
def augment_env_spec(cls, env_spec, latent_dim):
"""Augment environment by a size of latent dimension.
Args:
env_spec (garage.envs.EnvSpec): Environment specs to be augmented.
latent_dim (int): Latent dimension.
Returns:
garage.envs.EnvSpec: Augmented environment specs.
"""
obs_dim = int(np.prod(env_spec.observation_space.shape))
action_dim = int(np.prod(env_spec.action_space.shape))
aug_obs = akro.Box(low=-1,
high=1,
shape=(obs_dim + latent_dim, ),
dtype=np.float32)
aug_act = akro.Box(low=-1,
high=1,
shape=(action_dim, ),
dtype=np.float32)
return EnvSpec(aug_obs, aug_act)
@classmethod
def get_env_spec(cls, env_spec, latent_dim, module):
"""Get environment specs of encoder with latent dimension.
Args:
env_spec (garage.envs.EnvSpec): Environment specs.
latent_dim (int): Latent dimension.
module (str): Module to get environment specs for.
Returns:
garage.envs.InOutSpec: Module environment specs with latent
dimension.
"""
obs_dim = int(np.prod(env_spec.observation_space.shape))
action_dim = int(np.prod(env_spec.action_space.shape))
if module == 'encoder':
in_dim = obs_dim + action_dim + 1
out_dim = latent_dim * 2
elif module == 'vf':
in_dim = obs_dim
out_dim = latent_dim
in_space = akro.Box(low=-1, high=1, shape=(in_dim, ), dtype=np.float32)
out_space = akro.Box(low=-1,
high=1,
shape=(out_dim, ),
dtype=np.float32)
if module == 'encoder':
spec = InOutSpec(in_space, out_space)
elif module == 'vf':
spec = EnvSpec(in_space, out_space)
return spec
class PEARLWorker(DefaultWorker):
"""A worker class used in sampling for PEARL.
    It stores the context and resamples the policy's belief at every step.
Args:
        seed(int): The seed to use to initialize random number generators.
        max_path_length(int or float): The maximum length of paths which will
be sampled. Can be (floating point) infinity.
worker_number(int): The number of the worker where this update is
occurring. This argument is used to set a different seed for each
worker.
deterministic(bool): If true, use the mean action returned by the
stochastic policy instead of sampling from the returned action
distribution.
accum_context(bool): If true, update context of the agent.
Attributes:
agent(Policy or None): The worker's agent.
env(gym.Env or None): The worker's environment.
"""
def __init__(self,
*,
seed,
max_path_length,
worker_number,
deterministic=False,
accum_context=False):
self._deterministic = deterministic
self._accum_context = accum_context
super().__init__(seed=seed,
max_path_length=max_path_length,
worker_number=worker_number)
def start_rollout(self):
"""Begin a new rollout."""
self._path_length = 0
self._prev_obs = self.env.reset()
def step_rollout(self):
"""Take a single time-step in the current rollout.
Returns:
bool: True iff the path is done, either due to the environment
                indicating termination or due to reaching `max_path_length`.
"""
if self._path_length < self._max_path_length:
a, agent_info = self.agent.get_action(self._prev_obs)
if self._deterministic:
a = agent_info['mean']
next_o, r, d, env_info = self.env.step(a)
self._observations.append(self._prev_obs)
self._rewards.append(r)
self._actions.append(a)
for k, v in agent_info.items():
self._agent_infos[k].append(v)
for k, v in env_info.items():
self._env_infos[k].append(v)
self._path_length += 1
self._terminals.append(d)
if self._accum_context:
s = TimeStep(env_spec=self.env,
observation=self._prev_obs,
next_observation=next_o,
action=a,
reward=float(r),
terminal=d,
env_info=env_info,
agent_info=agent_info)
self.agent.update_context(s)
if not d:
self._prev_obs = next_o
return False
self._lengths.append(self._path_length)
self._last_observations.append(self._prev_obs)
return True
def rollout(self):
"""Sample a single rollout of the agent in the environment.
Returns:
garage.TrajectoryBatch: The collected trajectory.
"""
self.agent.sample_from_belief()
self.start_rollout()
while not self.step_rollout():
pass
self._agent_infos['context'] = [self.agent.z.detach().cpu().numpy()
] * self._max_path_length
return self.collect_rollout()
| 30,030 | 38.052016 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/algos/ppo.py | """Proximal Policy Optimization (PPO)."""
import torch
from garage.torch.algos import VPG
from garage.torch.optimizers import OptimizerWrapper
class PPO(VPG):
"""Proximal Policy Optimization (PPO).
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
max_path_length (int): Maximum length of a single rollout.
lr_clip_range (float): The limit on the likelihood ratio between
policies.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
        use_softplus_entropy (bool): Whether to pass the entropy estimate
            through a softplus function to prevent it from becoming negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
"""
def __init__(self,
env_spec,
policy,
value_function,
policy_optimizer=None,
vf_optimizer=None,
max_path_length=500,
lr_clip_range=2e-1,
num_train_per_epoch=1,
discount=0.99,
gae_lambda=0.97,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy'):
if policy_optimizer is None:
policy_optimizer = OptimizerWrapper(
(torch.optim.Adam, dict(lr=2.5e-4)),
policy,
max_optimization_epochs=10,
minibatch_size=64)
if vf_optimizer is None:
vf_optimizer = OptimizerWrapper(
(torch.optim.Adam, dict(lr=2.5e-4)),
value_function,
max_optimization_epochs=10,
minibatch_size=64)
super().__init__(env_spec=env_spec,
policy=policy,
value_function=value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
max_path_length=max_path_length,
num_train_per_epoch=num_train_per_epoch,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method)
self._lr_clip_range = lr_clip_range
def _compute_objective(self, advantages, obs, actions, rewards):
r"""Compute objective value.
Args:
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated objective values
with shape :math:`(N \dot [T], )`.
"""
        # Likelihood ratio between the new and old policies
with torch.no_grad():
old_ll = self._old_policy(obs)[0].log_prob(actions)
new_ll = self.policy(obs)[0].log_prob(actions)
likelihood_ratio = (new_ll - old_ll).exp()
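        # Exponentiating the difference of log-likelihoods is numerically
        # more stable than dividing raw probabilities.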
# Calculate surrogate
surrogate = likelihood_ratio * advantages
# Clipping the constraint
likelihood_ratio_clip = torch.clamp(likelihood_ratio,
min=1 - self._lr_clip_range,
max=1 + self._lr_clip_range)
        # Calculate the clipped surrogate
surrogate_clip = likelihood_ratio_clip * advantages
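        # Illustrative arithmetic (hypothetical numbers): with
        # lr_clip_range=0.2, a likelihood ratio of 1.5 and an advantage of
        # 2.0 give min(1.5 * 2.0, 1.2 * 2.0) = 2.4, so overly large policy
        # changes stop contributing to the objective.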
return torch.min(surrogate, surrogate_clip)
| 5,484 | 40.240602 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/algos/sac.py | """This module creates a SAC model in PyTorch."""
from collections import deque
import copy
from dowel import tabular
import numpy as np
import torch
import torch.nn.functional as F
from garage import log_performance
from garage.np import obtain_evaluation_samples
from garage.np.algos import RLAlgorithm
from garage.sampler import OffPolicyVectorizedSampler
from garage.torch import dict_np_to_torch, global_device
class SAC(RLAlgorithm):
"""A SAC Model in Torch.
Based on Soft Actor-Critic and Applications:
https://arxiv.org/abs/1812.05905
Soft Actor-Critic (SAC) is an algorithm which optimizes a stochastic
policy in an off-policy way, forming a bridge between stochastic policy
optimization and DDPG-style approaches.
A central feature of SAC is entropy regularization. The policy is trained
to maximize a trade-off between expected return and entropy, a measure of
randomness in the policy. This has a close connection to the
exploration-exploitation trade-off: increasing entropy results in more
exploration, which can accelerate learning later on. It can also prevent
the policy from prematurely converging to a bad local optimum.
Args:
policy (garage.torch.policy.Policy): Policy/Actor/Agent that is being
optimized by SAC.
qf1 (garage.torch.q_function.ContinuousMLPQFunction): QFunction/Critic
used for actor/policy optimization. See Soft Actor-Critic and
Applications.
qf2 (garage.torch.q_function.ContinuousMLPQFunction): QFunction/Critic
used for actor/policy optimization. See Soft Actor-Critic and
Applications.
replay_buffer (garage.replay_buffer.ReplayBuffer): Stores transitions
            that were previously collected by the sampler.
env_spec (garage.envs.env_spec.EnvSpec): The env_spec attribute of the
            environment that the agent is being trained in. Usually accessible
by calling env.spec.
max_path_length (int): Max path length of the environment.
max_eval_path_length (int or None): Maximum length of paths used for
off-policy evaluation. If None, defaults to `max_path_length`.
        gradient_steps_per_itr (int): Number of optimization steps that should
occur before the training step is over and a new batch of
transitions is collected by the sampler.
fixed_alpha (float): The entropy/temperature to be used if temperature
is not supposed to be learned.
target_entropy (float): target entropy to be used during
entropy/temperature optimization. If None, the default heuristic
from Soft Actor-Critic Algorithms and Applications is used.
initial_log_entropy (float): initial entropy/temperature coefficient
to be used if a fixed_alpha is not being used (fixed_alpha=None),
and the entropy/temperature coefficient is being learned.
discount (float): Discount factor to be used during sampling and
critic/q_function optimization.
buffer_batch_size (int): The number of transitions sampled from the
replay buffer that are used during a single optimization step.
min_buffer_size (int): The minimum number of transitions that need to
be in the replay buffer before training can begin.
target_update_tau (float): coefficient that controls the rate at which
the target q_functions update over optimization iterations.
policy_lr (float): learning rate for policy optimizers.
qf_lr (float): learning rate for q_function optimizers.
reward_scale (float): reward scale. Changing this hyperparameter
changes the effect that the reward from a transition will have
during optimization.
optimizer (torch.optim.Optimizer): optimizer to be used for
policy/actor, q_functions/critics, and temperature/entropy
optimizations.
steps_per_epoch (int): Number of train_once calls per epoch.
num_evaluation_trajectories (int): The number of evaluation
trajectories used for computing eval stats at the end of every
epoch.
eval_env (garage.envs.GarageEnv): environment used for collecting
evaluation trajectories. If None, a copy of the train env is used.
"""
def __init__(
self,
env_spec,
policy,
qf1,
qf2,
replay_buffer,
*, # Everything after this is numbers.
max_path_length,
max_eval_path_length=None,
gradient_steps_per_itr,
fixed_alpha=None,
target_entropy=None,
initial_log_entropy=0.,
discount=0.99,
buffer_batch_size=64,
min_buffer_size=int(1e4),
target_update_tau=5e-3,
policy_lr=3e-4,
qf_lr=3e-4,
reward_scale=1.0,
optimizer=torch.optim.Adam,
steps_per_epoch=1,
num_evaluation_trajectories=10,
eval_env=None):
self._qf1 = qf1
self._qf2 = qf2
self.replay_buffer = replay_buffer
self._tau = target_update_tau
self._policy_lr = policy_lr
self._qf_lr = qf_lr
self._initial_log_entropy = initial_log_entropy
self._gradient_steps = gradient_steps_per_itr
self._optimizer = optimizer
self._num_evaluation_trajectories = num_evaluation_trajectories
self._eval_env = eval_env
self._min_buffer_size = min_buffer_size
self._steps_per_epoch = steps_per_epoch
self._buffer_batch_size = buffer_batch_size
self._discount = discount
self._reward_scale = reward_scale
self.max_path_length = max_path_length
self._max_eval_path_length = (max_eval_path_length or max_path_length)
# used by OffPolicyVectorizedSampler
self.policy = policy
self.env_spec = env_spec
self.replay_buffer = replay_buffer
self.exploration_policy = None
self.sampler_cls = OffPolicyVectorizedSampler
self._reward_scale = reward_scale
# use 2 target q networks
self._target_qf1 = copy.deepcopy(self._qf1)
self._target_qf2 = copy.deepcopy(self._qf2)
self._policy_optimizer = self._optimizer(self.policy.parameters(),
lr=self._policy_lr)
self._qf1_optimizer = self._optimizer(self._qf1.parameters(),
lr=self._qf_lr)
self._qf2_optimizer = self._optimizer(self._qf2.parameters(),
lr=self._qf_lr)
# automatic entropy coefficient tuning
self._use_automatic_entropy_tuning = fixed_alpha is None
self._fixed_alpha = fixed_alpha
if self._use_automatic_entropy_tuning:
if target_entropy:
self._target_entropy = target_entropy
else:
self._target_entropy = -np.prod(
self.env_spec.action_space.shape).item()
self._log_alpha = torch.Tensor([self._initial_log_entropy
]).requires_grad_()
self._alpha_optimizer = optimizer([self._log_alpha],
lr=self._policy_lr)
else:
self._log_alpha = torch.Tensor([self._fixed_alpha]).log()
self.episode_rewards = deque(maxlen=30)
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
if not self._eval_env:
self._eval_env = runner.get_env_copy()
last_return = None
for _ in runner.step_epochs():
for _ in range(self._steps_per_epoch):
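                # Until the buffer holds at least min_buffer_size
                # transitions, request that many samples so optimization can
                # start; afterwards fall back to the sampler's default batch
                # size (batch_size=None).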
if not (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
batch_size = int(self._min_buffer_size)
else:
batch_size = None
runner.step_path = runner.obtain_samples(
runner.step_itr, batch_size)
path_returns = []
for path in runner.step_path:
self.replay_buffer.add_path(
dict(observation=path['observations'],
action=path['actions'],
reward=path['rewards'].reshape(-1, 1),
next_observation=path['next_observations'],
terminal=path['dones'].reshape(-1, 1)))
path_returns.append(sum(path['rewards']))
assert len(path_returns) == len(runner.step_path)
self.episode_rewards.append(np.mean(path_returns))
for _ in range(self._gradient_steps):
policy_loss, qf1_loss, qf2_loss = self.train_once()
last_return = self._evaluate_policy(runner.step_itr)
self._log_statistics(policy_loss, qf1_loss, qf2_loss)
tabular.record('TotalEnvSteps', runner.total_env_steps)
runner.step_itr += 1
return np.mean(last_return)
def train_once(self, itr=None, paths=None):
"""Complete 1 training iteration of SAC.
Args:
itr (int): Iteration number. This argument is deprecated.
paths (list[dict]): A list of collected paths.
This argument is deprecated.
Returns:
torch.Tensor: loss from actor/policy network after optimization.
torch.Tensor: loss from 1st q-function after optimization.
torch.Tensor: loss from 2nd q-function after optimization.
"""
del itr
del paths
if self.replay_buffer.n_transitions_stored >= self._min_buffer_size:
samples = self.replay_buffer.sample_transitions(
self._buffer_batch_size)
samples = dict_np_to_torch(samples)
policy_loss, qf1_loss, qf2_loss = self.optimize_policy(samples)
self._update_targets()
return policy_loss, qf1_loss, qf2_loss
def _get_log_alpha(self, samples_data):
"""Return the value of log_alpha.
Args:
samples_data (dict): Transitions(S,A,R,S') that are sampled from
the replay buffer. It should have the keys 'observation',
'action', 'reward', 'terminal', and 'next_observations'.
            This function exists in case there are versions of SAC that need
            access to a modified log_alpha, such as multi-task SAC.
Note:
samples_data's entries should be torch.Tensor's with the following
shapes:
observation: :math:`(N, O^*)`
action: :math:`(N, A^*)`
reward: :math:`(N, 1)`
terminal: :math:`(N, 1)`
next_observation: :math:`(N, O^*)`
Returns:
torch.Tensor: log_alpha
"""
del samples_data
log_alpha = self._log_alpha
return log_alpha
def _temperature_objective(self, log_pi, samples_data):
"""Compute the temperature/alpha coefficient loss.
Args:
log_pi(torch.Tensor): log probability of actions that are sampled
from the replay buffer. Shape is (1, buffer_batch_size).
samples_data (dict): Transitions(S,A,R,S') that are sampled from
the replay buffer. It should have the keys 'observation',
'action', 'reward', 'terminal', and 'next_observations'.
Note:
samples_data's entries should be torch.Tensor's with the following
shapes:
observation: :math:`(N, O^*)`
action: :math:`(N, A^*)`
reward: :math:`(N, 1)`
terminal: :math:`(N, 1)`
next_observation: :math:`(N, O^*)`
Returns:
torch.Tensor: the temperature/alpha coefficient loss.
"""
alpha_loss = 0
if self._use_automatic_entropy_tuning:
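            # Temperature loss: -log_alpha * (log_pi + target_entropy),
            # averaged over the batch, so alpha grows when policy entropy
            # falls below the target and shrinks otherwise.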
alpha_loss = (-(self._get_log_alpha(samples_data)) *
(log_pi.detach() + self._target_entropy)).mean()
return alpha_loss
def _actor_objective(self, samples_data, new_actions, log_pi_new_actions):
"""Compute the Policy/Actor loss.
Args:
samples_data (dict): Transitions(S,A,R,S') that are sampled from
the replay buffer. It should have the keys 'observation',
'action', 'reward', 'terminal', and 'next_observations'.
            new_actions(torch.Tensor): Actions resampled from the policy,
                based on the observations, obs, which were sampled from the
replay buffer. Shape is (action_dim, buffer_batch_size).
log_pi_new_actions(torch.Tensor): Log probability of the new
actions on the TanhNormal distributions that they were sampled
from. Shape is (1, buffer_batch_size).
Note:
samples_data's entries should be torch.Tensor's with the following
shapes:
observation: :math:`(N, O^*)`
action: :math:`(N, A^*)`
reward: :math:`(N, 1)`
terminal: :math:`(N, 1)`
next_observation: :math:`(N, O^*)`
Returns:
torch.Tensor: loss from the Policy/Actor.
"""
obs = samples_data['observation']
with torch.no_grad():
alpha = self._get_log_alpha(samples_data).exp()
min_q_new_actions = torch.min(self._qf1(obs, new_actions),
self._qf2(obs, new_actions))
policy_objective = ((alpha * log_pi_new_actions) -
min_q_new_actions.flatten()).mean()
return policy_objective
def _critic_objective(self, samples_data):
"""Compute the Q-function/critic loss.
Args:
samples_data (dict): Transitions(S,A,R,S') that are sampled from
the replay buffer. It should have the keys 'observation',
'action', 'reward', 'terminal', and 'next_observations'.
Note:
samples_data's entries should be torch.Tensor's with the following
shapes:
observation: :math:`(N, O^*)`
action: :math:`(N, A^*)`
reward: :math:`(N, 1)`
terminal: :math:`(N, 1)`
next_observation: :math:`(N, O^*)`
Returns:
torch.Tensor: loss from 1st q-function after optimization.
torch.Tensor: loss from 2nd q-function after optimization.
"""
obs = samples_data['observation']
actions = samples_data['action']
rewards = samples_data['reward'].flatten()
terminals = samples_data['terminal'].flatten()
next_obs = samples_data['next_observation']
with torch.no_grad():
alpha = self._get_log_alpha(samples_data).exp()
q1_pred = self._qf1(obs, actions)
q2_pred = self._qf2(obs, actions)
new_next_actions_dist = self.policy(next_obs)[0]
new_next_actions_pre_tanh, new_next_actions = (
new_next_actions_dist.rsample_with_pre_tanh_value())
new_log_pi = new_next_actions_dist.log_prob(
value=new_next_actions, pre_tanh_value=new_next_actions_pre_tanh)
target_q_values = torch.min(
self._target_qf1(next_obs, new_next_actions),
self._target_qf2(
next_obs, new_next_actions)).flatten() - (alpha * new_log_pi)
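        # Soft Bellman backup:
        #   y = reward_scale * r + (1 - done) * discount
        #       * (min_i Q_target_i(s', a') - alpha * log pi(a'|s'))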
with torch.no_grad():
q_target = rewards * self._reward_scale + (
1. - terminals) * self._discount * target_q_values
qf1_loss = F.mse_loss(q1_pred.flatten(), q_target)
qf2_loss = F.mse_loss(q2_pred.flatten(), q_target)
return qf1_loss, qf2_loss
def _update_targets(self):
"""Update parameters in the target q-functions."""
target_qfs = [self._target_qf1, self._target_qf2]
qfs = [self._qf1, self._qf2]
for target_qf, qf in zip(target_qfs, qfs):
for t_param, param in zip(target_qf.parameters(), qf.parameters()):
t_param.data.copy_(t_param.data * (1.0 - self._tau) +
param.data * self._tau)
def optimize_policy(self, samples_data):
"""Optimize the policy q_functions, and temperature coefficient.
Args:
samples_data (dict): Transitions(S,A,R,S') that are sampled from
the replay buffer. It should have the keys 'observation',
'action', 'reward', 'terminal', and 'next_observations'.
Note:
samples_data's entries should be torch.Tensor's with the following
shapes:
observation: :math:`(N, O^*)`
action: :math:`(N, A^*)`
reward: :math:`(N, 1)`
terminal: :math:`(N, 1)`
next_observation: :math:`(N, O^*)`
Returns:
torch.Tensor: loss from actor/policy network after optimization.
torch.Tensor: loss from 1st q-function after optimization.
torch.Tensor: loss from 2nd q-function after optimization.
"""
obs = samples_data['observation']
qf1_loss, qf2_loss = self._critic_objective(samples_data)
self._qf1_optimizer.zero_grad()
qf1_loss.backward()
self._qf1_optimizer.step()
self._qf2_optimizer.zero_grad()
qf2_loss.backward()
self._qf2_optimizer.step()
action_dists = self.policy(obs)[0]
new_actions_pre_tanh, new_actions = (
action_dists.rsample_with_pre_tanh_value())
log_pi_new_actions = action_dists.log_prob(
value=new_actions, pre_tanh_value=new_actions_pre_tanh)
policy_loss = self._actor_objective(samples_data, new_actions,
log_pi_new_actions)
self._policy_optimizer.zero_grad()
policy_loss.backward()
self._policy_optimizer.step()
if self._use_automatic_entropy_tuning:
alpha_loss = self._temperature_objective(log_pi_new_actions,
samples_data)
self._alpha_optimizer.zero_grad()
alpha_loss.backward()
self._alpha_optimizer.step()
return policy_loss, qf1_loss, qf2_loss
def _evaluate_policy(self, epoch):
"""Evaluate the performance of the policy via deterministic rollouts.
Statistics such as (average) discounted return and success rate are
recorded.
Args:
epoch(int): The current training epoch.
Returns:
float: The average return across self._num_evaluation_trajectories
trajectories
"""
eval_trajectories = obtain_evaluation_samples(
self.policy,
self._eval_env,
max_path_length=self._max_eval_path_length,
num_trajs=self._num_evaluation_trajectories)
last_return = log_performance(epoch,
eval_trajectories,
discount=self._discount)
return last_return
def _log_statistics(self, policy_loss, qf1_loss, qf2_loss):
"""Record training statistics to dowel such as losses and returns.
Args:
policy_loss(torch.Tensor): loss from actor/policy network.
qf1_loss(torch.Tensor): loss from 1st qf/critic network.
qf2_loss(torch.Tensor): loss from 2nd qf/critic network.
"""
with torch.no_grad():
tabular.record('AlphaTemperature/mean',
self._log_alpha.exp().mean().item())
tabular.record('Policy/Loss', policy_loss.item())
tabular.record('QF/{}'.format('Qf1Loss'), float(qf1_loss))
tabular.record('QF/{}'.format('Qf2Loss'), float(qf2_loss))
tabular.record('ReplayBuffer/buffer_size',
self.replay_buffer.n_transitions_stored)
tabular.record('Average/TrainAverageReturn',
np.mean(self.episode_rewards))
@property
def networks(self):
"""Return all the networks within the model.
Returns:
list: A list of networks.
"""
return [
self.policy, self._qf1, self._qf2, self._target_qf1,
self._target_qf2
]
def to(self, device=None):
"""Put all the networks within the model on device.
Args:
device (str): ID of GPU or CPU.
"""
if device is None:
device = global_device()
for net in self.networks:
net.to(device)
if not self._use_automatic_entropy_tuning:
self._log_alpha = torch.Tensor([self._fixed_alpha
]).log().to(device)
else:
self._log_alpha = torch.Tensor([self._initial_log_entropy
]).to(device).requires_grad_()
self._alpha_optimizer = self._optimizer([self._log_alpha],
lr=self._policy_lr)
| 21,875 | 40.908046 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/algos/trpo.py | """Trust Region Policy Optimization."""
import torch
from garage.torch.algos import VPG
from garage.torch.optimizers import ConjugateGradientOptimizer
from garage.torch.optimizers import OptimizerWrapper
class TRPO(VPG):
"""Trust Region Policy Optimization (TRPO).
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
max_path_length (int): Maximum length of a single rollout.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
        use_softplus_entropy (bool): Whether to pass the entropy estimate
            through a softplus function to prevent it from becoming negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
"""
def __init__(self,
env_spec,
policy,
value_function,
policy_optimizer=None,
vf_optimizer=None,
max_path_length=100,
num_train_per_epoch=1,
discount=0.99,
gae_lambda=0.98,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy'):
if policy_optimizer is None:
policy_optimizer = OptimizerWrapper(
(ConjugateGradientOptimizer, dict(max_constraint_value=0.01)),
policy)
if vf_optimizer is None:
vf_optimizer = OptimizerWrapper(
(torch.optim.Adam, dict(lr=2.5e-4)),
value_function,
max_optimization_epochs=10,
minibatch_size=64)
super().__init__(env_spec=env_spec,
policy=policy,
value_function=value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
max_path_length=max_path_length,
num_train_per_epoch=num_train_per_epoch,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method)
def _compute_objective(self, advantages, obs, actions, rewards):
r"""Compute objective value.
Args:
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated objective values
with shape :math:`(N \dot [T], )`.
"""
with torch.no_grad():
old_ll = self._old_policy(obs)[0].log_prob(actions)
new_ll = self.policy(obs)[0].log_prob(actions)
likelihood_ratio = (new_ll - old_ll).exp()
# Calculate surrogate
surrogate = likelihood_ratio * advantages
return surrogate
def _train_policy(self, obs, actions, rewards, advantages):
r"""Train the policy.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N, A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, )`.
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated mean scalar value of policy loss (float).
"""
self._policy_optimizer.zero_grad()
loss = self._compute_loss_with_adv(obs, actions, rewards, advantages)
loss.backward()
self._policy_optimizer.step(
f_loss=lambda: self._compute_loss_with_adv(obs, actions, rewards,
advantages),
f_constraint=lambda: self._compute_kl_constraint(obs))
return loss
| 5,987 | 40.296552 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/algos/vpg.py | """Vanilla Policy Gradient (REINFORCE)."""
import collections
import copy
from dowel import tabular
import numpy as np
import torch
import torch.nn.functional as F
from garage import log_performance, TrajectoryBatch
from garage.misc import tensor_utils as tu
from garage.np.algos.rl_algorithm import RLAlgorithm
from garage.sampler import OnPolicyVectorizedSampler
from garage.torch import (compute_advantages, filter_valids, pad_to_last)
from garage.torch.optimizers import OptimizerWrapper
class VPG(RLAlgorithm):
"""Vanilla Policy Gradient (REINFORCE).
    VPG, also known as REINFORCE, trains a stochastic policy in an on-policy way.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.torch.policies.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
max_path_length (int): Maximum length of a single rollout.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
        use_softplus_entropy (bool): Whether to pass the entropy estimate
            through a softplus function to prevent it from becoming negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
"""
def __init__(
self,
env_spec,
policy,
value_function,
policy_optimizer=None,
vf_optimizer=None,
max_path_length=500,
num_train_per_epoch=1,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
):
self.discount = discount
self.policy = policy
self.max_path_length = max_path_length
self._value_function = value_function
self._gae_lambda = gae_lambda
self._center_adv = center_adv
self._positive_adv = positive_adv
self._policy_ent_coeff = policy_ent_coeff
self._use_softplus_entropy = use_softplus_entropy
self._stop_entropy_gradient = stop_entropy_gradient
self._entropy_method = entropy_method
self._n_samples = num_train_per_epoch
self._env_spec = env_spec
self._maximum_entropy = (entropy_method == 'max')
        self._entropy_regularized = (entropy_method == 'regularized')
self._check_entropy_configuration(entropy_method, center_adv,
stop_entropy_gradient,
policy_ent_coeff)
self._episode_reward_mean = collections.deque(maxlen=100)
self.sampler_cls = OnPolicyVectorizedSampler
if policy_optimizer:
self._policy_optimizer = policy_optimizer
else:
self._policy_optimizer = OptimizerWrapper(torch.optim.Adam, policy)
if vf_optimizer:
self._vf_optimizer = vf_optimizer
else:
self._vf_optimizer = OptimizerWrapper(torch.optim.Adam,
value_function)
self._old_policy = copy.deepcopy(self.policy)
@staticmethod
def _check_entropy_configuration(entropy_method, center_adv,
stop_entropy_gradient, policy_ent_coeff):
if entropy_method not in ('max', 'regularized', 'no_entropy'):
raise ValueError('Invalid entropy_method')
if entropy_method == 'max':
if center_adv:
raise ValueError('center_adv should be False when '
'entropy_method is max')
if not stop_entropy_gradient:
raise ValueError('stop_gradient should be True when '
'entropy_method is max')
if entropy_method == 'no_entropy':
if policy_ent_coeff != 0.0:
raise ValueError('policy_ent_coeff should be zero '
'when there is no entropy method')
def train_once(self, itr, paths):
"""Train the algorithm once.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
numpy.float64: Calculated mean value of undiscounted returns.
"""
obs, actions, rewards, returns, valids, baselines = \
self.process_samples(paths)
if self._maximum_entropy:
policy_entropies = self._compute_policy_entropy(obs)
rewards += self._policy_ent_coeff * policy_entropies
obs_flat = torch.cat(filter_valids(obs, valids))
actions_flat = torch.cat(filter_valids(actions, valids))
rewards_flat = torch.cat(filter_valids(rewards, valids))
returns_flat = torch.cat(filter_valids(returns, valids))
advs_flat = self._compute_advantage(rewards, valids, baselines)
with torch.no_grad():
policy_loss_before = self._compute_loss_with_adv(
obs_flat, actions_flat, rewards_flat, advs_flat)
vf_loss_before = self._value_function.compute_loss(
obs_flat, returns_flat)
kl_before = self._compute_kl_constraint(obs)
self._train(obs_flat, actions_flat, rewards_flat, returns_flat,
advs_flat)
with torch.no_grad():
policy_loss_after = self._compute_loss_with_adv(
obs_flat, actions_flat, rewards_flat, advs_flat)
vf_loss_after = self._value_function.compute_loss(
obs_flat, returns_flat)
kl_after = self._compute_kl_constraint(obs)
policy_entropy = self._compute_policy_entropy(obs)
with tabular.prefix(self.policy.name):
tabular.record('/LossBefore', policy_loss_before.item())
tabular.record('/LossAfter', policy_loss_after.item())
tabular.record('/dLoss',
(policy_loss_before - policy_loss_after).item())
tabular.record('/KLBefore', kl_before.item())
tabular.record('/KL', kl_after.item())
tabular.record('/Entropy', policy_entropy.mean().item())
with tabular.prefix(self._value_function.name):
tabular.record('/LossBefore', vf_loss_before.item())
tabular.record('/LossAfter', vf_loss_after.item())
tabular.record('/dLoss',
vf_loss_before.item() - vf_loss_after.item())
self._old_policy.load_state_dict(self.policy.state_dict())
undiscounted_returns = log_performance(
itr,
TrajectoryBatch.from_trajectory_list(self._env_spec, paths),
discount=self.discount)
return np.mean(undiscounted_returns)
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in runner.step_epochs():
for _ in range(self._n_samples):
runner.step_path = runner.obtain_samples(runner.step_itr)
last_return = self.train_once(runner.step_itr,
runner.step_path)
runner.step_itr += 1
return last_return
def _train(self, obs, actions, rewards, returns, advs):
r"""Train the policy and value function with minibatch.
Args:
obs (torch.Tensor): Observation from the environment with shape
:math:`(N, O*)`.
actions (torch.Tensor): Actions fed to the environment with shape
:math:`(N, A*)`.
rewards (torch.Tensor): Acquired rewards with shape :math:`(N, )`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
advs (torch.Tensor): Advantage value at each step with shape
:math:`(N, )`.
"""
for dataset in self._policy_optimizer.get_minibatch(
obs, actions, rewards, advs):
self._train_policy(*dataset)
for dataset in self._vf_optimizer.get_minibatch(obs, returns):
self._train_value_function(*dataset)
def _train_policy(self, obs, actions, rewards, advantages):
r"""Train the policy.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N, A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, )`.
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated mean scalar value of policy loss (float).
"""
self._policy_optimizer.zero_grad()
loss = self._compute_loss_with_adv(obs, actions, rewards, advantages)
loss.backward()
self._policy_optimizer.step()
return loss
def _train_value_function(self, obs, returns):
r"""Train the value function.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, O*)`.
returns (torch.Tensor): Acquired returns
with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated mean scalar value of value function loss
(float).
"""
self._vf_optimizer.zero_grad()
loss = self._value_function.compute_loss(obs, returns)
loss.backward()
self._vf_optimizer.step()
return loss
def _compute_loss(self, obs, actions, rewards, valids, baselines):
r"""Compute mean value of loss.
Notes: P is the maximum path length (self.max_path_length)
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, P, O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N, P, A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, P)`.
            valids (list[int]): Numbers of valid steps in each path.
baselines (torch.Tensor): Value function estimation at each step
with shape :math:`(N, P)`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
"""
obs_flat = torch.cat(filter_valids(obs, valids))
actions_flat = torch.cat(filter_valids(actions, valids))
rewards_flat = torch.cat(filter_valids(rewards, valids))
advantages_flat = self._compute_advantage(rewards, valids, baselines)
return self._compute_loss_with_adv(obs_flat, actions_flat,
rewards_flat, advantages_flat)
def _compute_loss_with_adv(self, obs, actions, rewards, advantages):
r"""Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of objective.
"""
objectives = self._compute_objective(advantages, obs, actions, rewards)
        if self._entropy_regularized:
policy_entropies = self._compute_policy_entropy(obs)
objectives += self._policy_ent_coeff * policy_entropies
return -objectives.mean()
def _compute_advantage(self, rewards, valids, baselines):
r"""Compute mean value of loss.
Notes: P is the maximum path length (self.max_path_length)
Args:
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, P)`.
            valids (list[int]): Numbers of valid steps in each path.
baselines (torch.Tensor): Value function estimation at each step
with shape :math:`(N, P)`.
Returns:
torch.Tensor: Calculated advantage values given rewards and
baselines with shape :math:`(N \dot [T], )`.
"""
advantages = compute_advantages(self.discount, self._gae_lambda,
self.max_path_length, baselines,
rewards)
advantage_flat = torch.cat(filter_valids(advantages, valids))
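        # Center the advantages; note that, as written, the code divides by
        # the variance plus 1e-8 rather than by the standard deviation.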
if self._center_adv:
means = advantage_flat.mean()
variance = advantage_flat.var()
advantage_flat = (advantage_flat - means) / (variance + 1e-8)
if self._positive_adv:
advantage_flat -= advantage_flat.min()
return advantage_flat
def _compute_kl_constraint(self, obs):
r"""Compute KL divergence.
Compute the KL divergence between the old policy distribution and
current policy distribution.
Notes: P is the maximum path length (self.max_path_length)
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, P, O*)`.
Returns:
torch.Tensor: Calculated mean scalar value of KL divergence
(float).
"""
with torch.no_grad():
old_dist = self._old_policy(obs)[0]
new_dist = self.policy(obs)[0]
kl_constraint = torch.distributions.kl.kl_divergence(
old_dist, new_dist)
return kl_constraint.mean()
def _compute_policy_entropy(self, obs):
r"""Compute entropy value of probability distribution.
Notes: P is the maximum path length (self.max_path_length)
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, P, O*)`.
Returns:
torch.Tensor: Calculated entropy values given observation
with shape :math:`(N, P)`.
"""
if self._stop_entropy_gradient:
with torch.no_grad():
policy_entropy = self.policy(obs)[0].entropy()
else:
policy_entropy = self.policy(obs)[0].entropy()
# This prevents entropy from becoming negative for small policy std
if self._use_softplus_entropy:
policy_entropy = F.softplus(policy_entropy)
return policy_entropy
def _compute_objective(self, advantages, obs, actions, rewards):
r"""Compute objective value.
Args:
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated objective values
with shape :math:`(N \dot [T], )`.
"""
del rewards
log_likelihoods = self.policy(obs)[0].log_prob(actions)
return log_likelihoods * advantages
def process_samples(self, paths):
r"""Process sample data based on the collected paths.
Notes: P is the maximum path length (self.max_path_length)
Args:
paths (list[dict]): A list of collected paths
Returns:
torch.Tensor: The observations of the environment
with shape :math:`(N, P, O*)`.
torch.Tensor: The actions fed to the environment
with shape :math:`(N, P, A*)`.
torch.Tensor: The acquired rewards with shape :math:`(N, P)`.
            list[int]: Numbers of valid steps in each path.
torch.Tensor: Value function estimation at each step
with shape :math:`(N, P)`.
"""
valids = torch.Tensor([len(path['actions']) for path in paths]).int()
obs = torch.stack([
pad_to_last(path['observations'],
total_length=self.max_path_length,
axis=0) for path in paths
])
actions = torch.stack([
pad_to_last(path['actions'],
total_length=self.max_path_length,
axis=0) for path in paths
])
rewards = torch.stack([
pad_to_last(path['rewards'], total_length=self.max_path_length)
for path in paths
])
returns = torch.stack([
pad_to_last(tu.discount_cumsum(path['rewards'],
self.discount).copy(),
total_length=self.max_path_length) for path in paths
])
with torch.no_grad():
baselines = self._value_function(obs)
return obs, actions, rewards, returns, valids, baselines
| 18,850 | 37.788066 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/distributions/__init__.py | """PyTorch Custom Distributions."""
from garage.torch.distributions.tanh_normal import TanhNormal
__all__ = ['TanhNormal']
| 124 | 24 | 61 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/distributions/tanh_normal.py | """A Gaussian distribution with tanh transformation."""
import torch
from torch.distributions import Normal
from torch.distributions.independent import Independent
class TanhNormal(torch.distributions.Distribution):
r"""A distribution induced by applying a tanh transformation to a Gaussian random variable.
Algorithms like SAC and Pearl use this transformed distribution.
It can be thought of as a distribution of X where
:math:`Y ~ \mathcal{N}(\mu, \sigma)`
:math:`X = tanh(Y)`
Args:
loc (torch.Tensor): The mean of this distribution.
scale (torch.Tensor): The stdev of this distribution.
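    Example:
        An illustrative sketch (arbitrary shapes and values), using only
        methods defined on this class::
            dist = TanhNormal(loc=torch.zeros(2), scale=torch.ones(2))
            pre_tanh, action = dist.rsample_with_pre_tanh_value()
            log_p = dist.log_prob(action, pre_tanh_value=pre_tanh)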
""" # noqa: 501
def __init__(self, loc, scale):
self._normal = Independent(Normal(loc, scale), 1)
super().__init__(batch_shape=self._normal.batch_shape,
event_shape=self._normal.event_shape)
def log_prob(self, value, pre_tanh_value=None, epsilon=1e-6):
"""The log likelihood of a sample on the this Tanh Distribution.
Args:
value (torch.Tensor): The sample whose loglikelihood is being
computed.
pre_tanh_value (torch.Tensor): The value prior to having the tanh
function applied to it but after it has been sampled from the
normal distribution.
epsilon (float): Regularization constant. Making this value larger
makes the computation more stable but less precise.
        Note:
            When pre_tanh_value is None, an estimate is made of what the
            value is. This leads to a worse estimate of the log_prob.
            If the value was produced by `sample` or `rsample`, one can
            instead keep the pre-tanh value returned by
            `rsample_with_pre_tanh_value` and pass it in here.
Returns:
torch.Tensor: The log likelihood of value on the distribution.
"""
# pylint: disable=arguments-differ
if pre_tanh_value is None:
            # Estimate the pre-tanh value with a stabilized atanh so that
            # TanhNormal.log_prob(1.0) does not return inf.
pre_tanh_value = torch.log((1 + epsilon + value) / (1 + epsilon - value)) / 2
norm_lp = self._normal.log_prob(pre_tanh_value)
ret = (norm_lp - torch.sum(
torch.log(self._clip_but_pass_gradient((1. - value**2)) + epsilon),
axis=-1))
return ret
def sample(self, sample_shape=torch.Size()):
"""Return a sample, sampled from this TanhNormal Distribution.
Args:
sample_shape (list): Shape of the returned value.
Note:
Gradients `do not` pass through this operation.
Returns:
torch.Tensor: Sample from this TanhNormal distribution.
"""
with torch.no_grad():
return self.rsample(sample_shape=sample_shape)
def rsample(self, sample_shape=torch.Size()):
"""Return a sample, sampled from this TanhNormal Distribution.
Args:
sample_shape (list): Shape of the returned value.
Note:
Gradients pass through this operation.
Returns:
torch.Tensor: Sample from this TanhNormal distribution.
"""
z = self._normal.rsample(sample_shape)
return torch.tanh(z)
def rsample_with_pre_tanh_value(self, sample_shape=torch.Size()):
"""Return a sample, sampled from this TanhNormal distribution.
Returns the sampled value before the tanh transform is applied and the
sampled value with the tanh transform applied to it.
Args:
sample_shape (list): shape of the return.
Note:
Gradients pass through this operation.
Returns:
torch.Tensor: Samples from this distribution.
torch.Tensor: Samples from the underlying
:obj:`torch.distributions.Normal` distribution, prior to being
transformed with `tanh`.
"""
z = self._normal.rsample(sample_shape)
return z, torch.tanh(z)
def cdf(self, value):
"""Returns the CDF at the value.
Returns the cumulative density/mass function evaluated at
`value` on the underlying normal distribution.
Args:
value (torch.Tensor): The element where the cdf is being evaluated
at.
Returns:
torch.Tensor: the result of the cdf being computed.
"""
return self._normal.cdf(value)
def icdf(self, value):
"""Returns the icdf function evaluated at `value`.
Returns the icdf function evaluated at `value` on the underlying
normal distribution.
Args:
value (torch.Tensor): The element where the cdf is being evaluated
at.
Returns:
torch.Tensor: the result of the cdf being computed.
"""
return self._normal.icdf(value)
@classmethod
def _from_distribution(cls, new_normal):
"""Construct a new TanhNormal distribution from a normal distribution.
Args:
new_normal (Independent(Normal)): underlying normal dist for
the new TanhNormal distribution.
Returns:
TanhNormal: A new distribution whose underlying normal dist
is new_normal.
"""
# pylint: disable=protected-access
new = cls(torch.zeros(1), torch.zeros(1))
new._normal = new_normal
return new
def expand(self, batch_shape, _instance=None):
"""Returns a new TanhNormal distribution.
(or populates an existing instance provided by a derived class) with
batch dimensions expanded to `batch_shape`. This method calls
:class:`~torch.Tensor.expand` on the distribution's parameters. As
such, this does not allocate new memory for the expanded distribution
instance. Additionally, this does not repeat any args checking or
parameter broadcasting in `__init__.py`, when an instance is first
created.
Args:
batch_shape (torch.Size): the desired expanded size.
_instance(instance): new instance provided by subclasses that
need to override `.expand`.
Returns:
Instance: New distribution instance with batch dimensions expanded
to `batch_size`.
"""
new_normal = self._normal.expand(batch_shape, _instance)
new = self._from_distribution(new_normal)
return new
def enumerate_support(self, expand=True):
"""Returns tensor containing all values supported by a discrete dist.
The result will enumerate over dimension 0, so the shape
of the result will be `(cardinality,) + batch_shape + event_shape`
(where `event_shape = ()` for univariate distributions).
Note that this enumerates over all batched tensors in lock-step
`[[0, 0], [1, 1], ...]`. With `expand=False`, enumeration happens
along dim 0, but with the remaining batch dimensions being
        singleton dimensions, `[[0], [1], ...]`.
To iterate over the full Cartesian product use
`itertools.product(m.enumerate_support())`.
Args:
expand (bool): whether to expand the support over the
batch dims to match the distribution's `batch_shape`.
Note:
Calls the enumerate_support function of the underlying normal
distribution.
Returns:
torch.Tensor: Tensor iterating over dimension 0.
"""
return self._normal.enumerate_support(expand)
@property
def mean(self):
"""torch.Tensor: mean of the distribution."""
return torch.tanh(self._normal.mean)
@property
def variance(self):
"""torch.Tensor: variance of the underlying normal distribution."""
return self._normal.variance
def entropy(self):
"""Returns entropy of the underlying normal distribution.
Returns:
torch.Tensor: entropy of the underlying normal distribution.
"""
return self._normal.entropy()
@staticmethod
def _clip_but_pass_gradient(x, lower=0., upper=1.):
"""Clipping function that allows for gradients to flow through.
Args:
x (torch.Tensor): value to be clipped
lower (float): lower bound of clipping
upper (float): upper bound of clipping
Returns:
torch.Tensor: x clipped between lower and upper.
"""
clip_up = (x > upper).float()
clip_low = (x < lower).float()
with torch.no_grad():
clip = ((upper - x) * clip_up + (lower - x) * clip_low)
return x + clip
def __repr__(self):
"""Returns the parameterization of the distribution.
Returns:
str: The parameterization of the distribution and underlying
distribution.
"""
return self.__class__.__name__
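# --- Illustrative usage sketch (added for clarity; not part of the original
# garage source). It exercises the point made in `log_prob`: keeping the
# pre-tanh sample from `rsample_with_pre_tanh_value` gives an exact
# log-likelihood, while omitting it falls back to the stabilized atanh
# estimate. The sizes below are illustrative assumptions.
if __name__ == '__main__':
    dist = TanhNormal(loc=torch.zeros(2), scale=torch.ones(2))
    pre_tanh, action = dist.rsample_with_pre_tanh_value()
    exact = dist.log_prob(action, pre_tanh_value=pre_tanh)
    estimated = dist.log_prob(action)  # reconstructs the pre-tanh value
    print('log_prob with pre-tanh value:', exact.item())
    print('log_prob with atanh estimate:', estimated.item())
    print('mean of the squashed distribution:', dist.mean)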
| 9,062 | 33.071429 | 95 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/embeddings/__init__.py | """PyTorch embedding modules for meta-learning algorithms."""
from garage.torch.embeddings.mlp_encoder import MLPEncoder
__all__ = ['MLPEncoder']
| 148 | 23.833333 | 61 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/embeddings/mlp_encoder.py | """An MLP network for encoding context of RL tasks."""
import akro
import numpy as np
from garage import InOutSpec
from garage.np.embeddings import Encoder
from garage.torch.modules import MLPModule
class MLPEncoder(MLPModule, Encoder):
"""This MLP network encodes context of RL tasks.
    Context is stored in terms of observation, action, and reward, and this
network uses an MLP module for encoding it.
Args:
input_dim (int) : Dimension of the network input.
output_dim (int): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable or torch.nn.Module): Activation
function for intermediate dense layer(s). It should return a
            torch.Tensor. Set it to None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable or torch.nn.Module): Activation
function for output dense layer. It should return a
torch.Tensor. Set it to None to maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
@property
def spec(self):
"""garage.InOutSpec: Input and output space."""
input_space = akro.Box(-np.inf, np.inf, self._input_dim)
output_space = akro.Box(-np.inf, np.inf, self._output_dim)
return InOutSpec(input_space, output_space)
@property
def input_dim(self):
"""int: Dimension of the encoder input."""
return self._input_dim
@property
def output_dim(self):
"""int: Dimension of the encoder output (embedding)."""
return self._output_dim
def reset(self, do_resets=None):
"""Reset the encoder.
        This is effective only for a recurrent encoder; do_resets is
        effective only for a vectorized encoder.
        For a vectorized encoder, do_resets is an array of booleans indicating
        which internal states need to be reset. The length of do_resets should
        be equal to the length of inputs.
Args:
do_resets (numpy.ndarray): Bool array indicating which states
to be reset.
"""
| 2,951 | 37.842105 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/modules/__init__.py | """Pytorch modules."""
from garage.torch.modules.gaussian_mlp_module import \
GaussianMLPIndependentStdModule, GaussianMLPModule, \
GaussianMLPTwoHeadedModule
from garage.torch.modules.mlp_module import MLPModule
from garage.torch.modules.multi_headed_mlp_module import MultiHeadedMLPModule
__all__ = [
'MLPModule',
'MultiHeadedMLPModule',
'GaussianMLPModule',
'GaussianMLPIndependentStdModule',
'GaussianMLPTwoHeadedModule',
]
| 458 | 27.6875 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/modules/gaussian_mlp_module.py | """GaussianMLPModule."""
import abc
import torch
from torch import nn
from torch.distributions import Normal
from torch.distributions.independent import Independent
from garage.torch.distributions import TanhNormal
from garage.torch.modules.mlp_module import MLPModule
from garage.torch.modules.multi_headed_mlp_module import MultiHeadedMLPModule
class GaussianMLPBaseModule(nn.Module):
"""Base of GaussianMLPModel.
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of hidden layer (s).
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s).
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a torch.Tensor.
Set it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation will be applied.
- softplus: the std will be computed as log(1+exp(x)).
layer_normalization (bool): Bool for using layer normalization or not.
normal_distribution_cls (torch.distribution): normal distribution class
to be constructed and returned by a call to forward. By default, is
`torch.distributions.Normal`.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=torch.tanh,
std_hidden_w_init=nn.init.xavier_uniform_,
std_hidden_b_init=nn.init.zeros_,
std_output_nonlinearity=None,
std_output_w_init=nn.init.xavier_uniform_,
std_parameterization='exp',
layer_normalization=False,
normal_distribution_cls=Normal):
super().__init__()
self._input_dim = input_dim
self._hidden_sizes = hidden_sizes
self._action_dim = output_dim
self._learn_std = learn_std
self._std_hidden_sizes = std_hidden_sizes
self._min_std = min_std
self._max_std = max_std
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_hidden_w_init = std_hidden_w_init
self._std_hidden_b_init = std_hidden_b_init
self._std_output_nonlinearity = std_output_nonlinearity
self._std_output_w_init = std_output_w_init
self._std_parameterization = std_parameterization
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._norm_dist_class = normal_distribution_cls
if self._std_parameterization not in ('exp', 'softplus', 'softplus_real'):
raise NotImplementedError
init_std_param = torch.Tensor([init_std]).log()
if self._learn_std:
self._init_std = torch.nn.Parameter(init_std_param)
else:
self._init_std = init_std_param
self.register_buffer('init_std', self._init_std)
self._min_std_param = self._max_std_param = None
if min_std is not None:
self._min_std_param = torch.Tensor([min_std]).log()
self.register_buffer('min_std_param', self._min_std_param)
if max_std is not None:
self._max_std_param = torch.Tensor([max_std]).log()
self.register_buffer('max_std_param', self._max_std_param)
def to(self, *args, **kwargs):
"""Move the module to the specified device.
Args:
*args: args to pytorch to function.
**kwargs: keyword args to pytorch to function.
"""
ret = super().to(*args, **kwargs)
buffers = dict(self.named_buffers())
if not isinstance(self._init_std, torch.nn.Parameter):
self._init_std = buffers['init_std']
self._min_std_param = buffers.get('min_std_param', None)
self._max_std_param = buffers.get('max_std_param', None)
return ret
# Parent module's .to(), .cpu(), and .cuda() call children's ._apply().
def _apply(self, *args, **kwargs):
ret = super()._apply(*args, **kwargs)
buffers = dict(self.named_buffers())
if not isinstance(self._init_std, torch.nn.Parameter):
self._init_std = buffers['init_std']
self._min_std_param = buffers.get('min_std_param', None)
self._max_std_param = buffers.get('max_std_param', None)
return ret
@abc.abstractmethod
def _get_mean_and_log_std(self, *inputs):
pass
def forward(self, *inputs):
"""Forward method.
Args:
*inputs: Input to the module.
Returns:
torch.distributions.independent.Independent: Independent
distribution.
"""
mean, log_std_uncentered = self._get_mean_and_log_std(*inputs)
if self._std_parameterization not in ['softplus_real']:
if self._min_std_param or self._max_std_param:
log_std_uncentered = log_std_uncentered.clamp(
min=(None if self._min_std_param is None else
self._min_std_param.item()),
max=(None if self._max_std_param is None else
self._max_std_param.item()))
if self._std_parameterization == 'exp':
std = log_std_uncentered.exp()
elif self._std_parameterization == 'softplus':
std = log_std_uncentered.exp().exp().add(1.).log()
elif self._std_parameterization == 'softplus_real':
std = log_std_uncentered.exp().add(1.).log()
else:
assert False
dist = self._norm_dist_class(mean, std)
# This control flow is needed because if a TanhNormal distribution is
# wrapped by torch.distributions.Independent, then custom functions
        # such as rsample_with_pre_tanh_value of the TanhNormal distribution
        # are not accessible.
if not isinstance(dist, TanhNormal):
# Makes it so that a sample from the distribution is treated as a
# single sample and not dist.batch_shape samples.
dist = Independent(dist, 1)
return dist
@abc.abstractmethod
def get_last_linear_layers(self):
pass
class GaussianMLPModule(GaussianMLPBaseModule):
"""GaussianMLPModule that mean and std share the same network.
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_parameterization (str): How the std should be parametrized. There
are two options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
normal_distribution_cls (torch.distribution): normal distribution class
to be constructed and returned by a call to forward. By default, is
`torch.distributions.Normal`.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_parameterization='exp',
layer_normalization=False,
normal_distribution_cls=Normal,
**kwargs):
super(GaussianMLPModule,
self).__init__(input_dim=input_dim,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
normal_distribution_cls=normal_distribution_cls)
self._mean_module = MLPModule(
input_dim=self._input_dim,
output_dim=self._action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization,
**kwargs
)
def _get_mean_and_log_std(self, *inputs):
"""Get mean and std of Gaussian distribution given inputs.
Args:
*inputs: Input to the module.
Returns:
torch.Tensor: The mean of Gaussian distribution.
torch.Tensor: The variance of Gaussian distribution.
"""
assert len(inputs) == 1
mean = self._mean_module(*inputs)
broadcast_shape = list(inputs[0].shape[:-1]) + [self._action_dim]
uncentered_log_std = torch.zeros(*broadcast_shape, device=self._init_std.device) + self._init_std
if self._std_parameterization in ['softplus_real']:
uncentered_log_std = uncentered_log_std.exp().exp().add(-1.0).log()
return mean, uncentered_log_std
def get_last_linear_layers(self):
return {
'mean': self._mean_module.get_last_linear_layer(),
}
class GaussianMLPIndependentStdModule(GaussianMLPBaseModule):
"""GaussianMLPModule which has two different mean and std network.
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of hidden layer (s).
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s).
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a torch.Tensor.
Set it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
normal_distribution_cls (torch.distribution): normal distribution class
to be constructed and returned by a call to forward. By default, is
`torch.distributions.Normal`.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=torch.tanh,
std_hidden_w_init=nn.init.xavier_uniform_,
std_hidden_b_init=nn.init.zeros_,
std_output_nonlinearity=None,
std_output_w_init=nn.init.xavier_uniform_,
std_parameterization='exp',
layer_normalization=False,
normal_distribution_cls=Normal,
**kwargs):
super(GaussianMLPIndependentStdModule,
self).__init__(input_dim=input_dim,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_hidden_w_init=std_hidden_w_init,
std_hidden_b_init=std_hidden_b_init,
std_output_nonlinearity=std_output_nonlinearity,
std_output_w_init=std_output_w_init,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
normal_distribution_cls=normal_distribution_cls)
self._mean_module = MLPModule(
input_dim=self._input_dim,
output_dim=self._action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization,
**kwargs)
self._log_std_module = MLPModule(
input_dim=self._input_dim,
output_dim=self._action_dim,
hidden_sizes=self._std_hidden_sizes,
hidden_nonlinearity=self._std_hidden_nonlinearity,
hidden_w_init=self._std_hidden_w_init,
hidden_b_init=self._std_hidden_b_init,
output_nonlinearity=self._std_output_nonlinearity,
output_w_init=self._std_output_w_init,
output_b_init=self._init_std_b,
layer_normalization=self._layer_normalization,
**kwargs)
def _init_std_b(self, b):
"""Default bias initialization function.
Args:
b (torch.Tensor): The bias tensor.
Returns:
torch.Tensor: The bias tensor itself.
"""
if self._std_parameterization not in ['softplus_real']:
return nn.init.constant_(b, self._init_std.item())
else:
return nn.init.constant_(b, self._init_std.exp().exp().add(-1.0).log().item())
def _get_mean_and_log_std(self, *inputs):
"""Get mean and std of Gaussian distribution given inputs.
Args:
*inputs: Input to the module.
Returns:
torch.Tensor: The mean of Gaussian distribution.
torch.Tensor: The variance of Gaussian distribution.
"""
return self._mean_module(*inputs), self._log_std_module(*inputs)
def get_last_linear_layers(self):
return {
'mean': self._mean_module.get_last_linear_layer(),
'std': self._log_std_module.get_last_linear_layer(),
}
class GaussianMLPTwoHeadedModule(GaussianMLPBaseModule):
"""GaussianMLPModule which has only one mean network.
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_parameterization (str): How the std should be parametrized. There
are two options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
normal_distribution_cls (torch.distribution): normal distribution class
to be constructed and returned by a call to forward. By default, is
`torch.distributions.Normal`.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_parameterization='exp',
layer_normalization=False,
normal_distribution_cls=Normal):
super(GaussianMLPTwoHeadedModule,
self).__init__(input_dim=input_dim,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
normal_distribution_cls=normal_distribution_cls)
self._shared_mean_log_std_network = MultiHeadedMLPModule(
n_heads=2,
input_dim=self._input_dim,
output_dims=self._action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearities=self._output_nonlinearity,
output_w_inits=self._output_w_init,
output_b_inits=[
nn.init.zeros_,
                # Choose the bias initializer for the log-std head based on
                # the std parameterization (the conditional must select one
                # of the two lambdas rather than live inside one of them).
                (lambda x: nn.init.constant_(x, self._init_std.item()))
                if self._std_parameterization not in ['softplus_real'] else
                (lambda x: nn.init.constant_(
                    x, self._init_std.exp().exp().add(-1.0).log().item())),
],
layer_normalization=self._layer_normalization)
def _get_mean_and_log_std(self, *inputs):
"""Get mean and std of Gaussian distribution given inputs.
Args:
*inputs: Input to the module.
Returns:
torch.Tensor: The mean of Gaussian distribution.
torch.Tensor: The variance of Gaussian distribution.
"""
return self._shared_mean_log_std_network(*inputs)
def get_last_linear_layers(self):
return {
'mean': self._shared_mean_log_std_network.get_last_linear_layer(),
}
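# --- Illustrative usage sketch (added for clarity; not part of the original
# garage source). Both variants map an observation batch to a torch
# distribution: with the default `exp` parameterization the clamped log-std is
# exponentiated, and wrapping in Independent makes log_prob sum over the
# action dimension. All sizes are illustrative assumptions.
if __name__ == '__main__':
    obs = torch.rand(16, 10)
    shared_std = GaussianMLPModule(input_dim=10,
                                   output_dim=4,
                                   hidden_sizes=(32, 32),
                                   init_std=0.5)
    dist = shared_std(obs)
    actions = dist.rsample()
    print(actions.shape, dist.log_prob(actions).shape)
    # -> torch.Size([16, 4]) torch.Size([16])
    two_headed = GaussianMLPTwoHeadedModule(input_dim=10,
                                            output_dim=4,
                                            hidden_sizes=(32, 32),
                                            normal_distribution_cls=TanhNormal)
    squashed = two_headed(obs).rsample()
    print(squashed.min() >= -1.0, squashed.max() <= 1.0)  # both True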
| 28,760 | 45.0176 | 105 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/modules/mlp_module.py | """MLP Module."""
from torch import nn
from torch.nn import functional as F
from garage.torch.modules.multi_headed_mlp_module import MultiHeadedMLPModule
class MLPModule(MultiHeadedMLPModule):
"""MLP Model.
A Pytorch module composed only of a multi-layer perceptron (MLP), which
maps real-valued inputs to real-valued outputs.
Args:
input_dim (int) : Dimension of the network input.
output_dim (int): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable or torch.nn.Module): Activation function
for intermediate dense layer(s). It should return a torch.Tensor.
Set it to None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable or torch.nn.Module): Activation function
for output dense layer. It should return a torch.Tensor.
Set it to None to maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes,
hidden_nonlinearity=F.relu,
hidden_w_init=nn.init.xavier_normal_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_normal_,
output_b_init=nn.init.zeros_,
layer_normalization=False,
**kwargs):
super().__init__(1, input_dim, output_dim, hidden_sizes,
hidden_nonlinearity, hidden_w_init, hidden_b_init,
output_nonlinearity, output_w_init, output_b_init,
layer_normalization, **kwargs)
self._output_dim = output_dim
# pylint: disable=arguments-differ
def forward(self, input_value):
"""Forward method.
Args:
input_value (torch.Tensor): Input values with (N, *, input_dim)
shape.
Returns:
torch.Tensor: Output value
"""
return super().forward(input_value)[0]
@property
def output_dim(self):
"""Return output dimension of network.
Returns:
int: Output dimension of network.
"""
return self._output_dim
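# --- Illustrative usage sketch (added for clarity; not part of the original
# garage source). `forward` accepts any leading shape (N, *, input_dim); the
# example below feeds a (batch, time, feature) tensor to show that. Sizes are
# illustrative assumptions.
if __name__ == '__main__':
    import torch
    mlp = MLPModule(input_dim=6, output_dim=3, hidden_sizes=(32,))
    x = torch.rand(4, 7, 6)  # 4 trajectories, 7 steps, 6 features each
    print(mlp(x).shape)      # torch.Size([4, 7, 3])
    print(mlp.output_dim)    # 3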
| 3,153 | 36.105882 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/modules/multi_headed_mlp_module.py | """MultiHeadedMLPModule."""
import copy
import torch
import torch.nn as nn
from garagei.torch.modules.spectral_norm import spectral_norm
class MultiHeadedMLPModule(nn.Module):
"""MultiHeadedMLPModule Model.
A PyTorch module composed only of a multi-layer perceptron (MLP) with
multiple parallel output layers which maps real-valued inputs to
real-valued outputs. The length of outputs is n_heads and shape of each
output element is depend on each output dimension
Args:
n_heads (int): Number of different output layers
input_dim (int): Dimension of the network input.
output_dims (int or list or tuple): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable or torch.nn.Module or list or tuple):
Activation function for intermediate dense layer(s).
It should return a torch.Tensor. Set it to None to maintain a
linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearities (callable or torch.nn.Module or list or tuple):
Activation function for output dense layer. It should return a
torch.Tensor. Set it to None to maintain a linear activation.
Size of the parameter should be 1 or equal to n_head
output_w_inits (callable or list or tuple): Initializer function for
the weight of output dense layer(s). The function should return a
torch.Tensor. Size of the parameter should be 1 or equal to n_head
output_b_inits (callable or list or tuple): Initializer function for
the bias of output dense layer(s). The function should return a
torch.Tensor. Size of the parameter should be 1 or equal to n_head
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
n_heads,
input_dim,
output_dims,
hidden_sizes,
hidden_nonlinearity=torch.relu,
hidden_w_init=nn.init.xavier_normal_,
hidden_b_init=nn.init.zeros_,
output_nonlinearities=None,
output_w_inits=nn.init.xavier_normal_,
output_b_inits=nn.init.zeros_,
layer_normalization=False,
bias=True,
spectral_normalization=False,
spectral_coef=1.,
):
super().__init__()
output_dims = self._check_parameter_for_output_layer(
'output_dims', output_dims, n_heads)
output_w_inits = self._check_parameter_for_output_layer(
'output_w_inits', output_w_inits, n_heads)
output_b_inits = self._check_parameter_for_output_layer(
'output_b_inits', output_b_inits, n_heads)
output_nonlinearities = self._check_parameter_for_output_layer(
'output_nonlinearities', output_nonlinearities, n_heads)
self._layers = nn.ModuleList()
prev_size = input_dim
for size in hidden_sizes:
hidden_layers = nn.Sequential()
if layer_normalization:
hidden_layers.add_module('layer_normalization',
nn.LayerNorm(prev_size))
if spectral_normalization:
linear_layer = spectral_norm(nn.Linear(prev_size, size, bias=bias), spectral_coef=spectral_coef)
else:
linear_layer = nn.Linear(prev_size, size, bias=bias)
hidden_w_init(linear_layer.weight)
if bias:
hidden_b_init(linear_layer.bias)
hidden_layers.add_module('linear', linear_layer)
if hidden_nonlinearity:
hidden_layers.add_module('non_linearity',
_NonLinearity(hidden_nonlinearity))
self._layers.append(hidden_layers)
prev_size = size
self._output_layers = nn.ModuleList()
for i in range(n_heads):
output_layer = nn.Sequential()
if spectral_normalization:
linear_layer = spectral_norm(nn.Linear(prev_size, output_dims[i], bias=bias), spectral_coef=spectral_coef)
else:
linear_layer = nn.Linear(prev_size, output_dims[i], bias=bias)
output_w_inits[i](linear_layer.weight)
if bias:
output_b_inits[i](linear_layer.bias)
output_layer.add_module('linear', linear_layer)
if output_nonlinearities[i]:
output_layer.add_module(
'non_linearity', _NonLinearity(output_nonlinearities[i]))
self._output_layers.append(output_layer)
@classmethod
def _check_parameter_for_output_layer(cls, var_name, var, n_heads):
"""Check input parameters for output layer are valid.
Args:
var_name (str): variable name
var (any): variable to be checked
n_heads (int): number of head
Returns:
list: list of variables (length of n_heads)
Raises:
ValueError: if the variable is a list but length of the variable
is not equal to n_heads
"""
if isinstance(var, (list, tuple)):
if len(var) == 1:
return list(var) * n_heads
if len(var) == n_heads:
return var
msg = ('{} should be either an integer or a collection of length '
'n_heads ({}), but {} provided.')
raise ValueError(msg.format(var_name, n_heads, var))
return [copy.deepcopy(var) for _ in range(n_heads)]
# pylint: disable=arguments-differ
def forward(self, input_val):
"""Forward method.
Args:
input_val (torch.Tensor): Input values with (N, *, input_dim)
shape.
Returns:
List[torch.Tensor]: Output values
"""
x = input_val
for layer in self._layers:
x = layer(x)
return [output_layer(x) for output_layer in self._output_layers]
def get_last_linear_layer(self):
for m in reversed(self._layers):
if isinstance(m, nn.Sequential):
for l in reversed(m):
if isinstance(l, nn.Linear):
return l
if isinstance(m, nn.Linear):
return m
return None
class _NonLinearity(nn.Module):
"""Wrapper class for non linear function or module.
Args:
non_linear (callable or type): Non-linear function or type to be
wrapped.
"""
def __init__(self, non_linear):
super().__init__()
if isinstance(non_linear, type):
self.module = non_linear()
elif callable(non_linear):
self.module = copy.deepcopy(non_linear)
else:
raise ValueError(
'Non linear function {} is not supported'.format(non_linear))
# pylint: disable=arguments-differ
def forward(self, input_value):
"""Forward method.
Args:
input_value (torch.Tensor): Input values
Returns:
torch.Tensor: Output value
"""
return self.module(input_value)
# pylint: disable=missing-return-doc, missing-return-type-doc
def __repr__(self):
return repr(self.module)
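# --- Illustrative usage sketch (added for clarity; not part of the original
# garage source). Two heads share one trunk; scalar arguments are broadcast to
# every head by `_check_parameter_for_output_layer`, while a list must have
# length 1 or n_heads. The head sizes below are illustrative assumptions.
if __name__ == '__main__':
    heads = MultiHeadedMLPModule(n_heads=2,
                                 input_dim=8,
                                 output_dims=[2, 1],
                                 hidden_sizes=(16, 16))
    x = torch.rand(5, 8)
    first, second = heads(x)
    print(first.shape, second.shape)
    # -> torch.Size([5, 2]) torch.Size([5, 1])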
| 7,937 | 36.443396 | 122 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/optimizers/__init__.py | """PyTorch optimizers."""
from garage.torch.optimizers.conjugate_gradient_optimizer import (
ConjugateGradientOptimizer)
from garage.torch.optimizers.differentiable_sgd import DifferentiableSGD
from garage.torch.optimizers.optimizer_wrapper import OptimizerWrapper
__all__ = [
'OptimizerWrapper', 'ConjugateGradientOptimizer', 'DifferentiableSGD'
]
| 358 | 34.9 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/optimizers/conjugate_gradient_optimizer.py | """Conjugate Gradient Optimizer.
Computes the decent direction using the conjugate gradient method, and then
computes the optimal step size that will satisfy the KL divergence constraint.
Finally, it performs a backtracking line search to optimize the objective.
"""
import warnings
from dowel import logger
import numpy as np
import torch
from torch.optim import Optimizer
from garage.misc.tensor_utils import unflatten_tensors
def _build_hessian_vector_product(func, params, reg_coeff=1e-5):
"""Computes Hessian-vector product using Pearlmutter's algorithm.
`Pearlmutter, Barak A. "Fast exact multiplication by the Hessian." Neural
computation 6.1 (1994): 147-160.`
Args:
func (callable): A function that returns a torch.Tensor. Hessian of
the return value will be computed.
params (list[torch.Tensor]): A list of function parameters.
reg_coeff (float): A small value so that A -> A + reg*I.
Returns:
function: It can be called to get the final result.
"""
param_shapes = [p.shape or torch.Size([1]) for p in params]
f = func()
f_grads = torch.autograd.grad(f, params, create_graph=True)
def _eval(vector):
"""The evaluation function.
Args:
vector (torch.Tensor): The vector to be multiplied with
Hessian.
Returns:
torch.Tensor: The product of Hessian of function f and v.
"""
unflatten_vector = unflatten_tensors(vector, param_shapes)
assert len(f_grads) == len(unflatten_vector)
grad_vector_product = torch.sum(
torch.stack(
[torch.sum(g * x) for g, x in zip(f_grads, unflatten_vector)]))
hvp = list(
torch.autograd.grad(grad_vector_product, params,
retain_graph=True))
for i, (hx, p) in enumerate(zip(hvp, params)):
if hx is None:
hvp[i] = torch.zeros_like(p)
flat_output = torch.cat([h.reshape(-1) for h in hvp])
return flat_output + reg_coeff * vector
return _eval
def _conjugate_gradient(f_Ax, b, cg_iters, residual_tol=1e-10):
"""Use Conjugate Gradient iteration to solve Ax = b. Demmel p 312.
Args:
f_Ax (callable): A function to compute Hessian vector product.
b (torch.Tensor): Right hand side of the equation to solve.
cg_iters (int): Number of iterations to run conjugate gradient
algorithm.
        residual_tol (float): Tolerance for convergence.
Returns:
torch.Tensor: Solution x* for equation Ax = b.
"""
p = b.clone()
r = b.clone()
x = torch.zeros_like(b)
rdotr = torch.dot(r, r)
for _ in range(cg_iters):
z = f_Ax(p)
v = rdotr / torch.dot(p, z)
x += v * p
r -= v * z
newrdotr = torch.dot(r, r)
mu = newrdotr / rdotr
p = r + mu * p
rdotr = newrdotr
if rdotr < residual_tol:
break
return x
class ConjugateGradientOptimizer(Optimizer):
"""Performs constrained optimization via backtracking line search.
The search direction is computed using a conjugate gradient algorithm,
which gives x = A^{-1}g, where A is a second order approximation of the
constraint and g is the gradient of the loss function.
Args:
params (iterable): Iterable of parameters to optimize.
max_constraint_value (float): Maximum constraint value.
cg_iters (int): The number of CG iterations used to calculate A^-1 g
max_backtracks (int): Max number of iterations for backtrack
linesearch.
backtrack_ratio (float): backtrack ratio for backtracking line search.
hvp_reg_coeff (float): A small value so that A -> A + reg*I. It is
used by Hessian Vector Product calculation.
accept_violation (bool): whether to accept the descent step if it
violates the line search condition after exhausting all
backtracking budgets.
"""
def __init__(self,
params,
max_constraint_value,
cg_iters=10,
max_backtracks=15,
backtrack_ratio=0.8,
hvp_reg_coeff=1e-5,
accept_violation=False):
super().__init__(params, {})
self._max_constraint_value = max_constraint_value
self._cg_iters = cg_iters
self._max_backtracks = max_backtracks
self._backtrack_ratio = backtrack_ratio
self._hvp_reg_coeff = hvp_reg_coeff
self._accept_violation = accept_violation
def step(self, f_loss, f_constraint): # pylint: disable=arguments-differ
"""Take an optimization step.
Args:
f_loss (callable): Function to compute the loss.
f_constraint (callable): Function to compute the constraint value.
"""
# Collect trainable parameters and gradients
params = []
grads = []
for group in self.param_groups:
for p in group['params']:
if p.grad is not None:
params.append(p)
grads.append(p.grad.reshape(-1))
flat_loss_grads = torch.cat(grads)
# Build Hessian-vector-product function
f_Ax = _build_hessian_vector_product(f_constraint, params,
self._hvp_reg_coeff)
# Compute step direction
step_dir = _conjugate_gradient(f_Ax, flat_loss_grads, self._cg_iters)
# Replace nan with 0.
step_dir[step_dir.ne(step_dir)] = 0.
# Compute step size
step_size = np.sqrt(2.0 * self._max_constraint_value *
(1. /
(torch.dot(step_dir, f_Ax(step_dir)) + 1e-8)))
if np.isnan(step_size):
step_size = 1.
descent_step = step_size * step_dir
# Update parameters using backtracking line search
self._backtracking_line_search(params, descent_step, f_loss,
f_constraint)
@property
def state(self):
"""dict: The hyper-parameters of the optimizer."""
return {
'max_constraint_value': self._max_constraint_value,
'cg_iters': self._cg_iters,
'max_backtracks': self._max_backtracks,
'backtrack_ratio': self._backtrack_ratio,
'hvp_reg_coeff': self._hvp_reg_coeff,
'accept_violation': self._accept_violation,
}
@state.setter
def state(self, state):
# _max_constraint_value doesn't have a default value in __init__.
        # The rest of these should match the default values in __init__.
        # These values should only actually get used when unpickling an
        # optimizer saved by an older version of garage (see __setstate__).
self._max_constraint_value = state.get('max_constraint_value', 0.01)
self._cg_iters = state.get('cg_iters', 10)
self._max_backtracks = state.get('max_backtracks', 15)
self._backtrack_ratio = state.get('backtrack_ratio', 0.8)
self._hvp_reg_coeff = state.get('hvp_reg_coeff', 1e-5)
self._accept_violation = state.get('accept_violation', False)
def __setstate__(self, state):
"""Restore the optimizer state.
Args:
state (dict): State dictionary.
"""
if 'hvp_reg_coeff' not in state['state']:
warnings.warn(
'Resuming ConjugateGradientOptimizer with lost state. '
'This behavior is fixed if pickling from garage>=2020.02.0.')
self.defaults = state['defaults']
# Set the fields manually so that the setter gets called.
self.state = state['state']
self.param_groups = state['param_groups']
def _backtracking_line_search(self, params, descent_step, f_loss,
f_constraint):
prev_params = [p.clone() for p in params]
ratio_list = self._backtrack_ratio**np.arange(self._max_backtracks)
loss_before = f_loss()
param_shapes = [p.shape or torch.Size([1]) for p in params]
descent_step = unflatten_tensors(descent_step, param_shapes)
assert len(descent_step) == len(params)
for ratio in ratio_list:
for step, prev_param, param in zip(descent_step, prev_params,
params):
step = ratio * step
new_param = prev_param.data - step
param.data = new_param.data
loss = f_loss()
constraint_val = f_constraint()
if (loss < loss_before
and constraint_val <= self._max_constraint_value):
break
if ((torch.isnan(loss) or torch.isnan(constraint_val)
or loss >= loss_before
or constraint_val >= self._max_constraint_value)
and not self._accept_violation):
logger.log('Line search condition violated. Rejecting the step!')
if torch.isnan(loss):
logger.log('Violated because loss is NaN')
if torch.isnan(constraint_val):
logger.log('Violated because constraint is NaN')
if loss >= loss_before:
logger.log('Violated because loss not improving')
if constraint_val >= self._max_constraint_value:
logger.log('Violated because constraint is violated')
for prev, cur in zip(prev_params, params):
cur.data = prev.data
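# --- Hedged sanity-check sketch (added for clarity; not part of the original
# garage source). For f(x) = 0.5 * x^T A x the Hessian is A, so the
# Pearlmutter product above should reproduce A @ v, and conjugate gradient
# should solve A x = b without ever forming A^{-1}. Values are illustrative.
if __name__ == '__main__':
    A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])  # symmetric positive definite
    x = torch.zeros(2, requires_grad=True)
    f_Ax = _build_hessian_vector_product(lambda: 0.5 * (x @ A @ x), [x],
                                         reg_coeff=0.)
    v = torch.tensor([1.0, -1.0])
    print('Hessian-vector product:', f_Ax(v))   # ~ A @ v = [2., -1.]
    b = torch.tensor([1.0, 1.0])
    solution = _conjugate_gradient(f_Ax, b, cg_iters=10)
    print('CG solution:', solution)             # ~ A^{-1} b = [0.2, 0.4]
    print('residual:', f_Ax(solution) - b)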
| 9,550 | 35.59387 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/optimizers/differentiable_sgd.py | """Differentiable Stochastic Gradient Descent Optimizer.
Useful for algorithms such as MAML that needs the gradient of functions of
post-updated parameters with respect to pre-updated parameters.
"""
class DifferentiableSGD:
"""Differentiable Stochastic Gradient Descent.
DifferentiableSGD performs the same optimization step as SGD, but instead
of updating parameters in-place, it saves updated parameters in new
tensors, so that the gradient of functions of new parameters can flow back
to the pre-updated parameters.
Args:
        module (torch.nn.Module): A torch module whose parameters need to be
optimized.
lr (float): Learning rate of stochastic gradient descent.
"""
def __init__(self, module, lr=1e-3):
self.module = module
self.lr = lr
def step(self):
"""Take an optimization step."""
memo = set()
def update(module):
for child in module.children():
if child not in memo:
memo.add(child)
update(child)
params = list(module.named_parameters())
for name, param in params:
# Skip descendant modules' parameters.
if '.' not in name:
if param.grad is None:
continue
# Original SGD uses param.grad.data
new_param = param.add(-self.lr, param.grad)
del module._parameters[name] # pylint: disable=protected-access # noqa: E501
setattr(module, name, new_param)
module._parameters[name] = new_param # pylint: disable=protected-access # noqa: E501
update(self.module)
def zero_grad(self):
"""Sets gradients of all model parameters to zero."""
for param in self.module.parameters():
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
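# --- Illustrative usage sketch (added for clarity; not part of the original
# garage source). It shows the property that distinguishes this class from an
# ordinary optimizer: after `step()` the adapted parameters are new tensors
# that still depend on the pre-update parameters, so a later loss can be
# differentiated through the update itself (as in MAML-style adaptation).
# The tiny model and data are illustrative assumptions.
if __name__ == '__main__':
    import torch
    from torch import nn
    torch.manual_seed(0)
    net = nn.Linear(3, 1)
    theta = list(net.parameters())          # pre-update (meta) parameters
    inner_opt = DifferentiableSGD(net, lr=0.1)
    x = torch.rand(8, 3)
    inner_loss = net(x).pow(2).mean()
    inner_loss.backward(create_graph=True)  # keep the gradient graph
    inner_opt.step()                        # parameters replaced, not mutated
    outer_loss = net(x).mean()              # evaluated with adapted parameters
    meta_grads = torch.autograd.grad(outer_loss, theta)
    print([g.shape for g in meta_grads])    # gradients w.r.t. theta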
| 2,002 | 32.383333 | 105 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/optimizers/optimizer_wrapper.py | """A PyTorch optimizer wrapper that compute loss and optimize module."""
from garage import make_optimizer
from garage.np.optimizers import BatchDataset
class OptimizerWrapper:
"""A wrapper class to handle torch.optim.optimizer.
Args:
optimizer (Union[type, tuple[type, dict]]): Type of optimizer
for policy. This can be an optimizer type such as
`torch.optim.Adam` or a tuple of type and dictionary, where
            dictionary contains arguments to initialize the optimizer,
            e.g. `(torch.optim.Adam, {'lr' : 1e-3})`.
module (torch.nn.Module): Module to be optimized.
max_optimization_epochs (int): Maximum number of epochs for update.
minibatch_size (int): Batch size for optimization.
"""
def __init__(self,
optimizer,
module,
max_optimization_epochs=1,
minibatch_size=None):
self._optimizer = make_optimizer(optimizer, module=module)
self._max_optimization_epochs = max_optimization_epochs
self._minibatch_size = minibatch_size
def get_minibatch(self, *inputs):
r"""Yields a batch of inputs.
Notes: P is the size of minibatch (self._minibatch_size)
Args:
*inputs (list[torch.Tensor]): A list of inputs. Each input has
shape :math:`(N \dot [T], *)`.
Yields:
            list[torch.Tensor]: A minibatch of the inputs. Each batch has
                shape :math:`(P, *)`.
"""
batch_dataset = BatchDataset(inputs, self._minibatch_size)
for _ in range(self._max_optimization_epochs):
for dataset in batch_dataset.iterate():
yield dataset
def zero_grad(self):
r"""Clears the gradients of all optimized :class:`torch.Tensor` s."""
self._optimizer.zero_grad()
def step(self, **closure):
"""Performs a single optimization step.
        Args:
            **closure: Keyword arguments forwarded to the wrapped optimizer's
                `step`, e.g. a `closure` callable that reevaluates the model
                and returns the loss.
"""
self._optimizer.step(**closure)
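# --- Illustrative usage sketch (added for clarity; not part of the original
# garage source). It wraps torch.optim.Adam around a small module and runs a
# few minibatch epochs; the squared-error loss is a stand-in for an
# algorithm-specific objective and all sizes are illustrative assumptions.
if __name__ == '__main__':
    import torch
    from torch import nn
    module = nn.Linear(4, 2)
    wrapper = OptimizerWrapper((torch.optim.Adam, dict(lr=1e-3)),
                               module,
                               max_optimization_epochs=2,
                               minibatch_size=16)
    obs = torch.rand(64, 4)
    targets = torch.rand(64, 2)
    for mb_obs, mb_targets in wrapper.get_minibatch(obs, targets):
        wrapper.zero_grad()
        loss = (module(mb_obs) - mb_targets).pow(2).mean()
        loss.backward()
        wrapper.step()
    print('final minibatch loss:', loss.item())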
| 2,214 | 33.609375 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/policies/__init__.py | """PyTorch Policies."""
from garage.torch.policies.context_conditioned_policy import (
ContextConditionedPolicy)
from garage.torch.policies.policy import Policy
from garage.torch.policies.deterministic_mlp_policy import ( # noqa: I100
DeterministicMLPPolicy)
from garage.torch.policies.gaussian_mlp_policy import GaussianMLPPolicy
from garage.torch.policies.tanh_gaussian_mlp_policy import (
TanhGaussianMLPPolicy)
__all__ = [
'DeterministicMLPPolicy',
'GaussianMLPPolicy',
'Policy',
'TanhGaussianMLPPolicy',
'ContextConditionedPolicy',
]
| 573 | 30.888889 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/policies/context_conditioned_policy.py | """A policy used in training meta reinforcement learning algorithms.
It is used in PEARL (Probabilistic Embeddings for Actor-Critic Reinforcement
Learning). The paper on PEARL can be found at https://arxiv.org/abs/1903.08254.
Code is adapted from https://github.com/katerakelly/oyster.
"""
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from garage.torch import global_device, product_of_gaussians
# pylint: disable=attribute-defined-outside-init
# pylint does not recognize attributes initialized as buffers in constructor
class ContextConditionedPolicy(nn.Module):
"""A policy that outputs actions based on observation and latent context.
In PEARL, policies are conditioned on current state and a latent context
(adaptation data) variable Z. This inference network estimates the
posterior probability of z given past transitions. It uses context
information stored in the encoder to infer the probabilistic value of z and
samples from a policy conditioned on z.
Args:
latent_dim (int): Latent context variable dimension.
context_encoder (garage.torch.embeddings.ContextEncoder): Recurrent or
permutation-invariant context encoder.
policy (garage.torch.policies.Policy): Policy used to train the
network.
use_information_bottleneck (bool): True if latent context is not
deterministic; false otherwise.
use_next_obs (bool): True if next observation is used in context
for distinguishing tasks; false otherwise.
"""
def __init__(self, latent_dim, context_encoder, policy,
use_information_bottleneck, use_next_obs):
super().__init__()
self._latent_dim = latent_dim
self._context_encoder = context_encoder
self._policy = policy
self._use_information_bottleneck = use_information_bottleneck
self._use_next_obs = use_next_obs
# initialize buffers for z distribution and z
# use buffers so latent context can be saved along with model weights
# z_means and z_vars are the params for the gaussian distribution
# over latent task belief maintained in the policy; z is a sample from
# this distribution that the policy is conditioned on
self.register_buffer('z', torch.zeros(1, latent_dim))
self.register_buffer('z_means', torch.zeros(1, latent_dim))
self.register_buffer('z_vars', torch.zeros(1, latent_dim))
self.reset_belief()
def reset_belief(self, num_tasks=1):
r"""Reset :math:`q(z \| c)` to the prior and sample a new z from the prior.
Args:
num_tasks (int): Number of tasks.
"""
# reset distribution over z to the prior
mu = torch.zeros(num_tasks, self._latent_dim).to(global_device())
if self._use_information_bottleneck:
var = torch.ones(num_tasks, self._latent_dim).to(global_device())
else:
var = torch.zeros(num_tasks, self._latent_dim).to(global_device())
self.z_means = mu
self.z_vars = var
# sample a new z from the prior
self.sample_from_belief()
# reset the context collected so far
self._context = None
# reset any hidden state in the encoder network (relevant for RNN)
self._context_encoder.reset()
def sample_from_belief(self):
"""Sample z using distributions from current means and variances."""
if self._use_information_bottleneck:
posteriors = [
torch.distributions.Normal(m, torch.sqrt(s)) for m, s in zip(
torch.unbind(self.z_means), torch.unbind(self.z_vars))
]
z = [d.rsample() for d in posteriors]
self.z = torch.stack(z)
else:
self.z = self.z_means
def update_context(self, timestep):
"""Append single transition to the current context.
Args:
timestep (garage._dtypes.TimeStep): Timestep containing transition
information to be added to context.
"""
o = torch.as_tensor(timestep.observation[None, None, ...],
device=global_device()).float()
a = torch.as_tensor(timestep.action[None, None, ...],
device=global_device()).float()
r = torch.as_tensor(np.array([timestep.reward])[None, None, ...],
device=global_device()).float()
no = torch.as_tensor(timestep.next_observation[None, None, ...],
device=global_device()).float()
if self._use_next_obs:
data = torch.cat([o, a, r, no], dim=2)
else:
data = torch.cat([o, a, r], dim=2)
if self._context is None:
self._context = data
else:
self._context = torch.cat([self._context, data], dim=1)
def infer_posterior(self, context):
r"""Compute :math:`q(z \| c)` as a function of input context and sample new z.
Args:
context (torch.Tensor): Context values, with shape
:math:`(X, N, C)`. X is the number of tasks. N is batch size. C
is the combined size of observation, action, reward, and next
observation if next observation is used in context. Otherwise,
C is the combined size of observation, action, and reward.
"""
params = self._context_encoder.forward(context)
params = params.view(context.size(0), -1,
self._context_encoder.output_dim)
# with probabilistic z, predict mean and variance of q(z | c)
if self._use_information_bottleneck:
mu = params[..., :self._latent_dim]
sigma_squared = F.softplus(params[..., self._latent_dim:])
z_params = [
product_of_gaussians(m, s)
for m, s in zip(torch.unbind(mu), torch.unbind(sigma_squared))
]
self.z_means = torch.stack([p[0] for p in z_params])
self.z_vars = torch.stack([p[1] for p in z_params])
else:
self.z_means = torch.mean(params, dim=1)
self.sample_from_belief()
# pylint: disable=arguments-differ
def forward(self, obs, context):
"""Given observations and context, get actions and probs from policy.
Args:
obs (torch.Tensor): Observation values, with shape
:math:`(X, N, O)`. X is the number of tasks. N is batch size. O
is the size of the flattened observation space.
context (torch.Tensor): Context values, with shape
:math:`(X, N, C)`. X is the number of tasks. N is batch size. C
is the combined size of observation, action, reward, and next
observation if next observation is used in context. Otherwise,
C is the combined size of observation, action, and reward.
Returns:
tuple:
* torch.Tensor: Predicted action values.
* np.ndarray: Mean of distribution.
* np.ndarray: Log std of distribution.
* torch.Tensor: Log likelihood of distribution.
* torch.Tensor: Sampled values from distribution before
applying tanh transformation.
torch.Tensor: z values, with shape :math:`(N, L)`. N is batch size.
L is the latent dimension.
"""
self.infer_posterior(context)
self.sample_from_belief()
task_z = self.z
# task, batch
t, b, _ = obs.size()
obs = obs.view(t * b, -1)
task_z = [z.repeat(b, 1) for z in task_z]
task_z = torch.cat(task_z, dim=0)
# run policy, get log probs and new actions
obs_z = torch.cat([obs, task_z.detach()], dim=1)
dist = self._policy(obs_z)[0]
pre_tanh, actions = dist.rsample_with_pre_tanh_value()
log_pi = dist.log_prob(value=actions, pre_tanh_value=pre_tanh)
log_pi = log_pi.unsqueeze(1)
mean = dist.mean.to('cpu').detach().numpy()
log_std = (dist.variance**.5).log().to('cpu').detach().numpy()
return (actions, mean, log_std, log_pi, pre_tanh), task_z
def get_action(self, obs):
"""Sample action from the policy, conditioned on the task embedding.
Args:
obs (torch.Tensor): Observation values, with shape :math:`(1, O)`.
O is the size of the flattened observation space.
Returns:
torch.Tensor: Output action value, with shape :math:`(1, A)`.
A is the size of the flattened action space.
dict:
* np.ndarray[float]: Mean of the distribution.
* np.ndarray[float]: Standard deviation of logarithmic values
of the distribution.
"""
z = self.z
obs = torch.as_tensor(obs[None], device=global_device()).float()
obs_in = torch.cat([obs, z], dim=1)
action, info = self._policy.get_action(obs_in)
action = np.squeeze(action, axis=0)
info['mean'] = np.squeeze(info['mean'], axis=0)
return action, info
def compute_kl_div(self):
r"""Compute :math:`KL(q(z|c) \| p(z))`.
Returns:
float: :math:`KL(q(z|c) \| p(z))`.
"""
prior = torch.distributions.Normal(
torch.zeros(self._latent_dim).to(global_device()),
torch.ones(self._latent_dim).to(global_device()))
posteriors = [
torch.distributions.Normal(mu, torch.sqrt(var)) for mu, var in zip(
torch.unbind(self.z_means), torch.unbind(self.z_vars))
]
kl_divs = [
torch.distributions.kl.kl_divergence(post, prior)
for post in posteriors
]
kl_div_sum = torch.sum(torch.stack(kl_divs))
return kl_div_sum
@property
def networks(self):
"""Return context_encoder and policy.
Returns:
list: Encoder and policy networks.
"""
return [self._context_encoder, self._policy]
@property
def context(self):
"""Return context.
Returns:
torch.Tensor: Context values, with shape :math:`(X, N, C)`.
X is the number of tasks. N is batch size. C is the combined
size of observation, action, reward, and next observation if
next observation is used in context. Otherwise, C is the
combined size of observation, action, and reward.
"""
return self._context
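# --- Illustrative sketch (not part of the garage API) -----------------------
# infer_posterior() above fuses one Gaussian factor per transition into a
# single posterior over z via a product of Gaussians. The helper below
# re-implements that fusion locally so it can be run without an encoder; the
# latent dimension and the number of transitions are assumptions made only
# for this sketch.
def _product_of_gaussians_sketch(mus, sigmas_squared):
    """Precision-weighted fusion of independent Gaussian factors over z."""
    sigmas_squared = torch.clamp(sigmas_squared, min=1e-7)
    sigma_squared = 1. / torch.sum(1. / sigmas_squared, dim=0)
    mu = sigma_squared * torch.sum(mus / sigmas_squared, dim=0)
    return mu, sigma_squared
if __name__ == '__main__':
    latent_dim = 5  # assumed latent size for the sketch
    raw = torch.randn(8, 2 * latent_dim)  # stand-in for encoder output
    mu, var = _product_of_gaussians_sketch(raw[:, :latent_dim],
                                           F.softplus(raw[:, latent_dim:]))
    z = torch.distributions.Normal(mu, var.sqrt()).rsample()
    print('posterior sample shape:', z.shape)  # (latent_dim,)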
| 10,699 | 39.530303 | 86 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/policies/deterministic_mlp_policy.py | """This modules creates a deterministic policy network.
A neural network can be used as policy method in different RL algorithms.
It accepts an observation of the environment and predicts an action.
"""
import akro
import numpy as np
import torch
from garage.torch.modules import MLPModule
from garage.torch.policies.policy import Policy
class DeterministicMLPPolicy(Policy):
"""Implements a deterministic policy network.
    The policy network selects actions based on the state of the environment.
    It uses a PyTorch neural network module to fit the function pi(s).
"""
def __init__(self, env_spec, name='DeterministicMLPPolicy', **kwargs):
"""Initialize class with multiple attributes.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Policy name.
kwargs : Additional keyword arguments passed to the MLPModule.
"""
super().__init__(env_spec, name)
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self._module = MLPModule(input_dim=self._obs_dim,
output_dim=self._action_dim,
**kwargs)
# pylint: disable=arguments-differ
def forward(self, observations):
"""Compute actions from the observations.
Args:
observations (torch.Tensor): Batch of observations on default
torch device.
Returns:
torch.Tensor: Batch of actions.
"""
return self._module(observations)
def get_action(self, observation):
"""Get a single action given an observation.
Args:
observation (np.ndarray): Observation from the environment.
Returns:
tuple:
* np.ndarray: Predicted action.
                * dict: Empty agent info; the policy is deterministic, so
                    there are no distribution parameters to report.
"""
if not isinstance(observation, np.ndarray) and not isinstance(
observation, torch.Tensor):
observation = self._env_spec.observation_space.flatten(observation)
with torch.no_grad():
observation = torch.Tensor(observation).unsqueeze(0)
action, agent_infos = self.get_actions(observation)
return action[0], {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
"""Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Returns:
tuple:
* np.ndarray: Predicted actions.
* dict:
* list[float]: Mean of the distribution
* list[float]: Log of standard deviation of the
distribution
"""
if not isinstance(observations[0], np.ndarray) and not isinstance(
observations[0], torch.Tensor):
observations = self._env_spec.observation_space.flatten_n(
observations)
# frequently users like to pass lists of torch tensors or lists of
# numpy arrays. This handles those conversions.
if isinstance(observations, list):
if isinstance(observations[0], np.ndarray):
observations = np.stack(observations)
elif isinstance(observations[0], torch.Tensor):
observations = torch.stack(observations)
if isinstance(self._env_spec.observation_space, akro.Image) and \
len(observations.shape) < \
len(self._env_spec.observation_space.shape):
observations = self._env_spec.observation_space.unflatten_n(
observations)
with torch.no_grad():
x = self(torch.Tensor(observations))
return x.numpy(), dict()
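# --- Illustrative usage sketch (assumptions flagged below) ------------------
# A minimal end-to-end check of DeterministicMLPPolicy on CPU. The EnvSpec is
# built by hand from akro spaces purely for this sketch; the space sizes, the
# hidden sizes and the EnvSpec keyword names are assumptions rather than
# requirements of any particular environment.
if __name__ == '__main__':
    from garage.envs.env_spec import EnvSpec
    demo_spec = EnvSpec(
        observation_space=akro.Box(low=-1., high=1., shape=(3,)),
        action_space=akro.Box(low=-1., high=1., shape=(2,)))
    demo_policy = DeterministicMLPPolicy(env_spec=demo_spec,
                                         hidden_sizes=(32, 32))
    demo_action, _ = demo_policy.get_action(np.zeros(3, dtype=np.float32))
    print('action shape:', demo_action.shape)  # expected: (2,)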
| 4,005 | 35.418182 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/policies/gaussian_mlp_policy.py | """GaussianMLPPolicy."""
import torch
from torch import nn
from garage.torch.modules import GaussianMLPModule
from garage.torch.policies.stochastic_policy import StochasticPolicy
class GaussianMLPPolicy(StochasticPolicy):
"""MLP whose outputs are fed into a Normal distribution..
A policy that contains a MLP to make prediction based on a gaussian
distribution.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
min_std (float): Minimum value for std.
max_std (float): Maximum value for std.
std_parameterization (str): How the std should be parametrized. There
are two options:
- exp: the logarithm of the std will be stored, and applied a
exponential transformation
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
name (str): Name of policy.
"""
def __init__(self,
env_spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_parameterization='exp',
layer_normalization=False,
name='GaussianMLPPolicy',
*,
module_cls=GaussianMLPModule):
super().__init__(env_spec, name)
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self._module = module_cls(
input_dim=self._obs_dim,
output_dim=self._action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization)
def forward(self, observations):
"""Compute the action distributions from the observations.
Args:
observations (torch.Tensor): Batch of observations on default
torch device.
Returns:
torch.distributions.Distribution: Batch distribution of actions.
dict[str, torch.Tensor]: Additional agent_info, as torch Tensors
"""
dist = self._module(observations)
return (dist, dict(mean=dist.mean, log_std=(dist.variance**.5).log()))
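# --- Illustrative usage sketch (assumptions flagged below) ------------------
# Shows the (distribution, info) contract of forward() above on CPU. The
# hand-built EnvSpec, the akro box sizes and the batch size are assumptions
# made only for this sketch.
if __name__ == '__main__':
    import akro
    from garage.envs.env_spec import EnvSpec
    demo_spec = EnvSpec(
        observation_space=akro.Box(low=-1., high=1., shape=(4,)),
        action_space=akro.Box(low=-1., high=1., shape=(2,)))
    demo_policy = GaussianMLPPolicy(demo_spec, hidden_sizes=(32, 32))
    dist, info = demo_policy(torch.zeros(7, 4))
    print(dist.sample().shape, info['log_std'].shape)  # both (7, 2)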
| 4,433 | 41.228571 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/policies/policy.py | """Base Policy."""
import abc
import torch
class Policy(torch.nn.Module, abc.ABC):
"""Policy base class.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Name of policy.
"""
def __init__(self, env_spec, name):
super().__init__()
self._env_spec = env_spec
self._name = name
@abc.abstractmethod
def get_action(self, observation):
"""Get a single action given an observation.
Args:
observation (torch.Tensor): Observation from the environment.
Returns:
tuple:
* torch.Tensor: Predicted action.
* dict:
* list[float]: Mean of the distribution
* list[float]: Log of standard deviation of the
distribution
"""
@abc.abstractmethod
def get_actions(self, observations):
"""Get actions given observations.
Args:
observations (torch.Tensor): Observations from the environment.
Returns:
tuple:
* torch.Tensor: Predicted actions.
* dict:
* list[float]: Mean of the distribution
* list[float]: Log of standard deviation of the
distribution
"""
@property
def observation_space(self):
"""The observation space for the environment.
Returns:
akro.Space: Observation space.
"""
return self._env_spec.observation_space
@property
def action_space(self):
"""The action space for the environment.
Returns:
akro.Space: Action space.
"""
return self._env_spec.action_space
def get_param_values(self):
"""Get the parameters to the policy.
This method is included to ensure consistency with TF policies.
Returns:
dict: The parameters (in the form of the state dictionary).
"""
return self.state_dict()
def set_param_values(self, state_dict):
"""Set the parameters to the policy.
This method is included to ensure consistency with TF policies.
Args:
state_dict (dict): State dictionary.
"""
self.load_state_dict(state_dict)
def reset(self, dones=None):
"""Reset the environment.
Args:
dones (numpy.ndarray): Reset values
"""
@property
def name(self):
"""Name of policy.
Returns:
str: Name of policy
"""
return self._name
| 2,648 | 22.236842 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/policies/stochastic_policy.py | """Base Stochastic Policy."""
import abc
import akro
import numpy as np
import torch
from garage.torch import global_device
from garage.torch.policies.policy import Policy
class StochasticPolicy(Policy, abc.ABC):
"""Abstract base class for torch stochastic policies."""
def get_action(self, observation):
r"""Get a single action given an observation.
Args:
observation (np.ndarray): Observation from the environment.
Shape is :math:`env_spec.observation_space`.
Returns:
tuple:
* np.ndarray: Predicted action. Shape is
:math:`env_spec.action_space`.
* dict:
* np.ndarray[float]: Mean of the distribution
* np.ndarray[float]: Standard deviation of logarithmic
values of the distribution.
"""
if not isinstance(observation, np.ndarray) and not isinstance(
observation, torch.Tensor):
observation = self._env_spec.observation_space.flatten(observation)
with torch.no_grad():
if not isinstance(observation, torch.Tensor):
observation = torch.as_tensor(observation).float().to(
global_device())
observation = observation.unsqueeze(0)
action, agent_infos = self.get_actions(observation)
return action[0], {k: v[0] for k, v in agent_infos.items()}
def get_actions(self, observations):
r"""Get actions given observations.
Args:
observations (np.ndarray): Observations from the environment.
Shape is :math:`batch_dim \bullet env_spec.observation_space`.
Returns:
tuple:
* np.ndarray: Predicted actions.
:math:`batch_dim \bullet env_spec.action_space`.
* dict:
* np.ndarray[float]: Mean of the distribution.
* np.ndarray[float]: Standard deviation of logarithmic
values of the distribution.
"""
if not isinstance(observations[0], np.ndarray) and not isinstance(
observations[0], torch.Tensor):
observations = self._env_spec.observation_space.flatten_n(
observations)
# frequently users like to pass lists of torch tensors or lists of
# numpy arrays. This handles those conversions.
if isinstance(observations, list):
if isinstance(observations[0], np.ndarray):
observations = np.stack(observations)
elif isinstance(observations[0], torch.Tensor):
observations = torch.stack(observations)
if isinstance(self._env_spec.observation_space, akro.Image) and \
len(observations.shape) < \
len(self._env_spec.observation_space.shape):
observations = self._env_spec.observation_space.unflatten_n(
observations)
with torch.no_grad():
if not isinstance(observations, torch.Tensor):
observations = torch.as_tensor(observations).float().to(
global_device())
dist, info = self.forward(observations)
return dist.sample().cpu().numpy(), {
k: v.detach().cpu().numpy()
for (k, v) in info.items()
}
# pylint: disable=arguments-differ
@abc.abstractmethod
def forward(self, observations):
"""Compute the action distributions from the observations.
Args:
observations (torch.Tensor): Batch of observations on default
torch device.
Returns:
torch.distributions.Distribution: Batch distribution of actions.
dict[str, torch.Tensor]: Additional agent_info, as torch Tensors.
Do not need to be detached, and can be on any device.
"""
| 3,967 | 37.524272 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/policies/tanh_gaussian_mlp_policy.py | """TanhGaussianMLPPolicy."""
import numpy as np
from torch import nn
from garage.torch.distributions import TanhNormal
from garage.torch.modules import GaussianMLPTwoHeadedModule
from garage.torch.policies.stochastic_policy import StochasticPolicy
class TanhGaussianMLPPolicy(StochasticPolicy):
"""Multiheaded MLP whose outputs are fed into a TanhNormal distribution.
    A policy that contains an MLP to make predictions based on a Gaussian
    distribution with a tanh transformation.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_parameterization (str): How the std should be parametrized. There
are two options:
- exp: the logarithm of the std will be stored, and applied a
exponential transformation
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
env_spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=nn.ReLU,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
init_std=1.0,
min_std=np.exp(-20.),
max_std=np.exp(2.),
std_parameterization='exp',
layer_normalization=False,
*,
name=None,
module_cls=GaussianMLPTwoHeadedModule):
        super().__init__(env_spec, name=name or 'TanhGaussianPolicy')
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self._module = module_cls(
input_dim=self._obs_dim,
output_dim=self._action_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
normal_distribution_cls=TanhNormal)
def forward(self, observations):
"""Compute the action distributions from the observations.
Args:
observations (torch.Tensor): Batch of observations on default
torch device.
Returns:
torch.distributions.Distribution: Batch distribution of actions.
dict[str, torch.Tensor]: Additional agent_info, as torch Tensors
"""
dist = self._module(observations)
ret_mean = dist.mean.cpu()
ret_log_std = (dist.variance.sqrt()).log().cpu()
return dist, dict(mean=ret_mean, log_std=ret_log_std)
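# --- Illustrative usage sketch (assumptions flagged below) ------------------
# Demonstrates that actions drawn from the TanhNormal head stay inside
# (-1, 1). The hand-built EnvSpec and the sizes below are assumptions made
# only for this sketch.
if __name__ == '__main__':
    import akro
    import torch
    from garage.envs.env_spec import EnvSpec
    demo_spec = EnvSpec(
        observation_space=akro.Box(low=-1., high=1., shape=(6,)),
        action_space=akro.Box(low=-1., high=1., shape=(3,)))
    demo_policy = TanhGaussianMLPPolicy(env_spec=demo_spec,
                                        hidden_sizes=(32, 32))
    dist, _ = demo_policy(torch.zeros(5, 6))
    actions = dist.rsample()
    print('max |action|:', actions.abs().max().item())  # strictly below 1.0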
| 4,776 | 42.825688 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/q_functions/__init__.py | """PyTorch Q-functions."""
from garage.torch.q_functions.continuous_mlp_q_function import (
ContinuousMLPQFunction)
__all__ = ['ContinuousMLPQFunction']
| 158 | 25.5 | 64 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/q_functions/continuous_mlp_q_function.py | """This modules creates a continuous Q-function network."""
import torch
from garage.torch.modules import MLPModule
class ContinuousMLPQFunction(MLPModule):
"""
Implements a continuous MLP Q-value network.
It predicts the Q-value for all actions based on the input state. It uses
a PyTorch neural network module to fit the function of Q(s, a).
"""
def __init__(self, env_spec, **kwargs):
"""
Initialize class with multiple attributes.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
            kwargs : Additional keyword arguments passed to the MLPModule.
"""
self._env_spec = env_spec
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
MLPModule.__init__(self,
input_dim=self._obs_dim + self._action_dim,
output_dim=1,
**kwargs)
def forward(self, observations, actions):
"""Return Q-value(s)."""
return super().forward(torch.cat([observations, actions], 1))
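# --- Illustrative usage sketch (assumptions flagged below) ------------------
# Q(s, a) is computed by concatenating state and action before the MLP, so
# both inputs must share the batch dimension. The hand-built EnvSpec and the
# sizes below are assumptions made only for this sketch.
if __name__ == '__main__':
    import akro
    from garage.envs.env_spec import EnvSpec
    demo_spec = EnvSpec(
        observation_space=akro.Box(low=-1., high=1., shape=(4,)),
        action_space=akro.Box(low=-1., high=1., shape=(2,)))
    demo_qf = ContinuousMLPQFunction(env_spec=demo_spec, hidden_sizes=(64, 64))
    q_values = demo_qf(torch.zeros(8, 4), torch.zeros(8, 2))
    print('Q-value shape:', q_values.shape)  # expected: (8, 1)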
| 1,142 | 30.75 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/value_functions/__init__.py | """Value functions which use PyTorch."""
from garage.torch.value_functions.value_function import ValueFunction
from garage.torch.value_functions.gaussian_mlp_value_function import ( # noqa: I100,E501
GaussianMLPValueFunction)
__all__ = ['ValueFunction', 'GaussianMLPValueFunction']
| 288 | 40.285714 | 89 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/value_functions/gaussian_mlp_value_function.py | """A value function based on a GaussianMLP model."""
import torch
from torch import nn
from garage.torch.modules import GaussianMLPModule
from garage.torch.value_functions.value_function import ValueFunction
class GaussianMLPValueFunction(ValueFunction):
"""Gaussian MLP Value Function with Model.
It fits the input data to a gaussian distribution estimated by
a MLP.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
layer_normalization (bool): Bool for using layer normalization or not.
name (str): The name of the value function.
"""
def __init__(self,
env_spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
layer_normalization=False,
name='GaussianMLPValueFunction'):
super(GaussianMLPValueFunction, self).__init__(env_spec, name)
input_dim = env_spec.observation_space.flat_dim
output_dim = 1
self.module = GaussianMLPModule(
input_dim=input_dim,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=None,
max_std=None,
std_parameterization='exp',
layer_normalization=layer_normalization)
def compute_loss(self, obs, returns):
r"""Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
"""
dist = self.module(obs)
ll = dist.log_prob(returns.reshape(-1, 1))
loss = -ll.mean()
return loss
# pylint: disable=arguments-differ
def forward(self, obs):
r"""Predict value based on paths.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(P, O*)`.
Returns:
            torch.Tensor: Calculated values given observations, with
                shape :math:`(P,)`.
"""
return self.module(obs).mean.flatten(-2)
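# --- Illustrative usage sketch (assumptions flagged below) ------------------
# compute_loss() returns the negative mean log-likelihood of the returns under
# the fitted Gaussian, and forward() returns the predicted values. The
# hand-built EnvSpec and the tensor shapes below are assumptions made only for
# this sketch.
if __name__ == '__main__':
    import akro
    from garage.envs.env_spec import EnvSpec
    demo_spec = EnvSpec(
        observation_space=akro.Box(low=-1., high=1., shape=(4,)),
        action_space=akro.Box(low=-1., high=1., shape=(2,)))
    demo_vf = GaussianMLPValueFunction(env_spec=demo_spec,
                                       hidden_sizes=(32, 32))
    demo_obs, demo_returns = torch.zeros(10, 4), torch.zeros(10)
    print('loss:', demo_vf.compute_loss(demo_obs, demo_returns).item())
    print('value shape:', demo_vf(demo_obs).shape)  # expected: (10,)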
| 4,322 | 37.256637 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/torch/value_functions/value_function.py | """Base class for all baselines."""
import abc
import torch.nn as nn
class ValueFunction(abc.ABC, nn.Module):
"""Base class for all baselines.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Value function name, also the variable scope.
"""
def __init__(self, env_spec, name):
super(ValueFunction, self).__init__()
self._mdp_spec = env_spec
self.name = name
@abc.abstractmethod
def compute_loss(self, obs, returns):
r"""Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
"""
| 911 | 24.333333 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/helpers.py | """helper functions for tests and benchmarks."""
import bisect
import itertools
import pickle
import random
import numpy as np
import pytest
from tests.quirks import KNOWN_GYM_RENDER_NOT_IMPLEMENTED
def step_env(env, n=10, render=True):
"""Step env helper.
Args:
env (GarageEnv): Input environment.
n (int): Steps.
render (bool): Whether render the environment.
"""
env.reset()
for _ in range(n):
_, _, done, _ = env.step(env.action_space.sample())
if render:
env.render()
if done:
break
def step_env_with_gym_quirks(env, spec, n=10, render=True,
serialize_env=False):
"""Step env gym helper.
Args:
env (GarageEnv): Input environment.
spec (EnvSpec): The environment specification.
n (int): Steps.
render (bool): Whether to render the environment.
serialize_env (bool): Whether to serialize the environment.
"""
if serialize_env:
# Roundtrip serialization
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip.spec == env.spec
assert round_trip.env.spec.id == env.env.spec.id
assert (round_trip.env.spec.max_episode_steps ==
env.env.spec.max_episode_steps)
env = round_trip
env.reset()
for _ in range(n):
_, _, done, _ = env.step(env.action_space.sample())
if render:
if spec.id not in KNOWN_GYM_RENDER_NOT_IMPLEMENTED:
env.render()
else:
with pytest.raises(NotImplementedError):
env.render()
if done:
break
env.close()
if serialize_env:
# Roundtrip serialization
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip.spec == env.spec
assert round_trip.env.spec.id == env.env.spec.id
assert (round_trip.env.spec.max_episode_steps ==
env.env.spec.max_episode_steps)
def convolve(_input, filter_weights, filter_bias, strides, filters,
in_channels, hidden_nonlinearity):
"""Helper function for performing convolution.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
filter_weights (tuple(tf.Tensor)): The weights of the filters.
filter_bias (tuple(tf.Tensor)): The bias of the filters.
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
in_channels (tuple[int]): The number of input channels.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
Return:
tf.Tensor: The output of the convolution.
"""
batch_size = _input.shape[0]
in_width = _input.shape[1]
in_height = _input.shape[2]
for filter_iter, in_shape, filter_weight, _filter_bias, stride in zip(
filters, in_channels, filter_weights, filter_bias, strides):
filter_width = filter_iter[1][1]
filter_height = filter_iter[1][0]
out_width = int((in_width - filter_width) / stride) + 1
out_height = int((in_height - filter_height) / stride) + 1
flatten_filter_size = filter_width * filter_height * in_shape
reshape_filter = filter_weight.reshape(flatten_filter_size, -1)
image_vector = np.empty(
(batch_size, out_width, out_height, flatten_filter_size))
for batch in range(batch_size):
for w in range(out_width):
for h in range(out_height):
image_vector[batch][w][h] = _construct_image_vector(
_input, batch, w, h, filter_width, filter_height,
in_shape)
_input = np.dot(image_vector, reshape_filter) + _filter_bias
_input = hidden_nonlinearity(_input).eval()
in_width = out_width
in_height = out_height
return _input
def recurrent_step_lstm(input_val,
num_units,
step_hidden,
step_cell,
w_x_init,
w_h_init,
b_init,
nonlinearity,
gate_nonlinearity,
forget_bias=1.0):
"""Helper function for performing feedforward of a lstm cell.
Args:
input_val (tf.Tensor): Input placeholder.
num_units (int): Hidden dimension for LSTM cell.
step_hidden (tf.Tensor): Place holder for step hidden state.
step_cell (tf.Tensor): Place holder for step cell state.
nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
w_x_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
gate_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
w_h_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
forget_bias (float): Bias to be added to the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
Returns:
tf.Tensor: Final hidden state after feedforward.
tf.Tensor: Final cell state after feedforward.
Note:
Incoming gate: i(t) = f_i(x(t) @ W_xi + h(t-1) @ W_hi +
w_ci * c(t-1) + b_i)
Forget gate: f(t) = f_f(x(t) @ W_xf + h(t-1) @ W_hf +
w_cf * c(t-1) + b_f)
Cell gate: c(t) = f(t) * c(t - 1) + i(t) * f_c(x(t) @ W_xc +
h(t-1) @ W_hc + b_c)
Out gate: o(t) = f_o(x(t) @ W_xo + h(t-1) W_ho +
w_co * c(t) + b_o)
New hidden state: h(t) = o(t) * f_h(c(t))
Incoming, forget, cell, and out vectors must have the same
dimension as the hidden state.
"""
def f(x):
"""Linear function.
Args:
x (float): Input variable.
Returns:
            float: Output variable.
"""
return x
if nonlinearity is None:
nonlinearity = f
if gate_nonlinearity is None:
gate_nonlinearity = f
input_dim = np.prod(input_val.shape[1:])
# Weights for the input gate
w_xi = np.full((input_dim, num_units), w_x_init)
w_hi = np.full((num_units, num_units), w_h_init)
b_i = np.full((num_units, ), b_init)
# Weights for the forget gate
w_xf = np.full((input_dim, num_units), w_x_init)
w_hf = np.full((num_units, num_units), w_h_init)
b_f = np.full((num_units, ), b_init)
# Weights for the cell gate
w_xc = np.full((input_dim, num_units), w_x_init)
w_hc = np.full((num_units, num_units), w_h_init)
b_c = np.full((num_units, ), b_init)
# Weights for the out gate
w_xo = np.full((input_dim, num_units), w_x_init)
w_ho = np.full((num_units, num_units), w_h_init)
b_o = np.full((num_units, ), b_init)
w_x_ifco = np.concatenate([w_xi, w_xf, w_xc, w_xo], axis=1)
w_h_ifco = np.concatenate([w_hi, w_hf, w_hc, w_ho], axis=1)
x_ifco = np.matmul(input_val, w_x_ifco)
h_ifco = np.matmul(step_hidden, w_h_ifco)
x_i = x_ifco[:, :num_units]
x_f = x_ifco[:, num_units:num_units * 2]
x_c = x_ifco[:, num_units * 2:num_units * 3]
x_o = x_ifco[:, num_units * 3:num_units * 4]
h_i = h_ifco[:, :num_units]
h_f = h_ifco[:, num_units:num_units * 2]
h_c = h_ifco[:, num_units * 2:num_units * 3]
h_o = h_ifco[:, num_units * 3:num_units * 4]
i = gate_nonlinearity(x_i + h_i + b_i)
f = gate_nonlinearity(x_f + h_f + b_f + forget_bias)
o = gate_nonlinearity(x_o + h_o + b_o)
c = f * step_cell + i * nonlinearity(x_c + h_c + b_c)
h = o * nonlinearity(c)
return h, c
def recurrent_step_gru(input_val,
num_units,
step_hidden,
w_x_init,
w_h_init,
b_init,
nonlinearity,
gate_nonlinearity,
forget_bias=1.0):
"""Helper function for performing feedforward of a GRU cell.
Args:
input_val (tf.Tensor): Input placeholder.
num_units (int): Hidden dimension for GRU cell.
step_hidden (tf.Tensor): Place holder for step hidden state.
nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
w_x_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
gate_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
w_h_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
forget_bias (float): Bias to be added to the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
Returns:
tf.Tensor: Final hidden state after feedforward.
Note:
Reset gate: r(t) = f_r(x(t) @ W_xr + h(t-1) @ W_hr + b_r)
Update gate: u(t) = f_u(x(t) @ W_xu + h(t-1) @ W_hu + b_u)
Cell gate: c(t) = f_c(x(t) @ W_xc + r(t) *
(h(t-1) @ W_hc) + b_c)
New hidden state: h(t) = u_t * h(t-1) + (1 - u(t)) * c(t)
The reset, update, and cell vectors must have the same dimension
as the hidden state.
"""
def f(x):
"""Linear function.
Args:
x (float): Input variable.
Returns:
            float: Output variable.
"""
return x
del forget_bias
if nonlinearity is None:
nonlinearity = f
if gate_nonlinearity is None:
gate_nonlinearity = f
input_dim = np.prod(input_val.shape[1:])
# Weights for the update gate
w_xz = np.full((input_dim, num_units), w_x_init)
w_hz = np.full((num_units, num_units), w_h_init)
b_z = np.full((num_units, ), b_init)
# Weights for the reset gate
w_xr = np.full((input_dim, num_units), w_x_init)
w_hr = np.full((num_units, num_units), w_h_init)
b_r = np.full((num_units, ), b_init)
# Weights for the hidden gate
w_xh = np.full((input_dim, num_units), w_x_init)
w_hh = np.full((num_units, num_units), w_h_init)
b_h = np.full((num_units, ), b_init)
w_x_zrh = np.concatenate([w_xz, w_xr, w_xh], axis=1)
w_h_zrh = np.concatenate([w_hz, w_hr, w_hh], axis=1)
x_zrh = np.matmul(input_val, w_x_zrh)
h_zrh = np.matmul(step_hidden, w_h_zrh)
x_z = x_zrh[:, :num_units]
x_r = x_zrh[:, num_units:num_units * 2]
x_h = x_zrh[:, num_units * 2:num_units * 3]
h_z = h_zrh[:, :num_units]
h_r = h_zrh[:, num_units:num_units * 2]
h_h = h_zrh[:, num_units * 2:num_units * 3]
z = gate_nonlinearity(x_z + h_z + b_z)
r = gate_nonlinearity(x_r + h_r + b_r)
hh = nonlinearity(x_h + r * h_h + b_h)
h = z * step_hidden + (1 - z) * hh
return h
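# A tiny, self-contained smoke test of recurrent_step_gru above. The batch
# size, input dimension, hidden size and constant initializers are assumptions
# chosen only so the call is easy to trace by hand; no test depends on this.
def _gru_step_smoke_test():
    """Run one GRU step on constant inputs and return the new hidden state."""
    def sigmoid(v):
        return 1. / (1. + np.exp(-v))
    x = np.ones((2, 3))
    h0 = np.zeros((2, 4))
    h1 = recurrent_step_gru(x, 4, h0, w_x_init=0.1, w_h_init=0.1, b_init=0.,
                            nonlinearity=np.tanh, gate_nonlinearity=sigmoid)
    assert h1.shape == (2, 4)
    return h1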
def _construct_image_vector(_input, batch, w, h, filter_width, filter_height,
in_shape):
"""Get sliding window of input image.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
batch (int): Batch index.
w (int): Width index.
h (int): Height index.
filter_width (int): Width of the filter.
filter_height (int): Height of the filter.
in_shape (int): The number of input channels.
Return:
np.array: The output of the sliding window.
"""
sw = np.empty((filter_width, filter_height, in_shape))
for dw in range(filter_width):
for dh in range(filter_height):
for in_c in range(in_shape):
sw[dw][dh][in_c] = _input[batch][w + dw][h + dh][in_c]
return sw.flatten()
def max_pooling(_input, pool_shape, pool_stride, padding='VALID'):
"""Helper function for performing max pooling.
Args:
_input (tf.Tensor): Input tf.Tensor to the CNN.
pool_shape (int): Dimension of the pooling layer.
pool_stride (int): The stride of the pooling layer.
padding (str): The type of padding algorithm to use, either 'SAME'
or 'VALID'.
Return:
tf.Tensor: The output tf.Tensor after max pooling.
"""
batch_size = _input.shape[0]
if padding == 'VALID':
height_size = int((_input.shape[1] - pool_shape) / pool_stride) + 1
width_size = int((_input.shape[2] - pool_shape) / pool_stride) + 1
else:
height_size = int((_input.shape[1] + pool_stride - 1) / pool_stride)
width_size = int((_input.shape[2] + pool_stride - 1) / pool_stride)
# max pooling
results = np.empty((batch_size, height_size, width_size, _input.shape[3]))
for b in range(batch_size):
for i in range(0, results.shape[1]):
for j in range(0, results.shape[2]):
for k in range(_input.shape[3]):
                    row = i * pool_stride
                    col = j * pool_stride
results[b][i][j][k] = np.max(
_input[b, row:row + pool_shape, col:col + # noqa: W504
pool_shape, k])
return results
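# A hand-checkable sketch of max_pooling above: a single 4x4, one-channel
# "image" with a 2x2 window and stride 2. The input values are assumptions
# chosen so the expected output can be verified by eye; no test depends on
# this helper.
def _max_pooling_demo():
    """Pool a 1x4x4x1 ramp image down to 1x2x2x1."""
    img = np.arange(16, dtype=float).reshape(1, 4, 4, 1)
    pooled = max_pooling(img, pool_shape=2, pool_stride=2)
    # pooled[0, :, :, 0] == [[5., 7.], [13., 15.]]
    return pooled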
# Taken from random.choices in Python 3.6 source since it's not available in
# python 3.5
# https://github.com/python/cpython/blob/3.6/Lib/random.py
# https://docs.python.org/3/library/random.html#random.choices
def choices(population, weights=None, *, cum_weights=None, k=1):
"""Return a k sized list of population elements chosen with replacement.
Args:
population (list[object]): List of object to be chosen from.
weights (list[float]): List of weight for the elements.
cum_weights (list[float]): List of cumulative weight for the elements.
k (int): Number of element to be returned.
Returns:
list[object]: List of objects chosen from the given population.
Raises:
TypeError: Cannot specify both weights and cumulative weights.
ValueError: The number of weights does not match the population
Note:
If the relative weights or cumulative weights are not specified,
the selections are made with equal probability.
"""
if cum_weights is None:
if weights is None:
_int = int
total = len(population)
return [
population[_int(random.random() * total)] for i in range(k)
]
cum_weights = list(itertools.accumulate(weights))
elif weights is not None:
raise TypeError('Cannot specify both weights and cumulative weights')
if len(cum_weights) != len(population):
raise ValueError('The number of weights does not match the population')
total = cum_weights[-1]
hi = len(cum_weights) - 1
return [
population[bisect.bisect(cum_weights,
random.random() * total, 0, hi)]
for i in range(k)
]
| 16,572 | 35.584989 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/mock.py | from unittest import mock
class PickleableMagicMock(mock.MagicMock):
def __reduce__(self):
return (mock.MagicMock, ())
| 133 | 18.142857 | 42 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/quirks.py | """Documented breakages and quirks caused by dependencies."""
# openai/gym environments known to not implement render()
#
# e.g.
# > gym/core.py", line 111, in render
# > raise NotImplementedError
# > NotImplementedError
#
# Tests calling render() on these should verify they raise NotImplementedError
# ```
# with pytest.raises(NotImplementedError):
# env.render()
# ```
KNOWN_GYM_RENDER_NOT_IMPLEMENTED = [
# Please keep alphabetized
'Blackjack-v0',
'GuessingGame-v0',
'HotterColder-v0',
'NChain-v0',
'Roulette-v0',
]
| 553 | 23.086957 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/wrappers.py | import gym
class AutoStopEnv(gym.Wrapper):
"""A env wrapper that stops rollout at step max_path_length."""
def __init__(self, env=None, env_name="", max_path_length=100):
if env_name:
super().__init__(gym.make(env_name))
else:
super().__init__(env)
self._rollout_step = 0
self._max_path_length = max_path_length
def step(self, actions):
self._rollout_step += 1
next_obs, reward, done, info = self.env.step(actions)
if self._rollout_step == self._max_path_length:
done = True
self._rollout_step = 0
return next_obs, reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
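# --- Illustrative usage sketch (assumptions flagged below) ------------------
# Shows the wrapper forcing `done` after max_path_length steps. The
# 'CartPole-v1' id and the step budget are assumptions made only for this
# sketch; any gym environment would behave the same way.
if __name__ == '__main__':
    demo_env = AutoStopEnv(env_name='CartPole-v1', max_path_length=5)
    demo_env.reset()
    done, steps = False, 0
    while not done:
        _, _, done, _ = demo_env.step(demo_env.action_space.sample())
        steps += 1
    print('rollout length:', steps)  # at most 5, enforced by the wrapper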
| 734 | 28.4 | 67 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/__init__.py | from tests.fixtures.fixtures import snapshot_config
from tests.fixtures.fixtures import TfGraphTestCase
from tests.fixtures.fixtures import TfTestCase
__all__ = ['snapshot_config', 'TfGraphTestCase', 'TfTestCase']
| 215 | 35 | 62 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/fixtures.py | import gc
import os
from dowel import logger
import tensorflow as tf
from garage.experiment import deterministic
from garage.experiment.snapshotter import SnapshotConfig
from tests.fixtures.logger import NullOutput
path = os.path.join(os.getcwd(), 'data/local/experiment')
snapshot_config = SnapshotConfig(snapshot_dir=path,
snapshot_mode='last',
snapshot_gap=1)
class TfTestCase:
def setup_method(self):
self.sess = tf.compat.v1.Session()
self.sess.__enter__()
def teardown_method(self):
self.sess.__exit__(None, None, None)
self.sess.close()
del self.sess
gc.collect()
class TfGraphTestCase:
def setup_method(self):
self.graph = tf.Graph()
for c in self.graph.collections:
self.graph.clear_collection(c)
self.graph_manager = self.graph.as_default()
self.graph_manager.__enter__()
self.sess = tf.compat.v1.Session(graph=self.graph)
self.sess_manager = self.sess.as_default()
self.sess_manager.__enter__()
self.sess.__enter__()
logger.add_output(NullOutput())
deterministic.set_seed(1)
# initialize global singleton_pool for each test case
from garage.sampler import singleton_pool
singleton_pool.initialize(1)
def teardown_method(self):
logger.remove_all()
self.sess.__exit__(None, None, None)
self.sess_manager.__exit__(None, None, None)
self.graph_manager.__exit__(None, None, None)
self.sess.close()
# These del are crucial to prevent ENOMEM in the CI
# b/c TensorFlow does not release memory explicitly
del self.graph
del self.sess
gc.collect()
| 1,789 | 28.344262 | 61 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/logger.py | from dowel import LogOutput, TabularInput
class NullOutput(LogOutput):
"""Dummy output to disable 'no logger output' warnings."""
@property
def types_accepted(self):
"""Accept all output types."""
return (object, )
def record(self, data, prefix=''):
"""Don't do anything."""
if isinstance(data, TabularInput):
data.mark_all()
| 390 | 23.4375 | 62 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/algos/__init__.py | from tests.fixtures.algos.dummy_algo import DummyAlgo
from tests.fixtures.algos.dummy_tf_algo import DummyTFAlgo
__all__ = ['DummyAlgo', 'DummyTFAlgo']
| 153 | 29.8 | 58 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/algos/dummy_algo.py | """A dummy algorithm fixture."""
from garage.np.algos import RLAlgorithm
class DummyAlgo(RLAlgorithm): # pylint: disable=too-few-public-methods
"""Dummy algo for test.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.np.policies.Policy): Policy.
baseline (garage.np.baselines.Baseline): The baseline.
"""
def __init__(self, env_spec, policy, baseline):
self.env_spec = env_spec
self.policy = policy
self.baseline = baseline
self.discount = 0.9
self.max_path_length = 1
self.n_samples = 10
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
See garage.np.algos.RLAlgorithm train().
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
"""
| 999 | 28.411765 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/algos/dummy_tf_algo.py | """Dummy algorithm."""
from garage.np.algos import RLAlgorithm
class DummyTFAlgo(RLAlgorithm):
"""Dummy algorithm."""
def init_opt(self):
"""Initialize the optimization procedure.
If using tensorflow, this may include declaring all the variables and
compiling functions.
"""
def optimize_policy(self, samples_data):
"""Optimize the policy using the samples.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
"""
| 562 | 22.458333 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/distributions/__init__.py | from tests.fixtures.distributions.dummy_distribution import DummyDistribution
__all__ = [
"DummyDistribution",
]
| 118 | 18.833333 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/distributions/dummy_distribution.py | """Dummy distribution for testing purpose."""
from garage.tf.distributions import Distribution
class DummyDistribution(Distribution):
"""Dummy distribution for testing purpose."""
@property
def dim(self):
"""Distribution dimension.
Returns:
int: Distribution dimenison.
"""
return 1
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
"""Compute the symbolic KL divergence of two distributions.
Args:
old_dist_info_vars (dict): Old distribution tensors.
new_dist_info_vars (dict): New distribution tensors.
"""
def kl(self, old_dist_info, new_dist_info):
"""Compute the KL divergence of two distributions.
Args:
old_dist_info (dict): Old distribution parameters.
new_dist_info (dict): New distribution parameters.
"""
def likelihood_ratio_sym(self, x_var, old_dist_info_vars,
new_dist_info_vars):
"""Likelihood ratio sym.
Args:
x_var (tf.Tensor): Input placeholder.
old_dist_info_vars (dict): Old distribution tensors.
new_dist_info_vars (dict): New distribution tensors.
"""
def entropy(self, dist_info):
"""Entropy.
Args:
dist_info (dict): Distribution parameters.
"""
def log_likelihood_sym(self, x_var, dist_info_vars):
"""Log Likelihood sym.
Args:
x_var (tf.Tensor): Input placeholder.
dist_info_vars (dict): Distribution tensors.
"""
def log_likelihood(self, xs, dist_info):
"""Log Likelihood.
Args:
xs (np.ndarray): Inputs.
dist_info (dict): Distribution parameters.
"""
@property
def dist_info_specs(self):
"""Distribution information specification.
Returns:
list[tuple]: Key for distribution information and shape.
"""
return [('dummy', (1, ))]
| 2,035 | 23.829268 | 68 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/fixtures/envs/__init__.py | 0 | 0 | 0 | py |