repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/baselines/baseline.py | """Base class for all baselines."""
import abc
class Baseline(abc.ABC):
"""Base class for all baselines.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
"""
def __init__(self, env_spec):
self._mdp_spec = env_spec
@abc.abstractmethod
def get_param_values(self):
"""Get parameter values.
Returns:
List[np.ndarray]: A list of values of each parameter.
"""
@abc.abstractmethod
def set_param_values(self, flattened_params):
"""Set param values.
Args:
flattened_params (np.ndarray): A numpy array of parameter values.
"""
@abc.abstractmethod
def fit(self, paths):
"""Fit regressor based on paths.
Args:
paths (list[dict]): Sample paths.
"""
@abc.abstractmethod
def predict(self, path):
"""Predict value based on paths.
Args:
path (dict[numpy.ndarray]): Sample paths.
Returns:
numpy.ndarray: Predicted value.
"""
def log_diagnostics(self, paths):
"""Log diagnostic information.
Args:
paths (list[dict]): A list of collected paths.
"""
| 1,252 | 19.209677 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/baselines/linear_feature_baseline.py | """A linear value function (baseline) based on features."""
import numpy as np
from garage.np.baselines.baseline import Baseline
class LinearFeatureBaseline(Baseline):
"""A linear value function (baseline) based on features.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
reg_coeff (float): Regularization coefficient.
name (str): Name of baseline.
"""
def __init__(self, env_spec, reg_coeff=1e-5, name='LinearFeatureBaseline'):
super().__init__(env_spec)
self._coeffs = None
self._reg_coeff = reg_coeff
self.name = name
self.lower_bound = -10
self.upper_bound = 10
def get_param_values(self):
"""Get parameter values.
Returns:
List[np.ndarray]: A list of values of each parameter.
"""
return self._coeffs
def set_param_values(self, flattened_params):
"""Set param values.
Args:
flattened_params (np.ndarray): A numpy array of parameter values.
"""
self._coeffs = flattened_params
def _features(self, path):
"""Extract features from path.
Args:
path (dict[str, numpy.ndarray]): A sample path.
Returns:
numpy.ndarray: Extracted features.
"""
obs = np.clip(path['observations'], self.lower_bound, self.upper_bound)
length = len(path['rewards'])
al = np.arange(length).reshape(-1, 1) / 100.0
return np.concatenate(
[obs, obs**2, al, al**2, al**3,
np.ones((length, 1))], axis=1)
# pylint: disable=unsubscriptable-object
def fit(self, paths):
"""Fit regressor based on paths.
Args:
paths (list[dict]): Sample paths.
"""
featmat = np.concatenate([self._features(path) for path in paths])
returns = np.concatenate([path['returns'] for path in paths])
reg_coeff = self._reg_coeff
for _ in range(5):
self._coeffs = np.linalg.lstsq(
featmat.T.dot(featmat) +
reg_coeff * np.identity(featmat.shape[1]),
featmat.T.dot(returns),
rcond=-1)[0]
if not np.any(np.isnan(self._coeffs)):
break
reg_coeff *= 10
def predict(self, path):
"""Predict value based on paths.
Args:
path (dict[str, numpy.ndarray]): A sample path.
Returns:
numpy.ndarray: Predicted value.
"""
if self._coeffs is None:
return np.zeros(len(path['rewards']))
return self._features(path).dot(self._coeffs)
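# Minimal usage sketch (synthetic inputs, hypothetical values). The baseline
# regresses per-step returns on [obs, obs**2, t/100, (t/100)**2, (t/100)**3, 1];
# env_spec is stored but never consulted by fit()/predict(), so None suffices
# for this sketch.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    path = {
        'observations': rng.randn(50, 4),
        'rewards': rng.randn(50),
        'returns': rng.randn(50),
    }
    baseline = LinearFeatureBaseline(env_spec=None)
    baseline.fit([path])
    predicted_values = baseline.predict(path)
    assert predicted_values.shape == (50, )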
| 2,656 | 27.265957 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/baselines/linear_multi_feature_baseline.py | """Linear Multi-Feature Baseline."""
import numpy as np
from garage.np.baselines import LinearFeatureBaseline
class LinearMultiFeatureBaseline(LinearFeatureBaseline):
"""A linear value function (baseline) based on features.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
reg_coeff (float): Regularization coefficient.
features (list[str]): Name of features.
name (str): Name of baseline.
"""
def __init__(self,
env_spec,
features=None,
reg_coeff=1e-5,
name='LinearMultiFeatureBaseline'):
super().__init__(env_spec, reg_coeff, name)
features = features or ['observation']
self._feature_names = features
def _features(self, path):
"""Extract features from path.
Args:
path (dict[str, numpy.ndarray]): A sample path.
Returns:
numpy.ndarray: Extracted features.
"""
features = [
np.clip(path[feature_name], -10, 10)
for feature_name in self._feature_names
]
n = len(path['rewards'])
return np.concatenate(sum([[f, f**2]
for f in features], []) + [np.ones((n, 1))],
axis=1)
| 1,312 | 28.177778 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/baselines/zero_baseline.py | import numpy as np
from garage.np.baselines.baseline import Baseline
class ZeroBaseline(Baseline):
def __init__(self, env_spec):
pass
def get_param_values(self, **kwargs):
return None
def set_param_values(self, val, **kwargs):
pass
def fit(self, paths):
pass
def predict(self, path):
return np.zeros_like(path['rewards'])
def predict_n(self, paths):
return [np.zeros_like(path['rewards']) for path in paths]
| 489 | 18.6 | 65 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/embeddings/__init__.py | """Embedding encoders and decoders which use NumPy as a numerical backend."""
from garage.np.embeddings.encoder import Encoder, StochasticEncoder
__all__ = ['Encoder', 'StochasticEncoder']
| 190 | 37.2 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/embeddings/encoder.py | """Base class for context encoder."""
import abc
class Encoder(abc.ABC):
"""Base class of context encoders for training meta-RL algorithms."""
@property
@abc.abstractmethod
def spec(self):
"""garage.InOutSpec: Input and output space."""
@property
@abc.abstractmethod
def input_dim(self):
"""int: Dimension of the encoder input."""
@property
@abc.abstractmethod
def output_dim(self):
"""int: Dimension of the encoder output (embedding)."""
def reset(self, do_resets=None):
"""Reset the encoder.
This is effective only for recurrent encoders. do_resets is effective
only for vectorized encoders.
For a vectorized encoder, do_resets is an array of booleans indicating
which internal states should be reset. The length of do_resets should be
equal to the length of inputs.
Args:
do_resets (numpy.ndarray): Bool array indicating which states
to be reset.
"""
class StochasticEncoder(Encoder):
"""An stochastic context encoders.
An stochastic encoder maps an input to a distribution, but not a
deterministic vector.
"""
@property
@abc.abstractmethod
def distribution(self):
"""object: Embedding distribution."""
| 1,307 | 24.153846 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/exploration_policies/__init__.py | """Exploration strategies which use NumPy as a numerical backend."""
from garage.np.exploration_policies.add_gaussian_noise import AddGaussianNoise
from garage.np.exploration_policies.add_ornstein_uhlenbeck_noise import (
AddOrnsteinUhlenbeckNoise)
from garage.np.exploration_policies.epsilon_greedy_policy import (
EpsilonGreedyPolicy)
from garage.np.exploration_policies.exploration_policy import ExplorationPolicy
__all__ = [
'EpsilonGreedyPolicy', 'ExplorationPolicy', 'AddGaussianNoise',
'AddOrnsteinUhlenbeckNoise'
]
| 540 | 40.615385 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/exploration_policies/add_gaussian_noise.py | """Gaussian exploration strategy."""
import gym
import numpy as np
from garage.np.exploration_policies.exploration_policy import ExplorationPolicy
class AddGaussianNoise(ExplorationPolicy):
"""Add Gaussian noise to the action taken by the deterministic policy.
Args:
env_spec (EnvSpec): Environment spec to explore.
policy (garage.Policy): Policy to wrap.
max_sigma (float): Action noise standard deviation at the start of
exploration.
min_sigma (float): Action noise standard deviation at the end of the
decay period.
decay_period (int): Number of paths over which to linearly decay sigma
from max_sigma to min_sigma.
"""
def __init__(self,
env_spec,
policy,
max_sigma=1.0,
min_sigma=0.1,
decay_period=1000000):
assert isinstance(env_spec.action_space, gym.spaces.Box)
assert len(env_spec.action_space.shape) == 1
super().__init__(policy)
self._max_sigma = max_sigma
self._min_sigma = min_sigma
self._decay_period = decay_period
self._action_space = env_spec.action_space
self._iteration = 0
def reset(self, dones=None):
"""Reset the state of the exploration.
Args:
dones (List[bool] or numpy.ndarray or None): Which vectorization
states to reset.
"""
self._iteration += 1
super().reset(dones)
def get_action(self, observation):
"""Get action from this policy for the input observation.
Args:
observation(numpy.ndarray): Observation from the environment.
Returns:
np.ndarray: Actions with noise.
List[dict]: Arbitrary policy state information (agent_info).
"""
action, agent_info = self.policy.get_action(observation)
sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(
1.0, self._iteration * 1.0 / self._decay_period)
return np.clip(action + np.random.normal(size=len(action)) * sigma,
self._action_space.low,
self._action_space.high), agent_info
def get_actions(self, observations):
"""Get actions from this policy for the input observation.
Args:
observations(list): Observations from the environment.
Returns:
np.ndarray: Actions with noise.
List[dict]: Arbitrary policy state information (agent_info).
"""
actions, agent_infos = self.policy.get_actions(observations)
sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(
1.0, self._iteration * 1.0 / self._decay_period)
return np.clip(actions + np.random.normal(size=len(actions)) * sigma,
self._action_space.low,
self._action_space.high), agent_infos
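# Illustrative sketch of the sigma schedule used above (hypothetical numbers).
# sigma decays linearly with the number of reset() calls (i.e. paths), reaching
# min_sigma after decay_period paths; this mirrors the expression used in
# get_action()/get_actions().
if __name__ == '__main__':
    max_sigma, min_sigma, decay_period = 1.0, 0.1, 1000
    for iteration in (0, 500, 1000, 2000):
        sigma = max_sigma - (max_sigma - min_sigma) * min(
            1.0, iteration * 1.0 / decay_period)
        print(iteration, sigma)  # 1.0, 0.55, 0.1, 0.1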
| 2,979 | 34.47619 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/exploration_policies/add_ornstein_uhlenbeck_noise.py | """Ornstein-Uhlenbeck exploration strategy.
The Ornstein-Uhlenbeck exploration strategy comes from the Ornstein-Uhlenbeck
process. It is often used in the DDPG algorithm because, in continuous control
tasks, temporally correlated exploration yields smoother transitions, and the
OU process is relatively smooth in time.
"""
import numpy as np
from garage.np.exploration_policies.exploration_policy import ExplorationPolicy
class AddOrnsteinUhlenbeckNoise(ExplorationPolicy):
r"""An exploration strategy based on the Ornstein-Uhlenbeck process.
The process is governed by the following stochastic differential equation.
.. math::
dx_t = \theta(\mu - x_t)dt + \sigma \sqrt{dt} \mathcal{N}(\mathbb{0}, \mathbb{1}) # noqa: E501
Args:
env_spec (EnvSpec): Environment to explore.
policy (garage.Policy): Policy to wrap.
mu (float): :math:`\mu` parameter of this OU process. This is the drift
component.
sigma (float): :math:`\sigma > 0` parameter of this OU process. This is
the coefficient for the Wiener process component. Must be greater
than zero.
theta (float): :math:`\theta > 0` parameter of this OU process. Must be
greater than zero.
dt (float): Time-step quantum :math:`dt > 0` of this OU process. Must
be greater than zero.
x0 (float): Initial state :math:`x_0` of this OU process.
"""
def __init__(self,
env_spec,
policy,
*,
mu=0,
sigma=0.3,
theta=0.15,
dt=1e-2,
x0=None):
super().__init__(policy)
self._env_spec = env_spec
self._action_space = env_spec.action_space
self._action_dim = self._action_space.flat_dim
self._mu = mu
self._sigma = sigma
self._theta = theta
self._dt = dt
self._x0 = x0 if x0 is not None else self._mu * np.zeros(
self._action_dim)
self._state = self._x0
def _simulate(self):
"""Advance the OU process.
Returns:
np.ndarray: Updated OU process state.
"""
x = self._state
dx = self._theta * (self._mu - x) * self._dt + self._sigma * np.sqrt(
self._dt) * np.random.normal(size=len(x))
self._state = x + dx
return self._state
def reset(self, dones=None):
"""Reset the state of the exploration.
Args:
dones (List[bool] or numpy.ndarray or None): Which vectorization
states to reset.
"""
self._state = self._x0
super().reset(dones)
def get_action(self, observation):
"""Return an action with noise.
Args:
observation (np.ndarray): Observation from the environment.
Returns:
np.ndarray: An action with noise.
dict: Arbitrary policy state information (agent_info).
"""
action, agent_infos = self.policy.get_action(observation)
ou_state = self._simulate()
return np.clip(action + ou_state, self._action_space.low,
self._action_space.high), agent_infos
def get_actions(self, observations):
"""Return actions with noise.
Args:
observations (np.ndarray): Observation from the environment.
Returns:
np.ndarray: Actions with noise.
List[dict]: Arbitrary policy state information (agent_info).
"""
actions, agent_infos = self.policy.get_actions(observations)
ou_state = self._simulate()
return np.clip(actions + ou_state, self._action_space.low,
self._action_space.high), agent_infos
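# Illustrative sketch of the Euler discretisation used by _simulate() above
# (hypothetical parameters). The state mean-reverts towards mu at rate theta
# while sigma * sqrt(dt) injects temporally correlated noise; the stationary
# standard deviation is roughly sigma / sqrt(2 * theta).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    mu, sigma, theta, dt = 0.0, 0.3, 0.15, 1e-2
    x = np.ones(2)  # start away from mu so the mean reversion is visible
    for _ in range(1000):
        noise = rng.normal(size=2)
        x = x + theta * (mu - x) * dt + sigma * np.sqrt(dt) * noise
    print(x)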
| 3,801 | 32.646018 | 103 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/exploration_policies/epsilon_greedy_policy.py | """ϵ-greedy exploration strategy.
Random exploration according to the value of epsilon.
"""
import numpy as np
from garage.np.exploration_policies.exploration_policy import ExplorationPolicy
class EpsilonGreedyPolicy(ExplorationPolicy):
"""ϵ-greedy exploration strategy.
Select action based on the value of ϵ. ϵ will decrease from
max_epsilon to min_epsilon within decay_ratio * total_timesteps.
At state s, with probability
1 − ϵ: select action = argmax Q(s, a)
ϵ : select a random action from a uniform distribution.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
policy (garage.Policy): Policy to wrap.
total_timesteps (int): Total steps in the training, equivalent to
max_path_length * n_epochs.
max_epsilon (float): The maximum (starting) value of epsilon.
min_epsilon (float): The minimum (terminal) value of epsilon.
decay_ratio (float): Fraction of total steps for epsilon decay.
"""
def __init__(self,
env_spec,
policy,
*,
total_timesteps,
max_epsilon=1.0,
min_epsilon=0.02,
decay_ratio=0.1):
super().__init__(policy)
self._env_spec = env_spec
self._max_epsilon = max_epsilon
self._min_epsilon = min_epsilon
self._decay_period = int(total_timesteps * decay_ratio)
self._action_space = env_spec.action_space
self._epsilon = self._max_epsilon
self._decrement = (self._max_epsilon -
self._min_epsilon) / self._decay_period
def get_action(self, observation):
"""Get action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from the environment.
Returns:
np.ndarray: An action with noise.
dict: Arbitrary policy state information (agent_info).
"""
opt_action, _ = self.policy.get_action(observation)
self._decay()
if np.random.random() < self._epsilon:
opt_action = self._action_space.sample()
return opt_action, dict()
def get_actions(self, observations):
"""Get actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observation from the environment.
Returns:
np.ndarray: Actions with noise.
List[dict]: Arbitrary policy state information (agent_info).
"""
opt_actions, _ = self.policy.get_actions(observations)
for itr, _ in enumerate(opt_actions):
self._decay()
if np.random.random() < self._epsilon:
opt_actions[itr] = self._action_space.sample()
return opt_actions, dict()
def _decay(self):
if self._epsilon > self._min_epsilon:
self._epsilon -= self._decrement
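# Illustrative sketch of the epsilon schedule above (hypothetical numbers).
# This is the closed-form equivalent of calling _decay() once per step:
# epsilon falls linearly from max_epsilon to min_epsilon over the first
# decay_ratio * total_timesteps steps and then stays at min_epsilon.
if __name__ == '__main__':
    total_timesteps, decay_ratio = 10000, 0.1
    max_epsilon, min_epsilon = 1.0, 0.02
    decay_period = int(total_timesteps * decay_ratio)
    decrement = (max_epsilon - min_epsilon) / decay_period
    for step in (0, 500, 1000, 5000):
        epsilon = max(min_epsilon, max_epsilon - decrement * step)
        print(step, round(epsilon, 3))  # 1.0, 0.51, 0.02, 0.02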
| 2,978 | 32.47191 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/exploration_policies/exploration_policy.py | """Exploration Policy API used by off-policy algorithms."""
import abc
# This should be an ABC inheriting from garage.Policy, but that doesn't exist
# yet.
class ExplorationPolicy(abc.ABC):
"""Policy that wraps another policy to add action noise.
Args:
policy (garage.Policy): Policy to wrap.
"""
def __init__(self, policy):
self.policy = policy
@abc.abstractmethod
def get_action(self, observation):
"""Return an action with noise.
Args:
observation (np.ndarray): Observation from the environment.
Returns:
np.ndarray: An action with noise.
dict: Arbitrary policy state information (agent_info).
"""
@abc.abstractmethod
def get_actions(self, observations):
"""Return actions with noise.
Args:
observations (np.ndarray): Observation from the environment.
Returns:
np.ndarray: Actions with noise.
List[dict]: Arbitrary policy state information (agent_info).
"""
def reset(self, dones=None):
"""Reset the state of the exploration.
Args:
dones (List[bool] or numpy.ndarray or None): Which vectorization
states to reset.
"""
self.policy.reset(dones)
def get_param_values(self):
"""Get parameter values.
Returns:
list or dict: Values of each parameter.
"""
return self.policy.get_param_values()
def set_param_values(self, params):
"""Set param values.
Args:
params (np.ndarray): A numpy array of parameter values.
"""
self.policy.set_param_values(params)
| 1,717 | 23.197183 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/optimizers/__init__.py | """Optimizers which use NumPy as a numerical backend."""
from garage.np.optimizers.minibatch_dataset import BatchDataset
__all__ = ['BatchDataset']
| 149 | 29 | 63 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/optimizers/minibatch_dataset.py | import numpy as np
class BatchDataset:
def __init__(self, inputs, batch_size, extra_inputs=None):
self._inputs = [i for i in inputs]
if extra_inputs is None:
extra_inputs = []
self._extra_inputs = extra_inputs
self._batch_size = batch_size
if batch_size is not None:
self._ids = np.arange(self._inputs[0].shape[0])
self.update()
@property
def number_batches(self):
if self._batch_size is None:
return 1
return int(np.ceil(self._inputs[0].shape[0] * 1.0 / self._batch_size))
def iterate(self, update=True):
if self._batch_size is None:
yield list(self._inputs) + list(self._extra_inputs)
else:
for itr in range(self.number_batches):
batch_start = itr * self._batch_size
batch_end = (itr + 1) * self._batch_size
batch_ids = self._ids[batch_start:batch_end]
batch = [d[batch_ids] for d in self._inputs]
yield list(batch) + list(self._extra_inputs)
if update:
self.update()
def update(self):
np.random.shuffle(self._ids)
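# Minimal usage sketch (hypothetical arrays). The dataset shuffles row indices
# once per pass and yields mini-batches of batch_size rows, with a shorter
# final batch covering the remainder.
if __name__ == '__main__':
    observations = np.arange(10).reshape(10, 1)
    returns = np.arange(10, 20).reshape(10, 1)
    dataset = BatchDataset(inputs=[observations, returns], batch_size=4)
    assert dataset.number_batches == 3  # ceil(10 / 4)
    for obs_batch, return_batch in dataset.iterate():
        print(obs_batch.shape[0], return_batch.shape[0])  # 4, 4, then 2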
| 1,202 | 32.416667 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/policies/__init__.py | """Policies which use NumPy as a numerical backend."""
from garage.np.policies.fixed_policy import FixedPolicy
from garage.np.policies.policy import Policy, StochasticPolicy
from garage.np.policies.scripted_policy import ScriptedPolicy
__all__ = ['FixedPolicy', 'Policy', 'StochasticPolicy', 'ScriptedPolicy']
| 312 | 38.125 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/policies/fixed_policy.py | """Policy that performs a fixed sequence of actions."""
from garage.np.policies.policy import Policy
class FixedPolicy(Policy):
"""Policy that performs a fixed sequence of actions.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
scripted_actions (list[np.ndarray] or np.ndarray): Sequence of actions
to perform.
agent_infos (list[dict[str, np.ndarray]] or None): Sequence of
agent_infos to produce.
"""
def __init__(self, env_spec, scripted_actions, agent_infos=None):
super().__init__(env_spec)
if agent_infos is None:
agent_infos = [{}] * len(scripted_actions)
self._scripted_actions = scripted_actions
self._agent_infos = agent_infos
self._indices = [0]
def reset(self, dones=None):
"""Reset policy.
Args:
dones (None or list[bool]): Vectorized policy states to reset.
Raises:
ValueError: If dones has length greater than 1.
"""
if dones is None:
dones = [True]
if len(dones) > 1:
raise ValueError('FixedPolicy does not support more than one '
'action at a time.')
self._indices[0] = 0
def set_param_values(self, params):
"""Set param values of policy.
Args:
params (object): Ignored.
"""
# pylint: disable=no-self-use
del params
def get_param_values(self):
"""Return policy params (there are none).
Returns:
tuple: Empty tuple.
"""
# pylint: disable=no-self-use
return ()
def get_action(self, observation):
"""Get next action.
Args:
observation (np.ndarray): Ignored.
Raises:
ValueError: If policy is currently vectorized (reset was called
with more than one done value).
Returns:
tuple[np.ndarray, dict[str, np.ndarray]]: The action and agent_info
for this time step.
"""
del observation
action = self._scripted_actions[self._indices[0]]
agent_info = self._agent_infos[self._indices[0]]
self._indices[0] += 1
return action, agent_info
def get_actions(self, observations):
"""Get next action.
Args:
observations (np.ndarray): Ignored.
Raises:
ValueError: If observations has length greater than 1.
Returns:
tuple[np.ndarray, dict[str, np.ndarray]]: The action and agent_info
for this time step.
"""
if len(observations) != 1:
raise ValueError('FixedPolicy does not support more than one '
'observation at a time.')
return self.get_action(observations[0])
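# Minimal usage sketch (hypothetical actions). env_spec is stored but never
# consulted by this policy, so None suffices here; the observations are
# ignored and the scripted actions are simply replayed in order.
if __name__ == '__main__':
    policy = FixedPolicy(env_spec=None, scripted_actions=[0, 1, 2])
    policy.reset()
    actions = [policy.get_action(None)[0] for _ in range(3)]
    print(actions)  # [0, 1, 2]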
| 2,880 | 27.524752 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/policies/policy.py | """Base class for policies based on numpy."""
import abc
class Policy(abc.ABC):
"""Base classe for policies based on numpy.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
"""
def __init__(self, env_spec):
self._env_spec = env_spec
@abc.abstractmethod
def get_action(self, observation):
"""Get action sampled from the policy.
Args:
observation (np.ndarray): Observation from the environment.
Returns:
(np.ndarray): Action sampled from the policy.
"""
def reset(self, dones=None):
"""Reset the policy.
If dones is None, it defaults to np.array([True]), which implies the
policy is not "vectorized", i.e. the number of parallel environments
used for sampling training data is 1.
Args:
dones (numpy.ndarray): Bool that indicates terminal state(s).
"""
@property
def observation_space(self):
"""akro.Space: The observation space of the environment."""
return self._env_spec.observation_space
@property
def action_space(self):
"""akro.Space: The action space for the environment."""
return self._env_spec.action_space
def log_diagnostics(self, paths):
"""Log extra information per iteration based on the collected paths.
Args:
paths (list[dict]): A list of collected paths
"""
@property
def state_info_keys(self):
"""Get keys describing policy's state.
Returns:
List[str]: keys for the information related to the policy's state
when taking an action.
"""
return list()
def terminate(self):
"""Clean up operation."""
class StochasticPolicy(Policy):
"""Base class for stochastic policies implemented in numpy."""
@property
@abc.abstractmethod
def distribution(self):
"""Get the distribution of the policy.
Returns:
garage.tf.distribution: The distribution of the policy.
"""
@abc.abstractmethod
def dist_info(self, obs, state_infos):
"""Return the distribution information about the actions.
Args:
obs (np.ndarray): observation values
state_infos (dict): a dictionary whose values should contain
information about the state of the policy at the time it
received the observation
"""
| 2,496 | 24.742268 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/policies/scripted_policy.py | """Simulates a garage policy object."""
class ScriptedPolicy:
"""Simulates a garage policy object.
Args:
scripted_actions (list or dictionary): Data structure indexed by
observation; returns a corresponding action.
agent_env_infos (list or dictionary): Data structure indexed by
observation; returns a corresponding agent_env_info.
"""
def __init__(self, scripted_actions, agent_env_infos=None):
self._scripted_actions = scripted_actions
self._agent_env_infos = agent_env_infos
def set_param_values(self, params):
"""Set param values of policy."""
pass
def get_param_values(self):
"""Return policy params as a list."""
return []
def reset(self, dones=None):
"""Reset Policy to initial state."""
pass
def get_action(self, obs):
"""Return action sampled from the policy."""
if self._agent_env_infos:
a_info = self._agent_env_infos[obs]
else:
a_info = dict()
return self._scripted_actions[obs], a_info
def get_actions(self, obses):
"""Return ACTIONS sampled from the policy."""
if self._agent_env_infos:
a_info = self._agent_env_infos[obses[0]]
else:
a_info = dict()
return [self._scripted_actions[obs] for obs in obses], a_info
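# Minimal usage sketch (hypothetical observations and actions). Observations
# are used directly as lookup keys, so they must be valid indices/keys of the
# scripted data structures.
if __name__ == '__main__':
    policy = ScriptedPolicy(scripted_actions={'s0': 0, 's1': 1})
    print(policy.get_action('s0'))           # (0, {})
    print(policy.get_actions(['s1', 's0']))  # ([1, 0], {})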
| 1,385 | 29.8 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/q_functions/__init__.py | """Q-functions which use NumPy as a numerical backend."""
from garage.np.q_functions.q_function import QFunction
__all__ = ['QFunction']
| 138 | 26.8 | 57 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/q_functions/q_function.py | """Base class for Q Functions implemented in numpy."""
class QFunction:
"""Q-Function interface."""
pass
| 115 | 15.571429 | 54 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/plotter/__init__.py | from garage.plotter.plotter import Plotter
__all__ = ['Plotter']
| 66 | 15.75 | 42 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/plotter/plotter.py | import atexit
from collections import namedtuple
from enum import Enum
from multiprocessing import JoinableQueue
from multiprocessing import Process
import platform
from threading import Thread
import numpy as np
from garage.sampler.utils import rollout
__all__ = ['Plotter']
class Op(Enum):
STOP = 0
UPDATE = 1
DEMO = 2
Message = namedtuple('Message', ['op', 'args', 'kwargs'])
class Plotter:
# Static variable used to disable the plotter
enable = True
# List containing all plotters instantiated in the process
__plotters = []
def __init__(self, standalone=False):
Plotter.__plotters.append(self)
self._process = None
self._queue = None
def _worker_start(self):
env = None
policy = None
max_length = None
initial_rollout = True
try:
# Each iteration will process ALL messages currently in the
# queue
while True:
msgs = {}
# If true, block and yield processor
if initial_rollout:
msg = self._queue.get()
msgs[msg.op] = msg
# Only fetch the last message of each type
while not self._queue.empty():
msg = self._queue.get()
msgs[msg.op] = msg
else:
# Only fetch the last message of each type
while not self._queue.empty():
msg = self._queue.get_nowait()
msgs[msg.op] = msg
if Op.STOP in msgs:
break
elif Op.UPDATE in msgs:
env, policy = msgs[Op.UPDATE].args
elif Op.DEMO in msgs:
param_values, max_length = msgs[Op.DEMO].args
policy.set_param_values(param_values)
initial_rollout = False
rollout(
env,
policy,
max_path_length=max_length,
animated=True,
speedup=5)
else:
if max_length:
rollout(
env,
policy,
max_path_length=max_length,
animated=True,
speedup=5)
except KeyboardInterrupt:
pass
def close(self):
if not Plotter.enable:
return
if self._process and self._process.is_alive():
while not self._queue.empty():
self._queue.get()
self._queue.task_done()
self._queue.put(Message(op=Op.STOP, args=None, kwargs=None))
self._queue.close()
self._process.join()
@staticmethod
def disable():
"""Disable all instances of the Plotter class."""
Plotter.enable = False
@staticmethod
def get_plotters():
return Plotter.__plotters
def init_worker(self):
if not Plotter.enable:
return
self._queue = JoinableQueue()
if ('Darwin' in platform.platform()):
self._process = Thread(target=self._worker_start)
else:
self._process = Process(target=self._worker_start)
self._process.daemon = True
self._process.start()
atexit.register(self.close)
def init_plot(self, env, policy):
if not Plotter.enable:
return
if not (self._process and self._queue):
self.init_worker()
# Needed in order to draw glfw window on the main thread
if ('Darwin' in platform.platform()):
rollout(
env, policy, max_path_length=np.inf, animated=True, speedup=5)
self._queue.put(Message(op=Op.UPDATE, args=(env, policy), kwargs=None))
def update_plot(self, policy, max_length=np.inf):
if not Plotter.enable:
return
self._queue.put(
Message(
op=Op.DEMO,
args=(policy.get_param_values(), max_length),
kwargs=None))
| 4,249 | 29.57554 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/replay_buffer/__init__.py | """Replay buffers.
The replay buffer primitives can be used for RL algorithms.
"""
from garage.replay_buffer.her_replay_buffer import HERReplayBuffer
from garage.replay_buffer.path_buffer import PathBuffer
from garage.replay_buffer.replay_buffer import ReplayBuffer
__all__ = ['ReplayBuffer', 'HERReplayBuffer', 'PathBuffer']
| 328 | 31.9 | 66 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/replay_buffer/her_replay_buffer.py | """This module implements a Hindsight Experience Replay (HER).
See: https://arxiv.org/abs/1707.01495.
"""
import copy
import numpy as np
from garage.replay_buffer.path_buffer import PathBuffer
class HERReplayBuffer(PathBuffer):
"""Replay buffer for HER (Hindsight Experience Replay).
It constructs hindsight examples using future strategy.
Args:
replay_k (int): Number of HER transitions to add for each regular
transition. Setting this to 0 means that no HER replays will
be added.
reward_fn (callable): Function to re-compute the reward with
substituted goals.
capacity_in_transitions (int): total size of transitions in the buffer.
env_spec (garage.envs.EnvSpec): Environment specification.
"""
def __init__(self, replay_k, reward_fn, capacity_in_transitions, env_spec):
self._replay_k = replay_k
self._reward_fn = reward_fn
self._env_spec = env_spec
if not float(replay_k).is_integer() or replay_k < 0:
raise ValueError('replay_k must be an integer and >= 0.')
super().__init__(capacity_in_transitions)
def _sample_her_goals(self, path, transition_idx):
"""Samples HER goals from the given path.
Goals are randomly sampled starting from the index after
transition_idx in the given path.
Args:
path (dict[str, np.ndarray]): A dict containing the transition
keys, where each key contains an ndarray of shape
:math:`(T, S^*)`.
transition_idx (int): index of the current transition. Only
transitions after the current transitions will be randomly
sampled for HER goals.
Returns:
np.ndarray: A numpy array of HER goals with shape
(replay_k, goal_dim).
"""
goal_indexes = np.random.randint(transition_idx + 1,
len(path['observations']),
size=self._replay_k)
return [
goal['achieved_goal']
for goal in np.asarray(path['observations'])[goal_indexes]
]
def _flatten_dicts(self, path):
for key in ['observations', 'next_observations']:
if not isinstance(path[key], dict):
path[key] = self._env_spec.observation_space.flatten_n(
path[key])
else:
path[key] = self._env_spec.observation_space.flatten(path[key])
def add_path(self, path):
"""Adds a path to the replay buffer.
For each transition in the given path except the last one,
replay_k HER transitions will be added to the buffer in addition
to the one in the path. The last transition is added without
sampling additional HER goals.
Args:
path(dict[str, np.ndarray]): Each key in the dict must map
to a np.ndarray of shape :math:`(T, S^*)`.
"""
obs_space = self._env_spec.observation_space
if not isinstance(path['observations'][0], dict):
# unflatten dicts if they've been flattened
path['observations'] = obs_space.unflatten_n(path['observations'])
path['next_observations'] = (obs_space.unflatten_n(
path['next_observations']))
# create HER transitions and add them to the buffer
for idx in range(path['actions'].shape[0] - 1):
transition = {key: sample[idx] for key, sample in path.items()}
her_goals = self._sample_her_goals(path, idx)
# create replay_k transitions using the HER goals
for goal in her_goals:
t_new = copy.deepcopy(transition)
a_g = t_new['next_observations']['achieved_goal']
t_new['rewards'] = np.array(self._reward_fn(a_g, goal, None))
t_new['observations']['desired_goal'] = goal
t_new['next_observations']['desired_goal'] = copy.deepcopy(
goal)
t_new['terminals'] = np.array(False)
# flatten the observation dicts now that we're done with them
self._flatten_dicts(t_new)
for key in t_new.keys():
t_new[key] = t_new[key].reshape(1, -1)
# Since we're using a PathBuffer, add each new transition
# as its own path.
super().add_path(t_new)
self._flatten_dicts(path)
super().add_path(path)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
self.__dict__ = state
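# Illustrative sketch of the "future" relabelling strategy described above.
# For the transition at index t, replacement goals are drawn from achieved
# goals observed later in the same path, and the reward is recomputed against
# each new goal. The achieved goals and reward_fn below are hypothetical
# stand-ins for the dict observations and reward function the buffer expects.
if __name__ == '__main__':
    achieved_goals = np.linspace(0.0, 1.0, num=6).reshape(-1, 1)

    def reward_fn(achieved_goal, desired_goal, _info):
        # Sparse goal-reaching reward: 0 near the goal, -1 otherwise.
        dist = np.linalg.norm(achieved_goal - desired_goal)
        return 0.0 if dist < 0.05 else -1.0

    replay_k, t = 2, 1
    future_idx = np.random.randint(t + 1, len(achieved_goals),
                                   size=replay_k)
    for new_goal in achieved_goals[future_idx]:
        print(new_goal, reward_fn(achieved_goals[t + 1], new_goal, None))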
| 4,963 | 34.71223 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/replay_buffer/path_buffer.py | """A replay buffer that efficiently stores and can sample whole paths."""
import collections
import numpy as np
class PathBuffer:
"""A replay buffer that stores and can sample whole paths.
This buffer only stores valid steps, and doesn't require paths to
have a maximum length.
Args:
capacity_in_transitions (int): Total memory allocated for the buffer.
"""
def __init__(self, capacity_in_transitions):
self._capacity = capacity_in_transitions
self._transitions_stored = 0
self._first_idx_of_next_path = 0
# Each path in the buffer has a tuple of two ranges in
# self._path_segments. If the path is stored in a single contiguous
# region of the buffer, the second range will be range(0, 0).
# The "left" side of the deque contains the oldest path.
self._path_segments = collections.deque()
self._buffer = {}
def add_path(self, path):
"""Add a path to the buffer.
Args:
path (dict): A dict of array of shape (path_len, flat_dim).
Raises:
ValueError: If a key is missing from path or path has wrong shape.
"""
for key, buf_arr in self._buffer.items():
path_array = path.get(key, None)
if path_array is None:
raise ValueError('Key {} missing from path.'.format(key))
if (len(path_array.shape) != 2
or path_array.shape[1] != buf_arr.shape[1]):
raise ValueError('Array {} has wrong shape.'.format(key))
path_len = self._get_path_length(path)
first_seg, second_seg = self._next_path_segments(path_len)
# Remove paths which will overlap with this one.
while (self._path_segments and self._segments_overlap(
first_seg, self._path_segments[0][0])):
self._path_segments.popleft()
while (self._path_segments and self._segments_overlap(
second_seg, self._path_segments[0][0])):
self._path_segments.popleft()
self._path_segments.append((first_seg, second_seg))
for key, array in path.items():
buf_arr = self._get_or_allocate_key(key, array)
# numpy doesn't special case range indexing, so it's very slow.
# Slice manually instead, which is faster than any other method.
# pylint: disable=invalid-slice-index
buf_arr[first_seg.start:first_seg.stop] = array[:len(first_seg)]
buf_arr[second_seg.start:second_seg.stop] = array[len(first_seg):]
if second_seg.stop != 0:
self._first_idx_of_next_path = second_seg.stop
else:
self._first_idx_of_next_path = first_seg.stop
self._transitions_stored = min(self._capacity,
self._transitions_stored + path_len)
def sample_path(self):
"""Sample a single path from the buffer.
Returns:
dict: A dict of arrays of shape (path_len, flat_dim).
"""
path_idx = np.random.randint(len(self._path_segments))
first_seg, second_seg = self._path_segments[path_idx]
first_seg_indices = np.arange(first_seg.start, first_seg.stop)
second_seg_indices = np.arange(second_seg.start, second_seg.stop)
indices = np.concatenate([first_seg_indices, second_seg_indices])
path = {key: buf_arr[indices] for key, buf_arr in self._buffer.items()}
return path
def sample_transitions(self, batch_size):
"""Sample a batch of transitions from the buffer.
Args:
batch_size (int): Number of transitions to sample.
Returns:
dict: A dict of arrays of shape (batch_size, flat_dim).
"""
# idx = np.random.randint(self._transitions_stored, size=batch_size)
idx = np.random.choice(self._transitions_stored, batch_size)
return {key: buf_arr[idx] for key, buf_arr in self._buffer.items()}
def _next_path_segments(self, n_indices):
"""Compute where the next path should be stored.
Args:
n_indices (int): Path length.
Returns:
tuple: Lists of indices where path should be stored.
Raises:
ValueError: If path length is greater than the size of buffer.
"""
if n_indices > self._capacity:
raise ValueError('Path is too long to store in buffer.')
start = self._first_idx_of_next_path
end = start + n_indices
if end > self._capacity:
second_end = end - self._capacity
return (range(start, self._capacity), range(0, second_end))
else:
return (range(start, end), range(0, 0))
def _get_or_allocate_key(self, key, array):
"""Get or allocate key in the buffer.
Args:
key (str): Key in buffer.
array (numpy.ndarray): Array corresponding to key.
Returns:
numpy.ndarray: A NumPy array corresponding to key in the buffer.
"""
buf_arr = self._buffer.get(key, None)
if buf_arr is None:
buf_arr = np.zeros((self._capacity, array.shape[1]), array.dtype)
self._buffer[key] = buf_arr
return buf_arr
def clear(self):
"""Clear buffer."""
self._transitions_stored = 0
self._first_idx_of_next_path = 0
self._path_segments.clear()
self._buffer.clear()
@staticmethod
def _get_path_length(path):
"""Get path length.
Args:
path (dict): Path.
Returns:
int: Path length.
Raises:
ValueError: If path is empty or has inconsistent lengths.
"""
length_key = None
length = None
for key, value in path.items():
if length is None:
length = len(value)
length_key = key
elif len(value) != length:
raise ValueError('path has inconsistent lengths between '
'{!r} and {!r}.'.format(length_key, key))
if not length:
raise ValueError('Nothing in path')
return length
@staticmethod
def _segments_overlap(seg_a, seg_b):
"""Compute if two segments overlap.
Args:
seg_a (range): List of indices of the first segment.
seg_b (range): List of indices of the second segment.
Returns:
bool: True iff the input ranges overlap in at least one index.
"""
# Empty segments never overlap.
if not seg_a or not seg_b:
return False
first = seg_a
second = seg_b
if seg_b.start < seg_a.start:
first, second = seg_b, seg_a
assert first.start <= second.start
return first.stop > second.start
@property
def n_transitions_stored(self):
"""Return the size of the replay buffer.
Returns:
int: Size of the current replay buffer.
"""
return int(self._transitions_stored)
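# Minimal usage sketch (hypothetical path arrays). Every value in a path must
# be 2-D with shape (path_len, flat_dim); once capacity_in_transitions is
# exceeded, the buffer wraps around and evicts whole old paths.
if __name__ == '__main__':
    buffer = PathBuffer(capacity_in_transitions=100)
    path = {
        'observations': np.random.randn(20, 3),
        'actions': np.random.randn(20, 2),
        'rewards': np.random.randn(20, 1),
    }
    buffer.add_path(path)
    print(buffer.n_transitions_stored)                     # 20
    print(buffer.sample_transitions(8)['actions'].shape)   # (8, 2)
    print(buffer.sample_path()['observations'].shape)      # (20, 3)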
| 7,114 | 33.877451 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/replay_buffer/replay_buffer.py | """This module implements a replay buffer memory.
A replay buffer is an important technique in reinforcement learning. It
stores transitions in a memory buffer of fixed size. When the buffer is
full, the oldest memories are discarded. At each step, a batch of memories
is sampled from the buffer to update the agent's parameters. In short, the
replay buffer breaks temporal correlations and thus benefits RL algorithms.
"""
import abc
from abc import abstractmethod
import numpy as np
class ReplayBuffer(metaclass=abc.ABCMeta):
"""Abstract class for Replay Buffer.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
size_in_transitions (int): total size of transitions in the buffer
time_horizon (int): time horizon of rollout.
"""
def __init__(self, env_spec, size_in_transitions, time_horizon):
del env_spec
self._current_size = 0
self._current_ptr = 0
self._n_transitions_stored = 0
self._time_horizon = time_horizon
self._size_in_transitions = size_in_transitions
self._size = size_in_transitions // time_horizon
self._initialized_buffer = False
self._buffer = {}
self._episode_buffer = {}
def store_episode(self):
"""Add an episode to the buffer."""
episode_buffer = self._convert_episode_to_batch_major()
rollout_batch_size = len(episode_buffer['observation'])
idx = self._get_storage_idx(rollout_batch_size)
for key in self._buffer:
self._buffer[key][idx] = episode_buffer[key]
self._n_transitions_stored = min(
self._size_in_transitions, self._n_transitions_stored +
self._time_horizon * rollout_batch_size)
@abstractmethod
def sample(self, batch_size):
"""Sample a transition of batch_size.
Args:
batch_size(int): The number of transitions to be sampled.
"""
raise NotImplementedError
def add_transition(self, **kwargs):
"""Add one transition into the replay buffer.
Args:
kwargs (dict(str, [numpy.ndarray])): Dictionary that holds
the transitions.
"""
transition = {k: [v] for k, v in kwargs.items()}
self.add_transitions(**transition)
def add_transitions(self, **kwargs):
"""Add multiple transitions into the replay buffer.
A transition contains one or multiple entries, e.g.
observation, action, reward, terminal and next_observation.
The same entry of all the transitions are stacked, e.g.
{'observation': [obs1, obs2, obs3]} where obs1 is one
numpy.ndarray observation from the environment.
Args:
kwargs (dict(str, [numpy.ndarray])): Dictionary that holds
the transitions.
"""
if not self._initialized_buffer:
self._initialize_buffer(**kwargs)
for key, value in kwargs.items():
self._episode_buffer[key].append(value)
if len(self._episode_buffer['observation']) == self._time_horizon:
self.store_episode()
for key in self._episode_buffer:
self._episode_buffer[key].clear()
def _initialize_buffer(self, **kwargs):
for key, value in kwargs.items():
self._episode_buffer[key] = list()
values = np.array(value)
self._buffer[key] = np.zeros(
[self._size, self._time_horizon, *values.shape[1:]],
dtype=values.dtype)
self._initialized_buffer = True
def _get_storage_idx(self, size_increment=1):
"""Get the storage index for the episode to add into the buffer.
Args:
size_increment (int): The number of storage indices that new
transitions will be placed in.
Returns:
numpy.ndarray: The indices at which to store size_increment transitions.
"""
if self._current_size + size_increment <= self._size:
idx = np.arange(self._current_size,
self._current_size + size_increment)
elif self._current_size < self._size:
overflow = size_increment - (self._size - self._current_size)
idx_a = np.arange(self._current_size, self._size)
idx_b = np.arange(0, overflow)
idx = np.concatenate([idx_a, idx_b])
self._current_ptr = overflow
else:
if self._current_ptr + size_increment <= self._size:
idx = np.arange(self._current_ptr,
self._current_ptr + size_increment)
self._current_ptr += size_increment
else:
overflow = size_increment - (self._size - self._current_size)
idx_a = np.arange(self._current_ptr, self._size)
idx_b = np.arange(0, overflow)
idx = np.concatenate([idx_a, idx_b])
self._current_ptr = overflow
# Update replay size
self._current_size = min(self._size,
self._current_size + size_increment)
if size_increment == 1:
idx = idx[0]
return idx
def _convert_episode_to_batch_major(self):
"""Convert the shape of episode_buffer.
episode_buffer: {time_horizon, algo.rollout_batch_size, flat_dim}.
buffer: {size, time_horizon, flat_dim}.
Returns:
dict: Transitions that have been formatted to fit properly in this
replay buffer.
"""
transitions = {}
for key in self._episode_buffer:
val = np.array(self._episode_buffer[key])
transitions[key] = val.swapaxes(0, 1)
return transitions
@property
def full(self):
"""Whether the buffer is full.
Returns:
bool: True if the buffer has reached its maximum size.
False otherwise.
"""
return self._current_size == self._size
@property
def n_transitions_stored(self):
"""Return the size of the replay buffer.
Returns:
int: Size of the current replay buffer.
"""
return self._n_transitions_stored
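# Illustrative sketch of a minimal concrete subclass (hypothetical keys and
# shapes). sample() is abstract, so a uniform sampler over stored episodes is
# defined just to exercise add_transition() and store_episode(); env_spec is
# unused by the base class, so None suffices here.
if __name__ == '__main__':

    class UniformReplayBuffer(ReplayBuffer):

        def sample(self, batch_size):
            ep = np.random.randint(self._current_size, size=batch_size)
            step = np.random.randint(self._time_horizon, size=batch_size)
            return {k: v[ep, step] for k, v in self._buffer.items()}

    buf = UniformReplayBuffer(env_spec=None,
                              size_in_transitions=100,
                              time_horizon=5)
    for _ in range(10):  # two full episodes of length time_horizon
        buf.add_transition(observation=np.zeros(3), action=np.zeros(2))
    print(buf.n_transitions_stored)            # 10
    print(buf.sample(4)['observation'].shape)  # (4, 3)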
| 6,268 | 32.886486 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/__init__.py | """Samplers which run agents in environments."""
from garage.sampler.batch_sampler import BatchSampler
from garage.sampler.default_worker import DefaultWorker
from garage.sampler.is_sampler import ISSampler
from garage.sampler.local_sampler import LocalSampler
from garage.sampler.multiprocessing_sampler import MultiprocessingSampler
from garage.sampler.off_policy_vectorized_sampler import (
OffPolicyVectorizedSampler)
from garage.sampler.on_policy_vectorized_sampler import (
OnPolicyVectorizedSampler)
from garage.sampler.parallel_vec_env_executor import ParallelVecEnvExecutor
from garage.sampler.sampler import Sampler
from garage.sampler.stateful_pool import singleton_pool
from garage.sampler.vec_env_executor import VecEnvExecutor
from garage.sampler.vec_worker import VecWorker
from garage.sampler.worker import Worker
from garage.sampler.worker_factory import WorkerFactory
__all__ = [
'BatchSampler', 'ISSampler', 'Sampler', 'singleton_pool', 'LocalSampler',
'MultiprocessingSampler', 'ParallelVecEnvExecutor',
'VecEnvExecutor', 'VecWorker', 'OffPolicyVectorizedSampler',
'OnPolicyVectorizedSampler', 'WorkerFactory', 'Worker', 'DefaultWorker'
]
| 1,185 | 44.615385 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/batch_sampler.py | """Class with batch-based sampling."""
import warnings
from garage.sampler import parallel_sampler
from garage.sampler.sampler_deprecated import BaseSampler
from garage.sampler.utils import truncate_paths
class BatchSampler(BaseSampler):
"""Class with batch-based sampling.
Args:
algo (garage.np.algos.RLAlgorithm): The algorithm.
env (gym.Env): The environment.
"""
def __init__(self, algo, env):
super().__init__(algo, env)
warnings.warn(
DeprecationWarning(
'BatchSampler is deprecated, and will be removed in the next '
'release. Please use one of the samplers which implements '
'garage.sampler.Sampler, such as LocalSampler.'))
def start_worker(self):
"""Start workers."""
parallel_sampler.populate_task(self.env,
self.algo.policy,
scope=self.algo.scope)
def shutdown_worker(self):
"""Shutdown workers."""
parallel_sampler.terminate_task(scope=self.algo.scope)
def obtain_samples(self, itr, batch_size=None, whole_paths=True):
"""Sample the policy for new trajectories.
Args:
itr (int): Number of iteration.
batch_size (int): Number of environment steps in one batch.
whole_paths (bool): Whether to use whole path or truncated.
Returns:
list[dict]: A list of paths.
"""
if not batch_size:
batch_size = self.algo.max_path_length
cur_params = self.algo.policy.get_param_values()
paths = parallel_sampler.sample_paths(
policy_params=cur_params,
max_samples=batch_size,
max_path_length=self.algo.max_path_length,
scope=self.algo.scope,
)
return paths if whole_paths else truncate_paths(paths, batch_size)
| 1,931 | 31.2 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/default_worker.py | """Default Worker class."""
from collections import defaultdict
import gym
import numpy as np
from garage import TrajectoryBatch
from garage.experiment import deterministic
from garage.sampler.env_update import EnvUpdate
from garage.sampler.worker import Worker
class DefaultWorker(Worker):
"""Initialize a worker.
Args:
seed(int): The seed to use to initialize random number generators.
max_path_length(int or float): The maximum length paths which will
be sampled. Can be (floating point) infinity.
worker_number(int): The number of the worker where this update is
occurring. This argument is used to set a different seed for each
worker.
Attributes:
agent(Policy or None): The worker's agent.
env(gym.Env or None): The worker's environment.
"""
def __init__(
self,
*, # Require passing by keyword, since everything's an int.
seed,
max_path_length,
worker_number):
super().__init__(seed=seed,
max_path_length=max_path_length,
worker_number=worker_number)
self.agent = None
self.env = None
self._observations = []
self._last_observations = []
self._actions = []
self._rewards = []
self._terminals = []
self._lengths = []
self._agent_infos = defaultdict(list)
self._env_infos = defaultdict(list)
self._prev_obs = None
self._path_length = 0
self.worker_init()
def worker_init(self):
"""Initialize a worker."""
if self._seed is not None:
deterministic.set_seed(self._seed + self._worker_number)
def update_agent(self, agent_update):
"""Update an agent, assuming it implements garage.Policy.
Args:
agent_update (np.ndarray or dict or garage.Policy): If a
tuple, dict, or np.ndarray, these should be parameters to
agent, which should have been generated by calling
`policy.get_param_values`. Alternatively, a policy itself. Note
that other implementations of `Worker` may take different types
for this parameter.
"""
if isinstance(agent_update, (dict, tuple, np.ndarray)):
self.agent.set_param_values(agent_update)
elif agent_update is not None:
self.agent = agent_update
def update_env(self, env_update):
"""Use any non-None env_update as a new environment.
A simple env update function. If env_update is not None, it should be
the complete new environment.
This allows changing environments by passing the new environment as
`env_update` into `obtain_samples`.
Args:
env_update(gym.Env or EnvUpdate or None): The environment to
replace the existing env with. Note that other implementations
of `Worker` may take different types for this parameter.
Raises:
TypeError: If env_update is not one of the documented types.
"""
if env_update is not None:
if isinstance(env_update, EnvUpdate):
self.env = env_update(self.env)
elif isinstance(env_update, gym.Env):
if self.env is not None:
self.env.close()
self.env = env_update
else:
raise TypeError('Unknown environment update type.')
def start_rollout(self):
"""Begin a new rollout."""
self._path_length = 0
self._prev_obs = self.env.reset()
self.agent.reset()
def step_rollout(self):
"""Take a single time-step in the current rollout.
Returns:
bool: True iff the path is done, either due to the environment
indicating termination or due to reaching `max_path_length`.
"""
if self._path_length < self._max_path_length:
a, agent_info = self.agent.get_action(self._prev_obs)
next_o, r, d, env_info = self.env.step(a)
self._observations.append(self._prev_obs)
self._rewards.append(r)
self._actions.append(a)
for k, v in agent_info.items():
self._agent_infos[k].append(v)
for k, v in env_info.items():
self._env_infos[k].append(v)
self._path_length += 1
self._terminals.append(d)
if not d:
self._prev_obs = next_o
return False
self._lengths.append(self._path_length)
self._last_observations.append(self._prev_obs)
return True
def collect_rollout(self):
"""Collect the current rollout, clearing the internal buffer.
Returns:
garage.TrajectoryBatch: A batch of the trajectories completed since
the last call to collect_rollout().
"""
observations = self._observations
self._observations = []
last_observations = self._last_observations
self._last_observations = []
actions = self._actions
self._actions = []
rewards = self._rewards
self._rewards = []
terminals = self._terminals
self._terminals = []
env_infos = self._env_infos
self._env_infos = defaultdict(list)
agent_infos = self._agent_infos
self._agent_infos = defaultdict(list)
for k, v in agent_infos.items():
agent_infos[k] = np.asarray(v)
for k, v in env_infos.items():
env_infos[k] = np.asarray(v)
lengths = self._lengths
self._lengths = []
return TrajectoryBatch(self.env.spec, np.asarray(observations),
np.asarray(last_observations),
np.asarray(actions), np.asarray(rewards),
np.asarray(terminals), dict(env_infos),
dict(agent_infos), np.asarray(lengths,
dtype='i'))
def rollout(self):
"""Sample a single rollout of the agent in the environment.
Returns:
garage.TrajectoryBatch: The collected trajectory.
"""
self.start_rollout()
while not self.step_rollout():
pass
return self.collect_rollout()
def shutdown(self):
"""Close the worker's environment."""
self.env.close()
| 6,576 | 34.360215 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/env_update.py | """A callable that "updates" an environment."""
import warnings
class EnvUpdate:
"""A callable that "updates" an environment.
Implementors of this interface can be called on environments to update
them. The passed in environment should then be ignored, and the returned
one used instead.
Since no new environment needs to be passed in, this type can also
be used to construct new environments.
"""
# pylint: disable=too-few-public-methods
def __call__(self, old_env=None):
"""Update an environment.
Note that this implementation does nothing.
Args:
old_env (gym.Env or None): Previous environment. Should not be used
after being passed in, and should not be closed.
Returns:
gym.Env: The new, updated environment.
"""
return old_env
class NewEnvUpdate(EnvUpdate):
"""`~EnvUpdate` that creates a new environment every update.
Args:
env_constructor (Callable[gym.Env]): Callable that constructs an
environment.
"""
# pylint: disable=too-few-public-methods
def __init__(self, env_constructor):
self._env_constructor = env_constructor
def __call__(self, old_env=None):
"""Update an environment.
Args:
old_env (gym.Env or None): Previous environment. Should not be used
after being passed in, and should not be closed.
Returns:
gym.Env: The new, updated environment.
"""
if old_env:
old_env.close()
return self._env_constructor()
class SetTaskUpdate(EnvUpdate):
"""`~EnvUpdate` that calls set_task with the provided task.
Args:
env_constructor (Callable[gym.Env]): Callable that constructs an
environment.
task (object): Opaque task type.
"""
# pylint: disable=too-few-public-methods
def __init__(self, env_constructor, task):
self._env_constructor = env_constructor
self._task = task
def __call__(self, old_env=None):
"""Update an environment.
Args:
old_env (gym.Env or None): Previous environment. Should not be used
after being passed in, and should not be closed.
Returns:
gym.Env: The new, updated environment.
"""
if old_env is None:
old_env = self._env_constructor()
old_env.set_task(self._task)
return old_env
class ExistingEnvUpdate(EnvUpdate):
"""`~EnvUpdate` that carries an already constructed environment.
Args:
env (gym.Env): The environment.
"""
def __init__(self, env):
self._env = env
def __call__(self, old_env=None):
"""Update an environment.
This implementation does not close the old environment.
Args:
old_env (gym.Env or None): Previous environment. Should not be used
after being passed in, and should not be closed.
Returns:
gym.Env: The new, updated environment.
"""
return self._env
def __getstate__(self):
"""Get the pickle state.
Returns:
dict: The pickled state.
"""
warnings.warn('ExistingEnvUpdate is generally not the most efficient '
'method of transmitting environments to other '
'processes.')
return self.__dict__
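# Minimal usage sketch (hypothetical environment type). Workers call the
# update they receive on their current environment and keep the returned one;
# _MyTaskEnv and its set_task()/close() methods stand in for a real
# multi-task gym.Env.
if __name__ == '__main__':

    class _MyTaskEnv:

        def __init__(self):
            self.task = None

        def set_task(self, task):
            self.task = task

        def close(self):
            pass

    env = SetTaskUpdate(_MyTaskEnv, task='task-3')(old_env=None)
    print(env.task)                      # task-3
    env = NewEnvUpdate(_MyTaskEnv)(env)  # close old env, build a fresh one
    print(env.task)                      # None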
| 3,466 | 24.681481 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/is_sampler.py | """Importance sampling sampler."""
import copy
from math import exp
from math import log
import random
import numpy as np
from numpy import var
from garage.sampler.batch_sampler import BatchSampler
from garage.sampler.utils import truncate_paths
tf = False
try:
import tensorflow as tf
import tensorflow_probability as tfp
except ImportError:
pass
class ISSampler(BatchSampler):
"""Importance sampling sampler.
Sampler which alternates between live sampling iterations using
BatchSampler and importance sampling iterations.
Args:
algo (garage.np.algos.RLAlgorithm): An algorithm instance.
env (garage.envs.GarageEnv): An environment instance.
n_backtrack (int): Number of past policies to update from.
If None, it uses all past policies.
n_is_pretrain (int): Number of importance sampling iterations to
perform in beginning of training
init_is (bool): Set initial iteration (after pretrain) an
importance sampling iteration.
skip_is_itrs (bool): Do not do any importance sampling
iterations (after pretrain).
hist_variance_penalty (int): Penalize variance of historical policy.
max_is_ratio (int): Maximum allowed importance sampling ratio.
ess_threshold (int): Minimum effective sample size required.
randomize_draw (bool): Whether to randomize important samples.
"""
def __init__(self,
algo,
env,
n_backtrack=None,
n_is_pretrain=0,
init_is=0,
skip_is_itrs=False,
hist_variance_penalty=0.0,
max_is_ratio=0,
ess_threshold=0,
randomize_draw=False):
self.n_backtrack = n_backtrack
self.n_is_pretrain = n_is_pretrain
self.skip_is_itrs = skip_is_itrs
self.hist_variance_penalty = hist_variance_penalty
self.max_is_ratio = max_is_ratio
self.ess_threshold = ess_threshold
self.randomize_draw = randomize_draw
self._hist = []
self._is_itr = init_is
super().__init__(algo, env)
@property
def history(self):
"""list: History of policies.
History of policies that have interacted with the environment and the
data from interaction episode(s).
"""
return self._hist
def add_history(self, policy_distribution, paths):
"""Store policy distribution and paths in history.
Args:
policy_distribution (garage.tf.distributions.Distribution): Policy distribution. # noqa: E501
paths (list): Paths.
"""
self._hist.append((policy_distribution, paths))
def get_history_list(self, n_past=None):
"""Get list of (distribution, data) tuples from history.
Args:
n_past (int): Number of past policies to update from.
If None, it uses all past policies.
Returns:
list: A list of paths.
"""
if n_past is None:
return self._hist
return self._hist[-min(n_past, len(self._hist)):]
def obtain_samples(self, itr, batch_size=None, whole_paths=True):
"""Collect samples for the given iteration number.
Args:
itr (int): Number of iteration.
batch_size (int): Number of environment steps in one batch.
whole_paths (bool): Whether to use whole path or truncated.
Returns:
list[dict]: A list of paths.
"""
# Importance sampling for first self.n_is_pretrain iterations
if itr < self.n_is_pretrain:
paths = self._obtain_is_samples(itr, batch_size, whole_paths)
return paths
# Alternate between importance sampling and live sampling
if self._is_itr and not self.skip_is_itrs:
paths = self._obtain_is_samples(itr, batch_size, whole_paths)
else:
paths = super().obtain_samples(itr, batch_size, whole_paths)
if not self.skip_is_itrs:
self.add_history(self.algo.policy.distribution, paths)
self._is_itr = (self._is_itr + 1) % 2
return paths
def _obtain_is_samples(self, _itr, batch_size=None, whole_paths=True):
"""Collect IS samples for the given iteration number.
Args:
_itr (int): Number of iteration.
batch_size (int): Number of batch size.
whole_paths (bool): Whether to use whole path or truncated.
Returns:
list: A list of paths.
"""
if batch_size is None:
batch_size = self.algo.max_path_length
paths = []
for hist_policy_distribution, hist_paths in self.get_history_list(
self.n_backtrack):
h_paths = self._sample_isweighted_paths(
policy=self.algo.policy,
hist_policy_distribution=hist_policy_distribution,
max_samples=batch_size,
paths=hist_paths,
hist_variance_penalty=self.hist_variance_penalty,
max_is_ratio=self.max_is_ratio,
ess_threshold=self.ess_threshold,
)
paths.extend(h_paths)
if len(paths) > batch_size:
paths = random.sample(paths, batch_size)
return paths if whole_paths else truncate_paths(paths, batch_size)
def _sample_isweighted_paths(self,
policy,
hist_policy_distribution,
max_samples,
paths=None,
hist_variance_penalty=0.0,
max_is_ratio=10,
ess_threshold=0):
"""Return sample of IS weighted paths.
Args:
policy (object): The policy.
            hist_policy_distribution (object): Distribution of the historical
                policy that generated the given paths.
            max_samples (int): Maximum number of samples to draw.
            paths (list): Historical sample paths.
            hist_variance_penalty (float): Variance penalty applied to the
                historical policy distribution.
            max_is_ratio (float): Maximum allowed importance sampling ratio.
            ess_threshold (float): Minimum effective sample size required.
Returns:
list: A list of paths.
"""
if not paths:
return []
n_samples = min(len(paths), max_samples)
samples = None
if self.randomize_draw:
samples = random.sample(paths, n_samples)
elif paths:
if n_samples == len(paths):
samples = paths
else:
start = random.randint(0, len(paths) - n_samples)
samples = paths[start:start + n_samples]
# make duplicate of samples so we don't permanently alter historical
# data
samples = copy.deepcopy(samples)
if ess_threshold > 0:
is_weights = []
dist1 = policy.distribution
dist2 = hist_policy_distribution
for path in samples:
_, agent_infos = policy.get_actions(path['observations'])
if hist_variance_penalty > 0:
# pylint: disable=protected-access
dist2 = tfp.distributions.MultivariateNormalDiag(
loc=dist2.loc,
scale_diag=dist2.scale._diag +
log(1.0 + hist_variance_penalty))
path['agent_infos'] = agent_infos
# compute importance sampling weight
loglike_p = tf.compat.v1.get_default_session().run(
dist1.log_prob(path['actions']),
feed_dict={
policy.model.input: np.expand_dims(path['observations'], 1)
})
loglike_hp = tf.compat.v1.get_default_session().run(
dist2.log_prob(path['actions']),
feed_dict={
policy.model.input: np.expand_dims(path['observations'], 1)
})
is_ratio = exp(np.sum(loglike_p) - np.sum(loglike_hp))
# thresholding knobs
if max_is_ratio > 0:
is_ratio = min(is_ratio, max_is_ratio)
if ess_threshold > 0:
is_weights.append(is_ratio)
# apply importance sampling weight
path['rewards'] *= is_ratio
if ess_threshold:
# Effective sample size estimate.
# Kong, Augustine. "A note on importance sampling using
# standardized weights." University of Chicago, Dept.
# of Statistics, Tech. Rep 348 (1992).
if len(is_weights) / (1 + var(is_weights)) < ess_threshold:
return []
return samples
class __FakeISSampler:
# noqa: E501; pylint: disable=missing-param-doc,too-few-public-methods,no-method-argument
"""Raises an ImportError for environments without TensorFlow."""
def __init__(*args, **kwargs):
raise ImportError(
'ISSampler requires TensorFlow. To use it, please install '
'TensorFlow.')
if not tf:
ISSampler = __FakeISSampler # noqa: F811
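# A small illustrative sketch (not from the original garage source) of the
# effective sample size test used above, len(w) / (1 + var(w)): with nearly
# uniform importance weights the ESS stays close to the number of paths,
# while a few dominant weights shrink it. The weight values are arbitrary.
def _example_effective_sample_size():
    """Compare the ESS of uniform and skewed importance weights."""
    uniform_w = [1.0, 1.0, 1.0, 1.0]
    skewed_w = [3.7, 0.1, 0.1, 0.1]
    ess_uniform = len(uniform_w) / (1 + var(uniform_w))  # exactly 4.0
    ess_skewed = len(skewed_w) / (1 + var(skewed_w))  # roughly 1.2
    return ess_uniform, ess_skewed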
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/local_sampler.py
"""Sampler that runs workers in the main process."""
import copy
from garage import TrajectoryBatch
from garage.sampler.sampler import Sampler
class LocalSampler(Sampler):
"""Sampler that runs workers in the main process.
This is probably the simplest possible sampler. It's called the "Local"
sampler because it runs everything in the same process and thread as where
it was called from.
Args:
worker_factory(WorkerFactory): Pickleable factory for creating
workers. Should be transmitted to other processes / nodes where
work needs to be done, then workers should be constructed
there.
agents(Agent or List[Agent]): Agent(s) to use to perform rollouts.
If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
envs(gym.Env or List[gym.Env]): Environment rollouts are performed
in. If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
"""
def __init__(self, worker_factory, agents, envs):
# pylint: disable=super-init-not-called
self._factory = worker_factory
self._agents = worker_factory.prepare_worker_messages(agents)
self._envs = worker_factory.prepare_worker_messages(
envs, preprocess=copy.deepcopy)
self._workers = [
worker_factory(i) for i in range(worker_factory.n_workers)
]
for worker, agent, env in zip(self._workers, self._agents, self._envs):
worker.update_agent(agent)
worker.update_env(env)
@classmethod
def from_worker_factory(cls, worker_factory, agents, envs):
"""Construct this sampler.
Args:
worker_factory(WorkerFactory): Pickleable factory for creating
workers. Should be transmitted to other processes / nodes where
work needs to be done, then workers should be constructed
there.
agents(Agent or List[Agent]): Agent(s) to use to perform rollouts.
If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
envs(gym.Env or List[gym.Env]): Environment rollouts are performed
in. If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
Returns:
Sampler: An instance of `cls`.
"""
return cls(worker_factory, agents, envs)
def _update_workers(self, agent_update, env_update):
"""Apply updates to the workers.
Args:
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
"""
agent_updates = self._factory.prepare_worker_messages(agent_update)
env_updates = self._factory.prepare_worker_messages(
env_update, preprocess=copy.deepcopy)
for worker, agent_up, env_up in zip(self._workers, agent_updates,
env_updates):
worker.update_agent(agent_up)
worker.update_env(env_up)
def obtain_samples(self, itr, num_samples, agent_update, env_update=None):
"""Collect at least a given number transitions (timesteps).
Args:
itr(int): The current iteration number. Using this argument is
deprecated.
num_samples(int): Minimum number of transitions / timesteps to
sample.
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Returns:
garage.TrajectoryBatch: The batch of collected trajectories.
"""
self._update_workers(agent_update, env_update)
batches = []
completed_samples = 0
while True:
for worker in self._workers:
batch = worker.rollout()
completed_samples += len(batch.actions)
batches.append(batch)
if completed_samples >= num_samples:
return TrajectoryBatch.concatenate(*batches)
def obtain_exact_trajectories(self,
n_traj_per_worker,
agent_update,
env_update=None):
"""Sample an exact number of trajectories per worker.
Args:
n_traj_per_worker (int): Exact number of trajectories to gather for
each worker.
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Returns:
TrajectoryBatch: Batch of gathered trajectories. Always in worker
order. In other words, first all trajectories from worker 0,
then all trajectories from worker 1, etc.
"""
self._update_workers(agent_update, env_update)
batches = []
for worker in self._workers:
for _ in range(n_traj_per_worker):
batch = worker.rollout()
batches.append(batch)
return TrajectoryBatch.concatenate(*batches)
def shutdown_worker(self):
"""Shutdown the workers."""
for worker in self._workers:
worker.shutdown()
def __getstate__(self):
"""Get the pickle state.
Returns:
dict: The pickled state.
"""
state = self.__dict__.copy()
# Workers aren't picklable (but WorkerFactory is).
state['_workers'] = None
return state
def __setstate__(self, state):
"""Unpickle the state.
Args:
state (dict): Unpickled state.
"""
self.__dict__.update(state)
self._workers = [
self._factory(i) for i in range(self._factory.n_workers)
]
for worker, agent, env in zip(self._workers, self._agents, self._envs):
worker.update_agent(agent)
worker.update_env(env)
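# Illustrative usage sketch (not from the original garage source), assuming
# `worker_factory`, `policy` and `env` are built elsewhere (for example a
# garage.sampler.WorkerFactory, a garage policy and a GarageEnv).
def _example_local_sampler(worker_factory, policy, env):
    """Collect at least 1000 timesteps with a single in-process worker."""
    sampler = LocalSampler.from_worker_factory(worker_factory, policy, env)
    # `agent_update` is typically the current policy or its parameters;
    # rollouts run in this process until at least `num_samples` steps exist.
    trajectories = sampler.obtain_samples(itr=0,
                                          num_samples=1000,
                                          agent_update=policy)
    sampler.shutdown_worker()
    return trajectories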
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/multiprocessing_sampler.py
"""A multiprocessing sampler which avoids waiting as much as possible."""
from collections import defaultdict
import itertools
import multiprocessing as mp
import queue
import click
import cloudpickle
import setproctitle
from garage import TrajectoryBatch
from garage.sampler.sampler import Sampler
class MultiprocessingSampler(Sampler):
"""Sampler that uses multiprocessing to distribute workers.
Args:
worker_factory(WorkerFactory): Pickleable factory for creating
workers. Should be transmitted to other processes / nodes where
work needs to be done, then workers should be constructed
there.
agents(Agent or List[Agent]): Agent(s) to use to perform rollouts.
If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
envs(gym.Env or List[gym.Env]): Environment rollouts are performed
in. If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
"""
def __init__(self, worker_factory, agents, envs):
# pylint: disable=super-init-not-called
self._factory = worker_factory
self._agents = self._factory.prepare_worker_messages(
agents, cloudpickle.dumps)
self._envs = self._factory.prepare_worker_messages(envs)
self._to_sampler = mp.Queue(2 * self._factory.n_workers)
self._to_worker = [mp.Queue(1) for _ in range(self._factory.n_workers)]
# If we crash from an exception, with full queues, we would rather not
# hang forever, so we would like the process to close without flushing
# the queues.
# That's what cancel_join_thread does.
for q in self._to_worker:
q.cancel_join_thread()
self._workers = [
mp.Process(target=run_worker,
kwargs=dict(
factory=self._factory,
to_sampler=self._to_sampler,
to_worker=self._to_worker[worker_number],
worker_number=worker_number,
agent=self._agents[worker_number],
env=self._envs[worker_number],
),
daemon=False)
for worker_number in range(self._factory.n_workers)
]
self._agent_version = 0
for w in self._workers:
w.start()
@classmethod
def from_worker_factory(cls, worker_factory, agents, envs):
"""Construct this sampler.
Args:
worker_factory(WorkerFactory): Pickleable factory for creating
workers. Should be transmitted to other processes / nodes where
work needs to be done, then workers should be constructed
there.
agents(Agent or List[Agent]): Agent(s) to use to perform rollouts.
If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
envs(gym.Env or List[gym.Env]): Environment rollouts are performed
in. If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
Returns:
Sampler: An instance of `cls`.
"""
return cls(worker_factory, agents, envs)
def _push_updates(self, updated_workers, agent_updates, env_updates):
"""Apply updates to the workers and (re)start them.
Args:
updated_workers(set[int]): Set of workers that don't need to be
updated. Successfully updated workers will be added to this
set.
agent_updates(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_updates(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
"""
for worker_number, q in enumerate(self._to_worker):
if worker_number in updated_workers:
try:
q.put_nowait(('continue', ()))
except queue.Full:
pass
else:
try:
q.put_nowait(('start', (agent_updates[worker_number],
env_updates[worker_number],
self._agent_version)))
updated_workers.add(worker_number)
except queue.Full:
pass
def obtain_samples(self, itr, num_samples, agent_update, env_update=None):
"""Collect at least a given number transitions (timesteps).
Args:
itr(int): The current iteration number. Using this argument is
deprecated.
num_samples(int): Minimum number of transitions / timesteps to
sample.
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Returns:
garage.TrajectoryBatch: The batch of collected trajectories.
Raises:
AssertionError: On internal errors.
"""
del itr
batches = []
completed_samples = 0
self._agent_version += 1
updated_workers = set()
agent_ups = self._factory.prepare_worker_messages(
agent_update, cloudpickle.dumps)
env_ups = self._factory.prepare_worker_messages(env_update)
with click.progressbar(length=num_samples, label='Sampling') as pbar:
while completed_samples < num_samples:
self._push_updates(updated_workers, agent_ups, env_ups)
for _ in range(self._factory.n_workers):
try:
tag, contents = self._to_sampler.get_nowait()
if tag == 'trajectory':
batch, version, worker_n = contents
del worker_n
if version == self._agent_version:
batches.append(batch)
num_returned_samples = batch.lengths.sum()
completed_samples += num_returned_samples
pbar.update(num_returned_samples)
else:
# Receiving paths from previous iterations is
# normal. Potentially, we could gather them
# here, if an off-policy method wants them.
pass
else:
raise AssertionError(
'Unknown tag {} with contents {}'.format(
tag, contents))
except queue.Empty:
pass
for q in self._to_worker:
try:
q.put_nowait(('stop', ()))
except queue.Full:
pass
return TrajectoryBatch.concatenate(*batches)
def obtain_exact_trajectories(self,
n_traj_per_worker,
agent_update,
env_update=None):
"""Sample an exact number of trajectories per worker.
Args:
n_traj_per_worker (int): Exact number of trajectories to gather for
each worker.
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Returns:
TrajectoryBatch: Batch of gathered trajectories. Always in worker
order. In other words, first all trajectories from worker 0,
then all trajectories from worker 1, etc.
Raises:
AssertionError: On internal errors.
"""
self._agent_version += 1
updated_workers = set()
agent_ups = self._factory.prepare_worker_messages(
agent_update, cloudpickle.dumps)
env_ups = self._factory.prepare_worker_messages(env_update)
trajectories = defaultdict(list)
with click.progressbar(length=self._factory.n_workers,
label='Sampling') as pbar:
while any(
len(trajectories[i]) < n_traj_per_worker
for i in range(self._factory.n_workers)):
self._push_updates(updated_workers, agent_ups, env_ups)
tag, contents = self._to_sampler.get()
if tag == 'trajectory':
batch, version, worker_n = contents
if version == self._agent_version:
if len(trajectories[worker_n]) < n_traj_per_worker:
trajectories[worker_n].append(batch)
if len(trajectories[worker_n]) == n_traj_per_worker:
pbar.update(1)
try:
self._to_worker[worker_n].put_nowait(
('stop', ()))
except queue.Full:
pass
else:
raise AssertionError(
'Unknown tag {} with contents {}'.format(
tag, contents))
for q in self._to_worker:
try:
q.put_nowait(('stop', ()))
except queue.Full:
pass
ordered_trajectories = list(
itertools.chain(
*[trajectories[i] for i in range(self._factory.n_workers)]))
return TrajectoryBatch.concatenate(*ordered_trajectories)
def shutdown_worker(self):
"""Shutdown the workers."""
for (q, w) in zip(self._to_worker, self._workers):
# Loop until either the exit message is accepted or the process has
            # closed. This might cause us to block, but it ensures that the
# workers are closed.
while True:
try:
# Set a timeout in case the child process crashed.
q.put(('exit', ()), timeout=1)
break
except queue.Full:
# If the child process has crashed, we're done here.
# Otherwise it should eventually accept our message.
if not w.is_alive():
break
# If this hangs forever, most likely a queue needs
# cancel_join_thread called on it, or a subprocess has tripped the
# "closing dowel with TensorboardOutput blocks forever bug."
w.join()
for q in self._to_worker:
q.close()
self._to_sampler.close()
def __getstate__(self):
"""Get the pickle state.
Returns:
dict: The pickled state.
"""
return dict(
factory=self._factory,
agents=[cloudpickle.loads(agent) for agent in self._agents],
envs=self._envs)
def __setstate__(self, state):
"""Unpickle the state.
Args:
state (dict): Unpickled state.
"""
self.__init__(state['factory'], state['agents'], state['envs'])
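# Illustrative usage sketch (not from the original garage source), assuming
# `worker_factory`, `policy` and `env` are pickleable objects built elsewhere.
def _example_multiprocessing_sampler(worker_factory, policy, env):
    """Collect at least 2000 timesteps using one subprocess per worker."""
    sampler = MultiprocessingSampler.from_worker_factory(
        worker_factory, policy, env)
    batch = sampler.obtain_samples(itr=0,
                                   num_samples=2000,
                                   agent_update=policy)
    sampler.shutdown_worker()
    return batch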
def run_worker(factory, to_worker, to_sampler, worker_number, agent, env):
"""Run the streaming worker state machine.
Starts in the "not streaming" state.
Enters the "streaming" state when the "start" or "continue" message is
received.
While in the "streaming" state, it streams rollouts back to the parent
process.
When it receives a "stop" message, or the queue back to the parent process
is full, it enters the "not streaming" state.
When it receives the "exit" message, it terminates.
Critically, the worker never blocks on sending messages back to the
sampler, to ensure it remains responsive to messages.
Args:
factory(WorkerFactory): Pickleable factory for creating workers. Should
be transmitted to other processes / nodes where work needs to be
done, then workers should be constructed there.
to_worker(multiprocessing.Queue): Queue to send commands to the worker.
to_sampler(multiprocessing.Queue): Queue to send rollouts back to the
sampler.
worker_number(int): Number of this worker.
agent(Agent): Agent to use to perform rollouts. If a list is passed
in, it must have length exactly `worker_factory.n_workers`, and
will be spread across the workers.
env(gym.Env): Environment rollouts are performed in. If a list is
passed in, it must have length exactly `worker_factory.n_workers`,
and will be spread across the workers.
Raises:
AssertionError: On internal errors.
"""
# When a python process is closing, multiprocessing Queues attempt to flush
# their contents to the underlying pipe. If the pipe is full, they block
# until someone reads from it. In this case, the to_sampler pipe may be
# full (or nearly full), and never read from, causing this process to hang
# forever. To avoid this, call cancel_join_thread, which will cause the
# data to never be flushed to the pipe, allowing this process to end, and
# the join on this process in the parent process to complete.
# We call this immediately on process start in case this process crashes
    # (usually due to a bug or out-of-memory error in the underlying worker).
to_sampler.cancel_join_thread()
setproctitle.setproctitle('worker:' + setproctitle.getproctitle())
inner_worker = factory(worker_number)
inner_worker.update_agent(cloudpickle.loads(agent))
inner_worker.update_env(env)
version = 0
streaming_samples = False
while True:
if streaming_samples:
# We're streaming, so try to get a message without waiting. If we
# can't, the message is "continue", without any contents.
try:
tag, contents = to_worker.get_nowait()
except queue.Empty:
tag = 'continue'
contents = None
else:
# We're not streaming anymore, so wait for a message.
tag, contents = to_worker.get()
if tag == 'start':
# Update env and policy.
agent_update, env_update, version = contents
inner_worker.update_agent(cloudpickle.loads(agent_update))
inner_worker.update_env(env_update)
streaming_samples = True
elif tag == 'stop':
streaming_samples = False
elif tag == 'continue':
batch = inner_worker.rollout()
try:
to_sampler.put_nowait(
('trajectory', (batch, version, worker_number)))
except queue.Full:
# Either the sampler has fallen far behind the workers, or we
# missed a "stop" message. Either way, stop streaming.
# If the queue becomes empty again, the sampler will send a
# continue (or some other) message.
streaming_samples = False
elif tag == 'exit':
to_worker.close()
to_sampler.close()
inner_worker.shutdown()
return
else:
raise AssertionError('Unknown tag {} with contents {}'.format(
tag, contents))
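# Summary of the (tag, contents) messages exchanged above, plus a small
# illustrative sketch (not from the original garage source). `sampler` is
# assumed to be a MultiprocessingSampler and `policy` the current policy.
#   sampler -> worker: ('start', (agent, env, version)), ('continue', ()),
#                      ('stop', ()), ('exit', ())
#   worker -> sampler: ('trajectory', (batch, version, worker_number))
def _example_exact_trajectories(sampler, policy, n_workers):
    """Gather exactly two trajectories from every worker."""
    batch = sampler.obtain_exact_trajectories(n_traj_per_worker=2,
                                              agent_update=policy)
    # Trajectories come back grouped in worker order, 2 * n_workers in total.
    assert len(batch.lengths) == 2 * n_workers
    return batch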
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/off_policy_vectorized_sampler.py
"""This module implements a Vectorized Sampler used for OffPolicy Algorithms.
It differs from OnPolicyVectorizedSampler in two ways:
- The number of envs is defined by rollout_batch_size. In
    OnPolicyVectorizedSampler, the number of envs can be derived from
    batch_size and max_path_length. But off-policy algorithms usually sample
    transitions from a replay buffer, which only has buffer_batch_size.
- It needs to add transitions to the replay buffer throughout the rollout.
"""
import itertools
import warnings
import cloudpickle
import numpy as np
from garage.experiment import deterministic
from garage.misc import tensor_utils
from garage.sampler.batch_sampler import BatchSampler
from garage.sampler.vec_env_executor import VecEnvExecutor
class OffPolicyVectorizedSampler(BatchSampler):
"""This class implements OffPolicyVectorizedSampler.
Args:
algo (garage.np.RLAlgorithm): Algorithm.
env (garage.envs.GarageEnv): Environment.
n_envs (int): Number of parallel environments managed by sampler.
no_reset (bool): Reset environment between samples or not.
"""
def __init__(self, algo, env, n_envs=None, no_reset=True):
if n_envs is None:
n_envs = int(algo.rollout_batch_size)
super().__init__(algo, env)
self._n_envs = n_envs
self._no_reset = no_reset
self._vec_env = None
self._env_spec = self.env.spec
self._last_obses = None
self._last_uncounted_discount = [0] * n_envs
self._last_running_length = [0] * n_envs
self._last_success_count = [0] * n_envs
warnings.warn(
DeprecationWarning(
                'OffPolicyVectorizedSampler is deprecated, and will be '
'removed in the next release. Please use VecWorker and one of '
'the new samplers which implement garage.sampler.Sampler, '
'such as RaySampler.'))
def start_worker(self):
"""Initialize the sampler."""
n_envs = self._n_envs
envs = [
cloudpickle.loads(cloudpickle.dumps(self.env))
for _ in range(n_envs)
]
# Deterministically set environment seeds based on the global seed.
seed0 = deterministic.get_seed()
if seed0 is not None:
for (i, e) in enumerate(envs):
e.seed(seed0 + i)
self._vec_env = VecEnvExecutor(
envs=envs, max_path_length=self.algo.max_path_length)
def shutdown_worker(self):
"""Terminate workers if necessary."""
self._vec_env.close()
# pylint: disable=too-many-branches, too-many-statements
def obtain_samples(self, itr, batch_size=None, whole_paths=True):
"""Collect samples for the given iteration number.
Args:
itr(int): Iteration number.
batch_size(int): Number of environment interactions in one batch.
whole_paths(bool): Not effective. Only keep here to comply
with base class.
Raises:
ValueError: If the algorithm doesn't have an exploration_policy
field.
Returns:
list: A list of paths.
"""
assert batch_size is not None
paths = []
if not self._no_reset or self._last_obses is None:
obses = self._vec_env.reset()
else:
obses = self._last_obses
completes = np.asarray([True] * self._vec_env.num_envs)
running_paths = [None] * self._vec_env.num_envs
n_samples = 0
policy = self.algo.exploration_policy
if policy is None:
            raise ValueError('OffPolicyVectorizedSampler should only be used '
'with an exploration_policy.')
while n_samples < batch_size:
policy.reset(completes)
obs_space = self.algo.env_spec.observation_space
input_obses = obs_space.flatten_n(obses)
actions, agent_infos = policy.get_actions(input_obses)
next_obses, rewards, dones, env_infos, completes = \
self._vec_env.step(actions)
self._last_obses = next_obses
agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
env_infos = tensor_utils.split_tensor_dict_list(env_infos)
n_samples += len(next_obses)
if agent_infos is None:
agent_infos = [dict() for _ in range(self._vec_env.num_envs)]
if env_infos is None:
env_infos = [dict() for _ in range(self._vec_env.num_envs)]
for (idx, reward, env_info, done, complete, obs, next_obs,
action) in zip(itertools.count(), rewards, env_infos, dones,
completes, obses, next_obses, actions):
if running_paths[idx] is None:
running_paths[idx] = dict(
rewards=[],
observations=[],
next_observations=[],
actions=[],
env_infos=[],
dones=[],
undiscounted_return=self._last_uncounted_discount[idx],
# running_length: Length of path up to now
# Note that running_length is not len(rewards)
# Because a path may not be complete in one batch
running_length=self._last_running_length[idx],
success_count=self._last_success_count[idx])
running_paths[idx]['rewards'].append(reward)
running_paths[idx]['observations'].append(obs)
running_paths[idx]['next_observations'].append(next_obs)
running_paths[idx]['actions'].append(action)
running_paths[idx]['env_infos'].append(env_info)
running_paths[idx]['dones'].append(done)
running_paths[idx]['running_length'] += 1
running_paths[idx]['undiscounted_return'] += reward
running_paths[idx]['success_count'] += env_info.get(
'is_success') or 0
self._last_uncounted_discount[idx] += reward
self._last_success_count[idx] += env_info.get(
'is_success') or 0
self._last_running_length[idx] += 1
if complete or n_samples >= batch_size:
paths.append(
dict(
rewards=np.asarray(running_paths[idx]['rewards']),
dones=np.asarray(running_paths[idx]['dones']),
env_infos=tensor_utils.stack_tensor_dict_list(
running_paths[idx]['env_infos']),
running_length=running_paths[idx]
['running_length'],
undiscounted_return=running_paths[idx]
['undiscounted_return'],
success_count=running_paths[idx]['success_count']))
act_space = self._env_spec.action_space
path_dict = {}
path_dict['observations'] = obs_space.flatten_n(
running_paths[idx]['observations'])
path_dict['next_observations'] = obs_space.flatten_n(
running_paths[idx]['next_observations'])
path_dict['rewards'] = np.asarray(
running_paths[idx]['rewards']).reshape(-1, 1)
path_dict['terminals'] = np.asarray(
running_paths[idx]['dones']).reshape(-1, 1)
path_dict['actions'] = act_space.flatten_n(
running_paths[idx]['actions'])
self.algo.replay_buffer.add_path(path_dict)
running_paths[idx] = None
if done:
self._last_running_length[idx] = 0
self._last_success_count[idx] = 0
self._last_uncounted_discount[idx] = 0
obses = next_obses
return paths
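# Illustrative sketch (not from the original garage source) of the dictionary
# layout pushed to algo.replay_buffer.add_path above, using tiny dummy arrays
# (2 transitions, 3-dimensional observations, 1-dimensional actions).
def _example_replay_path_dict():
    """Build a path dict with the same keys and shapes as obtain_samples."""
    return {
        'observations': np.zeros((2, 3)),
        'next_observations': np.zeros((2, 3)),
        'rewards': np.zeros((2, 1)),  # reshaped to a column vector
        'terminals': np.zeros((2, 1)),  # dones, also a column vector
        'actions': np.zeros((2, 1)),
    }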
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/on_policy_vectorized_sampler.py
"""BatchSampler which uses VecEnvExecutor to run multiple environments."""
import itertools
import time
import warnings
import click
import cloudpickle
from dowel import logger, tabular
import numpy as np
from garage.experiment import deterministic
from garage.misc import tensor_utils
from garage.sampler.batch_sampler import BatchSampler
from garage.sampler.stateful_pool import singleton_pool
from garage.sampler.utils import truncate_paths
from garage.sampler.vec_env_executor import VecEnvExecutor
class OnPolicyVectorizedSampler(BatchSampler):
"""BatchSampler which uses VecEnvExecutor to run multiple environments.
Args:
algo (garage.np.algos.RLAlgorithm): An algorithm instance.
        env (garage.envs.GarageEnv): An environment instance.
n_envs (int): Number of environment instances to setup.
This parameter has effect on sampling performance.
"""
def __init__(self, algo, env, n_envs=None):
if n_envs is None:
n_envs = singleton_pool.n_parallel * 4
super().__init__(algo, env)
self._n_envs = n_envs
self._vec_env = None
self._env_spec = self.env.spec
warnings.warn(
DeprecationWarning(
                'OnPolicyVectorizedSampler is deprecated, and will be '
'removed in the next release. Please use VecWorker and one of '
'the new samplers which implement garage.sampler.Sampler, '
'such as RaySampler.'))
def start_worker(self):
"""Start workers."""
n_envs = self._n_envs
envs = [
cloudpickle.loads(cloudpickle.dumps(self.env))
for _ in range(n_envs)
]
# Deterministically set environment seeds based on the global seed.
seed0 = deterministic.get_seed()
if seed0 is not None:
for (i, e) in enumerate(envs):
e.seed(seed0 + i)
self._vec_env = VecEnvExecutor(
envs=envs, max_path_length=self.algo.max_path_length)
def shutdown_worker(self):
"""Shutdown workers."""
self._vec_env.close()
# pylint: disable=too-many-statements
def obtain_samples(self, itr, batch_size=None, whole_paths=True):
"""Sample the policy for new trajectories.
Args:
itr (int): Iteration number.
batch_size (int): Number of samples to be collected. If None,
it will be default [algo.max_path_length * n_envs].
whole_paths (bool): Whether return all the paths or not. True
by default. It's possible for the paths to have total actual
sample size larger than batch_size, and will be truncated if
this flag is true.
Returns:
list[dict]: Sample paths.
Note:
Each path is a dictionary, with keys and values as following:
* observations: numpy.ndarray with shape [Batch, *obs_dims]
* actions: numpy.ndarray with shape [Batch, *act_dims]
* rewards: numpy.ndarray with shape [Batch, ]
* env_infos: A dictionary with each key representing one
environment info, value being a numpy.ndarray with shape
[Batch, ?]. One example is "ale.lives" for atari
environments.
* agent_infos: A dictionary with each key representing one
agent info, value being a numpy.ndarray with shape
[Batch, ?]. One example is "prev_action", which is used
for recurrent policy as previous action input, merged with
the observation input as the state input.
* dones: numpy.ndarray with shape [Batch, ]
"""
logger.log('Obtaining samples for iteration %d...' % itr)
if not batch_size:
batch_size = self.algo.max_path_length * self._n_envs
paths = []
n_samples = 0
obses = self._vec_env.reset()
completes = np.asarray([True] * self._vec_env.num_envs)
running_paths = [None] * self._vec_env.num_envs
policy_time = 0
env_time = 0
process_time = 0
policy = self.algo.policy
with click.progressbar(length=batch_size, label='Sampling') as pbar:
while n_samples < batch_size:
t = time.time()
policy.reset(completes)
actions, agent_infos = policy.get_actions(obses)
policy_time += time.time() - t
t = time.time()
next_obses, rewards, dones, env_infos, completes = \
self._vec_env.step(actions)
env_time += time.time() - t
t = time.time()
agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
env_infos = tensor_utils.split_tensor_dict_list(env_infos)
if env_infos is None:
env_infos = [dict() for _ in range(self._vec_env.num_envs)]
if agent_infos is None:
agent_infos = [
dict() for _ in range(self._vec_env.num_envs)
]
for idx, observation, action, reward, env_info, agent_info, done, complete in zip( # noqa: E501
itertools.count(), obses, actions, rewards, env_infos,
agent_infos, dones, completes):
if running_paths[idx] is None:
running_paths[idx] = dict(observations=[],
actions=[],
rewards=[],
env_infos=[],
agent_infos=[],
dones=[])
running_paths[idx]['observations'].append(observation)
running_paths[idx]['actions'].append(action)
running_paths[idx]['rewards'].append(reward)
running_paths[idx]['env_infos'].append(env_info)
running_paths[idx]['agent_infos'].append(agent_info)
running_paths[idx]['dones'].append(done)
if complete:
obs = np.asarray(running_paths[idx]['observations'])
actions = np.asarray(running_paths[idx]['actions'])
paths.append(
dict(observations=obs,
actions=actions,
rewards=np.asarray(
running_paths[idx]['rewards']),
env_infos=tensor_utils.stack_tensor_dict_list(
running_paths[idx]['env_infos']),
agent_infos=tensor_utils.
stack_tensor_dict_list(
running_paths[idx]['agent_infos']),
dones=np.asarray(
running_paths[idx]['dones'])))
n_samples += len(running_paths[idx]['rewards'])
running_paths[idx] = None
process_time += time.time() - t
pbar.update(len(obses))
obses = next_obses
tabular.record('PolicyExecTime', policy_time)
tabular.record('EnvExecTime', env_time)
tabular.record('ProcessExecTime', process_time)
return paths if whole_paths else truncate_paths(paths, batch_size)
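# Illustrative usage sketch (not from the original garage source), assuming
# `sampler` is an OnPolicyVectorizedSampler whose start_worker() has been
# called. The path keys follow the obtain_samples docstring above.
def _example_consume_paths(sampler):
    """Return the undiscounted return of every sampled path."""
    paths = sampler.obtain_samples(itr=0)
    return [float(np.sum(path['rewards'])) for path in paths]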
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/parallel_sampler.py
"""Original parallel sampler pool backend."""
# pylint: skip-file
import signal
import cloudpickle
from dowel import logger
import numpy as np
from garage.experiment import deterministic
from garage.sampler.stateful_pool import SharedGlobal
from garage.sampler.stateful_pool import singleton_pool
from garage.sampler.utils import rollout
def _worker_init(g, id):
if singleton_pool.n_parallel > 1:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
g.worker_id = id
def initialize(n_parallel):
"""Initialize the worker pool."""
# SIGINT is blocked for all processes created in parallel_sampler to avoid
# the creation of sleeping and zombie processes.
#
# If the user interrupts run_experiment, there's a chance some processes
# won't die due to a dead lock condition where one of the children in the
# parallel sampler exits without releasing a lock once after it catches
# SIGINT.
#
# Later the parent tries to acquire the same lock to proceed with his
# cleanup, but it remains sleeping waiting for the lock to be released.
# In the meantime, all the process in parallel sampler remain in the zombie
# state since the parent cannot proceed with their clean up.
try:
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGINT])
singleton_pool.initialize(n_parallel)
singleton_pool.run_each(_worker_init,
[(id, )
for id in range(singleton_pool.n_parallel)])
finally:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGINT])
def _get_scoped_g(g, scope):
if scope is None:
return g
if not hasattr(g, 'scopes'):
g.scopes = dict()
if scope not in g.scopes:
g.scopes[scope] = SharedGlobal()
g.scopes[scope].worker_id = g.worker_id
return g.scopes[scope]
def _worker_populate_task(g, env, policy, scope=None):
g = _get_scoped_g(g, scope)
g.env = cloudpickle.loads(env)
g.policy = cloudpickle.loads(policy)
def _worker_terminate_task(g, scope=None):
g = _get_scoped_g(g, scope)
if getattr(g, 'env', None):
g.env.close()
g.env = None
if getattr(g, 'policy', None):
g.policy.terminate()
g.policy = None
def populate_task(env, policy, scope=None):
"""Set each worker's env and policy."""
logger.log('Populating workers...')
if singleton_pool.n_parallel > 1:
singleton_pool.run_each(
_worker_populate_task,
[(cloudpickle.dumps(env), cloudpickle.dumps(policy), scope)] *
singleton_pool.n_parallel)
else:
# avoid unnecessary copying
g = _get_scoped_g(singleton_pool.G, scope)
g.env = env
g.policy = policy
logger.log('Populated')
def terminate_task(scope=None):
"""Close each worker's env and terminate each policy."""
singleton_pool.run_each(_worker_terminate_task,
[(scope, )] * singleton_pool.n_parallel)
def close():
"""Close the worker pool."""
singleton_pool.close()
def _worker_set_seed(_, seed):
logger.log('Setting seed to %d' % seed)
deterministic.set_seed(seed)
def set_seed(seed):
"""Set the seed in each worker."""
singleton_pool.run_each(_worker_set_seed,
[(seed + i, )
for i in range(singleton_pool.n_parallel)])
def _worker_set_policy_params(g, params, scope=None):
g = _get_scoped_g(g, scope)
g.policy.set_param_values(params)
def _worker_collect_one_path(g, max_path_length, scope=None):
g = _get_scoped_g(g, scope)
path = rollout(g.env, g.policy, max_path_length=max_path_length)
return path, len(path['rewards'])
def sample_paths(policy_params,
max_samples,
max_path_length=np.inf,
scope=None):
"""Sample paths from each worker.
:param policy_params: parameters for the policy. This will be updated on
each worker process
:param max_samples: desired maximum number of samples to be collected. The
actual number of collected samples might be greater since all trajectories
will be rolled out either until termination or until max_path_length is
reached
:param max_path_length: horizon / maximum length of a single trajectory
:return: a list of collected paths
"""
singleton_pool.run_each(_worker_set_policy_params,
[(policy_params, scope)] *
singleton_pool.n_parallel)
return singleton_pool.run_collect(_worker_collect_one_path,
threshold=max_samples,
args=(max_path_length, scope),
show_prog_bar=True)
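# Illustrative lifecycle sketch (not from the original garage source) of this
# module-level pool, assuming `env` and `policy` are a garage environment and
# policy built elsewhere.
def _example_parallel_sampler(env, policy):
    """Collect at least 4000 timesteps with 4 worker processes."""
    initialize(n_parallel=4)
    set_seed(0)
    populate_task(env, policy)
    paths = sample_paths(policy_params=policy.get_param_values(),
                         max_samples=4000,
                         max_path_length=500)
    terminate_task()
    close()
    return paths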
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/parallel_vec_env_executor.py
"""Environment wrapper that runs multiple environments in parallel."""
# pylint: skip-file
import uuid
import warnings
import cloudpickle
from dowel import logger
import numpy as np
from garage.misc import tensor_utils
from garage.sampler.stateful_pool import singleton_pool
def worker_init_envs(g, alloc, scope, env):
"""Initialize the environment on a worker."""
logger.log('initializing environment on worker %d' % g.worker_id)
if not hasattr(g, 'parallel_vec_envs'):
g.parallel_vec_envs = dict()
g.parallel_vec_env_template = dict()
g.parallel_vec_envs[scope] = [
(idx, cloudpickle.loads(cloudpickle.dumps(env))) for idx in alloc
]
g.parallel_vec_env_template[scope] = env
# For these two methods below, we pack the data into batch numpy arrays
# whenever possible, to reduce communication cost
def worker_run_reset(g, flags, scope):
"""Reset the environment on a worker."""
if not hasattr(g, 'parallel_vec_envs'):
logger.log('on worker %d' % g.worker_id)
import traceback
for line in traceback.format_stack():
logger.log(line)
# log the stacktrace at least
logger.log('oops')
for k, v in g.__dict__.items():
logger.log(str(k) + ' : ' + str(v))
assert hasattr(g, 'parallel_vec_envs')
assert scope in g.parallel_vec_envs
n = len(g.parallel_vec_envs[scope])
env_template = g.parallel_vec_env_template[scope]
obs_dim = env_template.observation_space.flat_dim
ret_arr = np.zeros((n, obs_dim))
ids = []
flat_obs = []
reset_ids = []
for itr_idx, (idx, env) in enumerate(g.parallel_vec_envs[scope]):
flag = flags[idx]
if flag:
flat_obs.append(env.reset())
reset_ids.append(itr_idx)
ids.append(idx)
if reset_ids:
ret_arr[reset_ids] = env_template.observation_space.flatten_n(flat_obs)
return ids, ret_arr
def worker_run_step(g, action_n, scope):
"""Step the environment on a worker."""
assert hasattr(g, 'parallel_vec_envs')
assert scope in g.parallel_vec_envs
env_template = g.parallel_vec_env_template[scope]
ids = []
step_results = []
for (idx, env) in g.parallel_vec_envs[scope]:
action = action_n[idx]
ids.append(idx)
step_results.append(tuple(env.step(action)))
if not step_results:
return None
obs, rewards, dones, env_infos = list(map(list, list(zip(*step_results))))
obs = env_template.observation_space.flatten_n(obs)
rewards = np.asarray(rewards)
dones = np.asarray(dones)
env_infos = tensor_utils.stack_tensor_dict_list(env_infos)
return ids, obs, rewards, dones, env_infos
class ParallelVecEnvExecutor:
"""Environment wrapper that runs multiple environments in parallel."""
def __init__(self, env, n, max_path_length, scope=None):
if scope is None:
# initialize random scope
scope = str(uuid.uuid4())
envs_per_worker = int(np.ceil(n * 1.0 / singleton_pool.n_parallel))
alloc_env_ids = []
rest_alloc = n
start_id = 0
for _ in range(singleton_pool.n_parallel):
n_allocs = min(envs_per_worker, rest_alloc)
alloc_env_ids.append(list(range(start_id, start_id + n_allocs)))
start_id += n_allocs
rest_alloc = max(0, rest_alloc - envs_per_worker)
singleton_pool.run_each(worker_init_envs, [(alloc, scope, env)
for alloc in alloc_env_ids])
self._alloc_env_ids = alloc_env_ids
self._action_space = env.action_space
self._observation_space = env.observation_space
self._num_envs = n
self.scope = scope
self.ts = np.zeros(n, dtype='int')
self.max_path_length = max_path_length
warnings.warn(
DeprecationWarning(
'ParallelVecEnvExecutor is deprecated, and will be removed in '
'the next release. Please use VecWorker and one of the new '
'samplers which implement garage.sampler.Sampler, such as '
'RaySampler'))
def step(self, action_n):
"""Step all environments using the provided actions."""
results = singleton_pool.run_each(
worker_run_step,
[(action_n, self.scope) for _ in self._alloc_env_ids],
)
results = [x for x in results if x is not None]
ids, obs, rewards, dones, env_infos = list(zip(*results))
ids = np.concatenate(ids)
obs = self.observation_space.unflatten_n(np.concatenate(obs))
rewards = np.concatenate(rewards)
dones = np.concatenate(dones)
env_infos = tensor_utils.split_tensor_dict_list(
tensor_utils.concat_tensor_dict_list(env_infos))
if env_infos is None:
env_infos = [dict() for _ in range(self.num_envs)]
items = list(zip(ids, obs, rewards, dones, env_infos))
items = sorted(items, key=lambda x: x[0])
ids, obs, rewards, dones, env_infos = list(zip(*items))
obs = list(obs)
rewards = np.asarray(rewards)
dones = np.asarray(dones)
self.ts += 1
dones[self.ts >= self.max_path_length] = True
reset_obs = self._run_reset(dones)
for (i, done) in enumerate(dones):
if done:
obs[i] = reset_obs[i]
self.ts[i] = 0
return obs, rewards, dones, tensor_utils.stack_tensor_dict_list(
list(env_infos))
def _run_reset(self, dones):
dones = np.asarray(dones)
results = singleton_pool.run_each(
worker_run_reset,
[(dones, self.scope) for _ in self._alloc_env_ids],
)
ids, flat_obs = list(map(np.concatenate, list(zip(*results))))
zipped = list(zip(ids, flat_obs))
sorted_obs = np.asarray(
[x[1] for x in sorted(zipped, key=lambda x: x[0])])
done_ids, = np.where(dones)
done_flat_obs = sorted_obs[done_ids]
done_unflat_obs = self.observation_space.unflatten_n(done_flat_obs)
all_obs = [None] * self.num_envs
done_cursor = 0
for idx, done in enumerate(dones):
if done:
all_obs[idx] = done_unflat_obs[done_cursor]
done_cursor += 1
return all_obs
def reset(self):
"""Reset all environments."""
dones = np.asarray([True] * self.num_envs)
return self._run_reset(dones)
@property
def num_envs(self):
"""Read / write the number of environments."""
return self._num_envs
@property
def action_space(self):
"""Read / write the action space."""
return self._action_space
@property
def observation_space(self):
"""Read / write the observation space."""
return self._observation_space
def close(self):
"""Close all environments."""
pass
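# Illustrative usage sketch (not from the original garage source), assuming
# `env` is a garage environment, `policy` follows the get_actions interface
# used elsewhere in this package, and the worker pool has been initialized.
def _example_parallel_vec_env(env, policy):
    """Step 8 parallel copies of `env` for up to 100 steps each."""
    vec_env = ParallelVecEnvExecutor(env=env, n=8, max_path_length=100)
    obses = vec_env.reset()
    for _ in range(100):
        actions, _ = policy.get_actions(obses)
        obses, rewards, dones, env_infos = vec_env.step(actions)
    vec_env.close()
    return obses, rewards, dones, env_infos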
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/ray_sampler.py
"""This is an implementation of an on policy batch sampler.
Uses a data parallel design.
Included is a sampler that deploys sampler workers.
The sampler workers must implement some type of set agent parameters
function, and a rollout function.
"""
from collections import defaultdict
import itertools
import click
import cloudpickle
import ray
from garage import TrajectoryBatch
from garage.sampler.sampler import Sampler
class RaySampler(Sampler):
"""Collects Policy Rollouts in a data parallel fashion.
Args:
worker_factory(garage.sampler.WorkerFactory): Used for worker behavior.
agents(list[garage.Policy]): Agents to distribute across workers.
envs(list[gym.Env]): Environments to distribute across workers.
"""
def __init__(self, worker_factory, agents, envs):
# pylint: disable=super-init-not-called
if not ray.is_initialized():
ray.init(log_to_driver=False)
self._sampler_worker = ray.remote(SamplerWorker)
self._worker_factory = worker_factory
self._agents = agents
self._envs = self._worker_factory.prepare_worker_messages(envs)
self._all_workers = defaultdict(None)
self._workers_started = False
self.start_worker()
@classmethod
def from_worker_factory(cls, worker_factory, agents, envs):
"""Construct this sampler.
Args:
worker_factory(WorkerFactory): Pickleable factory for creating
workers. Should be transmitted to other processes / nodes where
work needs to be done, then workers should be constructed
there.
agents(Agent or List[Agent]): Agent(s) to use to perform rollouts.
If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
envs(gym.Env or List[gym.Env]): Environment rollouts are performed
in. If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
Returns:
Sampler: An instance of `cls`.
"""
return cls(worker_factory, agents, envs)
def start_worker(self):
"""Initialize a new ray worker."""
if self._workers_started:
return
self._workers_started = True
# We need to pickle the agent so that we can e.g. set up the TF.Session
# in the worker *before* unpickling it.
agent_pkls = self._worker_factory.prepare_worker_messages(
self._agents, cloudpickle.dumps)
for worker_id in range(self._worker_factory.n_workers):
self._all_workers[worker_id] = self._sampler_worker.remote(
worker_id, self._envs[worker_id], agent_pkls[worker_id],
self._worker_factory)
def _update_workers(self, agent_update, env_update):
"""Update all of the workers.
Args:
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Returns:
list[ray._raylet.ObjectID]: Remote values of worker ids.
"""
updating_workers = []
param_ids = self._worker_factory.prepare_worker_messages(
agent_update, ray.put)
env_ids = self._worker_factory.prepare_worker_messages(
env_update, ray.put)
for worker_id in range(self._worker_factory.n_workers):
worker = self._all_workers[worker_id]
updating_workers.append(
worker.update.remote(param_ids[worker_id], env_ids[worker_id]))
return updating_workers
def obtain_samples(self, itr, num_samples, agent_update, env_update=None):
"""Sample the policy for new trajectories.
Args:
itr(int): Iteration number.
num_samples(int): Number of steps the the sampler should collect.
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Returns:
TrajectoryBatch: Batch of gathered trajectories.
"""
active_workers = []
completed_samples = 0
batches = []
# update the policy params of each worker before sampling
# for the current iteration
idle_worker_ids = []
updating_workers = self._update_workers(agent_update, env_update)
with click.progressbar(length=num_samples, label='Sampling') as pbar:
while completed_samples < num_samples:
# if there are workers still being updated, check
# which ones are still updating and take the workers that
# are done updating, and start collecting trajectories on
# those workers.
if updating_workers:
updated, updating_workers = ray.wait(updating_workers,
num_returns=1,
timeout=0.1)
upd = [ray.get(up) for up in updated]
idle_worker_ids.extend(upd)
# if there are idle workers, use them to collect trajectories
# mark the newly busy workers as active
while idle_worker_ids:
idle_worker_id = idle_worker_ids.pop()
worker = self._all_workers[idle_worker_id]
active_workers.append(worker.rollout.remote())
# check which workers are done/not done collecting a sample
# if any are done, send them to process the collected
# trajectory if they are not, keep checking if they are done
ready, not_ready = ray.wait(active_workers,
num_returns=1,
timeout=0.001)
active_workers = not_ready
for result in ready:
ready_worker_id, trajectory_batch = ray.get(result)
idle_worker_ids.append(ready_worker_id)
num_returned_samples = trajectory_batch.lengths.sum()
completed_samples += num_returned_samples
batches.append(trajectory_batch)
pbar.update(num_returned_samples)
return TrajectoryBatch.concatenate(*batches)
def obtain_exact_trajectories(self,
n_traj_per_worker,
agent_update,
env_update=None):
"""Sample an exact number of trajectories per worker.
Args:
n_traj_per_worker (int): Exact number of trajectories to gather for
each worker.
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Returns:
TrajectoryBatch: Batch of gathered trajectories. Always in worker
order. In other words, first all trajectories from worker 0,
then all trajectories from worker 1, etc.
"""
active_workers = []
trajectories = defaultdict(list)
# update the policy params of each worker before sampling
# for the current iteration
idle_worker_ids = []
updating_workers = self._update_workers(agent_update, env_update)
with click.progressbar(length=self._worker_factory.n_workers,
label='Sampling') as pbar:
while any(
len(trajectories[i]) < n_traj_per_worker
for i in range(self._worker_factory.n_workers)):
# if there are workers still being updated, check
# which ones are still updating and take the workers that
# are done updating, and start collecting trajectories on
# those workers.
if updating_workers:
updated, updating_workers = ray.wait(updating_workers,
num_returns=1,
timeout=0.1)
upd = [ray.get(up) for up in updated]
idle_worker_ids.extend(upd)
# if there are idle workers, use them to collect trajectories
# mark the newly busy workers as active
while idle_worker_ids:
idle_worker_id = idle_worker_ids.pop()
worker = self._all_workers[idle_worker_id]
active_workers.append(worker.rollout.remote())
# check which workers are done/not done collecting a sample
# if any are done, send them to process the collected
# trajectory if they are not, keep checking if they are done
ready, not_ready = ray.wait(active_workers,
num_returns=1,
timeout=0.001)
active_workers = not_ready
for result in ready:
ready_worker_id, trajectory_batch = ray.get(result)
trajectories[ready_worker_id].append(trajectory_batch)
if len(trajectories[ready_worker_id]) < n_traj_per_worker:
idle_worker_ids.append(ready_worker_id)
pbar.update(1)
ordered_trajectories = list(
itertools.chain(*[
trajectories[i] for i in range(self._worker_factory.n_workers)
]))
return TrajectoryBatch.concatenate(*ordered_trajectories)
def shutdown_worker(self):
"""Shuts down the worker."""
for worker in self._all_workers.values():
worker.shutdown.remote()
ray.shutdown()
class SamplerWorker:
"""Constructs a single sampler worker.
Args:
worker_id(int): The id of the sampler_worker
env(gym.Env): The gym env
agent_pkl(bytes): The pickled agent
worker_factory(WorkerFactory): Factory to construct this worker's
behavior.
"""
def __init__(self, worker_id, env, agent_pkl, worker_factory):
# Must be called before pickle.loads below.
self.inner_worker = worker_factory(worker_id)
self.worker_id = worker_id
self.inner_worker.update_env(env)
self.inner_worker.update_agent(cloudpickle.loads(agent_pkl))
def update(self, agent_update, env_update):
"""Update the agent and environment.
Args:
agent_update(object): Agent update.
env_update(object): Environment update.
Returns:
int: The worker id.
"""
self.inner_worker.update_agent(agent_update)
self.inner_worker.update_env(env_update)
return self.worker_id
def rollout(self):
"""Compute one rollout of the agent in the environment.
Returns:
tuple[int, garage.TrajectoryBatch]: Worker ID and batch of samples.
"""
return (self.worker_id, self.inner_worker.rollout())
def shutdown(self):
"""Shuts down the worker."""
self.inner_worker.shutdown()
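# Illustrative usage sketch (not from the original garage source), assuming
# `worker_factory`, `policy` and `env` are built elsewhere; Ray manages the
# worker processes.
def _example_ray_sampler(worker_factory, policy, env):
    """Collect at least 4000 timesteps, then release the Ray workers."""
    sampler = RaySampler.from_worker_factory(worker_factory, policy, env)
    batch = sampler.obtain_samples(itr=0,
                                   num_samples=4000,
                                   agent_update=policy)
    sampler.shutdown_worker()
    return batch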
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/sampler.py
"""Base sampler class."""
import abc
import copy
class Sampler(abc.ABC):
"""Abstract base class of all samplers.
Implementations of this class should override `construct`,
`obtain_samples`, and `shutdown_worker`. `construct` takes a
`WorkerFactory`, which implements most of the RL-specific functionality a
`Sampler` needs. Specifically, it specifies how to construct `Worker`s,
which know how to perform rollouts and update both agents and environments.
Currently, `__init__` is also part of the interface, but calling it is
deprecated. `start_worker` is also deprecated, and does not need to be
implemented.
"""
def __init__(self, algo, env):
"""Construct a Sampler from an Algorithm.
Args:
algo(garage.RLAlgorithm): The RL Algorithm controlling this
sampler.
env(gym.Env): The environment being sampled from.
Calling this method is deprecated.
"""
self.algo = algo
self.env = env
@classmethod
def from_worker_factory(cls, worker_factory, agents, envs):
"""Construct this sampler.
Args:
worker_factory(WorkerFactory): Pickleable factory for creating
workers. Should be transmitted to other processes / nodes where
work needs to be done, then workers should be constructed
there.
agents(Agent or List[Agent]): Agent(s) to use to perform rollouts.
If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
envs(gym.Env or List[gym.Env]): Environment rollouts are performed
in. If a list is passed in, it must have length exactly
`worker_factory.n_workers`, and will be spread across the
workers.
Returns:
Sampler: An instance of `cls`.
"""
# This implementation works for most current implementations.
# Relying on this implementation is deprecated, but calling this method
# is not.
fake_algo = copy.copy(worker_factory)
fake_algo.policy = agents
return cls(fake_algo, envs)
def start_worker(self):
"""Initialize the sampler.
i.e. launching parallel workers if necessary.
This method is deprecated, please launch workers in construct instead.
"""
@abc.abstractmethod
def obtain_samples(self, itr, num_samples, agent_update, env_update=None):
"""Collect at least a given number transitions (timesteps).
Args:
itr(int): The current iteration number. Using this argument is
deprecated.
num_samples(int): Minimum number of transitions / timesteps to
sample.
agent_update(object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update(object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Returns:
garage.TrajectoryBatch: The batch of collected trajectories.
"""
@abc.abstractmethod
def shutdown_worker(self):
"""Terminate workers if necessary.
Because Python object destruction can be somewhat unpredictable, this
method isn't deprecated.
"""
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/sampler_deprecated.py
"""Base class of Sampler."""
import abc
class Sampler(abc.ABC):
"""Sampler interface."""
@abc.abstractmethod
def start_worker(self):
"""Initialize the sampler.
e.g. launching parallel workers if necessary.
"""
@abc.abstractmethod
def obtain_samples(self, itr, batch_size, whole_paths):
"""Collect samples for the given iteration number.
Args:
itr (int): Number of iteration.
batch_size (int): Number of environment steps in one batch.
whole_paths (bool): Whether to use whole path or truncated.
Returns:
list[dict]: A list of paths.
"""
@abc.abstractmethod
def shutdown_worker(self):
"""Terminate workers if necessary."""
class BaseSampler(Sampler):
# pylint: disable=abstract-method
"""Base class for sampler.
Args:
algo (garage.np.algos.RLAlgorithm): The algorithm.
env (gym.Env): The environment.
"""
def __init__(self, algo, env):
self.algo = algo
self.env = env
| 1,074 | 21.395833 | 71 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/stateful_pool.py | # flake8: noqa
# pylint: skip-file
import inspect
import multiprocessing as mp
import sys
import time
import traceback
import click
from joblib.pool import MemmappingPool
class SharedGlobal:
pass
class StatefulPool:
def __init__(self):
self.n_parallel = 1
self.pool = None
self.queue = None
self.worker_queue = None
self.G = SharedGlobal()
self.manager = None
self.initialized = False
def initialize(self, n_parallel):
self.n_parallel = n_parallel
if self.pool is not None:
print('Warning: terminating existing pool')
self.pool.terminate()
self.pool = None
self.queue.close()
self.worker_queue.close()
self.G = SharedGlobal()
if n_parallel > 1:
self.manager = mp.Manager()
self.queue = mp.Queue()
self.worker_queue = mp.Queue()
self.pool = MemmappingPool(
self.n_parallel,
temp_folder='/tmp',
)
self.initialized = True
def close(self):
if self.manager:
self.manager.shutdown()
if self.pool:
self.pool.close()
def run_each(self, runner, args_list=None):
"""
Run the method on each worker process, and collect the result of
execution.
        The runner method will receive 'g' as its first argument, followed
        by the arguments in the args_list, if any.
        :return: A list with the value returned by runner on each worker.
"""
assert not inspect.ismethod(runner), (
'run_each() cannot run a class method. Please ensure that runner '
'is a function with the prototype def foo(g, ...), where g is an '
'object of type garage.sampler.stateful_pool.SharedGlobal')
if args_list is None:
args_list = [tuple()] * self.n_parallel
assert len(args_list) == self.n_parallel
if self.n_parallel > 1:
results = self.pool.map_async(_worker_run_each,
[(runner, args)
for args in args_list])
for i in range(self.n_parallel):
self.worker_queue.get()
for i in range(self.n_parallel):
self.queue.put(None)
return results.get()
return [runner(self.G, *args_list[0])]
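    # Hedged example of run_each (added, not in the original file). The worker
    # count and the seed values are arbitrary; the runner must be a plain
    # function whose first argument is the SharedGlobal object.
    #
    #     def set_seed(g, seed):
    #         g.seed = seed
    #
    #     singleton_pool.initialize(n_parallel=2)
    #     singleton_pool.run_each(set_seed, [(1,), (2,)])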
def run_map(self, runner, args_list):
assert not inspect.ismethod(runner), (
'run_map() cannot run a class method. Please ensure that runner '
"is a function with the prototype 'def foo(g, ...)', where g is "
'an object of type garage.sampler.stateful_pool.SharedGlobal')
if self.n_parallel > 1:
return self.pool.map(_worker_run_map,
[(runner, args) for args in args_list])
else:
ret = []
for args in args_list:
ret.append(runner(self.G, *args))
return ret
def run_imap_unordered(self, runner, args_list):
assert not inspect.ismethod(runner), (
'run_imap_unordered() cannot run a class method. Please ensure '
"that runner is a function with the prototype 'def foo(g, ...)', "
'where g is an object of type '
'garage.sampler.stateful_pool.SharedGlobal')
if self.n_parallel > 1:
for x in self.pool.imap_unordered(_worker_run_map,
[(runner, args)
for args in args_list]):
yield x
else:
for args in args_list:
yield runner(self.G, *args)
def run_collect(self,
collect_once,
threshold,
args=None,
show_prog_bar=True):
"""
Run the collector method using the worker pool. The collect_once method
will receive 'g' as its first argument, followed by the provided args,
if any. The method should return a pair of values. The first should be
the object to be collected, and the second is the increment to be
added.
This will continue until the total increment reaches or exceeds the
given threshold.
Sample script:
def collect_once(g):
return 'a', 1
stateful_pool.run_collect(collect_once, threshold=3)
# should return ['a', 'a', 'a']
        :param collect_once: Function called repeatedly on the workers.
        :param threshold: Total increment at which collection stops.
        :param args: Extra arguments passed to collect_once after 'g'.
        :param show_prog_bar: Whether to display a progress bar while sampling.
        :return: A list of all collected objects.
"""
assert not inspect.ismethod(collect_once), (
'run_collect() cannot run a class method. Please ensure that '
"collect_once is a function with the prototype 'def foo(g, ...)', "
'where g is an object of type '
'garage.sampler.stateful_pool.SharedGlobal')
if args is None:
args = tuple()
with click.progressbar(length=threshold, label='Sampling') as pbar:
if self.pool:
counter = self.manager.Value('i', 0)
lock = self.manager.RLock()
results = self.pool.map_async(
_worker_run_collect,
[(collect_once, counter, lock, threshold, args)] *
self.n_parallel)
last_value = 0
while True:
time.sleep(0.1)
with lock:
pbar.update(counter.value - last_value)
if counter.value >= threshold:
break
last_value = counter.value
return sum(results.get(), [])
else:
count = 0
results = []
while count < threshold:
result, inc = collect_once(self.G, *args)
results.append(result)
count += inc
pbar.update(inc)
return results
return []
singleton_pool = StatefulPool()
def _worker_run_each(all_args):
try:
runner, args = all_args
# signals to the master that this task is up and running
singleton_pool.worker_queue.put(None)
# wait for the master to signal continuation
singleton_pool.queue.get()
return runner(singleton_pool.G, *args)
except Exception:
raise Exception(''.join(traceback.format_exception(*sys.exc_info())))
def _worker_run_collect(all_args):
try:
collect_once, counter, lock, threshold, args = all_args
collected = []
while True:
with lock:
if counter.value >= threshold:
return collected
result, inc = collect_once(singleton_pool.G, *args)
collected.append(result)
with lock:
counter.value += inc
if counter.value >= threshold:
return collected
except Exception:
raise Exception(''.join(traceback.format_exception(*sys.exc_info())))
def _worker_run_map(all_args):
try:
runner, args = all_args
return runner(singleton_pool.G, *args)
except Exception:
raise Exception(''.join(traceback.format_exception(*sys.exc_info())))
| 7,359 | 32.153153 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/utils.py | """Utility functions related to sampling."""
import time
import numpy as np
from garage.misc import tensor_utils
def rollout(env,
agent,
*,
max_path_length=np.inf,
animated=False,
speedup=1,
deterministic=False):
"""Sample a single rollout of the agent in the environment.
Args:
agent(Policy): Agent used to select actions.
env(gym.Env): Environment to perform actions in.
max_path_length(int): If the rollout reaches this many timesteps, it is
terminated.
animated(bool): If true, render the environment after each step.
speedup(float): Factor by which to decrease the wait time between
            rendered steps. Only relevant if animated == True.
deterministic (bool): If true, use the mean action returned by the
stochastic policy instead of sampling from the returned action
distribution.
Returns:
dict[str, np.ndarray or dict]: Dictionary, with keys:
            * observations(np.array): Flattened array of observations.
                There is one of these per action, i.e. observations[i] was
                used by the agent to choose actions[i]. Should have shape
                (T, S^*) (the unflattened state space of the current
                environment).
* actions(np.array): Non-flattened array of actions. Should have
shape (T, S^*) (the unflattened action space of the current
environment).
* rewards(np.array): Array of rewards of shape (T,) (1D array of
length timesteps).
* agent_infos(Dict[str, np.array]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos(Dict[str, np.array]): Dictionary of stacked,
non-flattened `env_info` arrays.
* dones(np.array): Array of termination signals.
"""
observations = []
actions = []
rewards = []
agent_infos = []
env_infos = []
dones = []
o = env.reset()
agent.reset()
path_length = 0
if animated:
env.render()
while path_length < (max_path_length or np.inf):
a, agent_info = agent.get_action(o)
if deterministic and 'mean' in agent_info:
a = agent_info['mean']
next_o, r, d, env_info = env.step(a)
observations.append(o)
rewards.append(r)
actions.append(a)
agent_infos.append(agent_info)
env_infos.append(env_info)
dones.append(d)
path_length += 1
if d:
break
o = next_o
if animated:
env.render()
timestep = 0.05
time.sleep(timestep / speedup)
return dict(
observations=np.array(observations),
actions=np.array(actions),
rewards=np.array(rewards),
agent_infos=tensor_utils.stack_tensor_dict_list(agent_infos),
env_infos=tensor_utils.stack_tensor_dict_list(env_infos),
dones=np.array(dones),
)
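# A short, hedged usage example (added note, not part of the original source).
# It assumes `env` and `policy` are an already-constructed environment and
# garage policy; the keys accessed below follow the Returns section above.
#
#     path = rollout(env, policy, max_path_length=200, deterministic=True)
#     total_reward = path['rewards'].sum()
#     num_steps = len(path['actions'])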
def truncate_paths(paths, max_samples):
"""Truncate the paths so that the total number of samples is max_samples.
    This is done by removing extra paths at the end of
    the list, and making the last path shorter if necessary.
Args:
paths (list[dict[str, np.ndarray]]): Samples, items with keys:
            * observations (np.ndarray): Environment observations
* actions (np.ndarray): Agent actions
* rewards (np.ndarray): Environment rewards
* env_infos (dict): Environment state information
* agent_infos (dict): Agent state information
max_samples(int) : Maximum number of samples allowed.
Returns:
list[dict[str, np.ndarray]]: A list of paths, truncated so that the
            number of samples adds up to max_samples.
Raises:
        ValueError: If a key other than 'observations', 'actions', 'rewards',
'env_infos' and 'agent_infos' is found.
"""
# chop samples collected by extra paths
# make a copy
valid_keys = {
'observations', 'actions', 'rewards', 'env_infos', 'agent_infos'
}
paths = list(paths)
total_n_samples = sum(len(path['rewards']) for path in paths)
while paths and total_n_samples - len(paths[-1]['rewards']) >= max_samples:
total_n_samples -= len(paths.pop(-1)['rewards'])
if paths:
last_path = paths.pop(-1)
truncated_last_path = dict()
truncated_len = len(
last_path['rewards']) - (total_n_samples - max_samples)
for k, v in last_path.items():
if k in ['observations', 'actions', 'rewards']:
truncated_last_path[k] = v[:truncated_len]
elif k in ['env_infos', 'agent_infos']:
truncated_last_path[k] = tensor_utils.truncate_tensor_dict(
v, truncated_len)
else:
raise ValueError(
'Unexpected key {} found in path. Valid keys: {}'.format(
k, valid_keys))
paths.append(truncated_last_path)
return paths
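# Hedged example for truncate_paths (added note, not in the original module).
# With two paths of length 3 and max_samples=4, the first path is kept whole
# and the second is shortened to a single timestep; `make_path` is a
# hypothetical helper that builds a path dict with the keys listed above.
#
#     paths = [make_path(length=3), make_path(length=3)]
#     truncated = truncate_paths(paths, max_samples=4)
#     assert sum(len(p['rewards']) for p in truncated) == 4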
| 5,222 | 36.042553 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/vec_env_executor.py | """Environment wrapper that runs multiple environments."""
import copy
import warnings
import numpy as np
from garage.misc import tensor_utils
class VecEnvExecutor:
"""Environment wrapper that runs multiple environments.
Args:
envs (list[gym.Env]): List of environments to batch together.
max_path_length (int): Maximum length of any path.
"""
def __init__(self, envs, max_path_length):
self.envs = envs
self._action_space = envs[0].action_space
self._observation_space = envs[0].observation_space
self.ts = np.zeros(len(self.envs), dtype='int')
self.max_path_length = max_path_length
warnings.warn(
DeprecationWarning(
'VecEnvExecutor is deprecated, and will be removed in the '
'next release. Please use VecWorker and one of the new '
'samplers which implement garage.sampler.Sampler, such as '
'RaySampler'))
def step(self, action_n):
"""Step all environments using the provided actions.
        Inserts an environment info 'vec_env_executor.complete' containing the
episode end signal (time limit reached or done signal from
environment).
Args:
action_n (np.ndarray): Array of actions.
Returns:
tuple: Tuple containing:
* observations (np.ndarray)
* rewards (np.ndarray)
* dones (np.ndarray): The done signal from the environment.
* env_infos (dict[str, np.ndarray])
* completes (np.ndarray): whether or not the path is complete.
A path is complete at some time-step N if the done signal
has been received at that or before N, or if
                    N >= max_path_length.
"""
all_results = [env.step(a) for (a, env) in zip(action_n, self.envs)]
obs, rewards, dones, env_infos = list(
map(list, list(zip(*all_results))))
dones = np.asarray(dones)
rewards = np.asarray(rewards)
self.ts += 1
completes = copy.deepcopy(dones)
if self.max_path_length is not None:
completes[self.ts >= self.max_path_length] = True
for (i, complete) in enumerate(completes):
if complete:
obs[i] = self.envs[i].reset()
self.ts[i] = 0
env_infos[i]['vec_env_executor.complete'] = completes
return (obs, rewards, dones,
tensor_utils.stack_tensor_dict_list(env_infos), completes)
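        # Illustrative note (added, not from the original source): with
        # max_path_length=2 and environments that never signal done, usage
        # would look roughly like
        #
        #     obs = vec_env.reset()
        #     _, _, dones, _, completes = vec_env.step(actions)
        #     # dones == [False, ...], completes == [False, ...]
        #     _, _, dones, _, completes = vec_env.step(actions)
        #     # dones == [False, ...], completes == [True, ...]; the time
        #     # limit was hit and each environment has been reset internally.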
def reset(self):
"""Reset all environments.
Returns:
np.ndarray: Observations of shape :math:`(K, O*)`
"""
results = [env.reset() for env in self.envs]
self.ts[:] = 0
return results
@property
def num_envs(self):
"""Read the number of environments.
Returns:
int: Number of environments
"""
return len(self.envs)
@property
def action_space(self):
"""Read the action space.
Returns:
gym.Space: The action space.
"""
return self._action_space
@property
def observation_space(self):
"""Read the observation space.
Returns:
gym.Space: The observation space.
"""
return self._observation_space
def close(self):
"""Close all environments."""
| 3,488 | 29.33913 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/vec_worker.py | """Worker that "vectorizes" environments."""
import collections
import copy
import gym
import numpy as np
from garage import TrajectoryBatch
from garage.sampler.default_worker import DefaultWorker
from garage.sampler.env_update import EnvUpdate
class VecWorker(DefaultWorker):
"""Worker with a single policy and multiple environemnts.
Alternates between taking a single step in all environments and asking the
policy for an action for every environment. This allows computing a batch
of actions, which is generally much more efficient than computing a single
action when using neural networks.
Args:
        seed(int): The seed to use to initialize random number generators.
max_path_length(int or float): The maximum length paths which will
be sampled. Can be (floating point) infinity.
        worker_number(int): The number of the worker this instance
            corresponds to. This argument is used to set a different seed for
            each worker.
n_envs (int): Number of environment copies to use.
"""
DEFAULT_N_ENVS = 8
def __init__(self,
*,
seed,
max_path_length,
worker_number,
n_envs=DEFAULT_N_ENVS):
super().__init__(seed=seed,
max_path_length=max_path_length,
worker_number=worker_number)
self._n_envs = n_envs
self._completed_rollouts = []
self._needs_agent_reset = True
self._needs_env_reset = True
self._envs = [None] * n_envs
self._agents = [None] * n_envs
self._path_lengths = [0] * self._n_envs
def update_agent(self, agent_update):
"""Update an agent, assuming it implements garage.Policy.
Args:
agent_update (np.ndarray or dict or garage.Policy): If a
tuple, dict, or np.ndarray, these should be parameters to
agent, which should have been generated by calling
`policy.get_param_values`. Alternatively, a policy itself. Note
that other implementations of `Worker` may take different types
for this parameter.
"""
super().update_agent(agent_update)
self._needs_agent_reset = True
def update_env(self, env_update):
"""Use any non-None env_update as a new environment.
A simple env update function. If env_update is not None, it should be
the complete new environment.
This allows changing environments by passing the new environment as
`env_update` into `obtain_samples`.
Args:
env_update(gym.Env or EnvUpdate or None): The environment to
replace the existing env with. Note that other implementations
of `Worker` may take different types for this parameter.
Raises:
TypeError: If env_update is not one of the documented types.
ValueError: If the wrong number of updates is passed.
"""
if isinstance(env_update, list):
if len(env_update) != self._n_envs:
raise ValueError('If separate environments are passed for '
'each worker, there must be exactly n_envs '
'({}) environments, but received {} '
'environments.'.format(
self._n_envs, len(env_update)))
for env_index, env_up in enumerate(env_update):
self._update_env_inner(env_up, env_index)
elif env_update is not None:
for env_index in range(self._n_envs):
self._update_env_inner(copy.deepcopy(env_update), env_index)
def _update_env_inner(self, env_update, env_index):
"""Update a single environment.
Args:
env_update(gym.Env or EnvUpdate or None): The environment to
replace the existing env with. Note that other implementations
of `Worker` may take different types for this parameter.
env_index (int): Number of the environment to update.
Raises:
TypeError: If env_update is not one of the documented types.
"""
if isinstance(env_update, EnvUpdate):
self._envs[env_index] = env_update(self._envs[env_index])
self._needs_env_reset = True
elif isinstance(env_update, gym.Env):
if self._envs[env_index] is not None:
self._envs[env_index].close()
self._envs[env_index] = env_update
self._needs_env_reset = True
else:
raise TypeError('Unknown environment update type.')
def start_rollout(self):
"""Begin a new rollout."""
if self._needs_agent_reset or self._needs_env_reset:
n = len(self._envs)
self.agent.reset([True] * n)
if self._needs_env_reset:
self._prev_obs = np.asarray(
[env.reset() for env in self._envs])
else:
# Avoid calling reset on environments that are already at the
# start of a rollout.
for i, env in enumerate(self._envs):
if self._path_lengths[i] > 0:
self._prev_obs[i] = env.reset()
self._path_lengths = [0 for _ in range(n)]
self._observations = [[] for _ in range(n)]
self._actions = [[] for _ in range(n)]
self._rewards = [[] for _ in range(n)]
self._terminals = [[] for _ in range(n)]
self._env_infos = [collections.defaultdict(list) for _ in range(n)]
self._agent_infos = [
collections.defaultdict(list) for _ in range(n)
]
self._needs_agent_reset = False
self._needs_env_reset = False
def _gather_rollout(self, rollout_number, last_observation):
assert 0 < self._path_lengths[rollout_number] <= self._max_path_length
env_infos = self._env_infos[rollout_number]
agent_infos = self._agent_infos[rollout_number]
for k, v in env_infos.items():
env_infos[k] = np.asarray(v)
for k, v in agent_infos.items():
agent_infos[k] = np.asarray(v)
traj = TrajectoryBatch(
self._envs[rollout_number].spec,
np.asarray(self._observations[rollout_number]),
np.asarray([last_observation]),
np.asarray(self._actions[rollout_number]),
np.asarray(self._rewards[rollout_number]),
np.asarray(self._terminals[rollout_number]), dict(env_infos),
dict(agent_infos),
np.asarray([self._path_lengths[rollout_number]], dtype='l'))
self._completed_rollouts.append(traj)
self._observations[rollout_number] = []
self._actions[rollout_number] = []
self._rewards[rollout_number] = []
self._terminals[rollout_number] = []
self._path_lengths[rollout_number] = 0
self._prev_obs[rollout_number] = self._envs[rollout_number].reset()
self._env_infos[rollout_number] = collections.defaultdict(list)
self._agent_infos[rollout_number] = collections.defaultdict(list)
def step_rollout(self):
"""Take a single time-step in the current rollout.
Returns:
bool: True iff at least one of the paths was completed.
"""
finished = False
actions, agent_info = self.agent.get_actions(self._prev_obs)
completes = [False] * len(self._envs)
for i, action in enumerate(actions):
if self._path_lengths[i] < self._max_path_length:
next_o, r, d, env_info = self._envs[i].step(action)
self._observations[i].append(self._prev_obs[i])
self._rewards[i].append(r)
self._actions[i].append(actions[i])
for k, v in agent_info.items():
self._agent_infos[i][k].append(v[i])
for k, v in env_info.items():
self._env_infos[i][k].append(v)
self._path_lengths[i] += 1
self._terminals[i].append(d)
self._prev_obs[i] = next_o
if self._path_lengths[i] >= self._max_path_length or d:
self._gather_rollout(i, next_o)
completes[i] = True
finished = True
if finished:
self.agent.reset(completes)
return finished
def collect_rollout(self):
"""Collect all completed rollouts.
Returns:
garage.TrajectoryBatch: A batch of the trajectories completed since
the last call to collect_rollout().
"""
if len(self._completed_rollouts) == 1:
result = self._completed_rollouts[0]
else:
result = TrajectoryBatch.concatenate(*self._completed_rollouts)
self._completed_rollouts = []
return result
def shutdown(self):
"""Close the worker's environments."""
for env in self._envs:
env.close()
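# A hedged construction sketch (added note, not in the original file).
# VecWorker is normally created through a WorkerFactory rather than by hand;
# `policy` and `env` are assumed to exist and the numbers are arbitrary.
#
#     from garage.sampler import WorkerFactory
#     factory = WorkerFactory(seed=0, max_path_length=100,
#                             worker_class=VecWorker,
#                             worker_args=dict(n_envs=4))
#     worker = factory(0)
#     worker.update_agent(policy)
#     worker.update_env(env)
#     batch = worker.rollout()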
| 9,184 | 39.822222 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/worker.py | """Worker interface used in all Samplers."""
import abc
class Worker(abc.ABC):
"""Worker class used in all Samplers."""
def __init__(self, *, seed, max_path_length, worker_number):
"""Initialize a worker.
Args:
            seed(int): The seed to use to initialize random number generators.
max_path_length(int or float): The maximum length paths which will
be sampled. Can be (floating point) infinity.
            worker_number(int): The number of the worker this instance
                corresponds to. This argument is used to set a different seed
                for each worker.
        Should create the following fields:
agent(Policy or None): The worker's initial agent.
env(gym.Env or None): The worker's environment.
"""
self._seed = seed
self._max_path_length = max_path_length
self._worker_number = worker_number
def update_agent(self, agent_update):
"""Update the worker's agent, using agent_update.
Args:
agent_update(object): An agent update. The exact type of this
argument depends on the `Worker` implementation.
"""
def update_env(self, env_update):
"""Update the worker's env, using env_update.
Args:
env_update(object): An environment update. The exact type of this
argument depends on the `Worker` implementation.
"""
def rollout(self):
"""Sample a single rollout of the agent in the environment.
Returns:
garage.TrajectoryBatch: Batch of sampled trajectories. May be
truncated if max_path_length is set.
"""
def start_rollout(self):
"""Begin a new rollout."""
def step_rollout(self):
"""Take a single time-step in the current rollout.
Returns:
True iff the path is done, either due to the environment indicating
            termination or due to reaching `max_path_length`.
"""
def collect_rollout(self):
"""Collect the current rollout, clearing the internal buffer.
Returns:
garage.TrajectoryBatch: Batch of sampled trajectories. May be
truncated if the rollouts haven't completed yet.
"""
def shutdown(self):
"""Shutdown the worker."""
def __getstate__(self):
"""Refuse to be pickled.
Raises:
ValueError: Always raised, since pickling Workers is not supported.
"""
raise ValueError('Workers are not pickleable. '
'Please pickle the WorkerFactory instead.')
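# Hedged sketch of the protocol described above (added note, not part of the
# original interface file). A concrete Worker is expected to be driven
# roughly like this, which is what implementations typically do inside
# rollout():
#
#     worker.update_agent(agent_update)
#     worker.update_env(env_update)
#     worker.start_rollout()
#     while not worker.step_rollout():
#         pass
#     batch = worker.collect_rollout()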
| 2,682 | 29.488636 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/sampler/worker_factory.py | """Worker factory used by Samplers to construct Workers."""
import psutil
from garage.sampler.default_worker import DefaultWorker
def identity_function(value):
"""Do nothing.
This function exists so it can be pickled.
Args:
value(object): A value.
Returns:
object: The value.
"""
return value
class WorkerFactory:
"""Constructs workers for Samplers.
The intent is that this object should be sufficient to avoid subclassing
the sampler. Instead of subclassing the sampler for e.g. a specific
backend, implement a specialized WorkerFactory (or specify appropriate
    functions to this one). Note that this object must be picklable, since it
may be passed to workers. However, its fields individually need not be.
All arguments to this type must be passed by keyword.
Args:
        seed(int): The seed to use to initialize random number generators.
n_workers(int): The number of workers to use.
max_path_length(int): The maximum length paths which will be sampled.
worker_class(type): Class of the workers. Instances should implement
the Worker interface.
worker_args (dict or None): Additional arguments that should be passed
to the worker.
"""
def __init__(
self,
*, # Require passing by keyword.
seed,
max_path_length,
n_workers=psutil.cpu_count(logical=False),
worker_class=DefaultWorker,
worker_args=None):
self.n_workers = n_workers
self._seed = seed
self._max_path_length = max_path_length
self._worker_class = worker_class
if worker_args is None:
self._worker_args = {}
else:
self._worker_args = worker_args
def prepare_worker_messages(self, objs, preprocess=identity_function):
"""Take an argument and canonicalize it into a list for all workers.
This helper function is used to handle arguments in the sampler API
which may (optionally) be lists. Specifically, these are agent, env,
agent_update, and env_update. Checks that the number of parameters is
correct.
Args:
objs(object or list): Must be either a single object or a list
of length n_workers.
preprocess(function): Function to call on each single object before
creating the list.
Raises:
ValueError: If a list is passed of a length other than `n_workers`.
Returns:
List[object]: A list of length self.n_workers.
"""
if isinstance(objs, list):
if len(objs) != self.n_workers:
raise ValueError(
'Length of list doesn\'t match number of workers')
return [preprocess(obj) for obj in objs]
else:
return [preprocess(objs) for _ in range(self.n_workers)]
def __call__(self, worker_number):
"""Construct a worker given its number.
Args:
worker_number(int): The worker number. Should be at least 0 and
                less than `n_workers`.
Raises:
ValueError: If the worker number is greater than `n_workers`.
Returns:
garage.sampler.Worker: The constructed worker.
"""
if worker_number >= self.n_workers:
raise ValueError('Worker number is too big')
return self._worker_class(worker_number=worker_number,
seed=self._seed,
max_path_length=self._max_path_length,
**self._worker_args)
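# A brief, hedged usage sketch (added note, not in the original module).
# `policy` and `env` are assumed to exist; samplers normally perform this
# wiring internally.
#
#     factory = WorkerFactory(seed=42, max_path_length=200, n_workers=2)
#     agent_updates = factory.prepare_worker_messages(policy)
#     env_updates = factory.prepare_worker_messages(env)
#     workers = [factory(i) for i in range(factory.n_workers)]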
| 3,720 | 32.827273 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/__init__.py | """Tensorflow Branch."""
from garage.tf._functions import paths_to_tensors
__all__ = ['paths_to_tensors']
| 107 | 20.6 | 49 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/_functions.py | """Utility functions for tf-based Reinforcement learning algorithms."""
import numpy as np
from garage.misc import tensor_utils as np_tensor_utils
from garage.tf.misc import tensor_utils
def paths_to_tensors(paths, max_path_length, baseline_predictions, discount,
gae_lambda):
"""Return processed sample data based on the collected paths.
Args:
paths (list[dict]): A list of collected paths.
max_path_length (int): Maximum length of a single rollout.
        baseline_predictions (numpy.ndarray): Predicted values of the GAE
            (Generalized Advantage Estimation) baseline.
discount (float): Environment reward discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
Returns:
dict: Processed sample data, with key
* observations: (numpy.ndarray)
* actions: (numpy.ndarray)
* rewards: (numpy.ndarray)
* baselines: (numpy.ndarray)
* returns: (numpy.ndarray)
* valids: (numpy.ndarray)
* agent_infos: (dict)
* env_infos: (dict)
* paths: (list[dict])
"""
baselines = []
returns = []
total_steps = 0
for idx, path in enumerate(paths):
total_steps += len(path['rewards'])
path_baselines = np.append(baseline_predictions[idx], 0)
deltas = (path['rewards'] + discount * path_baselines[1:] -
path_baselines[:-1])
path['advantages'] = np_tensor_utils.discount_cumsum(
deltas, discount * gae_lambda)
path['deltas'] = deltas
for idx, path in enumerate(paths):
# baselines
path['baselines'] = baseline_predictions[idx]
baselines.append(path['baselines'])
# returns
path['returns'] = np_tensor_utils.discount_cumsum(
path['rewards'], discount)
returns.append(path['returns'])
# make all paths the same length
obs = [path['observations'] for path in paths]
obs = tensor_utils.pad_tensor_n(obs, max_path_length)
actions = [path['actions'] for path in paths]
actions = tensor_utils.pad_tensor_n(actions, max_path_length)
rewards = [path['rewards'] for path in paths]
rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
returns = [path['returns'] for path in paths]
returns = tensor_utils.pad_tensor_n(returns, max_path_length)
baselines = tensor_utils.pad_tensor_n(baselines, max_path_length)
agent_infos = [path['agent_infos'] for path in paths]
agent_infos = tensor_utils.stack_tensor_dict_list([
tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos
])
env_infos = [path['env_infos'] for path in paths]
env_infos = tensor_utils.stack_tensor_dict_list(
[tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos])
valids = [np.ones_like(path['returns']) for path in paths]
valids = tensor_utils.pad_tensor_n(valids, max_path_length)
lengths = np.asarray([v.sum() for v in valids])
samples_data = dict(
observations=obs,
actions=actions,
rewards=rewards,
baselines=baselines,
returns=returns,
valids=valids,
lengths=lengths,
agent_infos=agent_infos,
env_infos=env_infos,
paths=paths,
)
return samples_data
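# Worked form of the advantage computation above (added note, not in the
# original source). With baseline values V and discount factor `discount`,
# the loop first forms the TD residuals
#     delta_t = r_t + discount * V(s_{t+1}) - V(s_t)
# and then sets path['advantages'] = discount_cumsum(deltas,
# discount * gae_lambda), i.e. the generalized advantage estimate
#     A_t = sum_k (discount * gae_lambda)**k * delta_{t+k}.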
| 3,400 | 33.01 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/__init__.py | """Tensorflow implementation of reinforcement learning algorithms."""
from garage.tf.algos.ddpg import DDPG
from garage.tf.algos.dqn import DQN
from garage.tf.algos.erwr import ERWR
from garage.tf.algos.npo import NPO
from garage.tf.algos.ppo import PPO
from garage.tf.algos.reps import REPS
from garage.tf.algos.rl2 import RL2
from garage.tf.algos.rl2ppo import RL2PPO
from garage.tf.algos.rl2trpo import RL2TRPO
from garage.tf.algos.td3 import TD3
from garage.tf.algos.te_npo import TENPO
from garage.tf.algos.te_ppo import TEPPO
from garage.tf.algos.tnpg import TNPG
from garage.tf.algos.trpo import TRPO
from garage.tf.algos.vpg import VPG
__all__ = [
'DDPG',
'DQN',
'ERWR',
'NPO',
'PPO',
'REPS',
'RL2',
'RL2PPO',
'RL2TRPO',
'TD3',
'TNPG',
'TRPO',
'VPG',
'TENPO',
'TEPPO',
]
| 840 | 23.028571 | 69 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/_rl2npo.py | """Natural Policy Gradient Optimization."""
from dowel import logger, tabular
import numpy as np
from garage.misc import tensor_utils as np_tensor_utils
from garage.tf.algos import NPO
class RL2NPO(NPO):
"""Natural Policy Gradient Optimization.
This is specific for RL^2
(https://arxiv.org/pdf/1611.02779.pdf).
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
pg_loss (str): A string from: 'vanilla', 'surrogate',
'surrogate_clip'. The type of loss functions to use.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
        fit_baseline (str): Either 'before' or 'after'. See the NPO docstring
            for a more detailed explanation. Currently it only supports
            'before'.
name (str): The name of the algorithm.
"""
def optimize_policy(self, samples_data):
"""Optimize policy.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
"""
self._fit_baseline_with_data(samples_data)
samples_data['baselines'] = self._get_baseline_prediction(samples_data)
policy_opt_input_values = self._policy_opt_input_values(samples_data)
# Train policy network
logger.log('Computing loss before')
loss_before = self._optimizer.loss(policy_opt_input_values)
logger.log('Computing KL before')
policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
logger.log('Optimizing')
self._optimizer.optimize(policy_opt_input_values)
logger.log('Computing KL after')
policy_kl = self._f_policy_kl(*policy_opt_input_values)
logger.log('Computing loss after')
loss_after = self._optimizer.loss(policy_opt_input_values)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
pol_ent = self._f_policy_entropy(*policy_opt_input_values)
tabular.record('{}/Entropy'.format(self.policy.name), np.mean(pol_ent))
ev = np_tensor_utils.explained_variance_1d(samples_data['baselines'],
samples_data['returns'],
samples_data['valids'])
tabular.record('{}/ExplainedVariance'.format(self._baseline.name), ev)
self._old_policy.model.parameters = self.policy.model.parameters
def _get_baseline_prediction(self, samples_data):
"""Get baseline prediction.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
Returns:
np.ndarray: Baseline prediction, with shape
:math:`(N, max_path_length * episode_per_task)`.
"""
paths = samples_data['paths']
baselines = [self._baseline.predict(path) for path in paths]
return np_tensor_utils.pad_tensor_n(baselines, self.max_path_length)
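# Added note (not in the original file): pad_tensor_n is assumed here to
# zero-pad each per-path baseline prediction up to self.max_path_length and
# stack the results, so the returned array lines up with the other padded
# tensors (returns, valids) used as policy optimization inputs above.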
| 5,636 | 46.369748 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/ddpg.py | """Deep Deterministic Policy Gradient (DDPG) implementation in TensorFlow."""
from collections import deque
from dowel import logger, tabular
import numpy as np
import tensorflow as tf
from garage import _Default, make_optimizer
from garage import log_performance
from garage.np import obtain_evaluation_samples
from garage.np import samples_to_tensors
from garage.np.algos import RLAlgorithm
from garage.sampler import OffPolicyVectorizedSampler
from garage.tf.misc import tensor_utils
class DDPG(RLAlgorithm):
"""A DDPG model based on https://arxiv.org/pdf/1509.02971.pdf.
DDPG, also known as Deep Deterministic Policy Gradient, uses actor-critic
method to optimize the policy and reward prediction. It uses a supervised
method to update the critic network and policy gradient to update the actor
network. And there are exploration strategy, replay buffer and target
networks involved to stabilize the training process.
Example:
$ python garage/examples/tf/ddpg_pendulum.py
Args:
env_spec (EnvSpec): Environment specification.
policy (garage.tf.policies.Policy): Policy.
qf (object): The q value network.
replay_buffer (garage.replay_buffer.ReplayBuffer): Replay buffer.
steps_per_epoch (int): Number of train_once calls per epoch.
n_train_steps (int): Training steps.
max_path_length (int): Maximum path length. The episode will
terminate when length of trajectory reaches max_path_length.
max_eval_path_length (int or None): Maximum length of paths used for
off-policy evaluation. If None, defaults to `max_path_length`.
buffer_batch_size (int): Batch size of replay buffer.
min_buffer_size (int): The minimum buffer size for replay buffer.
rollout_batch_size (int): Roll out batch size.
exploration_policy (garage.np.exploration_policies.ExplorationPolicy):
Exploration strategy.
target_update_tau (float): Interpolation parameter for doing the
soft target update.
policy_lr (float): Learning rate for training policy network.
qf_lr (float): Learning rate for training q value network.
discount(float): Discount factor for the cumulative return.
policy_weight_decay (float): L2 regularization factor for parameters
of the policy network. Value of 0 means no regularization.
qf_weight_decay (float): L2 regularization factor for parameters
of the q value network. Value of 0 means no regularization.
policy_optimizer (tf.Optimizer): Optimizer for training policy network.
qf_optimizer (tf.Optimizer): Optimizer for training q function
network.
clip_pos_returns (bool): Whether or not clip positive returns.
clip_return (float): Clip return to be in [-clip_return,
clip_return].
max_action (float): Maximum action magnitude.
reward_scale (float): Reward scale.
smooth_return (bool): Whether to smooth the return.
name (str): Name of the algorithm shown in computation graph.
"""
def __init__(
self,
env_spec,
policy,
qf,
replay_buffer,
*, # Everything after this is numbers.
steps_per_epoch=20,
n_train_steps=50,
max_path_length=None,
max_eval_path_length=None,
buffer_batch_size=64,
min_buffer_size=int(1e4),
rollout_batch_size=1,
exploration_policy=None,
target_update_tau=0.01,
discount=0.99,
policy_weight_decay=0,
qf_weight_decay=0,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer,
policy_lr=_Default(1e-4),
qf_lr=_Default(1e-3),
clip_pos_returns=False,
clip_return=np.inf,
max_action=None,
reward_scale=1.,
smooth_return=True,
name='DDPG'):
action_bound = env_spec.action_space.high
self._max_action = action_bound if max_action is None else max_action
self._tau = target_update_tau
self._policy_weight_decay = policy_weight_decay
self._qf_weight_decay = qf_weight_decay
self._name = name
self._clip_pos_returns = clip_pos_returns
self._clip_return = clip_return
self._success_history = deque(maxlen=100)
self._episode_rewards = []
self._episode_policy_losses = []
self._episode_qf_losses = []
self._epoch_ys = []
self._epoch_qs = []
self._target_policy = policy.clone('target_policy')
self._target_qf = qf.clone('target_qf')
self._policy_optimizer = policy_optimizer
self._qf_optimizer = qf_optimizer
self._policy_lr = policy_lr
self._qf_lr = qf_lr
self._min_buffer_size = min_buffer_size
self._qf = qf
self._steps_per_epoch = steps_per_epoch
self._n_train_steps = n_train_steps
self._buffer_batch_size = buffer_batch_size
self._discount = discount
self._reward_scale = reward_scale
self._smooth_return = smooth_return
self.max_path_length = max_path_length
self._max_eval_path_length = max_eval_path_length
# used by OffPolicyVectorizedSampler
self.rollout_batch_size = rollout_batch_size
self.env_spec = env_spec
self.replay_buffer = replay_buffer
self.policy = policy
self.exploration_policy = exploration_policy
self.sampler_cls = OffPolicyVectorizedSampler
self.init_opt()
# pylint: disable=too-many-statements
def init_opt(self):
"""Build the loss function and init the optimizer."""
with tf.name_scope(self._name):
# Create target policy and qf network
with tf.name_scope('inputs'):
obs_dim = self.env_spec.observation_space.flat_dim
input_y = tf.compat.v1.placeholder(tf.float32,
shape=(None, 1),
name='input_y')
obs = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs_dim),
name='input_observation')
actions = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self.env_spec.action_space.flat_dim),
name='input_action')
policy_network_outputs = self._target_policy.get_action_sym(
obs, name='policy')
target_qf_outputs = self._target_qf.get_qval_sym(obs,
actions,
name='qf')
self.target_policy_f_prob_online = tensor_utils.compile_function(
inputs=[obs], outputs=policy_network_outputs)
self.target_qf_f_prob_online = tensor_utils.compile_function(
inputs=[obs, actions], outputs=target_qf_outputs)
# Set up target init and update function
with tf.name_scope('setup_target'):
ops = tensor_utils.get_target_ops(
self.policy.get_global_vars(),
self._target_policy.get_global_vars(), self._tau)
policy_init_ops, policy_update_ops = ops
qf_init_ops, qf_update_ops = tensor_utils.get_target_ops(
self._qf.get_global_vars(),
self._target_qf.get_global_vars(), self._tau)
target_init_op = policy_init_ops + qf_init_ops
target_update_op = policy_update_ops + qf_update_ops
f_init_target = tensor_utils.compile_function(
inputs=[], outputs=target_init_op)
f_update_target = tensor_utils.compile_function(
inputs=[], outputs=target_update_op)
with tf.name_scope('inputs'):
obs_dim = self.env_spec.observation_space.flat_dim
input_y = tf.compat.v1.placeholder(tf.float32,
shape=(None, 1),
name='input_y')
obs = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs_dim),
name='input_observation')
actions = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self.env_spec.action_space.flat_dim),
name='input_action')
# Set up policy training function
next_action = self.policy.get_action_sym(obs, name='policy_action')
next_qval = self._qf.get_qval_sym(obs,
next_action,
name='policy_action_qval')
with tf.name_scope('action_loss'):
action_loss = -tf.reduce_mean(next_qval)
if self._policy_weight_decay > 0.:
regularizer = tf.keras.regularizers.l2(
self._policy_weight_decay)
for var in self.policy.get_regularizable_vars():
policy_reg = regularizer(var)
action_loss += policy_reg
with tf.name_scope('minimize_action_loss'):
policy_optimizer = make_optimizer(
self._policy_optimizer,
learning_rate=self._policy_lr,
name='PolicyOptimizer')
policy_train_op = policy_optimizer.minimize(
action_loss, var_list=self.policy.get_trainable_vars())
f_train_policy = tensor_utils.compile_function(
inputs=[obs], outputs=[policy_train_op, action_loss])
# Set up qf training function
qval = self._qf.get_qval_sym(obs, actions, name='q_value')
with tf.name_scope('qval_loss'):
qval_loss = tf.reduce_mean(
tf.compat.v1.squared_difference(input_y, qval))
if self._qf_weight_decay > 0.:
regularizer = tf.keras.regularizers.l2(
self._qf_weight_decay)
for var in self._qf.get_regularizable_vars():
qf_reg = regularizer(var)
qval_loss += qf_reg
with tf.name_scope('minimize_qf_loss'):
qf_optimizer = make_optimizer(self._qf_optimizer,
learning_rate=self._qf_lr,
name='QFunctionOptimizer')
qf_train_op = qf_optimizer.minimize(
qval_loss, var_list=self._qf.get_trainable_vars())
f_train_qf = tensor_utils.compile_function(
inputs=[input_y, obs, actions],
outputs=[qf_train_op, qval_loss, qval])
self.f_train_policy = f_train_policy
self.f_train_qf = f_train_qf
self.f_init_target = f_init_target
self.f_update_target = f_update_target
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
data = self.__dict__.copy()
del data['target_policy_f_prob_online']
del data['target_qf_f_prob_online']
del data['f_train_policy']
del data['f_train_qf']
del data['f_init_target']
del data['f_update_target']
return data
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
self.__dict__.update(state)
self.init_opt()
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
runner.enable_logging = False
for _ in runner.step_epochs():
for cycle in range(self._steps_per_epoch):
runner.step_path = runner.obtain_samples(runner.step_itr)
for path in runner.step_path:
path['rewards'] *= self._reward_scale
last_return = self.train_once(runner.step_itr,
runner.step_path)
if (cycle == 0 and self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
runner.enable_logging = True
log_performance(runner.step_itr,
obtain_evaluation_samples(
self.policy, runner.get_env_copy()),
discount=self._discount)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
np.float64: Average return.
"""
paths = samples_to_tensors(paths)
epoch = itr / self._steps_per_epoch
self._episode_rewards.extend([
path for path, complete in zip(paths['undiscounted_returns'],
paths['complete']) if complete
])
self._success_history.extend([
path for path, complete in zip(paths['success_history'],
paths['complete']) if complete
])
# Avoid calculating the mean of an empty list in cases where
# all paths were non-terminal.
last_average_return = np.NaN
avg_success_rate = 0
if self._episode_rewards:
last_average_return = np.mean(self._episode_rewards)
if self._success_history:
if (itr % self._steps_per_epoch == 0
and self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
avg_success_rate = np.mean(self._success_history)
self.policy.log_diagnostics(paths)
self._qf.log_diagnostics(paths)
for _ in range(self._n_train_steps):
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
qf_loss, y_s, qval, policy_loss = self.optimize_policy()
self._episode_policy_losses.append(policy_loss)
self._episode_qf_losses.append(qf_loss)
self._epoch_ys.append(y_s)
self._epoch_qs.append(qval)
if itr % self._steps_per_epoch == 0:
logger.log('Training finished')
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
tabular.record('Epoch', epoch)
tabular.record('Policy/AveragePolicyLoss',
np.mean(self._episode_policy_losses))
tabular.record('QFunction/AverageQFunctionLoss',
np.mean(self._episode_qf_losses))
tabular.record('QFunction/AverageQ', np.mean(self._epoch_qs))
tabular.record('QFunction/MaxQ', np.max(self._epoch_qs))
tabular.record('QFunction/AverageAbsQ',
np.mean(np.abs(self._epoch_qs)))
tabular.record('QFunction/AverageY', np.mean(self._epoch_ys))
tabular.record('QFunction/MaxY', np.max(self._epoch_ys))
tabular.record('QFunction/AverageAbsY',
np.mean(np.abs(self._epoch_ys)))
tabular.record('AverageSuccessRate', avg_success_rate)
if not self._smooth_return:
self._episode_rewards = []
self._episode_policy_losses = []
self._episode_qf_losses = []
self._epoch_ys = []
self._epoch_qs = []
self._success_history.clear()
return last_average_return
def optimize_policy(self):
"""Perform algorithm optimizing.
Returns:
float: Loss of action predicted by the policy network
float: Loss of q value predicted by the q network.
float: ys.
float: Q value predicted by the q network.
"""
transitions = self.replay_buffer.sample_transitions(
self._buffer_batch_size)
observations = transitions['observations']
next_observations = transitions['next_observations']
rewards = transitions['rewards'].reshape(-1, 1)
actions = transitions['actions']
terminals = transitions['terminals'].reshape(-1, 1)
next_inputs = next_observations
inputs = observations
target_actions = self.target_policy_f_prob_online(next_inputs)
target_qvals = self.target_qf_f_prob_online(next_inputs,
target_actions)
clip_range = (-self._clip_return,
0. if self._clip_pos_returns else self._clip_return)
ys = np.clip(
rewards + (1.0 - terminals) * self._discount * target_qvals,
clip_range[0], clip_range[1])
_, qval_loss, qval = self.f_train_qf(ys, inputs, actions)
_, action_loss = self.f_train_policy(inputs)
self.f_update_target()
return qval_loss, ys, qval, action_loss
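# Summary of the update above as equations (added note, not in the original
# source). The critic target is
#     y = clip(r + (1 - terminal) * discount * Q'(s', mu'(s')),
#              -clip_return, clip_return)
# where the upper bound becomes 0 if clip_pos_returns is set; the critic
# minimizes the mean of (y - Q(s, a))**2, the actor maximizes Q(s, mu(s)),
# and both target networks are then soft-updated with interpolation factor
# target_update_tau.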
| 18,128 | 41.062645 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/dqn.py | """Deep Q-Learning Network algorithm."""
import akro
from dowel import tabular
import numpy as np
import tensorflow as tf
from garage import _Default, make_optimizer
from garage import log_performance
from garage.np import obtain_evaluation_samples
from garage.np import samples_to_tensors
from garage.np.algos import RLAlgorithm
from garage.sampler import OffPolicyVectorizedSampler
from garage.tf.misc import tensor_utils
class DQN(RLAlgorithm):
"""DQN from https://arxiv.org/pdf/1312.5602.pdf.
    Known as Deep Q-Network, it estimates the Q-value function with deep
    neural networks. It enables Q-Learning to be applied to high-complexity
    environments. To deal with pixel environments, a number of tricks are
    usually needed, e.g. skipping frames and stacking frames as a single
    observation.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
policy (garage.tf.policies.Policy): Policy.
qf (object): The q value network.
replay_buffer (garage.replay_buffer.ReplayBuffer): Replay buffer.
exploration_policy
(garage.np.exploration_policies.ExplorationPolicy):
Exploration strategy.
steps_per_epoch (int): Number of train_once calls per epoch.
min_buffer_size (int): The minimum buffer size for replay buffer.
buffer_batch_size (int): Batch size for replay buffer.
rollout_batch_size (int): Roll out batch size.
n_train_steps (int): Training steps.
max_path_length (int): Maximum path length. The episode will
terminate when length of trajectory reaches max_path_length.
max_eval_path_length (int or None): Maximum length of paths used for
off-policy evaluation. If None, defaults to `max_path_length`.
qf_lr (float): Learning rate for Q-Function.
qf_optimizer (tf.Optimizer): Optimizer for Q-Function.
discount (float): Discount factor for rewards.
target_network_update_freq (int): Frequency of updating target
network.
grad_norm_clipping (float): Maximum clipping value for clipping
tensor values to a maximum L2-norm. It must be larger than 0.
If None, no gradient clipping is done. For detail, see
docstring for tf.clip_by_norm.
double_q (bool): Bool for using double q-network.
reward_scale (float): Reward scale.
smooth_return (bool): Whether to smooth the return.
name (str): Name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
qf,
replay_buffer,
exploration_policy=None,
steps_per_epoch=20,
min_buffer_size=int(1e4),
buffer_batch_size=64,
rollout_batch_size=1,
n_train_steps=50,
max_path_length=None,
max_eval_path_length=None,
qf_lr=_Default(0.001),
qf_optimizer=tf.compat.v1.train.AdamOptimizer,
discount=1.0,
target_network_update_freq=5,
grad_norm_clipping=None,
double_q=False,
reward_scale=1.,
smooth_return=True,
name='DQN'):
self._qf_optimizer = qf_optimizer
self._qf_lr = qf_lr
self._name = name
self._target_network_update_freq = target_network_update_freq
self._grad_norm_clipping = grad_norm_clipping
self._double_q = double_q
# clone a target q-function
self._target_qf = qf.clone('target_qf')
self._min_buffer_size = min_buffer_size
self._qf = qf
self._steps_per_epoch = steps_per_epoch
self._n_train_steps = n_train_steps
self._buffer_batch_size = buffer_batch_size
self._discount = discount
self._reward_scale = reward_scale
self._smooth_return = smooth_return
self.max_path_length = max_path_length
self._max_eval_path_length = max_eval_path_length
# used by OffPolicyVectorizedSampler
self.env_spec = env_spec
self.rollout_batch_size = rollout_batch_size
self.replay_buffer = replay_buffer
self.policy = policy
self.exploration_policy = exploration_policy
self.sampler_cls = OffPolicyVectorizedSampler
self.init_opt()
def init_opt(self):
"""Initialize the networks and Ops.
Assume discrete space for dqn, so action dimension
will always be action_space.n
"""
action_dim = self.env_spec.action_space.n
self.episode_rewards = []
self.episode_qf_losses = []
# build q networks
with tf.name_scope(self._name):
action_t_ph = tf.compat.v1.placeholder(tf.int32,
None,
name='action')
reward_t_ph = tf.compat.v1.placeholder(tf.float32,
None,
name='reward')
done_t_ph = tf.compat.v1.placeholder(tf.float32, None, name='done')
with tf.name_scope('update_ops'):
target_update_op = tensor_utils.get_target_ops(
self._qf.get_global_vars(),
self._target_qf.get_global_vars())
self._qf_update_ops = tensor_utils.compile_function(
inputs=[], outputs=target_update_op)
with tf.name_scope('td_error'):
# Q-value of the selected action
action = tf.one_hot(action_t_ph,
action_dim,
on_value=1.,
off_value=0.)
q_selected = tf.reduce_sum(
self._qf.q_vals * action, # yapf: disable
axis=1)
                # r + Q'(s', argmax_a(Q(s', _))) - Q(s, a)
if self._double_q:
target_qval_with_online_q = self._qf.get_qval_sym(
self._target_qf.input, self._qf.name)
future_best_q_val_action = tf.argmax(
target_qval_with_online_q, 1)
future_best_q_val = tf.reduce_sum(
self._target_qf.q_vals *
tf.one_hot(future_best_q_val_action,
action_dim,
on_value=1.,
off_value=0.),
axis=1)
else:
# r + max_a(Q'(s', _)) - Q(s, a)
future_best_q_val = tf.reduce_max(self._target_qf.q_vals,
axis=1)
q_best_masked = (1.0 - done_t_ph) * future_best_q_val
# if done, it's just reward
# else reward + discount * future_best_q_val
target_q_values = (reward_t_ph +
self._discount * q_best_masked)
# td_error = q_selected - tf.stop_gradient(target_q_values)
loss = tf.compat.v1.losses.huber_loss(
q_selected, tf.stop_gradient(target_q_values))
loss = tf.reduce_mean(loss)
with tf.name_scope('optimize_ops'):
qf_optimizer = make_optimizer(self._qf_optimizer,
learning_rate=self._qf_lr)
if self._grad_norm_clipping is not None:
gradients = qf_optimizer.compute_gradients(
loss, var_list=self._qf.get_trainable_vars())
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(
grad, self._grad_norm_clipping), var)
optimize_loss = qf_optimizer.apply_gradients(gradients)
else:
optimize_loss = qf_optimizer.minimize(
loss, var_list=self._qf.get_trainable_vars())
self._train_qf = tensor_utils.compile_function(
inputs=[
self._qf.input, action_t_ph, reward_t_ph, done_t_ph,
self._target_qf.input
],
outputs=[loss, optimize_loss])
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
runner.enable_logging = False
for _ in runner.step_epochs():
for cycle in range(self._steps_per_epoch):
runner.step_path = runner.obtain_samples(runner.step_itr)
for path in runner.step_path:
path['rewards'] *= self._reward_scale
last_return = self.train_once(runner.step_itr,
runner.step_path)
if (cycle == 0 and self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
runner.enable_logging = True
log_performance(runner.step_itr,
obtain_evaluation_samples(
self.policy, runner.get_env_copy()),
discount=self._discount)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
numpy.float64: Average return.
"""
paths = samples_to_tensors(paths)
epoch = itr / self._steps_per_epoch
self.episode_rewards.extend(paths['undiscounted_returns'])
last_average_return = np.mean(self.episode_rewards)
for _ in range(self._n_train_steps):
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
qf_loss = self.optimize_policy(None)
self.episode_qf_losses.append(qf_loss)
if self.replay_buffer.n_transitions_stored >= self._min_buffer_size:
if itr % self._target_network_update_freq == 0:
self._qf_update_ops()
if itr % self._steps_per_epoch == 0:
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
mean100ep_rewards = round(np.mean(self.episode_rewards[-100:]),
1)
mean100ep_qf_loss = np.mean(self.episode_qf_losses[-100:])
tabular.record('Epoch', epoch)
tabular.record('Episode100RewardMean', mean100ep_rewards)
tabular.record('{}/Episode100LossMean'.format(self._qf.name),
mean100ep_qf_loss)
return last_average_return
def optimize_policy(self, samples_data):
"""Optimize network using experiences from replay buffer.
Args:
samples_data (list): Processed batch data.
Returns:
numpy.float64: Loss of policy.
"""
del samples_data
transitions = self.replay_buffer.sample_transitions(
self._buffer_batch_size)
observations = transitions['observations']
rewards = transitions['rewards']
actions = self.env_spec.action_space.unflatten_n(
transitions['actions'])
next_observations = transitions['next_observations']
dones = transitions['terminals']
if isinstance(self.env_spec.observation_space, akro.Image):
if len(observations.shape[1:]) < len(
self.env_spec.observation_space.shape):
observations = self.env_spec.observation_space.unflatten_n(
observations)
next_observations = self.env_spec.observation_space.\
unflatten_n(next_observations)
loss, _ = self._train_qf(observations, actions, rewards, dones,
next_observations)
return loss
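    # Added note (not in the original source): the TD target built in
    # init_opt() is
    #     y = r + (1 - done) * discount * max_a Q'(s', a)
    # or, when double_q is enabled,
    #     y = r + (1 - done) * discount * Q'(s', argmax_a Q(s', a)),
    # and the Huber loss between Q(s, a) and stop_gradient(y) is minimized,
    # with the target network synced every target_network_update_freq
    # iterations in train_once().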
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_qf_update_ops']
del data['_train_qf']
return data
def __setstate__(self, state):
"""Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
"""
self.__dict__ = state
self.init_opt()
| 13,235 | 39.231003 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/erwr.py | """Episodic Reward Weighted Regression."""
from garage.tf.algos.vpg import VPG
from garage.tf.optimizers import LbfgsOptimizer
class ERWR(VPG):
"""Episodic Reward Weighted Regression [1].
Note:
        This does not implement the original RWR [2]_ that deals with
"immediate reward problems" since it doesn't find solutions
that optimize for temporally delayed rewards.
.. [1] Kober, Jens, and Jan R. Peters. "Policy search for motor
primitives in robotics." Advances in neural information
processing systems. 2009.
.. [2] Peters, Jan, and Stefan Schaal. "Using reward-weighted
regression for reinforcement learning of task space control.
" Approximate Dynamic Programming and Reinforcement Learning,
2007. ADPRL 2007. IEEE International Symposium on. IEEE, 2007.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
name (str): The name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=True,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
flatten_input=True,
name='ERWR'):
if optimizer is None:
optimizer = LbfgsOptimizer
if optimizer_args is None:
optimizer_args = dict()
super().__init__(env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=optimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
flatten_input=flatten_input,
name=name)
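

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It shows how
# ERWR is typically wired to a policy, baseline and runner. The imports and
# keyword arguments follow upstream garage 2020.06 conventions and are
# assumptions; exact names may differ in this fork. The helper is defined for
# reference only and is never called by the library.
def _erwr_usage_sketch(snapshot_config):
    """Minimal ERWR training-loop sketch (illustrative only)."""
    from garage.envs import GarageEnv
    from garage.experiment import LocalTFRunner
    from garage.np.baselines import LinearFeatureBaseline
    from garage.tf.policies import CategoricalMLPPolicy

    with LocalTFRunner(snapshot_config=snapshot_config) as runner:
        env = GarageEnv(env_name='CartPole-v1')
        policy = CategoricalMLPPolicy(name='policy',
                                      env_spec=env.spec,
                                      hidden_sizes=(32, 32))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = ERWR(env_spec=env.spec,
                    policy=policy,
                    baseline=baseline,
                    max_path_length=100,
                    discount=0.99)
        runner.setup(algo=algo, env=env)
        runner.train(n_epochs=100, batch_size=10000)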
| 5,410 | 46.052174 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/npo.py | """Natural Policy Gradient Optimization."""
# pylint: disable=wrong-import-order
import collections
from dowel import logger, tabular
import numpy as np
import tensorflow as tf
from garage import log_performance, TrajectoryBatch
from garage import make_optimizer
from garage.misc import tensor_utils as np_tensor_utils
from garage.np.algos import RLAlgorithm
from garage.sampler import OnPolicyVectorizedSampler
from garage.tf import paths_to_tensors
from garage.tf.misc.tensor_utils import center_advs
from garage.tf.misc.tensor_utils import compile_function
from garage.tf.misc.tensor_utils import compute_advantages
from garage.tf.misc.tensor_utils import discounted_returns
from garage.tf.misc.tensor_utils import flatten_inputs
from garage.tf.misc.tensor_utils import graph_inputs
from garage.tf.misc.tensor_utils import positive_advs
from garage.tf.optimizers import LbfgsOptimizer
from garage.tf.samplers import BatchSampler
class NPO(RLAlgorithm):
"""Natural Policy Gradient Optimization.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
pg_loss (str): A string from: 'vanilla', 'surrogate',
'surrogate_clip'. The type of loss functions to use.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
name (str): The name of the algorithm.
Note:
sane defaults for entropy configuration:
            - entropy_method='max', center_adv=False,
              stop_entropy_gradient=True
(center_adv normalizes the advantages tensor, which will
significantly alleviate the effect of entropy. It is also
recommended to turn off entropy gradient so that the agent
will focus on high-entropy actions instead of increasing the
variance of the distribution.)
            - entropy_method='regularized', stop_entropy_gradient=False,
              use_neg_logli_entropy=False
"""
def __init__(self,
env_spec,
policy,
baseline,
scope=None,
max_path_length=100,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
pg_loss='surrogate',
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
flatten_input=True,
name='NPO'):
self.policy = policy
self.scope = scope
self.max_path_length = max_path_length
self._env_spec = env_spec
self._baseline = baseline
self._discount = discount
self._gae_lambda = gae_lambda
self._center_adv = center_adv
self._positive_adv = positive_adv
self._fixed_horizon = fixed_horizon
self._flatten_input = flatten_input
self._name = name
self._name_scope = tf.name_scope(self._name)
self._old_policy = policy.clone('old_policy')
self._use_softplus_entropy = use_softplus_entropy
self._use_neg_logli_entropy = use_neg_logli_entropy
self._stop_entropy_gradient = stop_entropy_gradient
self._pg_loss = pg_loss
if optimizer is None:
if optimizer_args is None:
optimizer_args = dict()
optimizer = LbfgsOptimizer
self._check_entropy_configuration(entropy_method, center_adv,
stop_entropy_gradient,
use_neg_logli_entropy,
policy_ent_coeff)
if pg_loss not in ['vanilla', 'surrogate', 'surrogate_clip']:
raise ValueError('Invalid pg_loss')
self._optimizer = make_optimizer(optimizer, **optimizer_args)
self._lr_clip_range = float(lr_clip_range)
self._max_kl_step = float(max_kl_step)
self._policy_ent_coeff = float(policy_ent_coeff)
self._f_rewards = None
self._f_returns = None
self._f_policy_kl = None
self._f_policy_entropy = None
self._policy_network = None
self._old_policy_network = None
self._episode_reward_mean = collections.deque(maxlen=100)
if policy.vectorized:
self.sampler_cls = OnPolicyVectorizedSampler
else:
self.sampler_cls = BatchSampler
self.init_opt()
def init_opt(self):
"""Initialize optimizater."""
pol_loss_inputs, pol_opt_inputs = self._build_inputs()
self._policy_opt_inputs = pol_opt_inputs
pol_loss, pol_kl = self._build_policy_loss(pol_loss_inputs)
self._optimizer.update_opt(loss=pol_loss,
target=self.policy,
leq_constraint=(pol_kl, self._max_kl_step),
inputs=flatten_inputs(
self._policy_opt_inputs),
constraint_name='mean_kl')
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in runner.step_epochs():
runner.step_path = runner.obtain_samples(runner.step_itr)
last_return = self.train_once(runner.step_itr, runner.step_path)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
numpy.float64: Average return.
"""
# -- Stage: Calculate baseline
paths = [
dict(
observations=self._env_spec.observation_space.flatten_n(
path['observations'])
if self._flatten_input else path['observations'],
actions=(
self._env_spec.action_space.flatten_n( # noqa: E126
path['actions'])),
rewards=path['rewards'],
env_infos=path['env_infos'],
agent_infos=path['agent_infos'],
dones=path['dones']) for path in paths
]
if hasattr(self._baseline, 'predict_n'):
baseline_predictions = self._baseline.predict_n(paths)
else:
baseline_predictions = [
self._baseline.predict(path) for path in paths
]
# -- Stage: Pre-process samples based on collected paths
samples_data = paths_to_tensors(paths, self.max_path_length,
baseline_predictions, self._discount,
self._gae_lambda)
# -- Stage: Run and calculate performance of the algorithm
undiscounted_returns = log_performance(
itr,
TrajectoryBatch.from_trajectory_list(self._env_spec, paths),
discount=self._discount)
self._episode_reward_mean.extend(undiscounted_returns)
tabular.record('Extras/EpisodeRewardMean',
np.mean(self._episode_reward_mean))
samples_data['average_return'] = np.mean(undiscounted_returns)
self.log_diagnostics(samples_data)
logger.log('Optimizing policy...')
self.optimize_policy(samples_data)
return samples_data['average_return']
def log_diagnostics(self, paths):
"""Log diagnostic information.
Args:
paths (list[dict]): A list of collected paths.
"""
logger.log('Logging diagnostics...')
self.policy.log_diagnostics(paths)
self._baseline.log_diagnostics(paths)
def optimize_policy(self, samples_data):
"""Optimize policy.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
"""
policy_opt_input_values = self._policy_opt_input_values(samples_data)
logger.log('Computing loss before')
loss_before = self._optimizer.loss(policy_opt_input_values)
logger.log('Computing KL before')
policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
logger.log('Optimizing')
self._optimizer.optimize(policy_opt_input_values)
logger.log('Computing KL after')
policy_kl = self._f_policy_kl(*policy_opt_input_values)
logger.log('Computing loss after')
loss_after = self._optimizer.loss(policy_opt_input_values)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
pol_ent = self._f_policy_entropy(*policy_opt_input_values)
ent = np.sum(pol_ent) / np.sum(samples_data['valids'])
tabular.record('{}/Entropy'.format(self.policy.name), ent)
tabular.record('{}/Perplexity'.format(self.policy.name), np.exp(ent))
self._fit_baseline_with_data(samples_data)
ev = np_tensor_utils.explained_variance_1d(samples_data['baselines'],
samples_data['returns'],
samples_data['valids'])
tabular.record('{}/ExplainedVariance'.format(self._baseline.name), ev)
self._old_policy.model.parameters = self.policy.model.parameters
def _build_inputs(self):
"""Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
"""
observation_space = self.policy.observation_space
action_space = self.policy.action_space
with tf.name_scope('inputs'):
if self._flatten_input:
obs_var = tf.compat.v1.placeholder(
tf.float32,
shape=[None, None, observation_space.flat_dim],
name='obs')
else:
obs_var = observation_space.to_tf_placeholder(name='obs',
batch_dims=2)
action_var = action_space.to_tf_placeholder(name='action',
batch_dims=2)
reward_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='reward')
valid_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='valid')
baseline_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='baseline')
policy_state_info_vars = {
k: tf.compat.v1.placeholder(tf.float32,
shape=[None] * 2 + list(shape),
name=k)
for k, shape in self.policy.state_info_specs
}
policy_state_info_vars_list = [
policy_state_info_vars[k] for k in self.policy.state_info_keys
]
augmented_obs_var = obs_var
for k in self.policy.state_info_keys:
extra_state_var = policy_state_info_vars[k]
extra_state_var = tf.cast(extra_state_var, tf.float32)
augmented_obs_var = tf.concat([augmented_obs_var, extra_state_var],
-1)
self._policy_network = self.policy.build(augmented_obs_var,
name='policy')
self._old_policy_network = self._old_policy.build(augmented_obs_var,
name='policy')
policy_loss_inputs = graph_inputs(
'PolicyLossInputs',
action_var=action_var,
reward_var=reward_var,
baseline_var=baseline_var,
valid_var=valid_var,
policy_state_info_vars=policy_state_info_vars,
)
policy_opt_inputs = graph_inputs(
'PolicyOptInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
baseline_var=baseline_var,
valid_var=valid_var,
policy_state_info_vars_list=policy_state_info_vars_list,
)
return policy_loss_inputs, policy_opt_inputs
# pylint: disable=too-many-branches, too-many-statements
def _build_policy_loss(self, i):
"""Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy loss.
tf.Tensor: Mean policy KL divergence.
"""
policy_entropy = self._build_entropy_term(i)
rewards = i.reward_var
if self._maximum_entropy:
with tf.name_scope('augmented_rewards'):
rewards = i.reward_var + (self._policy_ent_coeff *
policy_entropy)
with tf.name_scope('policy_loss'):
adv = compute_advantages(self._discount,
self._gae_lambda,
self.max_path_length,
i.baseline_var,
rewards,
name='adv')
adv = tf.reshape(adv, [-1, self.max_path_length])
# Optionally normalize advantages
eps = tf.constant(1e-8, dtype=tf.float32)
if self._center_adv:
adv = center_advs(adv, axes=[0], eps=eps)
if self._positive_adv:
adv = positive_advs(adv, eps)
old_policy_dist = self._old_policy_network.dist
policy_dist = self._policy_network.dist
with tf.name_scope('kl'):
kl = old_policy_dist.kl_divergence(policy_dist)
pol_mean_kl = tf.reduce_mean(kl)
# Calculate vanilla loss
with tf.name_scope('vanilla_loss'):
ll = policy_dist.log_prob(i.action_var, name='log_likelihood')
vanilla = ll * adv
# Calculate surrogate loss
with tf.name_scope('surrogate_loss'):
lr = tf.exp(ll - old_policy_dist.log_prob(i.action_var))
surrogate = lr * adv
# Finalize objective function
with tf.name_scope('loss'):
if self._pg_loss == 'vanilla':
# VPG uses the vanilla objective
obj = tf.identity(vanilla, name='vanilla_obj')
elif self._pg_loss == 'surrogate':
# TRPO uses the standard surrogate objective
obj = tf.identity(surrogate, name='surr_obj')
elif self._pg_loss == 'surrogate_clip':
lr_clip = tf.clip_by_value(lr,
1 - self._lr_clip_range,
1 + self._lr_clip_range,
name='lr_clip')
surr_clip = lr_clip * adv
obj = tf.minimum(surrogate, surr_clip, name='surr_obj')
if self._entropy_regularzied:
obj += self._policy_ent_coeff * policy_entropy
# filter only the valid values
obj = tf.boolean_mask(obj, i.valid_var)
# Maximize E[surrogate objective] by minimizing
# -E_t[surrogate objective]
loss = -tf.reduce_mean(obj)
# Diagnostic functions
self._f_policy_kl = tf.compat.v1.get_default_session(
).make_callable(pol_mean_kl,
feed_list=flatten_inputs(self._policy_opt_inputs))
self._f_rewards = tf.compat.v1.get_default_session().make_callable(
rewards, feed_list=flatten_inputs(self._policy_opt_inputs))
returns = discounted_returns(self._discount, self.max_path_length,
rewards)
self._f_returns = tf.compat.v1.get_default_session().make_callable(
returns, feed_list=flatten_inputs(self._policy_opt_inputs))
return loss, pol_mean_kl
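    # Worked illustration of the clipped surrogate above (illustrative
    # numbers): with lr_clip_range=0.2, a likelihood ratio lr=1.3 and
    # advantage adv=+1.0 give surrogate=1.3 but surr_clip=1.2, and the
    # objective keeps min(1.3, 1.2)=1.2, so the policy gains nothing from
    # pushing the ratio beyond 1 + lr_clip_range. With adv=-1.0 the min
    # keeps the unclipped term, preserving the penalty for moving the ratio
    # in the wrong direction.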
def _build_entropy_term(self, i):
"""Build policy entropy tensor.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy entropy.
"""
pol_dist = self._policy_network.dist
with tf.name_scope('policy_entropy'):
if self._use_neg_logli_entropy:
policy_entropy = -pol_dist.log_prob(i.action_var,
name='policy_log_likeli')
else:
policy_entropy = pol_dist.entropy()
# This prevents entropy from becoming negative for small policy std
if self._use_softplus_entropy:
policy_entropy = tf.nn.softplus(policy_entropy)
if self._stop_entropy_gradient:
policy_entropy = tf.stop_gradient(policy_entropy)
# dense form, match the shape of advantage
policy_entropy = tf.reshape(policy_entropy, [-1, self.max_path_length])
self._f_policy_entropy = compile_function(
flatten_inputs(self._policy_opt_inputs), policy_entropy)
return policy_entropy
def _fit_baseline_with_data(self, samples_data):
"""Update baselines from samples.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
"""
policy_opt_input_values = self._policy_opt_input_values(samples_data)
# Augment reward from baselines
rewards_tensor = self._f_rewards(*policy_opt_input_values)
returns_tensor = self._f_returns(*policy_opt_input_values)
returns_tensor = np.squeeze(returns_tensor, -1)
paths = samples_data['paths']
valids = samples_data['valids']
# Recompute parts of samples_data
aug_rewards = []
aug_returns = []
for rew, ret, val, path in zip(rewards_tensor, returns_tensor, valids,
paths):
            path['rewards'] = rew[val.astype(bool)]
            path['returns'] = ret[val.astype(bool)]
aug_rewards.append(path['rewards'])
aug_returns.append(path['returns'])
samples_data['rewards'] = np_tensor_utils.pad_tensor_n(
aug_rewards, self.max_path_length)
samples_data['returns'] = np_tensor_utils.pad_tensor_n(
aug_returns, self.max_path_length)
# Fit baseline
logger.log('Fitting baseline...')
self._baseline.fit(paths)
def _policy_opt_input_values(self, samples_data):
"""Map rollout samples to the policy optimizer inputs.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
Returns:
list(np.ndarray): Flatten policy optimization input values.
"""
policy_state_info_list = [
samples_data['agent_infos'][k] for k in self.policy.state_info_keys
]
# pylint: disable=unexpected-keyword-arg
policy_opt_input_values = self._policy_opt_inputs._replace(
obs_var=samples_data['observations'],
action_var=samples_data['actions'],
reward_var=samples_data['rewards'],
baseline_var=samples_data['baselines'],
valid_var=samples_data['valids'],
policy_state_info_vars_list=policy_state_info_list,
)
return flatten_inputs(policy_opt_input_values)
def _check_entropy_configuration(self, entropy_method, center_adv,
stop_entropy_gradient,
use_neg_logli_entropy, policy_ent_coeff):
"""Check entropy configuration.
Args:
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized'
adds the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
use_neg_logli_entropy (bool): Whether to estimate the entropy as
the negative log likelihood of the action.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
Raises:
ValueError: If center_adv is True when entropy_method is max.
            ValueError: If stop_entropy_gradient is False when
                entropy_method is max.
ValueError: If policy_ent_coeff is non-zero when there is
no entropy method.
ValueError: If entropy_method is not one of 'max', 'regularized',
'no_entropy'.
"""
del use_neg_logli_entropy
if entropy_method == 'max':
if center_adv:
raise ValueError('center_adv should be False when '
'entropy_method is max')
if not stop_entropy_gradient:
                raise ValueError('stop_entropy_gradient should be True '
                                 'when entropy_method is max')
self._maximum_entropy = True
self._entropy_regularzied = False
elif entropy_method == 'regularized':
self._maximum_entropy = False
self._entropy_regularzied = True
elif entropy_method == 'no_entropy':
if policy_ent_coeff != 0.0:
raise ValueError('policy_ent_coeff should be zero '
'when there is no entropy method')
self._maximum_entropy = False
self._entropy_regularzied = False
else:
raise ValueError('Invalid entropy_method')
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_name_scope']
del data['_policy_opt_inputs']
del data['_f_policy_entropy']
del data['_f_policy_kl']
del data['_f_rewards']
del data['_f_returns']
del data['_policy_network']
del data['_old_policy_network']
return data
def __setstate__(self, state):
"""Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
"""
self.__dict__ = state
self._name_scope = tf.name_scope(self._name)
self.init_opt()
| 26,617 | 40.268217 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/ppo.py | """Proximal Policy Optimization."""
from garage.tf.algos.npo import NPO
from garage.tf.optimizers import FirstOrderOptimizer
class PPO(NPO):
"""Proximal Policy Optimization.
See https://arxiv.org/abs/1707.06347.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
name (str): The name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
scope=None,
max_path_length=100,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
flatten_input=True,
name='PPO'):
if optimizer is None:
optimizer = FirstOrderOptimizer
if optimizer_args is None:
optimizer_args = dict()
super().__init__(env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
pg_loss='surrogate_clip',
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=optimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
flatten_input=flatten_input,
name=name)
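

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The imports and
# keyword arguments follow upstream garage 2020.06 conventions and are
# assumptions; exact names may differ in this fork. The helper is defined for
# reference only and is never called by the library.
def _ppo_usage_sketch(snapshot_config):
    """Minimal PPO training-loop sketch (illustrative only)."""
    from garage.envs import GarageEnv
    from garage.experiment import LocalTFRunner
    from garage.np.baselines import LinearFeatureBaseline
    from garage.tf.policies import GaussianMLPPolicy

    with LocalTFRunner(snapshot_config=snapshot_config) as runner:
        env = GarageEnv(env_name='InvertedDoublePendulum-v2')
        policy = GaussianMLPPolicy(name='policy',
                                   env_spec=env.spec,
                                   hidden_sizes=(64, 64))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = PPO(env_spec=env.spec,
                   policy=policy,
                   baseline=baseline,
                   max_path_length=100,
                   discount=0.99,
                   lr_clip_range=0.2)
        runner.setup(algo=algo, env=env)
        runner.train(n_epochs=120, batch_size=2048)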
| 4,801 | 44.733333 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/reps.py | """Relative Entropy Policy Search implementation in Tensorflow."""
import collections
from dowel import logger, tabular
import numpy as np
import scipy.optimize
import tensorflow as tf
from garage import _Default, make_optimizer
from garage import log_performance, TrajectoryBatch
from garage.np.algos import RLAlgorithm
from garage.sampler import OnPolicyVectorizedSampler
from garage.tf import paths_to_tensors
from garage.tf.misc import tensor_utils
from garage.tf.misc.tensor_utils import flatten_inputs
from garage.tf.misc.tensor_utils import graph_inputs
from garage.tf.optimizers import LbfgsOptimizer
from garage.tf.samplers import BatchSampler
# pylint: disable=differing-param-doc, differing-type-doc
class REPS(RLAlgorithm): # noqa: D416
"""Relative Entropy Policy Search.
References
----------
[1] J. Peters, K. Mulling, and Y. Altun, "Relative Entropy Policy Search,"
Artif. Intell., pp. 1607-1612, 2008.
Example:
$ python garage/examples/tf/reps_gym_cartpole.py
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
epsilon (float): Dual func parameter.
l2_reg_dual (float): Coefficient for dual func l2 regularization.
l2_reg_loss (float): Coefficient for policy loss l2 regularization.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): Arguments of the optimizer.
dual_optimizer (object): Dual func optimizer.
dual_optimizer_args (dict): Arguments of the dual optimizer.
name (str): Name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
max_path_length=500,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
epsilon=0.5,
l2_reg_dual=0.,
l2_reg_loss=0.,
optimizer=LbfgsOptimizer,
optimizer_args=None,
dual_optimizer=scipy.optimize.fmin_l_bfgs_b,
dual_optimizer_args=None,
name='REPS'):
optimizer_args = optimizer_args or dict(max_opt_itr=_Default(50))
dual_optimizer_args = dual_optimizer_args or dict(maxiter=50)
self.policy = policy
self.max_path_length = max_path_length
self._env_spec = env_spec
self._baseline = baseline
self._discount = discount
self._gae_lambda = gae_lambda
self._center_adv = center_adv
self._positive_adv = positive_adv
self._fixed_horizon = fixed_horizon
self._flatten_input = True
self._name = name
self._name_scope = tf.name_scope(self._name)
self._old_policy = policy.clone('old_policy')
self._feat_diff = None
self._param_eta = None
self._param_v = None
self._f_dual = None
self._f_dual_grad = None
self._f_policy_kl = None
self._policy_network = None
self._old_policy_network = None
self._optimizer = make_optimizer(optimizer, **optimizer_args)
self._dual_optimizer = dual_optimizer
self._dual_optimizer_args = dual_optimizer_args
self._epsilon = float(epsilon)
self._l2_reg_dual = float(l2_reg_dual)
self._l2_reg_loss = float(l2_reg_loss)
self._episode_reward_mean = collections.deque(maxlen=100)
if policy.vectorized:
self.sampler_cls = OnPolicyVectorizedSampler
else:
self.sampler_cls = BatchSampler
self.init_opt()
def init_opt(self):
"""Initialize the optimization procedure."""
pol_loss_inputs, pol_opt_inputs, dual_opt_inputs = self._build_inputs()
self._policy_opt_inputs = pol_opt_inputs
self._dual_opt_inputs = dual_opt_inputs
pol_loss = self._build_policy_loss(pol_loss_inputs)
self._optimizer.update_opt(loss=pol_loss,
target=self.policy,
inputs=flatten_inputs(
self._policy_opt_inputs))
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in runner.step_epochs():
runner.step_path = runner.obtain_samples(runner.step_itr)
last_return = self.train_once(runner.step_itr, runner.step_path)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
numpy.float64: Average return.
"""
# -- Stage: Calculate baseline
paths = [
dict(
observations=self._env_spec.observation_space.flatten_n(
path['observations'])
if self._flatten_input else path['observations'],
actions=(
self._env_spec.action_space.flatten_n( # noqa: E126
path['actions'])),
rewards=path['rewards'],
env_infos=path['env_infos'],
agent_infos=path['agent_infos'],
dones=path['dones']) for path in paths
]
if hasattr(self._baseline, 'predict_n'):
baseline_predictions = self._baseline.predict_n(paths)
else:
baseline_predictions = [
self._baseline.predict(path) for path in paths
]
# -- Stage: Pre-process samples based on collected paths
samples_data = paths_to_tensors(paths, self.max_path_length,
baseline_predictions, self._discount,
self._gae_lambda)
# -- Stage: Run and calculate performance of the algorithm
undiscounted_returns = log_performance(
itr,
TrajectoryBatch.from_trajectory_list(self._env_spec, paths),
discount=self._discount)
self._episode_reward_mean.extend(undiscounted_returns)
tabular.record('Extras/EpisodeRewardMean',
np.mean(self._episode_reward_mean))
samples_data['average_return'] = np.mean(undiscounted_returns)
self.log_diagnostics(samples_data)
logger.log('Optimizing policy...')
self.optimize_policy(samples_data)
return samples_data['average_return']
def log_diagnostics(self, paths):
"""Log diagnostic information.
Args:
paths (list[dict]): A list of collected paths.
"""
logger.log('Logging diagnostics...')
self.policy.log_diagnostics(paths)
self._baseline.log_diagnostics(paths)
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_name_scope']
del data['_policy_opt_inputs']
del data['_dual_opt_inputs']
del data['_f_dual']
del data['_f_dual_grad']
del data['_f_policy_kl']
del data['_policy_network']
del data['_old_policy_network']
return data
def __setstate__(self, state):
"""Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
"""
self.__dict__ = state
self._name_scope = tf.name_scope(self._name)
self.init_opt()
def optimize_policy(self, samples_data):
"""Optimize the policy using the samples.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
"""
# Initial BFGS parameter values.
x0 = np.hstack([self._param_eta, self._param_v])
# Set parameter boundaries: \eta>=1e-12, v unrestricted.
bounds = [(-np.inf, np.inf) for _ in x0]
bounds[0] = (1e-12, np.inf)
# Optimize dual
eta_before = self._param_eta
logger.log('Computing dual before')
self._feat_diff = self._features(samples_data)
dual_opt_input_values = self._dual_opt_input_values(samples_data)
dual_before = self._f_dual(*dual_opt_input_values)
logger.log('Optimizing dual')
def eval_dual(x):
"""Evaluate dual function loss.
Args:
x (numpy.ndarray): Input to dual function.
Returns:
numpy.float64: Dual function loss.
"""
self._param_eta = x[0]
self._param_v = x[1:]
dual_opt_input_values = self._dual_opt_input_values(samples_data)
return self._f_dual(*dual_opt_input_values)
def eval_dual_grad(x):
"""Evaluate gradient of dual function loss.
Args:
x (numpy.ndarray): Input to dual function.
Returns:
numpy.ndarray: Gradient of dual function loss.
"""
self._param_eta = x[0]
self._param_v = x[1:]
dual_opt_input_values = self._dual_opt_input_values(samples_data)
grad = self._f_dual_grad(*dual_opt_input_values)
            eta_grad = float(grad[0])
v_grad = grad[1]
return np.hstack([eta_grad, v_grad])
params_ast, _, _ = self._dual_optimizer(func=eval_dual,
x0=x0,
fprime=eval_dual_grad,
bounds=bounds,
**self._dual_optimizer_args)
logger.log('Computing dual after')
self._param_eta, self._param_v = params_ast[0], params_ast[1:]
dual_opt_input_values = self._dual_opt_input_values(samples_data)
dual_after = self._f_dual(*dual_opt_input_values)
# Optimize policy
policy_opt_input_values = self._policy_opt_input_values(samples_data)
logger.log('Computing policy loss before')
loss_before = self._optimizer.loss(policy_opt_input_values)
logger.log('Computing policy KL before')
policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
logger.log('Optimizing policy')
self._optimizer.optimize(policy_opt_input_values)
logger.log('Computing policy KL')
policy_kl = self._f_policy_kl(*policy_opt_input_values)
logger.log('Computing policy loss after')
loss_after = self._optimizer.loss(policy_opt_input_values)
tabular.record('EtaBefore', eta_before)
tabular.record('EtaAfter', self._param_eta)
tabular.record('DualBefore', dual_before)
tabular.record('DualAfter', dual_after)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
self._old_policy.model.parameters = self.policy.model.parameters
def _build_inputs(self):
"""Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
"""
observation_space = self.policy.observation_space
action_space = self.policy.action_space
with tf.name_scope('inputs'):
obs_var = observation_space.to_tf_placeholder(
name='obs',
batch_dims=2) # yapf: disable
action_var = action_space.to_tf_placeholder(
name='action',
batch_dims=2) # yapf: disable
reward_var = tensor_utils.new_tensor(
name='reward',
ndim=2,
dtype=tf.float32) # yapf: disable
valid_var = tensor_utils.new_tensor(
name='valid',
ndim=2,
dtype=tf.float32) # yapf: disable
feat_diff = tensor_utils.new_tensor(
name='feat_diff',
ndim=2,
dtype=tf.float32) # yapf: disable
param_v = tensor_utils.new_tensor(
name='param_v',
ndim=1,
dtype=tf.float32) # yapf: disable
param_eta = tensor_utils.new_tensor(
name='param_eta',
ndim=0,
dtype=tf.float32) # yapf: disable
policy_state_info_vars = {
k: tf.compat.v1.placeholder(
tf.float32,
shape=[None] * 2 + list(shape),
name=k)
for k, shape in self.policy.state_info_specs
} # yapf: disable
policy_state_info_vars_list = [
policy_state_info_vars[k]
for k in self.policy.state_info_keys
] # yapf: disable
self._policy_network = self.policy.build(obs_var, name='policy')
self._old_policy_network = self._old_policy.build(obs_var,
name='policy')
policy_loss_inputs = graph_inputs(
'PolicyLossInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars=policy_state_info_vars,
)
policy_opt_inputs = graph_inputs(
'PolicyOptInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars_list=policy_state_info_vars_list,
)
dual_opt_inputs = graph_inputs(
'DualOptInputs',
reward_var=reward_var,
valid_var=valid_var,
feat_diff=feat_diff,
param_eta=param_eta,
param_v=param_v,
policy_state_info_vars_list=policy_state_info_vars_list,
)
return policy_loss_inputs, policy_opt_inputs, dual_opt_inputs
def _build_policy_loss(self, i):
"""Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
Returns:
tf.Tensor: Policy loss.
tf.Tensor: Mean policy KL divergence.
"""
pol_dist = self._policy_network.dist
old_pol_dist = self._old_policy_network.dist
# Initialize dual params
self._param_eta = 15.
self._param_v = np.random.rand(
self._env_spec.observation_space.flat_dim * 2 + 4)
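        # The dimension mirrors _features(): each feature vector is
        # [obs, obs**2, t, t**2, t**3, 1], i.e. 2 * obs_flat_dim + 4 entries,
        # so param_v holds one weight per feature (e.g. 38 weights for a
        # 17-dimensional observation; illustrative numbers only).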
with tf.name_scope('bellman_error'):
delta_v = tf.boolean_mask(i.reward_var,
i.valid_var) + tf.tensordot(
i.feat_diff, i.param_v, 1)
with tf.name_scope('policy_loss'):
ll = pol_dist.log_prob(i.action_var)
ll = tf.boolean_mask(ll, i.valid_var)
loss = -tf.reduce_mean(
ll * tf.exp(delta_v / i.param_eta -
tf.reduce_max(delta_v / i.param_eta)))
reg_params = self.policy.get_regularizable_vars()
loss += self._l2_reg_loss * tf.reduce_sum(
[tf.reduce_mean(tf.square(param))
for param in reg_params]) / len(reg_params)
with tf.name_scope('kl'):
kl = old_pol_dist.kl_divergence(pol_dist)
pol_mean_kl = tf.reduce_mean(kl)
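        # Math form of the dual below (illustrative):
        #   g(eta, v) = eta * epsilon
        #               + eta * log((1 / N) * sum_i exp(delta_i / eta))
        # with delta_i = r_i + v^T (phi(s_{i+1}) - phi(s_i)) the Bellman error
        # under the linear value features phi. Subtracting the max inside the
        # exp and adding it back afterwards is the usual log-sum-exp shift for
        # numerical stability, and the l2_reg_dual term keeps eta away from
        # both 0 and infinity.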
with tf.name_scope('dual'):
dual_loss = i.param_eta * self._epsilon + (
i.param_eta * tf.math.log(
tf.reduce_mean(
tf.exp(delta_v / i.param_eta -
tf.reduce_max(delta_v / i.param_eta)))) +
i.param_eta * tf.reduce_max(delta_v / i.param_eta))
dual_loss += self._l2_reg_dual * (tf.square(i.param_eta) +
tf.square(1 / i.param_eta))
dual_grad = tf.gradients(dual_loss, [i.param_eta, i.param_v])
# yapf: disable
self._f_dual = tensor_utils.compile_function(
flatten_inputs(self._dual_opt_inputs),
dual_loss,
log_name='f_dual')
# yapf: enable
self._f_dual_grad = tensor_utils.compile_function(
flatten_inputs(self._dual_opt_inputs),
dual_grad,
log_name='f_dual_grad')
self._f_policy_kl = tensor_utils.compile_function(
flatten_inputs(self._policy_opt_inputs),
pol_mean_kl,
log_name='f_policy_kl')
return loss
def _dual_opt_input_values(self, samples_data):
"""Update dual func optimize input values based on samples data.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
Returns:
list(np.ndarray): Flatten dual function optimization input values.
"""
policy_state_info_list = [
samples_data['agent_infos'][k]
for k in self.policy.state_info_keys
] # yapf: disable
# pylint: disable=unexpected-keyword-arg
dual_opt_input_values = self._dual_opt_inputs._replace(
reward_var=samples_data['rewards'],
valid_var=samples_data['valids'],
feat_diff=self._feat_diff,
param_eta=self._param_eta,
param_v=self._param_v,
policy_state_info_vars_list=policy_state_info_list,
)
return flatten_inputs(dual_opt_input_values)
def _policy_opt_input_values(self, samples_data):
"""Update policy optimize input values based on samples data.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
Returns:
list(np.ndarray): Flatten policy optimization input values.
"""
policy_state_info_list = [
samples_data['agent_infos'][k]
for k in self.policy.state_info_keys
] # yapf: disable
# pylint: disable=unexpected-keyword-arg
policy_opt_input_values = self._policy_opt_inputs._replace(
obs_var=samples_data['observations'],
action_var=samples_data['actions'],
reward_var=samples_data['rewards'],
valid_var=samples_data['valids'],
feat_diff=self._feat_diff,
param_eta=self._param_eta,
param_v=self._param_v,
policy_state_info_vars_list=policy_state_info_list,
)
return flatten_inputs(policy_opt_input_values)
def _features(self, samples_data):
"""Get valid view features based on samples data.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
Returns:
numpy.ndarray: Features for training.
"""
paths = samples_data['paths']
feat_diff = []
for path in paths:
o = np.clip(path['observations'],
self._env_spec.observation_space.low,
self._env_spec.observation_space.high)
lr = len(path['rewards'])
al = np.arange(lr).reshape(-1, 1) / self.max_path_length
feats = np.concatenate(
[o, o**2, al, al**2, al**3,
np.ones((lr, 1))], axis=1)
# pylint: disable=unsubscriptable-object
feats = np.vstack([feats, np.zeros(feats.shape[1])])
feat_diff.append(feats[1:] - feats[:-1])
return np.vstack(feat_diff)
| 21,822 | 35.92555 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/rl2.py | """Module for RL2.
This module contains RL2, RL2Worker and the environment wrapper for RL2.
"""
import abc
import collections
import akro
from dowel import logger
import gym
import numpy as np
from garage import log_multitask_performance, TrajectoryBatch
from garage.envs import EnvSpec
from garage.misc import tensor_utils as np_tensor_utils
from garage.np.algos import MetaRLAlgorithm
from garage.sampler import DefaultWorker
from garage.tf.algos._rl2npo import RL2NPO
class RL2Env(gym.Wrapper):
"""Environment wrapper for RL2.
    In RL2, the observation is concatenated with the previous action,
    reward and terminal signal to form the new observation.
Args:
env (gym.Env): An env that will be wrapped.
"""
def __init__(self, env):
super().__init__(env)
action_space = akro.from_gym(self.env.action_space)
observation_space = self._create_rl2_obs_space()
self._spec = EnvSpec(action_space=action_space,
observation_space=observation_space)
def _create_rl2_obs_space(self):
"""Create observation space for RL2.
Returns:
gym.spaces.Box: Augmented observation space.
"""
obs_flat_dim = np.prod(self.env.observation_space.shape)
action_flat_dim = np.prod(self.env.action_space.shape)
return akro.Box(low=-np.inf,
high=np.inf,
shape=(obs_flat_dim + action_flat_dim + 1 + 1, ))
def reset(self, **kwargs):
"""gym.Env reset function.
Args:
kwargs: Keyword arguments.
Returns:
np.ndarray: augmented observation.
"""
del kwargs
obs = self.env.reset()
return np.concatenate(
[obs, np.zeros(self.env.action_space.shape), [0], [0]])
def step(self, action):
"""gym.Env step function.
Args:
action (int): action taken.
Returns:
np.ndarray: augmented observation.
float: reward.
bool: terminal signal.
dict: environment info.
"""
next_obs, reward, done, info = self.env.step(action)
next_obs = np.concatenate([next_obs, action, [reward], [done]])
return next_obs, reward, done, info
@property
def spec(self):
"""Environment specification.
Returns:
EnvSpec: Environment specification.
"""
return self._spec
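

# Augmented-observation layout produced by RL2Env (illustrative): for a base
# environment with a 3-dim observation and a 1-dim action, step() returns
#   [o_t (3), a_prev (1), r_prev (1), done_prev (1)]  -> shape (6,)
# and reset() fills the action/reward/done slots with zeros, matching the Box
# space built in _create_rl2_obs_space().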
class RL2Worker(DefaultWorker):
"""Initialize a worker for RL2.
    In RL2, the policy does not reset between trajectories within a meta
    batch; it only resets once, at the beginning of a trial/meta batch.
Args:
        seed(int): The seed to use to initialize random number generators.
        max_path_length(int or float): The maximum length of paths which
            will be sampled. Can be (floating point) infinity.
worker_number(int): The number of the worker where this update is
occurring. This argument is used to set a different seed for each
worker.
n_paths_per_trial (int): Number of trajectories sampled per trial/
            meta batch. The policy resets at the beginning of a meta batch
            and obtains `n_paths_per_trial` trajectories in one meta batch.
Attributes:
agent(Policy or None): The worker's agent.
env(gym.Env or None): The worker's environment.
"""
def __init__(
self,
*, # Require passing by keyword, since everything's an int.
seed,
max_path_length,
worker_number,
n_paths_per_trial=2):
self._n_paths_per_trial = n_paths_per_trial
super().__init__(seed=seed,
max_path_length=max_path_length,
worker_number=worker_number)
def start_rollout(self):
"""Begin a new rollout."""
self._path_length = 0
self._prev_obs = self.env.reset()
def rollout(self):
"""Sample a single rollout of the agent in the environment.
Returns:
garage.TrajectoryBatch: The collected trajectory.
"""
self.agent.reset()
for _ in range(self._n_paths_per_trial):
self.start_rollout()
while not self.step_rollout():
pass
self._agent_infos['batch_idx'] = np.full(len(self._rewards),
self._worker_number)
return self.collect_rollout()
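

# Note (illustrative): the `batch_idx` tag written into agent_infos above is
# what RL2._process_samples() later uses to group rollouts by task, so RL2
# must be sampled with this worker (or one that sets the same tag); a plain
# DefaultWorker would trigger the ValueError raised in _process_samples().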
class NoResetPolicy:
"""A policy that does not reset.
For RL2 meta-test, the policy should not reset after meta-RL
    adaptation. The hidden state will be retained as it is where
the adaptation takes place.
Args:
policy (garage.tf.policies.Policy): Policy itself.
Returns:
object: The wrapped policy that does not reset.
"""
def __init__(self, policy):
self._policy = policy
def reset(self):
"""gym.Env reset function."""
def get_action(self, obs):
"""Get a single action from this policy for the input observation.
Args:
obs (numpy.ndarray): Observation from environment.
Returns:
tuple[numpy.ndarray, dict]: Predicted action and agent
info.
"""
return self._policy.get_action(obs)
def get_param_values(self):
"""Return values of params.
Returns:
np.ndarray: Policy parameters values.
"""
return self._policy.get_param_values()
def set_param_values(self, params):
"""Set param values.
Args:
params (np.ndarray): A numpy array of parameter values.
"""
self._policy.set_param_values(params)
# pylint: disable=protected-access
class RL2AdaptedPolicy:
"""A RL2 policy after adaptation.
Args:
policy (garage.tf.policies.Policy): Policy itself.
"""
def __init__(self, policy):
self._initial_hiddens = policy._prev_hiddens[:]
self._policy = policy
def reset(self):
"""gym.Env reset function."""
self._policy._prev_hiddens = self._initial_hiddens
def get_action(self, obs):
"""Get a single action from this policy for the input observation.
Args:
obs (numpy.ndarray): Observation from environment.
Returns:
tuple(numpy.ndarray, dict): Predicted action and agent info.
"""
return self._policy.get_action(obs)
def get_param_values(self):
"""Return values of params.
Returns:
tuple(np.ndarray, np.ndarray): Policy parameters values
and initial hidden state that will be set every time
the policy is used for meta-test.
"""
return (self._policy.get_param_values(), self._initial_hiddens)
def set_param_values(self, params):
"""Set param values.
Args:
params (tuple(np.ndarray, np.ndarray)): Two numpy array of
parameter values, one of the network parameters, one
for the initial hidden state.
"""
inner_params, hiddens = params
self._policy.set_param_values(inner_params)
self._initial_hiddens = hiddens
class RL2(MetaRLAlgorithm, abc.ABC):
"""RL^2.
Reference: https://arxiv.org/pdf/1611.02779.pdf.
    When sampling for RL^2, there is more than one environment to be
sampled from. In the original implementation, within each task/environment,
all rollouts sampled will be concatenated into one single rollout, and fed
to the inner algorithm. Thus, returns and advantages are calculated across
the rollout.
RL2Worker is required in sampling for RL2.
See example/tf/rl2_ppo_halfcheetah.py for reference.
User should not instantiate RL2 directly.
Currently garage supports PPO and TRPO as inner algorithm. Refer to
garage/tf/algos/rl2ppo.py and garage/tf/algos/rl2trpo.py.
Args:
rl2_max_path_length (int): Maximum length for trajectories with respect
to RL^2. Notice that it is different from the maximum path length
for the inner algorithm.
meta_batch_size (int): Meta batch size.
task_sampler (garage.experiment.TaskSampler): Task sampler.
meta_evaluator (garage.experiment.MetaEvaluator): Evaluator for meta-RL
algorithms.
n_epochs_per_eval (int): If meta_evaluator is passed, meta-evaluation
will be performed every `n_epochs_per_eval` epochs.
inner_algo_args (dict): Arguments for inner algorithm.
"""
def __init__(self, rl2_max_path_length, meta_batch_size, task_sampler,
meta_evaluator, n_epochs_per_eval, **inner_algo_args):
self._inner_algo = RL2NPO(**inner_algo_args)
self._rl2_max_path_length = rl2_max_path_length
self.env_spec = self._inner_algo._env_spec
self._n_epochs_per_eval = n_epochs_per_eval
self._flatten_input = self._inner_algo._flatten_input
self._policy = self._inner_algo.policy
self._discount = self._inner_algo._discount
self._meta_batch_size = meta_batch_size
self._task_sampler = task_sampler
self._meta_evaluator = meta_evaluator
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch.
"""
last_return = None
for _ in runner.step_epochs():
if runner.step_itr % self._n_epochs_per_eval == 0:
if self._meta_evaluator is not None:
self._meta_evaluator.evaluate(self)
runner.step_path = runner.obtain_samples(
runner.step_itr,
env_update=self._task_sampler.sample(self._meta_batch_size))
last_return = self.train_once(runner.step_itr, runner.step_path)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
numpy.float64: Average return.
"""
paths = self._process_samples(itr, paths)
logger.log('Optimizing policy...')
self._inner_algo.optimize_policy(paths)
return paths['average_return']
def get_exploration_policy(self):
"""Return a policy used before adaptation to a specific task.
Each time it is retrieved, this policy should only be evaluated in one
task.
Returns:
object: The policy used to obtain samples that are later
used for meta-RL adaptation.
"""
self._policy.reset()
return NoResetPolicy(self._policy)
# pylint: disable=protected-access
def adapt_policy(self, exploration_policy, exploration_trajectories):
"""Produce a policy adapted for a task.
Args:
exploration_policy (garage.Policy): A policy which was returned
from get_exploration_policy(), and which generated
exploration_trajectories by interacting with an environment.
The caller may not use this object after passing it into this
method.
exploration_trajectories (garage.TrajectoryBatch): Trajectories to
adapt to, generated by exploration_policy exploring the
environment.
Returns:
garage.tf.policies.Policy: A policy adapted to the task represented
by the exploration_trajectories.
"""
return RL2AdaptedPolicy(exploration_policy._policy)
# pylint: disable=protected-access
def _process_samples(self, itr, paths):
# pylint: disable=too-many-statements
"""Return processed sample data based on the collected paths.
Args:
itr (int): Iteration number.
paths (OrderedDict[dict]): A list of collected paths for each
task. In RL^2, there are n environments/tasks and paths in
each of them will be concatenated at some point and fed to
the policy.
Returns:
dict: Processed sample data, with key
* observations: (numpy.ndarray)
* actions: (numpy.ndarray)
* rewards: (numpy.ndarray)
* returns: (numpy.ndarray)
* valids: (numpy.ndarray)
* agent_infos: (dict)
* env_infos: (dict)
* paths: (list[dict])
* average_return: (numpy.float64)
Raises:
ValueError: If 'batch_idx' is not found.
"""
concatenated_paths = []
paths_by_task = collections.defaultdict(list)
for path in paths:
path['returns'] = np_tensor_utils.discount_cumsum(
path['rewards'], self._discount)
path['lengths'] = [len(path['rewards'])]
if 'batch_idx' in path:
paths_by_task[path['batch_idx']].append(path)
elif 'batch_idx' in path['agent_infos']:
paths_by_task[path['agent_infos']['batch_idx'][0]].append(path)
else:
raise ValueError(
'Batch idx is required for RL2 but not found, '
'Make sure to use garage.tf.algos.rl2.RL2Worker '
'for sampling')
# all path in paths_by_task[i] are sampled from task[i]
for _paths in paths_by_task.values():
concatenated_path = self._concatenate_paths(_paths)
concatenated_paths.append(concatenated_path)
# stack and pad to max path length of the concatenated
# path, which will be fed to inner algo
# i.e. max_path_length * episode_per_task
concatenated_paths_stacked = (
np_tensor_utils.stack_and_pad_tensor_dict_list(
concatenated_paths, self._inner_algo.max_path_length))
name_map = None
if hasattr(self._task_sampler, '_envs') and hasattr(
self._task_sampler._envs[0].env, 'all_task_names'):
names = [
env.env.all_task_names[0] for env in self._task_sampler._envs
]
name_map = dict(enumerate(names))
undiscounted_returns = log_multitask_performance(
itr,
TrajectoryBatch.from_trajectory_list(self.env_spec, paths),
self._inner_algo._discount,
name_map=name_map)
concatenated_paths_stacked['paths'] = concatenated_paths
concatenated_paths_stacked['average_return'] = np.mean(
undiscounted_returns)
return concatenated_paths_stacked
def _concatenate_paths(self, paths):
"""Concatenate paths.
        The input paths are from different rollouts but the same
        task/environment. In RL^2, paths within each meta batch are all
        concatenated into a single path and fed to the policy.
Args:
            paths (list[dict]): Input paths. All paths are from different
                rollouts, but the same task/environment.
Returns:
dict: Concatenated paths from the same task/environment. Shape of
values: :math:`[max_path_length * episode_per_task, S^*]`
list[dict]: Original input paths. Length of the list is
:math:`episode_per_task` and each path in the list has
values of shape :math:`[max_path_length, S^*]`
"""
if self._flatten_input:
observations = np.concatenate([
self.env_spec.observation_space.flatten_n(path['observations'])
for path in paths
])
else:
observations = np.concatenate(
[path['observations'] for path in paths])
actions = np.concatenate([
self.env_spec.action_space.flatten_n(path['actions'])
for path in paths
])
valids = np.concatenate(
[np.ones_like(path['rewards']) for path in paths])
baselines = np.concatenate(
[np.zeros_like(path['rewards']) for path in paths])
concatenated_path = np_tensor_utils.concat_tensor_dict_list(paths)
concatenated_path['observations'] = observations
concatenated_path['actions'] = actions
concatenated_path['valids'] = valids
concatenated_path['baselines'] = baselines
return concatenated_path
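    # Editor's illustration (not part of the original class): a commented-out
    # numpy sketch of what _concatenate_paths produces. The numbers are made
    # up; only the shapes matter. Kept as comments so the module still parses
    # unchanged.
    #
    #   import numpy as np
    #   path_a = {'observations': np.zeros((3, 4)), 'rewards': np.ones(3)}
    #   path_b = {'observations': np.zeros((2, 4)), 'rewards': np.ones(2)}
    #   obs = np.concatenate([p['observations'] for p in (path_a, path_b)])
    #   valids = np.concatenate(
    #       [np.ones_like(p['rewards']) for p in (path_a, path_b)])
    #   # obs.shape == (5, 4), valids.shape == (5,): two rollouts of the same
    #   # task now look like one path of length 3 + 2.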
@property
def policy(self):
"""Policy.
Returns:
garage.Policy: Policy to be used.
"""
return self._inner_algo.policy
@property
def max_path_length(self):
"""Max path length.
Returns:
int: Maximum path length in a trajectory.
"""
return self._rl2_max_path_length
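# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a standalone numpy
# illustration of the two array operations _process_samples leans on --
# discounted return computation and padding to a fixed horizon. The helper is
# a plain-numpy stand-in written from the observed usage of
# garage.misc.tensor_utils.discount_cumsum and the padding utilities; the
# library's exact behavior is an assumption, not quoted here.
def _demo_discount_and_pad(rewards, discount, max_path_length):
    """Return (discounted returns, rewards zero-padded to max_path_length)."""
    import numpy as np  # Already imported above; repeated so the sketch can
    # be copied out on its own.
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + discount * running
        returns[t] = running
    padded = np.zeros(max_path_length)
    padded[:len(rewards)] = rewards
    return returns, padded


if __name__ == '__main__':
    rets, padded = _demo_discount_and_pad([1.0, 1.0, 1.0], 0.9, 5)
    # rets == [2.71, 1.9, 1.0]; padded == [1., 1., 1., 0., 0.]
    print(rets, padded)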
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/rl2ppo.py
"""Proximal Policy Optimization for RL2."""
from garage.tf.algos import RL2
from garage.tf.optimizers import FirstOrderOptimizer
class RL2PPO(RL2):
"""Proximal Policy Optimization specific for RL^2.
See https://arxiv.org/abs/1707.06347 for algorithm reference.
Args:
rl2_max_path_length (int): Maximum length for trajectories with respect
to RL^2. Notice that it is different from the maximum path length
for the inner algorithm.
meta_batch_size (int): Meta batch size.
task_sampler (garage.experiment.TaskSampler): Task sampler.
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
meta_evaluator (garage.experiment.MetaEvaluator): Evaluator for meta-RL
algorithms.
n_epochs_per_eval (int): If meta_evaluator is passed, meta-evaluation
will be performed every `n_epochs_per_eval` epochs.
name (str): The name of the algorithm.
"""
def __init__(self,
rl2_max_path_length,
meta_batch_size,
task_sampler,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
flatten_input=True,
meta_evaluator=None,
n_epochs_per_eval=10,
name='PPO'):
if optimizer_args is None:
optimizer_args = dict()
super().__init__(rl2_max_path_length=rl2_max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=task_sampler,
env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
pg_loss='surrogate_clip',
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=FirstOrderOptimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
flatten_input=flatten_input,
meta_evaluator=meta_evaluator,
n_epochs_per_eval=n_epochs_per_eval,
name=name)
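# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): RL2PPO only forwards
# pg_loss='surrogate_clip' to the underlying NPO machinery. Below is a
# standalone numpy rendering of that clipped-surrogate objective
# (https://arxiv.org/abs/1707.06347). It illustrates the formula only; it is
# not the code path garage executes.
import numpy as np


def _demo_clipped_surrogate(log_prob_new, log_prob_old, advantages,
                            lr_clip_range=0.01):
    """Return the PPO clipped surrogate objective (to be maximized)."""
    ratio = np.exp(np.asarray(log_prob_new) - np.asarray(log_prob_old))
    adv = np.asarray(advantages)
    clipped = np.clip(ratio, 1 - lr_clip_range, 1 + lr_clip_range)
    return np.mean(np.minimum(ratio * adv, clipped * adv))


if __name__ == '__main__':
    print(_demo_clipped_surrogate([-1.0, -2.0], [-1.1, -1.9], [0.5, -0.3]))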
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/rl2trpo.py
"""Trust Region Policy Optimization for RL2."""
from garage.tf.algos import RL2
from garage.tf.optimizers import ConjugateGradientOptimizer
from garage.tf.optimizers import PenaltyLbfgsOptimizer
class RL2TRPO(RL2):
"""Trust Region Policy Optimization specific for RL^2.
See https://arxiv.org/abs/1502.05477.
Args:
rl2_max_path_length (int): Maximum length for trajectories with respect
to RL^2. Notice that it is different from the maximum path length
for the inner algorithm.
meta_batch_size (int): Meta batch size.
task_sampler (garage.experiment.TaskSampler): Task sampler.
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
kl_constraint (str): KL constraint, either 'hard' or 'soft'.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
meta_evaluator (garage.experiment.MetaEvaluator): Evaluator for meta-RL
algorithms.
n_epochs_per_eval (int): If meta_evaluator is passed, meta-evaluation
will be performed every `n_epochs_per_eval` epochs.
name (str): The name of the algorithm.
"""
def __init__(self,
rl2_max_path_length,
meta_batch_size,
task_sampler,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=0.98,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
kl_constraint='hard',
entropy_method='no_entropy',
flatten_input=True,
meta_evaluator=None,
n_epochs_per_eval=10,
name='TRPO'):
if not optimizer:
if kl_constraint == 'hard':
optimizer = ConjugateGradientOptimizer
elif kl_constraint == 'soft':
optimizer = PenaltyLbfgsOptimizer
else:
raise ValueError('Invalid kl_constraint')
if optimizer_args is None:
optimizer_args = dict()
super().__init__(rl2_max_path_length=rl2_max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=task_sampler,
env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
pg_loss='surrogate',
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=optimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
flatten_input=flatten_input,
meta_evaluator=meta_evaluator,
n_epochs_per_eval=n_epochs_per_eval,
name=name)
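# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): the max_kl_step argument
# above bounds the KL divergence between the old and new policies. For the
# common case of diagonal Gaussian policies, that constraint has the closed
# form below -- written here as a standalone numpy illustration, not as
# garage's internal implementation.
import numpy as np


def _demo_diag_gaussian_kl(mean_old, std_old, mean_new, std_new):
    """Return KL(old || new) for diagonal Gaussian policies."""
    mean_old, std_old = np.asarray(mean_old), np.asarray(std_old)
    mean_new, std_new = np.asarray(mean_new), np.asarray(std_new)
    return np.sum(
        np.log(std_new / std_old)
        + (std_old**2 + (mean_old - mean_new)**2) / (2.0 * std_new**2)
        - 0.5)


if __name__ == '__main__':
    # A tiny policy update that would satisfy max_kl_step=0.01.
    print(_demo_diag_gaussian_kl([0.0], [1.0], [0.05], [1.0]))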
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/td3.py
"""This module implements a TD3 model.
TD3, or Twin Delayed Deep Deterministic Policy Gradient, uses actor-critic
method to optimize the policy and reward prediction. Notably, it uses the
minimum value of two critics instead of one to limit overestimation.
"""
from collections import deque
from dowel import logger, tabular
import numpy as np
import tensorflow as tf
from garage import _Default, make_optimizer
from garage import log_performance
from garage.np import obtain_evaluation_samples
from garage.np import samples_to_tensors
from garage.np.algos import RLAlgorithm
from garage.sampler import OffPolicyVectorizedSampler
from garage.tf.misc import tensor_utils
class TD3(RLAlgorithm):
"""Implementation of TD3.
Based on https://arxiv.org/pdf/1802.09477.pdf.
Example:
$ python garage/examples/tf/td3_pendulum.py
Args:
env_spec (garage.envs.EnvSpec): Environment.
policy (garage.tf.policies.Policy): Policy.
qf (garage.tf.q_functions.QFunction): Q-function.
        qf2 (garage.tf.q_functions.QFunction): Second Q-function (critic).
replay_buffer (garage.replay_buffer.ReplayBuffer): Replay buffer.
target_update_tau (float): Interpolation parameter for doing the
soft target update.
policy_lr (float): Learning rate for training policy network.
qf_lr (float): Learning rate for training q value network.
policy_weight_decay (float): L2 weight decay factor for parameters
of the policy network.
qf_weight_decay (float): L2 weight decay factor for parameters
of the q value network.
policy_optimizer (tf.python.training.optimizer.Optimizer):
Optimizer for training policy network.
qf_optimizer (tf.python.training.optimizer.Optimizer):
Optimizer for training q function network.
        clip_pos_returns (boolean): Whether or not to clip positive returns.
clip_return (float): Clip return to be in [-clip_return,
clip_return].
discount (float): Discount factor for the cumulative return.
max_action (float): Maximum action magnitude.
name (str): Name of the algorithm shown in computation graph.
steps_per_epoch (int): Number of batches of samples in each epoch.
max_path_length (int): Maximum length of a path.
max_eval_path_length (int or None): Maximum length of paths used for
off-policy evaluation. If None, defaults to `max_path_length`.
n_train_steps (int): Number of optimizations in each epoch cycle.
        buffer_batch_size (int): Number of samples drawn from the replay
            buffer for each optimization step.
min_buffer_size (int):
Number of samples in replay buffer before first optimization.
rollout_batch_size (int): Roll out batch size.
reward_scale (float): Scale to reward.
exploration_policy_sigma (float): Action noise sigma.
exploration_policy_clip (float): Action noise clip.
        actor_update_period (int): Number of training steps between updates
            of the actor (policy) and the target networks.
        smooth_return (bool):
            If True, compute statistics over all samples collected so far.
            Otherwise compute statistics over the latest batch only.
exploration_policy (garage.np.exploration_policies.ExplorationPolicy):
Exploration strategy.
"""
def __init__(
self,
env_spec,
policy,
qf,
qf2,
replay_buffer,
*, # Everything after this is numbers.
target_update_tau=0.01,
policy_weight_decay=0,
qf_weight_decay=0,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer,
policy_lr=_Default(1e-4),
qf_lr=_Default(1e-3),
clip_pos_returns=False,
clip_return=np.inf,
discount=0.99,
max_action=None,
name='TD3',
steps_per_epoch=20,
max_path_length=None,
max_eval_path_length=None,
n_train_steps=50,
buffer_batch_size=64,
min_buffer_size=1e4,
rollout_batch_size=1,
reward_scale=1.,
exploration_policy_sigma=0.2,
actor_update_period=2,
exploration_policy_clip=0.5,
smooth_return=True,
exploration_policy=None):
action_bound = env_spec.action_space.high
self._max_action = action_bound if max_action is None else max_action
self._tau = target_update_tau
self._policy_weight_decay = policy_weight_decay
self._qf_weight_decay = qf_weight_decay
self._name = name
self._clip_pos_returns = clip_pos_returns
self._clip_return = clip_return
self._success_history = deque(maxlen=100)
self._episode_rewards = []
self._episode_policy_losses = []
self._episode_qf_losses = []
self._epoch_ys = []
self._epoch_qs = []
self._target_policy = policy.clone('target_policy')
self._target_qf = qf.clone('target_qf')
self.qf2 = qf2
self.qf = qf
self._exploration_policy_sigma = exploration_policy_sigma
self._exploration_policy_clip = exploration_policy_clip
self._actor_update_period = actor_update_period
self._action_loss = None
self._target_qf2 = qf2.clone('target_qf2')
self._policy_optimizer = policy_optimizer
self._qf_optimizer = qf_optimizer
self._policy_lr = policy_lr
self._qf_lr = qf_lr
self._policy = policy
self._n_train_steps = n_train_steps
self._min_buffer_size = min_buffer_size
self._qf = qf
self._steps_per_epoch = steps_per_epoch
self._n_train_steps = n_train_steps
self._buffer_batch_size = buffer_batch_size
self._discount = discount
self._reward_scale = reward_scale
self._smooth_return = smooth_return
self.max_path_length = max_path_length
self._max_eval_path_length = max_eval_path_length
# used by OffPolicyVectorizedSampler
self.env_spec = env_spec
self.rollout_batch_size = rollout_batch_size
self.replay_buffer = replay_buffer
self.policy = policy
self.exploration_policy = exploration_policy
self.sampler_cls = OffPolicyVectorizedSampler
self.init_opt()
def init_opt(self):
"""Build the loss function and init the optimizer."""
with tf.name_scope(self._name):
# Create target policy (actor) and qf (critic) networks
with tf.name_scope('inputs'):
obs_dim = self.env_spec.observation_space.flat_dim
y = tf.compat.v1.placeholder(tf.float32,
shape=(None, 1),
name='input_y')
obs = tf.compat.v1.placeholder(tf.float32,
shape=(None, obs_dim),
name='input_observation')
actions = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self.env_spec.action_space.flat_dim),
name='input_action')
policy_network_outputs = self._target_policy.get_action_sym(
obs, name='policy')
target_qf_outputs = self._target_qf.get_qval_sym(obs,
actions,
name='qf')
target_qf2_outputs = self._target_qf2.get_qval_sym(obs,
actions,
name='qf')
self.target_policy_f_prob_online = tensor_utils.compile_function(
inputs=[obs], outputs=policy_network_outputs)
self.target_qf_f_prob_online = tensor_utils.compile_function(
inputs=[obs, actions], outputs=target_qf_outputs)
self.target_qf2_f_prob_online = tensor_utils.compile_function(
inputs=[obs, actions], outputs=target_qf2_outputs)
# Set up target init and update functions
with tf.name_scope('setup_target'):
policy_init_op, policy_update_op = tensor_utils.get_target_ops(
self.policy.get_global_vars(),
self._target_policy.get_global_vars(), self._tau)
qf_init_ops, qf_update_ops = tensor_utils.get_target_ops(
self.qf.get_global_vars(),
self._target_qf.get_global_vars(), self._tau)
qf2_init_ops, qf2_update_ops = tensor_utils.get_target_ops(
self.qf2.get_global_vars(),
self._target_qf2.get_global_vars(), self._tau)
target_init_op = policy_init_op + qf_init_ops + qf2_init_ops
target_update_op = (policy_update_op + qf_update_ops +
qf2_update_ops)
f_init_target = tensor_utils.compile_function(
inputs=[], outputs=target_init_op)
f_update_target = tensor_utils.compile_function(
inputs=[], outputs=target_update_op)
# Set up policy training function
next_action = self.policy.get_action_sym(obs, name='policy_action')
next_qval = self.qf.get_qval_sym(obs,
next_action,
name='policy_action_qval')
with tf.name_scope('action_loss'):
action_loss = -tf.reduce_mean(next_qval)
with tf.name_scope('minimize_action_loss'):
policy_optimizer = make_optimizer(
self._policy_optimizer,
learning_rate=self._policy_lr,
name='PolicyOptimizer')
policy_train_op = policy_optimizer.minimize(
action_loss, var_list=self.policy.get_trainable_vars())
f_train_policy = tensor_utils.compile_function(
inputs=[obs], outputs=[policy_train_op, action_loss])
# Set up qf training function
qval = self.qf.get_qval_sym(obs, actions, name='q_value')
q2val = self.qf2.get_qval_sym(obs, actions, name='q2_value')
with tf.name_scope('qval1_loss'):
qval1_loss = tf.reduce_mean(tf.math.squared_difference(
y, qval))
with tf.name_scope('qval2_loss'):
qval2_loss = tf.reduce_mean(
tf.math.squared_difference(y, q2val))
with tf.name_scope('minimize_qf_loss'):
qf_optimizer = make_optimizer(self._qf_optimizer,
learning_rate=self._qf_lr,
name='QFunctionOptimizer')
qf_train_op = qf_optimizer.minimize(
qval1_loss, var_list=self.qf.get_trainable_vars())
qf2_train_op = qf_optimizer.minimize(
qval2_loss, var_list=self.qf2.get_trainable_vars())
f_train_qf = tensor_utils.compile_function(
inputs=[y, obs, actions],
outputs=[qf_train_op, qval1_loss, qval])
f_train_qf2 = tensor_utils.compile_function(
inputs=[y, obs, actions],
outputs=[qf2_train_op, qval2_loss, q2val])
self.f_train_policy = f_train_policy
self.f_train_qf = f_train_qf
self.f_init_target = f_init_target
self.f_update_target = f_update_target
self.f_train_qf2 = f_train_qf2
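    # Editor's note (not part of the original class): `get_target_ops` above
    # wires up soft ("Polyak") target updates driven by target_update_tau.
    # Assuming the conventional rule, each f_update_target call performs, per
    # parameter:
    #
    #   target_param <- tau * online_param + (1 - tau) * target_param
    #
    # A commented numpy rendering with made-up numbers:
    #
    #   import numpy as np
    #   online, target, tau = np.array([1.0, 2.0]), np.zeros(2), 0.01
    #   target = tau * online + (1 - tau) * target   # -> [0.01, 0.02]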
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: State dictionary.
"""
data = self.__dict__.copy()
del data['target_policy_f_prob_online']
del data['target_qf_f_prob_online']
del data['target_qf2_f_prob_online']
del data['f_train_policy']
del data['f_train_qf']
del data['f_train_qf2']
del data['f_init_target']
del data['f_update_target']
return data
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Current state.
"""
self.__dict__.update(state)
self.init_opt()
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
runner.enable_logging = False
for _ in runner.step_epochs():
for cycle in range(self._steps_per_epoch):
runner.step_path = runner.obtain_samples(runner.step_itr)
for path in runner.step_path:
path['rewards'] *= self._reward_scale
last_return = self.train_once(runner.step_itr,
runner.step_path)
if (cycle == 0 and self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
runner.enable_logging = True
log_performance(runner.step_itr,
obtain_evaluation_samples(
self.policy, runner.get_env_copy()),
discount=self._discount)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
np.float64: Average return.
"""
paths = samples_to_tensors(paths)
epoch = itr / self._steps_per_epoch
self._episode_rewards.extend([
path for path, complete in zip(paths['undiscounted_returns'],
paths['complete']) if complete
])
self._success_history.extend([
path for path, complete in zip(paths['success_history'],
paths['complete']) if complete
])
# Avoid calculating the mean of an empty list in cases where
# all paths were non-terminal.
last_average_return = np.NaN
avg_success_rate = 0
if self._episode_rewards:
last_average_return = np.mean(self._episode_rewards)
if self._success_history:
if (itr % self._steps_per_epoch == 0
and self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
avg_success_rate = np.mean(self._success_history)
self.policy.log_diagnostics(paths)
self._qf.log_diagnostics(paths)
for _ in range(self._n_train_steps):
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
qf_loss, y_s, qval, policy_loss = self.optimize_policy(itr)
self._episode_policy_losses.append(policy_loss)
self._episode_qf_losses.append(qf_loss)
self._epoch_ys.append(y_s)
self._epoch_qs.append(qval)
if itr % self._steps_per_epoch == 0:
logger.log('Training finished')
if (self.replay_buffer.n_transitions_stored >=
self._min_buffer_size):
tabular.record('Epoch', epoch)
tabular.record('Policy/AveragePolicyLoss',
np.mean(self._episode_policy_losses))
tabular.record('QFunction/AverageQFunctionLoss',
np.mean(self._episode_qf_losses))
tabular.record('QFunction/AverageQ', np.mean(self._epoch_qs))
tabular.record('QFunction/MaxQ', np.max(self._epoch_qs))
tabular.record('QFunction/AverageAbsQ',
np.mean(np.abs(self._epoch_qs)))
tabular.record('QFunction/AverageY', np.mean(self._epoch_ys))
tabular.record('QFunction/MaxY', np.max(self._epoch_ys))
tabular.record('QFunction/AverageAbsY',
np.mean(np.abs(self._epoch_ys)))
tabular.record('AverageSuccessRate', avg_success_rate)
if not self._smooth_return:
self._episode_rewards = []
self._episode_policy_losses = []
self._episode_qf_losses = []
self._epoch_ys = []
self._epoch_qs = []
self._success_history.clear()
return last_average_return
def optimize_policy(self, itr):
"""Perform algorithm optimizing.
        Args:
            itr (int): Iteration number.
        Returns:
            float: Loss of the Q-values predicted by the Q-network.
            numpy.ndarray: y targets used for critic regression.
            numpy.ndarray: Q-values predicted by the Q-network.
            float: Loss of the actions predicted by the policy network.
        """
transitions = self.replay_buffer.sample_transitions(
self._buffer_batch_size)
observations = transitions['observations']
rewards = transitions['rewards']
actions = transitions['actions']
next_observations = transitions['next_observations']
terminals = transitions['terminals']
next_inputs = next_observations
inputs = observations
target_actions = self.target_policy_f_prob_online(next_inputs)
noise = np.random.normal(0.0, self._exploration_policy_sigma,
target_actions.shape)
noise = np.clip(noise, -self._exploration_policy_clip,
self._exploration_policy_clip)
target_actions += noise
target_qvals = self.target_qf_f_prob_online(next_inputs,
target_actions)
target_q2vals = self.target_qf2_f_prob_online(next_inputs,
target_actions)
target_qvals = np.minimum(target_qvals, target_q2vals)
ys = (rewards + (1.0 - terminals) * self._discount * target_qvals)
_, qval_loss, qval = self.f_train_qf(ys, inputs, actions)
_, q2val_loss, q2val = self.f_train_qf2(ys, inputs, actions)
        # For the returned diagnostics, report the smaller of the two critic
        # losses together with its Q-values.
        if qval_loss > q2val_loss:
            qval_loss = q2val_loss
            qval = q2val
# update policy and target networks less frequently
if self._action_loss is None or (itr % self._actor_update_period) == 0:
_, self._action_loss = self.f_train_policy(inputs)
self.f_update_target()
return qval_loss, ys, qval, self._action_loss
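# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a standalone numpy
# rendering of the target computation in optimize_policy above --
# target-policy smoothing noise followed by the clipped double-Q minimum.
# The q1/q2 arrays stand in for the two target critics' outputs and are
# made-up numbers; only the arithmetic mirrors the method. Relies on the
# numpy import at the top of this module.
def _demo_td3_targets(rewards, terminals, q1, q2, target_actions,
                      discount=0.99, sigma=0.2, clip=0.5):
    """Return (smoothed target actions, TD targets y) as computed in TD3."""
    rewards, terminals = np.asarray(rewards), np.asarray(terminals)
    q1, q2 = np.asarray(q1), np.asarray(q2)
    target_actions = np.asarray(target_actions, dtype=float)
    noise = np.clip(np.random.normal(0.0, sigma, target_actions.shape),
                    -clip, clip)
    smoothed_actions = target_actions + noise
    target_q = np.minimum(q1, q2)
    ys = rewards + (1.0 - terminals) * discount * target_q
    return smoothed_actions, ys


if __name__ == '__main__':
    _, ys = _demo_td3_targets(rewards=[1.0, 0.5],
                              terminals=[0.0, 1.0],
                              q1=[10.0, 3.0],
                              q2=[9.0, 4.0],
                              target_actions=[[0.1], [0.2]])
    # ys == [1.0 + 0.99 * 9.0, 0.5] == [9.91, 0.5]
    print(ys)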
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/te.py
"""Task Embedding Algorithm."""
from collections import defaultdict
import numpy as np
from garage import TrajectoryBatch
from garage.sampler import DefaultWorker
class TaskEmbeddingWorker(DefaultWorker):
"""A sampler worker for Task Embedding Algorithm.
In addition to DefaultWorker, this worker adds one-hot task id to env_info,
and adds latent and latent infos to agent_info.
Args:
        seed(int): The seed to use to initialize random number generators.
        max_path_length(int or float): The maximum length of paths which will
            be sampled. Can be (floating point) infinity.
worker_number(int): The number of the worker where this update is
occurring. This argument is used to set a different seed for each
worker.
Attributes:
agent(Policy or None): The worker's agent.
env(gym.Env or None): The worker's environment.
"""
def __init__(
self,
*, # Require passing by keyword, since everything's an int.
seed,
max_path_length,
worker_number):
self._latents = []
self._tasks = []
self._latent_infos = defaultdict(list)
self._z, self._t, self._latent_info = None, None, None
super().__init__(seed=seed,
max_path_length=max_path_length,
worker_number=worker_number)
def start_rollout(self):
"""Begin a new rollout."""
# pylint: disable=protected-access
self._t = self.env._active_task_one_hot()
self._z, self._latent_info = self.agent.get_latent(self._t)
self._z = self.agent.latent_space.flatten(self._z)
super().start_rollout()
def step_rollout(self):
"""Take a single time-step in the current rollout.
Returns:
            bool: True iff the path is done, either due to the environment
                indicating termination or due to reaching `max_path_length`.
"""
if self._path_length < self._max_path_length:
a, agent_info = self.agent.get_action_given_latent(
self._prev_obs, self._z)
next_o, r, d, env_info = self.env.step(a)
self._observations.append(self._prev_obs)
self._rewards.append(r)
self._actions.append(a)
self._tasks.append(self._t)
self._latents.append(self._z)
for k, v in self._latent_info.items():
self._latent_infos[k].append(v)
for k, v in agent_info.items():
self._agent_infos[k].append(v)
for k, v in env_info.items():
self._env_infos[k].append(v)
self._path_length += 1
self._terminals.append(d)
if not d:
self._prev_obs = next_o
return False
self._lengths.append(self._path_length)
self._last_observations.append(self._prev_obs)
return True
def collect_rollout(self):
"""Collect the current rollout, clearing the internal buffer.
One-hot task id is saved in env_infos['task_onehot']. Latent is saved
in agent_infos['latent']. Latent infos are saved in
agent_infos['latent_info_name'], where info_name is the original latent
info name.
Returns:
garage.TrajectoryBatch: A batch of the trajectories completed since
the last call to collect_rollout().
"""
observations = self._observations
self._observations = []
last_observations = self._last_observations
self._last_observations = []
actions = self._actions
self._actions = []
rewards = self._rewards
self._rewards = []
terminals = self._terminals
self._terminals = []
latents = self._latents
self._latents = []
tasks = self._tasks
self._tasks = []
env_infos = self._env_infos
self._env_infos = defaultdict(list)
agent_infos = self._agent_infos
self._agent_infos = defaultdict(list)
latent_infos = self._latent_infos
self._latent_infos = defaultdict(list)
for k, v in latent_infos.items():
latent_infos[k] = np.asarray(v)
for k, v in agent_infos.items():
agent_infos[k] = np.asarray(v)
for k, v in env_infos.items():
env_infos[k] = np.asarray(v)
env_infos['task_onehot'] = np.asarray(tasks)
agent_infos['latent'] = np.asarray(latents)
for k, v in latent_infos.items():
agent_infos['latent_{}'.format(k)] = v
lengths = self._lengths
self._lengths = []
return TrajectoryBatch(self.env.spec, np.asarray(observations),
np.asarray(last_observations),
np.asarray(actions), np.asarray(rewards),
np.asarray(terminals), dict(env_infos),
dict(agent_infos), np.asarray(lengths,
dtype='i'))
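# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): the shapes that
# collect_rollout above packs into env_infos['task_onehot'] and
# agent_infos['latent']. The sizes are made up; it reuses the numpy import at
# the top of this module.
def _demo_pack_infos(num_steps=3, num_tasks=4, latent_dim=2):
    """Return (env_infos, agent_infos) shaped like collect_rollout's output."""
    task_onehot = np.eye(num_tasks)[0]  # the active task's one-hot id
    tasks = [task_onehot for _ in range(num_steps)]
    latents = [np.zeros(latent_dim) for _ in range(num_steps)]
    env_infos = {'task_onehot': np.asarray(tasks)}  # (num_steps, num_tasks)
    agent_infos = {'latent': np.asarray(latents)}   # (num_steps, latent_dim)
    return env_infos, agent_infos


if __name__ == '__main__':
    env_infos, agent_infos = _demo_pack_infos()
    print(env_infos['task_onehot'].shape, agent_infos['latent'].shape)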
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/te_npo.py
"""Natural Policy Optimization with Task Embeddings."""
# pylint: disable=too-many-lines
import akro
from dowel import Histogram, logger, tabular
import numpy as np
import scipy.stats
import tensorflow as tf
from garage import InOutSpec, log_performance, TrajectoryBatch
from garage.experiment import deterministic
from garage.misc import tensor_utils as np_tensor_utils
from garage.np.algos import RLAlgorithm
from garage.sampler import LocalSampler
from garage.tf import paths_to_tensors
from garage.tf.embeddings import StochasticEncoder
from garage.tf.misc.tensor_utils import center_advs
from garage.tf.misc.tensor_utils import compile_function
from garage.tf.misc.tensor_utils import compute_advantages
from garage.tf.misc.tensor_utils import concat_tensor_list
from garage.tf.misc.tensor_utils import discounted_returns
from garage.tf.misc.tensor_utils import flatten_inputs
from garage.tf.misc.tensor_utils import graph_inputs
from garage.tf.misc.tensor_utils import pad_tensor
from garage.tf.misc.tensor_utils import pad_tensor_dict
from garage.tf.misc.tensor_utils import pad_tensor_n
from garage.tf.misc.tensor_utils import positive_advs
from garage.tf.misc.tensor_utils import stack_tensor_dict_list
from garage.tf.optimizers import LbfgsOptimizer
from garage.tf.policies import TaskEmbeddingPolicy
class TENPO(RLAlgorithm):
"""Natural Policy Optimization with Task Embeddings.
See https://karolhausman.github.io/pdf/hausman17nips-ws2.pdf for algorithm
reference.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.TaskEmbeddingPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
encoder_ent_coeff (float): The coefficient of the policy encoder
entropy. Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_ce_gradient (bool): Whether to stop the cross entropy gradient.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
        inference (garage.tf.embeddings.StochasticEncoder): An encoder
            that infers the task embedding from a trajectory.
inference_optimizer (object): The optimizer of the inference. Should be
an optimizer in garage.tf.optimizers.
inference_optimizer_args (dict): The arguments of the inference
optimizer.
inference_ce_coeff (float): The coefficient of the cross entropy of
task embeddings inferred from task one-hot and trajectory. This is
effectively the coefficient of log-prob of inference.
name (str): The name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
encoder_ent_coeff=0.0,
use_softplus_entropy=False,
stop_ce_gradient=False,
flatten_input=True,
inference=None,
inference_optimizer=None,
inference_optimizer_args=None,
inference_ce_coeff=0.0,
name='NPOTaskEmbedding'):
# pylint: disable=too-many-statements
assert isinstance(policy, TaskEmbeddingPolicy)
assert isinstance(inference, StochasticEncoder)
self.policy = policy
self.scope = scope
self.max_path_length = max_path_length
self._env_spec = env_spec
self._baseline = baseline
self._discount = discount
self._gae_lambda = gae_lambda
self._center_adv = center_adv
self._positive_adv = positive_adv
self._fixed_horizon = fixed_horizon
self._flatten_input = flatten_input
self._name = name
self._name_scope = tf.name_scope(self._name)
self._old_policy = policy.clone('old_policy')
self._use_softplus_entropy = use_softplus_entropy
self._stop_ce_gradient = stop_ce_gradient
optimizer = optimizer or LbfgsOptimizer
optimizer_args = optimizer_args or dict()
inference_opt = inference_optimizer or LbfgsOptimizer
inference_opt_args = inference_optimizer_args or dict()
with self._name_scope:
self._optimizer = optimizer(**optimizer_args)
self._lr_clip_range = float(lr_clip_range)
self._max_kl_step = float(max_kl_step)
self._policy_ent_coeff = float(policy_ent_coeff)
self._inference = inference
self._old_inference = inference.clone('old_inference')
self.inference_ce_coeff = float(inference_ce_coeff)
self.inference_optimizer = inference_opt(**inference_opt_args)
self.encoder_ent_coeff = encoder_ent_coeff
self._f_rewards = None
self._f_returns = None
self._f_policy_kl = None
self._f_policy_entropy = None
self._f_encoder_kl = None
self._f_encoder_entropy = None
self._f_task_entropies = None
self._f_inference_ce = None
self._policy_network = None
self._old_policy_network = None
self._encoder_network = None
self._old_encoder_network = None
self._infer_network = None
self._old_infer_network = None
self.sampler_cls = LocalSampler
self.init_opt()
def init_opt(self):
"""Initialize optimizater.
Raises:
NotImplementedError: Raise if the policy is recurrent.
"""
# Input variables
(pol_loss_inputs, pol_opt_inputs, infer_loss_inputs,
infer_opt_inputs) = self._build_inputs()
self._policy_opt_inputs = pol_opt_inputs
self._inference_opt_inputs = infer_opt_inputs
# Jointly optimize policy and encoder network
pol_loss, pol_kl, _ = self._build_policy_loss(pol_loss_inputs)
self._optimizer.update_opt(loss=pol_loss,
target=self.policy,
leq_constraint=(pol_kl, self._max_kl_step),
inputs=flatten_inputs(
self._policy_opt_inputs),
constraint_name='mean_kl')
# Optimize inference distribution separately (supervised learning)
infer_loss, _ = self._build_inference_loss(infer_loss_inputs)
self.inference_optimizer.update_opt(loss=infer_loss,
target=self._inference,
inputs=flatten_inputs(
self._inference_opt_inputs))
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
last_return = None
for _ in runner.step_epochs():
runner.step_path = runner.obtain_samples(runner.step_itr)
last_return = self.train_once(runner.step_itr, runner.step_path)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
numpy.float64: Average return.
"""
undiscounted_returns = log_performance(
itr,
TrajectoryBatch.from_trajectory_list(self._env_spec, paths),
discount=self._discount)
samples_data = self.paths_to_tensors(paths)
samples_data['average_return'] = np.mean(undiscounted_returns)
logger.log('Optimizing policy...')
self.optimize_policy(itr, samples_data)
return samples_data['average_return']
def optimize_policy(self, itr, samples_data):
"""Optimize policy.
Args:
itr (int): Iteration number.
samples_data (dict): Processed sample data.
See process_samples() for details.
"""
del itr
policy_opt_input_values = self._policy_opt_input_values(samples_data)
inference_opt_input_values = self._inference_opt_input_values(
samples_data)
self._train_policy_and_encoder_networks(policy_opt_input_values)
self._train_inference_network(inference_opt_input_values)
paths = samples_data['paths']
self.evaluate(policy_opt_input_values, samples_data)
self.visualize_distribution()
logger.log('Fitting baseline...')
self._baseline.fit(paths)
self._old_policy.model.parameters = self.policy.model.parameters
self._old_policy.encoder.model.parameters = (
self.policy.encoder.model.parameters)
self._old_inference.model.parameters = self._inference.model.parameters
def paths_to_tensors(self, paths):
# pylint: disable=too-many-statements
"""Return processed sample data based on the collected paths.
Args:
paths (list[dict]): A list of collected paths.
Returns:
dict: Processed sample data, with key
* observations: (numpy.ndarray)
* tasks: (numpy.ndarray)
* actions: (numpy.ndarray)
                * trajectories: (numpy.ndarray)
                * rewards: (numpy.ndarray)
                * baselines: (numpy.ndarray)
                * returns: (numpy.ndarray)
                * valids: (numpy.ndarray)
                * agent_infos: (dict)
                * latent_infos: (dict)
                * env_infos: (dict)
                * trajectory_infos: (dict)
* paths: (list[dict])
"""
max_path_length = self.max_path_length
def _extract_latent_infos(infos):
"""Extract and pack latent infos from dict.
Args:
infos (dict): A dict that contains latent infos with key
prefixed by 'latent_'.
Returns:
dict: A dict of latent infos.
"""
latent_infos = dict()
for k, v in infos.items():
if k.startswith('latent_'):
latent_infos[k[7:]] = v
return latent_infos
for path in paths:
if self._flatten_input:
path['observations'] = (
self._env_spec.observation_space.flatten_n(
path['observations']))
path['actions'] = (self._env_spec.action_space.flatten_n(
path['actions']))
path['tasks'] = self.policy.task_space.flatten_n(
path['env_infos']['task_onehot'])
path['latents'] = path['agent_infos']['latent']
path['latent_infos'] = _extract_latent_infos(path['agent_infos'])
# - Calculate a forward-looking sliding window.
# - If step_space has shape (n, d), then trajs will have shape
# (n, window, d)
# - The length of the sliding window is determined by the
# trajectory inference spec. We smear the last few elements to
# preserve the time dimension.
# - Only observation is used for a single step.
            #     Alternatively, stacked [observation, action] can be used
            #     in harder tasks.
obs = pad_tensor(path['observations'], max_path_length)
obs_flat = self._env_spec.observation_space.flatten_n(obs)
steps = obs_flat
window = self._inference.spec.input_space.shape[0]
traj = np_tensor_utils.sliding_window(steps, window, smear=True)
traj_flat = self._inference.spec.input_space.flatten_n(traj)
path['trajectories'] = traj_flat
_, traj_info = self._inference.get_latents(traj_flat)
path['trajectory_infos'] = traj_info
all_path_baselines = [self._baseline.predict(path) for path in paths]
tasks = [path['tasks'] for path in paths]
tasks = pad_tensor_n(tasks, max_path_length)
trajectories = np.stack([path['trajectories'] for path in paths])
latents = [path['latents'] for path in paths]
latents = pad_tensor_n(latents, max_path_length)
latent_infos = [path['latent_infos'] for path in paths]
latent_infos = stack_tensor_dict_list(
[pad_tensor_dict(p, max_path_length) for p in latent_infos])
trajectory_infos = [path['trajectory_infos'] for path in paths]
trajectory_infos = stack_tensor_dict_list(
[pad_tensor_dict(p, max_path_length) for p in trajectory_infos])
samples_data = paths_to_tensors(paths, max_path_length,
all_path_baselines, self._discount,
self._gae_lambda)
samples_data['tasks'] = tasks
samples_data['latents'] = latents
samples_data['latent_infos'] = latent_infos
samples_data['trajectories'] = trajectories
samples_data['trajectory_infos'] = trajectory_infos
return samples_data
def _build_inputs(self):
"""Build input variables.
Returns:
namedtuple: Collection of variables to compute policy loss.
namedtuple: Collection of variables to do policy optimization.
namedtuple: Collection of variables to compute inference loss.
namedtuple: Collection of variables to do inference optimization.
"""
# pylint: disable=too-many-statements
observation_space = self.policy.observation_space
action_space = self.policy.action_space
task_space = self.policy.task_space
latent_space = self.policy.latent_space
trajectory_space = self._inference.spec.input_space
with tf.name_scope('inputs'):
if self._flatten_input:
obs_var = tf.compat.v1.placeholder(
tf.float32,
shape=[None, None, observation_space.flat_dim],
name='obs')
task_var = tf.compat.v1.placeholder(
tf.float32,
shape=[None, None, task_space.flat_dim],
name='task')
trajectory_var = tf.compat.v1.placeholder(
tf.float32, shape=[None, None, trajectory_space.flat_dim])
latent_var = tf.compat.v1.placeholder(
tf.float32, shape=[None, None, latent_space.flat_dim])
else:
obs_var = observation_space.to_tf_placeholder(name='obs',
batch_dims=2)
task_var = task_space.to_tf_placeholder(name='task',
batch_dims=2)
trajectory_var = trajectory_space.to_tf_placeholder(
name='trajectory', batch_dims=2)
latent_var = latent_space.to_tf_placeholder(name='latent',
batch_dims=2)
action_var = action_space.to_tf_placeholder(name='action',
batch_dims=2)
reward_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='reward')
baseline_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='baseline')
valid_var = tf.compat.v1.placeholder(tf.float32,
shape=[None, None],
name='valid')
# Policy state (for RNNs)
policy_state_info_vars = {
k: tf.compat.v1.placeholder(tf.float32,
shape=[None] * 2 + list(shape),
name=k)
for k, shape in self.policy.state_info_specs
}
policy_state_info_vars_list = [
policy_state_info_vars[k] for k in self.policy.state_info_keys
]
# Encoder state (for RNNs)
embed_state_info_vars = {
k: tf.compat.v1.placeholder(tf.float32,
shape=[None] * 2 + list(shape),
name='embed_%s' % k)
for k, shape in self.policy.encoder.state_info_specs
}
embed_state_info_vars_list = [
embed_state_info_vars[k]
for k in self.policy.encoder.state_info_keys
]
# Inference distribution state (for RNNs)
infer_state_info_vars = {
k: tf.compat.v1.placeholder(tf.float32,
shape=[None] * 2 + list(shape),
name='infer_%s' % k)
for k, shape in self._inference.state_info_specs
}
infer_state_info_vars_list = [
infer_state_info_vars[k]
for k in self._inference.state_info_keys
]
extra_obs_var = [
tf.cast(v, tf.float32) for v in policy_state_info_vars_list
]
# Pylint false alarm
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
augmented_obs_var = tf.concat([obs_var] + extra_obs_var, axis=-1)
extra_traj_var = [
tf.cast(v, tf.float32) for v in infer_state_info_vars_list
]
augmented_traj_var = tf.concat([trajectory_var] + extra_traj_var, -1)
# Policy and encoder network loss and optimizer inputs
policy_loss_inputs = graph_inputs(
'PolicyLossInputs',
augmented_obs_var=augmented_obs_var,
augmented_traj_var=augmented_traj_var,
task_var=task_var,
latent_var=latent_var,
action_var=action_var,
reward_var=reward_var,
baseline_var=baseline_var,
valid_var=valid_var)
policy_opt_inputs = graph_inputs(
'PolicyOptInputs',
obs_var=obs_var,
action_var=action_var,
reward_var=reward_var,
baseline_var=baseline_var,
trajectory_var=trajectory_var,
task_var=task_var,
latent_var=latent_var,
valid_var=valid_var,
policy_state_info_vars_list=policy_state_info_vars_list,
embed_state_info_vars_list=embed_state_info_vars_list,
)
# Inference network loss and optimizer inputs
inference_loss_inputs = graph_inputs('InferenceLossInputs',
latent_var=latent_var,
valid_var=valid_var)
inference_opt_inputs = graph_inputs(
'InferenceOptInputs',
latent_var=latent_var,
trajectory_var=trajectory_var,
valid_var=valid_var,
infer_state_info_vars_list=infer_state_info_vars_list,
)
return (policy_loss_inputs, policy_opt_inputs, inference_loss_inputs,
inference_opt_inputs)
def _build_policy_loss(self, i):
"""Build policy loss and other output tensors.
Args:
i (namedtuple): Collection of variables to compute policy loss.
        Returns:
            tf.Tensor: Policy loss.
            tf.Tensor: Mean policy KL divergence.
            tf.Tensor: Mean encoder KL divergence.
"""
# pylint: disable=too-many-statements
self._policy_network, self._encoder_network = (self.policy.build(
i.augmented_obs_var, i.task_var, name='loss_policy'))
self._old_policy_network, self._old_encoder_network = (
self._old_policy.build(i.augmented_obs_var,
i.task_var,
name='loss_old_policy'))
self._infer_network = self._inference.build(i.augmented_traj_var,
name='loss_infer')
self._old_infer_network = self._old_inference.build(
i.augmented_traj_var, name='loss_old_infer')
pol_dist = self._policy_network.dist
old_pol_dist = self._old_policy_network.dist
# Entropy terms
encoder_entropy, inference_ce, policy_entropy = (
self._build_entropy_terms(i))
# Augment the path rewards with entropy terms
with tf.name_scope('augmented_rewards'):
rewards = (i.reward_var -
(self.inference_ce_coeff * inference_ce) +
(self._policy_ent_coeff * policy_entropy))
with tf.name_scope('policy_loss'):
with tf.name_scope('advantages'):
adv = compute_advantages(self._discount,
self._gae_lambda,
self.max_path_length,
i.baseline_var,
rewards,
name='advantages')
adv = tf.reshape(adv, [-1, self.max_path_length])
# Optionally normalize advantages
eps = tf.constant(1e-8, dtype=tf.float32)
if self._center_adv:
adv = center_advs(adv, axes=[0], eps=eps)
if self._positive_adv:
adv = positive_advs(adv, eps)
# Calculate loss function and KL divergence
with tf.name_scope('kl'):
kl = old_pol_dist.kl_divergence(pol_dist)
pol_mean_kl = tf.reduce_mean(kl)
ll = pol_dist.log_prob(i.action_var, name='log_likelihood')
# Calculate surrogate loss
with tf.name_scope('surr_loss'):
old_ll = old_pol_dist.log_prob(i.action_var)
old_ll = tf.stop_gradient(old_ll)
# Clip early to avoid overflow
lr = tf.exp(
tf.minimum(ll - old_ll, np.log(1 + self._lr_clip_range)))
surrogate = lr * adv
surrogate = tf.debugging.check_numerics(surrogate,
message='surrogate')
# Finalize objective function
with tf.name_scope('loss'):
lr_clip = tf.clip_by_value(lr,
1 - self._lr_clip_range,
1 + self._lr_clip_range,
name='lr_clip')
surr_clip = lr_clip * adv
obj = tf.minimum(surrogate, surr_clip, name='surr_obj')
obj = tf.boolean_mask(obj, i.valid_var)
# Maximize E[surrogate objective] by minimizing
# -E_t[surrogate objective]
loss = -tf.reduce_mean(obj)
# Encoder entropy bonus
loss -= self.encoder_ent_coeff * encoder_entropy
encoder_mean_kl = self._build_encoder_kl()
# Diagnostic functions
self._f_policy_kl = tf.compat.v1.get_default_session(
).make_callable(pol_mean_kl,
feed_list=flatten_inputs(self._policy_opt_inputs))
self._f_rewards = tf.compat.v1.get_default_session().make_callable(
rewards, feed_list=flatten_inputs(self._policy_opt_inputs))
returns = discounted_returns(self._discount,
self.max_path_length,
rewards,
name='returns')
self._f_returns = tf.compat.v1.get_default_session().make_callable(
returns, feed_list=flatten_inputs(self._policy_opt_inputs))
return loss, pol_mean_kl, encoder_mean_kl
def _build_entropy_terms(self, i):
"""Build policy entropy tensor.
Args:
i (namedtuple): Collection of variables to compute policy loss.
        Returns:
            tf.Tensor: Encoder entropy.
            tf.Tensor: Inference cross-entropy.
            tf.Tensor: Policy entropy.
"""
pol_dist = self._policy_network.dist
infer_dist = self._infer_network.dist
enc_dist = self._encoder_network.dist
with tf.name_scope('entropy_terms'):
# 1. Encoder distribution total entropy
with tf.name_scope('encoder_entropy'):
encoder_dist, _, _ = self.policy.encoder.build(
i.task_var, name='encoder_entropy').outputs
encoder_all_task_entropies = -encoder_dist.log_prob(
i.latent_var)
if self._use_softplus_entropy:
encoder_entropy = tf.nn.softplus(
encoder_all_task_entropies)
encoder_entropy = tf.reduce_mean(encoder_entropy,
name='encoder_entropy')
encoder_entropy = tf.stop_gradient(encoder_entropy)
            # 2. Inference distribution cross-entropy (log-likelihood)
with tf.name_scope('inference_ce'):
# Build inference with trajectory windows
traj_ll = infer_dist.log_prob(
enc_dist.sample(seed=deterministic.get_tf_seed_stream()),
name='traj_ll')
inference_ce_raw = -traj_ll
inference_ce = tf.clip_by_value(inference_ce_raw, -3, 3)
if self._use_softplus_entropy:
inference_ce = tf.nn.softplus(inference_ce)
if self._stop_ce_gradient:
inference_ce = tf.stop_gradient(inference_ce)
# 3. Policy path entropies
with tf.name_scope('policy_entropy'):
policy_entropy = -pol_dist.log_prob(i.action_var,
name='policy_log_likeli')
# This prevents entropy from becoming negative
# for small policy std
if self._use_softplus_entropy:
policy_entropy = tf.nn.softplus(policy_entropy)
policy_entropy = tf.stop_gradient(policy_entropy)
# Diagnostic functions
self._f_task_entropies = compile_function(flatten_inputs(
self._policy_opt_inputs),
encoder_all_task_entropies,
log_name='f_task_entropies')
self._f_encoder_entropy = compile_function(
flatten_inputs(self._policy_opt_inputs),
encoder_entropy,
log_name='f_encoder_entropy')
self._f_inference_ce = compile_function(
flatten_inputs(self._policy_opt_inputs),
tf.reduce_mean(inference_ce * i.valid_var),
log_name='f_inference_ce')
self._f_policy_entropy = compile_function(flatten_inputs(
self._policy_opt_inputs),
policy_entropy,
log_name='f_policy_entropy')
return encoder_entropy, inference_ce, policy_entropy
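    # Editor's note (not part of the original class): the three terms returned
    # above enter the objective through the augmented reward assembled in
    # _build_policy_loss:
    #
    #   r_aug[t] = r[t] - inference_ce_coeff * inference_ce[t]
    #                   + policy_ent_coeff * policy_entropy[t]
    #
    # A commented numpy sketch with made-up coefficients and values:
    #
    #   import numpy as np
    #   r = np.array([1.0, 1.0])
    #   ce = np.array([0.2, 0.4])           # inference cross-entropy per step
    #   ent = np.array([0.5, 0.5])          # policy entropy per step
    #   r_aug = r - 0.1 * ce + 0.01 * ent   # coefficients are assumptions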
def _build_encoder_kl(self):
"""Build graph for encoder KL divergence.
Returns:
tf.Tensor: Encoder KL divergence.
"""
dist = self._encoder_network.dist
old_dist = self._old_encoder_network.dist
with tf.name_scope('encoder_kl'):
kl = old_dist.kl_divergence(dist)
mean_kl = tf.reduce_mean(kl)
# Diagnostic function
self._f_encoder_kl = compile_function(flatten_inputs(
self._policy_opt_inputs),
mean_kl,
log_name='f_encoder_kl')
return mean_kl
def _build_inference_loss(self, i):
"""Build loss function for the inference network.
Args:
i (namedtuple): Collection of variables to compute inference loss.
Returns:
tf.Tensor: Inference loss.
"""
dist = self._infer_network.dist
old_dist = self._old_infer_network.dist
with tf.name_scope('infer_loss'):
traj_ll = dist.log_prob(i.latent_var, name='traj_ll_2')
# Calculate loss
traj_gammas = tf.constant(float(self._discount),
dtype=tf.float32,
shape=[self.max_path_length])
# Pylint false alarm
# pylint: disable=no-value-for-parameter
traj_discounts = tf.compat.v1.cumprod(traj_gammas,
exclusive=True,
name='traj_discounts')
discount_traj_ll = traj_discounts * traj_ll
discount_traj_ll = tf.boolean_mask(discount_traj_ll, i.valid_var)
with tf.name_scope('loss'):
infer_loss = -tf.reduce_mean(discount_traj_ll,
name='infer_loss')
with tf.name_scope('kl'):
# Calculate predicted encoder distributions for each timestep
# Calculate KL divergence
kl = old_dist.kl_divergence(dist)
infer_kl = tf.reduce_mean(kl, name='infer_kl')
return infer_loss, infer_kl
def _policy_opt_input_values(self, samples_data):
"""Map rollout samples to the policy optimizer inputs.
Args:
samples_data (dict): Processed sample data.
See process_samples() for details.
Returns:
list(np.ndarray): Flatten policy optimization input values.
"""
policy_state_info_list = [
samples_data['agent_infos'][k] for k in self.policy.state_info_keys
]
embed_state_info_list = [
samples_data['latent_infos'][k]
for k in self.policy.encoder.state_info_keys
]
policy_opt_input_values = self._policy_opt_inputs._replace(
obs_var=samples_data['observations'],
action_var=samples_data['actions'],
reward_var=samples_data['rewards'],
baseline_var=samples_data['baselines'],
trajectory_var=samples_data['trajectories'],
task_var=samples_data['tasks'],
latent_var=samples_data['latents'],
valid_var=samples_data['valids'],
policy_state_info_vars_list=policy_state_info_list,
embed_state_info_vars_list=embed_state_info_list,
)
return flatten_inputs(policy_opt_input_values)
def _inference_opt_input_values(self, samples_data):
"""Map rollout samples to the inference optimizer inputs.
Args:
samples_data (dict): Processed sample data.
See process_samples() for details.
Returns:
list(np.ndarray): Flatten inference optimization input values.
"""
infer_state_info_list = [
samples_data['trajectory_infos'][k]
for k in self._inference.state_info_keys
]
# pylint: disable=unexpected-keyword-arg
inference_opt_input_values = self._inference_opt_inputs._replace(
latent_var=samples_data['latents'],
trajectory_var=samples_data['trajectories'],
valid_var=samples_data['valids'],
infer_state_info_vars_list=infer_state_info_list,
)
return flatten_inputs(inference_opt_input_values)
def evaluate(self, policy_opt_input_values, samples_data):
"""Evaluate rewards and everything else.
Args:
policy_opt_input_values (list[np.ndarray]): Flattened
policy optimization input values.
samples_data (dict): Processed sample data.
See process_samples() for details.
Returns:
dict: Processed sample data.
"""
# pylint: disable=too-many-statements
# Augment reward from baselines
rewards_tensor = self._f_rewards(*policy_opt_input_values)
returns_tensor = self._f_returns(*policy_opt_input_values)
returns_tensor = np.squeeze(returns_tensor, -1)
paths = samples_data['paths']
valids = samples_data['valids']
baselines = [path['baselines'] for path in paths]
env_rewards = [path['rewards'] for path in paths]
env_rewards = concat_tensor_list(env_rewards.copy())
env_returns = [path['returns'] for path in paths]
env_returns = concat_tensor_list(env_returns.copy())
env_average_discounted_return = (np.mean(
[path['returns'][0] for path in paths]))
# Recompute parts of samples_data
aug_rewards = []
aug_returns = []
for rew, ret, val, path in zip(rewards_tensor, returns_tensor, valids,
paths):
path['rewards'] = rew[val.astype(np.bool)]
path['returns'] = ret[val.astype(np.bool)]
aug_rewards.append(path['rewards'])
aug_returns.append(path['returns'])
aug_rewards = concat_tensor_list(aug_rewards)
aug_returns = concat_tensor_list(aug_returns)
samples_data['rewards'] = aug_rewards
samples_data['returns'] = aug_returns
# Calculate effect of the entropy terms
d_rewards = np.mean(aug_rewards - env_rewards)
tabular.record('{}/EntRewards'.format(self.policy.name), d_rewards)
aug_average_discounted_return = (np.mean(
[path['returns'][0] for path in paths]))
d_returns = np.mean(aug_average_discounted_return -
env_average_discounted_return)
tabular.record('{}/EntReturns'.format(self.policy.name), d_returns)
# Calculate explained variance
ev = np_tensor_utils.explained_variance_1d(np.concatenate(baselines),
aug_returns)
tabular.record('{}/ExplainedVariance'.format(self._baseline.name), ev)
inference_rmse = (samples_data['trajectory_infos']['mean'] -
samples_data['latents'])**2.
inference_rmse = np.sqrt(inference_rmse.mean())
tabular.record('Inference/RMSE', inference_rmse)
inference_rrse = np_tensor_utils.rrse(
samples_data['latents'], samples_data['trajectory_infos']['mean'])
tabular.record('Inference/RRSE', inference_rrse)
embed_ent = self._f_encoder_entropy(*policy_opt_input_values)
tabular.record('{}/Encoder/Entropy'.format(self.policy.name),
embed_ent)
infer_ce = self._f_inference_ce(*policy_opt_input_values)
tabular.record('Inference/CrossEntropy', infer_ce)
pol_ent = self._f_policy_entropy(*policy_opt_input_values)
pol_ent = np.sum(pol_ent) / np.sum(samples_data['valids'])
tabular.record('{}/Entropy'.format(self.policy.name), pol_ent)
task_ents = self._f_task_entropies(*policy_opt_input_values)
tasks = samples_data['tasks'][:, 0, :]
_, task_indices = np.nonzero(tasks)
path_lengths = np.sum(samples_data['valids'], axis=1)
for t in range(self.policy.task_space.flat_dim):
lengths = path_lengths[task_indices == t]
completed = lengths < self.max_path_length
pct_completed = np.mean(completed)
tabular.record('Tasks/EpisodeLength/t={}'.format(t),
np.mean(lengths))
tabular.record('Tasks/CompletionRate/t={}'.format(t),
pct_completed)
tabular.record('Tasks/Entropy/t={}'.format(t), task_ents[t])
return samples_data
def visualize_distribution(self):
"""Visualize encoder distribution."""
num_tasks = self.policy.task_space.flat_dim
all_tasks = np.eye(num_tasks, num_tasks)
_, latent_infos = self.policy.encoder.get_latents(all_tasks)
for task in range(num_tasks):
for i in range(self.policy.latent_space.flat_dim):
stds = np.exp(latent_infos['log_std'][task, i])
norm = scipy.stats.norm(loc=latent_infos['mean'][task, i],
scale=stds)
samples = norm.rvs(100)
hist = Histogram(samples)
tabular.record('Encoder/task={},i={}'.format(task, i), hist)
def _train_policy_and_encoder_networks(self, policy_opt_input_values):
"""Joint optimization of policy and encoder networks.
Args:
policy_opt_input_values (list(np.ndarray)): Flatten policy
optimization input values.
Returns:
float: Policy loss after optimization.
"""
logger.log('Computing loss before')
loss_before = self._optimizer.loss(policy_opt_input_values)
logger.log('Computing KL before')
policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
embed_kl_before = self._f_encoder_kl(*policy_opt_input_values)
logger.log('Optimizing')
self._optimizer.optimize(policy_opt_input_values)
logger.log('Computing KL after')
policy_kl = self._f_policy_kl(*policy_opt_input_values)
embed_kl = self._f_encoder_kl(*policy_opt_input_values)
logger.log('Computing loss after')
loss_after = self._optimizer.loss(policy_opt_input_values)
tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
tabular.record('{}/dLoss'.format(self.policy.name),
loss_before - loss_after)
tabular.record('{}/KLBefore'.format(self.policy.name),
policy_kl_before)
tabular.record('{}/KL'.format(self.policy.name), policy_kl)
tabular.record('{}/Encoder/KLBefore'.format(self.policy.name),
embed_kl_before)
tabular.record('{}/Encoder/KL'.format(self.policy.name), embed_kl)
return loss_after
def _train_inference_network(self, inference_opt_input_values):
"""Optimize inference network.
Args:
inference_opt_input_values (list(np.ndarray)): Flattened inference
optimization input values.
Returns:
float: Inference loss after optimization.
"""
logger.log('Optimizing inference network...')
infer_loss_before = self.inference_optimizer.loss(
inference_opt_input_values)
tabular.record('Inference/Loss', infer_loss_before)
self.inference_optimizer.optimize(inference_opt_input_values)
infer_loss_after = self.inference_optimizer.loss(
inference_opt_input_values)
tabular.record('Inference/dLoss', infer_loss_before - infer_loss_after)
return infer_loss_after
@classmethod
def _get_latent_space(cls, latent_dim):
"""Get latent space given latent length.
Args:
latent_dim (int): Length of latent.
Returns:
akro.Space: Space of latent.
"""
latent_lb = np.zeros(latent_dim, )
latent_up = np.ones(latent_dim, )
return akro.Box(latent_lb, latent_up)
@classmethod
def get_encoder_spec(cls, task_space, latent_dim):
"""Get the embedding spec of the encoder.
Args:
task_space (akro.Space): Task spec.
latent_dim (int): Latent dimension.
Returns:
garage.InOutSpec: Encoder spec.
"""
latent_space = cls._get_latent_space(latent_dim)
return InOutSpec(task_space, latent_space)
@classmethod
def get_infer_spec(cls, env_spec, latent_dim, inference_window_size):
"""Get the embedding spec of the inference.
Every `inference_window_size` timesteps in the trajectory will be used
as the inference network input.
Args:
env_spec (garage.envs.EnvSpec): Environment spec.
latent_dim (int): Latent dimension.
inference_window_size (int): Length of inference window.
Returns:
garage.InOutSpec: Inference spec.
"""
latent_space = cls._get_latent_space(latent_dim)
obs_lb, obs_ub = env_spec.observation_space.bounds
obs_lb_flat = env_spec.observation_space.flatten(obs_lb)
obs_ub_flat = env_spec.observation_space.flatten(obs_ub)
traj_lb = np.stack([obs_lb_flat] * inference_window_size)
traj_ub = np.stack([obs_ub_flat] * inference_window_size)
traj_space = akro.Box(traj_lb, traj_ub)
return InOutSpec(traj_space, latent_space)
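# Editor's note: a minimal sketch (not part of the original source) of how the
# two spec helpers above are typically used to build the encoder and the
# inference network before instantiating the algorithm. `env`, `num_tasks`,
# `latent_length`, and `inference_window` are placeholders assumed to be
# defined elsewhere.
#
#     import akro
#     from garage.tf.embeddings import GaussianMLPEncoder
#
#     task_space = akro.Box(low=0, high=1, shape=(num_tasks, ))
#     encoder_spec = TENPO.get_encoder_spec(task_space,
#                                           latent_dim=latent_length)
#     encoder = GaussianMLPEncoder(encoder_spec, name='embedding')
#     infer_spec = TENPO.get_infer_spec(
#         env.spec,
#         latent_dim=latent_length,
#         inference_window_size=inference_window)
#     inference = GaussianMLPEncoder(infer_spec, name='inference')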
def __getstate__(self):
"""Parameters to save in snapshot.
Returns:
dict: Parameters to save.
"""
data = self.__dict__.copy()
del data['_name_scope']
del data['_inference_opt_inputs']
del data['_policy_opt_inputs']
del data['_f_inference_ce']
del data['_f_task_entropies']
del data['_f_encoder_entropy']
del data['_f_encoder_kl']
del data['_f_policy_entropy']
del data['_f_policy_kl']
del data['_f_rewards']
del data['_f_returns']
del data['_policy_network']
del data['_old_policy_network']
del data['_encoder_network']
del data['_old_encoder_network']
del data['_infer_network']
del data['_old_infer_network']
return data
def __setstate__(self, state):
"""Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
"""
self.__dict__ = state
self._name_scope = tf.name_scope(self._name)
self.init_opt()
| 44,443 | 39.588128 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/te_ppo.py | """Proximal Policy Optimization with Task Embedding."""
from garage.tf.algos.te_npo import TENPO
from garage.tf.optimizers import FirstOrderOptimizer
class TEPPO(TENPO):
"""Proximal Policy Optimization with Task Embedding.
See https://karolhausman.github.io/pdf/hausman17nips-ws2.pdf for algorithm
reference.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.TaskEmbeddingPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
encoder_ent_coeff (float): The coefficient of the policy encoder
entropy. Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to pass the entropy estimate
through a softplus transformation to prevent the entropy from
being negative.
stop_ce_gradient (bool): Whether to stop the cross entropy gradient.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
inference (garage.tf.embedding.encoder.StochasticEncoder): A encoder
that infers the task embedding from trajectory.
inference_optimizer (object): The optimizer of the inference. Should be
an optimizer in garage.tf.optimizers.
inference_optimizer_args (dict): The arguments of the inference
optimizer.
inference_ce_coeff (float): The coefficient of the cross entropy of
task embeddings inferred from task one-hot and trajectory. This is
effectively the coefficient of log-prob of inference.
name (str): The name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=0.98,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=1e-3,
encoder_ent_coeff=1e-3,
use_softplus_entropy=False,
stop_ce_gradient=False,
flatten_input=True,
inference=None,
inference_optimizer=None,
inference_optimizer_args=None,
inference_ce_coeff=1e-3,
name='PPOTaskEmbedding'):
optimizer = optimizer or FirstOrderOptimizer
optimizer_args = optimizer_args or dict(batch_size=32, max_epochs=10)
inference_optimizer = inference_optimizer or FirstOrderOptimizer
inference_optimizer_args = inference_optimizer_args or dict(
batch_size=32, max_epochs=10)
super().__init__(env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=optimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
stop_ce_gradient=stop_ce_gradient,
flatten_input=flatten_input,
inference=inference,
inference_optimizer=inference_optimizer,
inference_optimizer_args=inference_optimizer_args,
inference_ce_coeff=inference_ce_coeff,
name=name)
| 5,585 | 45.941176 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/tnpg.py | """Truncated Natural Policy Gradient."""
from garage.tf.algos.npo import NPO
from garage.tf.optimizers import ConjugateGradientOptimizer
class TNPG(NPO):
"""Truncated Natural Policy Gradient.
TNPG uses Conjugate Gradient to compute the policy gradient.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to pass the entropy estimate
through a softplus transformation to prevent the entropy from
being negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
name (str): The name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=0.98,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
flatten_input=True,
name='TNPG'):
if optimizer is None:
optimizer = ConjugateGradientOptimizer
default_args = dict(max_backtracks=1)
if optimizer_args is None:
optimizer_args = default_args
else:
optimizer_args = dict(default_args, **optimizer_args)
super().__init__(env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
pg_loss='surrogate',
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=optimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
flatten_input=flatten_input,
name=name)
| 4,992 | 45.231481 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/trpo.py | """Trust Region Policy Optimization."""
from garage.tf.algos.npo import NPO
from garage.tf.optimizers import ConjugateGradientOptimizer
from garage.tf.optimizers import PenaltyLbfgsOptimizer
class TRPO(NPO):
"""Trust Region Policy Optimization.
See https://arxiv.org/abs/1502.05477.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to pass the entropy estimate
through a softplus transformation to prevent the entropy from
being negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
kl_constraint (str): KL constraint, either 'hard' or 'soft'.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
name (str): The name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=0.98,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
kl_constraint='hard',
entropy_method='no_entropy',
flatten_input=True,
name='TRPO'):
if not optimizer:
if kl_constraint == 'hard':
optimizer = ConjugateGradientOptimizer
elif kl_constraint == 'soft':
optimizer = PenaltyLbfgsOptimizer
else:
raise ValueError('Invalid kl_constraint')
if optimizer_args is None:
optimizer_args = dict()
super().__init__(env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
pg_loss='surrogate',
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=optimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
flatten_input=flatten_input,
name=name)
| 5,188 | 44.121739 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/algos/vpg.py | """Vanilla Policy Gradient."""
from garage.tf.algos.npo import NPO
from garage.tf.optimizers import FirstOrderOptimizer
class VPG(NPO):
"""Vanilla Policy Gradient.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.tf.policies.StochasticPolicy): Policy.
baseline (garage.tf.baselines.Baseline): The baseline.
scope (str): Scope for identifying the algorithm.
Must be specified if running multiple algorithms
simultaneously, each using different environments
and policies.
max_path_length (int): Maximum length of a single rollout.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
fixed_horizon (bool): Whether to fix horizon.
lr_clip_range (float): The limit on the likelihood ratio between
policies, as in PPO.
max_kl_step (float): The maximum KL divergence between old and new
policies, as in TRPO.
optimizer (object): The optimizer of the algorithm. Should be the
optimizers in garage.tf.optimizers.
optimizer_args (dict): The arguments of the optimizer.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to pass the entropy estimate
through a softplus transformation to prevent the entropy from
being negative.
use_neg_logli_entropy (bool): Whether to estimate the entropy as the
negative log likelihood of the action.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
flatten_input (bool): Whether to flatten input along the observation
dimension. If True, for example, an observation with shape (2, 4)
will be flattened to 8.
name (str): The name of the algorithm.
"""
def __init__(self,
env_spec,
policy,
baseline,
scope=None,
max_path_length=500,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
fixed_horizon=False,
lr_clip_range=0.01,
max_kl_step=0.01,
optimizer=None,
optimizer_args=None,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
use_neg_logli_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
flatten_input=True,
name='VPG'):
if optimizer is None:
default_args = dict(
batch_size=None,
max_epochs=1,
)
optimizer = FirstOrderOptimizer
if optimizer_args is None:
optimizer_args = default_args
else:
optimizer_args = dict(default_args, **optimizer_args)
super().__init__(env_spec=env_spec,
policy=policy,
baseline=baseline,
scope=scope,
max_path_length=max_path_length,
discount=discount,
gae_lambda=gae_lambda,
center_adv=center_adv,
positive_adv=positive_adv,
fixed_horizon=fixed_horizon,
pg_loss='vanilla',
lr_clip_range=lr_clip_range,
max_kl_step=max_kl_step,
optimizer=optimizer,
optimizer_args=optimizer_args,
policy_ent_coeff=policy_ent_coeff,
use_softplus_entropy=use_softplus_entropy,
use_neg_logli_entropy=use_neg_logli_entropy,
stop_entropy_gradient=stop_entropy_gradient,
entropy_method=entropy_method,
name=name)
| 4,891 | 44.296296 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/baselines/__init__.py | """Baseline estimators for TensorFlow-based algorithms."""
from garage.tf.baselines.continuous_mlp_baseline import ContinuousMLPBaseline
from garage.tf.baselines.gaussian_cnn_baseline import GaussianCNNBaseline
from garage.tf.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
__all__ = [
'ContinuousMLPBaseline',
'GaussianCNNBaseline',
'GaussianMLPBaseline',
]
| 383 | 33.909091 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/baselines/continuous_mlp_baseline.py | """A value function (baseline) based on a MLP model."""
import numpy as np
from garage.np.baselines import Baseline
from garage.tf.regressors import ContinuousMLPRegressor
class ContinuousMLPBaseline(Baseline):
"""A value function using a MLP network.
It fits the input data by performing linear regression
to the outputs.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
num_seq_inputs (float): Number of sequence per input. By default
it is 1.0, which means only one single sequence.
regressor_args (dict): Arguments for regressor.
name (str): Name of baseline.
"""
def __init__(self,
env_spec,
num_seq_inputs=1,
regressor_args=None,
name='ContinuousMLPBaseline'):
super().__init__(env_spec)
if regressor_args is None:
regressor_args = dict()
self._regressor = ContinuousMLPRegressor(
input_shape=(env_spec.observation_space.flat_dim *
num_seq_inputs, ),
output_dim=1,
name=name,
**regressor_args)
self.name = name
def fit(self, paths):
"""Fit regressor based on paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
"""
observations = np.concatenate([p['observations'] for p in paths])
returns = np.concatenate([p['returns'] for p in paths])
self._regressor.fit(observations, returns.reshape((-1, 1)))
def predict(self, path):
"""Predict value based on paths.
Args:
path (dict[numpy.ndarray]): Sample paths.
Returns:
numpy.ndarray: Predicted value.
"""
return self._regressor.predict(path['observations']).flatten()
def get_param_values(self):
"""Get parameter values.
Returns:
List[np.ndarray]: A list of values of each parameter.
"""
return self._regressor.get_param_values()
def set_param_values(self, flattened_params):
"""Set param values.
Args:
flattened_params (np.ndarray): A numpy array of parameter values.
"""
self._regressor.set_param_values(flattened_params)
def get_params_internal(self):
"""Get the params, which are the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._regressor.get_params_internal()
| 2,590 | 27.788889 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/baselines/gaussian_cnn_baseline.py | """Gaussian CNN Baseline."""
import akro
import numpy as np
from garage.misc.tensor_utils import normalize_pixel_batch
from garage.np.baselines import Baseline
from garage.tf.regressors import GaussianCNNRegressor
class GaussianCNNBaseline(Baseline):
"""GaussianCNNBaseline With Model.
It fits the input data to a Gaussian distribution estimated by a CNN.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
subsample_factor (float): The factor to subsample the data. By
default it is 1.0, which means using all the data.
regressor_args (dict): Arguments for regressor.
name (str): Name of baseline.
"""
def __init__(
self,
env_spec,
subsample_factor=1.,
regressor_args=None,
name='GaussianCNNBaseline',
):
if not isinstance(env_spec.observation_space, akro.Box) or \
not len(env_spec.observation_space.shape) in (2, 3):
raise ValueError(
'{} can only process 2D, 3D akro.Image or'
' akro.Box observations, but received an env_spec with '
'observation_space of type {} and shape {}'.format(
type(self).__name__,
type(env_spec.observation_space).__name__,
env_spec.observation_space.shape))
super().__init__(env_spec)
if regressor_args is None:
regressor_args = dict()
self._regressor = GaussianCNNRegressor(
input_shape=(env_spec.observation_space.shape),
output_dim=1,
subsample_factor=subsample_factor,
name=name,
**regressor_args)
self.name = name
self.env_spec = env_spec
def fit(self, paths):
"""Fit regressor based on paths.
Args:
paths (dict[numpy.ndarray]): Sample paths.
"""
observations = np.concatenate([p['observations'] for p in paths])
if isinstance(self.env_spec.observation_space, akro.Image):
observations = normalize_pixel_batch(observations)
returns = np.concatenate([p['returns'] for p in paths])
self._regressor.fit(observations, returns.reshape((-1, 1)))
def predict(self, path):
"""Predict value based on paths.
Args:
path (dict[numpy.ndarray]): Sample paths.
Returns:
numpy.ndarray: Predicted value.
"""
observations = path['observations']
if isinstance(self.env_spec.observation_space, akro.Image):
observations = normalize_pixel_batch(observations)
return self._regressor.predict(observations).flatten()
def get_param_values(self):
"""Get parameter values.
Returns:
List[np.ndarray]: A list of values of each parameter.
"""
return self._regressor.get_param_values()
def set_param_values(self, flattened_params):
"""Set param values.
Args:
flattened_params (np.ndarray): A numpy array of parameter values.
"""
self._regressor.set_param_values(flattened_params)
def get_params_internal(self):
"""Get the params, which are the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._regressor.get_params_internal()
| 3,478 | 30.342342 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/baselines/gaussian_mlp_baseline.py | """A value function (baseline) based on a GaussianMLP model."""
import numpy as np
from garage.np.baselines import Baseline
from garage.tf.regressors import GaussianMLPRegressor
class GaussianMLPBaseline(Baseline):
"""Gaussian MLP Baseline with Model.
It fits the input data to a Gaussian distribution estimated by
an MLP.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
subsample_factor (float): The factor to subsample the data. By
default it is 1.0, which means using all the data.
num_seq_inputs (float): Number of sequence per input. By default
it is 1.0, which means only one single sequence.
regressor_args (dict): Arguments for regressor.
name (str): Name of baseline.
"""
def __init__(
self,
env_spec,
subsample_factor=1.,
num_seq_inputs=1,
regressor_args=None,
name='GaussianMLPBaseline',
):
super().__init__(env_spec)
if regressor_args is None:
regressor_args = dict()
self._regressor = GaussianMLPRegressor(
input_shape=(env_spec.observation_space.flat_dim *
num_seq_inputs, ),
output_dim=1,
name=name,
subsample_factor=subsample_factor,
**regressor_args)
self.name = name
def fit(self, paths):
"""Fit regressor based on paths.
Args:
paths (list[dict]): Sample paths.
"""
observations = np.concatenate([p['observations'] for p in paths])
returns = np.concatenate([p['returns'] for p in paths])
self._regressor.fit(observations, returns.reshape((-1, 1)))
def predict(self, path):
"""Predict value based on paths.
Args:
path (list[dict]): Sample paths.
Returns:
numpy.ndarray: Predicted value.
"""
return self._regressor.predict(path['observations']).flatten()
def get_param_values(self):
"""Get parameter values.
Returns:
List[np.ndarray]: A list of values of each parameter.
"""
return self._regressor.get_param_values()
def set_param_values(self, flattened_params):
"""Set param values.
Args:
flattened_params (np.ndarray): A numpy array of parameter values.
"""
self._regressor.set_param_values(flattened_params)
def get_params_internal(self):
"""Get the params, which are the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._regressor.get_params_internal()
| 2,780 | 27.96875 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/distributions/__init__.py | # flake8: noqa
from garage.tf.distributions.distribution import Distribution
from garage.tf.distributions.bernoulli import Bernoulli
from garage.tf.distributions.categorical import Categorical
from garage.tf.distributions.diagonal_gaussian import DiagonalGaussian
from garage.tf.distributions.recurrent_categorical import RecurrentCategorical
from garage.tf.distributions.recurrent_diagonal_gaussian import (
RecurrentDiagonalGaussian)
__all__ = [
'Distribution',
'Bernoulli',
'Categorical',
'DiagonalGaussian',
'RecurrentCategorical',
'RecurrentDiagonalGaussian',
]
| 596 | 32.166667 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/distributions/bernoulli.py | # flake8: noqa
# pylint: skip-file
import numpy as np
import tensorflow as tf
from garage.tf.distributions.distribution import Distribution
TINY = 1e-8
class Bernoulli(Distribution):
def __init__(self, dim, name='Bernoulli'):
self._name = name
self._dim = dim
@property
def dim(self):
return self._dim
def kl_sym(self, old_dist_info_vars, new_dist_info_vars, name='kl_sym'):
with tf.name_scope(name):
old_p = old_dist_info_vars['p']
new_p = new_dist_info_vars['p']
kl = (old_p *
(tf.math.log(old_p + TINY) - tf.math.log(new_p + TINY)) +
(1 - old_p) * (tf.math.log(1 - old_p + TINY) -
tf.math.log(1 - new_p + TINY)))
ndims = kl.get_shape().ndims
return tf.reduce_sum(kl, axis=ndims - 1)
def kl(self, old_dist_info, new_dist_info):
old_p = old_dist_info['p']
new_p = new_dist_info['p']
kl = old_p * (np.log(old_p + TINY) - np.log(new_p + TINY)) \
+ (1 - old_p) \
* (np.log(1 - old_p + TINY) - np.log(1 - new_p + TINY))
return np.sum(kl, axis=-1)
def sample(self, dist_info):
p = np.asarray(dist_info['p'])
return np.cast['int'](
np.random.uniform(low=0., high=1., size=p.shape) < p)
def likelihood_ratio_sym(self,
x_var,
old_dist_info_vars,
new_dist_info_vars,
name='likelihood_ratio_sym'):
with tf.name_scope(name):
old_p = old_dist_info_vars['p']
new_p = new_dist_info_vars['p']
ndims = old_p.get_shape().ndims
return tf.reduce_prod(x_var * new_p / (old_p + TINY) +
(1 - x_var) * (1 - new_p) /
(1 - old_p + TINY),
axis=ndims - 1)
def log_likelihood_sym(self,
x_var,
dist_info_vars,
name='log_likelihood_sym'):
with tf.name_scope(name):
p = dist_info_vars['p']
ndims = p.get_shape().ndims
return tf.reduce_sum(x_var * tf.math.log(p + TINY) +
(1 - x_var) * tf.math.log(1 - p + TINY),
axis=ndims - 1)
def log_likelihood(self, xs, dist_info):
p = dist_info['p']
return np.sum(xs * np.log(p + TINY) + (1 - xs) * np.log(1 - p + TINY),
axis=-1)
def entropy(self, dist_info):
p = dist_info['p']
return np.sum(-p * np.log(p + TINY) - (1 - p) * np.log(1 - p + TINY),
axis=-1)
@property
def dist_info_keys(self):
return ['p']
| 2,871 | 33.60241 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/distributions/categorical.py | # flake8: noqa
# pylint: skip-file
import numpy as np
import tensorflow as tf
from garage.tf.distributions.distribution import Distribution
from garage.tf.misc.tensor_utils import compile_function
TINY = 1e-8
def from_onehot(x_var):
ret = np.zeros((len(x_var), ), 'int32')
nonzero_n, nonzero_a = np.nonzero(x_var)
ret[nonzero_n] = nonzero_a
return ret
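# Editor's note: `from_onehot` maps a batch of one-hot rows to integer class
# indices. A small illustrative example (not part of the original source):
#
#     >>> from_onehot(np.array([[0, 1, 0],
#     ...                       [1, 0, 0]]))
#     array([1, 0], dtype=int32)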
class Categorical(Distribution):
def __init__(self, dim, name=None):
with tf.compat.v1.variable_scope(name, 'Categorical'):
self._dim = dim
self._name = name
weights_var = tf.compat.v1.placeholder(dtype=tf.float32,
shape=(None, dim),
name='weights')
self._f_sample = compile_function(
inputs=[weights_var],
outputs=tf.random.categorical(tf.math.log(weights_var + 1e-8),
num_samples=1)[:, 0],
)
@property
def dim(self):
return self._dim
def kl_sym(self, old_dist_info_vars, new_dist_info_vars, name='kl_sym'):
"""
Compute the symbolic KL divergence of two categorical distributions
"""
with tf.name_scope(name):
old_prob_var = old_dist_info_vars['prob']
new_prob_var = new_dist_info_vars['prob']
ndims = old_prob_var.get_shape().ndims
# Assume layout is N * A
return tf.reduce_sum(old_prob_var *
(tf.math.log(old_prob_var + TINY) -
tf.math.log(new_prob_var + TINY)),
axis=ndims - 1)
def kl(self, old_dist_info, new_dist_info):
"""
Compute the KL divergence of two categorical distributions
"""
old_prob = old_dist_info['prob']
new_prob = new_dist_info['prob']
return np.sum(old_prob *
(np.log(old_prob + TINY) - np.log(new_prob + TINY)),
axis=-1)
def likelihood_ratio_sym(self,
x_var,
old_dist_info_vars,
new_dist_info_vars,
name='likelihood_ratio_sym'):
with tf.name_scope(name):
old_prob_var = old_dist_info_vars['prob']
new_prob_var = new_dist_info_vars['prob']
ndims = old_prob_var.get_shape().ndims
x_var = tf.cast(x_var, tf.float32)
# Assume layout is N * A
return (tf.reduce_sum(new_prob_var * x_var, ndims - 1) + TINY) / \
(tf.reduce_sum(old_prob_var * x_var, ndims - 1) + TINY)
def entropy_sym(self, dist_info_vars):
with tf.name_scope('entropy_sym'):
probs = dist_info_vars['prob']
return -tf.reduce_sum(probs * tf.math.log(probs + TINY), axis=1)
def cross_entropy_sym(self,
old_dist_info_vars,
new_dist_info_vars,
name='cross_entropy_sym'):
with tf.name_scope(name):
old_prob_var = old_dist_info_vars['prob']
new_prob_var = new_dist_info_vars['prob']
ndims = old_prob_var.get_shape().ndims
# Assume layout is N * A
return tf.reduce_sum(old_prob_var *
(-tf.math.log(new_prob_var + TINY)),
axis=ndims - 1)
def entropy(self, info):
probs = info['prob']
return -np.sum(probs * np.log(probs + TINY), axis=-1)
def log_likelihood_sym(self,
x_var,
dist_info_vars,
name='log_likelihood_sym'):
with tf.name_scope(name):
probs = dist_info_vars['prob']
ndims = probs.get_shape().ndims
return tf.math.log(
tf.reduce_sum(probs * tf.cast(x_var, tf.float32), ndims - 1) +
TINY)
def log_likelihood(self, xs, dist_info):
probs = dist_info['prob']
# Assume layout is N * A
return np.log(np.sum(probs * xs, axis=-1) + TINY)
@property
def dist_info_specs(self):
return [('prob', (self.dim, ))]
def sample(self, dist_info):
return self._f_sample(dist_info['prob'])
def sample_sym(self, dist_info, name='sample_sym'):
with tf.name_scope(name):
probs = dist_info['prob']
# tf.multinomial is deprecated/removed; use tf.random.categorical,
# matching the sampler built in __init__.
samples = tf.random.categorical(tf.math.log(probs + 1e-8),
num_samples=1)[:, 0]
return tf.nn.embedding_lookup(np.eye(self.dim, dtype=np.float32),
samples)
| 4,788 | 36.124031 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/distributions/diagonal_gaussian.py | """Diagonal Gaussian Distribution."""
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.distributions.distribution import Distribution
class DiagonalGaussian(Distribution):
"""Diagonal Gaussian Distribution.
Args:
dim (int): Dimension of the distribution.
name (str): Name (scope) of the distribution.
"""
def __init__(self, dim, name='DiagonalGaussian'):
self._dim = dim
self._name = name
@property
def dim(self):
"""int: Dimension of the distribution."""
return self._dim
def kl(self, old_dist_info, new_dist_info):
"""KL Divergence between the old and the new distribution.
Args:
old_dist_info (dict): Parameters of the old distribution.
new_dist_info (dict): Parameters of the new distribution.
Returns:
float: KL Divergence between two distributions.
"""
old_means = old_dist_info['mean']
old_log_stds = old_dist_info['log_std']
new_means = new_dist_info['mean']
new_log_stds = new_dist_info['log_std']
# Compute the KL divergence of two multivariate Gaussian distribution
# with diagonal covariance matrices
old_std = np.exp(old_log_stds)
new_std = np.exp(new_log_stds)
# means: (N*A)
# std: (N*A)
# formula:
# { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
# ln(\sigma_2/\sigma_1)
numerator = np.square(old_means - new_means) + \
np.square(old_std) - np.square(new_std)
denominator = 2 * np.square(new_std) + 1e-8
return np.sum(numerator / denominator + new_log_stds - old_log_stds,
axis=-1)
def kl_sym(self, old_dist_info_vars, new_dist_info_vars, name='kl_sym'):
"""Symbolic KL between the old and the new distribution.
Args:
old_dist_info_vars (tf.Tensor): Symbolic parameters of
the old distribution.
new_dist_info_vars (tf.Tensor): Symbolic parameters of
the new distribution.
name (str): TensorFlow scope name.
Returns:
tf.Tensor: Symbolic KL divergence between the two distributions.
"""
with tf.name_scope(name):
old_means = old_dist_info_vars['mean']
old_log_stds = old_dist_info_vars['log_std']
new_means = new_dist_info_vars['mean']
new_log_stds = new_dist_info_vars['log_std']
# Compute the KL divergence of two multivariate
# Gaussian distribution with diagonal covariance matrices
old_std = tf.exp(old_log_stds)
new_std = tf.exp(new_log_stds)
# means: (N*A)
# std: (N*A)
# formula:
# { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
# ln(\sigma_2/\sigma_1)
numerator = tf.square(old_means - new_means) + \
tf.square(old_std) - tf.square(new_std)
denominator = 2 * tf.square(new_std) + 1e-8
return tf.reduce_sum(numerator / denominator + new_log_stds -
old_log_stds,
axis=-1)
def likelihood_ratio_sym(self,
x_var,
old_dist_info_vars,
new_dist_info_vars,
name='likelihood_ratio_sym'):
"""Symbolic likelihood ratio.
Args:
x_var (tf.Tensor): Input placeholder.
old_dist_info_vars (dict): Old distribution tensors.
new_dist_info_vars (dict): New distribution tensors.
name (str): TensorFlow scope name.
Returns:
tf.Tensor: Symbolic likelihood ratio.
"""
with tf.name_scope(name):
logli_new = self.log_likelihood_sym(x_var, new_dist_info_vars)
logli_old = self.log_likelihood_sym(x_var, old_dist_info_vars)
return tf.exp(logli_new - logli_old)
def log_likelihood_sym(self,
x_var,
dist_info_vars,
name='log_likelihood_sym'):
"""Symbolic log likelihood.
Args:
x_var (tf.Tensor): Input placeholder.
dist_info_vars (dict): Parameters of a distribution.
name (str): TensorFlow scope name.
Returns:
tf.Tensor: Symbolic log likelihood.
"""
with tf.name_scope(name):
means = dist_info_vars['mean']
log_stds = dist_info_vars['log_std']
zs = (x_var - means) / tf.exp(log_stds)
return - tf.reduce_sum(log_stds, axis=-1) - \
0.5 * tf.reduce_sum(tf.square(zs), axis=-1) - \
0.5 * self.dim * np.log(2 * np.pi)
def sample(self, dist_info):
"""Sample a value given a distribution.
Args:
dist_info (dict): Parameters of a distribution.
Returns:
np.ndarray: A sample from the distribution.
"""
# pylint: disable=no-self-use
means = dist_info['mean']
log_stds = dist_info['log_std']
rnd = np.random.normal(size=means.shape)
return rnd * np.exp(log_stds) + means
def sample_sym(self, dist_info_vars):
"""Sample a symbolic value given a distribution.
Args:
dist_info_vars (dict): Symbolic parameters of a distribution.
Returns:
tf.Tensor: A symbolic sample from the distribution.
"""
# pylint: disable=no-self-use
means = dist_info_vars['mean']
log_stds = dist_info_vars['log_std']
rnd = tf.random.normal(shape=tf.shape(means),
seed=deterministic.get_tf_seed_stream())
return rnd * tf.math.exp(log_stds) + means
def log_likelihood(self, xs, dist_info):
"""Log likelihood of a sample under a distribution.
Args:
xs (np.ndarray): Input value.
dist_info (dict): Parameters of a distribution.
Returns:
float: Log likelihood of a sample under the distribution.
"""
means = dist_info['mean']
log_stds = dist_info['log_std']
zs = (xs - means) / np.exp(log_stds)
return - np.sum(log_stds, axis=-1) - \
0.5 * np.sum(np.square(zs), axis=-1) - \
0.5 * self.dim * np.log(2 * np.pi)
def entropy(self, dist_info):
"""Entropy of a distribution.
Args:
dist_info (dict): Parameters of a distribution.
Returns:
float: Entropy of the distribution.
"""
log_stds = dist_info['log_std']
return np.sum(log_stds + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)
def entropy_sym(self, dist_info_vars, name='entropy_sym'):
"""Symbolic entropy of a distribution.
Args:
dist_info_vars (dict): Symbolic parameters of a distribution.
name (str): TensorFlow scope name.
Returns:
tf.Tensor: Symbolic entropy of the distribution.
"""
with tf.name_scope(name):
log_std_var = dist_info_vars['log_std']
return tf.reduce_sum(log_std_var +
np.log(np.sqrt(2 * np.pi * np.e)),
axis=-1)
@property
def dist_info_specs(self):
"""list: Specification of the parameter of a distribution."""
return [('mean', (self.dim, )), ('log_std', (self.dim, ))]
| 7,668 | 33.545045 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/distributions/distribution.py | """Distributions Base."""
class Distribution:
"""Base class for distribution."""
@property
def dim(self):
"""int: Dimension of this distribution."""
raise NotImplementedError
def kl_sym(self, old_dist_info_vars, new_dist_info_vars, name='kl_sym'):
"""Compute the symbolic KL divergence of two distributions.
Args:
old_dist_info_vars (tf.Tensor): Symbolic parameters of
the old distribution.
new_dist_info_vars (tf.Tensor): Symbolic parameters of
the new distribution.
name (str): TensorFlow scope name.
Returns:
tf.Tensor: Symbolic KL divergence between the two distributions.
"""
raise NotImplementedError
def kl(self, old_dist_info, new_dist_info):
"""Compute the KL divergence of two distributions.
Args:
old_dist_info (dict): Parameters of the old distribution.
new_dist_info (dict): Parameters of the new distribution.
Returns:
float: KL Divergence between two distributions.
"""
raise NotImplementedError
def likelihood_ratio_sym(self,
x_var,
old_dist_info_vars,
new_dist_info_vars,
name='ll_ratio_sym'):
"""Symbolic likelihood ratio.
Args:
x_var (tf.Tensor): Input placeholder.
old_dist_info_vars (dict): Old distribution tensors.
new_dist_info_vars (dict): New distribution tensors.
name (str): TensorFlow scope name.
Returns:
tf.Tensor: Symbolic likelihood ratio.
"""
raise NotImplementedError
def entropy(self, dist_info):
"""Entropy of a distribution.
Args:
dist_info (dict): Parameters of a distribution.
Returns:
float: Entropy of the distribution.
"""
raise NotImplementedError
def entropy_sym(self, dist_info_vars, name='entropy_sym'):
"""Symbolic entropy of a distribution.
Args:
dist_info_vars (dict): Symbolic parameters of a distribution.
name (str): TensorFlow scope name.
Returns:
tf.Tensor: Symbolic entropy of the distribution.
"""
raise NotImplementedError
def log_likelihood_sym(self, x_var, dist_info_vars, name='ll_sym'):
"""Symbolic log likelihood.
Args:
x_var (tf.Tensor): Input placeholder.
dist_info_vars (dict): Parameters of a distribution.
name (str): TensorFlow scope name.
Returns:
tf.Tensor: Symbolic log likelihood.
"""
raise NotImplementedError
def log_likelihood(self, xs, dist_info):
"""Log likelihood of a sample under a distribution.
Args:
xs (np.ndarray): Input value.
dist_info (dict): Parameters of a distribution.
Returns:
float: Log likelihood of a sample under the distribution.
"""
raise NotImplementedError
@property
def dist_info_specs(self):
"""list: Specification of the parameter of a distribution."""
raise NotImplementedError
@property
def dist_info_keys(self):
"""list: Parameter names."""
return [k for k, _ in self.dist_info_specs]
| 3,445 | 27.479339 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/distributions/recurrent_categorical.py | # flake8: noqa
# pylint: skip-file
import numpy as np
import tensorflow as tf
from garage.tf.distributions.categorical import Categorical
from garage.tf.distributions.distribution import Distribution
TINY = 1e-8
class RecurrentCategorical(Distribution):
def __init__(self, dim, name='RecurrentCategorical'):
self._cat = Categorical(dim, name)
self._dim = dim
self._name = name
@property
def dim(self):
return self._dim
def kl_sym(self, old_dist_info_vars, new_dist_info_vars, name='kl_sym'):
"""
Compute the symbolic KL divergence of two categorical distributions
"""
with tf.name_scope(name):
old_prob_var = old_dist_info_vars['prob']
new_prob_var = new_dist_info_vars['prob']
# Assume layout is N * T * A
return tf.reduce_sum(old_prob_var *
(tf.math.log(old_prob_var + TINY) -
tf.math.log(new_prob_var + TINY)),
axis=2)
def kl(self, old_dist_info, new_dist_info):
"""
Compute the KL divergence of two categorical distributions
"""
old_prob = old_dist_info['prob']
new_prob = new_dist_info['prob']
return np.sum(old_prob *
(np.log(old_prob + TINY) - np.log(new_prob + TINY)),
axis=2)
def likelihood_ratio_sym(self,
x_var,
old_dist_info_vars,
new_dist_info_vars,
name='likelihood_ratio_sym'):
with tf.name_scope(name):
old_prob_var = old_dist_info_vars['prob']
new_prob_var = new_dist_info_vars['prob']
# Assume layout is N * T * A
a_dim = tf.shape(x_var)[2]
flat_ratios = self._cat.likelihood_ratio_sym(
tf.reshape(x_var, tf.stack([-1, a_dim])),
dict(prob=tf.reshape(old_prob_var, tf.stack([-1, a_dim]))),
dict(prob=tf.reshape(new_prob_var, tf.stack([-1, a_dim]))))
return tf.reshape(flat_ratios, tf.shape(old_prob_var)[:2])
def entropy(self, dist_info):
probs = dist_info['prob']
return -np.sum(probs * np.log(probs + TINY), axis=2)
def entropy_sym(self, dist_info_vars, name='entropy_sym'):
with tf.name_scope(name):
probs = dist_info_vars['prob']
return -tf.reduce_sum(probs * tf.math.log(probs + TINY), 2)
def log_likelihood_sym(self, xs, dist_info_vars,
name='log_likelihood_sym'):
with tf.name_scope(name):
probs = dist_info_vars['prob']
# Assume layout is N * T * A
a_dim = tf.shape(probs)[2]
flat_logli = self._cat.log_likelihood_sym(
tf.reshape(xs, tf.stack([-1, a_dim])),
dict(prob=tf.reshape(probs, tf.stack((-1, a_dim)))))
return tf.reshape(flat_logli, tf.shape(probs)[:2])
def log_likelihood(self, xs, dist_info):
probs = dist_info['prob']
# Assume layout is N * T * A
# Non-symbolic (numpy) path: use numpy shapes and the plain
# Categorical.log_likelihood rather than the symbolic variant.
a_dim = probs.shape[2]
flat_logli = self._cat.log_likelihood(
xs.reshape((-1, a_dim)), dict(prob=probs.reshape((-1, a_dim))))
return flat_logli.reshape(probs.shape[:2])
@property
def dist_info_specs(self):
return [('prob', (self.dim, ))]
| 3,474 | 36.365591 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/distributions/recurrent_diagonal_gaussian.py | from garage.tf.distributions.diagonal_gaussian import DiagonalGaussian
RecurrentDiagonalGaussian = DiagonalGaussian
| 117 | 28.5 | 70 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/embeddings/__init__.py | """Embeddings."""
from garage.tf.embeddings.encoder import Encoder, StochasticEncoder
from garage.tf.embeddings.gaussian_mlp_encoder import GaussianMLPEncoder
__all__ = ['Encoder', 'StochasticEncoder', 'GaussianMLPEncoder']
| 225 | 36.666667 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/embeddings/encoder.py | """Encoders in TensorFlow."""
# pylint: disable=abstract-method
from garage.np.embeddings import Encoder as BaseEncoder
from garage.np.embeddings import StochasticEncoder as BaseStochasticEncoder
from garage.tf.models import Module, StochasticModule
class Encoder(BaseEncoder, Module):
"""Base class for encoders in TensorFlow."""
def get_latent(self, input_value):
"""Get a sample of embedding for the given input.
Args:
input_value (numpy.ndarray): Tensor to encode.
Returns:
numpy.ndarray: An embedding sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
"""
def get_latents(self, input_values):
"""Get samples of embedding for the given inputs.
Args:
input_values (numpy.ndarray): Tensors to encode.
Returns:
numpy.ndarray: Embeddings sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (list[numpy.ndarray]): Means of the distribution.
- log_std (list[numpy.ndarray]): Log standard deviations of the
distribution.
"""
def clone(self, name):
"""Return a clone of the encoder.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created encoder. It has to be
different from source encoder if cloned under the same
computational graph.
Returns:
garage.tf.embeddings.encoder.Encoder: Newly cloned encoder.
"""
class StochasticEncoder(BaseStochasticEncoder, StochasticModule):
"""Base class for stochastic encoders in TensorFlow."""
def build(self, embedding_input, name=None):
"""Build encoder.
After being built, self.distribution is a Gaussian distribution conditioned
on embedding_input.
Args:
embedding_input (tf.Tensor) : Embedding input.
name (str): Name of the model, which is also the name scope.
"""
| 2,401 | 30.194805 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/embeddings/gaussian_mlp_encoder.py | """GaussianMLPEncoder."""
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.embeddings import StochasticEncoder
from garage.tf.models import GaussianMLPModel, StochasticModule
class GaussianMLPEncoder(StochasticEncoder, StochasticModule):
"""GaussianMLPEncoder with GaussianMLPModel.
An encoder that uses an MLP to parameterize a Gaussian distribution
over latent embeddings.
Args:
embedding_spec (garage.InOutSpec):
Encoder specification.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
init_std (float): Initial value for std.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
std_output_nonlinearity (callable): Nonlinearity for output layer in
the std network. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
std_parameterization (str): How the std should be parametrized. There
are a few options:
- exp: the logarithm of the std will be stored, and applied a
exponential transformation
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
embedding_spec,
name='GaussianMLPEncoder',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=None,
std_parameterization='exp',
layer_normalization=False):
super().__init__(name)
self._embedding_spec = embedding_spec
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._learn_std = learn_std
self._adaptive_std = adaptive_std
self._std_share_network = std_share_network
self._init_std = init_std
self._min_std = min_std
self._max_std = max_std
self._std_hidden_sizes = std_hidden_sizes
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_output_nonlinearity = std_output_nonlinearity
self._std_parameterization = std_parameterization
self._layer_normalization = layer_normalization
self._latent_dim = embedding_spec.output_space.flat_dim
self._input_dim = embedding_spec.input_space.flat_dim
self._network = None
self._f_dist = None
self.model = GaussianMLPModel(
output_dim=self._latent_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
adaptive_std=adaptive_std,
std_share_network=std_share_network,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_hidden_sizes=std_hidden_sizes,
std_hidden_nonlinearity=std_hidden_nonlinearity,
std_output_nonlinearity=std_output_nonlinearity,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization,
name='GaussianMLPModel')
self._initialize()
def _initialize(self):
"""Initialize encoder."""
embedding_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self._input_dim),
name='default_encoder')
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
self._network = self.model.build(embedding_input)
self._f_dist = tf.compat.v1.get_default_session().make_callable(
[
self._network.dist.sample(
seed=deterministic.get_tf_seed_stream()),
self._network.mean, self._network.log_std
],
feed_list=[embedding_input])
def build(self, embedding_input, name=None):
"""Build encoder.
Args:
embedding_input (tf.Tensor): Embedding input.
name (str): Name of the model, which is also the name scope.
Returns:
tfp.distributions.MultivariateNormalDiag: Distribution.
tf.Tensor: Mean.
tf.Tensor: Log of standard deviation.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
return self.model.build(embedding_input, name=name)
@property
def spec(self):
"""garage.InOutSpec: Specification of input and output."""
return self._embedding_spec
@property
def input_dim(self):
"""int: Dimension of the encoder input."""
return self._embedding_spec.input_space.flat_dim
@property
def output_dim(self):
"""int: Dimension of the encoder output (embedding)."""
return self._embedding_spec.output_space.flat_dim
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
def get_latent(self, input_value):
"""Get a sample of embedding for the given input.
Args:
input_value (numpy.ndarray): Tensor to encode.
Returns:
numpy.ndarray: An embedding sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (numpy.ndarray): Mean of the distribution.
- log_std (numpy.ndarray): Log standard deviation of the
distribution.
"""
flat_input = self._embedding_spec.input_space.flatten(input_value)
sample, mean, log_std = self._f_dist(np.expand_dims([flat_input], 1))
sample = self._embedding_spec.output_space.unflatten(
np.squeeze(sample, 1)[0])
mean = self._embedding_spec.output_space.unflatten(
np.squeeze(mean, 1)[0])
log_std = self._embedding_spec.output_space.unflatten(
np.squeeze(log_std, 1)[0])
return sample, dict(mean=mean, log_std=log_std)
def get_latents(self, input_values):
"""Get samples of embedding for the given inputs.
Args:
input_values (numpy.ndarray): Tensors to encode.
Returns:
numpy.ndarray: Embeddings sampled from embedding distribution.
dict: Embedding distribution information.
Note:
It returns an embedding and a dict, with keys
- mean (list[numpy.ndarray]): Means of the distribution.
- log_std (list[numpy.ndarray]): Log standard deviations of the
distribution.
"""
flat_input = self._embedding_spec.input_space.flatten_n(input_values)
samples, means, log_stds = self._f_dist(np.expand_dims(flat_input, 1))
samples = self._embedding_spec.output_space.unflatten_n(
np.squeeze(samples, 1))
means = self._embedding_spec.output_space.unflatten_n(
np.squeeze(means, 1))
log_stds = self._embedding_spec.output_space.unflatten_n(
np.squeeze(log_stds, 1))
return samples, dict(mean=means, log_std=log_stds)
@property
def distribution(self):
"""Encoder distribution.
Returns:
tfp.Distribution.MultivariateNormalDiag: Encoder distribution.
"""
return self._network.dist
@property
def input(self):
"""tf.Tensor: Input to encoder network."""
return self._network.input
@property
def latent_mean(self):
"""tf.Tensor: Predicted mean of a Gaussian distribution."""
return self._network.mean
@property
def latent_std_param(self):
"""tf.Tensor: Predicted std of a Gaussian distribution."""
return self._network.log_std
def clone(self, name):
"""Return a clone of the encoder.
It copies the configuration of the primitive and also the parameters.
Args:
name (str): Name of the newly created encoder. It has to be
different from source encoder if cloned under the same
computational graph.
Returns:
garage.tf.embeddings.encoder.Encoder: Newly cloned encoder.
"""
new_encoder = self.__class__(
embedding_spec=self._embedding_spec,
name=name,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
learn_std=self._learn_std,
adaptive_std=self._adaptive_std,
std_share_network=self._std_share_network,
init_std=self._init_std,
min_std=self._min_std,
max_std=self._max_std,
std_hidden_sizes=self._std_hidden_sizes,
std_hidden_nonlinearity=self._std_hidden_nonlinearity,
std_output_nonlinearity=self._std_output_nonlinearity,
std_parameterization=self._std_parameterization,
layer_normalization=self._layer_normalization)
new_encoder.model.parameters = self.model.parameters
return new_encoder
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_f_dist']
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Parameters to restore from snapshot.
Args:
state (dict): Parameters to restore from.
"""
super().__setstate__(state)
self._initialize()
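# A minimal usage sketch of the encoder above, assuming `encoder` is an
# already-constructed instance of this class and `task_onehot` lies in its
# input space; names and shapes here are illustrative assumptions.
def _example_encoder_usage(encoder, task_onehot):
    """Sample latents for a single input and for a batch of inputs."""
    # Single input: one latent sample plus the Gaussian parameters.
    latent, info = encoder.get_latent(task_onehot)
    # Batched inputs: the same pattern, vectorized over the first axis.
    latents, infos = encoder.get_latents([task_onehot, task_onehot])
    return latent, info['mean'], info['log_std'], latents, infos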
| 13,245 | 38.777778 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/misc/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/misc/tensor_utils.py | """Tensor utility functions for tensorflow."""
from collections import namedtuple
from collections.abc import Iterable
import numpy as np
import tensorflow as tf
# pylint: disable=unused-argument
# yapf: disable
# pylint: disable=missing-return-doc, missing-return-type-doc
def compile_function(inputs, outputs, log_name=None):
"""Compiles a tensorflow function using the current session.
Args:
inputs (list[tf.Tensor]): Inputs to the function. Can be a list of
inputs or just one.
outputs (list[tf.Tensor]): Outputs of the function. Can be a list of
outputs or just one.
log_name (string): Name of the function. This is None by default.
Returns:
function: Compiled tensorflow function.
"""
def run(*input_vals):
sess = tf.compat.v1.get_default_session()
return sess.run(outputs, feed_dict=dict(list(zip(inputs, input_vals))))
return run
# yapf: enable
# pylint: enable=missing-return-doc, missing-return-type-doc
# pylint: enable=unused-argument
def get_target_ops(variables, target_variables, tau=None):
"""Get target variables update operations.
In RL algorithms we often update target network every n
steps. This function returns the tf.Operation for updating
    target variables (denoted by target_var) from variables
    (denoted by var) with fraction tau. In other words, each time
    we keep tau of var and add (1 - tau) of target_var to form
    the new target_var.
Args:
        variables (list[tf.Variable]): Source variables for update.
target_variables (list[tf.Variable]): Target variables to
be updated.
tau (float): Fraction to update. Set it to be None for
hard-update.
Returns:
tf.Operation: Operation for updating the target variables.
"""
update_ops = []
init_ops = []
assert len(variables) == len(target_variables)
for var, target_var in zip(variables, target_variables):
init_ops.append(tf.compat.v1.assign(target_var, var))
if tau is not None:
update_ops.append(
tf.compat.v1.assign(target_var,
tau * var + (1.0 - tau) * target_var))
if tau is not None:
return init_ops, update_ops
else:
return init_ops
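# A minimal sketch of the soft target-update loop described above, assuming
# `qf_vars`/`target_qf_vars` are matching variable lists and tau=0.005.
def _example_soft_target_update(sess, qf_vars, target_qf_vars):
    init_ops, update_ops = get_target_ops(qf_vars, target_qf_vars, tau=0.005)
    sess.run(init_ops)  # hard-copy the weights once at the start of training
    for _ in range(1000):
        # ... one gradient step on the online network would happen here ...
        sess.run(update_ops)  # then polyak-average the target network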
def flatten_batch(t, name='flatten_batch'):
"""Flatten a batch of observations.
Reshape a tensor of size (X, Y, Z) into (X*Y, Z)
Args:
t (tf.Tensor): Tensor to flatten.
name (string): Name of the operation.
Returns:
tf.Tensor: Flattened tensor.
"""
return tf.reshape(t, [-1] + list(t.shape[2:]), name=name)
def flatten_batch_dict(d, name='flatten_batch_dict'):
"""Flatten a batch of observations represented as a dict.
Args:
d (dict[tf.Tensor]): A dict of Tensors to flatten.
name (string): The name of the operation (None by default).
Returns:
dict[tf.Tensor]: A dict with flattened tensors.
"""
with tf.name_scope(name):
return {k: flatten_batch(v) for k, v in d.items()}
def filter_valids(t, valid, name='filter_valids'):
"""Filter out tensor using valid array.
Args:
t (tf.Tensor): The tensor to filter.
        valid (list[float]): Array marking the valid entries of t (each
            element is either 0 or 1).
name (string): Name of the operation.
Returns:
tf.Tensor: Filtered Tensor.
"""
    # Must round before cast to prevent floating-point error
return tf.dynamic_partition(t,
tf.cast(tf.round(valid), tf.int32),
2,
name=name)[1]
def filter_valids_dict(d, valid, name='filter_valids_dict'):
"""Filter valid values on a dict.
Args:
d (dict[tf.Tensor]): Dict of tensors to be filtered.
        valid (list[float]): Array marking the valid entries (each element
            is either 0 or 1).
name (string): Name of the operation. None by default.
Returns:
dict[tf.Tensor]: Dict with filtered tensors.
"""
with tf.name_scope(name):
return {k: filter_valids(v, valid) for k, v in d.items()}
def graph_inputs(name, **kwargs):
"""Creates a namedtuple of the given keys and values.
Args:
name (string): Name of the tuple.
kwargs (tf.Tensor): One or more tensor(s) to add to the
namedtuple's values. The parameter names are used as keys
in the namedtuple. Ex. obs1=tensor1, obs2=tensor2.
Returns:
namedtuple: Namedtuple containing the collection of variables
passed.
"""
Singleton = namedtuple(name, kwargs.keys())
return Singleton(**kwargs)
# yapf: disable
# pylint: disable=missing-yield-doc
# pylint: disable=missing-yield-type-doc
def flatten_inputs(deep):
"""Flattens an Iterable recursively.
Args:
deep (Iterable): An Iterable to flatten.
Returns:
List: The flattened result.
"""
def flatten(deep):
for d in deep:
if isinstance(d, Iterable) and not isinstance(
d, (str, bytes, tf.Tensor, np.ndarray)):
yield from flatten(d)
else:
yield d
return list(flatten(deep))
# pylint: enable=missing-yield-doc
# pylint: enable=missing-yield-type-doc
# yapf: enable
def flatten_tensor_variables(ts):
"""Flattens a list of tensors into a single, 1-dimensional tensor.
Args:
ts (Iterable): Iterable containing either tf.Tensors or arrays.
Returns:
tf.Tensor: Flattened Tensor.
"""
return tf.concat(axis=0,
values=[tf.reshape(x, [-1]) for x in ts],
name='flatten_tensor_variables')
def new_tensor(name, ndim, dtype):
"""Creates a placeholder tf.Tensor with the specified arguments.
Args:
name (string): Name of the tf.Tensor.
ndim (int): Number of dimensions of the tf.Tensor.
dtype (type): Data type of the tf.Tensor's contents.
Returns:
tf.Tensor: Placeholder tensor.
"""
return tf.compat.v1.placeholder(dtype=dtype,
shape=[None] * ndim,
name=name)
def new_tensor_like(name, arr_like):
"""Creates a new placeholder tf.Tensor similar to arr_like.
The new tf.Tensor has the same number of dimensions and
dtype as arr_like.
Args:
name (string): Name of the new tf.Tensor.
arr_like (tf.Tensor): Tensor to copy attributes from.
Returns:
tf.Tensor: New placeholder tensor.
"""
return new_tensor(name,
arr_like.get_shape().ndims, arr_like.dtype.base_dtype)
def concat_tensor_list(tensor_list):
"""Concatenates a list of tensors into one tensor.
Args:
tensor_list (list[ndarray]): list of tensors.
Return:
ndarray: Concatenated tensor.
"""
return np.concatenate(tensor_list, axis=0)
def concat_tensor_dict_list(tensor_dict_list):
"""Concatenates a dict of tensors lists.
Each list of tensors gets concatenated into one tensor.
Args:
        tensor_dict_list (list[dict]): A list of dicts of {tensors or
            dictionaries of tensors} to concatenate.
Returns:
dict[ndarray]: A dict with the concatenated tensors.
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
if isinstance(example, dict):
v = concat_tensor_dict_list([x[k] for x in tensor_dict_list])
else:
v = concat_tensor_list([x[k] for x in tensor_dict_list])
ret[k] = v
return ret
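# A minimal sketch of concatenating two path dicts; the keys and shapes are
# arbitrary and only illustrate how nested dicts are handled.
def _example_concat_tensor_dict_list():
    path_a = dict(observations=np.zeros((3, 2)),
                  infos=dict(stage=np.zeros(3)))
    path_b = dict(observations=np.ones((5, 2)),
                  infos=dict(stage=np.ones(5)))
    merged = concat_tensor_dict_list([path_a, path_b])
    assert merged['observations'].shape == (8, 2)
    assert merged['infos']['stage'].shape == (8, )
    return merged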
def stack_tensor_dict_list(tensor_dict_list):
"""Stack a list of dictionaries of {tensors or dictionary of tensors}.
Args:
        tensor_dict_list (list[dict]): a list of dictionaries of {tensors or
            dictionary of tensors}.
Returns:
dict: a dictionary of {stacked tensors or dictionary of stacked
tensors}.
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
if isinstance(example, dict):
v = stack_tensor_dict_list([x[k] for x in tensor_dict_list])
else:
v = np.array([x[k] for x in tensor_dict_list])
ret[k] = v
return ret
def split_tensor_dict_list(tensor_dict):
"""Split a list of dictionaries of {tensors or dictionary of tensors}.
Args:
        tensor_dict (dict): a dictionary of {tensors or dictionary of
            tensors} to be split.
Returns:
        list[dict]: a list of dictionaries of {split tensors or
            dictionaries of split tensors}.
"""
keys = list(tensor_dict.keys())
ret = None
for k in keys:
vals = tensor_dict[k]
if isinstance(vals, dict):
vals = split_tensor_dict_list(vals)
if ret is None:
ret = [{k: v} for v in vals]
else:
for v, cur_dict in zip(vals, ret):
cur_dict[k] = v
return ret
def pad_tensor(x, max_len):
"""Pad tensors with zeros.
Args:
x (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
"""
return np.concatenate([
x,
np.tile(np.zeros_like(x[0]),
(max_len - len(x), ) + (1, ) * np.ndim(x[0]))
])
def pad_tensor_n(xs, max_len):
"""Pad array of tensors.
Args:
xs (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
"""
ret = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
for idx, x in enumerate(xs):
ret[idx][:len(x)] = x
return ret
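# A minimal sketch of padding variable-length rollout arrays; the lengths
# used here are arbitrary.
def _example_pad_tensor_n():
    rewards = [np.ones(3), np.ones(5)]
    padded = pad_tensor_n(rewards, max_len=6)
    assert padded.shape == (2, 6)
    assert padded[0, 3:].sum() == 0  # the short rollout is zero-padded
    return padded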
def pad_tensor_dict(tensor_dict, max_len):
"""Pad dictionary of tensors with zeros.
Args:
tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
max_len (int): Maximum length.
Returns:
dict[numpy.ndarray]: Padded tensor.
"""
keys = list(tensor_dict.keys())
ret = dict()
for k in keys:
if isinstance(tensor_dict[k], dict):
ret[k] = pad_tensor_dict(tensor_dict[k], max_len)
else:
ret[k] = pad_tensor(tensor_dict[k], max_len)
return ret
def compute_advantages(discount,
gae_lambda,
max_len,
baselines,
rewards,
name='compute_advantages'):
"""Calculate advantages.
Advantages are a discounted cumulative sum.
    The discounted cumulative sum can be represented as an IIR
    filter on the reversed input vectors, i.e.
y[t] - discount*y[t+1] = x[t], or
rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]
Given the time-domain IIR filter step response, we can
calculate the filter response to our signal by convolving the
signal with the filter response function. The time-domain IIR
step response is calculated below as discount_filter:
discount_filter = [1, discount, discount^2, ..., discount^N-1]
    where the episode length is N.
We convolve discount_filter with the reversed time-domain
signal deltas to calculate the reversed advantages:
rev(advantages) = discount_filter (X) rev(deltas)
TensorFlow's tf.nn.conv1d op is not a true convolution, but
actually a cross-correlation, so its input and output are
already implicitly reversed for us.
advantages = discount_filter (tf.nn.conv1d) deltas
Args:
discount (float): Discount factor.
gae_lambda (float): Lambda, as used for Generalized Advantage
Estimation (GAE).
max_len (int): Maximum length of a single rollout.
baselines (tf.Tensor): A 2D vector of value function estimates with
shape (N, T), where N is the batch dimension (number of episodes)
and T is the maximum path length experienced by the agent.
rewards (tf.Tensor): A 2D vector of per-step rewards with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent.
name (string): Name of the operation.
Returns:
tf.Tensor: A 2D vector of calculated advantage values with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent.
"""
with tf.name_scope(name):
# Prepare convolutional IIR filter to calculate advantages
gamma_lambda = tf.constant(float(discount) * float(gae_lambda),
dtype=tf.float32,
shape=[max_len, 1, 1])
advantage_filter = tf.compat.v1.cumprod(gamma_lambda, exclusive=True)
# Calculate deltas
pad = tf.zeros_like(baselines[:, :1])
baseline_shift = tf.concat([baselines[:, 1:], pad], 1)
deltas = rewards + discount * baseline_shift - baselines
# Convolve deltas with the discount filter to get advantages
deltas_pad = tf.expand_dims(tf.concat(
[deltas, tf.zeros_like(deltas[:, :-1])], axis=1),
axis=2)
adv = tf.nn.conv1d(deltas_pad,
advantage_filter,
stride=1,
padding='VALID')
advantages = tf.reshape(adv, [-1])
return advantages
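# A minimal sketch of wiring compute_advantages into a graph, assuming
# (N, T) reward/baseline placeholders with T = max_len = 100 and typical
# discount/lambda values.
def _example_compute_advantages():
    max_len = 100
    rewards = tf.compat.v1.placeholder(tf.float32, (None, max_len),
                                       name='rewards')
    baselines = tf.compat.v1.placeholder(tf.float32, (None, max_len),
                                         name='baselines')
    # Returns a flattened (N * T,) tensor of GAE-style advantages.
    return compute_advantages(discount=0.99,
                              gae_lambda=0.97,
                              max_len=max_len,
                              baselines=baselines,
                              rewards=rewards)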
def center_advs(advs, axes, eps, offset=0, scale=1, name='center_adv'):
"""Normalize the advs tensor.
This calculates the mean and variance using the axes specified
and normalizes the tensor using those values.
Args:
advs (tf.Tensor): Tensor to normalize.
axes (array[int]): Axes along which to compute the mean and variance.
eps (float): Small number to avoid dividing by zero.
offset (tf.Tensor): Offset added to the normalized tensor.
This is zero by default.
scale (tf.Tensor): Scale to apply to the normalized tensor. This is
1 by default but can also be None.
name (string): Name of the operation. None by default.
Returns:
tf.Tensor: Normalized, scaled and offset tensor.
"""
with tf.name_scope(name):
mean, var = tf.nn.moments(advs, axes=axes)
advs = tf.nn.batch_normalization(advs, mean, var, offset, scale, eps)
return advs
def positive_advs(advs, eps, name='positive_adv'):
"""Make all the values in the advs tensor positive.
Offsets all values in advs by the minimum value in
the tensor, plus an epsilon value to avoid dividing by zero.
Args:
advs (tf.Tensor): The tensor to offset.
        eps (tf.float32): A small value to avoid division by zero.
name (string): Name of the operation.
Returns:
        tf.Tensor: Tensor with modified (positive) values.
"""
with tf.name_scope(name):
m = tf.reduce_min(advs)
advs = (advs - m) + eps
return advs
def discounted_returns(discount, max_len, rewards, name='discounted_returns'):
"""Calculate discounted returns.
Args:
discount (float): Discount factor.
max_len (int): Maximum length of a single rollout.
rewards (tf.Tensor): A 2D vector of per-step rewards with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent.
name (string): Name of the operation. None by default.
Returns:
tf.Tensor: Tensor of discounted returns.
"""
with tf.name_scope(name):
gamma = tf.constant(float(discount),
dtype=tf.float32,
shape=[max_len, 1, 1])
return_filter = tf.math.cumprod(gamma, exclusive=True)
rewards_pad = tf.expand_dims(tf.concat(
[rewards, tf.zeros_like(rewards[:, :-1])], axis=1),
axis=2)
returns = tf.nn.conv1d(rewards_pad,
return_filter,
stride=1,
padding='VALID')
return returns
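# A minimal sketch of the analogous discounted-return computation, assuming
# the same (N, T) reward layout as compute_advantages above.
def _example_discounted_returns(rewards, max_len):
    # Note the result keeps the trailing channel axis from conv1d, so the
    # returned tensor has shape (N, T, 1).
    return discounted_returns(discount=0.99, max_len=max_len, rewards=rewards)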
| 16,285 | 30.684825 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/__init__.py | """Network Models."""
from garage.tf.models.categorical_cnn_model import CategoricalCNNModel
from garage.tf.models.categorical_gru_model import CategoricalGRUModel
from garage.tf.models.categorical_lstm_model import CategoricalLSTMModel
from garage.tf.models.categorical_mlp_model import CategoricalMLPModel
from garage.tf.models.cnn_mlp_merge_model import CNNMLPMergeModel
from garage.tf.models.cnn_model import CNNModel
from garage.tf.models.cnn_model_max_pooling import CNNModelWithMaxPooling
from garage.tf.models.gaussian_cnn_model import GaussianCNNModel
from garage.tf.models.gaussian_gru_model import GaussianGRUModel
from garage.tf.models.gaussian_lstm_model import GaussianLSTMModel
from garage.tf.models.gaussian_mlp_model import GaussianMLPModel
from garage.tf.models.gru_model import GRUModel
from garage.tf.models.lstm_model import LSTMModel
from garage.tf.models.mlp_dueling_model import MLPDuelingModel
from garage.tf.models.mlp_merge_model import MLPMergeModel
from garage.tf.models.mlp_model import MLPModel
from garage.tf.models.model import BaseModel, Model
from garage.tf.models.module import Module, StochasticModule
from garage.tf.models.normalized_input_mlp_model import (
NormalizedInputMLPModel)
from garage.tf.models.sequential import Sequential
__all__ = [
'BaseModel', 'CategoricalCNNModel', 'CategoricalGRUModel',
'CategoricalLSTMModel', 'CategoricalMLPModel', 'CNNMLPMergeModel',
'CNNModel', 'CNNModelWithMaxPooling', 'LSTMModel', 'Model', 'Module',
'GaussianCNNModel', 'GaussianGRUModel', 'GaussianLSTMModel',
'GaussianMLPModel', 'GRUModel', 'MLPDuelingModel', 'MLPMergeModel',
'MLPModel', 'NormalizedInputMLPModel', 'Sequential', 'StochasticModule'
]
| 1,713 | 52.5625 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/categorical_cnn_model.py | """Categorical CNN Model.
A model represented by a Categorical distribution
which is parameterized by a convolutional neural network (CNN)
followed a multilayer perceptron (MLP).
"""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.categorical_mlp_model import CategoricalMLPModel
from garage.tf.models.cnn_model import CNNModel
from garage.tf.models.model import Model
class CategoricalCNNModel(Model):
"""Categorical CNN Model.
A model represented by a Categorical distribution
which is parameterized by a convolutional neural network (CNN) followed
by a multilayer perceptron (MLP).
Args:
output_dim (int): Dimension of the network output.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
name (str): Model name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
filters,
strides,
padding,
name=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name)
self._cnn_model = CNNModel(filters=filters,
strides=strides,
padding=padding,
hidden_nonlinearity=hidden_nonlinearity,
name='CNNModel')
self._mlp_model = CategoricalMLPModel(
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization,
name='MLPModel')
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return self._mlp_model.network_output_spec()
# pylint: disable=arguments-differ
def _build(self, state_input, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Observation inputs.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
"""
time_dim = tf.shape(state_input)[1]
dim = state_input.get_shape()[2:].as_list()
state_input = tf.reshape(state_input, [-1, *dim])
cnn_output = self._cnn_model.build(state_input, name=name).outputs
dim = cnn_output.get_shape()[-1]
cnn_output = tf.reshape(cnn_output, [-1, time_dim, dim])
mlp_output = self._mlp_model.build(cnn_output, name=name).dist
return mlp_output
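# A minimal sketch of building this model, assuming 32x32 RGB observations
# with a (batch, time, H, W, C) placeholder and 4 discrete actions; the
# filter spec below is arbitrary.
def _example_categorical_cnn_model():
    model = CategoricalCNNModel(output_dim=4,
                                filters=((32, (3, 3)), (64, (3, 3))),
                                strides=(2, 2),
                                padding='SAME')
    obs = tf.compat.v1.placeholder(tf.float32, (None, None, 32, 32, 3))
    dist = model.build(obs).dist
    return model, dist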
| 5,373 | 41.992 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/categorical_gru_model.py | """Categorical GRU Model.
A model represented by a Categorical distribution
which is parameterized by a Gated Recurrent Unit (GRU).
"""
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models.gru_model import GRUModel
class CategoricalGRUModel(GRUModel):
"""Categorical GRU Model.
A model represented by a Categorical distribution
which is parameterized by a Gated Recurrent Unit (GRU).
Args:
output_dim (int): Dimension of the network output.
hidden_dim (int): Hidden dimension for GRU cell.
name (str): Policy name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
hidden_dim,
name=None,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
layer_normalization=False):
super().__init__(
output_dim=output_dim,
hidden_dim=hidden_dim,
name=name,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
recurrent_nonlinearity=recurrent_nonlinearity,
recurrent_w_init=recurrent_w_init,
output_nonlinearity=tf.nn.softmax,
output_w_init=output_w_init,
output_b_init=output_b_init,
hidden_state_init=hidden_state_init,
hidden_state_init_trainable=hidden_state_init_trainable,
layer_normalization=layer_normalization)
self._output_normalization_fn = output_nonlinearity
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return ['dist', 'step_output', 'step_hidden', 'init_hidden']
# pylint: disable=arguments-differ
def _build(self, state_input, step_input, step_hidden, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Full observation input, with shape
:math:`(N, T, S^*)`.
step_input (tf.Tensor): Step observation input, with shape
:math:`(N, S^*)`.
step_hidden (tf.Tensor): Hidden state for step, with shape
:math:`(N, S^*)`.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state , used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`.
"""
outputs, step_output, step_hidden, init_hidden = super()._build(
state_input, step_input, step_hidden, name=name)
if self._output_normalization_fn:
outputs = self._output_normalization_fn(outputs)
dist = tfp.distributions.OneHotCategorical(probs=outputs)
return dist, step_output, step_hidden, init_hidden
| 5,604 | 42.789063 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/categorical_lstm_model.py | """Categorical LSTM Model.
A model represented by a Categorical distribution
which is parameterized by a Long short-term memory (LSTM).
"""
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models.lstm_model import LSTMModel
class CategoricalLSTMModel(LSTMModel):
"""Categorical LSTM Model.
A model represented by a Categorical distribution
which is parameterized by a Long short-term memory (LSTM).
Args:
output_dim (int): Dimension of the network output.
hidden_dim (int): Hidden dimension for LSTM cell.
name (str): Policy name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
cell_state_init (callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
cell_state_init_trainable (bool): Bool for whether the initial
cell state is trainable.
forget_bias (bool): If True, add 1 to the bias of the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
hidden_dim,
name=None,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
cell_state_init=tf.zeros_initializer(),
cell_state_init_trainable=False,
forget_bias=True,
layer_normalization=False):
super().__init__(
output_dim=output_dim,
hidden_dim=hidden_dim,
name=name,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
recurrent_nonlinearity=recurrent_nonlinearity,
recurrent_w_init=recurrent_w_init,
output_nonlinearity=tf.nn.softmax,
output_w_init=output_w_init,
output_b_init=output_b_init,
hidden_state_init=hidden_state_init,
hidden_state_init_trainable=hidden_state_init_trainable,
cell_state_init=cell_state_init,
cell_state_init_trainable=cell_state_init_trainable,
forget_bias=forget_bias,
layer_normalization=layer_normalization)
self._output_normalization_fn = output_nonlinearity
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'dist', 'step_output', 'step_hidden', 'step_cell', 'init_hidden',
'init_cell'
]
# pylint: disable=arguments-differ
def _build(self,
state_input,
step_input,
step_hidden,
step_cell,
name=None):
"""Build model.
Args:
state_input (tf.Tensor): Entire time-series observation input,
with shape :math:`(N, T, S^*)`.
step_input (tf.Tensor): Single timestep observation input,
with shape :math:`(N, S^*)`.
step_hidden (tf.Tensor): Hidden state for step, with shape
:math:`(N, S^*)`.
step_cell (tf.Tensor): Cell state for step, with shape
:math:`(N, S^*)`.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
tf.Tensor: Step output, with shape :math:`(N, S^*)`
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`
tf.Tensor: Initial hidden state, used to reset the hidden state
when policy resets. Shape: :math:`(S^*)`
tf.Tensor: Initial cell state, used to reset the cell state
when policy resets. Shape: :math:`(S^*)`
"""
(outputs, step_output, step_hidden, step_cell, init_hidden,
init_cell) = super()._build(state_input,
step_input,
step_hidden,
step_cell,
name=name)
if self._output_normalization_fn:
outputs = self._output_normalization_fn(outputs)
dist = tfp.distributions.OneHotCategorical(probs=outputs)
return (dist, step_output, step_hidden, step_cell, init_hidden,
init_cell)
| 7,030 | 43.220126 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/categorical_mlp_model.py | """Categorical MLP Model.
A model represented by a Categorical distribution
which is parameterized by a multilayer perceptron (MLP).
"""
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models.mlp_model import MLPModel
class CategoricalMLPModel(MLPModel):
"""Categorical MLP Model.
A model represented by a Categorical distribution
which is parameterized by a multilayer perceptron (MLP).
Args:
output_dim (int): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
name (str): Model name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
name=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(output_dim, name, hidden_sizes, hidden_nonlinearity,
hidden_w_init, hidden_b_init, tf.nn.softmax,
output_w_init, output_b_init, layer_normalization)
self._output_normalization_fn = output_nonlinearity
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return ['dist']
def _build(self, state_input, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Observation inputs.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.OneHotCategorical: Policy distribution.
"""
prob = super()._build(state_input, name=name)
if self._output_normalization_fn:
prob = self._output_normalization_fn(prob)
return tfp.distributions.OneHotCategorical(probs=prob)
| 3,627 | 38.868132 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/cnn.py | """CNN in TensorFlow."""
import tensorflow as tf
from garage.experiment import deterministic
def cnn(input_var,
filters,
strides,
name,
padding,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer()):
"""Convolutional neural network (CNN).
Note:
Based on 'NHWC' data format: [batch, height, width, channel].
Args:
input_var (tf.Tensor): Input tf.Tensor to the CNN.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
name (str): Network name, also the variable scope.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
Return:
tf.Tensor: The output tf.Tensor of the CNN.
"""
with tf.compat.v1.variable_scope(name):
h = input_var
for index, (filter_iter, stride) in enumerate(zip(filters, strides)):
_stride = [1, stride, stride, 1]
h = _conv(h, 'h{}'.format(index), filter_iter[1], filter_iter[0],
_stride, hidden_w_init, hidden_b_init, padding)
if hidden_nonlinearity is not None:
h = hidden_nonlinearity(h)
# flatten
dim = tf.reduce_prod(h.get_shape()[1:].as_list())
return tf.reshape(h, [-1, dim])
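# A minimal sketch of calling cnn() with the ((channels, (H, W)), ...) filter
# format described above; the 64x64x3 input size is an arbitrary assumption.
def _example_cnn():
    obs = tf.compat.v1.placeholder(tf.float32, (None, 64, 64, 3), 'obs')
    return cnn(input_var=obs,
               filters=((16, (5, 5)), (32, (3, 3))),
               strides=(2, 2),
               name='example_cnn',
               padding='SAME')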
def cnn_with_max_pooling(input_var,
filters,
strides,
name,
pool_shapes,
pool_strides,
padding,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer()):
"""Convolutional neural network (CNN) with max-pooling.
Note:
Based on 'NHWC' data format: [batch, height, width, channel].
Args:
input_var (tf.Tensor): Input tf.Tensor to the CNN.
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
name (str): Model name, also the variable scope of the cnn.
pool_shapes (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
shape (2, 2).
pool_strides (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
Return:
tf.Tensor: The output tf.Tensor of the CNN.
"""
pool_strides = [1, pool_strides[0], pool_strides[1], 1]
pool_shapes = [1, pool_shapes[0], pool_shapes[1], 1]
with tf.compat.v1.variable_scope(name):
h = input_var
for index, (filter_iter, stride) in enumerate(zip(filters, strides)):
_stride = [1, stride, stride, 1]
h = _conv(h, 'h{}'.format(index), filter_iter[1], filter_iter[0],
_stride, hidden_w_init, hidden_b_init, padding)
if hidden_nonlinearity is not None:
h = hidden_nonlinearity(h)
h = tf.nn.max_pool2d(h,
ksize=pool_shapes,
strides=pool_strides,
padding=padding)
# flatten
dim = tf.reduce_prod(h.get_shape()[1:].as_list())
return tf.reshape(h, [-1, dim])
def _conv(input_var, name, filter_size, num_filter, strides, hidden_w_init,
hidden_b_init, padding):
"""Helper function for performing convolution.
Args:
input_var (tf.Tensor): Input tf.Tensor to the CNN.
name (str): Variable scope of the convolution Op.
filter_size (tuple[int]): Dimension of the filter. For example,
(3, 5) means the dimension of the filter is (3 x 5).
        num_filter (int): Number of output channels (filters) produced by
            this convolution layer.
        strides (list[int]): The 4-element stride of the sliding window for
            this layer in 'NHWC' format, i.e. [1, stride, stride, 1].
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
Return:
tf.Tensor: The output of the convolution.
"""
# channel from input
input_shape = input_var.get_shape()[-1]
# [filter_height, filter_width, in_channels, out_channels]
w_shape = [filter_size[0], filter_size[1], input_shape, num_filter]
b_shape = [1, 1, 1, num_filter]
with tf.compat.v1.variable_scope(name):
weight = tf.compat.v1.get_variable('weight',
w_shape,
initializer=hidden_w_init)
bias = tf.compat.v1.get_variable('bias',
b_shape,
initializer=hidden_b_init)
return tf.nn.conv2d(
input_var, weight, strides=strides, padding=padding) + bias
| 7,874 | 43.491525 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/cnn_mlp_merge_model.py | """CNN and MLP Merge Model."""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.cnn_model import CNNModel
from garage.tf.models.cnn_model_max_pooling import CNNModelWithMaxPooling
from garage.tf.models.mlp_merge_model import MLPMergeModel
from garage.tf.models.model import Model
class CNNMLPMergeModel(Model):
"""Convolutional neural network followed by a Multilayer Perceptron.
Combination of a CNN Model (optionally with max pooling) and an
MLP Merge model. The CNN accepts the state as an input, while
the MLP accepts the CNN's output and the action as inputs.
Args:
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
hidden_sizes (tuple[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this q-function consists of
two hidden layers, each with 32 hidden units.
output_dim (int): Dimension of the network output.
action_merge_layer (int): The index of layers at which to concatenate
action inputs with the network. The indexing works like standard
python list indexing. Index of 0 refers to the input layer
(observation input) while an index of -1 points to the last
hidden layer. Default parameter points to second layer from the
end.
name (str): Model name, also the variable scope.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
max_pooling (bool): Boolean for using max pooling layer or not.
pool_shapes (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
shape (2, 2).
pool_strides (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
cnn_hidden_nonlinearity (callable): Activation function for
intermediate dense layer(s) in the CNN. It should return a
tf.Tensor. Set it to None to maintain a linear activation.
cnn_hidden_w_init (callable): Initializer function for the weight of
intermediate dense layer(s) in the CNN. Function should return a
tf.Tensor.
cnn_hidden_b_init (callable): Initializer function for the bias of
intermediate dense layer(s) in the CNN. Function should return a
tf.Tensor.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s) in the MLP. It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the MLP. The function should
return a tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the MLP. The function should
return a tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer in the MLP. It should return a tf.Tensor. Set it to None
to maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the MLP. The function should return
a tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s) in the MLP. The function should return
a tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
filters,
strides,
hidden_sizes=(256, ),
output_dim=1,
action_merge_layer=-2,
name=None,
padding='SAME',
max_pooling=False,
pool_strides=(2, 2),
pool_shapes=(2, 2),
cnn_hidden_nonlinearity=tf.nn.relu,
cnn_hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
cnn_hidden_b_init=tf.zeros_initializer(),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name)
if not max_pooling:
self.cnn_model = CNNModel(
filters=filters,
hidden_w_init=cnn_hidden_w_init,
hidden_b_init=cnn_hidden_b_init,
strides=strides,
padding=padding,
hidden_nonlinearity=cnn_hidden_nonlinearity)
else:
self.cnn_model = CNNModelWithMaxPooling(
filters=filters,
hidden_w_init=cnn_hidden_w_init,
hidden_b_init=cnn_hidden_b_init,
strides=strides,
padding=padding,
pool_strides=pool_strides,
pool_shapes=pool_shapes,
hidden_nonlinearity=cnn_hidden_nonlinearity)
self.mlp_merge_model = MLPMergeModel(
output_dim=output_dim,
hidden_sizes=hidden_sizes,
concat_layer=action_merge_layer,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
def network_input_spec(self):
"""Network input spec.
Return:
list[str]: List of key(str) for the network inputs.
"""
return ['state', 'action']
# pylint: disable=arguments-differ
def _build(self, state, action, name=None):
"""Build the model and return the outputs.
This builds the model such that the output of the CNN is fed
to the MLP. The CNN receives the state as the input. The MLP
receives two inputs, the output of the CNN and the action
tensor.
Args:
state (tf.Tensor): State placeholder tensor of shape
:math:`(N, O*)`.
action (tf.Tensor): Action placeholder tensor of shape
:math:`(N, A*)`.
name (str): Name of the model.
Returns:
tf.Tensor: Output of the model of shape (N, output_dim).
"""
cnn_out = self.cnn_model.build(state, name=name).outputs
mlp_out = self.mlp_merge_model.build(cnn_out, action,
name=name).outputs
return mlp_out
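# A minimal sketch of a Q-function style usage, assuming 48x48 RGB image
# states and a 6-dimensional continuous action; all sizes are arbitrary.
def _example_cnn_mlp_merge_model():
    model = CNNMLPMergeModel(filters=((32, (3, 3)), (32, (3, 3))),
                             strides=(2, 2),
                             hidden_sizes=(256, 256))
    state = tf.compat.v1.placeholder(tf.float32, (None, 48, 48, 3), 'state')
    action = tf.compat.v1.placeholder(tf.float32, (None, 6), 'action')
    q_value = model.build(state, action).outputs
    return model, q_value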
| 7,692 | 44.791667 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/cnn_model.py | """CNN Model."""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.cnn import cnn
from garage.tf.models.model import Model
class CNNModel(Model):
"""CNN Model.
Args:
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
        strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
name (str): Model name, also the variable scope.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
"""
def __init__(self,
filters,
strides,
padding,
name=None,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer()):
super().__init__(name)
self._filters = filters
self._strides = strides
self._padding = padding
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
# pylint: disable=arguments-differ
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return cnn(input_var=state_input,
filters=self._filters,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
strides=self._strides,
padding=self._padding,
name='cnn')
| 3,003 | 38.526316 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/cnn_model_max_pooling.py | """CNN Model."""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.cnn import cnn_with_max_pooling
from garage.tf.models.model import Model
class CNNModelWithMaxPooling(Model):
"""CNN Model with max pooling.
Args:
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
name (str): Model name, also the variable scope of the cnn.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
pool_strides (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
pool_shapes (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
shape (2, 2).
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
"""
def __init__(self,
filters,
strides,
name=None,
padding='SAME',
pool_strides=(2, 2),
pool_shapes=(2, 2),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer()):
super().__init__(name)
self._filters = filters
self._strides = strides
self._padding = padding
self._pool_strides = pool_strides
self._pool_shapes = pool_shapes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
# pylint: disable=arguments-differ
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
return cnn_with_max_pooling(
input_var=state_input,
filters=self._filters,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
strides=self._strides,
padding=self._padding,
pool_shapes=self._pool_shapes,
pool_strides=self._pool_strides,
name='cnn')
| 3,633 | 39.831461 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/gaussian_cnn_model.py | """GaussianCNNModel."""
import numpy as np
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.distributions import DiagonalGaussian
from garage.tf.models.cnn import cnn
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
from garage.tf.models.parameter import parameter
class GaussianCNNModel(Model):
"""GaussianCNNModel.
Args:
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
            are two convolutional layers. The filter for the first layer has 3
            channels and its shape is (3 x 5), while the filter for the second
            layer has 32 channels and its shape is (3 x 3).
        strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the Convolutional model for mean. For example, (32, 32) means the
network consists of two dense layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
std_filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and
dimension of filters. For example, ((3, (3, 5)), (32, (3, 3)))
means there are two convolutional layers. The filter for the first
            layer has 3 channels and its shape is (3 x 5), while the filter
            for the second layer has 32 channels and its shape is (3 x 3).
        std_strides (tuple[int]): The stride of the sliding window. For
            example, (1, 2) means there are two convolutional layers. The
            stride of the filter for the first layer is 1 and that of the
            second layer is 2.
std_padding (str): The type of padding algorithm to use in std network,
either 'SAME' or 'VALID'.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the Conv for std. For example, (32, 32) means the Conv consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the std network.
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the std network.
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a tf.Tensor. Set
it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
            - exp: the logarithm of the std will be stored, and an
                exponential transformation applied to recover the std
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
filters,
strides,
padding,
hidden_sizes,
name=None,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_filters=(),
std_strides=(),
std_padding='SAME',
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_hidden_b_init=tf.zeros_initializer(),
std_output_nonlinearity=None,
std_output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_parameterization='exp',
layer_normalization=False):
# Network parameters
super().__init__(name)
self._output_dim = output_dim
self._filters = filters
self._strides = strides
self._padding = padding
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._learn_std = learn_std
self._adaptive_std = adaptive_std
self._std_share_network = std_share_network
self._init_std = init_std
self._min_std = min_std
self._max_std = max_std
self._std_filters = std_filters
self._std_strides = std_strides
self._std_padding = std_padding
self._std_hidden_sizes = std_hidden_sizes
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_hidden_w_init = std_hidden_w_init
self._std_hidden_b_init = std_hidden_b_init
self._std_output_nonlinearity = std_output_nonlinearity
self._std_output_w_init = std_output_w_init
self._std_parameterization = std_parameterization
self._layer_normalization = layer_normalization
        # Transform std arguments to parameterized space
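        # (exp: store log(std); softplus: store the inverse softplus
        # log(exp(std) - 1), so that softplus(param) recovers std.)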
self._init_std_param = None
self._min_std_param = None
self._max_std_param = None
if self._std_parameterization == 'exp':
self._init_std_param = np.log(init_std)
if min_std is not None:
self._min_std_param = np.log(min_std)
if max_std is not None:
self._max_std_param = np.log(max_std)
elif self._std_parameterization == 'softplus':
self._init_std_param = np.log(np.exp(init_std) - 1)
if min_std is not None:
self._min_std_param = np.log(np.exp(min_std) - 1)
if max_std is not None:
self._max_std_param = np.log(np.exp(max_std) - 1)
else:
raise NotImplementedError
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return ['sample', 'mean', 'log_std', 'std_param', 'dist']
# pylint: disable=arguments-differ
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Place holder for state input.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Sampled action.
tf.Tensor: Mean.
tf.Tensor: Parameterized log_std.
tf.Tensor: log_std.
garage.tf.distributions.DiagonalGaussian: Policy distribution.
"""
del name
action_dim = self._output_dim
with tf.compat.v1.variable_scope('dist_params'):
if self._std_share_network:
                # mean and std networks share a CNN
b = np.concatenate([
np.zeros(action_dim),
np.full(action_dim, self._init_std_param)
], axis=0) # yapf: disable
mean_std_conv = cnn(
input_var=state_input,
filters=self._filters,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
strides=self._strides,
padding=self._padding,
name='mean_std_cnn')
mean_std_network = mlp(
mean_std_conv,
output_dim=action_dim * 2,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=tf.constant_initializer(b),
name='mean_std_network',
layer_normalization=self._layer_normalization)
with tf.compat.v1.variable_scope('mean_network'):
mean_network = mean_std_network[..., :action_dim]
with tf.compat.v1.variable_scope('log_std_network'):
log_std_network = mean_std_network[..., action_dim:]
else:
                # separate networks for mean and std
# mean network
mean_conv = cnn(input_var=state_input,
filters=self._filters,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
strides=self._strides,
padding=self._padding,
name='mean_cnn')
mean_network = mlp(
mean_conv,
output_dim=action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
name='mean_network',
layer_normalization=self._layer_normalization)
# std network
if self._adaptive_std:
log_std_conv = cnn(
input_var=state_input,
filters=self._std_filters,
hidden_nonlinearity=self._std_hidden_nonlinearity,
hidden_w_init=self._std_hidden_w_init,
hidden_b_init=self._std_hidden_b_init,
strides=self._std_strides,
padding=self._std_padding,
name='log_std_cnn')
log_std_network = mlp(
log_std_conv,
output_dim=action_dim,
hidden_sizes=self._std_hidden_sizes,
hidden_nonlinearity=self._std_hidden_nonlinearity,
hidden_w_init=self._std_hidden_w_init,
hidden_b_init=self._std_hidden_b_init,
output_nonlinearity=self._std_output_nonlinearity,
output_w_init=self._std_output_w_init,
output_b_init=tf.constant_initializer(
self._init_std_param),
name='log_std_network',
layer_normalization=self._layer_normalization)
else:
log_std_network = parameter(
input_var=state_input,
length=action_dim,
initializer=tf.constant_initializer(
self._init_std_param),
trainable=self._learn_std,
name='log_std_network')
mean_var = mean_network
std_param = log_std_network
with tf.compat.v1.variable_scope('std_limits'):
if self._min_std_param is not None:
std_param = tf.maximum(std_param, self._min_std_param)
if self._max_std_param is not None:
std_param = tf.minimum(std_param, self._max_std_param)
with tf.compat.v1.variable_scope('std_parameterization'):
# build std_var with std parameterization
if self._std_parameterization == 'exp':
log_std_var = std_param
else: # we know it must be softplus here
log_std_var = tf.math.log(tf.math.log(1. + tf.exp(std_param)))
dist = DiagonalGaussian(self._output_dim)
rnd = tf.random.normal(shape=mean_var.get_shape().as_list()[1:],
seed=deterministic.get_tf_seed_stream())
action_var = rnd * tf.exp(log_std_var) + mean_var
return action_var, mean_var, log_std_var, std_param, dist
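# --- Illustrative sketch, not part of the original garage file ---
# A minimal NumPy check of the two std parameterizations used above, with
# assumed example values init_std=1.0 and min_std=1e-6. 'exp' stores
# log(std); 'softplus' stores log(exp(std) - 1) and recovers the std as
# log(1 + exp(param)), which is what _build computes before exponentiating.
if __name__ == '__main__':
    import numpy as np
    init_std, min_std = 1.0, 1e-6
    exp_param = np.log(init_std)
    assert np.isclose(np.exp(exp_param), init_std)
    softplus_param = np.log(np.exp(init_std) - 1)
    assert np.isclose(np.log(1. + np.exp(softplus_param)), init_std)
    # Clamping is done in parameter space, mirroring tf.maximum in _build.
    clamped = max(np.log(min_std), exp_param)
    print('exp param:', exp_param, 'softplus param:', softplus_param,
          'clamped:', clamped)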
| 15,250 | 46.363354 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/gaussian_gru_model.py | """Gaussian GRU Model.
A model represented by a Gaussian distribution
which is parameterized by a Gated Recurrent Unit (GRU).
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models.gru import gru
from garage.tf.models.model import Model
from garage.tf.models.parameter import recurrent_parameter
class GaussianGRUModel(Model):
"""Gaussian GRU Model.
A model represented by a Gaussian distribution
which is parameterized by a Gated Recurrent Unit (GRU).
Args:
output_dim (int): Output dimension of the model.
hidden_dim (int): Hidden dimension for GRU cell for mean.
name (str): Model name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
std_share_network (bool): Boolean for whether mean and std share
the same network.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
hidden_dim=32,
name='GaussianGRUModel',
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
learn_std=True,
init_std=1.0,
std_share_network=False,
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_dim = hidden_dim
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._layer_normalization = layer_normalization
self._learn_std = learn_std
self._std_share_network = std_share_network
# pylint: disable=assignment-from-no-return
self._init_std_param = np.log(init_std)
self._initialize()
def _initialize(self):
action_dim = self._output_dim
self._mean_std_gru_cell = tf.keras.layers.GRUCell(
units=self._hidden_dim,
activation=self._hidden_nonlinearity,
kernel_initializer=self._hidden_w_init,
bias_initializer=self._hidden_b_init,
recurrent_activation=self._recurrent_nonlinearity,
recurrent_initializer=self._recurrent_w_init,
name='mean_std_gru_layer')
self._mean_gru_cell = tf.keras.layers.GRUCell(
units=self._hidden_dim,
activation=self._hidden_nonlinearity,
kernel_initializer=self._hidden_w_init,
bias_initializer=self._hidden_b_init,
recurrent_activation=self._recurrent_nonlinearity,
recurrent_initializer=self._recurrent_w_init,
name='mean_gru_layer')
self._mean_std_output_nonlinearity_layer = tf.keras.layers.Dense(
units=action_dim * 2,
activation=self._output_nonlinearity,
kernel_initializer=self._output_w_init,
bias_initializer=self._output_b_init,
name='mean_std_output_layer')
self._mean_output_nonlinearity_layer = tf.keras.layers.Dense(
units=action_dim,
activation=self._output_nonlinearity,
kernel_initializer=self._output_w_init,
bias_initializer=self._output_b_init,
name='mean_output_layer')
def network_input_spec(self):
"""Network input spec.
Returns:
list[str]: Name of the model inputs, in order.
"""
return ['full_input', 'step_input', 'step_hidden_input']
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'dist', 'step_mean', 'step_log_std', 'step_hidden', 'init_hidden'
]
# pylint: disable=arguments-differ
def _build(self, state_input, step_input, step_hidden, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Entire time-series observation input,
with shape :math:`(N, T, S^*)`.
step_input (tf.Tensor): Single timestep observation input,
with shape :math:`(N, S^*)`.
step_hidden (tf.Tensor): Hidden state for step, with shape
:math:`(N, S^*)`.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
"""
del name
action_dim = self._output_dim
with tf.compat.v1.variable_scope('dist_params'):
if self._std_share_network:
                # mean and std networks share a GRU
(outputs, step_outputs, step_hidden, hidden_init_var) = gru(
name='mean_std_network',
gru_cell=self._mean_std_gru_cell,
all_input_var=state_input,
step_input_var=step_input,
step_hidden_var=step_hidden,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self.
_hidden_state_init_trainable,
output_nonlinearity_layer=self.
_mean_std_output_nonlinearity_layer)
with tf.compat.v1.variable_scope('mean_network'):
mean_var = outputs[..., :action_dim]
step_mean_var = step_outputs[..., :action_dim]
with tf.compat.v1.variable_scope('log_std_network'):
log_std_var = outputs[..., action_dim:]
step_log_std_var = step_outputs[..., action_dim:]
else:
                # separate networks for mean and std
# mean network
(mean_var, step_mean_var, step_hidden, hidden_init_var) = gru(
name='mean_network',
gru_cell=self._mean_gru_cell,
all_input_var=state_input,
step_input_var=step_input,
step_hidden_var=step_hidden,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self.
_hidden_state_init_trainable,
output_nonlinearity_layer=self.
_mean_output_nonlinearity_layer)
log_std_var, step_log_std_var = recurrent_parameter(
input_var=state_input,
step_input_var=step_input,
length=action_dim,
initializer=tf.constant_initializer(self._init_std_param),
trainable=self._learn_std,
name='log_std_param')
dist = tfp.distributions.MultivariateNormalDiag(
loc=mean_var, scale_diag=tf.exp(log_std_var))
return (dist, step_mean_var, step_log_std_var, step_hidden,
hidden_init_var)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_mean_std_gru_cell']
del new_dict['_mean_gru_cell']
del new_dict['_mean_std_output_nonlinearity_layer']
del new_dict['_mean_output_nonlinearity_layer']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
super().__setstate__(state)
self._initialize()
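# --- Illustrative sketch, not part of the original garage file ---
# With std_share_network=True, the shared output layer emits 2 * action_dim
# units per step and _build slices the first half as the mean and the second
# half as the log-std. A NumPy mock of that slicing, assuming action_dim=3
# and a batch of two steps:
if __name__ == '__main__':
    import numpy as np
    action_dim = 3
    step_outputs = np.arange(2 * 2 * action_dim, dtype=float).reshape(2, -1)
    step_mean = step_outputs[..., :action_dim]
    step_log_std = step_outputs[..., action_dim:]
    assert step_mean.shape == (2, action_dim)
    assert step_log_std.shape == (2, action_dim)
    print(step_mean, step_log_std, sep='\n')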
| 10,676 | 41.369048 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/gaussian_lstm_model.py | """Gaussian LSTM Model.
A model represented by a Gaussian distribution
which is parameterized by a Long short-term memory (LSTM).
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models.lstm import lstm
from garage.tf.models.model import Model
from garage.tf.models.parameter import recurrent_parameter
class GaussianLSTMModel(Model):
"""Gaussian LSTM Model.
A model represented by a Gaussian distribution
which is parameterized by a Long short-term memory (LSTM).
Args:
output_dim (int): Output dimension of the model.
hidden_dim (int): Hidden dimension for LSTM cell for mean.
name (str): Model name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
cell_state_init (callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
cell_state_init_trainable (bool): Bool for whether the initial
cell state is trainable.
forget_bias (bool): If True, add 1 to the bias of the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
std_share_network (bool): Boolean for whether mean and std share
the same network.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
hidden_dim=32,
name=None,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
cell_state_init=tf.zeros_initializer(),
cell_state_init_trainable=False,
forget_bias=True,
learn_std=True,
init_std=1.0,
std_share_network=False,
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_dim = hidden_dim
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._cell_state_init = cell_state_init
self._cell_state_init_trainable = cell_state_init_trainable
self._forget_bias = forget_bias
self._layer_normalization = layer_normalization
self._learn_std = learn_std
self._std_share_network = std_share_network
# pylint: disable=assignment-from-no-return
self._init_std_param = np.log(init_std)
self._initialize()
def _initialize(self):
action_dim = self._output_dim
self._mean_std_lstm_cell = tf.keras.layers.LSTMCell(
units=self._hidden_dim,
activation=self._hidden_nonlinearity,
kernel_initializer=self._hidden_w_init,
bias_initializer=self._hidden_b_init,
recurrent_activation=self._recurrent_nonlinearity,
recurrent_initializer=self._recurrent_w_init,
unit_forget_bias=self._forget_bias,
name='mean_std_lstm_layer')
self._mean_lstm_cell = tf.keras.layers.LSTMCell(
units=self._hidden_dim,
activation=self._hidden_nonlinearity,
kernel_initializer=self._hidden_w_init,
bias_initializer=self._hidden_b_init,
recurrent_activation=self._recurrent_nonlinearity,
recurrent_initializer=self._recurrent_w_init,
unit_forget_bias=self._forget_bias,
name='mean_lstm_layer')
self._mean_std_output_nonlinearity_layer = tf.keras.layers.Dense(
units=action_dim * 2,
activation=self._output_nonlinearity,
kernel_initializer=self._output_w_init,
bias_initializer=self._output_b_init,
name='mean_std_output_layer')
self._mean_output_nonlinearity_layer = tf.keras.layers.Dense(
units=action_dim,
activation=self._output_nonlinearity,
kernel_initializer=self._output_w_init,
bias_initializer=self._output_b_init,
name='mean_output_layer')
def network_input_spec(self):
"""Network input spec.
Returns:
list[str]: Name of the model inputs, in order.
"""
return [
'full_input', 'step_input', 'step_hidden_input', 'step_cell_input'
]
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'dist', 'step_mean', 'step_log_std', 'step_hidden', 'step_cell',
'init_hidden', 'init_cell'
]
# pylint: disable=arguments-differ
def _build(self,
state_input,
step_input,
step_hidden,
step_cell,
name=None):
"""Build model.
Args:
state_input (tf.Tensor): Entire time-series observation input,
with shape :math:`(N, T, S^*)`.
step_input (tf.Tensor): Single timestep observation input,
with shape :math:`(N, S^*)`.
step_hidden (tf.Tensor): Hidden state for step, with shape
:math:`(N, S^*)`.
step_cell (tf.Tensor): Cell state for step, with shape
:math:`(N, S^*)`.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.MultivariateNormalDiag: Policy distribution.
tf.Tensor: Step means, with shape :math:`(N, S^*)`.
tf.Tensor: Step log std, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, S^*)`.
tf.Tensor: Step cell state, with shape :math:`(N, S^*)`.
tf.Tensor: Initial hidden state, with shape :math:`(S^*)`.
tf.Tensor: Initial cell state, with shape :math:`(S^*)`
"""
del name
action_dim = self._output_dim
with tf.compat.v1.variable_scope('dist_params'):
if self._std_share_network:
                # mean and std networks share an LSTM
(outputs, step_outputs, step_hidden, step_cell,
hidden_init_var, cell_init_var) = lstm(
name='mean_std_network',
lstm_cell=self._mean_std_lstm_cell,
all_input_var=state_input,
step_input_var=step_input,
step_hidden_var=step_hidden,
step_cell_var=step_cell,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self.
_hidden_state_init_trainable,
cell_state_init=self._cell_state_init,
cell_state_init_trainable=self._cell_state_init_trainable,
output_nonlinearity_layer=self.
_mean_std_output_nonlinearity_layer)
with tf.compat.v1.variable_scope('mean_network'):
mean_var = outputs[..., :action_dim]
step_mean_var = step_outputs[..., :action_dim]
with tf.compat.v1.variable_scope('log_std_network'):
log_std_var = outputs[..., action_dim:]
step_log_std_var = step_outputs[..., action_dim:]
else:
                # separate networks for mean and std
# mean network
(mean_var, step_mean_var, step_hidden, step_cell,
hidden_init_var, cell_init_var) = lstm(
name='mean_network',
lstm_cell=self._mean_lstm_cell,
all_input_var=state_input,
step_input_var=step_input,
step_hidden_var=step_hidden,
step_cell_var=step_cell,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self.
_hidden_state_init_trainable,
cell_state_init=self._cell_state_init,
cell_state_init_trainable=self._cell_state_init_trainable,
output_nonlinearity_layer=self.
_mean_output_nonlinearity_layer)
log_std_var, step_log_std_var = recurrent_parameter(
input_var=state_input,
step_input_var=step_input,
length=action_dim,
initializer=tf.constant_initializer(self._init_std_param),
trainable=self._learn_std,
name='log_std_param')
dist = tfp.distributions.MultivariateNormalDiag(
loc=mean_var, scale_diag=tf.exp(log_std_var))
return (dist, step_mean_var, step_log_std_var, step_hidden, step_cell,
hidden_init_var, cell_init_var)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_mean_std_lstm_cell']
del new_dict['_mean_lstm_cell']
del new_dict['_mean_std_output_nonlinearity_layer']
del new_dict['_mean_output_nonlinearity_layer']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
super().__setstate__(state)
self._initialize()
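# --- Illustrative sketch, not part of the original garage file ---
# _build wraps the mean and log-std tensors in a diagonal Gaussian via
# tfp.distributions.MultivariateNormalDiag. A toy eager-mode construction of
# the same distribution (assuming TF2 eager execution and action_dim=2):
if __name__ == '__main__':
    import numpy as np
    mean = np.zeros((1, 2), dtype=np.float32)
    log_std = np.full((1, 2), np.log(0.5), dtype=np.float32)
    dist = tfp.distributions.MultivariateNormalDiag(
        loc=mean, scale_diag=np.exp(log_std))
    sample = dist.sample(seed=0)
    print(sample.shape, dist.log_prob(mean).numpy())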
| 12,429 | 42.310105 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/gaussian_mlp_model.py | """Gaussian MLP Model.
A model represented by a Gaussian distribution
which is parameterized by a multilayer perceptron (MLP).
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from garage.experiment import deterministic
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
from garage.tf.models.parameter import parameter
class GaussianMLPModel(Model):
"""Gaussian MLP Model.
A model represented by a Gaussian distribution
which is parameterized by a multilayer perceptron (MLP).
Args:
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues.
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues.
std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
in the std network.
std_hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s) in the std network. The function
should return a tf.Tensor.
std_hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s) in the std network. The function
should return a tf.Tensor.
std_output_nonlinearity (callable): Activation function for output
dense layer in the std network. It should return a tf.Tensor. Set
it to None to maintain a linear activation.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
std_parameterization (str): How the std should be parametrized. There
are two options:
            - exp: the logarithm of the std will be stored, and an
              exponential transformation will be applied
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
name=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
learn_std=True,
adaptive_std=False,
std_share_network=False,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=tf.nn.tanh,
std_hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_hidden_b_init=tf.zeros_initializer(),
std_output_nonlinearity=None,
std_output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
std_parameterization='exp',
layer_normalization=False):
# Network parameters
super().__init__(name)
self._hidden_sizes = hidden_sizes
self._output_dim = output_dim
self._learn_std = learn_std
self._adaptive_std = adaptive_std
self._std_share_network = std_share_network
self._std_hidden_sizes = std_hidden_sizes
self._init_std = init_std
self._min_std = min_std
self._max_std = max_std
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_hidden_w_init = std_hidden_w_init
self._std_hidden_b_init = std_hidden_b_init
self._std_output_nonlinearity = std_output_nonlinearity
self._std_output_w_init = std_output_w_init
self._std_parameterization = std_parameterization
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
        # Transform std arguments to parameterized space
self._init_std_param = None
self._min_std_param = None
self._max_std_param = None
# pylint: disable=assignment-from-no-return
if self._std_parameterization == 'exp':
self._init_std_param = np.log(init_std)
if min_std is not None:
self._min_std_param = np.log(min_std)
if max_std is not None:
self._max_std_param = np.log(max_std)
elif self._std_parameterization == 'softplus':
self._init_std_param = np.log(np.exp(init_std) - 1)
if min_std is not None:
self._min_std_param = np.log(np.exp(min_std) - 1)
if max_std is not None:
self._max_std_param = np.log(np.exp(max_std) - 1)
else:
            raise ValueError("std parameterization should be 'exp' or "
"'softplus' but got {}".format(
self._std_parameterization))
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return ['dist', 'mean', 'log_std']
# pylint: disable=arguments-differ
def _build(self, state_input, name=None):
"""Build model.
Args:
state_input (tf.Tensor): Entire time-series observation input.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Returns:
tfp.distributions.MultivariateNormalDiag: Distribution.
tf.tensor: Mean.
tf.Tensor: Log of standard deviation.
"""
del name
action_dim = self._output_dim
with tf.compat.v1.variable_scope('dist_params'):
if self._std_share_network:
# mean and std networks share an MLP
b = np.concatenate([
np.zeros(action_dim),
np.full(action_dim, self._init_std_param)
], axis=0) # yapf: disable
mean_std_network = mlp(
state_input,
output_dim=action_dim * 2,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=tf.constant_initializer(b),
name='mean_std_network',
layer_normalization=self._layer_normalization)
with tf.compat.v1.variable_scope('mean_network'):
mean_network = mean_std_network[..., :action_dim]
with tf.compat.v1.variable_scope('log_std_network'):
log_std_network = mean_std_network[..., action_dim:]
else:
# separate MLPs for mean and std networks
# mean network
mean_network = mlp(
state_input,
output_dim=action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
name='mean_network',
layer_normalization=self._layer_normalization)
# std network
if self._adaptive_std:
log_std_network = mlp(
state_input,
output_dim=action_dim,
hidden_sizes=self._std_hidden_sizes,
hidden_nonlinearity=self._std_hidden_nonlinearity,
hidden_w_init=self._std_hidden_w_init,
hidden_b_init=self._std_hidden_b_init,
output_nonlinearity=self._std_output_nonlinearity,
output_w_init=self._std_output_w_init,
output_b_init=tf.constant_initializer(
self._init_std_param),
name='log_std_network',
layer_normalization=self._layer_normalization)
else:
log_std_network = parameter(
input_var=state_input,
length=action_dim,
initializer=tf.constant_initializer(
self._init_std_param),
trainable=self._learn_std,
name='log_std_network')
log_std_network = tf.expand_dims(log_std_network, 1)
mean_var = mean_network
std_param = log_std_network
with tf.compat.v1.variable_scope('std_limits'):
if self._min_std_param is not None:
std_param = tf.maximum(std_param, self._min_std_param)
if self._max_std_param is not None:
std_param = tf.minimum(std_param, self._max_std_param)
with tf.compat.v1.variable_scope('std_parameterization'):
# build std_var with std parameterization
if self._std_parameterization == 'exp':
log_std_var = std_param
else: # we know it must be softplus here
log_std_var = tf.math.log(tf.math.log(1. + tf.exp(std_param)))
return tfp.distributions.MultivariateNormalDiag(
loc=mean_var,
scale_diag=tf.exp(log_std_var)), mean_var, log_std_var
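# --- Illustrative sketch, not part of the original garage file ---
# The std bounds are enforced in parameter space: _build clamps std_param to
# [log(min_std), log(max_std)] before exponentiating, so the resulting std
# always lies inside the bounds. A NumPy check with assumed example bounds
# min_std=0.1 and max_std=2.0:
if __name__ == '__main__':
    import numpy as np
    min_std, max_std = 0.1, 2.0
    raw_params = np.array([-5.0, 0.0, 3.0])  # unbounded network outputs
    clamped = np.clip(raw_params, np.log(min_std), np.log(max_std))
    stds = np.exp(clamped)
    assert np.all(stds >= min_std - 1e-9) and np.all(stds <= max_std + 1e-9)
    print(stds)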
| 12,278 | 44.817164 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/gru.py | """GRU in TensorFlow."""
import tensorflow as tf
def gru(name,
gru_cell,
all_input_var,
step_input_var,
step_hidden_var,
output_nonlinearity_layer,
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False):
r"""Gated Recurrent Unit (GRU).
Args:
name (str): Name of the variable scope.
gru_cell (tf.keras.layers.Layer): GRU cell used to generate
outputs.
all_input_var (tf.Tensor): Place holder for entire time-series inputs,
with shape :math:`(N, T, S^*)`.
step_input_var (tf.Tensor): Place holder for step inputs, with shape
:math:`(N, S^*)`.
step_hidden_var (tf.Tensor): Place holder for step hidden state, with
shape :math:`(N, H)`.
output_nonlinearity_layer (callable): Activation function for output
dense layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
Return:
tf.Tensor: Entire time-series outputs, with shape :math:`(N, T, S^*)`.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, H)`
tf.Tensor: Initial hidden state, with shape :math:`(H, )`
"""
with tf.compat.v1.variable_scope(name):
hidden_dim = gru_cell.units
output, [hidden] = gru_cell(step_input_var, states=[step_hidden_var])
output = output_nonlinearity_layer(output)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(hidden_dim, ),
initializer=hidden_state_init,
trainable=hidden_state_init_trainable,
dtype=tf.float32)
hidden_init_var_b = tf.broadcast_to(
hidden_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])
rnn = tf.keras.layers.RNN(gru_cell, return_sequences=True)
hs = rnn(all_input_var, initial_state=hidden_init_var_b)
outputs = output_nonlinearity_layer(hs)
return outputs, output, hidden, hidden_init_var
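# --- Illustrative usage sketch, not part of the original garage file ---
# Wiring the gru() helper with tf.compat.v1 placeholders, assuming graph mode
# (eager disabled) and arbitrary example sizes obs_dim=4, hidden_dim=8,
# output_dim=2. The cell, output layer and placeholder names are made up for
# this example.
if __name__ == '__main__':
    tf.compat.v1.disable_eager_execution()
    obs_dim, hidden_dim, output_dim = 4, 8, 2
    cell = tf.keras.layers.GRUCell(units=hidden_dim)
    out_layer = tf.keras.layers.Dense(units=output_dim)
    all_in = tf.compat.v1.placeholder(tf.float32, (None, None, obs_dim))
    step_in = tf.compat.v1.placeholder(tf.float32, (None, obs_dim))
    step_h = tf.compat.v1.placeholder(tf.float32, (None, hidden_dim))
    outputs, step_out, step_hidden, init_h = gru(
        name='gru_example',
        gru_cell=cell,
        all_input_var=all_in,
        step_input_var=step_in,
        step_hidden_var=step_h,
        output_nonlinearity_layer=out_layer)
    print(outputs.shape, step_out.shape, step_hidden.shape, init_h.shape)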
| 2,361 | 37.721311 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/gru_model.py | """GRU Model.
A model composed only of a Gated Recurrent Unit (GRU).
"""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.gru import gru
from garage.tf.models.model import Model
class GRUModel(Model):
"""GRU Model.
Args:
output_dim (int): Dimension of the network output.
hidden_dim (int): Hidden dimension for GRU cell.
name (str): Policy name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
hidden_dim,
name=None,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_dim = hidden_dim
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._initialize()
def _initialize(self):
"""Initialize model."""
self._gru_cell = tf.keras.layers.GRUCell(
units=self._hidden_dim,
activation=self._hidden_nonlinearity,
kernel_initializer=self._hidden_w_init,
bias_initializer=self._hidden_b_init,
recurrent_activation=self._recurrent_nonlinearity,
recurrent_initializer=self._recurrent_w_init,
name='gru_layer')
self._output_nonlinearity_layer = tf.keras.layers.Dense(
units=self._output_dim,
activation=self._output_nonlinearity,
kernel_initializer=self._output_w_init,
bias_initializer=self._output_b_init,
name='output_layer')
def network_input_spec(self):
"""Network input spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return ['full_input', 'step_input', 'step_hidden_input']
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return ['all_output', 'step_output', 'step_hidden', 'init_hidden']
# pylint: disable=arguments-differ
def _build(self, all_input_var, step_input_var, step_hidden_var,
name=None):
"""Build model given input placeholder(s).
Args:
all_input_var (tf.Tensor): Place holder for entire time-series
inputs.
step_input_var (tf.Tensor): Place holder for step inputs.
step_hidden_var (tf.Tensor): Place holder for step hidden state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Initial hidden state.
"""
del name
return gru(
name='gru',
gru_cell=self._gru_cell,
all_input_var=all_input_var,
step_input_var=step_input_var,
step_hidden_var=step_hidden_var,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self._hidden_state_init_trainable,
output_nonlinearity_layer=self._output_nonlinearity_layer)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_gru_cell']
del new_dict['_output_nonlinearity_layer']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
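# --- Illustrative sketch, not part of the original garage file ---
# The model exposes both a full time-series path and a single-step path that
# share one GRUCell. A small check (assuming TF2 eager execution) that
# stepping the cell manually reproduces the full-sequence rollout:
if __name__ == '__main__':
    import numpy as np
    cell = tf.keras.layers.GRUCell(units=3)
    rnn = tf.keras.layers.RNN(cell, return_sequences=True)
    seq = np.random.rand(1, 5, 2).astype(np.float32)
    full = rnn(seq).numpy()
    hidden = np.zeros((1, 3), dtype=np.float32)
    stepped = []
    for t in range(seq.shape[1]):
        out, [hidden] = cell(seq[:, t, :], states=[hidden])
        stepped.append(out.numpy())
    assert np.allclose(full, np.stack(stepped, axis=1), atol=1e-5)
    print('step-by-step rollout matches the full rollout')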
| 6,740 | 37.965318 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/lstm.py | """LSTM in TensorFlow."""
import tensorflow as tf
def lstm(name,
lstm_cell,
all_input_var,
step_input_var,
step_hidden_var,
step_cell_var,
output_nonlinearity_layer,
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
cell_state_init=tf.zeros_initializer(),
cell_state_init_trainable=False):
r"""Long Short-Term Memory (LSTM).
Args:
name (str): Name of the variable scope.
lstm_cell (tf.keras.layers.Layer): LSTM cell used to generate
outputs.
        all_input_var (tf.Tensor): Place holder for entire time-series inputs,
with shape :math:`(N, T, S^*)`.
step_input_var (tf.Tensor): Place holder for step inputs, with shape
:math:`(N, S^*)`.
step_hidden_var (tf.Tensor): Place holder for step hidden state, with
shape :math:`(N, H)`.
step_cell_var (tf.Tensor): Place holder for cell state, with shape
:math:`(N, H)`.
output_nonlinearity_layer (callable): Activation function for output
dense layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
cell_state_init (callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
cell_state_init_trainable (bool): Bool for whether the initial
cell state is trainable.
Return:
        tf.Tensor: Entire time-series outputs, with shape :math:`(N, T, S^*)`.
tf.Tensor: Step output, with shape :math:`(N, S^*)`.
tf.Tensor: Step hidden state, with shape :math:`(N, H)`.
tf.Tensor: Step cell state, with shape :math:`(N, H)`.
tf.Tensor: Initial hidden state, with shape :math:`(H, )`.
tf.Tensor: Initial cell state, with shape :math:`(H, )`.
"""
with tf.compat.v1.variable_scope(name):
hidden_dim = lstm_cell.units
output, [hidden,
cell] = lstm_cell(step_input_var,
states=(step_hidden_var, step_cell_var))
output = output_nonlinearity_layer(output)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(hidden_dim, ),
initializer=hidden_state_init,
trainable=hidden_state_init_trainable,
dtype=tf.float32)
cell_init_var = tf.compat.v1.get_variable(
name='initial_cell',
shape=(hidden_dim, ),
initializer=cell_state_init,
trainable=cell_state_init_trainable,
dtype=tf.float32)
hidden_init_var_b = tf.broadcast_to(
hidden_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])
cell_init_var_b = tf.broadcast_to(
cell_init_var, shape=[tf.shape(all_input_var)[0], hidden_dim])
rnn = tf.keras.layers.RNN(lstm_cell, return_sequences=True)
hs = rnn(all_input_var,
initial_state=[hidden_init_var_b, cell_init_var_b])
outputs = output_nonlinearity_layer(hs)
return outputs, output, hidden, cell, hidden_init_var, cell_init_var
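# --- Illustrative sketch, not part of the original garage file ---
# The helper above advances one step with
# lstm_cell(step_input, states=(hidden, cell)). A minimal example (assuming
# TF2 eager execution and made-up sizes) showing the returned states:
if __name__ == '__main__':
    import numpy as np
    step_cell = tf.keras.layers.LSTMCell(units=4)
    step_input = np.zeros((1, 3), dtype=np.float32)
    hidden = np.zeros((1, 4), dtype=np.float32)
    cell_state = np.zeros((1, 4), dtype=np.float32)
    output, [hidden, cell_state] = step_cell(step_input,
                                             states=(hidden, cell_state))
    # For a keras LSTMCell the per-step output equals the new hidden state.
    assert np.allclose(output.numpy(), hidden.numpy())
    print(output.shape, hidden.shape, cell_state.shape)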
| 3,458 | 40.674699 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/lstm_model.py | """LSTM Model.
A model composed only of a long-short term memory (LSTM).
"""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.lstm import lstm
from garage.tf.models.model import Model
class LSTMModel(Model):
"""LSTM Model.
Args:
output_dim (int): Dimension of the network output.
hidden_dim (int): Hidden dimension for LSTM cell.
name (str): Policy name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
recurrent_nonlinearity (callable): Activation function for recurrent
layers. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
recurrent_w_init (callable): Initializer function for the weight
of recurrent layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
hidden_state_init (callable): Initializer function for the
            initial hidden state. The function should return a tf.Tensor.
hidden_state_init_trainable (bool): Bool for whether the initial
hidden state is trainable.
cell_state_init (callable): Initializer function for the
            initial cell state. The function should return a tf.Tensor.
cell_state_init_trainable (bool): Bool for whether the initial
cell state is trainable.
forget_bias (bool): If True, add 1 to the bias of the forget gate at
initialization. It's used to reduce the scale of forgetting at the
beginning of the training.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
hidden_dim,
name=None,
hidden_nonlinearity=tf.nn.tanh,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
recurrent_nonlinearity=tf.nn.sigmoid,
recurrent_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
hidden_state_init=tf.zeros_initializer(),
hidden_state_init_trainable=False,
cell_state_init=tf.zeros_initializer(),
cell_state_init_trainable=False,
forget_bias=True,
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_dim = hidden_dim
self._forget_bias = forget_bias
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._recurrent_nonlinearity = recurrent_nonlinearity
self._recurrent_w_init = recurrent_w_init
self._hidden_state_init = hidden_state_init
self._hidden_state_init_trainable = hidden_state_init_trainable
self._cell_state_init = cell_state_init
self._cell_state_init_trainable = cell_state_init_trainable
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
self._initialize()
def _initialize(self):
"""Initialize model."""
self._lstm_cell = tf.keras.layers.LSTMCell(
units=self._hidden_dim,
activation=self._hidden_nonlinearity,
kernel_initializer=self._hidden_w_init,
bias_initializer=self._hidden_b_init,
recurrent_activation=self._recurrent_nonlinearity,
recurrent_initializer=self._recurrent_w_init,
unit_forget_bias=self._forget_bias,
name='lstm_layer')
self._output_nonlinearity_layer = tf.keras.layers.Dense(
units=self._output_dim,
activation=self._output_nonlinearity,
kernel_initializer=self._output_w_init,
bias_initializer=self._output_b_init,
name='output_layer')
def network_input_spec(self):
"""Network input spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'full_input', 'step_input', 'step_hidden_input', 'step_cell_input'
]
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return [
'all_output', 'step_output', 'step_hidden', 'step_cell',
'init_hidden', 'init_cell'
]
# pylint: disable=arguments-differ
def _build(self,
all_input_var,
step_input_var,
step_hidden_var,
step_cell_var,
name=None):
"""Build model given input placeholder(s).
Args:
all_input_var (tf.Tensor): Place holder for entire time-series
inputs.
step_input_var (tf.Tensor): Place holder for step inputs.
step_hidden_var (tf.Tensor): Place holder for step hidden state.
step_cell_var (tf.Tensor): Place holder for step cell state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Step cell state.
tf.Tensor: Initial hidden state.
tf.Tensor: Initial cell state.
"""
del name
return lstm(
name='lstm',
lstm_cell=self._lstm_cell,
all_input_var=all_input_var,
step_input_var=step_input_var,
step_hidden_var=step_hidden_var,
step_cell_var=step_cell_var,
hidden_state_init=self._hidden_state_init,
hidden_state_init_trainable=self._hidden_state_init_trainable,
cell_state_init=self._cell_state_init,
cell_state_init_trainable=self._cell_state_init_trainable,
output_nonlinearity_layer=self._output_nonlinearity_layer)
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: The state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_lstm_cell']
del new_dict['_output_nonlinearity_layer']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()
| 8,038 | 38.79703 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/mlp.py | """MLP in TensorFlow."""
import tensorflow as tf
from garage.experiment import deterministic
def mlp(input_var,
output_dim,
hidden_sizes,
name,
input_var2=None,
concat_layer=-2,
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
"""Multi-layer perceptron (MLP).
It maps real-valued inputs to real-valued outputs.
Args:
input_var (tf.Tensor): Input tf.Tensor to the MLP.
output_dim (int): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
name (str): Network name, also the variable scope.
input_var2 (tf.Tensor): Second input tf.Tensor to the MLP if input
needs to be concatenated with a layer in the model.
        concat_layer (int): The index of the layer at which to concatenate
            input_var2 with the network. If input_var2 is not supplied, this
            argument is ignored. The indexing works like standard python list
            indexing. Index of 0 refers to the input layer (input_var) while
            an index of -1 points to the last hidden layer. The default
            points to the second layer from the end. If the model has only one
layer, input_var2 is concatenated with that layer.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
Return:
tf.Tensor: The output tf.Tensor of the MLP.
"""
n_layers = len(hidden_sizes) + 1
_merge_inputs = False
if input_var2 is not None:
_merge_inputs = True
if n_layers > 1:
_concat_layer = (concat_layer % n_layers + n_layers) % n_layers
else:
_concat_layer = 0
with tf.compat.v1.variable_scope(name):
l_hid = input_var
for idx, hidden_size in enumerate(hidden_sizes):
if _merge_inputs and idx == _concat_layer:
l_hid = tf.keras.layers.concatenate([l_hid, input_var2])
l_hid = tf.compat.v1.layers.dense(inputs=l_hid,
units=hidden_size,
activation=hidden_nonlinearity,
kernel_initializer=hidden_w_init,
bias_initializer=hidden_b_init,
name='hidden_{}'.format(idx))
if layer_normalization:
l_hid = tf.keras.layers.LayerNormalization()(l_hid)
if _merge_inputs and _concat_layer == len(hidden_sizes):
l_hid = tf.keras.layers.concatenate([l_hid, input_var2])
l_out = tf.compat.v1.layers.dense(inputs=l_hid,
units=output_dim,
activation=output_nonlinearity,
kernel_initializer=output_w_init,
bias_initializer=output_b_init,
name='output')
return l_out
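# --- Illustrative sketch, not part of the original garage file ---
# How the concat_layer index is normalised above: with
# n_layers = len(hidden_sizes) + 1, the expression
# (concat_layer % n_layers + n_layers) % n_layers resolves negative indices
# the same way Python list indexing does. Example values:
if __name__ == '__main__':
    cases = [((32, 32), -2), ((32, 32), 1), ((32,), -2)]
    for hidden_sizes, concat_layer in cases:
        n_layers = len(hidden_sizes) + 1
        resolved = ((concat_layer % n_layers + n_layers) % n_layers
                    if n_layers > 1 else 0)
        print(hidden_sizes, concat_layer, '->', resolved)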
| 4,569 | 43.803922 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/tf/models/mlp_dueling_model.py | """MLP Dueling Model."""
import tensorflow as tf
from garage.experiment import deterministic
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
class MLPDuelingModel(Model):
"""MLP Model with dueling network structure.
Args:
output_dim (int): Dimension of the network output.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means this MLP consists of two
hidden layers, each with 32 hidden units.
name (str): Model name, also the variable scope.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
output_dim,
name=None,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.initializers.glorot_uniform(
seed=deterministic.get_tf_seed_stream()),
output_b_init=tf.zeros_initializer(),
layer_normalization=False):
super().__init__(name)
self._output_dim = output_dim
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
# pylint: disable=arguments-differ
def _build(self, state_input, name=None):
"""Build model given input placeholder(s).
Args:
state_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
action_out = mlp(input_var=state_input,
output_dim=self._output_dim,
hidden_sizes=self._hidden_sizes,
name='action_value',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
state_out = mlp(input_var=state_input,
output_dim=1,
hidden_sizes=self._hidden_sizes,
name='state_value',
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
action_out_mean = tf.reduce_mean(action_out, 1)
        # calculate the advantage of performing a certain action
        # over the other actions in a particular state
action_out_advantage = action_out - tf.expand_dims(action_out_mean, 1)
q_func_out = state_out + action_out_advantage
return q_func_out
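# --- Illustrative sketch, not part of the original garage file ---
# The dueling head combines the two streams as
# Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)), which is what the tensor ops
# above compute. A NumPy mock with assumed toy values for one state and
# three actions:
if __name__ == '__main__':
    import numpy as np
    action_out = np.array([[1.0, 2.0, 6.0]])  # A(s, a), shape (N, num_actions)
    state_out = np.array([[0.5]])             # V(s), shape (N, 1)
    advantage = action_out - action_out.mean(axis=1, keepdims=True)
    q_values = state_out + advantage
    print(q_values)  # [[-1.5 -0.5  3.5]]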
| 4,871 | 43.697248 | 78 | py |