# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/__init__.py
"""Collection of dummy environments used in testing."""
from tests.fixtures.envs.dummy.base import DummyEnv
from tests.fixtures.envs.dummy.dummy_box_env import DummyBoxEnv
from tests.fixtures.envs.dummy.dummy_dict_env import DummyDictEnv
from tests.fixtures.envs.dummy.dummy_discrete_2d_env import DummyDiscrete2DEnv
from tests.fixtures.envs.dummy.dummy_discrete_env import DummyDiscreteEnv
from tests.fixtures.envs.dummy.dummy_discrete_pixel_env import (
DummyDiscretePixelEnv)
from tests.fixtures.envs.dummy.dummy_discrete_pixel_env_baselines import (
DummyDiscretePixelEnvBaselines)
from tests.fixtures.envs.dummy.dummy_multitask_box_env import (
DummyMultiTaskBoxEnv)
from tests.fixtures.envs.dummy.dummy_reward_box_env import DummyRewardBoxEnv
__all__ = [
'DummyEnv', 'DummyBoxEnv', 'DummyDictEnv', 'DummyDiscrete2DEnv',
'DummyDiscreteEnv', 'DummyDiscretePixelEnv',
'DummyDiscretePixelEnvBaselines', 'DummyMultiTaskBoxEnv',
'DummyRewardBoxEnv'
]

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/base.py
"""Dummy environment for testing purposes."""
import gym
class DummyEnv(gym.Env):
"""Base dummy environment.
Args:
random (bool): If observations are randomly generated or not.
obs_dim (iterable): Observation space dimension.
action_dim (iterable): Action space dimension.
"""
def __init__(self, random, obs_dim=(4, ), action_dim=(2, )):
self.random = random
self.state = None
self._obs_dim = obs_dim
self._action_dim = action_dim
@property
def observation_space(self):
"""Return an observation space."""
raise NotImplementedError
@property
def action_space(self):
"""Return an action space."""
raise NotImplementedError
def reset(self):
"""Reset the environment."""
raise NotImplementedError
def step(self, action):
"""Step the environment.
Args:
action (int): Action input.
"""
raise NotImplementedError
def render(self, mode='human'):
"""Render.
Args:
mode (str): Render mode.
"""

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/dummy_box_env.py
"""Dummy gym.spaces.Box environment for testing purposes."""
import gym
import numpy as np
from tests.fixtures.envs.dummy import DummyEnv
class DummyBoxEnv(DummyEnv):
"""A dummy gym.spaces.Box environment.
Args:
random (bool): If observations are randomly generated or not.
obs_dim (iterable): Observation space dimension.
action_dim (iterable): Action space dimension.
"""
def __init__(self, random=True, obs_dim=(4, ), action_dim=(2, )):
super().__init__(random, obs_dim, action_dim)
@property
def observation_space(self):
"""Return an observation space.
Returns:
gym.spaces: The observation space of the environment.
"""
return gym.spaces.Box(low=-1,
high=1,
shape=self._obs_dim,
dtype=np.float32)
@property
def action_space(self):
"""Return an action space.
Returns:
gym.spaces: The action space of the environment.
"""
return gym.spaces.Box(low=-5.0,
high=5.0,
shape=self._action_dim,
dtype=np.float32)
def reset(self):
"""Reset the environment.
Returns:
np.ndarray: The observation obtained after reset.
"""
return np.ones(self._obs_dim, dtype=np.float32)
def step(self, action):
"""Step the environment.
Args:
            action (np.ndarray): Action input.
Returns:
np.ndarray: Observation.
float: Reward.
bool: If the environment is terminated.
dict: Environment information.
"""
return self.observation_space.sample(), 0, False, dict(dummy='dummy')
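
# Hedged usage sketch (not part of the repository): DummyBoxEnv's contract.
# reset() always returns an all-ones observation; step() ignores its input
# apart from returning a fixed reward, done flag and info dict.
env = DummyBoxEnv(random=True, obs_dim=(4, ), action_dim=(2, ))
first_obs = env.reset()
assert first_obs.shape == (4, ) and (first_obs == 1.0).all()
obs, reward, done, info = env.step(env.action_space.sample())
assert obs.shape == (4, ) and reward == 0 and not done
assert info == {'dummy': 'dummy'}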

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/dummy_dict_env.py
"""Dummy akro.Dict environment for testing purposes."""
import akro
import gym
import numpy as np
from garage.envs import EnvSpec
from tests.fixtures.envs.dummy import DummyEnv
class DummyDictEnv(DummyEnv):
"""A dummy akro.Dict environment with predefined inner spaces.
Args:
random (bool): If observations are randomly generated or not.
obs_space_type (str): The type of the inner spaces of the
dict observation space.
act_space_type (str): The type of action space to mock.
"""
def __init__(self, random=True, obs_space_type='box',
act_space_type='box'):
assert obs_space_type in ['box', 'image', 'discrete']
assert act_space_type in ['box', 'discrete']
super().__init__(random)
self.obs_space_type = obs_space_type
self.act_space_type = act_space_type
self.spec = EnvSpec(action_space=self.action_space,
observation_space=self.observation_space)
@property
def observation_space(self):
"""Return the observation space.
Returns:
akro.Dict: Observation space.
"""
if self.obs_space_type == 'box':
return gym.spaces.Dict({
'achieved_goal':
gym.spaces.Box(low=-200.,
high=200.,
shape=(3, ),
dtype=np.float32),
'desired_goal':
gym.spaces.Box(low=-200.,
high=200.,
shape=(3, ),
dtype=np.float32),
'observation':
gym.spaces.Box(low=-200.,
high=200.,
shape=(25, ),
dtype=np.float32)
})
elif self.obs_space_type == 'image':
return gym.spaces.Dict({
'dummy':
gym.spaces.Box(low=0,
high=255,
shape=(100, 100, 3),
dtype=np.uint8),
})
else:
return gym.spaces.Dict({'dummy': gym.spaces.Discrete(5)})
@property
def action_space(self):
"""Return the action space.
Returns:
akro.Box: Action space.
"""
if self.act_space_type == 'box':
return akro.Box(low=-5.0, high=5.0, shape=(1, ), dtype=np.float32)
else:
return akro.Discrete(5)
def reset(self):
"""Reset the environment.
Returns:
numpy.ndarray: Observation after reset.
"""
return self.observation_space.sample()
def step(self, action):
"""Step the environment.
Args:
            action (np.ndarray or int): Action input.
Returns:
np.ndarray: Observation.
float: Reward.
bool: If the environment is terminated.
dict: Environment information.
"""
return self.observation_space.sample(), 0, True, dict()
# pylint: disable=no-self-use
def compute_reward(self, achieved_goal, goal, info):
"""Function to compute new reward.
Args:
achieved_goal (numpy.ndarray): Achieved goal.
goal (numpy.ndarray): Original desired goal.
info (dict): Extra information.
Returns:
float: New computed reward.
"""
del info
return np.sum(achieved_goal - goal)
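
# Hedged usage sketch (not part of the repository): sampling a dict
# observation and calling compute_reward, which returns the summed
# difference between the achieved and desired goals.
env = DummyDictEnv(obs_space_type='box', act_space_type='box')
obs = env.reset()
reward = env.compute_reward(obs['achieved_goal'], obs['desired_goal'],
                            info={})
print('computed reward:', float(reward))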

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/dummy_discrete_2d_env.py
import gym
import numpy as np
from tests.fixtures.envs.dummy import DummyEnv
class DummyDiscrete2DEnv(DummyEnv):
"""A dummy discrete environment."""
def __init__(self, random=True):
super().__init__(random)
self.shape = (2, 2)
self._observation_space = gym.spaces.Box(
low=-1, high=1, shape=self.shape, dtype=np.float32)
@property
def observation_space(self):
"""Return an observation space."""
return self._observation_space
@observation_space.setter
def observation_space(self, observation_space):
self._observation_space = observation_space
@property
def action_space(self):
"""Return an action space."""
return gym.spaces.Discrete(2)
def reset(self):
"""Reset the environment."""
self.state = np.ones(self.shape)
return self.state
def step(self, action):
"""Step the environment."""
if self.state is not None:
if self.random:
obs = self.observation_space.sample()
else:
obs = self.state + action / 10.
else:
raise RuntimeError(
"DummyEnv: reset() must be called before step()!")
return obs, 0, True, dict()

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/dummy_discrete_env.py
import gym
import numpy as np
from tests.fixtures.envs.dummy import DummyEnv
class DummyDiscreteEnv(DummyEnv):
"""A dummy discrete environment."""
def __init__(self, obs_dim=(1, ), action_dim=1, random=True):
super().__init__(random, obs_dim, action_dim)
@property
def observation_space(self):
"""Return an observation space."""
return gym.spaces.Box(
low=-1, high=1, shape=self._obs_dim, dtype=np.float32)
@property
def action_space(self):
"""Return an action space."""
return gym.spaces.Discrete(self._action_dim)
def reset(self):
"""Reset the environment."""
self.state = np.ones(self._obs_dim, dtype=np.float32)
return self.state
def step(self, action):
"""Step the environment."""
if self.state is not None:
if self.random:
obs = self.observation_space.sample()
else:
obs = self.state + action / 10.
else:
raise RuntimeError(
"DummyEnv: reset() must be called before step()!")
return obs, 0, True, dict()

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/dummy_discrete_pixel_env.py
"""A dummy discrete pixel env."""
from unittest import mock
import gym
import numpy as np
from tests.fixtures.envs.dummy import DummyEnv
class DummyDiscretePixelEnv(DummyEnv):
"""A dummy discrete pixel environment.
    It follows the Atari game convention, where actions are 'NOOP', 'FIRE', ...
    It also provides self.unwrapped.ale.lives and get_action_meanings for
    testing. Several properties are fixed for testing purposes:
    - Observations are
        after reset    : np.ones(self._shape).
        action 1 (FIRE): np.full(self._shape, 2).
        otherwise      : random if self.random is True,
                         otherwise previous state + action.
    - The environment has 5 lives.
    - Done will be True if
        - all 5 lives are exhausted, or
        - env.step(2) is followed by env.step(1).
Args:
random (bool): Whether observations are generated randomly.
"""
def __init__(self, random=True):
super().__init__(random, obs_dim=(100, 100, 3), action_dim=5)
self.unwrapped.get_action_meanings = self._get_action_meanings
self.unwrapped.ale = mock.Mock()
self.unwrapped.ale.lives = self.get_lives
self._observation_space = gym.spaces.Box(low=0,
high=255,
shape=self._obs_dim,
dtype=np.uint8)
self.step_called = 0
self._lives = None
self._prev_action = None
@property
def observation_space(self):
"""akro.Box: Observation space of this environment."""
return self._observation_space
@observation_space.setter
def observation_space(self, observation_space):
"""Observation space setter.
Args:
observation_space (akro.Box): Observation space to be set.
"""
self._observation_space = observation_space
@property
def action_space(self):
"""akro.Discrete: an action space."""
return gym.spaces.Discrete(self._action_dim)
# pylint: disable=no-self-use
def _get_action_meanings(self):
"""Action meanings.
Returns:
list[str]: Meaning of action, indices are aligned with actions.
"""
return ['NOOP', 'FIRE', 'SLEEP', 'EAT', 'PLAY']
def get_lives(self):
"""Get number of lives.
Returns:
int: Number of lives remaining.
"""
return self._lives
def reset(self):
"""Reset the environment.
Returns:
np.ndarray: Environment state.
"""
self.state = np.ones(self._obs_dim, dtype=np.uint8)
self._lives = 5
self.step_called = 0
return self.state
def step(self, action):
"""Step the environment.
        Before gym fixed the overflow issue for sample() in
        np.uint8 environments, we handle the sampling here.
        We need high=256 since np.random.uniform samples from [low, high)
        (includes low, but excludes high).
Args:
action (int): Action.
Returns:
np.ndarray: observation.
float: reward.
bool: terminal signal.
dict: extra environment info.
Raises:
RuntimeError: step when empty lives left.
"""
done = False
if self.state is not None:
# Simulating FIRE action
if action == 1:
if self._prev_action == 2:
done = True
obs = np.full(self._obs_dim, 2, dtype=np.uint8)
else:
if self.random:
obs = np.random.uniform(low=0,
high=256,
size=self._obs_dim).astype(
np.uint8)
else:
obs = self.state + action
if self._lives == 0:
raise RuntimeError('DummyEnv: Cannot step when lives = 0!')
self._lives -= 1
if self._lives == 0:
done = True
else:
raise RuntimeError(
'DummyEnv: reset() must be called before step()!')
self.step_called += 1
self._prev_action = action
return obs, 0, done, {'ale.lives': self._lives}
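
# Hedged usage sketch (not part of the repository): the two documented
# termination paths. FIRE (action 1) immediately after SLEEP (action 2)
# ends the episode, and exhausting all five lives also sets done.
env = DummyDiscretePixelEnv(random=True)
env.reset()
env.step(2)                     # SLEEP
_, _, done, _ = env.step(1)     # FIRE right after SLEEP -> done
assert done
env.reset()
done, steps = False, 0
while not done:                 # each NOOP step burns a life; five end it
    _, _, done, info = env.step(0)
    steps += 1
assert steps == 5 and info['ale.lives'] == 0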

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/dummy_discrete_pixel_env_baselines.py
import gym
import numpy as np
from tests.fixtures.envs.dummy import DummyEnv
class LazyFrames(object):
def __init__(self, frames):
"""
LazyFrames class from baselines.
Openai baselines use this class for FrameStack environment
wrapper. It is used for testing garage.envs.wrappers.AtariEnv.
garge.envs.wrapper.AtariEnv is used when algorithms are trained
using baselines wrappers, e.g. during benchmarking.
"""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
class DummyDiscretePixelEnvBaselines(DummyEnv):
"""
A dummy discrete pixel environment.
This environment is for testing garge.envs.wrapper.AtariEnv.
"""
def __init__(self):
super().__init__(random=False, obs_dim=(10, 10, 3), action_dim=5)
self._observation_space = gym.spaces.Box(
low=0, high=255, shape=self._obs_dim, dtype=np.uint8)
@property
def observation_space(self):
"""Return an observation space."""
return self._observation_space
@property
def action_space(self):
"""Return an action space."""
return gym.spaces.Discrete(self._action_dim)
def step(self, action):
"""gym.Env step function."""
obs = self.observation_space.sample()
return LazyFrames([obs]), 0, True, dict()
def reset(self, **kwargs):
"""gym.Env reset function."""
obs = np.ones(self._obs_dim, dtype=np.uint8)
return LazyFrames([obs])
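
# Hedged usage sketch (not part of the repository): LazyFrames defers the
# np.concatenate until the object is first coerced to an array, mirroring
# the baselines FrameStack behaviour that garage's AtariEnv has to unwrap.
env = DummyDiscretePixelEnvBaselines()
lazy_obs = env.reset()
arr = np.asarray(lazy_obs)      # triggers LazyFrames._force()
assert arr.shape == (10, 10, 3) and arr.dtype == np.uint8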

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/dummy_multitask_box_env.py
"""Dummy gym.spaces.Box environment for testing purposes."""
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.helpers import choices
class DummyMultiTaskBoxEnv(DummyBoxEnv):
"""A dummy gym.spaces.Box multitask environment.
Args:
random (bool): If observations are randomly generated or not.
obs_dim (iterable): Observation space dimension.
action_dim (iterable): Action space dimension.
"""
def __init__(self, random=True, obs_dim=(4, ), action_dim=(2, )):
super().__init__(random, obs_dim, action_dim)
self.task = 'dummy1'
def sample_tasks(self, n):
"""Sample a list of `num_tasks` tasks.
Args:
n (int): Number of tasks to sample.
Returns:
list[str]: A list of tasks.
"""
return choices(self.all_task_names, k=n)
@property
def all_task_names(self):
"""list[str]: Return a list of dummy task names."""
return ['dummy1', 'dummy2', 'dummy3']
def set_task(self, task):
"""Reset with a task.
Args:
task (str): A task.
"""
self.task = task
def step(self, action):
"""Step the environment.
Args:
            action (np.ndarray): Action input.
Returns:
np.ndarray: Observation.
float: Reward.
bool: If the environment is terminated.
dict: Environment information.
"""
return (self.observation_space.sample(), 0, False,
dict(dummy='dummy', task_name=self.task))
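
# Hedged usage sketch (not part of the repository): sampling and switching
# tasks; the active task name is echoed back through the step info dict.
env = DummyMultiTaskBoxEnv()
tasks = env.sample_tasks(2)
assert all(t in env.all_task_names for t in tasks)
env.set_task(tasks[0])
_, _, _, info = env.step(env.action_space.sample())
assert info['task_name'] == tasks[0]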

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/dummy/dummy_reward_box_env.py
from tests.fixtures.envs.dummy import DummyBoxEnv
class DummyRewardBoxEnv(DummyBoxEnv):
"""A dummy box environment."""
def __init__(self, random=True):
super().__init__(random)
def step(self, action):
"""Step the environment."""
if action == 0:
reward = 10
else:
reward = -10
return self.observation_space.sample(), reward, True, dict()

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/wrappers/__init__.py
from tests.fixtures.envs.wrappers.reshape_observation import ReshapeObservation
__all__ = ['ReshapeObservation']

# File: CSD-locomotion-master/garaged/tests/fixtures/envs/wrappers/reshape_observation.py
"""Reshaping Observation for gym.Env."""
import gym
import numpy as np
class ReshapeObservation(gym.Wrapper):
"""
Reshaping Observation wrapper for gym.Env.
This wrapper convert the observations into the given shape.
Args:
env (gym.Env): The environment to be wrapped.
shape (list[int]): Target shape to be applied on the observations.
"""
def __init__(self, env, shape):
super().__init__(env)
assert np.prod(shape) == np.prod(env.observation_space.shape)
_low = env.observation_space.low.flatten()[0]
_high = env.observation_space.high.flatten()[0]
self._observation_space = gym.spaces.Box(
_low, _high, shape=shape, dtype=env.observation_space.dtype)
self._shape = shape
@property
def observation_space(self):
"""gym.Env observation space."""
return self._observation_space
@observation_space.setter
def observation_space(self, observation_space):
self._observation_space = observation_space
def _observation(self, obs):
return obs.reshape(self._shape)
def reset(self):
"""gym.Env reset function."""
return self._observation(self.env.reset())
def step(self, action):
"""gym.Env step function."""
obs, reward, done, info = self.env.step(action)
return self._observation(obs), reward, done, info
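
# Hedged usage sketch (not part of the repository): reshaping a flat
# 4-element Box observation into a 2x2 one. The element counts must match,
# which the assert in __init__ enforces.
from tests.fixtures.envs.dummy import DummyBoxEnv

env = ReshapeObservation(DummyBoxEnv(obs_dim=(4, )), shape=(2, 2))
obs = env.reset()
assert obs.shape == (2, 2)
assert env.observation_space.shape == (2, 2)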

# File: CSD-locomotion-master/garaged/tests/fixtures/experiment/__init__.py
from tests.fixtures.experiment.fixture_experiment import fixture_exp
__all__ = ['fixture_exp']

# File: CSD-locomotion-master/garaged/tests/fixtures/experiment/fixture_experiment.py
"""A dummy experiment fixture."""
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import VPG
from garage.tf.policies import CategoricalMLPPolicy
# pylint: disable=missing-return-type-doc
def fixture_exp(snapshot_config, sess):
"""Dummy fixture experiment function.
Args:
snapshot_config (garage.experiment.SnapshotConfig): The snapshot
configuration used by LocalRunner to create the snapshotter.
If None, it will create one with default settings.
sess (tf.Session): An optional TensorFlow session.
A new session will be created immediately if not provided.
Returns:
np.ndarray: Values of the parameters evaluated in
the current session
"""
with LocalTFRunner(snapshot_config=snapshot_config, sess=sess) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(8, 8))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
runner.setup(algo, env)
runner.train(n_epochs=5, batch_size=100)
return policy.get_param_values()

# File: CSD-locomotion-master/garaged/tests/fixtures/models/__init__.py
"""Mock models for testing."""
from tests.fixtures.models.simple_categorical_gru_model import (
SimpleCategoricalGRUModel)
from tests.fixtures.models.simple_categorical_lstm_model import (
SimpleCategoricalLSTMModel)
from tests.fixtures.models.simple_categorical_mlp_model import (
SimpleCategoricalMLPModel)
from tests.fixtures.models.simple_cnn_model import SimpleCNNModel
from tests.fixtures.models.simple_cnn_model_with_max_pooling import (
SimpleCNNModelWithMaxPooling)
from tests.fixtures.models.simple_gaussian_cnn_model import (
SimpleGaussianCNNModel)
from tests.fixtures.models.simple_gaussian_gru_model import (
SimpleGaussianGRUModel)
from tests.fixtures.models.simple_gaussian_lstm_model import (
SimpleGaussianLSTMModel)
from tests.fixtures.models.simple_gaussian_mlp_model import (
SimpleGaussianMLPModel)
from tests.fixtures.models.simple_gru_model import SimpleGRUModel
from tests.fixtures.models.simple_lstm_model import SimpleLSTMModel
from tests.fixtures.models.simple_mlp_merge_model import SimpleMLPMergeModel
from tests.fixtures.models.simple_mlp_model import SimpleMLPModel
__all__ = [
'SimpleCategoricalGRUModel',
'SimpleCategoricalLSTMModel',
'SimpleCategoricalMLPModel',
'SimpleCNNModel',
'SimpleCNNModelWithMaxPooling',
'SimpleGaussianCNNModel',
'SimpleGaussianGRUModel',
'SimpleGaussianLSTMModel',
'SimpleGaussianMLPModel',
'SimpleGRUModel',
'SimpleLSTMModel',
'SimpleMLPMergeModel',
'SimpleMLPModel',
]

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_categorical_gru_model.py
"""Simple CategoricalGRUModel for testing."""
import tensorflow_probability as tfp
from tests.fixtures.models.simple_gru_model import SimpleGRUModel
class SimpleCategoricalGRUModel(SimpleGRUModel):
"""Simple CategoricalGRUModel for testing.
Args:
output_dim (int): Dimension of the network output.
hidden_dim (int): Hidden dimension for GRU cell.
name (str): Policy name, also the variable scope.
args: Extra arguments which are not used.
kwargs: Extra keyword arguments which are not used.
"""
def __init__(self, output_dim, hidden_dim, name, *args, **kwargs):
super().__init__(output_dim, hidden_dim, name)
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'all_output', 'step_output', 'step_hidden', 'init_hidden', 'dist'
]
def _build(self, obs_input, step_obs_input, step_hidden, name=None):
"""Build model.
Args:
obs_input (tf.Tensor): Entire time-series observation input.
step_obs_input (tf.Tensor): Single timestep observation input.
step_hidden (tf.Tensor): Hidden state for step.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Initial hidden state.
tfp.distributions.OneHotCategorical: Distribution.
"""
outputs, output, step_hidden, hidden_init_var = super()._build(
obs_input, step_obs_input, step_hidden, name)
dist = tfp.distributions.OneHotCategorical(outputs)
return outputs, output, step_hidden, hidden_init_var, dist

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_categorical_lstm_model.py
"""Simple CategoricalLSTMModel for testing."""
import tensorflow_probability as tfp
from tests.fixtures.models.simple_lstm_model import SimpleLSTMModel
class SimpleCategoricalLSTMModel(SimpleLSTMModel):
"""Simple CategoricalLSTMModel for testing.
Args:
output_dim (int): Dimension of the network output.
hidden_dim (int): Hidden dimension for LSTM cell.
name (str): Policy name, also the variable scope.
args: Extra arguments which are not used.
kwargs: Extra keyword arguments which are not used.
"""
def __init__(self, output_dim, hidden_dim, name, *args, **kwargs):
super().__init__(output_dim, hidden_dim, name)
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return [
'all_output', 'step_output', 'step_hidden', 'step_cell',
'init_hidden', 'init_cell', 'dist'
]
def _build(self,
obs_input,
step_obs_input,
step_hidden,
step_cell,
name=None):
"""Build model.
Args:
obs_input (tf.Tensor): Entire time-series observation input.
step_obs_input (tf.Tensor): Single timestep observation input.
step_hidden (tf.Tensor): Hidden state for step.
step_cell (tf.Tensor): Cell state for step.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Entire time-series outputs.
tf.Tensor: Step output.
tf.Tensor: Step hidden state.
tf.Tensor: Step cell state.
tf.Tensor: Initial hidden state.
tf.Tensor: Initial cell state.
tfp.distributions.OneHotCategorical: Distribution.
"""
(outputs, output, step_hidden, step_cell, hidden_init_var,
cell_init_var) = super()._build(obs_input, step_obs_input,
step_hidden, step_cell, name)
dist = tfp.distributions.OneHotCategorical(outputs)
return (outputs, output, step_hidden, step_cell, hidden_init_var,
cell_init_var, dist)

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_categorical_mlp_model.py
"""Simple CategoricalMLPModel for testing."""
import tensorflow_probability as tfp
from tests.fixtures.models.simple_mlp_model import SimpleMLPModel
class SimpleCategoricalMLPModel(SimpleMLPModel):
"""Simple CategoricalMLPModel for testing.
Args:
output_dim (int): Dimension of the network output.
name (str): Policy name, also the variable scope.
args: Extra arguments which are not used.
kwargs: Extra keyword arguments which are not used.
"""
def __init__(self, output_dim, name, *args, **kwargs):
super().__init__(output_dim, name)
def network_output_spec(self):
"""Network output spec.
Returns:
list[str]: Name of the model outputs, in order.
"""
return ['prob', 'dist']
def _build(self, obs_input, name=None):
"""Build model.
Args:
obs_input (tf.Tensor): Observation inputs.
name (str): Name of the model, also the name scope.
Returns:
tf.Tensor: Network outputs.
tfp.distributions.OneHotCategorical: Distribution.
"""
prob = super()._build(obs_input, name)
dist = tfp.distributions.OneHotCategorical(prob)
return prob, dist

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_cnn_model.py
"""Simple CNNModel for testing."""
import tensorflow as tf
from garage.tf.models import Model
class SimpleCNNModel(Model):
"""Simple CNNModel for testing.
Args:
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
are two convolutional layers. The filter for the first layer have 3
channels and its shape is (3 x 5), while the filter for the second
layer have 32 channels and its shape is (3 x 3).
strides(tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
name (str): Model name, also the variable scope.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
"""
def __init__(self,
filters,
strides,
padding,
name=None,
hidden_nonlinearity=None,
hidden_w_init=None,
hidden_b_init=None):
del hidden_nonlinearity, hidden_w_init, hidden_b_init
super().__init__(name)
self.filters = filters
self.strides = strides
self.padding = padding
# pylint: disable=arguments-differ
def _build(self, obs_input, name=None):
"""Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
height_size = obs_input.get_shape().as_list()[1]
width_size = obs_input.get_shape().as_list()[2]
for filter_iter, stride in zip(self.filters, self.strides):
if self.padding == 'SAME':
height_size = int((height_size + stride - 1) / stride)
width_size = int((width_size + stride - 1) / stride)
else:
height_size = int(
(height_size - filter_iter[1][0]) / stride) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
flatten_shape = height_size * width_size * self.filters[-1][0]
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
return tf.fill((tf.shape(obs_input)[0], flatten_shape), return_var)
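
# Hedged sketch (not part of the repository) of the output-size arithmetic
# used in _build above: 'SAME' padding gives ceil(size / stride), while
# 'VALID' gives floor((size - filter_size) / stride) + 1.
def conv_out_size(size, filter_size, stride, padding):
    """Spatial output size of one conv layer, mirroring _build."""
    if padding == 'SAME':
        return (size + stride - 1) // stride
    return (size - filter_size) // stride + 1

# A 28-pixel input through a 5x5 filter at stride 2: SAME keeps 14 pixels,
# VALID shrinks to 12.
assert conv_out_size(28, 5, 2, 'SAME') == 14
assert conv_out_size(28, 5, 2, 'VALID') == 12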

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_cnn_model_with_max_pooling.py
"""Simple CNNModel with max pooling for testing."""
import tensorflow as tf
from garage.tf.models import Model
class SimpleCNNModelWithMaxPooling(Model):
"""Simple CNNModel with max pooling for testing.
Args:
filters (Tuple[Tuple[int, Tuple[int, int]], ...]): Number and dimension
of filters. For example, ((3, (3, 5)), (32, (3, 3))) means there
are two convolutional layers. The filter for the first layer have 3
channels and its shape is (3 x 5), while the filter for the second
layer have 32 channels and its shape is (3 x 3).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
name (str): Model name, also the variable scope of the cnn.
padding (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
pool_strides (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
pool_shapes (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
shape (2, 2).
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
"""
def __init__(self,
filters,
strides,
padding,
pool_strides,
pool_shapes,
name=None,
hidden_nonlinearity=None,
hidden_w_init=None,
hidden_b_init=None):
del hidden_nonlinearity, hidden_w_init, hidden_b_init
super().__init__(name)
self.filters = filters
self.strides = strides
self.padding = padding
self.pool_strides = pool_strides
self.pool_shapes = pool_shapes
# pylint: disable=arguments-differ
def _build(self, obs_input, name=None):
"""Build model given input placeholder(s).
Args:
obs_input (tf.Tensor): Tensor input for state.
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
garage.tf.models.Sequential.
Return:
tf.Tensor: Tensor output of the model.
"""
del name
height_size = obs_input.get_shape().as_list()[1]
width_size = obs_input.get_shape().as_list()[2]
for filter_iter, stride in zip(self.filters, self.strides):
if self.padding == 'SAME':
height_size = int((height_size + stride - 1) / stride)
width_size = int((width_size + stride - 1) / stride)
new_height = height_size + self.pool_strides[0] - 1
height_size = int(new_height / self.pool_strides[0])
new_width = width_size + self.pool_strides[1] - 1
width_size = int(new_width / self.pool_strides[1])
else:
height_size = int(
(height_size - filter_iter[1][0]) / stride) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
new_height = height_size - self.pool_shapes[0]
height_size = int(new_height / self.pool_strides[0]) + 1
                new_width = width_size - self.pool_shapes[1]
width_size = int(new_width / self.pool_strides[1]) + 1
flatten_shape = height_size * width_size * self.filters[-1][0]
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
return tf.fill((tf.shape(obs_input)[0], flatten_shape), return_var)

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_gaussian_cnn_model.py
import tensorflow as tf
from garage.tf.distributions import DiagonalGaussian
from garage.tf.models import Model
class SimpleGaussianCNNModel(Model):
"""Simple GaussianCNNModel for testing."""
def __init__(self,
output_dim,
name='SimpleGaussianCNNModel',
*args,
**kwargs):
super().__init__(name)
self.output_dim = output_dim
def network_output_spec(self):
return ['sample', 'mean', 'log_std', 'std_param', 'dist']
def _build(self, obs_input, name=None):
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
mean = tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)
log_std = tf.fill((tf.shape(obs_input)[0], self.output_dim), 0.5)
action = mean + log_std * 0.5
dist = DiagonalGaussian(self.output_dim)
# action will be 0.5 + 0.5 * 0.5 = 0.75
return action, mean, log_std, log_std, dist

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_gaussian_gru_model.py
import tensorflow as tf
from garage.tf.distributions import DiagonalGaussian
from garage.tf.models import Model
class SimpleGaussianGRUModel(Model):
"""Simple GaussianGRUModel for testing."""
def __init__(self,
output_dim,
hidden_dim,
name='SimpleGaussianGRUModel',
*args,
**kwargs):
super().__init__(name)
self.output_dim = output_dim
self.hidden_dim = hidden_dim
def network_input_spec(self):
"""Network input spec."""
return ['full_input', 'step_input', 'step_hidden_input']
def network_output_spec(self):
"""Network output spec."""
return [
'mean', 'step_mean', 'log_std', 'step_log_std', 'step_hidden',
'init_hidden', 'dist'
]
def _build(self,
obs_input,
step_obs_input,
step_hidden,
step_cell,
name=None):
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
mean = log_std = tf.fill(
(tf.shape(obs_input)[0], tf.shape(obs_input)[1], self.output_dim),
return_var)
step_mean = step_log_std = tf.fill(
(tf.shape(step_obs_input)[0], self.output_dim), return_var)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
dist = DiagonalGaussian(self.output_dim)
return (mean, step_mean, log_std, step_log_std, step_hidden,
hidden_init_var, dist)

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_gaussian_lstm_model.py
import tensorflow as tf
from garage.tf.distributions import DiagonalGaussian
from garage.tf.models import Model
class SimpleGaussianLSTMModel(Model):
"""Simple GaussianLSTMModel for testing."""
def __init__(self,
output_dim,
hidden_dim,
name='SimpleGaussianLSTMModel',
*args,
**kwargs):
super().__init__(name)
self.output_dim = output_dim
self.hidden_dim = hidden_dim
def network_input_spec(self):
"""Network input spec."""
return [
'full_input', 'step_input', 'step_hidden_input', 'step_cell_input'
]
def network_output_spec(self):
"""Network output spec."""
return [
'mean', 'step_mean', 'log_std', 'step_log_std', 'step_hidden',
'step_cell', 'init_hidden', 'init_cell', 'dist'
]
def _build(self,
obs_input,
step_obs_input,
step_hidden,
step_cell,
name=None):
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
mean = log_std = tf.fill(
(tf.shape(obs_input)[0], tf.shape(obs_input)[1], self.output_dim),
return_var)
step_mean = step_log_std = tf.fill(
(tf.shape(step_obs_input)[0], self.output_dim), return_var)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
cell_init_var = tf.compat.v1.get_variable(
name='initial_cell',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
dist = DiagonalGaussian(self.output_dim)
# sample = 0.5 * 0.5 + 0.5 = 0.75
return (mean, step_mean, log_std, step_log_std, step_hidden, step_cell,
hidden_init_var, cell_init_var, dist)

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_gaussian_mlp_model.py
import numpy as np
import tensorflow as tf
from garage.tf.distributions import DiagonalGaussian
from garage.tf.models import Model
class SimpleGaussianMLPModel(Model):
"""Simple GaussianMLPModel for testing."""
def __init__(self,
output_dim,
name='SimpleGaussianMLPModel',
*args,
**kwargs):
super().__init__(name)
self.output_dim = output_dim
def network_output_spec(self):
return ['mean', 'log_std', 'std_param', 'dist']
def _build(self, obs_input, name=None):
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
mean = tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)
log_std = tf.fill((tf.shape(obs_input)[0], self.output_dim),
np.log(0.5))
dist = DiagonalGaussian(self.output_dim)
# action will be 0.5 + 0.5 * 0.5 = 0.75
return mean, log_std, log_std, dist

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_gru_model.py
import tensorflow as tf
from garage.tf.models import Model
class SimpleGRUModel(Model):
"""Simple GRUModel for testing."""
def __init__(self,
output_dim,
hidden_dim,
name='SimpleGRUModel',
*args,
**kwargs):
super().__init__(name)
self.output_dim = output_dim
self.hidden_dim = hidden_dim
def network_input_spec(self):
"""Network input spec."""
return ['full_input', 'step_input', 'step_hidden_input']
def network_output_spec(self):
"""Network output spec."""
return ['all_output', 'step_output', 'step_hidden', 'init_hidden']
def _build(self, obs_input, step_obs_input, step_hidden, name=None):
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
outputs = tf.fill(
(tf.shape(obs_input)[0], tf.shape(obs_input)[1], self.output_dim),
return_var)
output = tf.fill((tf.shape(step_obs_input)[0], self.output_dim),
return_var)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
return outputs, output, step_hidden, hidden_init_var

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_lstm_model.py
import tensorflow as tf
from garage.tf.models import Model
class SimpleLSTMModel(Model):
"""Simple LSTMModel for testing."""
def __init__(self,
output_dim,
hidden_dim,
name='SimpleLSTMModel',
*args,
**kwargs):
super().__init__(name)
self.output_dim = output_dim
self.hidden_dim = hidden_dim
def network_input_spec(self):
"""Network input spec."""
return [
'full_input', 'step_input', 'step_hidden_input', 'step_cell_input'
]
def network_output_spec(self):
"""Network output spec."""
return [
'all_output', 'step_output', 'step_hidden', 'step_cell',
'init_hidden', 'init_cell'
]
def _build(self,
obs_input,
step_obs_input,
step_hidden,
step_cell,
name=None):
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
outputs = tf.fill(
(tf.shape(obs_input)[0], tf.shape(obs_input)[1], self.output_dim),
return_var)
output = tf.fill((tf.shape(step_obs_input)[0], self.output_dim),
return_var)
hidden_init_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
cell_init_var = tf.compat.v1.get_variable(
name='initial_cell',
shape=(self.hidden_dim, ),
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.float32)
return (outputs, output, step_hidden, step_cell, hidden_init_var,
cell_init_var)

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_mlp_merge_model.py
import tensorflow as tf
from garage.tf.models import Model
class SimpleMLPMergeModel(Model):
"""Simple MLPMergeModel for testing."""
def __init__(self, output_dim, name=None, *args, **kwargs):
super().__init__(name)
self.output_dim = output_dim
def network_input_spec(self):
"""Network input spec."""
return ['input_var1', 'input_var2']
def _build(self, obs_input, act_input, name=None):
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
return tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)

# File: CSD-locomotion-master/garaged/tests/fixtures/models/simple_mlp_model.py
import tensorflow as tf
from garage.tf.models import Model
class SimpleMLPModel(Model):
"""Simple MLPModel for testing."""
def __init__(self, output_dim, name=None, *args, **kwargs):
super().__init__(name)
self.output_dim = output_dim
def _build(self, obs_input, name=None):
return_var = tf.compat.v1.get_variable(
'return_var', (), initializer=tf.constant_initializer(0.5))
return tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)

# File: CSD-locomotion-master/garaged/tests/fixtures/policies/__init__.py
from tests.fixtures.policies.dummy_policy import (DummyPolicy,
DummyPolicyWithoutVectorized)
from tests.fixtures.policies.dummy_recurrent_policy import (
DummyRecurrentPolicy)
__all__ = [
'DummyPolicy', 'DummyRecurrentPolicy', 'DummyPolicyWithoutVectorized'
]

# File: CSD-locomotion-master/garaged/tests/fixtures/policies/dummy_policy.py
"""Dummy Policy for algo tests."""
import numpy as np
from garage.np.policies import Policy
from tests.fixtures.distributions import DummyDistribution
class DummyPolicy(Policy):
"""Dummy Policy.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
"""
def __init__(self, env_spec):
# pylint: disable=super-init-not-called
self._env_spec = env_spec
self._param = []
self._param_values = np.random.uniform(-1, 1, 1000)
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Predicted action.
dict: Distribution parameters.
"""
return self.action_space.sample(), dict(dummy='dummy', mean=0.)
def get_actions(self, observations):
"""Get multiple actions from this policy for the input observations.
Args:
observations (numpy.ndarray): Observations from environment.
Returns:
numpy.ndarray: Predicted actions.
dict: Distribution parameters.
"""
n = len(observations)
action, action_info = self.get_action(None)
return [action] * n, action_info
def get_params_internal(self):
"""Return a list of policy internal params.
Returns:
list: Policy parameters.
"""
return self._param
def get_param_values(self):
"""Return values of params.
Returns:
np.ndarray: Policy parameters values.
"""
return self._param_values
@property
def distribution(self):
"""Return the distribution.
Returns:
garage.distribution: Policy distribution.
"""
return DummyDistribution()
@property
def vectorized(self):
"""Vectorized or not.
Returns:
bool: True if vectorized.
"""
return True
class DummyPolicyWithoutVectorized(DummyPolicy):
"""Dummy Policy without vectorized.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
"""
def __init__(self, env_spec):
super().__init__(env_spec=env_spec)
@property
def vectorized(self):
"""Vectorized or not.
Returns:
bool: True if vectorized.
"""
return False
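
# Hedged usage sketch (not part of the repository): DummyPolicy just samples
# from the env's action space, so it can stand in for a trained policy in
# algorithm tests. Assumes the np Policy base exposes action_space from the
# given EnvSpec, as in the garage version these fixtures target.
from garage.envs import EnvSpec
from tests.fixtures.envs.dummy import DummyBoxEnv

env = DummyBoxEnv()
policy = DummyPolicy(EnvSpec(env.observation_space, env.action_space))
action, info = policy.get_action(env.reset())
actions, _ = policy.get_actions([env.reset()] * 3)
assert len(actions) == 3 and info == {'dummy': 'dummy', 'mean': 0.}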

# File: CSD-locomotion-master/garaged/tests/fixtures/policies/dummy_recurrent_policy.py
"""Dummy Recurrent Policy for algo tests."""
import numpy as np
from garage.np.policies import Policy
from tests.fixtures.distributions import DummyDistribution
class DummyRecurrentPolicy(Policy):
"""Dummy Recurrent Policy.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
"""
def __init__(
self,
env_spec,
):
super().__init__(env_spec=env_spec)
self.params = []
self.param_values = np.random.uniform(-1, 1, 1000)
def get_action(self, observation):
"""Get single action from this policy for the input observation.
Args:
observation (numpy.ndarray): Observation from environment.
Returns:
numpy.ndarray: Predicted action.
dict: Distribution parameters. Empty because no distribution is
used.
"""
return self.action_space.sample(), dict()
def get_params_internal(self):
"""Return a list of policy internal params.
Returns:
list: Policy parameters.
"""
return self.params
def get_param_values(self):
"""Return values of params.
Returns:
np.ndarray: Policy parameters values.
"""
return self.param_values
@property
def distribution(self):
"""Return the distribution.
Returns:
garage.distribution: Policy distribution.
"""
return DummyDistribution()

# File: CSD-locomotion-master/garaged/tests/fixtures/q_functions/__init__.py
from tests.fixtures.q_functions.simple_q_function import SimpleQFunction
__all__ = ['SimpleQFunction']

# File: CSD-locomotion-master/garaged/tests/fixtures/q_functions/simple_q_function.py
"""Simple QFunction for testing."""
import tensorflow as tf
from garage.tf.q_functions import QFunction
from tests.fixtures.models import SimpleMLPModel
class SimpleQFunction(QFunction):
"""Simple QFunction for testing.
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Name of the q-function, also serves as the variable scope.
"""
def __init__(self, env_spec, name='SimpleQFunction'):
super().__init__(name)
        # Use the flattened observation dimension rather than the raw
        # observation shape (env_spec.observation_space.shape).
self.obs_dim = (env_spec.observation_space.flat_dim, )
action_dim = env_spec.observation_space.flat_dim
self.model = SimpleMLPModel(output_dim=action_dim)
self._q_val = None
self._initialize()
def _initialize(self):
"""Initialize QFunction."""
obs_ph = tf.compat.v1.placeholder(tf.float32, (None, ) + self.obs_dim,
name='obs')
with tf.compat.v1.variable_scope(self.name, reuse=False) as vs:
self._variable_scope = vs
self._q_val = self.model.build(obs_ph).outputs
@property
def q_vals(self):
"""Return the Q values, the output of the network.
Return:
list[tf.Tensor]: Q values.
"""
return self._q_val
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
self.__dict__.update(state)
self._initialize()
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = self.__dict__.copy()
del new_dict['_q_val']
return new_dict

# File: CSD-locomotion-master/garaged/tests/fixtures/regressors/__init__.py
from tests.fixtures.regressors.simple_gaussian_cnn_regressor import (
SimpleGaussianCNNRegressor)
from tests.fixtures.regressors.simple_gaussian_mlp_regressor import (
SimpleGaussianMLPRegressor)
from tests.fixtures.regressors.simple_mlp_regressor import (SimpleMLPRegressor)
__all__ = [
'SimpleGaussianCNNRegressor', 'SimpleGaussianMLPRegressor',
'SimpleMLPRegressor'
]

# File: CSD-locomotion-master/garaged/tests/fixtures/regressors/simple_gaussian_cnn_regressor.py
"""Simple GaussianCNNRegressor for testing."""
import numpy as np
import tensorflow as tf
from garage.tf.regressors import StochasticRegressor
from tests.fixtures.models import SimpleGaussianCNNModel
class SimpleGaussianCNNRegressor(StochasticRegressor):
"""Simple GaussianCNNRegressor for testing.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
        args (list): Unused positional arguments.
kwargs (dict): Unused keyword arguments.
"""
def __init__(self, input_shape, output_dim, name, *args, **kwargs):
super().__init__(input_shape, output_dim, name)
del args, kwargs
self.model = SimpleGaussianCNNModel(output_dim=self._output_dim)
self._ys = None
self._network = None
self._initialize()
@property
def recurrent(self):
"""bool: If this module has a hidden state."""
return False
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
@property
def distribution(self):
"""garage.tf.distributions.DiagonalGaussian: Distribution."""
return self._network.dist
def dist_info_sym(self, input_var, state_info_vars=None, name='default'):
"""Create a symbolic graph of the distribution parameters.
Args:
input_var (tf.Tensor): tf.Tensor of the input data.
state_info_vars (dict): a dictionary whose values should contain
information about the state of the policy at the time it
received the input.
name (str): Name of the new graph.
Return:
dict[tf.Tensor]: Outputs of the symbolic distribution parameter
graph.
"""
del state_info_vars
with tf.compat.v1.variable_scope(self._variable_scope):
network = self.model.build(input_var, name=name)
means_var = network.means
log_stds_var = network.log_stds
return dict(mean=means_var, log_std=log_stds_var)
def _initialize(self):
"""Initialize graph."""
input_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + self._input_shape)
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
self._network = self.model.build(input_ph)
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Label of input data.
"""
self._ys = ys
def predict(self, xs):
"""Predict ys based on input xs.
Args:
xs (numpy.ndarray): Input data.
Return:
np.ndarray: The predicted ys.
"""
if self._ys is None:
mean = tf.compat.v1.get_default_session().run(
self._network.mean, feed_dict={self._network.input: xs})
self._ys = np.full((len(xs), 1), mean)
return self._ys
def get_params_internal(self):
"""Get the params, which are the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._variable_scope.trainable_variables()
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()

# File: CSD-locomotion-master/garaged/tests/fixtures/regressors/simple_gaussian_mlp_regressor.py
"""Simple GaussianMLPRegressor for testing."""
import numpy as np
import tensorflow as tf
from garage.tf.regressors import StochasticRegressor
from tests.fixtures.models import SimpleGaussianMLPModel
class SimpleGaussianMLPRegressor(StochasticRegressor):
"""Simple GaussianMLPRegressor for testing.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
        args (list): Unused positional arguments.
kwargs (dict): Unused keyword arguments.
"""
def __init__(self, input_shape, output_dim, name, *args, **kwargs):
super().__init__(input_shape, output_dim, name)
del args, kwargs
self.model = SimpleGaussianMLPModel(output_dim=self._output_dim)
self._ys = None
self._network = None
self._initialize()
@property
def recurrent(self):
"""bool: If this module has a hidden state."""
return False
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
@property
def distribution(self):
"""garage.tf.distributions.DiagonalGaussian: Distribution."""
return self._network.dist
def dist_info_sym(self, input_var, state_info_vars=None, name='default'):
"""Create a symbolic graph of the distribution parameters.
Args:
input_var (tf.Tensor): tf.Tensor of the input data.
state_info_vars (dict): a dictionary whose values should contain
information about the state of the policy at the time it
received the input.
name (str): Name of the new graph.
Return:
dict[tf.Tensor]: Outputs of the symbolic distribution parameter
graph.
"""
del state_info_vars
with tf.compat.v1.variable_scope(self._variable_scope):
network = self.model.build(input_var, name=name)
means_var = network.means
log_stds_var = network.log_stds
return dict(mean=means_var, log_std=log_stds_var)
def _initialize(self):
"""Initialize graph."""
input_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + self._input_shape)
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
self._network = self.model.build(input_ph)
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Label of input data.
"""
self._ys = ys
def predict(self, xs):
"""Predict ys based on input xs.
Args:
xs (numpy.ndarray): Input data.
Return:
np.ndarray: The predicted ys.
"""
if self._ys is None:
mean = tf.compat.v1.get_default_session().run(
self._network.mean, feed_dict={self._network.input: xs})
self._ys = np.full((len(xs), 1), mean)
return self._ys
def get_params_internal(self):
"""Get the params, which are the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._variable_scope.trainable_variables()
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()

# File: CSD-locomotion-master/garaged/tests/fixtures/regressors/simple_mlp_regressor.py
"""Simple MLPRegressor for testing."""
import tensorflow as tf
from garage.tf.regressors import Regressor
from tests.fixtures.models import SimpleMLPModel
class SimpleMLPRegressor(Regressor):
"""Simple MLPRegressor for testing.
Args:
input_shape (tuple[int]): Input shape of the training data.
output_dim (int): Output dimension of the model.
name (str): Model name, also the variable scope.
        args (list): Unused positional arguments.
kwargs (dict): Unused keyword arguments.
"""
def __init__(self, input_shape, output_dim, name, *args, **kwargs):
super().__init__(input_shape, output_dim, name)
del args, kwargs
self.model = SimpleMLPModel(output_dim=self._output_dim,
name='SimpleMLPModel')
self._ys = None
self._network = None
self._initialize()
def _initialize(self):
"""Initialize graph."""
input_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + self._input_shape)
with tf.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
self._network = self.model.build(input_ph)
@property
def recurrent(self):
"""bool: If this module has a hidden state."""
return False
@property
def vectorized(self):
"""bool: If this module supports vectorization input."""
return True
def fit(self, xs, ys):
"""Fit with input data xs and label ys.
Args:
xs (numpy.ndarray): Input data.
ys (numpy.ndarray): Label of input data.
"""
self._ys = ys
def predict(self, xs):
"""Predict ys based on input xs.
Args:
xs (numpy.ndarray): Input data.
Return:
np.ndarray: The predicted ys.
"""
if self._ys is None:
outputs = tf.compat.v1.get_default_session().run(
self._network.outputs, feed_dict={self._network.input: xs})
self._ys = outputs
return self._ys
def get_params_internal(self):
"""Get the params, which are the trainable variables.
Returns:
List[tf.Variable]: A list of trainable variables in the current
variable scope.
"""
return self._variable_scope.trainable_variables()
def __getstate__(self):
"""Object.__getstate__.
Returns:
dict: the state to be pickled for the instance.
"""
new_dict = super().__getstate__()
del new_dict['_network']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): Unpickled state.
"""
super().__setstate__(state)
self._initialize()

# File: CSD-locomotion-master/garaged/tests/fixtures/sampler/__init__.py
"""Fixtures for testing samplers."""
from tests.fixtures.sampler.ray_fixtures import (ray_local_session_fixture,
ray_session_fixture)
__all__ = ['ray_local_session_fixture', 'ray_session_fixture']

# File: CSD-locomotion-master/garaged/tests/fixtures/sampler/ray_fixtures.py
"""Pytest fixtures for initializing Ray during Ray-related tests."""
import pytest
import ray
@pytest.fixture(scope='function')
def ray_local_session_fixture():
"""Initializes Ray and shuts down Ray in local mode.
Yields:
None: Yield is for purposes of pytest module style.
        All statements before the yield are part of module setup, and all
        statements after the yield are part of module teardown.
"""
if not ray.is_initialized():
ray.init(local_mode=True,
ignore_reinit_error=True,
log_to_driver=False,
include_webui=False)
yield
if ray.is_initialized():
ray.shutdown()
@pytest.fixture(scope='function')
def ray_session_fixture():
"""Initializes Ray and shuts down Ray.
Yields:
None: Yield is for purposes of pytest module style.
        All statements before the yield are part of module setup, and all
        statements after the yield are part of module teardown.
"""
if not ray.is_initialized():
ray.init(memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_webui=False)
yield
if ray.is_initialized():
ray.shutdown()
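
# Hedged usage sketch (not part of the repository): a test that requests the
# local-mode fixture by name; pytest runs the setup before the test body and
# the teardown (ray.shutdown) afterwards.
def test_ray_is_up(ray_local_session_fixture):
    del ray_local_session_fixture  # needed only for its setup/teardown
    assert ray.is_initialized()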

# File: CSD-locomotion-master/garaged/tests/fixtures/tf/__init__.py
# (empty file)

# File: CSD-locomotion-master/garaged/tests/fixtures/tf/algos/dummy_off_policy_algo.py
"""A dummy off-policy algorithm."""
from garage.np.algos import RLAlgorithm
class DummyOffPolicyAlgo(RLAlgorithm):
"""A dummy off-policy algorithm."""
def init_opt(self):
"""Initialize the optimization procedure."""
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
"""
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
"""
def optimize_policy(self, samples_data):
"""Optimize the policy using the samples.
Args:
samples_data (dict): Processed sample data.
See garage.tf.paths_to_tensors() for details.
"""
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/__init__.py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/test_dtypes.py |
import akro
import gym.spaces
import numpy as np
import pytest
from garage import TimeStep
from garage import TimeStepBatch
from garage import TrajectoryBatch
from garage.envs import EnvSpec
@pytest.fixture
def traj_data():
# spaces
obs_space = gym.spaces.Box(low=1,
high=np.inf,
shape=(4, 3, 2),
dtype=np.float32)
act_space = gym.spaces.MultiDiscrete([2, 5])
env_spec = EnvSpec(obs_space, act_space)
# generate data
lens = np.array([10, 20, 7, 25, 25, 40, 10, 5])
n_t = lens.sum()
obs = np.stack([obs_space.low] * n_t)
last_obs = np.stack([obs_space.low] * len(lens))
act = np.stack([[1, 3]] * n_t)
rew = np.arange(n_t)
terms = np.zeros(n_t, dtype=np.bool)
terms[np.cumsum(lens) - 1] = True # set terminal bits
# env_infos
env_infos = dict()
env_infos['goal'] = np.stack([[1, 1]] * n_t)
env_infos['foo'] = np.arange(n_t)
# agent_infos
agent_infos = dict()
agent_infos['prev_action'] = act
agent_infos['hidden'] = np.arange(n_t)
return {
'env_spec': env_spec,
'observations': obs,
'last_observations': last_obs,
'actions': act,
'rewards': rew,
'terminals': terms,
'env_infos': env_infos,
'agent_infos': agent_infos,
'lengths': lens,
}
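# Layout note with a small helper (illustrative, not used by the tests below):
# the batch is flattened across trajectories, so trajectory i occupies rows
# sum(lengths[:i]) : sum(lengths[:i]) + lengths[i] of the stacked arrays.
def _traj_slice(lengths, i):
    """Illustrative: index range of trajectory i in the flattened arrays."""
    start = int(np.sum(lengths[:i]))
    return slice(start, start + int(lengths[i]))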
def test_new_traj(traj_data):
t = TrajectoryBatch(**traj_data)
assert t.env_spec is traj_data['env_spec']
assert t.observations is traj_data['observations']
assert t.last_observations is traj_data['last_observations']
assert t.actions is traj_data['actions']
assert t.rewards is traj_data['rewards']
assert t.terminals is traj_data['terminals']
assert t.env_infos is traj_data['env_infos']
assert t.agent_infos is traj_data['agent_infos']
assert t.lengths is traj_data['lengths']
def test_lengths_shape_mismatch_traj(traj_data):
with pytest.raises(ValueError,
match='Lengths tensor must be a tensor of shape'):
traj_data['lengths'] = traj_data['lengths'].reshape((4, -1))
t = TrajectoryBatch(**traj_data)
del t
def test_lengths_dtype_mismatch_traj(traj_data):
with pytest.raises(ValueError,
match='Lengths tensor must have an integer dtype'):
traj_data['lengths'] = traj_data['lengths'].astype(np.float32)
t = TrajectoryBatch(**traj_data)
del t
def test_obs_env_spec_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='observations must conform'):
traj_data['observations'] = traj_data['observations'][:, :, :, :1]
t = TrajectoryBatch(**traj_data)
del t
def test_obs_batch_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='batch dimension of observations'):
traj_data['observations'] = traj_data['observations'][:-1]
t = TrajectoryBatch(**traj_data)
del t
def test_last_obs_env_spec_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='last_observations must conform'):
traj_data['last_observations'] = \
traj_data['last_observations'][:, :, :, :1]
t = TrajectoryBatch(**traj_data)
del t
def test_last_obs_batch_mismatch_traj(traj_data):
with pytest.raises(ValueError,
match='batch dimension of last_observations'):
traj_data['last_observations'] = traj_data['last_observations'][:-1]
t = TrajectoryBatch(**traj_data)
del t
def test_act_env_spec_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='actions must conform'):
traj_data['actions'] = traj_data['actions'][:, 0]
t = TrajectoryBatch(**traj_data)
del t
def test_act_box_env_spec_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='actions should have'):
traj_data['env_spec'].action_space = akro.Box(low=1,
high=np.inf,
shape=(4, 3, 2),
dtype=np.float32)
t = TrajectoryBatch(**traj_data)
del t
def test_act_batch_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='batch dimension of actions'):
traj_data['actions'] = traj_data['actions'][:-1]
t = TrajectoryBatch(**traj_data)
del t
def test_rewards_shape_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='Rewards tensor'):
traj_data['rewards'] = traj_data['rewards'].reshape((2, -1))
t = TrajectoryBatch(**traj_data)
del t
def test_terminals_shape_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='terminals tensor must have shape'):
traj_data['terminals'] = traj_data['terminals'].reshape((2, -1))
t = TrajectoryBatch(**traj_data)
del t
def test_terminals_dtype_mismatch_traj(traj_data):
with pytest.raises(ValueError, match='terminals tensor must be dtype'):
traj_data['terminals'] = traj_data['terminals'].astype(np.float32)
t = TrajectoryBatch(**traj_data)
del t
def test_env_infos_not_ndarray_traj(traj_data):
with pytest.raises(ValueError,
match='entry in env_infos must be a numpy array'):
traj_data['env_infos']['bar'] = []
t = TrajectoryBatch(**traj_data)
del t
def test_env_infos_batch_mismatch_traj(traj_data):
with pytest.raises(ValueError,
match='entry in env_infos must have a batch dimension'):
traj_data['env_infos']['goal'] = traj_data['env_infos']['goal'][:-1]
t = TrajectoryBatch(**traj_data)
del t
def test_agent_infos_not_ndarray_traj(traj_data):
with pytest.raises(ValueError,
match='entry in agent_infos must be a numpy array'):
traj_data['agent_infos']['bar'] = list()
t = TrajectoryBatch(**traj_data)
del t
def test_agent_infos_batch_mismatch_traj(traj_data):
with pytest.raises(
ValueError,
match='entry in agent_infos must have a batch dimension'):
traj_data['agent_infos']['hidden'] = traj_data['agent_infos'][
'hidden'][:-1]
t = TrajectoryBatch(**traj_data)
del t
def test_to_trajectory_list(traj_data):
t = TrajectoryBatch(**traj_data)
t_list = t.to_trajectory_list()
assert len(t_list) == len(traj_data['lengths'])
start = 0
for length, last_obs, s in zip(traj_data['lengths'],
traj_data['last_observations'], t_list):
stop = start + length
assert (
s['observations'] == traj_data['observations'][start:stop]).all()
assert (s['next_observations'] == np.concatenate(
(traj_data['observations'][start + 1:stop], [last_obs]))).all()
assert (s['actions'] == traj_data['actions'][start:stop]).all()
assert (s['rewards'] == traj_data['rewards'][start:stop]).all()
assert (s['dones'] == traj_data['terminals'][start:stop]).all()
start = stop
assert start == len(traj_data['rewards'])
@pytest.fixture
def sample_data():
# spaces
obs_space = gym.spaces.Box(low=1,
high=10,
shape=(4, 3, 2),
dtype=np.float32)
act_space = gym.spaces.MultiDiscrete([2, 5])
env_spec = EnvSpec(obs_space, act_space)
# generate data
obs = obs_space.sample()
next_obs = obs_space.sample()
act = act_space.sample()
rew = 10.0
terms = False
# env_infos
env_infos = dict()
env_infos['goal'] = np.array([[1, 1]])
env_infos['TimeLimit.truncated'] = not terms
# agent_infos
agent_infos = dict()
agent_infos['prev_action'] = act
return {
'env_spec': env_spec,
'observation': obs,
'next_observation': next_obs,
'action': act,
'reward': rew,
'terminal': terms,
'env_info': env_infos,
'agent_info': agent_infos,
}
def test_new_time_step(sample_data):
s = TimeStep(**sample_data)
assert s.env_spec is sample_data['env_spec']
assert s.observation is sample_data['observation']
assert s.action is sample_data['action']
assert s.reward is sample_data['reward']
assert s.terminal is sample_data['terminal']
assert s.env_info is sample_data['env_info']
assert s.agent_info is sample_data['agent_info']
del s
obs_space = akro.Box(low=-1, high=10, shape=(4, 3, 2), dtype=np.float32)
act_space = akro.Box(low=-1, high=10, shape=(4, 2), dtype=np.float32)
env_spec = EnvSpec(obs_space, act_space)
sample_data['env_spec'] = env_spec
obs_space = akro.Box(low=-1000,
high=1000,
shape=(4, 3, 2),
dtype=np.float32)
act_space = akro.Box(low=-1000, high=1000, shape=(4, 2), dtype=np.float32)
sample_data['observation'] = obs_space.sample()
sample_data['next_observation'] = obs_space.sample()
sample_data['action'] = act_space.sample()
s = TimeStep(**sample_data)
assert s.observation is sample_data['observation']
assert s.next_observation is sample_data['next_observation']
assert s.action is sample_data['action']
def test_obs_env_spec_mismatch_time_step(sample_data):
with pytest.raises(ValueError,
match='observation must conform to observation_space'):
sample_data['observation'] = sample_data['observation'][:, :, :1]
s = TimeStep(**sample_data)
del s
obs_space = akro.Box(low=1, high=10, shape=(4, 5, 2), dtype=np.float32)
act_space = gym.spaces.MultiDiscrete([2, 5])
env_spec = EnvSpec(obs_space, act_space)
sample_data['env_spec'] = env_spec
with pytest.raises(
ValueError,
match='observation should have the same dimensionality'):
sample_data['observation'] = sample_data['observation'][:, :, :1]
s = TimeStep(**sample_data)
del s
def test_next_obs_env_spec_mismatch_time_step(sample_data):
with pytest.raises(
ValueError,
match='next_observation must conform to observation_space'):
sample_data['next_observation'] = sample_data[
'next_observation'][:, :, :1]
s = TimeStep(**sample_data)
del s
obs_space = akro.Box(low=1, high=10, shape=(4, 3, 2), dtype=np.float32)
act_space = gym.spaces.MultiDiscrete([2, 5])
env_spec = EnvSpec(obs_space, act_space)
sample_data['env_spec'] = env_spec
with pytest.raises(
ValueError,
match='next_observation should have the same dimensionality'):
sample_data['next_observation'] = sample_data[
'next_observation'][:, :, :1]
s = TimeStep(**sample_data)
del s
def test_act_env_spec_mismatch_time_step(sample_data):
with pytest.raises(ValueError,
match='action must conform to action_space'):
sample_data['action'] = sample_data['action'][:-1]
s = TimeStep(**sample_data)
del s
obs_space = akro.Box(low=1, high=10, shape=(4, 3, 2), dtype=np.float32)
act_space = akro.Discrete(5)
env_spec = EnvSpec(obs_space, act_space)
sample_data['env_spec'] = env_spec
with pytest.raises(ValueError,
match='action should have the same dimensionality'):
sample_data['action'] = sample_data['action'][:-1]
s = TimeStep(**sample_data)
del s
def test_reward_dtype_mismatch_time_step(sample_data):
with pytest.raises(ValueError, match='reward must be type'):
sample_data['reward'] = []
s = TimeStep(**sample_data)
del s
def test_terminal_dtype_mismatch_time_step(sample_data):
with pytest.raises(ValueError, match='terminal must be dtype bool'):
sample_data['terminal'] = []
s = TimeStep(**sample_data)
del s
def test_agent_info_dtype_mismatch_time_step(sample_data):
with pytest.raises(ValueError, match='agent_info must be type'):
sample_data['agent_info'] = []
s = TimeStep(**sample_data)
del s
def test_env_info_dtype_mismatch_time_step(sample_data):
with pytest.raises(ValueError, match='env_info must be type'):
sample_data['env_info'] = []
s = TimeStep(**sample_data)
del s
@pytest.fixture
def batch_data():
# spaces
obs_space = gym.spaces.Box(low=1,
high=np.inf,
shape=(4, 3, 2),
dtype=np.float32)
act_space = gym.spaces.MultiDiscrete([2, 5])
env_spec = EnvSpec(obs_space, act_space)
# generate data
batch_size = 2
obs = np.stack([obs_space.low] * batch_size)
next_obs = np.stack([obs_space.low] * batch_size)
act = np.stack([[1, 3]] * batch_size)
rew = np.arange(batch_size)
terms = np.zeros(batch_size, dtype=np.bool)
terms[np.cumsum(batch_size) - 1] = True # set terminal bits
# env_infos
env_infos = dict()
env_infos['goal'] = np.stack([[1, 1]] * batch_size)
env_infos['foo'] = np.arange(batch_size)
# agent_infos
agent_infos = dict()
agent_infos['prev_action'] = act
agent_infos['hidden'] = np.arange(batch_size)
return {
'env_spec': env_spec,
'observations': obs,
'next_observations': next_obs,
'actions': act,
'rewards': rew,
'terminals': terms,
'env_infos': env_infos,
'agent_infos': agent_infos,
}
def test_new_ts_batch(batch_data):
s = TimeStepBatch(**batch_data)
assert s.env_spec is batch_data['env_spec']
assert s.observations is batch_data['observations']
assert s.next_observations is batch_data['next_observations']
assert s.actions is batch_data['actions']
assert s.rewards is batch_data['rewards']
assert s.terminals is batch_data['terminals']
assert s.env_infos is batch_data['env_infos']
assert s.agent_infos is batch_data['agent_infos']
def test_observations_env_spec_mismatch_batch(batch_data):
with pytest.raises(ValueError, match='observations must conform'):
batch_data['observations'] = batch_data['observations'][:, :, :, :1]
s = TimeStepBatch(**batch_data)
del s
obs_space = akro.Box(low=1, high=10, shape=(4, 5, 2), dtype=np.float32)
act_space = gym.spaces.MultiDiscrete([2, 5])
env_spec = EnvSpec(obs_space, act_space)
batch_data['env_spec'] = env_spec
with pytest.raises(
ValueError,
match='observations should have the same dimensionality'):
batch_data['observations'] = batch_data['observations'][:, :, :, :1]
s = TimeStepBatch(**batch_data)
del s
def test_observations_batch_mismatch_batch(batch_data):
with pytest.raises(ValueError, match='batch dimension of observations'):
batch_data['observations'] = batch_data['observations'][:-1]
s = TimeStepBatch(**batch_data)
del s
def test_next_observations_env_spec_mismatch_batch(batch_data):
with pytest.raises(ValueError, match='next_observations must conform'):
batch_data['next_observations'] = batch_data[
'next_observations'][:, :, :, :1]
s = TimeStepBatch(**batch_data)
del s
obs_space = akro.Box(low=1, high=10, shape=(4, 3, 2), dtype=np.float32)
act_space = gym.spaces.MultiDiscrete([2, 5])
env_spec = EnvSpec(obs_space, act_space)
batch_data['env_spec'] = env_spec
with pytest.raises(
ValueError,
match='next_observations should have the same dimensionality'):
batch_data['next_observations'] = batch_data[
'next_observations'][:, :, :, :1]
s = TimeStepBatch(**batch_data)
del s
def test_next_observations_batch_mismatch_batch(batch_data):
with pytest.raises(ValueError,
match='batch dimension of '
'next_observations'):
batch_data['next_observations'] = batch_data['next_observations'][:-1]
s = TimeStepBatch(**batch_data)
del s
def test_actions_batch_mismatch_batch(batch_data):
with pytest.raises(ValueError, match='batch dimension of actions'):
batch_data['actions'] = batch_data['actions'][:-1]
s = TimeStepBatch(**batch_data)
del s
def test_rewards_batch_mismatch_batch(batch_data):
with pytest.raises(ValueError, match='batch dimension of rewards'):
batch_data['rewards'] = batch_data['rewards'][:-1]
s = TimeStepBatch(**batch_data)
del s
def test_act_env_spec_mismatch_batch(batch_data):
with pytest.raises(ValueError, match='actions must conform'):
batch_data['actions'] = batch_data['actions'][:, 0]
s = TimeStepBatch(**batch_data)
del s
def test_act_box_env_spec_mismatch_batch(batch_data):
with pytest.raises(ValueError, match='actions should have'):
batch_data['env_spec'].action_space = akro.Box(low=1,
high=np.inf,
shape=(4, 3, 2),
dtype=np.float32)
s = TimeStepBatch(**batch_data)
del s
def test_empty_terminals__batch(batch_data):
with pytest.raises(ValueError, match='batch dimension of terminals'):
batch_data['terminals'] = []
s = TimeStepBatch(**batch_data)
del s
def test_terminals_dtype_mismatch_batch(batch_data):
with pytest.raises(ValueError, match='terminals tensor must be dtype'):
batch_data['terminals'] = batch_data['terminals'].astype(np.float32)
s = TimeStepBatch(**batch_data)
del s
def test_env_infos_not_ndarray_batch(batch_data):
with pytest.raises(ValueError,
match='entry in env_infos must be a numpy array'):
batch_data['env_infos']['bar'] = []
s = TimeStepBatch(**batch_data)
del s
def test_env_infos_batch_mismatch_batch(batch_data):
with pytest.raises(ValueError,
match='entry in env_infos must have a batch dimension'):
batch_data['env_infos']['goal'] = batch_data['env_infos']['goal'][:-1]
s = TimeStepBatch(**batch_data)
del s
def test_agent_infos_not_ndarray_batch(batch_data):
with pytest.raises(ValueError,
match='entry in agent_infos must be a numpy array'):
batch_data['agent_infos']['bar'] = list()
s = TimeStepBatch(**batch_data)
del s
def test_agent_infos_batch_mismatch_batch(batch_data):
with pytest.raises(
ValueError,
match='entry in agent_infos must have a batch dimension'):
batch_data['agent_infos']['hidden'] = batch_data['agent_infos'][
'hidden'][:-1]
s = TimeStepBatch(**batch_data)
del s
def test_concatenate_batch(batch_data):
single_batch = TimeStepBatch(**batch_data)
batches = [single_batch, single_batch]
s = TimeStepBatch.concatenate(*batches)
new_obs = np.concatenate(
[batch_data['observations'], batch_data['observations']])
new_next_obs = np.concatenate(
[batch_data['next_observations'], batch_data['next_observations']])
new_actions = np.concatenate(
[batch_data['actions'], batch_data['actions']])
new_rewards = np.concatenate(
[batch_data['rewards'], batch_data['rewards']])
new_terminals = np.concatenate(
[batch_data['terminals'], batch_data['terminals']])
new_env_infos = {
k: np.concatenate([b.env_infos[k] for b in batches])
for k in batches[0].env_infos.keys()
}
new_agent_infos = {
k: np.concatenate([b.agent_infos[k] for b in batches])
for k in batches[0].agent_infos.keys()
}
assert s.env_spec == batch_data['env_spec']
assert np.array_equal(s.observations, new_obs)
assert np.array_equal(s.next_observations, new_next_obs)
assert np.array_equal(s.actions, new_actions)
assert np.array_equal(s.rewards, new_rewards)
assert np.array_equal(s.terminals, new_terminals)
for key in new_env_infos:
assert key in s.env_infos
assert np.array_equal(new_env_infos[key], s.env_infos[key])
for key in new_agent_infos:
assert key in s.agent_infos
assert np.array_equal(new_agent_infos[key], s.agent_infos[key])
def test_concatenate_empty_batch():
with pytest.raises(ValueError, match='at least one'):
batches = []
s = TimeStepBatch.concatenate(*batches)
del s
def test_split_batch(batch_data):
s = TimeStepBatch(
env_spec=batch_data['env_spec'],
observations=batch_data['observations'],
actions=batch_data['actions'],
rewards=batch_data['rewards'],
next_observations=batch_data['next_observations'],
terminals=batch_data['terminals'],
env_infos=batch_data['env_infos'],
agent_infos=batch_data['agent_infos'],
)
batches = s.split()
assert len(batches) == 2 # original batch_data is a batch of 2
for i, batch in enumerate(batches):
assert batch.env_spec == batch_data['env_spec']
assert np.array_equal(batch.observations,
[batch_data['observations'][i]])
assert np.array_equal(batch.next_observations,
[batch_data['next_observations'][i]])
assert np.array_equal(batch.actions, [batch_data['actions'][i]])
assert np.array_equal(batch.rewards, [batch_data['rewards'][i]])
assert np.array_equal(batch.terminals, [batch_data['terminals'][i]])
for key in batch.env_infos:
assert key in batch_data['env_infos']
assert np.array_equal(batch.env_infos[key],
[batch_data['env_infos'][key][i]])
for key in batch.agent_infos:
assert key in batch_data['agent_infos']
assert (np.array_equal(batch.agent_infos[key],
[batch_data['agent_infos'][key][i]]))
def test_to_time_step_list_batch(batch_data):
s = TimeStepBatch(
env_spec=batch_data['env_spec'],
observations=batch_data['observations'],
actions=batch_data['actions'],
rewards=batch_data['rewards'],
next_observations=batch_data['next_observations'],
terminals=batch_data['terminals'],
env_infos=batch_data['env_infos'],
agent_infos=batch_data['agent_infos'],
)
batches = s.to_time_step_list()
assert len(batches) == 2 # original batch_data is a batch of 2
for i, batch in enumerate(batches):
assert np.array_equal(batch['observations'],
[batch_data['observations'][i]])
assert np.array_equal(batch['next_observations'],
[batch_data['next_observations'][i]])
assert np.array_equal(batch['actions'], [batch_data['actions'][i]])
assert np.array_equal(batch['rewards'], [batch_data['rewards'][i]])
assert np.array_equal(batch['terminals'], [batch_data['terminals'][i]])
for key in batch['env_infos']:
assert key in batch_data['env_infos']
assert np.array_equal(batch['env_infos'][key],
[batch_data['env_infos'][key][i]])
for key in batch['agent_infos']:
assert key in batch_data['agent_infos']
assert np.array_equal(batch['agent_infos'][key],
[batch_data['agent_infos'][key][i]])
def test_from_empty_time_step_list_batch(batch_data):
with pytest.raises(ValueError, match='at least one dict'):
batches = []
s = TimeStepBatch.from_time_step_list(batch_data['env_spec'], batches)
del s
def test_from_time_step_list_batch(batch_data):
batches = [batch_data, batch_data]
s = TimeStepBatch.from_time_step_list(batch_data['env_spec'], batches)
new_obs = np.concatenate(
[batch_data['observations'], batch_data['observations']])
new_next_obs = np.concatenate(
[batch_data['next_observations'], batch_data['next_observations']])
new_actions = np.concatenate(
[batch_data['actions'], batch_data['actions']])
new_rewards = np.concatenate(
[batch_data['rewards'], batch_data['rewards']])
new_terminals = np.concatenate(
[batch_data['terminals'], batch_data['terminals']])
new_env_infos = {
k: np.concatenate([b['env_infos'][k] for b in batches])
for k in batches[0]['env_infos'].keys()
}
new_agent_infos = {
k: np.concatenate([b['agent_infos'][k] for b in batches])
for k in batches[0]['agent_infos'].keys()
}
assert s.env_spec == batch_data['env_spec']
assert np.array_equal(s.observations, new_obs)
assert np.array_equal(s.next_observations, new_next_obs)
assert np.array_equal(s.actions, new_actions)
assert np.array_equal(s.rewards, new_rewards)
assert np.array_equal(s.terminals, new_terminals)
for key in new_env_infos:
assert key in s.env_infos
assert np.array_equal(new_env_infos[key], s.env_infos[key])
for key in new_agent_infos:
assert key in s.agent_infos
assert np.array_equal(new_agent_infos[key], s.agent_infos[key])
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/test_functions.py |
"""Test root level functions in garage."""
import csv
import math
import tempfile
import akro
import dowel
from dowel import logger, tabular
import numpy as np
import pytest
import tensorflow as tf
import torch
from garage import _Default, make_optimizer
from garage import log_multitask_performance, log_performance, TrajectoryBatch
from garage.envs import EnvSpec
from tests.fixtures import TfGraphTestCase
@pytest.mark.serial
def test_log_performance():
lengths = np.array([10, 5, 1, 1])
batch = TrajectoryBatch(
EnvSpec(akro.Box(np.array([0., 0., 0.]), np.array([1., 1., 1.])),
akro.Box(np.array([-1., -1.]), np.array([0., 0.]))),
observations=np.ones((sum(lengths), 3), dtype=np.float32),
last_observations=np.ones((len(lengths), 3), dtype=np.float32),
actions=np.zeros((sum(lengths), 2), dtype=np.float32),
rewards=np.array([
0.34026529, 0.58263177, 0.84307509, 0.97651095, 0.81723901,
0.22631398, 0.03421301, 0.97515046, 0.64311832, 0.65068933,
0.17657714, 0.04783857, 0.73904013, 0.41364329, 0.52235551,
0.24203526, 0.43328910
]),
terminals=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
dtype=bool),
env_infos={
'success':
np.array([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
dtype=bool)
},
agent_infos={},
lengths=lengths)
log_file = tempfile.NamedTemporaryFile()
csv_output = dowel.CsvOutput(log_file.name)
logger.add_output(csv_output)
log_performance(7, batch, 0.8, prefix='test_log_performance')
logger.log(tabular)
logger.dump_output_type(dowel.CsvOutput)
with open(log_file.name, 'r') as file:
rows = list(csv.DictReader(file))
res = {k: float(r) for (k, r) in rows[0].items()}
assert res['test_log_performance/Iteration'] == 7
assert res['test_log_performance/NumTrajs'] == 4
assert math.isclose(res['test_log_performance/SuccessRate'], 0.75)
assert math.isclose(res['test_log_performance/CompletionRate'], 0.5)
assert math.isclose(res['test_log_performance/AverageDiscountedReturn'],
1.1131040640673113)
assert math.isclose(res['test_log_performance/AverageReturn'],
2.1659965525)
assert math.isclose(res['test_log_performance/StdReturn'],
2.354067152038576)
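# The expected AverageDiscountedReturn above can be reproduced by hand: each
# trajectory's discounted return is sum_t gamma**t * r_t with gamma = 0.8,
# averaged over the four trajectories. A minimal sketch (illustrative only,
# not used by the tests):
def _mean_discounted_return(rewards, lengths, gamma=0.8):
    """Mean per-trajectory discounted return; ~1.113104 for the data above."""
    returns, start = [], 0
    for length in lengths:
        traj = rewards[start:start + length]
        returns.append(sum(gamma**t * r for t, r in enumerate(traj)))
        start += length
    return sum(returns) / len(returns)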
@pytest.mark.serial
def test_log_multitask_performance_task_name():
lengths = np.array([10, 5, 1, 1])
batch = TrajectoryBatch(
EnvSpec(akro.Box(np.array([0., 0., 0.]), np.array([1., 1., 1.])),
akro.Box(np.array([-1., -1.]), np.array([0., 0.]))),
observations=np.ones((sum(lengths), 3), dtype=np.float32),
last_observations=np.ones((len(lengths), 3), dtype=np.float32),
actions=np.zeros((sum(lengths), 2), dtype=np.float32),
rewards=np.array([
0.34026529, 0.58263177, 0.84307509, 0.97651095, 0.81723901,
0.22631398, 0.03421301, 0.97515046, 0.64311832, 0.65068933,
0.17657714, 0.04783857, 0.73904013, 0.41364329, 0.52235551,
0.24203526, 0.43328910
]),
terminals=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
dtype=bool),
env_infos={
'success':
np.array([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
dtype=bool),
'task_name':
np.array(['env1'] * 10 + ['env2'] * 5 + ['env1'] + ['env3'])
},
agent_infos={},
lengths=lengths)
log_file = tempfile.NamedTemporaryFile()
csv_output = dowel.CsvOutput(log_file.name)
logger.add_output(csv_output)
log_multitask_performance(7, batch, 0.8)
logger.log(tabular)
logger.dump_output_type(dowel.CsvOutput)
with open(log_file.name, 'r') as file:
rows = list(csv.DictReader(file))
res = {k: float(r) for (k, r) in rows[0].items()}
assert res['env1/Iteration'] == 7
assert res['env2/Iteration'] == 7
assert res['env3/Iteration'] == 7
assert res['env1/NumTrajs'] == 2
assert res['env2/NumTrajs'] == 1
assert res['env3/NumTrajs'] == 1
assert math.isclose(res['env1/SuccessRate'], 0.5)
assert math.isclose(res['env2/SuccessRate'], 1.0)
assert math.isclose(res['env3/SuccessRate'], 1.0)
@pytest.mark.serial
def test_log_multitask_performance_task_id():
lengths = np.array([10, 5, 1, 1])
batch = TrajectoryBatch(
EnvSpec(akro.Box(np.array([0., 0., 0.]), np.array([1., 1., 1.])),
akro.Box(np.array([-1., -1.]), np.array([0., 0.]))),
observations=np.ones((sum(lengths), 3), dtype=np.float32),
last_observations=np.ones((len(lengths), 3), dtype=np.float32),
actions=np.zeros((sum(lengths), 2), dtype=np.float32),
rewards=np.array([
0.34026529, 0.58263177, 0.84307509, 0.97651095, 0.81723901,
0.22631398, 0.03421301, 0.97515046, 0.64311832, 0.65068933,
0.17657714, 0.04783857, 0.73904013, 0.41364329, 0.52235551,
0.24203526, 0.43328910
]),
terminals=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
dtype=bool),
env_infos={
'success':
np.array([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
dtype=bool),
'task_id':
np.array([1] * 10 + [3] * 5 + [1] + [4])
},
agent_infos={},
lengths=lengths)
log_file = tempfile.NamedTemporaryFile()
csv_output = dowel.CsvOutput(log_file.name)
logger.add_output(csv_output)
log_multitask_performance(7, batch, 0.8, {
1: 'env1',
3: 'env2',
4: 'env3',
5: 'env4'
})
logger.log(tabular)
logger.dump_output_type(dowel.CsvOutput)
with open(log_file.name, 'r') as file:
rows = list(csv.DictReader(file))
res = {k: float(r) for (k, r) in rows[0].items()}
assert res['env1/Iteration'] == 7
assert res['env2/Iteration'] == 7
assert res['env3/Iteration'] == 7
assert res['env4/Iteration'] == 7
assert res['env1/NumTrajs'] == 2
assert res['env2/NumTrajs'] == 1
assert res['env3/NumTrajs'] == 1
assert res['env4/NumTrajs'] == 0
assert math.isclose(res['env1/SuccessRate'], 0.5)
assert math.isclose(res['env2/SuccessRate'], 1.0)
assert math.isclose(res['env3/SuccessRate'], 1.0)
assert math.isnan(res['env4/SuccessRate'])
assert math.isnan(res['env4/AverageReturn'])
class TestOptimizerInterface(TfGraphTestCase):
"""Test class for tf & pytorch make_optimizer functions."""
def test_tf_make_optimizer_with_type(self):
"""Test make_optimizer function with type as first argument."""
optimizer_type = tf.compat.v1.train.AdamOptimizer
lr = 0.123
optimizer = make_optimizer(optimizer_type,
learning_rate=lr,
name='testOptimizer')
assert isinstance(optimizer, optimizer_type)
self.sess.run(tf.compat.v1.global_variables_initializer())
assert optimizer._name == 'testOptimizer'
assert np.allclose(
optimizer._lr, lr
        )  # Adam holds the learning rate in the private attribute self._lr
def test_tf_make_optimizer_with_tuple(self):
"""Test make_optimizer function with tuple as first argument."""
lr = 0.123
optimizer_type = (tf.compat.v1.train.AdamOptimizer, {
'learning_rate': lr
})
optimizer = make_optimizer(optimizer_type)
# pylint: disable=isinstance-second-argument-not-valid-type
assert isinstance(optimizer, optimizer_type)
self.sess.run(tf.compat.v1.global_variables_initializer())
assert np.allclose(
optimizer._lr, lr
        )  # Adam holds the learning rate in the private attribute self._lr
def test_tf_make_optimizer_raise_value_error(self):
"""Test make_optimizer raises value error."""
lr = 0.123
optimizer_type = (tf.compat.v1.train.AdamOptimizer, {
'learning_rate': lr
})
with pytest.raises(ValueError):
_ = make_optimizer(optimizer_type, learning_rate=lr)
def test_torch_make_optimizer_with_type(self):
"""Test make_optimizer function with type as first argument."""
optimizer_type = torch.optim.Adam
module = torch.nn.Linear(2, 1)
lr = 0.123
optimizer = make_optimizer(optimizer_type, module=module, lr=lr)
assert isinstance(optimizer, optimizer_type)
assert optimizer.defaults['lr'] == lr
def test_torch_make_optimizer_with_tuple(self):
"""Test make_optimizer function with tuple as first argument."""
optimizer_type = (torch.optim.Adam, {'lr': 0.1})
module = torch.nn.Linear(2, 1)
optimizer = make_optimizer(optimizer_type, module=module)
# pylint: disable=isinstance-second-argument-not-valid-type
assert isinstance(optimizer, optimizer_type)
assert optimizer.defaults['lr'] == optimizer_type[1]['lr']
def test_torch_make_optimizer_raise_value_error(self):
"""Test make_optimizer raises value error."""
optimizer_type = (torch.optim.Adam, {'lr': 0.1})
module = torch.nn.Linear(2, 1)
with pytest.raises(ValueError):
_ = make_optimizer(optimizer_type, module=module, lr=0.123)
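# Summary sketch (illustrative, not part of the original tests): make_optimizer
# accepts either a type plus keyword arguments or a (type, kwargs-dict) tuple,
# but mixing the two raises ValueError, as the *_raise_value_error tests above
# verify.
def _equivalent_adam_optimizers(module, lr=1e-3):
    """Illustrative: the two equivalent make_optimizer call forms."""
    opt_a = make_optimizer(torch.optim.Adam, module=module, lr=lr)
    opt_b = make_optimizer((torch.optim.Adam, {'lr': lr}), module=module)
    return opt_a, opt_b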
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/__init__.py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_env_spec.py |
import pickle
import akro
from garage.envs.env_spec import EnvSpec
class TestEnvSpec:
def test_pickleable(self):
env_spec = EnvSpec(akro.Box(-1, 1, (1, )), akro.Box(-2, 2, (2, )))
round_trip = pickle.loads(pickle.dumps(env_spec))
assert round_trip
assert round_trip.action_space == env_spec.action_space
assert round_trip.observation_space == env_spec.observation_space
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_garage_env.py |
from gym.wrappers import TimeLimit
import numpy as np
import pytest
from garage.envs import EnvSpec, GarageEnv
from garage.envs.grid_world_env import GridWorldEnv
from garage.np.policies import ScriptedPolicy
class TestGarageEnv:
def test_wraps_env_spec(self):
garage_env = GarageEnv(env_name='Pendulum-v0')
assert isinstance(garage_env.spec, EnvSpec)
def test_closes_box2d(self):
garage_env = GarageEnv(env_name='CarRacing-v0')
garage_env.render()
assert garage_env.env.viewer is not None
garage_env.close()
assert garage_env.env.viewer is None
@pytest.mark.mujoco
def test_closes_mujoco(self):
garage_env = GarageEnv(env_name='Ant-v2')
garage_env.render()
assert garage_env.env.viewer is not None
garage_env.close()
assert garage_env.env.viewer is None
def test_time_limit_env(self):
garage_env = GarageEnv(env_name='Pendulum-v0')
garage_env.reset()
for _ in range(200):
_, _, done, info = garage_env.step(
garage_env.spec.action_space.sample())
assert not done and info['TimeLimit.truncated']
assert info['GarageEnv.TimeLimitTerminated']
def test_garage_env_idempotent():
# test if garage env can wrap itself
env_no_wrap = GridWorldEnv(desc='4x4')
env_single_wrap = GarageEnv(GridWorldEnv(desc='4x4'))
env_double_wrap = GarageEnv(GarageEnv(GridWorldEnv(desc='4x4')))
policy = ScriptedPolicy(
scripted_actions=[2, 2, 1, 0, 3, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1])
obs_nw = env_no_wrap.reset()
obs_sw = env_single_wrap.reset()
obs_dw = env_double_wrap.reset()
for _ in range(16):
assert np.all(np.equal(obs_nw, obs_sw))
assert np.all(np.equal(obs_nw, obs_dw))
assert np.all(np.equal(obs_sw, obs_dw))
step_nw = env_no_wrap.step(policy.get_action(obs_nw)[0])
step_sw = env_single_wrap.step(policy.get_action(obs_sw)[0])
step_dw = env_double_wrap.step(policy.get_action(obs_dw)[0])
obs_nw = step_nw[0]
obs_sw = step_sw[0]
obs_dw = step_dw[0]
# test that single wrapped and double wrapped envs return the same
# values
assert np.all(np.equal(step_sw, step_dw))
def test_garage_env_idempotent_time_limit():
# test if garage env can wrap itself if environments
# are wrapped with timelimits
env_single_wrap = GarageEnv(
TimeLimit(GridWorldEnv(desc='4x4'), max_episode_steps=16))
env_double_wrap = GarageEnv(
GarageEnv(TimeLimit(GridWorldEnv(desc='4x4'), max_episode_steps=16)))
# purposefully greater than the max path length to expose
# time limit truncations
num_steps = 20
policy = ScriptedPolicy(
scripted_actions=[2, 2, 1, 0, 3, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1])
obs_sw = env_single_wrap.reset()
obs_dw = env_double_wrap.reset()
assert np.all(np.equal(obs_sw, obs_dw))
for _ in range(num_steps):
step_sw = env_single_wrap.step(policy.get_action(obs_sw)[0])
step_dw = env_double_wrap.step(policy.get_action(obs_dw)[0])
obs_sw = step_sw[0]
obs_dw = step_dw[0]
# test that single wrapped and double wrapped envs return the same
# values
assert np.all(np.equal(step_sw, step_dw))
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_grid_world_env.py |
import pickle
from garage.envs.grid_world_env import GridWorldEnv
from tests.helpers import step_env
class TestGridWorldEnv:
def test_pickleable(self):
env = GridWorldEnv(desc='8x8')
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
assert round_trip.start_state == env.start_state
step_env(round_trip)
round_trip.close()
env.close()
def test_does_not_modify_action(self):
env = GridWorldEnv(desc='8x8')
a = env.action_space.sample()
a_copy = a
env.reset()
env.step(a)
assert a == a_copy
env.close()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_half_cheetah_meta_envs.py |
import pickle
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
from garage.envs.mujoco.half_cheetah_dir_env import HalfCheetahDirEnv
from garage.envs.mujoco.half_cheetah_vel_env import HalfCheetahVelEnv
@pytest.mark.mujoco
@pytest.mark.parametrize('env_type', [HalfCheetahVelEnv, HalfCheetahDirEnv])
def test_can_sim(env_type):
env = env_type()
task = env.sample_tasks(1)[0]
env.set_task(task)
for _ in range(3):
env.step(env.action_space.sample())
@pytest.mark.mujoco
@pytest.mark.parametrize('env_type', [HalfCheetahVelEnv, HalfCheetahDirEnv])
def test_pickling_keeps_goal(env_type):
env = env_type()
task = env.sample_tasks(1)[0]
env.set_task(task)
env_clone = pickle.loads(pickle.dumps(env))
assert env._task == env_clone._task
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_multi_env_wrapper.py |
"""Tests for garage.envs.multi_env_wrapper"""
import akro
import numpy as np
import pytest
from garage.envs import GarageEnv
from garage.envs.multi_env_wrapper import (MultiEnvWrapper,
round_robin_strategy,
uniform_random_strategy)
class TestMultiEnvWrapper:
"""Tests for garage.envs.multi_env_wrapper"""
def _init_multi_env_wrapper(self,
env_names,
sample_strategy=uniform_random_strategy):
"""helper function to initialize multi_env_wrapper
Args:
env_names (list(str)): List of gym.Env names.
sample_strategy (func): A sampling strategy.
Returns:
garage.envs.multi_env_wrapper: Multi env wrapper.
"""
task_envs = [GarageEnv(env_name=name) for name in env_names]
return MultiEnvWrapper(task_envs, sample_strategy=sample_strategy)
def test_tasks_from_same_env(self):
"""test init with multiple tasks from same env"""
envs = ['CartPole-v0', 'CartPole-v0']
mt_env = self._init_multi_env_wrapper(envs)
assert mt_env.num_tasks == 2
def test_tasks_from_different_envs(self):
"""test init with multiple tasks from different env"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(envs)
assert mt_env.num_tasks == 2
def test_raise_exception_when_different_obs_space(self):
"""test if exception is raised when using tasks with different obs space""" # noqa: E501
envs = ['CartPole-v0', 'Blackjack-v0']
with pytest.raises(ValueError):
_ = self._init_multi_env_wrapper(envs)
def test_raise_exception_when_different_action_space(self):
"""test if exception is raised when using tasks with different action space""" # noqa: E501
envs = ['LunarLander-v2', 'LunarLanderContinuous-v2']
with pytest.raises(ValueError):
_ = self._init_multi_env_wrapper(envs)
def test_default_active_task_is_none(self):
"""test if default active task is none"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(
envs, sample_strategy=round_robin_strategy)
assert mt_env._active_task_index is None
def test_one_hot_observation_space(self):
"""test one hot representation of observation space"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(envs)
cartpole = GarageEnv(env_name='CartPole-v0')
cartpole_lb, cartpole_ub = cartpole.observation_space.bounds
obs_space = akro.Box(np.concatenate([cartpole_lb,
np.zeros(2)]),
np.concatenate([cartpole_ub,
np.ones(2)]))
assert mt_env.observation_space.shape == obs_space.shape
assert (
mt_env.observation_space.bounds[0] == obs_space.bounds[0]).all()
assert (
mt_env.observation_space.bounds[1] == obs_space.bounds[1]).all()
def test_action_space(self):
"""test action space"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(envs)
task1 = GarageEnv(env_name='CartPole-v0')
assert mt_env.action_space.shape == task1.action_space.shape
def test_round_robin_sample_strategy(self):
"""test round robin samping strategy"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(
envs, sample_strategy=round_robin_strategy)
tasks = []
for _ in envs:
mt_env.reset()
_, _, _, info = mt_env.step(1)
tasks.append(info['task_id'])
assert tasks[0] == 0 and tasks[1] == 1
def test_uniform_random_sample_strategy(self):
"""test uniform_random sampling strategy"""
envs = ['CartPole-v0', 'CartPole-v1', 'CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(
envs, sample_strategy=uniform_random_strategy)
tasks = []
for _ in envs:
mt_env.reset()
_, _, _, info = mt_env.step(1)
tasks.append(info['task_id'])
for task in tasks:
assert 0 <= task < 4
def test_task_remains_same_between_multiple_step_calls(self):
"""test if active_task remains same between multiple step calls"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(
envs, sample_strategy=round_robin_strategy)
mt_env.reset()
tasks = []
for _ in envs:
_, _, _, info = mt_env.step(1)
tasks.append(info['task_id'])
assert tasks[0] == 0 and tasks[1] == 0
def test_task_space(self):
"""test task space"""
envs = ['CartPole-v0', 'CartPole-v1']
mt_env = self._init_multi_env_wrapper(envs)
bounds = mt_env.task_space.bounds
lb = np.zeros(2)
ub = np.ones(2)
assert (bounds[0] == lb).all() and (bounds[1] == ub).all()
def test_one_hot_observation(self):
"""test if output of step function is correct"""
envs = ['CartPole-v0', 'CartPole-v0']
mt_env = self._init_multi_env_wrapper(
envs, sample_strategy=round_robin_strategy)
obs = mt_env.reset()
assert (obs[-2:] == np.array([1., 0.])).all()
obs = mt_env.step(1)[0]
assert (obs[-2:] == np.array([1., 0.])).all()
obs = mt_env.reset()
assert (obs[-2:] == np.array([0., 1.])).all()
obs = mt_env.step(1)[0]
assert (obs[-2:] == np.array([0., 1.])).all()
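# Conceptual sketch (illustrative; it mirrors the observation layout the tests
# above check, not the wrapper's actual implementation): in 'vanilla' mode the
# wrapped observation is the base observation with the active task's one-hot
# appended.
def _append_one_hot(obs, task_index, num_tasks):
    """Illustrative helper for the one-hot observation layout."""
    one_hot = np.zeros(num_tasks)
    one_hot[task_index] = 1.
    return np.concatenate([obs, one_hot])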
@pytest.mark.mujoco
class TestMetaWorldMultiEnvWrapper:
"""Tests for garage.envs.multi_env_wrapper using Metaworld Envs"""
def setup_class(self):
"""Init Wrapper with MT10."""
# pylint: disable=import-outside-toplevel
from metaworld.benchmarks import MT10
tasks = MT10.get_train_tasks().all_task_names
envs = []
for task in tasks:
envs.append(MT10.from_task(task))
self.task_names = tasks
self.env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla',
env_names=tasks)
self.env_no_onehot = MultiEnvWrapper(
envs, sample_strategy=round_robin_strategy, mode='del-onehot')
def teardown_class(self):
"""Close the MTMetaWorldWrapper."""
self.env.close()
self.env_no_onehot.close()
def test_num_tasks(self):
"""Assert num tasks returns 10, because MT10 is being tested."""
assert self.env.num_tasks == 10
assert self.env_no_onehot.num_tasks == 10
def test_observation_space(self):
assert self.env.observation_space.shape == (9 + self.env.num_tasks, )
assert self.env_no_onehot.observation_space.shape == (9, )
def test_step(self):
"""Test that env_infos includes extra infos and obs has onehot."""
self.env.reset()
self.env_no_onehot.reset()
action0 = self.env.spec.action_space.sample()
action1 = self.env_no_onehot.spec.action_space.sample()
obs0, _, _, info0 = self.env.step(action0)
obs1, _, _, info1 = self.env_no_onehot.step(action1)
assert info0['task_id'] == self.env.active_task_index
assert info1['task_id'] == self.env.active_task_index
assert (self.env._active_task_one_hot() == obs0[9:]).all()
assert obs0.shape[0] == obs1.shape[0] + self.env.num_tasks
def test_reset(self):
"""Test round robin switching of environments during call to reset."""
self.env.reset()
active_task_id = self.env.active_task_index
for _ in range(self.env.num_tasks):
self.env.reset()
action = self.env.spec.action_space.sample()
_, _, _, info = self.env.step(action)
assert not info['task_id'] == active_task_id
active_task_id = self.env.active_task_index
def test_env_names(self):
"""Test that env_names that are passed via the env_names param are added to env_infos.""" # noqa: E501
self.env.reset()
self.env._active_task_index = 0
for i in range(self.env.num_tasks):
action = self.env.spec.action_space.sample()
_, _, _, info = self.env.step(action)
assert info['task_name'] == self.task_names[i]
self.env.reset()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_normalized_env.py |
import pickle
import numpy as np
from garage.envs import PointEnv
from garage.envs.normalized_env import NormalizedEnv
from tests.helpers import step_env
class TestNormalizedEnv:
def test_pickleable(self):
inner_env = PointEnv(goal=(1., 2.))
env = NormalizedEnv(inner_env, scale_reward=10.)
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
assert round_trip._scale_reward == env._scale_reward
assert np.array_equal(round_trip.env._goal, env.env._goal)
step_env(round_trip)
round_trip.close()
env.close()
def test_does_not_modify_action(self):
inner_env = PointEnv(goal=(1., 2.))
env = NormalizedEnv(inner_env, scale_reward=10.)
a = env.action_space.high + 1.
a_copy = a
env.reset()
env.step(a)
assert np.array_equal(a, a_copy)
env.close()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_normalized_gym.py |
import gym
from garage.envs import GarageEnv, normalize
class TestNormalizedGym:
def setup_method(self):
self.env = GarageEnv(
normalize(gym.make('Pendulum-v0'),
normalize_reward=True,
normalize_obs=True,
flatten_obs=True))
def teardown_method(self):
self.env.close()
def test_does_not_modify_action(self):
a = self.env.action_space.sample()
a_copy = a
self.env.reset()
self.env.step(a)
assert a == a_copy
def test_flatten(self):
for _ in range(10):
self.env.reset()
for _ in range(5):
self.env.render()
action = self.env.action_space.sample()
next_obs, _, done, _ = self.env.step(action)
assert next_obs.shape == self.env.observation_space.low.shape
if done:
break
def test_unflatten(self):
for _ in range(10):
self.env.reset()
for _ in range(5):
action = self.env.action_space.sample()
next_obs, _, done, _ = self.env.step(action)
# yapf: disable
assert (self.env.observation_space.flatten(next_obs).shape
== self.env.observation_space.flat_dim)
# yapf: enable
if done:
break
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_point_env.py |
import pickle
import numpy as np
from garage.envs.point_env import PointEnv
from tests.helpers import step_env
class TestPointEnv:
def test_pickleable(self):
env = PointEnv()
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
step_env(round_trip)
env.close()
round_trip.close()
def test_does_not_modify_action(self):
env = PointEnv()
a = env.action_space.sample()
a_copy = a.copy()
env.reset()
env.step(a)
assert a.all() == a_copy.all()
env.close()
def test_observation_space(self):
env = PointEnv()
obs_space = env.observation_space
a = env.action_space.sample()
obs, _, _, _ = env.step(a)
assert obs_space.contains(obs)
def test_reset(self):
env = PointEnv()
assert (env._point == np.array([0, 0])).all()
a = env.action_space.sample()
_ = env.step(a)
env.reset()
assert (env._point == np.array([0, 0])).all()
def test_task(self):
env = PointEnv()
tasks = env.sample_tasks(5)
assert len(tasks) == 5
for task in tasks:
env.set_task(task)
assert (env._goal == task['goal']).all()
def test_done(self):
env = PointEnv()
for _ in range(1000):
_, _, done, _ = env.step(env._goal)
if done:
break
else:
assert False, 'Should report done'
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_rl2_env.py |
from garage.envs import PointEnv
from garage.tf.algos.rl2 import RL2Env
class TestRL2Env:
# pylint: disable=unsubscriptable-object
def test_observation_dimension(self):
env = PointEnv()
wrapped_env = RL2Env(PointEnv())
assert wrapped_env.spec.observation_space.shape[0] == (
env.observation_space.shape[0] + env.action_space.shape[0] + 2)
obs = env.reset()
obs2 = wrapped_env.reset()
assert obs.shape[0] + env.action_space.shape[0] + 2 == obs2.shape[0]
obs, _, _, _ = env.step(env.action_space.sample())
obs2, _, _, _ = wrapped_env.step(env.action_space.sample())
assert obs.shape[0] + env.action_space.shape[0] + 2 == obs2.shape[0]
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/test_task_onehot_wrapper.py |
"""Tests for garage.envs.TaskOnehotWrapper"""
import numpy as np
from garage.envs import PointEnv, TaskOnehotWrapper
class TestSingleWrappedEnv:
def setup_method(self):
self.env = PointEnv()
self.base_len = len(self.env.reset())
self.n_total_tasks = 5
self.task_index = 1
self.wrapped = TaskOnehotWrapper(self.env, self.task_index,
self.n_total_tasks)
def test_produces_correct_onehots(self):
obs = self.wrapped.reset()
assert len(obs) == self.base_len + self.n_total_tasks
assert (obs[-self.n_total_tasks:] == np.array([0, 1, 0, 0, 0])).all()
def test_spec_obs_space(self):
obs = self.wrapped.reset()
assert self.wrapped.observation_space.contains(obs)
assert self.wrapped.spec.observation_space.contains(obs)
assert (self.wrapped.spec.observation_space ==
self.wrapped.observation_space)
def test_wrapped_env_list_produces_correct_onehots():
envs = [PointEnv(), PointEnv(), PointEnv(), PointEnv()]
base_len = len(envs[0].reset())
n_total_tasks = len(envs)
wrapped = TaskOnehotWrapper.wrap_env_list(envs)
assert len(wrapped) == n_total_tasks
for i, env in enumerate(wrapped):
obs = env.reset()
assert len(obs) == base_len + n_total_tasks
onehot = np.zeros(n_total_tasks)
onehot[i] = 1.
assert (obs[-n_total_tasks:] == onehot).all()
next_obs, _, _, _ = env.step(env.action_space.sample())
assert (next_obs[-n_total_tasks:] == onehot).all()
def test_wrapped_env_cons_list_produces_correct_onehots():
env_cons = [PointEnv] * 6
base_len = 3
n_total_tasks = len(env_cons)
wrapped_cons = TaskOnehotWrapper.wrap_env_cons_list(env_cons)
wrapped_envs = [cons() for cons in wrapped_cons]
assert len(wrapped_envs) == n_total_tasks
for i, env in enumerate(wrapped_envs):
obs = env.reset()
assert len(obs) == base_len + n_total_tasks
onehot = np.zeros(n_total_tasks)
onehot[i] = 1.
assert (obs[-n_total_tasks:] == onehot).all()
next_obs, _, _, _ = env.step(env.action_space.sample())
assert (next_obs[-n_total_tasks:] == onehot).all()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/box2d/parser/__init__.py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/dm_control/__init__.py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/dm_control/test_dm_control_env.py |
import collections
from copy import copy
import pickle
import dm_control.mujoco
import dm_control.suite
import pytest
from garage.envs.dm_control import DmControlEnv
from tests.helpers import step_env
@pytest.mark.mujoco
class TestDmControlEnv:
def test_can_step(self):
domain_name, task_name = dm_control.suite.ALL_TASKS[0]
env = DmControlEnv.from_suite(domain_name, task_name)
ob_space = env.observation_space
act_space = env.action_space
ob = env.reset()
assert ob_space.contains(ob)
a = act_space.sample()
assert act_space.contains(a)
# Skip rendering because it causes TravisCI to run out of memory
# Sometimes random actions lead to physics errors
with env._env.physics.suppress_physics_errors():
step_env(env, render=False)
env.close()
@pytest.mark.nightly
@pytest.mark.parametrize('domain_name, task_name',
dm_control.suite.ALL_TASKS)
def test_all_can_step(self, domain_name, task_name):
env = DmControlEnv.from_suite(domain_name, task_name)
ob_space = env.observation_space
act_space = env.action_space
ob = env.reset()
assert ob_space.contains(ob)
a = act_space.sample()
assert act_space.contains(a)
# Skip rendering because it causes TravisCI to run out of memory
# Sometimes random actions lead to physics errors
with env._env.physics.suppress_physics_errors():
step_env(env, render=False)
env.close()
def test_pickleable(self):
domain_name, task_name = dm_control.suite.ALL_TASKS[0]
env = DmControlEnv.from_suite(domain_name, task_name)
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
# Skip rendering because it causes TravisCI to run out of memory
# Sometimes random actions lead to physics errors
with env._env.physics.suppress_physics_errors():
step_env(env, render=False)
round_trip.close()
env.close()
@pytest.mark.nightly
@pytest.mark.parametrize('domain_name, task_name',
dm_control.suite.ALL_TASKS)
def test_all_pickleable(self, domain_name, task_name):
env = DmControlEnv.from_suite(domain_name, task_name)
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip
# Skip rendering because it causes TravisCI to run out of memory
# Sometimes random actions lead to physics errors
with env._env.physics.suppress_physics_errors():
step_env(env, render=False)
round_trip.close()
env.close()
def test_does_not_modify_actions(self):
domain_name, task_name = dm_control.suite.ALL_TASKS[0]
env = DmControlEnv.from_suite(domain_name, task_name)
a = env.action_space.sample()
a_copy = copy(a)
env.step(a)
if isinstance(a, collections.Iterable):
assert a.all() == a_copy.all()
else:
assert a == a_copy
env.close()
@pytest.mark.nightly
@pytest.mark.parametrize('domain_name, task_name',
dm_control.suite.ALL_TASKS)
def test_all_does_not_modify_actions(self, domain_name, task_name):
env = DmControlEnv.from_suite(domain_name, task_name)
a = env.action_space.sample()
a_copy = copy(a)
env.step(a)
if isinstance(a, collections.Iterable):
assert a.all() == a_copy.all()
else:
assert a == a_copy
env.close()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/dm_control/test_dm_control_tf_policy.py |
from dm_control.suite import ALL_TASKS
import pytest
from garage.envs import GarageEnv
from garage.envs.dm_control import DmControlEnv
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
@pytest.mark.mujoco
class TestDmControlTfPolicy(TfGraphTestCase):
def test_dm_control_tf_policy(self):
task = ALL_TASKS[0]
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
env = GarageEnv(DmControlEnv.from_suite(*task))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32),
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=5,
discount=0.99,
max_kl_step=0.01,
)
runner.setup(algo, env)
runner.train(n_epochs=1, batch_size=10)
env.close()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/__init__.py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_atari_env.py |
import numpy as np
from garage.envs.wrappers import AtariEnv
from tests.fixtures.envs.dummy import DummyDiscretePixelEnvBaselines
class TestFireReset:
def test_atari_env(self):
env = DummyDiscretePixelEnvBaselines()
env_wrapped = AtariEnv(env)
obs = env.reset()
obs_wrapped = env_wrapped.reset()
assert not isinstance(obs, np.ndarray)
assert isinstance(obs_wrapped, np.ndarray)
obs, _, _, _ = env.step(1)
obs_wrapped, _, _, _ = env_wrapped.step(1)
assert not isinstance(obs, np.ndarray)
assert isinstance(obs_wrapped, np.ndarray)
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_clip_reward.py |
from garage.envs.wrappers import ClipReward
from tests.fixtures.envs.dummy import DummyRewardBoxEnv
class TestClipReward:
def test_clip_reward(self):
# reward = 10 when action = 0, otherwise -10
env = DummyRewardBoxEnv(random=True)
env_wrap = ClipReward(env)
env.reset()
env_wrap.reset()
_, reward, _, _ = env.step(0)
_, reward_wrap, _, _ = env_wrap.step(0)
assert reward == 10
assert reward_wrap == 1
_, reward, _, _ = env.step(1)
_, reward_wrap, _, _ = env_wrap.step(1)
assert reward == -10
assert reward_wrap == -1
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_episodic_life.py |
import numpy as np
from garage.envs.wrappers import EpisodicLife
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
class TestEpisodicLife:
def test_episodic_life_reset(self):
env = EpisodicLife(DummyDiscretePixelEnv())
obs = env.reset()
# env has reset
assert np.array_equal(obs, np.ones(env.observation_space.shape))
assert env.unwrapped.ale.lives() == 5
obs, _, d, info = env.step(0)
assert d
assert info['ale.lives'] == 4
obs = env.reset()
# env has not reset
assert not np.array_equal(obs, np.ones(env.observation_space.shape))
for _ in range(3):
obs, r, d, info = env.step(0)
assert d
assert info['ale.lives'] == 0
obs = env.reset()
# env has reset
assert np.array_equal(obs, np.ones(env.observation_space.shape))
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_fire_reset.py |
import numpy as np
from garage.envs.wrappers import FireReset
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
class TestFireReset:
def test_fire_reset(self):
env = DummyDiscretePixelEnv()
env_wrap = FireReset(env)
obs = env.reset()
obs_wrap = env_wrap.reset()
assert np.array_equal(obs, np.ones(env.observation_space.shape))
assert np.array_equal(obs_wrap, np.full(env.observation_space.shape,
2))
env_wrap.step(2)
        obs_wrap = env_wrap.reset()  # env will call reset again after firing
assert np.array_equal(obs_wrap, np.ones(env.observation_space.shape))
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_grayscale_env.py |
import gym.spaces
import numpy as np
import pytest
from garage.envs.wrappers import Grayscale
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
class TestGrayscale:
def setup_method(self):
self.env = DummyDiscretePixelEnv(random=False)
self.env_g = Grayscale(DummyDiscretePixelEnv(random=False))
def teardown_method(self):
self.env.close()
self.env_g.close()
def test_grayscale_invalid_environment_type(self):
with pytest.raises(ValueError):
self.env.observation_space = gym.spaces.Discrete(64)
Grayscale(self.env)
def test_grayscale_invalid_environment_shape(self):
with pytest.raises(ValueError):
self.env.observation_space = gym.spaces.Box(
low=0, high=255, shape=(4, ), dtype=np.uint8)
Grayscale(self.env)
def test_grayscale_observation_space(self):
assert self.env_g.observation_space.shape == (
self.env.observation_space.shape[:-1])
def test_grayscale_reset(self):
"""
RGB to grayscale conversion using scikit-image.
Weights used for conversion:
Y = 0.2125 R + 0.7154 G + 0.0721 B
Reference:
http://scikit-image.org/docs/dev/api/skimage.color.html#skimage.color.rgb2grey
"""
grayscale_output = np.round(
np.dot(self.env.reset()[:, :, :3],
[0.2125, 0.7154, 0.0721])).astype(np.uint8)
np.testing.assert_array_almost_equal(grayscale_output,
self.env_g.reset())
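        # Hedged worked example of the weights above: an all-white RGB pixel
        # gives round(0.2125*255 + 0.7154*255 + 0.0721*255) = 255, so white
        # frames stay white after the Grayscale wrapper.
        assert np.round(np.dot([255, 255, 255],
                               [0.2125, 0.7154, 0.0721])) == 255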
def test_grayscale_step(self):
self.env.reset()
self.env_g.reset()
obs, _, _, _ = self.env.step(1)
obs_g, _, _, _ = self.env_g.step(1)
grayscale_output = np.round(
np.dot(obs[:, :, :3], [0.2125, 0.7154, 0.0721])).astype(np.uint8)
np.testing.assert_array_almost_equal(grayscale_output, obs_g)
| 1,947 | 32.586207 | 86 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_max_and_skip.py | import numpy as np
from garage.envs.wrappers import MaxAndSkip
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
class TestMaxAndSkip:
def setup_method(self):
self.env = DummyDiscretePixelEnv(random=False)
self.env_wrap = MaxAndSkip(DummyDiscretePixelEnv(random=False), skip=4)
def teardown_method(self):
self.env.close()
self.env_wrap.close()
def test_max_and_skip_reset(self):
np.testing.assert_array_equal(self.env.reset(), self.env_wrap.reset())
def test_max_and_skip_step(self):
self.env.reset()
self.env_wrap.reset()
obs_wrap, reward_wrap, _, _ = self.env_wrap.step(1)
reward = 0
        for _ in range(4):
obs, r, _, _ = self.env.step(1)
reward += r
np.testing.assert_array_equal(obs, obs_wrap)
np.testing.assert_array_equal(reward, reward_wrap)
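        # Hedged note: one wrapped step presumably executes `skip` (= 4) raw
        # steps and sums their rewards, which the accumulation above mirrors.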
        # done=True because both envs have stepped more than 4 times in total
obs_wrap, _, done_wrap, _ = self.env_wrap.step(1)
obs, _, done, _ = self.env.step(1)
assert done
assert done_wrap
np.testing.assert_array_equal(obs, obs_wrap)
| 1,171 | 29.842105 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_noop.py | import numpy as np
from garage.envs.wrappers import Noop
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
class TestNoop:
def test_noop(self):
env = Noop(DummyDiscretePixelEnv(), noop_max=3)
for _ in range(1000):
env.reset()
assert 1 <= env.env.step_called <= 3
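        # Hedged note: the Noop wrapper presumably performs a random number
        # of no-op steps in [1, noop_max] on every reset, which is what the
        # bound above checks.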
env = Noop(DummyDiscretePixelEnv(), noop_max=10)
for _ in range(1000):
obs = env.reset()
if env.env.step_called % 5 == 0:
"""
There are only 5 lives in the environment, so if number of
steps are multiple of 5, env will call reset at last.
"""
assert np.array_equal(obs,
np.ones(env.observation_space.shape))
else:
assert not np.array_equal(obs,
np.ones(env.observation_space.shape))
| 933 | 32.357143 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_resize_env.py | import gym.spaces
import numpy as np
import pytest
from garage.envs.wrappers import Resize
from tests.fixtures.envs.dummy import DummyDiscrete2DEnv
class TestResize:
def setup_method(self):
self.width = 16
self.height = 16
self.env = DummyDiscrete2DEnv()
self.env_r = Resize(
DummyDiscrete2DEnv(), width=self.width, height=self.height)
def teardown_method(self):
self.env.close()
self.env_r.close()
def test_resize_invalid_environment_type(self):
with pytest.raises(ValueError):
self.env.observation_space = gym.spaces.Discrete(64)
Resize(self.env, width=self.width, height=self.height)
def test_resize_invalid_environment_shape(self):
with pytest.raises(ValueError):
self.env.observation_space = gym.spaces.Box(
low=0, high=255, shape=(4, ), dtype=np.uint8)
Resize(self.env, width=self.width, height=self.height)
def test_resize_output_observation_space(self):
assert self.env_r.observation_space.shape == (self.width, self.height)
def test_resize_output_reset(self):
assert self.env_r.reset().shape == (self.width, self.height)
def test_resize_output_step(self):
self.env_r.reset()
obs_r, _, _, _ = self.env_r.step(1)
assert obs_r.shape == (self.width, self.height)
| 1,385 | 32 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/envs/wrappers/test_stack_frames_env.py | import gym.spaces
import numpy as np
import pytest
from garage.envs.wrappers import StackFrames
from tests.fixtures.envs.dummy import DummyDiscrete2DEnv
class TestStackFrames:
def setup_method(self):
self.n_frames = 4
self.env = DummyDiscrete2DEnv(random=False)
self.env_s = StackFrames(
DummyDiscrete2DEnv(random=False), n_frames=self.n_frames)
self.width, self.height = self.env.observation_space.shape
def teardown_method(self):
self.env.close()
self.env_s.close()
def test_stack_frames_invalid_environment_type(self):
with pytest.raises(ValueError):
self.env.observation_space = gym.spaces.Discrete(64)
StackFrames(self.env, n_frames=4)
def test_stack_frames_invalid_environment_shape(self):
with pytest.raises(ValueError):
self.env.observation_space = gym.spaces.Box(
low=0, high=255, shape=(4, ), dtype=np.uint8)
StackFrames(self.env, n_frames=4)
def test_stack_frames_output_observation_space(self):
assert self.env_s.observation_space.shape == (self.width, self.height,
self.n_frames)
def test_stack_frames_for_reset(self):
frame_stack = self.env.reset()
        for _ in range(self.n_frames - 1):
frame_stack = np.dstack((frame_stack, self.env.reset()))
np.testing.assert_array_equal(self.env_s.reset(), frame_stack)
def test_stack_frames_for_step(self):
self.env.reset()
self.env_s.reset()
frame_stack = np.empty((self.width, self.height, self.n_frames))
        for _ in range(10):
frame_stack = frame_stack[:, :, 1:]
obs, _, _, _ = self.env.step(1)
frame_stack = np.dstack((frame_stack, obs))
obs_stack, _, _, _ = self.env_s.step(1)
np.testing.assert_array_equal(obs_stack, frame_stack)
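        # The loop above mirrors StackFrames' sliding window: each step
        # drops the oldest of the n_frames channels and appends the newest
        # observation along the last axis.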
| 1,949 | 34.454545 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_deterministic.py | """Tests for deterministic.py"""
import random
import numpy as np
import tensorflow as tf
import torch
from garage.experiment import deterministic
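# A minimal usage sketch (assuming set_seed seeds `random`, NumPy,
# TensorFlow, and PyTorch in one call, as the tests below suggest):
def test_deterministic_random_roundtrip():
    deterministic.set_seed(42)
    first = random.random()
    deterministic.set_seed(42)
    # Re-seeding with the same value must reproduce the same draw.
    assert first == random.random()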
def test_deterministic_pytorch():
"""Test deterministic behavior of PyTorch"""
deterministic.set_seed(111)
rand_tensor = torch.rand((5, 5))
deterministic_tensor = torch.Tensor(
[[0.715565920, 0.913992643, 0.281857729, 0.258099794, 0.631108642],
[0.600053012, 0.931192935, 0.215290189, 0.603278518, 0.732785344],
[0.185717106, 0.510067403, 0.754451334, 0.288391531, 0.577469587],
[0.035843492, 0.102626860, 0.341910362, 0.439984798, 0.634111166],
[0.622391582, 0.633447766, 0.857972443, 0.157199264, 0.785320759]])
assert torch.all(torch.eq(rand_tensor, deterministic_tensor))
def test_deterministic_tensorflow():
"""Test deterministic behavior of Tensorflow"""
deterministic.set_seed(0)
with tf.compat.v1.Session() as sess:
rand_tensor = sess.run(tf.random.uniform((5, 5), seed=0))
deterministic_tensor = np.array(
[[0.10086262, 0.9701668, 0.8487642, 0.04828131, 0.04852307],
[0.77747464, 0.844468, 0.41707492, 0.5099584, 0.6552025],
[0.9881507, 0.36698937, 0.37789786, 0.69118714, 0.99544394],
[0.4662125, 0.9912039, 0.6973165, 0.7741407, 0.8881662],
[0.03854167, 0.97539485, 0.23024535, 0.83840847, 0.79527795]],
dtype=np.float32)
assert np.allclose(rand_tensor, deterministic_tensor)
def test_deterministic_numpy():
"""Test deterministic behavior of numpy"""
deterministic.set_seed(22)
rand_tensor = np.random.rand(5, 5)
deterministic_tensor = np.array(
[[0.20846054, 0.48168106, 0.42053804, 0.859182, 0.17116155],
[0.33886396, 0.27053283, 0.69104135, 0.22040452, 0.81195092],
[0.01052687, 0.5612037, 0.81372619, 0.7451003, 0.18911136],
[0.00614087, 0.77204387, 0.95783217, 0.70193788, 0.29757827],
[0.76799274, 0.68821832, 0.38718348, 0.61520583, 0.42755524]])
assert np.allclose(rand_tensor, deterministic_tensor)
def test_deterministic_random():
"""Test deterministic behavior of random"""
deterministic.set_seed(55)
rand_array = [random.random() for _ in range(10)]
deterministic_array = [
0.09033985426934954, 0.9506335645634441, 0.14997105299598545,
0.7393703706762795, 0.8412423959349363, 0.7471369518620469,
0.30193759566924927, 0.35162393686161975, 0.7218626135761532,
0.9656464075038401
]
assert rand_array == deterministic_array
| 2,567 | 37.909091 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_experiment.py | import os
import pathlib
import shutil
import subprocess
import sys
import tempfile
import textwrap
import pytest
from garage.experiment.experiment import run_experiment, wrap_experiment
def dummy_func(*_):
pass
def test_default_log_dir():
    # Because this test uses the default log directory, it will fail if any
    # other test that uses that directory runs in parallel with it.
    # For this reason, all tests which use run_experiment must pass a
    # non-default exp_prefix or log_dir.
default_path = os.path.join(os.getcwd(), 'data/local/experiment')
pathlib.Path(default_path).mkdir(parents=True, exist_ok=True)
folder_num = len(os.listdir(default_path))
run_experiment(dummy_func)
assert len(os.listdir(default_path)) == folder_num + 1
def test_experiment_with_none_task():
with pytest.raises(Exception):
run_experiment()
def test_experiment_with_not_callable_task():
with pytest.raises(ValueError):
run_experiment(1)
def test_experiment_with_variant():
# Note: exp_name in variant does nothing.
exp_variant = {'exp_name': 'test_name'}
exp_path = os.path.join(os.getcwd(), 'data/local/test-prefix')
pathlib.Path(exp_path).mkdir(parents=True, exist_ok=True)
old_folder_contents = set(os.listdir(exp_path))
# Pass a non-default exp_prefix, so test_default_log_dir is safe.
run_experiment(dummy_func, exp_prefix='test_prefix', variant=exp_variant)
prefix_contents = set(os.listdir(exp_path))
folder_content_diff = prefix_contents - old_folder_contents
assert len(folder_content_diff) == 1
exp_folder_name = folder_content_diff.pop()
assert exp_folder_name.startswith('test_prefix')
def _hard_rmtree(path):
    # Sometimes rmtree fails (e.g. when files are still held open), but
    # moving the directory into a temporary directory reliably succeeds.
shutil.rmtree(path, ignore_errors=True)
try:
with tempfile.TemporaryDirectory() as trash_dir:
shutil.move(str(path), trash_dir)
except FileNotFoundError:
pass
def test_wrap_experiment_makes_log_dir():
prefix = 'wrap_exp_test_makes_log_dir'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp'
@wrap_experiment(prefix=prefix)
def test_exp(ctxt=None):
assert expected_path.samefile(ctxt.snapshot_dir)
assert not exp_path.exists()
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert prefix_contents[0].samefile(expected_path)
expected_path = exp_path / 'test_exp_1'
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 2
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
expected_path = exp_path / 'test_exp_2'
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 3
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
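# Hedged summary of the pattern above: wrap_experiment appears to
# deduplicate experiment directories by appending _1, _2, ... when the base
# name is already taken; the name_parameters tests below rely on the same
# behavior.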
def _run_launcher(launcher_path, prefix):
with launcher_path.open('w') as launcher_f:
launcher_f.write(
textwrap.dedent(r"""
from garage import wrap_experiment
@wrap_experiment(prefix='{}')
def test_exp(ctxt=None):
print(ctxt.snapshot_dir)
test_exp()""".format(prefix)))
output = (subprocess.check_output(
(sys.executable, str(launcher_path)),
stderr=subprocess.STDOUT)).decode('utf-8').strip().split('\n')
snapshot_dir = output[-1]
return snapshot_dir, output
def test_wrap_experiment_builds_git_archive():
prefix = 'wrap_exp_test_builds_git_archive'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp' / 'launch_archive.tar.xz'
# Because __main__ actually points to pytest right now, we need to run the
# "real" test in a subprocess.
with tempfile.TemporaryDirectory() as launcher_dir:
launch_dir = pathlib.Path(launcher_dir)
subprocess.check_call(('git', 'init'), cwd=launcher_dir)
# Make a test file, since git ls-files needs at least one commit.
test_txt = launch_dir / 'test.txt'
test_txt.touch()
subprocess.check_call(('git', 'add', str(test_txt)), cwd=launcher_dir)
subprocess.check_call(
('git', '-c', 'user.name=Test User', '-c',
'[email protected]', 'commit', '-m', 'Initial commit'),
cwd=launcher_dir)
subdir = launch_dir / 'subdir'
subdir.mkdir()
launcher_path = pathlib.Path(launcher_dir) / 'subdir' / 'run_exp.py'
snapshot_dir, _ = _run_launcher(launcher_path, prefix)
archive_path = os.path.join(snapshot_dir, 'launch_archive.tar.xz')
assert expected_path.samefile(archive_path)
assert expected_path.exists()
archive_size = expected_path.stat().st_size
assert archive_size > 250
contents = subprocess.check_output(
('tar', '--list', '--file', archive_path)).decode('utf-8')
assert 'subdir/run_exp.py' in contents.strip()
assert 'test.txt' in contents.strip()
def test_wrap_experiment_builds_git_archive_deleted_files():
prefix = 'wrap_exp_test_builds_git_archive_deleted_files'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp' / 'launch_archive.tar.xz'
# Because __main__ actually points to pytest right now, we need to run the
# "real" test in a subprocess.
with tempfile.TemporaryDirectory() as launcher_dir:
launch_dir = pathlib.Path(launcher_dir)
subprocess.check_call(('git', 'init'), cwd=launcher_dir)
# Make a test file, since git ls-files needs at least one commit.
to_delete = launch_dir / 'to_delete.txt'
to_delete.touch()
subprocess.check_call(('git', 'add', str(to_delete)), cwd=launcher_dir)
subprocess.check_call(
('git', '-c', 'user.name=Test User', '-c',
'[email protected]', 'commit', '-m', 'Initial commit'),
cwd=launcher_dir)
to_delete.unlink()
subdir = launch_dir / 'subdir'
subdir.mkdir()
launcher_path = pathlib.Path(launcher_dir) / 'subdir' / 'run_exp.py'
snapshot_dir, _ = _run_launcher(launcher_path, prefix)
archive_path = os.path.join(snapshot_dir, 'launch_archive.tar.xz')
assert expected_path.samefile(archive_path)
assert expected_path.exists()
archive_size = expected_path.stat().st_size
assert archive_size > 250
contents = subprocess.check_output(
('tar', '--list', '--file', archive_path)).decode('utf-8')
assert 'subdir/run_exp.py' in contents.strip()
        assert 'to_delete.txt' not in contents.strip()
def test_wrap_experiment_builds_git_archive_large_file():
prefix = 'wrap_exp_test_builds_git_archive_large_files'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp' / 'launch_archive.tar.xz'
# Because __main__ actually points to pytest right now, we need to run the
# "real" test in a subprocess.
with tempfile.TemporaryDirectory() as launcher_dir:
launch_dir = pathlib.Path(launcher_dir)
subprocess.check_call(('git', 'init'), cwd=launcher_dir)
# Make a test file, since git ls-files needs at least one commit.
test_txt = launch_dir / 'test.txt'
test_txt.touch()
subprocess.check_call(('git', 'add', str(test_txt)), cwd=launcher_dir)
subprocess.check_call(
('git', '-c', 'user.name=Test User', '-c',
'[email protected]', 'commit', '-m', 'Initial commit'),
cwd=launcher_dir)
subdir = launch_dir / 'subdir'
subdir.mkdir()
launcher_path = pathlib.Path(launcher_dir) / 'subdir' / 'run_exp.py'
large_file = launch_dir / 'large.obj'
with large_file.open(mode='wb') as f:
f.write(b'0' * int(1e7))
snapshot_dir, output = _run_launcher(launcher_path, prefix)
assert any(['archive_launch_repo' in line for line in output])
archive_path = os.path.join(snapshot_dir, 'launch_archive.tar.xz')
assert expected_path.samefile(archive_path)
assert expected_path.exists()
archive_size = expected_path.stat().st_size
assert archive_size > 250
contents = subprocess.check_output(
('tar', '--list', '--file', archive_path)).decode('utf-8')
assert 'subdir/run_exp.py' in contents.strip()
assert 'test.txt' in contents.strip()
assert 'large.obj' not in contents.strip()
def test_wrap_experiment_launcher_outside_git():
prefix = 'wrap_exp_test_launcher_outside_git'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp'
# Because this is testing a file outside of a git repo, we need to make
# ourselves a launcher script outside of any git repo.
with tempfile.TemporaryDirectory() as launcher_dir:
launcher_path = pathlib.Path(launcher_dir) / 'run_exp.py'
snapshot_dir, _ = _run_launcher(launcher_path, prefix)
assert os.path.samefile(str(expected_path), str(snapshot_dir))
def test_wrap_experiment_raises_on_non_ctxt_param_name():
prefix = 'wrap_exp_test_prefix2'
with pytest.raises(ValueError,
match="named 'ctxt' instead of '_snapshot_config'"):
@wrap_experiment(prefix=prefix)
def _test_exp(_snapshot_config=None):
pass
def test_wrap_experiment_raises_on_empty_params():
prefix = 'wrap_exp_test_prefix3'
with pytest.raises(ValueError, match="named 'ctxt'"):
@wrap_experiment(prefix=prefix)
def _test_exp():
pass
def test_wrap_experiment_name_parameters_passed():
prefix = 'wrap_exp_test_name_parameters_passed'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp_seed=2'
@wrap_experiment(prefix=prefix, name_parameters='passed')
def test_exp(ctxt=None, seed=1):
del seed
assert expected_path.samefile(ctxt.snapshot_dir)
assert not exp_path.exists()
test_exp(seed=2)
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert prefix_contents[0].samefile(expected_path)
expected_path = exp_path / 'test_exp_seed=2_1'
test_exp(seed=2)
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 2
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
expected_path = exp_path / 'test_exp_seed=3'
test_exp(seed=3)
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 3
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
def test_wrap_experiment_name_parameters_all():
prefix = 'wrap_exp_test_name_parameters_all'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp_seed=1'
@wrap_experiment(prefix=prefix, name_parameters='all')
def test_exp(ctxt=None, seed=1):
del seed
assert expected_path.samefile(ctxt.snapshot_dir)
assert not exp_path.exists()
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert prefix_contents[0].samefile(expected_path)
expected_path = exp_path / 'test_exp_seed=1_1'
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 2
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
expected_path = exp_path / 'test_exp_seed=1_2'
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 3
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
def test_wrap_experiment_name_parameters_all_disordered():
prefix = 'wrap_exp_test_name_parameters_all_disordered'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp_seed=1_env=test-env'
@wrap_experiment(prefix=prefix, name_parameters='all')
def test_exp(ctxt=None, seed=1, env='test-env'):
del seed
del env
assert expected_path.samefile(ctxt.snapshot_dir)
assert not exp_path.exists()
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert prefix_contents[0].samefile(expected_path)
expected_path = exp_path / 'test_exp_seed=2_env=test-env-v2'
test_exp(env='test-env-v2', seed=2)
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 2
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
expected_path = exp_path / 'test_exp_seed=1_env=test-env-v2'
test_exp(env='test-env-v2')
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 3
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
def test_wrap_experiment_name_parameters_passed_disordered():
prefix = 'wrap_exp_test_name_parameters_passed_disordered'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
expected_path = exp_path / 'test_exp_seed=2_env=test-env'
@wrap_experiment(prefix=prefix, name_parameters='passed')
def test_exp(ctxt=None, seed=1, env='test-env'):
del seed
del env
assert expected_path.samefile(ctxt.snapshot_dir)
assert not exp_path.exists()
test_exp(seed=2, env='test-env')
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert prefix_contents[0].samefile(expected_path)
expected_path = exp_path / 'test_exp_seed=2_env=test-env-v2'
test_exp(env='test-env-v2', seed=2)
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 2
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
expected_path = exp_path / 'test_exp_env=test-env-v2'
test_exp(env='test-env-v2')
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 3
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
def test_wrap_experiment_name_parameters_none():
@wrap_experiment(name_parameters='none')
def test_exp(ctxt=None, seed=1):
del ctxt
del seed
with pytest.raises(ValueError, match='wrap_experiment.name_parameters'):
test_exp()
def test_wrap_experiment_logdir():
prefix = 'wrap_exp_logdir'
name = 'specified_logdir'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
expected_path = exp_path / name
_hard_rmtree(exp_path)
logdir = 'data/local/wrap_exp_logdir/specified_logdir'
@wrap_experiment(prefix=prefix, log_dir=logdir)
def test_exp(ctxt=None, seed=1):
del seed
assert expected_path.samefile(ctxt.snapshot_dir)
assert not exp_path.exists()
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert prefix_contents[0].samefile(expected_path)
expected_path = pathlib.Path(os.getcwd(), logdir + '_1')
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 2
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
def test_wrap_experiment_dynamic_log_dir():
prefix = 'wrap_exp_dynamic_logdir'
name = 'specified_logdir'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
expected_path = exp_path / name
_hard_rmtree(exp_path)
logdir = 'data/local/wrap_exp_dynamic_logdir/specified_logdir'
@wrap_experiment
def test_exp(ctxt=None, seed=1):
del seed
assert expected_path.samefile(ctxt.snapshot_dir)
assert not exp_path.exists()
test_exp(dict(prefix=prefix, log_dir=logdir))
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert prefix_contents[0].samefile(expected_path)
expected_path = pathlib.Path(os.getcwd(), logdir + '_1')
test_exp(dict(prefix=prefix, log_dir=logdir))
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 2
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
def test_wrap_experiment_use_existing_dir():
prefix = 'wrap_exp_existing_dir'
name = 'test_exp'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
expected_path = exp_path / name
_hard_rmtree(exp_path)
logdir = 'data/local/wrap_exp_existing_dir/test_exp'
@wrap_experiment(prefix=prefix)
def test_exp(ctxt=None, seed=1):
del seed
assert expected_path.samefile(ctxt.snapshot_dir)
assert not exp_path.exists()
test_exp()
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert prefix_contents[0].samefile(expected_path)
@wrap_experiment(prefix=prefix)
def test_exp_2(ctxt, seed=1):
del seed
assert expected_path.samefile(ctxt.snapshot_dir)
test_exp_2(dict(log_dir=logdir, use_existing_dir=True))
prefix_contents = list(exp_path.iterdir())
assert len(prefix_contents) == 1
assert any(
[expected_path.samefile(directory) for directory in prefix_contents])
def test_wrap_experiment_invalid_options():
prefix = 'wrap_exp_invalid_options'
exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
_hard_rmtree(exp_path)
logdir = 'data/local/wrap_exp_invalid_options/test_exp'
@wrap_experiment(prefix=prefix)
def test_exp(ctxt):
del ctxt
with pytest.raises(ValueError):
test_exp(dict(logdir=logdir))
| 18,241 | 32.349177 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_experiment_wrapper.py | import base64
import pickle
import pytest
from garage.experiment import SnapshotConfig
from garage.experiment.experiment_wrapper import run_experiment
def method_call(snapshot_config, variant_data, from_dir, from_epoch):
assert isinstance(snapshot_config, SnapshotConfig)
assert snapshot_config.snapshot_dir == 'data/'
assert snapshot_config.snapshot_mode == 'last'
assert snapshot_config.snapshot_gap == 1
assert variant_data is None
assert from_dir == 'resume_dir/'
assert from_epoch == 'first'
def invalid_method_call():
pass
class TestExperimentWrapper:
def test_experiment_wrapper_method_call(self):
data = base64.b64encode(pickle.dumps(method_call)).decode('utf-8')
args = [
'',
'--args_data',
data,
'--log_dir',
'data/',
'--resume_from_dir',
'resume_dir/',
'--resume_from_epoch',
'first',
]
run_experiment(args)
def test_experiment_wrapper_invalid_method_call(self):
data = base64.b64encode(
pickle.dumps(invalid_method_call)).decode('utf-8')
args = ['', '--args_data', data]
with pytest.raises(BaseException):
run_experiment(args)
| 1,276 | 26.76087 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_local_runner.py | import gym
import pytest
import torch
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.plotter import Plotter
from garage.sampler import LocalSampler
from garage.torch.algos import PPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from tests.fixtures import snapshot_config
class TestLocalRunner:
"""Test class for LocalRunner."""
def setup_method(self):
"""Setup method which is called before every test."""
self.env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
self.policy = GaussianMLPPolicy(
env_spec=self.env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
self.value_function = GaussianMLPValueFunction(env_spec=self.env.spec)
def teardown_method(self):
"""Teardown method which is called after every test."""
self.env.close()
@pytest.mark.mujoco
def test_set_plot(self):
deterministic.set_seed(0)
runner = LocalRunner(snapshot_config)
algo = PPO(env_spec=self.env.spec,
policy=self.policy,
value_function=self.value_function,
max_path_length=100,
discount=0.99,
gae_lambda=0.97,
lr_clip_range=2e-1)
runner.setup(algo, self.env)
runner.train(n_epochs=1, batch_size=100, plot=True)
        assert isinstance(
            runner._plotter,
            Plotter), ('LocalRunner._plotter should be set to a Plotter.')
def test_setup_no_sampler():
runner = LocalRunner(snapshot_config)
class SupervisedAlgo:
def train(self, runner):
# pylint: disable=undefined-loop-variable
for epoch in runner.step_epochs():
pass
assert epoch == 4
runner.setup(SupervisedAlgo(), None)
runner.train(n_epochs=5)
class CrashingAlgo:
def train(self, runner):
# pylint: disable=undefined-loop-variable
for epoch in runner.step_epochs():
runner.obtain_samples(epoch)
def test_setup_no_sampler_cls():
runner = LocalRunner(snapshot_config)
algo = CrashingAlgo()
algo.max_path_length = 100
runner.setup(algo, None)
with pytest.raises(ValueError, match='sampler_cls'):
runner.train(n_epochs=5)
def test_setup_no_policy():
runner = LocalRunner(snapshot_config)
with pytest.raises(ValueError, match='policy'):
runner.setup(CrashingAlgo(), None, sampler_cls=LocalSampler)
def test_setup_no_max_path_length():
runner = LocalRunner(snapshot_config)
algo = CrashingAlgo()
algo.policy = None
with pytest.raises(ValueError, match='max_path_length'):
runner.setup(algo, None, sampler_cls=LocalSampler)
def test_setup_no_batch_size():
deterministic.set_seed(0)
runner = LocalRunner(snapshot_config)
algo = CrashingAlgo()
algo.max_path_length = 100
algo.policy = None
runner.setup(algo, None, sampler_cls=LocalSampler)
with pytest.raises(ValueError, match='batch_size'):
runner.train(n_epochs=5)
| 3,274 | 29.045872 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_meta_evaluator.py | import csv
import tempfile
import cloudpickle
from dowel import CsvOutput, logger, tabular
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv, PointEnv
from garage.experiment import LocalTFRunner, MetaEvaluator, SnapshotConfig
from garage.experiment.deterministic import set_seed
from garage.experiment.local_runner import LocalRunner
from garage.experiment.task_sampler import SetTaskSampler
from garage.np.algos import MetaRLAlgorithm
from garage.sampler import LocalSampler
from garage.tf.policies import GaussianMLPPolicy
class RandomPolicy:
def __init__(self, action_space):
self._action_space = action_space
def reset(self):
pass
def get_action(self, observation):
del observation
return self._action_space.sample(), {}
class SingleActionPolicy:
def __init__(self, action):
self._action = action
def reset(self):
pass
def get_action(self, observation):
del observation
return self._action, {}
class OptimalActionInference(MetaRLAlgorithm):
sampler_cls = LocalSampler
def __init__(self, env, max_path_length):
self.env = env
self.policy = RandomPolicy(self.env.spec.action_space)
self.max_path_length = max_path_length
def train(self, runner):
del runner
def get_exploration_policy(self):
return self.policy
def adapt_policy(self, exploration_policy, exploration_trajectories):
best_timestep = np.argmax(exploration_trajectories.rewards)
best_action = exploration_trajectories.actions[best_timestep]
return SingleActionPolicy(best_action)
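        # In other words, this toy "adaptation" replays whichever single
        # action earned the highest reward during exploration, which is
        # enough for MetaEvaluator to measure adapted-policy returns.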
@pytest.mark.serial
def test_meta_evaluator():
set_seed(100)
tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
max_path_length = 200
with tempfile.TemporaryDirectory() as log_dir_name:
runner = LocalRunner(
SnapshotConfig(snapshot_dir=log_dir_name,
snapshot_mode='last',
snapshot_gap=1))
env = GarageEnv(PointEnv())
algo = OptimalActionInference(env=env, max_path_length=max_path_length)
runner.setup(algo, env)
meta_eval = MetaEvaluator(test_task_sampler=tasks,
max_path_length=max_path_length,
n_test_tasks=10)
log_file = tempfile.NamedTemporaryFile()
csv_output = CsvOutput(log_file.name)
logger.add_output(csv_output)
meta_eval.evaluate(algo)
logger.log(tabular)
meta_eval.evaluate(algo)
logger.log(tabular)
logger.dump_output_type(CsvOutput)
logger.remove_output_type(CsvOutput)
with open(log_file.name, 'r') as file:
rows = list(csv.DictReader(file))
assert len(rows) == 2
assert float(rows[0]['MetaTest/__unnamed_task__/CompletionRate']) < 1.0
assert float(rows[0]['MetaTest/__unnamed_task__/Iteration']) == 0
assert (float(rows[0]['MetaTest/__unnamed_task__/MaxReturn']) >= float(
rows[0]['MetaTest/__unnamed_task__/AverageReturn']))
assert (float(rows[0]['MetaTest/__unnamed_task__/AverageReturn']) >=
float(rows[0]['MetaTest/__unnamed_task__/MinReturn']))
assert float(rows[1]['MetaTest/__unnamed_task__/Iteration']) == 1
class MockAlgo:
sampler_cls = LocalSampler
def __init__(self, env, policy, max_path_length, n_exploration_traj,
meta_eval):
self.env = env
self.policy = policy
self.max_path_length = max_path_length
self.n_exploration_traj = n_exploration_traj
self.meta_eval = meta_eval
def train(self, runner):
for step in runner.step_epochs():
if step % 5 == 0:
self.meta_eval.evaluate(self)
def get_exploration_policy(self):
return self.policy
def adapt_policy(self, exploration_policy, exploration_trajectories):
del exploration_policy
assert (len(
exploration_trajectories.lengths) == self.n_exploration_traj)
def test_pickle_meta_evaluator():
set_seed(100)
tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
max_path_length = 200
env = GarageEnv(PointEnv())
n_traj = 3
with tempfile.TemporaryDirectory() as log_dir_name:
runner = LocalRunner(
SnapshotConfig(snapshot_dir=log_dir_name,
snapshot_mode='last',
snapshot_gap=1))
meta_eval = MetaEvaluator(test_task_sampler=tasks,
max_path_length=max_path_length,
n_test_tasks=10,
n_exploration_traj=n_traj)
policy = RandomPolicy(env.spec.action_space)
algo = MockAlgo(env, policy, max_path_length, n_traj, meta_eval)
runner.setup(algo, env)
log_file = tempfile.NamedTemporaryFile()
csv_output = CsvOutput(log_file.name)
logger.add_output(csv_output)
meta_eval.evaluate(algo)
meta_eval_pickle = cloudpickle.dumps(meta_eval)
meta_eval2 = cloudpickle.loads(meta_eval_pickle)
meta_eval2.evaluate(algo)
def test_meta_evaluator_with_tf():
set_seed(100)
tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
max_path_length = 200
env = GarageEnv(PointEnv())
n_traj = 3
with tempfile.TemporaryDirectory() as log_dir_name:
ctxt = SnapshotConfig(snapshot_dir=log_dir_name,
snapshot_mode='none',
snapshot_gap=1)
with LocalTFRunner(ctxt) as runner:
meta_eval = MetaEvaluator(test_task_sampler=tasks,
max_path_length=max_path_length,
n_test_tasks=10,
n_exploration_traj=n_traj)
policy = GaussianMLPPolicy(env.spec)
algo = MockAlgo(env, policy, max_path_length, n_traj, meta_eval)
runner.setup(algo, env)
log_file = tempfile.NamedTemporaryFile()
csv_output = CsvOutput(log_file.name)
logger.add_output(csv_output)
meta_eval.evaluate(algo)
algo_pickle = cloudpickle.dumps(algo)
tf.compat.v1.reset_default_graph()
with LocalTFRunner(ctxt) as runner:
algo2 = cloudpickle.loads(algo_pickle)
runner.setup(algo2, env)
runner.train(10, 0)
| 6,549 | 34.405405 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_resume.py | import tempfile
import numpy as np
import tensorflow as tf
from garage.experiment import LocalTFRunner, SnapshotConfig
from tests.fixtures import TfGraphTestCase
from tests.fixtures.experiment import fixture_exp
class TestResume(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.temp_dir = tempfile.TemporaryDirectory()
self.snapshot_config = SnapshotConfig(snapshot_dir=self.temp_dir.name,
snapshot_mode='last',
snapshot_gap=1)
self.policy_params = fixture_exp(self.snapshot_config, self.sess)
for c in self.graph.collections:
self.graph.clear_collection(c)
def teardown_method(self):
self.temp_dir.cleanup()
super().teardown_method()
def test_resume(self):
sess = tf.compat.v1.Session(graph=tf.Graph())
with LocalTFRunner(self.snapshot_config, sess) as runner:
args = runner.restore(self.temp_dir.name)
assert np.equal(
runner._algo.policy.get_param_values(),
self.policy_params).all(), 'Policy parameters should persist'
assert args.n_epochs == 5, (
'Snapshot should save training parameters')
assert args.start_epoch == 5, (
'Last experiment should end at 5th iterations')
batch_size = runner._train_args.batch_size
runner.resume(n_epochs=10,
plot=False,
store_paths=True,
pause_for_plot=False)
assert runner._train_args.n_epochs == 10
assert runner._train_args.batch_size == batch_size
assert not runner._train_args.plot
assert runner._train_args.store_paths
assert not runner._train_args.pause_for_plot
| 1,901 | 36.294118 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_snapshotter.py | from os import path as osp
import pickle
import tempfile
import pytest
from garage.experiment import Snapshotter
configurations = [('all', {
'itr_1.pkl': 0,
'itr_2.pkl': 1
}), ('last', {
'params.pkl': 1
}), ('gap', {
'itr_2.pkl': 1
}), ('gap_and_last', {
'itr_2.pkl': 1,
'params.pkl': 1
}), ('none', {})]
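# Hedged reading of `configurations`: with snapshot_gap=2, 'all' writes
# itr_<N>.pkl every iteration, 'last' keeps a single params.pkl, 'gap'
# writes only on gap-multiple iterations, 'gap_and_last' does both, and
# 'none' writes nothing; each mapped value indexes into snapshot_data for
# the parameters expected inside that file.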
class TestSnapshotter:
def setup_method(self):
self.temp_dir = tempfile.TemporaryDirectory()
def teardown_method(self):
self.temp_dir.cleanup()
@pytest.mark.parametrize('mode, files', [*configurations])
def test_snapshotter(self, mode, files):
snapshotter = Snapshotter(self.temp_dir.name, mode, 2)
assert snapshotter.snapshot_dir == self.temp_dir.name
assert snapshotter.snapshot_mode == mode
assert snapshotter.snapshot_gap == 2
snapshot_data = [{'testparam': 1}, {'testparam': 4}]
snapshotter.save_itr_params(1, snapshot_data[0])
snapshotter.save_itr_params(2, snapshot_data[1])
for f, num in files.items():
filename = osp.join(self.temp_dir.name, f)
assert osp.exists(filename)
with open(filename, 'rb') as pkl_file:
data = pickle.load(pkl_file)
assert data == snapshot_data[num]
def test_invalid_snapshot_mode(self):
with pytest.raises(ValueError):
snapshotter = Snapshotter(
snapshot_dir=self.temp_dir.name, snapshot_mode='invalid')
snapshotter.save_itr_params(2, {'testparam': 'invalid'})
| 1,552 | 28.301887 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_snapshotter_integration.py | import tempfile
import pytest
from garage.envs import GarageEnv
from garage.experiment import SnapshotConfig, Snapshotter
from garage.tf.algos import VPG
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import TfGraphTestCase
from tests.fixtures.experiment import fixture_exp
configurations = [('last', 4), ('first', 0), (3, 3)]
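# Hedged reading: Snapshotter.load accepts 'last', 'first', or an integer
# iteration, and each pair below maps a load mode to the epoch expected in
# the restored training stats.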
class TestSnapshot(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.temp_dir = tempfile.TemporaryDirectory()
snapshot_config = SnapshotConfig(snapshot_dir=self.temp_dir.name,
snapshot_mode='all',
snapshot_gap=1)
fixture_exp(snapshot_config, self.sess)
for c in self.graph.collections:
self.graph.clear_collection(c)
def teardown_method(self):
self.temp_dir.cleanup()
super().teardown_method()
@pytest.mark.parametrize('load_mode, last_epoch', [*configurations])
def test_load(self, load_mode, last_epoch):
snapshotter = Snapshotter()
saved = snapshotter.load(self.temp_dir.name, load_mode)
assert isinstance(saved['algo'], VPG)
assert isinstance(saved['env'], GarageEnv)
assert isinstance(saved['algo'].policy, CategoricalMLPPolicy)
assert saved['stats'].total_epoch == last_epoch
def test_load_with_invalid_load_mode(self):
snapshotter = Snapshotter()
with pytest.raises(ValueError):
snapshotter.load(self.temp_dir.name, 'foo')
| 1,558 | 33.644444 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/experiment/test_task_sampler.py | import functools
import unittest.mock
import numpy as np
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
from garage.envs.mujoco.half_cheetah_vel_env import HalfCheetahVelEnv
from garage.experiment import task_sampler
@pytest.mark.mujoco
def test_env_pool_sampler():
    # Import and construct environments here to avoid using up too many
    # resources if this test isn't run.
# pylint: disable=import-outside-toplevel
from metaworld.benchmarks import ML10
train_tasks = ML10.get_train_tasks().all_task_names
ML10_train_envs = [
ML10.from_task(train_task) for train_task in train_tasks
]
tasks = task_sampler.EnvPoolSampler(ML10_train_envs)
assert tasks.n_tasks == 10
updates = tasks.sample(10)
for env in ML10_train_envs:
assert any(env is update() for update in updates)
with pytest.raises(ValueError):
tasks.sample(10, with_replacement=True)
with pytest.raises(ValueError):
tasks.sample(11)
tasks.grow_pool(20)
tasks.sample(20)
@pytest.mark.mujoco
def test_construct_envs_sampler_ml10():
# pylint: disable=import-outside-toplevel
from metaworld.benchmarks import ML10
train_tasks = ML10.get_train_tasks().all_task_names
ML10_constructors = [
functools.partial(ML10.from_task, train_task)
for train_task in train_tasks
]
tasks = task_sampler.ConstructEnvsSampler(ML10_constructors)
assert tasks.n_tasks == 10
updates = tasks.sample(15)
envs = [update() for update in updates]
action = envs[0].action_space.sample()
rewards = [env.step(action)[1] for env in envs]
assert np.var(rewards) > 0
env = envs[0]
env.close = unittest.mock.MagicMock(name='env.close')
updates[-1](env)
env.close.assert_called_with()
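    # Note the contrast with SetTaskSampler below: ConstructEnvsSampler
    # updates presumably close and rebuild the env they are handed, while
    # SetTaskSampler updates reuse it (close is never called).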
@pytest.mark.mujoco
def test_set_task_task_sampler_ml10():
# pylint: disable=import-outside-toplevel
from metaworld.benchmarks import ML10
tasks = task_sampler.SetTaskSampler(ML10.get_train_tasks)
assert tasks.n_tasks == 10
updates = tasks.sample(3)
envs = [update() for update in updates]
action = envs[0].action_space.sample()
rewards = [env.step(action)[1] for env in envs]
assert np.var(rewards) > 0
env = envs[0]
env.close = unittest.mock.MagicMock(name='env.close')
updates[-1](env)
env.close.assert_not_called()
@pytest.mark.mujoco
def test_set_task_task_sampler_half_cheetah_vel_env():
tasks = task_sampler.SetTaskSampler(HalfCheetahVelEnv)
assert tasks.n_tasks is None
updates = tasks.sample(10)
envs = [update() for update in updates]
action = envs[0].action_space.sample()
rewards = [env.step(action)[1] for env in envs]
assert np.var(rewards) > 0
env = envs[0]
env.close = unittest.mock.MagicMock(name='env.close')
updates[-1](env)
env.close.assert_not_called()
| 3,239 | 32.402062 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/misc/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/misc/test_tensor_utils.py | """
Tests for the functions in garage.misc.tensor_utils.
"""
import numpy as np
from garage.envs import GarageEnv
from garage.misc.tensor_utils import concat_tensor_dict_list
from garage.misc.tensor_utils import explained_variance_1d
from garage.misc.tensor_utils import normalize_pixel_batch
from garage.misc.tensor_utils import pad_tensor
from garage.misc.tensor_utils import stack_and_pad_tensor_dict_list
from garage.misc.tensor_utils import stack_tensor_dict_list
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
class TestTensorUtil:
def setup_method(self):
self.data = [
dict(obs=[1, 1, 1],
act=[2, 2, 2],
info=dict(lala=[1, 1], baba=[2, 2])),
dict(obs=[1, 1, 1],
act=[2, 2, 2],
info=dict(lala=[1, 1], baba=[2, 2]))
]
self.data2 = [
dict(obs=[1, 1, 1],
act=[2, 2, 2],
info=dict(lala=[1, 1], baba=[2, 2])),
dict(obs=[1, 1, 1], act=[2, 2, 2], info=dict(lala=[1, 1]))
]
self.max_len = 10
self.tensor = [1, 1, 1]
def test_normalize_pixel_batch(self):
env = GarageEnv(DummyDiscretePixelEnv(), is_image=True)
obs = env.reset()
obs_normalized = normalize_pixel_batch(obs)
expected = [ob / 255.0 for ob in obs]
assert np.allclose(obs_normalized, expected)
def test_concat_tensor_dict_list(self):
results = concat_tensor_dict_list(self.data)
assert results['obs'].shape == (6, )
assert results['act'].shape == (6, )
assert results['info']['lala'].shape == (4, )
assert results['info']['baba'].shape == (4, )
results = concat_tensor_dict_list(self.data2)
assert results['obs'].shape == (6, )
assert results['act'].shape == (6, )
assert results['info']['lala'].shape == (4, )
assert results['info']['baba'].shape == (2, )
def test_stack_tensor_dict_list(self):
results = stack_tensor_dict_list(self.data)
assert results['obs'].shape == (2, 3)
assert results['act'].shape == (2, 3)
assert results['info']['lala'].shape == (2, 2)
assert results['info']['baba'].shape == (2, 2)
results = stack_tensor_dict_list(self.data2)
assert results['obs'].shape == (2, 3)
assert results['act'].shape == (2, 3)
assert results['info']['lala'].shape == (2, 2)
assert results['info']['baba'].shape == (2, )
def test_pad_tensor(self):
results = pad_tensor(self.tensor, self.max_len)
assert len(self.tensor) == 3
assert np.array_equal(results, [1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
results = pad_tensor(self.tensor, self.max_len, mode='last')
assert np.array_equal(results, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
def test_explained_variance_1d(self):
y = np.array([1, 2, 3, 4, 5, 0, 0, 0, 0, 0])
y_hat = np.array([2, 3, 4, 5, 6, 0, 0, 0, 0, 0])
valids = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
result = explained_variance_1d(y, y_hat, valids)
assert result == 1.0
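        # Hedged reasoning: under the valid mask only the first five entries
        # count, where y_hat = y + 1; the residual is the constant -1 with
        # zero variance, so (presumably computing 1 - Var(residual)/Var(y))
        # the explained variance is exactly 1.0.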
result = explained_variance_1d(y, y_hat)
np.testing.assert_almost_equal(result, 0.95)
def test_stack_and_pad_tensor_dict_list(self):
result = stack_and_pad_tensor_dict_list(self.data, max_len=5)
assert np.array_equal(result['obs'],
np.array([[1, 1, 1, 0, 0], [1, 1, 1, 0, 0]]))
assert np.array_equal(result['info']['lala'],
np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0]]))
assert np.array_equal(result['info']['baba'],
np.array([[2, 2, 0, 0, 0], [2, 2, 0, 0, 0]]))
| 3,783 | 38.831579 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/np/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/np/algos/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/np/algos/test_cem.py | import pytest
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.algos import CEM
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import OnPolicyVectorizedSampler
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestCEM(TfGraphTestCase):
@pytest.mark.large
def test_cem_cartpole(self):
"""Test CEM with Cartpole-v1 environment."""
with LocalTFRunner(snapshot_config) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
n_samples = 10
algo = CEM(env_spec=env.spec,
policy=policy,
baseline=baseline,
best_frac=0.1,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
rtn = runner.train(n_epochs=10, batch_size=2048)
assert rtn > 40
env.close()
| 1,310 | 32.615385 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/np/algos/test_cma_es.py | from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.algos import CMAES
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import OnPolicyVectorizedSampler
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestCMAES(TfGraphTestCase):
def test_cma_es_cartpole(self):
"""Test CMAES with Cartpole-v1 environment."""
with LocalTFRunner(snapshot_config) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
n_samples = 20
algo = CMAES(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
runner.train(n_epochs=1, batch_size=1000)
# No assertion on return because CMAES is not stable.
env.close()
| 1,284 | 35.714286 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/np/exploration_strategies/test_add_gaussian_noise.py | """Tests for the AddGaussianNoise exploration policy."""
import numpy as np
import pytest
from garage.np.exploration_policies import AddGaussianNoise
from tests.fixtures.envs.dummy import DummyBoxEnv
@pytest.fixture
def env():
return DummyBoxEnv()
class ConstantPolicy:
"""Simple policy for testing."""
def __init__(self, action):
self.action = action
def get_action(self, _):
return self.action, dict()
def get_actions(self, observations):
return np.full(len(observations), self.action), dict()
def reset(self, *args, **kwargs):
pass
def get_param_values(self):
return {'action': self.action}
def set_param_values(self, params):
self.action = params['action']
def test_params(env):
policy1 = ConstantPolicy(env.action_space.sample())
policy2 = ConstantPolicy(env.action_space.sample())
assert (policy1.get_action(None)[0] != policy2.get_action(None)[0]).all()
exp_policy1 = AddGaussianNoise(env, policy1)
exp_policy2 = AddGaussianNoise(env, policy2)
exp_policy1.set_param_values(exp_policy2.get_param_values())
assert (policy1.get_action(None)[0] == policy2.get_action(None)[0]).all()
def test_decay_period(env):
policy = ConstantPolicy(env.action_space.sample())
exp_policy = AddGaussianNoise(env,
policy,
max_sigma=1.,
min_sigma=0.,
decay_period=2)
assert (exp_policy.get_action(None)[0] != policy.get_action(None)[0]).all()
exp_policy.reset()
assert (exp_policy.get_action(None)[0] != policy.get_action(None)[0]).all()
exp_policy.reset()
assert (exp_policy.get_action(None)[0] == policy.get_action(None)[0]).all()
| 1,804 | 28.112903 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/np/exploration_strategies/test_epsilon_greedy_policy.py | """Tests for epsilon greedy policy."""
import pickle
import numpy as np
from garage.np.exploration_policies import EpsilonGreedyPolicy
from tests.fixtures.envs.dummy import DummyDiscreteEnv
class SimplePolicy:
"""Simple policy for testing."""
def __init__(self, env_spec):
self.env_spec = env_spec
def get_action(self, _):
return self.env_spec.action_space.sample(), dict()
def get_actions(self, observations):
return np.full(len(observations),
self.env_spec.action_space.sample()), dict()
class TestEpsilonGreedyPolicy:
def setup_method(self):
self.env = DummyDiscreteEnv()
self.policy = SimplePolicy(env_spec=self.env)
self.epsilon_greedy_policy = EpsilonGreedyPolicy(env_spec=self.env,
policy=self.policy,
total_timesteps=100,
max_epsilon=1.0,
min_epsilon=0.02,
decay_ratio=0.1)
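        # Hedged arithmetic: the decay period is presumably
        # total_timesteps * decay_ratio = 100 * 0.1 = 10 steps, so epsilon
        # drops by (1.0 - 0.02) / 10 = 0.098 per step until it reaches
        # min_epsilon.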
self.env.reset()
def test_epsilon_greedy_policy(self):
obs, _, _, _ = self.env.step(1)
action, _ = self.epsilon_greedy_policy.get_action(obs)
assert self.env.action_space.contains(action)
        # epsilon decays by 1 step; new epsilon = 1 - 0.098 = 0.902
random_rate = np.random.random(
100000) < self.epsilon_greedy_policy._epsilon
assert np.isclose([0.902], [sum(random_rate) / 100000], atol=0.01)
actions, _ = self.epsilon_greedy_policy.get_actions([obs] * 5)
        # epsilon decays by 6 steps in total; new epsilon = 1 - 6 * 0.098 = 0.412
random_rate = np.random.random(
100000) < self.epsilon_greedy_policy._epsilon
assert np.isclose([0.412], [sum(random_rate) / 100000], atol=0.01)
for action in actions:
assert self.env.action_space.contains(action)
def test_epsilon_greedy_policy_is_pickleable(self):
obs, _, _, _ = self.env.step(1)
for _ in range(5):
self.epsilon_greedy_policy.get_action(obs)
h_data = pickle.dumps(self.epsilon_greedy_policy)
policy = pickle.loads(h_data)
assert policy._epsilon == self.epsilon_greedy_policy._epsilon
| 2,397 | 34.791045 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/np/policies/test_fixed_policy.py | import numpy as np
import pytest
from garage.np.policies import FixedPolicy
def test_vectorization_multi_raises():
policy = FixedPolicy(None, np.array([1, 2, 3]))
with pytest.raises(ValueError):
policy.reset([True, True])
with pytest.raises(ValueError):
policy.get_actions(np.array([0, 0]))
def test_get_actions():
policy = FixedPolicy(None, np.array([1, 2, 3]))
assert policy.get_actions(np.array([0]).reshape(1, 1))[0] == 1
assert policy.get_action(np.array([0]))[0] == 2
assert policy.get_action(np.array([0]))[0] == 3
with pytest.raises(IndexError):
        policy.get_action(np.array([0]))
| 650 | 28.590909 | 66 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/np/policies/test_scripted_policy.py | from garage.np.policies import ScriptedPolicy
class TestScriptedPolicy:
def setup_method(self):
self.sp = ScriptedPolicy(scripted_actions=[1], agent_env_infos={0: 1})
"""
potentially add more tests down the line
"""
def test_pass_codecov(self):
self.sp.get_action(0)
self.sp.get_actions([0])
| 341 | 20.375 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/replay_buffer/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/replay_buffer/test_her_replay_buffer.py | import pickle
import numpy as np
import pytest
from garage.envs import GarageEnv
from garage.replay_buffer import HERReplayBuffer
from tests.fixtures.envs.dummy import DummyDictEnv
class TestHerReplayBuffer:
def setup_method(self):
self.env = GarageEnv(DummyDictEnv())
self.obs = self.env.reset()
self._replay_k = 4
self.replay_buffer = HERReplayBuffer(env_spec=self.env.spec,
capacity_in_transitions=10,
replay_k=self._replay_k,
reward_fn=self.env.compute_reward)
def test_replay_k(self):
self.replay_buffer = HERReplayBuffer(env_spec=self.env.spec,
capacity_in_transitions=10,
replay_k=0,
reward_fn=self.env.compute_reward)
with pytest.raises(ValueError):
self.replay_buffer = HERReplayBuffer(
env_spec=self.env.spec,
capacity_in_transitions=10,
replay_k=0.2,
reward_fn=self.env.compute_reward)
def _add_one_path(self):
path = dict(
observations=np.asarray([self.obs, self.obs]),
actions=np.asarray([
self.env.action_space.sample(),
self.env.action_space.sample()
]),
rewards=np.asarray([[1], [1]]),
terminals=np.asarray([[False], [False]]),
next_observations=np.asarray([self.obs, self.obs]),
)
self.replay_buffer.add_path(path)
def test_add_path(self):
self._add_one_path()
# HER buffer should add replay_k + 1 transitions to the buffer
# for each transition in the given path. This doesn't apply to
# the last transition, where only that transition gets added.
path_len = 2
total_expected_transitions = sum(
[self._replay_k + 1 for _ in range(path_len - 1)]) + 1
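        # Worked example for this test: path_len = 2 and replay_k = 4, so
        # the one non-terminal transition contributes 4 + 1 = 5 stored
        # entries and the terminal transition contributes 1.
        assert total_expected_transitions == 6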
assert (self.replay_buffer.n_transitions_stored ==
total_expected_transitions)
assert (len(
self.replay_buffer._path_segments) == total_expected_transitions -
1)
# check that buffer has the correct keys
assert {
'observations', 'next_observations', 'actions', 'rewards',
'terminals'
} <= set(self.replay_buffer._buffer)
# check that dict obses are flattened
obs = self.replay_buffer._buffer['observations'][0]
next_obs = self.replay_buffer._buffer['next_observations'][0]
        assert obs.shape == (self.env.spec.observation_space.flat_dim, )
        assert next_obs.shape == (self.env.spec.observation_space.flat_dim, )
def test_pickleable(self):
self._add_one_path()
replay_buffer_pickled = pickle.loads(pickle.dumps(self.replay_buffer))
assert (replay_buffer_pickled._buffer.keys() ==
self.replay_buffer._buffer.keys())
for k in replay_buffer_pickled._buffer:
assert replay_buffer_pickled._buffer[
k].shape == self.replay_buffer._buffer[k].shape
sample = self.replay_buffer.sample_transitions(1)
sample2 = replay_buffer_pickled.sample_transitions(1)
for k in sample.keys():
assert sample[k].shape == sample2[k].shape
assert len(sample) == len(sample2)
| 3,484 | 38.602273 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/replay_buffer/test_path_buffer.py | # pylint: disable=protected-access
import numpy as np
import pytest
from garage.replay_buffer import PathBuffer
from tests.fixtures.envs.dummy import DummyDiscreteEnv
class TestPathBuffer:
def test_add_path_dtype(self):
env = DummyDiscreteEnv()
obs = env.reset()
replay_buffer = PathBuffer(capacity_in_transitions=3)
replay_buffer.add_path({
'observations':
np.array([obs]),
'actions':
np.array([[env.action_space.sample()]])
})
sample = replay_buffer.sample_transitions(1)
sample_obs = sample['observations']
sample_action = sample['actions']
assert sample_obs.dtype == env.observation_space.dtype
assert sample_action.dtype == env.action_space.dtype
def test_eviction_policy(self):
obs = np.array([[1], [1]])
replay_buffer = PathBuffer(capacity_in_transitions=3)
replay_buffer.add_path(dict(obs=obs))
sampled_obs = replay_buffer.sample_transitions(3)['obs']
assert (sampled_obs == np.array([[1], [1], [1]])).all()
sampled_path_obs = replay_buffer.sample_path()['obs']
assert (sampled_path_obs == np.array([[1], [1]])).all()
obs2 = np.array([[2], [3]])
replay_buffer.add_path(dict(obs=obs2))
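        # Hedged note: capacity is 3 transitions, so adding this second
        # two-step path evicts part of the first; one [1] survives next to
        # [2] and [3], and only the newer path remains sampleable as a
        # complete path (checked further below).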
with pytest.raises(Exception):
assert replay_buffer.add_path(dict(test_obs=obs2))
obs3 = np.array([1])
with pytest.raises(Exception):
assert replay_buffer.add_path(dict(obs=obs3))
obs4 = np.array([[4], [5], [6], [7]])
with pytest.raises(Exception):
assert replay_buffer.add_path(dict(obs=obs4))
# Can still sample from old path
new_sampled_obs = replay_buffer.sample_transitions(1000)['obs']
assert set(new_sampled_obs.flatten()) == {1, 2, 3}
# Can't sample complete old path
for _ in range(100):
new_sampled_path_obs = replay_buffer.sample_path()['obs']
assert (new_sampled_path_obs == np.array([[2], [3]])).all()
replay_buffer.clear()
assert replay_buffer.n_transitions_stored == 0
assert not replay_buffer._buffer
| 2,197 | 32.815385 | 71 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_is_sampler.py | import unittest.mock
import gym
import pytest
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import ISSampler
from garage.tf.algos import TRPO
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestISSampler(TfGraphTestCase):
@pytest.mark.mujoco
def test_is_sampler(self):
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
env = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
policy = GaussianMLPPolicy(env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo,
env,
sampler_cls=ISSampler,
sampler_args=dict(n_backtrack=1, init_is=1))
runner._start_worker()
paths = runner._sampler.obtain_samples(1)
assert paths == [], 'Should return empty paths if no history'
            # test that importance sampling and live sampling are called
            # alternately
with unittest.mock.patch.object(ISSampler,
'_obtain_is_samples') as mocked:
assert runner._sampler.obtain_samples(2, 20)
mocked.assert_not_called()
assert runner._sampler.obtain_samples(3)
mocked.assert_called_once_with(3, None, True)
            # test importance sampling for the first n_is_pretrain iterations
with unittest.mock.patch.object(ISSampler,
'_obtain_is_samples') as mocked:
runner._sampler.n_is_pretrain = 5
runner._sampler.n_backtrack = None
runner._sampler.obtain_samples(4)
mocked.assert_called_once_with(4, None, True)
runner._sampler.obtain_samples(5)
            # test randomized drawing of importance samples
runner._sampler.randomize_draw = True
assert runner._sampler.obtain_samples(6, 1)
runner._sampler.randomize_draw = False
runner._sampler.obtain_samples(7, 30)
# test ess_threshold use
runner._sampler.ess_threshold = 500
paths = runner._sampler.obtain_samples(8, 30)
assert paths == [], (
'Should return empty paths when ess_threshold is large')
runner._sampler.ess_threshold = 0
# test random sample selection when len(paths) > batch size
runner._sampler.n_is_pretrain = 15
runner._sampler.obtain_samples(9, 10)
runner._sampler.obtain_samples(10, 1)
runner._shutdown_worker()
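        # Behavioral summary, inferred from the assertions above: ISSampler
        # falls back to live sampling while its history is empty, alternates
        # importance sampling with live sampling afterwards, and returns []
        # when ess_threshold filters out every stored path.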
| 3,100 | 37.7625 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_local_sampler.py | from unittest.mock import Mock
import numpy as np
import pytest
from garage.envs import GarageEnv
from garage.envs import PointEnv
from garage.envs.grid_world_env import GridWorldEnv
from garage.experiment.task_sampler import SetTaskSampler
from garage.np.policies import FixedPolicy, ScriptedPolicy
from garage.sampler import LocalSampler, OnPolicyVectorizedSampler
from garage.sampler import WorkerFactory
class TestSampler:
"""
Uses mock policy for 4x4 gridworldenv
'4x4': [
'SFFF',
'FHFH',
'FFFH',
'HFFG'
]
0: left
1: down
2: right
3: up
-1: no move
'S' : starting point
'F' or '.': free space
'W' or 'x': wall
'H' or 'o': hole (terminates episode)
'G' : goal
[2,2,1,0,3,1,1,1,2,2,1,1,1,2,2,1]
"""
def setup_method(self):
self.env = GarageEnv(GridWorldEnv(desc='4x4'))
self.policy = ScriptedPolicy(
scripted_actions=[2, 2, 1, 0, 3, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1])
self.algo = Mock(env_spec=self.env.spec,
policy=self.policy,
max_path_length=16)
def teardown_method(self):
self.env.close()
def test_local_batch_sampler(self):
workers = WorkerFactory(seed=100,
max_path_length=self.algo.max_path_length)
sampler1 = LocalSampler.from_worker_factory(workers, self.policy,
self.env)
sampler2 = OnPolicyVectorizedSampler(self.algo, self.env)
sampler2.start_worker()
trajs1 = sampler1.obtain_samples(
0, 1000, tuple(self.algo.policy.get_param_values()))
trajs2 = sampler2.obtain_samples(0, 1000)
# pylint: disable=superfluous-parens
assert trajs1.observations.shape[0] >= 1000
assert trajs1.actions.shape[0] >= 1000
assert (sum(trajs1.rewards[:trajs1.lengths[0]]) == sum(
trajs2[0]['rewards']) == 1)
true_obs = np.array([0, 1, 2, 6, 10, 14])
true_actions = np.array([2, 2, 1, 1, 1, 2])
true_rewards = np.array([0, 0, 0, 0, 0, 1])
start = 0
for length in trajs1.lengths:
observations = trajs1.observations[start:start + length]
actions = trajs1.actions[start:start + length]
rewards = trajs1.rewards[start:start + length]
assert np.array_equal(observations, true_obs)
assert np.array_equal(actions, true_actions)
assert np.array_equal(rewards, true_rewards)
start += length
sampler1.shutdown_worker()
sampler2.shutdown_worker()
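# A minimal, self-contained sketch of the sampler wiring used by the tests
# below (names and values mirror this file; this is illustrative, not a
# definitive API reference):
def _example_local_sampler_usage(env, policy):
    workers = WorkerFactory(seed=100, max_path_length=16, n_workers=8)
    sampler = LocalSampler.from_worker_factory(workers, policy, env)
    # The agent update is typically the policy's flat parameter values.
    batch = sampler.obtain_samples(0, 160,
                                   np.asarray(policy.get_param_values()))
    sampler.shutdown_worker()
    return batch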
def test_update_envs_env_update():
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
tasks = SetTaskSampler(PointEnv)
n_workers = 8
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = LocalSampler.from_worker_factory(workers, policy, env)
rollouts = sampler.obtain_samples(0,
161,
np.asarray(policy.get_param_values()),
env_update=tasks.sample(n_workers))
mean_rewards = []
goals = []
for rollout in rollouts.split():
mean_rewards.append(rollout.rewards.mean())
goals.append(rollout.env_infos['task'][0]['goal'])
assert len(mean_rewards) == 11
assert len(goals) == 11
assert np.var(mean_rewards) > 1e-2
assert np.var(goals) > 1e-2
with pytest.raises(ValueError):
sampler.obtain_samples(0,
10,
np.asarray(policy.get_param_values()),
env_update=tasks.sample(n_workers + 1))
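# env_update must supply exactly one update per worker; a length mismatch
# (n_workers + 1 above) is rejected with a ValueError.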
def test_init_with_env_updates():
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
n_workers = 8
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = LocalSampler.from_worker_factory(workers,
policy,
envs=tasks.sample(n_workers))
rollouts = sampler.obtain_samples(0, 160, policy)
assert sum(rollouts.lengths) >= 160
def test_obtain_exact_trajectories():
max_path_length = 15
n_workers = 8
env = GarageEnv(PointEnv())
per_worker_actions = [env.action_space.sample() for _ in range(n_workers)]
policies = [
FixedPolicy(env.spec, [action] * max_path_length)
for action in per_worker_actions
]
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = LocalSampler.from_worker_factory(workers, policies, envs=env)
n_traj_per_worker = 3
rollouts = sampler.obtain_exact_trajectories(n_traj_per_worker,
agent_update=policies)
# At least one action per trajectory.
assert sum(rollouts.lengths) >= n_workers * n_traj_per_worker
    # Exactly the requested number of trajectories.
assert len(rollouts.lengths) == n_workers * n_traj_per_worker
worker = -1
for count, rollout in enumerate(rollouts.split()):
if count % n_traj_per_worker == 0:
worker += 1
assert (rollout.actions == per_worker_actions[worker]).all()
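# The returned batch is grouped by worker: each worker contributes
# n_traj_per_worker consecutive trajectories, which is what the modular
# worker-index bookkeeping above relies on.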
def test_no_seed():
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
n_workers = 8
workers = WorkerFactory(seed=None,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = LocalSampler.from_worker_factory(workers, policy, env)
rollouts = sampler.obtain_samples(0, 160, policy)
assert sum(rollouts.lengths) >= 160
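# With seed=None, WorkerFactory takes the unseeded code path; sampling must
# still work, presumably at the cost of reproducibility.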
| 6,625 | 36.647727 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_multiprocessing_sampler.py | import pickle
from unittest.mock import Mock
import numpy as np
import pytest
from garage.envs import GarageEnv
from garage.envs import PointEnv
from garage.envs.grid_world_env import GridWorldEnv
from garage.experiment.task_sampler import SetTaskSampler
from garage.np.policies import FixedPolicy, ScriptedPolicy
from garage.sampler import LocalSampler, MultiprocessingSampler
from garage.sampler import WorkerFactory
@pytest.mark.timeout(10)
def test_obtain_samples():
env = GarageEnv(GridWorldEnv(desc='4x4'))
policy = ScriptedPolicy(
scripted_actions=[2, 2, 1, 0, 3, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1])
algo = Mock(env_spec=env.spec, policy=policy, max_path_length=16)
workers = WorkerFactory(seed=100,
max_path_length=algo.max_path_length,
n_workers=8)
sampler1 = MultiprocessingSampler.from_worker_factory(workers, policy, env)
sampler2 = LocalSampler.from_worker_factory(workers, policy, env)
trajs1 = sampler1.obtain_samples(0, 1000,
tuple(algo.policy.get_param_values()))
trajs2 = sampler2.obtain_samples(0, 1000,
tuple(algo.policy.get_param_values()))
# pylint: disable=superfluous-parens
assert trajs1.observations.shape[0] >= 1000
assert trajs1.actions.shape[0] >= 1000
assert (sum(trajs1.rewards[:trajs1.lengths[0]]) == sum(
trajs2.rewards[:trajs2.lengths[0]]) == 1)
true_obs = np.array([0, 1, 2, 6, 10, 14])
true_actions = np.array([2, 2, 1, 1, 1, 2])
true_rewards = np.array([0, 0, 0, 0, 0, 1])
start = 0
for length in trajs1.lengths:
observations = trajs1.observations[start:start + length]
actions = trajs1.actions[start:start + length]
rewards = trajs1.rewards[start:start + length]
assert np.array_equal(observations, true_obs)
assert np.array_equal(actions, true_actions)
assert np.array_equal(rewards, true_rewards)
start += length
sampler1.shutdown_worker()
sampler2.shutdown_worker()
env.close()
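# Note that the agent update is passed by value (a tuple of parameters)
# rather than as a live policy object, presumably so it can be shipped to
# subprocess workers; both samplers must then reproduce the same
# deterministic gridworld rollout.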
@pytest.mark.timeout(10)
def test_update_envs_env_update():
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
tasks = SetTaskSampler(PointEnv)
n_workers = 8
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = MultiprocessingSampler.from_worker_factory(workers, policy, env)
rollouts = sampler.obtain_samples(0,
161,
np.asarray(policy.get_param_values()),
env_update=tasks.sample(n_workers))
mean_rewards = []
goals = []
for rollout in rollouts.split():
mean_rewards.append(rollout.rewards.mean())
goals.append(rollout.env_infos['task'][0]['goal'])
assert np.var(mean_rewards) > 0
assert np.var(goals) > 0
with pytest.raises(ValueError):
sampler.obtain_samples(0,
10,
np.asarray(policy.get_param_values()),
env_update=tasks.sample(n_workers + 1))
sampler.shutdown_worker()
env.close()
@pytest.mark.timeout(10)
def test_init_with_env_updates():
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
n_workers = 8
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = MultiprocessingSampler.from_worker_factory(
workers, policy, envs=tasks.sample(n_workers))
rollouts = sampler.obtain_samples(0, 160, policy)
assert sum(rollouts.lengths) >= 160
sampler.shutdown_worker()
env.close()
@pytest.mark.timeout(10)
def test_obtain_exact_trajectories():
max_path_length = 15
n_workers = 8
env = GarageEnv(PointEnv())
per_worker_actions = [env.action_space.sample() for _ in range(n_workers)]
policies = [
FixedPolicy(env.spec, [action] * max_path_length)
for action in per_worker_actions
]
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = MultiprocessingSampler.from_worker_factory(workers,
policies,
envs=env)
n_traj_per_worker = 3
rollouts = sampler.obtain_exact_trajectories(n_traj_per_worker,
agent_update=policies)
# At least one action per trajectory.
assert sum(rollouts.lengths) >= n_workers * n_traj_per_worker
    # Exactly the requested number of trajectories.
assert len(rollouts.lengths) == n_workers * n_traj_per_worker
worker = -1
for count, rollout in enumerate(rollouts.split()):
if count % n_traj_per_worker == 0:
worker += 1
assert (rollout.actions == per_worker_actions[worker]).all()
sampler.shutdown_worker()
env.close()
@pytest.mark.timeout(30)
def test_init_with_crashed_worker():
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
n_workers = 2
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
class CrashingPolicy:
def reset(self, **kwargs):
raise Exception('Intentional subprocess crash')
bad_policy = CrashingPolicy()
# This causes worker 2 to crash.
sampler = MultiprocessingSampler.from_worker_factory(
workers, [policy, bad_policy], envs=tasks.sample(n_workers))
rollouts = sampler.obtain_samples(0, 160, None)
assert sum(rollouts.lengths) >= 160
sampler.shutdown_worker()
env.close()
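# The surviving worker alone must still satisfy the 160-transition request;
# tolerating a crashed worker is the property this test pins down.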
@pytest.mark.timeout(10)
def test_pickle():
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
tasks = SetTaskSampler(PointEnv)
n_workers = 8
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = MultiprocessingSampler.from_worker_factory(workers, policy, env)
sampler_pickled = pickle.dumps(sampler)
sampler.shutdown_worker()
sampler2 = pickle.loads(sampler_pickled)
rollouts = sampler2.obtain_samples(0,
161,
np.asarray(policy.get_param_values()),
env_update=tasks.sample(n_workers))
mean_rewards = []
goals = []
for rollout in rollouts.split():
mean_rewards.append(rollout.rewards.mean())
goals.append(rollout.env_infos['task'][0]['goal'])
assert np.var(mean_rewards) > 0
assert np.var(goals) > 0
sampler2.shutdown_worker()
env.close()
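# The sampler is pickled while live and shut down before the copy is used,
# so the restored sampler has to bring up its own worker processes; the
# variance assertions confirm the env updates were actually applied there.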
| 7,930 | 37.5 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_off_policy_vectorized_sampler_integration.py | import gym
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import PathBuffer
from garage.sampler import OffPolicyVectorizedSampler
from garage.tf.algos import DDPG
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestOffPolicyVectorizedSampler(TfGraphTestCase):
@pytest.mark.mujoco
def test_no_reset(self):
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            # This tests whether the off-policy sampler respects batch_size
            # when no_reset is set to True.
env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
policy = ContinuousMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
policy,
sigma=0.2)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
algo = DDPG(
env_spec=env.spec,
policy=policy,
policy_lr=1e-4,
qf_lr=1e-3,
qf=qf,
replay_buffer=replay_buffer,
target_update_tau=1e-2,
n_train_steps=50,
discount=0.9,
min_buffer_size=int(1e4),
exploration_policy=exploration_policy,
)
sampler = OffPolicyVectorizedSampler(algo, env, 1, no_reset=True)
sampler.start_worker()
runner.initialize_tf_vars()
paths1 = sampler.obtain_samples(0, 5)
paths2 = sampler.obtain_samples(0, 5)
len1 = sum([len(path['rewards']) for path in paths1])
len2 = sum([len(path['rewards']) for path in paths2])
assert len1 == 5 and len2 == 5, 'Sampler should respect batch_size'
# yapf: disable
            # When done is False at the end of the 1st sampling, the first
            # path of the 2nd sampling continues it, so running_length spans
            # both segments.
case1 = (len(paths1[-1]['rewards']) + len(paths2[0]['rewards'])
== paths2[0]['running_length'])
            # When done is True at the end of the 1st sampling, the 2nd
            # sampling starts a fresh path.
case2 = len(paths2[0]['rewards']) == paths2[0]['running_length']
done = paths1[-1]['dones'][-1]
assert (
(not done and case1) or (done and case2)
), 'Running length should be the length of full path'
# yapf: enable
case1 = np.isclose(
paths1[-1]['rewards'].sum() + paths2[0]['rewards'].sum(),
paths2[0]['undiscounted_return'])
case2 = np.isclose(paths2[0]['rewards'].sum(),
paths2[0]['undiscounted_return'])
assert (
(not done and case1) or (done and case2)
), 'Undiscounted_return should be the sum of rewards of full path'
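        # Illustrative arithmetic for case1 (numbers are hypothetical): if
        # the last path of the 1st sampling ends 3 steps in with done=False
        # and continues for 5 steps in the 2nd sampling, then
        # running_length == 3 + 5 == 8 and undiscounted_return sums the
        # rewards of all 8 steps.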
| 3,841 | 43.16092 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_on_policy_vectorized_sampler.py | import gym
import pytest
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import OnPolicyVectorizedSampler
from garage.tf.algos import REPS
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
configs = [(1, None, 4), (3, None, 12), (2, 3, 3)]
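# Each tuple is (max_cpus, n_envs, expected_n_envs). With n_envs=None the
# sampler falls back to a default consistent with 4 environments per
# available CPU; an explicit n_envs overrides the CPU count (last case).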
class TestOnPolicyVectorizedSampler(TfGraphTestCase):
@pytest.mark.parametrize('cpus, n_envs, expected_n_envs', [*configs])
def test_on_policy_vectorized_sampler_n_envs(self, cpus, n_envs,
expected_n_envs):
with LocalTFRunner(snapshot_config, sess=self.sess,
max_cpus=cpus) as runner:
env = GarageEnv(gym.make('CartPole-v0'))
policy = CategoricalMLPPolicy(env_spec=env.spec,
hidden_sizes=[32, 32])
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = REPS(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99)
runner.setup(algo, env, sampler_args=dict(n_envs=n_envs))
assert isinstance(runner._sampler, OnPolicyVectorizedSampler)
assert runner._sampler._n_envs == expected_n_envs
env.close()
| 1,476 | 35.02439 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_ray_batched_sampler.py | """Tests for ray_batched_sampler."""
from unittest.mock import Mock
import numpy as np
import pytest
import ray
from garage.envs import GarageEnv
from garage.envs import PointEnv
from garage.envs.grid_world_env import GridWorldEnv
from garage.experiment.task_sampler import SetTaskSampler
from garage.np.policies import FixedPolicy, ScriptedPolicy
from garage.sampler import OnPolicyVectorizedSampler, RaySampler, WorkerFactory
from tests.fixtures.sampler import ray_local_session_fixture
def test_ray_batch_sampler(ray_local_session_fixture):
del ray_local_session_fixture
env = GarageEnv(GridWorldEnv(desc='4x4'))
policy = ScriptedPolicy(
scripted_actions=[2, 2, 1, 0, 3, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1])
algo = Mock(env_spec=env.spec, policy=policy, max_path_length=16)
assert ray.is_initialized()
workers = WorkerFactory(seed=100, max_path_length=algo.max_path_length)
sampler1 = RaySampler(workers, policy, env)
sampler1.start_worker()
sampler2 = OnPolicyVectorizedSampler(algo, env)
sampler2.start_worker()
trajs1 = sampler1.obtain_samples(0, 1000,
tuple(algo.policy.get_param_values()))
trajs2 = sampler2.obtain_samples(0, 1000)
# pylint: disable=superfluous-parens
assert trajs1.observations.shape[0] >= 1000
assert trajs1.actions.shape[0] >= 1000
assert (sum(trajs1.rewards[:trajs1.lengths[0]]) == sum(
trajs2[0]['rewards']) == 1)
true_obs = np.array([0, 1, 2, 6, 10, 14])
true_actions = np.array([2, 2, 1, 1, 1, 2])
true_rewards = np.array([0, 0, 0, 0, 0, 1])
start = 0
for length in trajs1.lengths:
observations = trajs1.observations[start:start + length]
actions = trajs1.actions[start:start + length]
rewards = trajs1.rewards[start:start + length]
assert np.array_equal(observations, true_obs)
assert np.array_equal(actions, true_actions)
assert np.array_equal(rewards, true_rewards)
start += length
sampler1.shutdown_worker()
sampler2.shutdown_worker()
env.close()
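# Parity check: the Ray-backed sampler must exactly reproduce the
# deterministic gridworld rollouts of the in-process
# OnPolicyVectorizedSampler.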
def test_update_envs_env_update(ray_local_session_fixture):
del ray_local_session_fixture
assert ray.is_initialized()
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
tasks = SetTaskSampler(PointEnv)
n_workers = 8
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = RaySampler.from_worker_factory(workers, policy, env)
rollouts = sampler.obtain_samples(0,
160,
np.asarray(policy.get_param_values()),
env_update=tasks.sample(n_workers))
mean_rewards = []
goals = []
for rollout in rollouts.split():
mean_rewards.append(rollout.rewards.mean())
goals.append(rollout.env_infos['task'][0]['goal'])
assert np.var(mean_rewards) > 0
assert np.var(goals) > 0
with pytest.raises(ValueError):
sampler.obtain_samples(0,
10,
np.asarray(policy.get_param_values()),
env_update=tasks.sample(n_workers + 1))
def test_obtain_exact_trajectories(ray_local_session_fixture):
del ray_local_session_fixture
assert ray.is_initialized()
max_path_length = 15
n_workers = 8
env = GarageEnv(PointEnv())
per_worker_actions = [env.action_space.sample() for _ in range(n_workers)]
policies = [
FixedPolicy(env.spec, [action] * max_path_length)
for action in per_worker_actions
]
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = RaySampler.from_worker_factory(workers, policies, envs=env)
n_traj_per_worker = 3
rollouts = sampler.obtain_exact_trajectories(n_traj_per_worker, policies)
# At least one action per trajectory.
assert sum(rollouts.lengths) >= n_workers * n_traj_per_worker
    # Exactly the requested number of trajectories.
assert len(rollouts.lengths) == n_workers * n_traj_per_worker
worker = -1
for count, rollout in enumerate(rollouts.split()):
if count % n_traj_per_worker == 0:
worker += 1
assert (rollout.actions == per_worker_actions[worker]).all()
def test_init_with_env_updates(ray_local_session_fixture):
del ray_local_session_fixture
assert ray.is_initialized()
max_path_length = 16
env = GarageEnv(PointEnv())
policy = FixedPolicy(env.spec,
scripted_actions=[
env.action_space.sample()
for _ in range(max_path_length)
])
tasks = SetTaskSampler(lambda: GarageEnv(PointEnv()))
n_workers = 8
workers = WorkerFactory(seed=100,
max_path_length=max_path_length,
n_workers=n_workers)
sampler = RaySampler.from_worker_factory(workers,
policy,
envs=tasks.sample(n_workers))
rollouts = sampler.obtain_samples(0, 160, policy)
assert sum(rollouts.lengths) >= 160
| 5,551 | 39.525547 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_rl2_worker.py | from garage.envs import GarageEnv
from garage.tf.algos.rl2 import RL2Worker
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.policies import DummyPolicy
class TestRL2Worker(TfGraphTestCase):
def test_rl2_worker(self):
env = GarageEnv(DummyBoxEnv(obs_dim=(1, )))
policy = DummyPolicy(env_spec=env.spec)
worker = RL2Worker(seed=1,
max_path_length=100,
worker_number=1,
n_paths_per_trial=5)
worker.update_agent(policy)
worker.update_env(env)
rollouts = worker.rollout()
assert rollouts.rewards.shape[0] == 500
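        # 5 paths per trial x 100 steps per path = 500 reward entries,
        # assuming DummyBoxEnv never terminates an episode early.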
| 715 | 33.095238 | 51 | py |