Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_sampler.py | from dowel import logger
import numpy as np
from garage.sampler.utils import truncate_paths
from tests.fixtures.logger import NullOutput
class TestSampler:
    """Tests for garage.sampler.utils.truncate_paths."""

    @staticmethod
    def _make_path(length):
        """Build a zero-filled dummy path with ``length`` time steps."""
        return dict(
            observations=np.zeros((length, 1)),
            actions=np.zeros((length, 1)),
            rewards=np.zeros(length),
            env_infos=dict(),
            agent_infos=dict(lala=np.zeros(length)),
        )

    def setup_method(self):
        # Silence dowel logging while the test runs.
        logger.add_output(NullOutput())

    def teardown_method(self):
        logger.remove_all()

    def test_truncate_paths(self):
        """Truncation keeps whole paths and trims only the final one."""
        paths = [self._make_path(100), self._make_path(50)]
        truncated = truncate_paths(paths, 130)
        assert len(truncated) == 2
        # The second path is trimmed from 50 down to the remaining budget (30).
        assert len(truncated[-1]['observations']) == 30
        assert len(truncated[0]['observations']) == 100
        # The input paths must be left untouched.
        assert len(paths) == 2
        assert len(paths[-1]['observations']) == 50
| 1,183 | 27.878049 | 55 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_stateful_pool.py | from garage.sampler.stateful_pool import StatefulPool
def _worker_collect_once(_):
return 'a', 1
class TestStatefulPool:
    """Smoke tests for garage.sampler.stateful_pool.StatefulPool."""

    @staticmethod
    def _collect(n_parallel, threshold):
        """Run _worker_collect_once on a fresh pool and return the samples."""
        pool = StatefulPool()
        pool.initialize(n_parallel=n_parallel)
        return pool.run_collect(_worker_collect_once,
                                threshold,
                                show_prog_bar=False)

    def test_stateful_pool(self):
        results = self._collect(10, 3)
        assert all(r == 'a' for r in results)
        assert len(results) >= 3

    def test_stateful_pool_over_capacity(self):
        # More workers than requested samples must still return enough.
        results = self._collect(4, 3)
        assert len(results) >= 3
| 694 | 30.590909 | 69 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_utils.py | import numpy as np
import pytest
from garage.envs import GarageEnv
from garage.sampler import utils
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.policies import DummyPolicy
class TestRollout:
    """Tests for garage.sampler.utils.rollout."""
    def setup_method(self):
        # Multi-dimensional obs/action spaces exercise flattening in rollout.
        self.env = GarageEnv(DummyBoxEnv(obs_dim=(4, 4), action_dim=(2, 2)))
        self.policy = DummyPolicy(self.env.spec)
    def test_max_path_length(self):
        """rollout stops after exactly max_path_length steps."""
        # pylint: disable=unsubscriptable-object
        path = utils.rollout(self.env, self.policy, max_path_length=3)
        assert path['observations'].shape[0] == 3
        assert path['actions'].shape[0] == 3
        assert path['rewards'].shape[0] == 3
        # Every agent-info array must match the path length as well.
        agent_info = [
            path['agent_infos'][k]
            for k in self.policy.distribution.dist_info_keys
        ]
        assert agent_info[0].shape[0] == 3
        # dummy is the env_info_key
        assert path['env_infos']['dummy'].shape[0] == 3
    def test_deterministic_action(self):
        """deterministic=True makes the dummy policy emit all-zero actions."""
        path = utils.rollout(self.env,
                             self.policy,
                             max_path_length=5,
                             deterministic=True)
        assert (path['actions'] == 0.).all()
class TestTruncatePaths:
    """Tests for garage.sampler.utils.truncate_paths."""

    @staticmethod
    def _make_path(length, nested):
        """Create a zero-filled path of ``length`` steps.

        Args:
            length (int): Number of time steps in the path.
            nested (bool): If True, nest the agent info one dict level deeper.

        Returns:
            dict: A path dictionary as consumed by truncate_paths.
        """
        if nested:
            lala = dict(baba=np.zeros(length))
        else:
            lala = np.zeros(length)
        return dict(
            observations=np.zeros((length, 1)),
            actions=np.zeros((length, 1)),
            rewards=np.zeros(length),
            env_infos=dict(),
            agent_infos=dict(lala=lala),
        )

    def setup_method(self):
        self.paths = [
            self._make_path(100, nested=False),
            self._make_path(50, nested=False),
        ]
        self.paths_dict = [
            self._make_path(100, nested=True),
            self._make_path(50, nested=True),
        ]

    def test_truncates(self):
        """Only the tail of the last path is dropped to hit the budget."""
        truncated = utils.truncate_paths(self.paths, 130)
        assert len(truncated) == 2
        assert len(truncated[-1]['observations']) == 30
        assert len(truncated[0]['observations']) == 100
        # The original paths must be left unmodified.
        assert len(self.paths) == 2
        assert len(self.paths[-1]['observations']) == 50

    def test_truncates_dict(self):
        """Nested agent_infos dictionaries are truncated recursively."""
        truncated = utils.truncate_paths(self.paths_dict, 130)
        assert len(truncated) == 2
        assert len(truncated[-1]['agent_infos']['lala']['baba']) == 30
        assert len(truncated[0]['agent_infos']['lala']['baba']) == 100

    def test_invalid_path(self):
        """Unknown keys in a path make truncate_paths raise ValueError."""
        self.paths[0]['invalid'] = None
        with pytest.raises(ValueError):
            utils.truncate_paths(self.paths, 3)
| 3,172 | 33.11828 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/sampler/test_vec_worker.py | import pprint
import pytest
from garage.envs import GarageEnv
from garage.envs import GridWorldEnv
from garage.experiment.task_sampler import EnvPoolSampler
from garage.np.policies import ScriptedPolicy
from garage.sampler import LocalSampler, VecWorker, WorkerFactory
SEED = 100  # Base random seed shared by the workers built in this module.
N_TRAJ = 5  # Trajectories per rollout; also the number of vectorized envs.
MAX_PATH_LENGTH = 9  # Maximum steps per trajectory.
@pytest.fixture
def env():
    """A single 4x4 GridWorld wrapped in GarageEnv."""
    grid_world = GridWorldEnv(desc='4x4')
    return GarageEnv(grid_world)
@pytest.fixture
def policy():
    """A policy that replays a fixed, pre-scripted action sequence."""
    action_script = [2, 2, 1, 0, 3, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1]
    return ScriptedPolicy(scripted_actions=action_script)
@pytest.fixture
def envs():
    """Five GridWorld variants with distinct hole layouts."""
    layouts = (
        ['SFFF', 'FHFH', 'FFFH', 'HFFG'],
        ['SFFF', 'FFFH', 'FHFH', 'HFFG'],
        ['SFFF', 'FFFH', 'FHFH', 'FFFG'],
        ['SFFF', 'FFFF', 'FFFF', 'FFFF'],
        ['SHFF', 'HHFF', 'FFFF', 'FFFF'],
    )
    grid_worlds = []
    for layout in layouts:
        grid_worlds.append(GarageEnv(GridWorldEnv(desc=layout)))
    return grid_worlds
@pytest.fixture
def other_envs():
    """Five more GridWorld variants, with shifted start positions."""
    layouts = (
        ['FFFS', 'FHFH', 'FFFH', 'HFFG'],
        ['FFSF', 'FFFH', 'FHFH', 'HFFG'],
        ['FFFF', 'FFSH', 'FHFH', 'FFFG'],
        ['FFFF', 'FFFF', 'FSFF', 'FFFF'],
        ['HHFF', 'HHHF', 'HSHF', 'HHHF'],
    )
    grid_worlds = []
    for layout in layouts:
        grid_worlds.append(GarageEnv(GridWorldEnv(desc=layout)))
    return grid_worlds
def assert_trajs_eq(ground_truth_traj, test_traj):
    """Assert two trajectory batches contain exactly the same trajectories.

    Each batch is split into individual trajectories and compared as a set
    of (actions, observations) tuples, so ordering within a batch does not
    matter.
    """
    def _fingerprints(batch):
        # Hashable fingerprint of every trajectory in the batch.
        return {(tuple(t.actions), tuple(t.observations))
                for t in batch.split()}
    expected = _fingerprints(ground_truth_traj)
    actual = _fingerprints(test_traj)
    # Dump both sets to ease debugging when the assertion fires.
    print()
    pprint.pprint(expected)
    pprint.pprint(actual)
    assert actual == expected
def test_rollout(env, policy):
    """A VecWorker collects N_TRAJ trajectories per rollout, repeatably."""
    worker = VecWorker(seed=SEED,
                       max_path_length=MAX_PATH_LENGTH,
                       worker_number=0,
                       n_envs=N_TRAJ)
    worker.update_agent(policy)
    worker.update_env(env)
    traj = worker.rollout()
    assert len(traj.lengths) == N_TRAJ
    traj2 = worker.rollout()
    assert len(traj2.lengths) == N_TRAJ
    # Same seed and scripted policy: both rollouts must be identical.
    assert str(traj) == str(traj2)
    # Sanity check that the scripted policy actually varies its actions.
    assert traj.actions.var() > 0
    worker.shutdown()
def test_non_vec_rollout(env, policy):
    """With a single env, VecWorker degenerates to an ordinary worker."""
    single_env_worker = VecWorker(seed=SEED,
                                  max_path_length=MAX_PATH_LENGTH,
                                  worker_number=0,
                                  n_envs=1)
    single_env_worker.update_agent(policy)
    single_env_worker.update_env(env)
    first_batch = single_env_worker.rollout()
    assert len(first_batch.lengths) == 1
    assert first_batch.actions.var() > 0
    second_batch = single_env_worker.rollout()
    assert len(second_batch.lengths) == 1
    single_env_worker.shutdown()
def test_in_local_sampler(policy, envs):
    """One VecWorker over N envs matches N plain workers with one env each."""
    # Ground truth: one worker per environment.
    true_workers = WorkerFactory(seed=100,
                                 n_workers=N_TRAJ,
                                 max_path_length=MAX_PATH_LENGTH)
    true_sampler = LocalSampler.from_worker_factory(true_workers, policy, envs)
    # Under test: a single VecWorker wrapping all N_TRAJ environments.
    vec_workers = WorkerFactory(seed=100,
                                n_workers=1,
                                worker_class=VecWorker,
                                worker_args=dict(n_envs=N_TRAJ),
                                max_path_length=MAX_PATH_LENGTH)
    # NOTE(review): here vec_sampler receives the bare `policy`, while
    # test_reset_optimization passes `[policy]` — presumably both forms are
    # accepted by from_worker_factory; confirm against its signature.
    vec_sampler = LocalSampler.from_worker_factory(vec_workers, policy, [envs])
    n_samples = 100
    true_trajs = true_sampler.obtain_samples(0, n_samples, None)
    vec_trajs = vec_sampler.obtain_samples(0, n_samples, None)
    assert vec_trajs.lengths.sum() >= n_samples
    assert_trajs_eq(true_trajs, vec_trajs)
    # Test start_rollout optimization
    true_trajs = true_sampler.obtain_samples(0, n_samples, None)
    vec_trajs = vec_sampler.obtain_samples(0, n_samples, None)
    assert vec_trajs.lengths.sum() >= n_samples
    assert_trajs_eq(true_trajs, vec_trajs)
    true_sampler.shutdown_worker()
    vec_sampler.shutdown_worker()
def test_reset_optimization(policy, envs, other_envs):
    """Swapping in new envs mid-run yields the same trajectories either way."""
    true_workers = WorkerFactory(seed=100,
                                 n_workers=N_TRAJ,
                                 max_path_length=MAX_PATH_LENGTH)
    true_sampler = LocalSampler.from_worker_factory(true_workers, policy, envs)
    vec_workers = WorkerFactory(seed=100,
                                n_workers=1,
                                worker_class=VecWorker,
                                worker_args=dict(n_envs=N_TRAJ),
                                max_path_length=MAX_PATH_LENGTH)
    # The single VecWorker receives the whole env list, hence [envs].
    vec_sampler = LocalSampler.from_worker_factory(vec_workers, [policy],
                                                   [envs])
    n_samples = 4 * MAX_PATH_LENGTH
    # Warm up the ground-truth sampler before switching environments.
    true_sampler.obtain_samples(0, n_samples, None)
    true_sampler.obtain_samples(0, n_samples, None)
    # Both samplers now sample from other_envs via the env-update argument.
    true_trajs = true_sampler.obtain_samples(0, n_samples, None, other_envs)
    vec_trajs = vec_sampler.obtain_samples(0, n_samples, None, [other_envs])
    assert vec_trajs.lengths.sum() >= n_samples
    assert_trajs_eq(true_trajs, vec_trajs)
    true_sampler.shutdown_worker()
    vec_sampler.shutdown_worker()
def test_init_with_env_updates(policy, envs):
    """VecWorker matches plain workers when envs come from a task sampler."""
    # Resample the envs through EnvPoolSampler so both samplers are
    # constructed from env updates rather than raw environments.
    task_sampler = EnvPoolSampler(envs)
    envs = task_sampler.sample(N_TRAJ)
    true_workers = WorkerFactory(seed=100,
                                 n_workers=N_TRAJ,
                                 max_path_length=MAX_PATH_LENGTH)
    true_sampler = LocalSampler.from_worker_factory(true_workers, policy, envs)
    vec_workers = WorkerFactory(seed=100,
                                n_workers=1,
                                worker_class=VecWorker,
                                worker_args=dict(n_envs=N_TRAJ),
                                max_path_length=MAX_PATH_LENGTH)
    vec_sampler = LocalSampler.from_worker_factory(vec_workers, [policy],
                                                   [envs])
    n_samples = 100
    true_trajs = true_sampler.obtain_samples(0, n_samples, None)
    vec_trajs = vec_sampler.obtain_samples(0, n_samples, None)
    assert vec_trajs.lengths.sum() >= n_samples
    assert_trajs_eq(true_trajs, vec_trajs)
    true_sampler.shutdown_worker()
    vec_sampler.shutdown_worker()
| 6,027 | 34.046512 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_batch_polopt.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_ddpg.py | """This script creates a test that fails when garage.tf.algos.DDPG performance
is too low.
"""
import gym
import pytest
import tensorflow as tf
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import PathBuffer
from garage.tf.algos import DDPG
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestDDPG(TfGraphTestCase):
    """Performance regression tests for garage.tf.algos.DDPG."""
    @pytest.mark.mujoco_long
    def test_ddpg_double_pendulum(self):
        """Test DDPG with the InvertedDoublePendulum environment."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(gym.make('InvertedDoublePendulum-v2'))
            # Deterministic actor with OU exploration noise — standard DDPG.
            policy = ContinuousMLPPolicy(env_spec=env.spec,
                                         hidden_sizes=[64, 64],
                                         hidden_nonlinearity=tf.nn.relu,
                                         output_nonlinearity=tf.nn.tanh)
            exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
                                                           policy,
                                                           sigma=0.2)
            qf = ContinuousMLPQFunction(env_spec=env.spec,
                                        hidden_sizes=[64, 64],
                                        hidden_nonlinearity=tf.nn.relu)
            replay_buffer = PathBuffer(capacity_in_transitions=int(1e5))
            algo = DDPG(
                env_spec=env.spec,
                policy=policy,
                policy_lr=1e-4,
                qf_lr=1e-3,
                qf=qf,
                replay_buffer=replay_buffer,
                steps_per_epoch=20,
                target_update_tau=1e-2,
                n_train_steps=50,
                discount=0.9,
                min_buffer_size=int(5e3),
                exploration_policy=exploration_policy,
            )
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=100)
            # Regression threshold for 10 epochs of training.
            assert last_avg_ret > 60
            env.close()
    @pytest.mark.mujoco_long
    def test_ddpg_pendulum(self):
        """Test DDPG with Pendulum environment.
        This environment has a [-3, 3] action_space bound.
        """
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
            policy = ContinuousMLPPolicy(env_spec=env.spec,
                                         hidden_sizes=[64, 64],
                                         hidden_nonlinearity=tf.nn.relu,
                                         output_nonlinearity=tf.nn.tanh)
            exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
                                                           policy,
                                                           sigma=0.2)
            qf = ContinuousMLPQFunction(env_spec=env.spec,
                                        hidden_sizes=[64, 64],
                                        hidden_nonlinearity=tf.nn.relu)
            replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
            algo = DDPG(
                env_spec=env.spec,
                policy=policy,
                policy_lr=1e-4,
                qf_lr=1e-3,
                qf=qf,
                replay_buffer=replay_buffer,
                steps_per_epoch=20,
                target_update_tau=1e-2,
                n_train_steps=50,
                discount=0.9,
                min_buffer_size=int(5e3),
                exploration_policy=exploration_policy,
            )
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=100)
            assert last_avg_ret > 10
            env.close()
    @pytest.mark.mujoco_long
    def test_ddpg_pendulum_with_decayed_weights(self):
        """Test DDPG with Pendulum environment and decayed weights.
        This environment has a [-3, 3] action_space bound.
        """
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
            policy = ContinuousMLPPolicy(env_spec=env.spec,
                                         hidden_sizes=[64, 64],
                                         hidden_nonlinearity=tf.nn.relu,
                                         output_nonlinearity=tf.nn.tanh)
            exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
                                                           policy,
                                                           sigma=0.2)
            qf = ContinuousMLPQFunction(env_spec=env.spec,
                                        hidden_sizes=[64, 64],
                                        hidden_nonlinearity=tf.nn.relu)
            replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
            # Same setup as test_ddpg_pendulum, plus L2 weight decay on both
            # the policy and the Q-function.
            algo = DDPG(
                env_spec=env.spec,
                policy=policy,
                policy_lr=1e-4,
                qf_lr=1e-3,
                qf=qf,
                replay_buffer=replay_buffer,
                steps_per_epoch=20,
                target_update_tau=1e-2,
                n_train_steps=50,
                discount=0.9,
                policy_weight_decay=0.01,
                qf_weight_decay=0.01,
                min_buffer_size=int(5e3),
                exploration_policy=exploration_policy,
            )
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=100)
            assert last_avg_ret > 10
            env.close()
| 5,795 | 41.306569 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_dqn.py | """
This script creates a test that fails when garage.tf.algos.DQN performance is
too low.
"""
import pickle
import gym
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import EpsilonGreedyPolicy
from garage.replay_buffer import PathBuffer
from garage.tf.algos import DQN
from garage.tf.policies import DiscreteQfDerivedPolicy
from garage.tf.q_functions import DiscreteMLPQFunction
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestDQN(TfGraphTestCase):
    """Performance and serialization tests for garage.tf.algos.DQN."""
    @pytest.mark.large
    def test_dqn_cartpole(self):
        """Test vanilla DQN (no double-Q) with the CartPole environment."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            n_epochs = 10
            steps_per_epoch = 10
            sampler_batch_size = 500
            num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
            env = GarageEnv(gym.make('CartPole-v0'))
            replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
            qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
            policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
            # NOTE(review): 'epilson' is a typo for 'epsilon' (local name
            # only; repeated in every test in this class).
            epilson_greedy_policy = EpsilonGreedyPolicy(
                env_spec=env.spec,
                policy=policy,
                total_timesteps=num_timesteps,
                max_epsilon=1.0,
                min_epsilon=0.02,
                decay_ratio=0.1)
            algo = DQN(env_spec=env.spec,
                       policy=policy,
                       qf=qf,
                       exploration_policy=epilson_greedy_policy,
                       replay_buffer=replay_buffer,
                       qf_lr=1e-4,
                       discount=1.0,
                       min_buffer_size=int(1e3),
                       double_q=False,
                       n_train_steps=500,
                       steps_per_epoch=steps_per_epoch,
                       target_network_update_freq=1,
                       buffer_batch_size=32)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=n_epochs,
                                        batch_size=sampler_batch_size)
            # Regression threshold after 10 epochs.
            assert last_avg_ret > 15
            env.close()
    @pytest.mark.large
    def test_dqn_cartpole_double_q(self):
        """Test DQN with double-Q learning enabled on CartPole."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            n_epochs = 10
            steps_per_epoch = 10
            sampler_batch_size = 500
            num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
            env = GarageEnv(gym.make('CartPole-v0'))
            replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
            qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
            policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
            epilson_greedy_policy = EpsilonGreedyPolicy(
                env_spec=env.spec,
                policy=policy,
                total_timesteps=num_timesteps,
                max_epsilon=1.0,
                min_epsilon=0.02,
                decay_ratio=0.1)
            # Identical to test_dqn_cartpole except double_q=True.
            algo = DQN(env_spec=env.spec,
                       policy=policy,
                       qf=qf,
                       exploration_policy=epilson_greedy_policy,
                       replay_buffer=replay_buffer,
                       qf_lr=1e-4,
                       discount=1.0,
                       min_buffer_size=int(1e3),
                       double_q=True,
                       n_train_steps=500,
                       steps_per_epoch=steps_per_epoch,
                       target_network_update_freq=1,
                       buffer_batch_size=32)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=n_epochs,
                                        batch_size=sampler_batch_size)
            assert last_avg_ret > 15
            env.close()
    @pytest.mark.large
    def test_dqn_cartpole_grad_clip(self):
        """Test DQN with gradient norm clipping on CartPole."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            n_epochs = 10
            steps_per_epoch = 10
            sampler_batch_size = 500
            num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
            env = GarageEnv(gym.make('CartPole-v0'))
            replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
            qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
            policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
            epilson_greedy_policy = EpsilonGreedyPolicy(
                env_spec=env.spec,
                policy=policy,
                total_timesteps=num_timesteps,
                max_epsilon=1.0,
                min_epsilon=0.02,
                decay_ratio=0.1)
            algo = DQN(env_spec=env.spec,
                       policy=policy,
                       qf=qf,
                       exploration_policy=epilson_greedy_policy,
                       replay_buffer=replay_buffer,
                       qf_lr=1e-4,
                       discount=1.0,
                       min_buffer_size=int(1e3),
                       double_q=False,
                       n_train_steps=500,
                       grad_norm_clipping=5.0,
                       steps_per_epoch=steps_per_epoch,
                       target_network_update_freq=1,
                       buffer_batch_size=32)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=n_epochs,
                                        batch_size=sampler_batch_size)
            assert last_avg_ret > 13
            env.close()
    def test_dqn_cartpole_pickle(self):
        """Test that a DQN algo survives pickling with its weights intact."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            n_epochs = 10
            steps_per_epoch = 10
            sampler_batch_size = 500
            num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
            env = GarageEnv(gym.make('CartPole-v0'))
            replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
            qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
            policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
            epilson_greedy_policy = EpsilonGreedyPolicy(
                env_spec=env.spec,
                policy=policy,
                total_timesteps=num_timesteps,
                max_epsilon=1.0,
                min_epsilon=0.02,
                decay_ratio=0.1)
            algo = DQN(env_spec=env.spec,
                       policy=policy,
                       qf=qf,
                       exploration_policy=epilson_greedy_policy,
                       replay_buffer=replay_buffer,
                       qf_lr=1e-4,
                       discount=1.0,
                       min_buffer_size=int(1e3),
                       double_q=False,
                       n_train_steps=500,
                       grad_norm_clipping=5.0,
                       steps_per_epoch=steps_per_epoch,
                       target_network_update_freq=1,
                       buffer_batch_size=32)
            runner.setup(algo, env)
            # Overwrite the first hidden layer's bias with ones so the value
            # can be recognized after the pickle round-trip.
            with tf.compat.v1.variable_scope(
                    'DiscreteMLPQFunction/MLPModel/mlp/hidden_0', reuse=True):
                bias = tf.compat.v1.get_variable('bias')
                # assign it to all one
                old_bias = tf.ones_like(bias).eval()
                bias.load(old_bias)
                h = pickle.dumps(algo)
            # Unpickle into a fresh graph and check the weights match.
            with tf.compat.v1.Session(graph=tf.Graph()):
                pickle.loads(h)
                with tf.compat.v1.variable_scope(
                        'DiscreteMLPQFunction/MLPModel/mlp/hidden_0',
                        reuse=True):
                    new_bias = tf.compat.v1.get_variable('bias')
                    new_bias = new_bias.eval()
                    assert np.array_equal(old_bias, new_bias)
            env.close()
| 8,134 | 40.505102 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_erwr.py | import pytest
from garage.envs import GarageEnv
from garage.experiment import deterministic, LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import ERWR
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestERWR(TfGraphTestCase):
    """Performance regression test for garage.tf.algos.ERWR."""
    @pytest.mark.large
    def test_erwr_cartpole(self):
        """ERWR should exceed an average return of 80 on CartPole-v1."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as tf_runner:
            deterministic.set_seed(1)
            cartpole = GarageEnv(env_name='CartPole-v1')
            categorical_policy = CategoricalMLPPolicy(name='policy',
                                                      env_spec=cartpole.spec,
                                                      hidden_sizes=(32, 32))
            linear_baseline = LinearFeatureBaseline(env_spec=cartpole.spec)
            erwr = ERWR(env_spec=cartpole.spec,
                        policy=categorical_policy,
                        baseline=linear_baseline,
                        max_path_length=100,
                        discount=0.99)
            tf_runner.setup(erwr, cartpole)
            last_avg_ret = tf_runner.train(n_epochs=10, batch_size=10000)
            assert last_avg_ret > 80
            cartpole.close()
| 1,245 | 31.789474 | 70 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_npo.py | """
This script creates a test that fails when garage.tf.algos.NPO performance is
too low.
"""
import gym
import pytest
import tensorflow as tf
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.tf.algos import NPO
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestNPO(TfGraphTestCase):
    """Performance and argument-validation tests for garage.tf.algos.NPO."""
    def setup_method(self):
        super().setup_method()
        self.env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
        self.policy = GaussianMLPPolicy(
            env_spec=self.env.spec,
            hidden_sizes=(64, 64),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )
        self.baseline = GaussianMLPBaseline(
            env_spec=self.env.spec,
            regressor_args=dict(hidden_sizes=(32, 32)),
        )
    @pytest.mark.mujoco
    def test_npo_pendulum(self):
        """Test NPO with Pendulum environment."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = NPO(env_spec=self.env.spec,
                       policy=self.policy,
                       baseline=self.baseline,
                       max_path_length=100,
                       discount=0.99,
                       gae_lambda=0.98,
                       policy_ent_coeff=0.0)
            runner.setup(algo, self.env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            # Regression threshold after 10 epochs of training.
            assert last_avg_ret > 20
    @pytest.mark.mujoco
    def test_npo_with_unknown_pg_loss(self):
        """Test NPO with unkown pg loss."""
        with pytest.raises(ValueError, match='Invalid pg_loss'):
            NPO(
                env_spec=self.env.spec,
                policy=self.policy,
                baseline=self.baseline,
                pg_loss='random pg_loss',
            )
    @pytest.mark.mujoco
    def test_npo_with_invalid_entropy_method(self):
        """Test NPO with invalid entropy method."""
        with pytest.raises(ValueError, match='Invalid entropy_method'):
            NPO(
                env_spec=self.env.spec,
                policy=self.policy,
                baseline=self.baseline,
                entropy_method=None,
            )
    @pytest.mark.mujoco
    def test_npo_with_max_entropy_and_center_adv(self):
        """Test NPO with max entropy and center_adv."""
        # entropy_method='max' is expected to reject center_adv=True.
        with pytest.raises(ValueError):
            NPO(
                env_spec=self.env.spec,
                policy=self.policy,
                baseline=self.baseline,
                entropy_method='max',
                center_adv=True,
            )
    @pytest.mark.mujoco
    def test_npo_with_max_entropy_and_no_stop_entropy_gradient(self):
        """Test NPO with max entropy and false stop_entropy_gradient."""
        # entropy_method='max' is expected to require stop_entropy_gradient.
        with pytest.raises(ValueError):
            NPO(
                env_spec=self.env.spec,
                policy=self.policy,
                baseline=self.baseline,
                entropy_method='max',
                stop_entropy_gradient=False,
            )
    @pytest.mark.mujoco
    def test_npo_with_invalid_no_entropy_configuration(self):
        """Test NPO with invalid no entropy configuration."""
        # A nonzero entropy coefficient contradicts entropy_method='no_entropy'.
        with pytest.raises(ValueError):
            NPO(
                env_spec=self.env.spec,
                policy=self.policy,
                baseline=self.baseline,
                entropy_method='no_entropy',
                policy_ent_coeff=0.02,
            )
    def teardown_method(self):
        self.env.close()
        super().teardown_method()
| 3,679 | 32.761468 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_ppo.py | """
This script creates a test that fails when garage.tf.algos.PPO performance is
too low.
"""
import gym
import pytest
import tensorflow as tf
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import PPO
from garage.tf.baselines import ContinuousMLPBaseline
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.policies import GaussianGRUPolicy
from garage.tf.policies import GaussianLSTMPolicy
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
from tests.fixtures.envs.wrappers import ReshapeObservation
class TestPPO(TfGraphTestCase):
    """Performance regression tests for garage.tf.algos.PPO."""
    def setup_method(self):
        super().setup_method()
        self.env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
        self.policy = GaussianMLPPolicy(
            env_spec=self.env.spec,
            hidden_sizes=(64, 64),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )
        # NOTE(review): self.lstm_policy and self.gru_policy are not used by
        # any test in this class — possibly leftovers; confirm before removal.
        self.lstm_policy = GaussianLSTMPolicy(env_spec=self.env.spec)
        self.gru_policy = GaussianGRUPolicy(env_spec=self.env.spec)
        self.baseline = GaussianMLPBaseline(
            env_spec=self.env.spec,
            regressor_args=dict(hidden_sizes=(32, 32)),
        )
    @pytest.mark.mujoco
    def test_ppo_pendulum(self):
        """Test PPO with Pendulum environment."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = PPO(env_spec=self.env.spec,
                       policy=self.policy,
                       baseline=self.baseline,
                       max_path_length=100,
                       discount=0.99,
                       lr_clip_range=0.01,
                       optimizer_args=dict(batch_size=32, max_epochs=10))
            runner.setup(algo, self.env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            # Regression threshold after 10 epochs of training.
            assert last_avg_ret > 35
    @pytest.mark.mujoco
    def test_ppo_with_maximum_entropy(self):
        """Test PPO with maxium entropy method."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = PPO(env_spec=self.env.spec,
                       policy=self.policy,
                       baseline=self.baseline,
                       max_path_length=100,
                       discount=0.99,
                       lr_clip_range=0.01,
                       optimizer_args=dict(batch_size=32, max_epochs=10),
                       stop_entropy_gradient=True,
                       entropy_method='max',
                       policy_ent_coeff=0.02,
                       center_adv=False)
            runner.setup(algo, self.env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 35
    @pytest.mark.mujoco
    def test_ppo_with_neg_log_likeli_entropy_estimation_and_max(self):
        """
        Test PPO with negative log likelihood entropy estimation and max
        entropy method.
        """
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = PPO(env_spec=self.env.spec,
                       policy=self.policy,
                       baseline=self.baseline,
                       max_path_length=100,
                       discount=0.99,
                       lr_clip_range=0.01,
                       optimizer_args=dict(batch_size=32, max_epochs=10),
                       stop_entropy_gradient=True,
                       use_neg_logli_entropy=True,
                       entropy_method='max',
                       policy_ent_coeff=0.02,
                       center_adv=False)
            runner.setup(algo, self.env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 35
    @pytest.mark.mujoco
    def test_ppo_with_neg_log_likeli_entropy_estimation_and_regularized(self):
        """
        Test PPO with negative log likelihood entropy estimation and
        regularized entropy method.
        """
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = PPO(env_spec=self.env.spec,
                       policy=self.policy,
                       baseline=self.baseline,
                       max_path_length=100,
                       discount=0.99,
                       lr_clip_range=0.01,
                       optimizer_args=dict(batch_size=32, max_epochs=10),
                       stop_entropy_gradient=True,
                       use_neg_logli_entropy=True,
                       entropy_method='regularized',
                       policy_ent_coeff=0.0,
                       center_adv=True)
            runner.setup(algo, self.env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 35
    @pytest.mark.mujoco
    def test_ppo_with_regularized_entropy(self):
        """Test PPO with regularized entropy method."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = PPO(env_spec=self.env.spec,
                       policy=self.policy,
                       baseline=self.baseline,
                       max_path_length=100,
                       discount=0.99,
                       lr_clip_range=0.01,
                       optimizer_args=dict(batch_size=32, max_epochs=10),
                       stop_entropy_gradient=False,
                       entropy_method='regularized',
                       policy_ent_coeff=0.02,
                       center_adv=True,
                       flatten_input=False)
            runner.setup(algo, self.env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 35
    @pytest.mark.mujoco
    def test_ppo_pendulum_flatten_input(self):
        """Test PPO with CartPole to test observation flattening."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            # ReshapeObservation produces non-flat (2, 2) observations so the
            # algorithm's flattening path is exercised.
            env = GarageEnv(
                normalize(ReshapeObservation(gym.make('CartPole-v1'), (2, 2))))
            policy = CategoricalMLPPolicy(
                env_spec=env.spec,
                hidden_nonlinearity=tf.nn.tanh,
            )
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            algo = PPO(env_spec=env.spec,
                       policy=policy,
                       baseline=baseline,
                       max_path_length=100,
                       discount=0.99,
                       gae_lambda=0.95,
                       lr_clip_range=0.2,
                       policy_ent_coeff=0.0,
                       optimizer_args=dict(
                           batch_size=32,
                           max_epochs=10,
                           learning_rate=1e-3,
                       ))
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 80
    def teardown_method(self):
        self.env.close()
        super().teardown_method()
class TestPPOContinuousBaseline(TfGraphTestCase):
    """PPO regression tests using a ContinuousMLPBaseline."""
    @pytest.mark.huge
    def test_ppo_pendulum_continuous_baseline(self):
        """Test PPO with Pendulum environment."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
            policy = GaussianMLPPolicy(
                env_spec=env.spec,
                hidden_sizes=(64, 64),
                hidden_nonlinearity=tf.nn.tanh,
                output_nonlinearity=None,
            )
            baseline = ContinuousMLPBaseline(
                env_spec=env.spec,
                regressor_args=dict(hidden_sizes=(32, 32)),
            )
            algo = PPO(
                env_spec=env.spec,
                policy=policy,
                baseline=baseline,
                max_path_length=100,
                discount=0.99,
                gae_lambda=0.95,
                lr_clip_range=0.2,
                optimizer_args=dict(
                    batch_size=32,
                    max_epochs=10,
                ),
                stop_entropy_gradient=True,
                entropy_method='max',
                policy_ent_coeff=0.02,
                center_adv=False,
            )
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            # Regression threshold after 10 epochs of training.
            assert last_avg_ret > 100
            env.close()
    @pytest.mark.mujoco_long
    def test_ppo_pendulum_recurrent_continuous_baseline(self):
        """Test PPO with Pendulum environment and recurrent policy."""
        with LocalTFRunner(snapshot_config) as runner:
            env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
            policy = GaussianLSTMPolicy(env_spec=env.spec, )
            baseline = ContinuousMLPBaseline(
                env_spec=env.spec,
                regressor_args=dict(hidden_sizes=(32, 32)),
            )
            algo = PPO(
                env_spec=env.spec,
                policy=policy,
                baseline=baseline,
                max_path_length=100,
                discount=0.99,
                gae_lambda=0.95,
                lr_clip_range=0.2,
                optimizer_args=dict(
                    batch_size=32,
                    max_epochs=10,
                ),
                stop_entropy_gradient=True,
                entropy_method='max',
                policy_ent_coeff=0.02,
                center_adv=False,
            )
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 100
            env.close()
class TestPPOPendulumLSTM(TfGraphTestCase):
    """PPO regression test with a Gaussian LSTM policy."""
    @pytest.mark.mujoco_long
    def test_ppo_pendulum_lstm(self):
        """Test PPO with Pendulum environment and recurrent policy."""
        with LocalTFRunner(snapshot_config) as runner:
            env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
            lstm_policy = GaussianLSTMPolicy(env_spec=env.spec)
            baseline = GaussianMLPBaseline(
                env_spec=env.spec,
                regressor_args=dict(hidden_sizes=(32, 32)),
            )
            algo = PPO(
                env_spec=env.spec,
                policy=lstm_policy,
                baseline=baseline,
                max_path_length=100,
                discount=0.99,
                gae_lambda=0.95,
                lr_clip_range=0.2,
                optimizer_args=dict(
                    batch_size=32,
                    max_epochs=10,
                ),
                stop_entropy_gradient=True,
                entropy_method='max',
                policy_ent_coeff=0.02,
                center_adv=False,
            )
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            # Regression threshold after 10 epochs of training.
            assert last_avg_ret > 60
class TestPPOPendulumGRU(TfGraphTestCase):
    """PPO regression test with a Gaussian GRU policy."""
    @pytest.mark.mujoco_long
    def test_ppo_pendulum_gru(self):
        """Test PPO with Pendulum environment and recurrent policy."""
        with LocalTFRunner(snapshot_config) as runner:
            env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
            gru_policy = GaussianGRUPolicy(env_spec=env.spec)
            baseline = GaussianMLPBaseline(
                env_spec=env.spec,
                regressor_args=dict(hidden_sizes=(32, 32)),
            )
            algo = PPO(
                env_spec=env.spec,
                policy=gru_policy,
                baseline=baseline,
                max_path_length=100,
                discount=0.99,
                gae_lambda=0.95,
                lr_clip_range=0.2,
                optimizer_args=dict(
                    batch_size=32,
                    max_epochs=10,
                ),
                stop_entropy_gradient=True,
                entropy_method='max',
                policy_ent_coeff=0.02,
                center_adv=False,
            )
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            # Regression threshold after 10 epochs of training.
            assert last_avg_ret > 80
| 12,311 | 38.210191 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_reps.py | """
This script creates a test that fails when garage.tf.algos.REPS performance is
too low.
"""
import gym
import pytest
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import REPS
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestREPS(TfGraphTestCase):
    """Performance regression test for garage.tf.algos.REPS."""
    @pytest.mark.large
    def test_reps_cartpole(self):
        """Test REPS with gym Cartpole environment."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(gym.make('CartPole-v0'))
            policy = CategoricalMLPPolicy(env_spec=env.spec,
                                          hidden_sizes=[32, 32])
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            algo = REPS(env_spec=env.spec,
                        policy=policy,
                        baseline=baseline,
                        max_path_length=100,
                        discount=0.99)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=4000)
            # Regression threshold after 10 epochs of training.
            assert last_avg_ret > 5
            env.close()
| 1,242 | 29.317073 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_rl2ppo.py | """
This script creates a test that fails when garage.tf.algos.RL2PPO
performance is too low.
"""
import numpy as np
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
from garage.envs import normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.experiment.meta_evaluator import MetaEvaluator
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2PPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.policies import GaussianGRUPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
@pytest.mark.mujoco
class TestRL2PPO(TfGraphTestCase):
    """Integration tests for RL2PPO on HalfCheetahDirEnv meta-RL tasks."""

    def setup_method(self):
        """Create the shared task sampler, env spec, GRU policy and baseline."""
        super().setup_method()
        self.max_path_length = 100
        self.meta_batch_size = 10
        self.episode_per_task = 4
        # Sampler produces fresh RL2-wrapped, normalized HalfCheetahDir tasks.
        self.tasks = task_sampler.SetTaskSampler(lambda: RL2Env(env=normalize(
            HalfCheetahDirEnv())))
        self.env_spec = RL2Env(env=normalize(HalfCheetahDirEnv())).spec
        self.policy = GaussianGRUPolicy(env_spec=self.env_spec,
                                        hidden_dim=64,
                                        state_include_action=False)
        self.baseline = LinearFeatureBaseline(env_spec=self.env_spec)

    @pytest.mark.timeout(120)
    def test_rl2_ppo_pendulum(self):
        """Train RL2PPO for one epoch and check the final average return."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = RL2PPO(rl2_max_path_length=self.max_path_length,
                          meta_batch_size=self.meta_batch_size,
                          task_sampler=self.tasks,
                          env_spec=self.env_spec,
                          policy=self.policy,
                          baseline=self.baseline,
                          discount=0.99,
                          gae_lambda=0.95,
                          lr_clip_range=0.2,
                          stop_entropy_gradient=True,
                          entropy_method='max',
                          policy_ent_coeff=0.02,
                          center_adv=False,
                          max_path_length=self.max_path_length *
                          self.episode_per_task)
            runner.setup(
                algo,
                self.tasks.sample(self.meta_batch_size),
                sampler_cls=LocalSampler,
                n_workers=self.meta_batch_size,
                worker_class=RL2Worker,
                worker_args=dict(n_paths_per_trial=self.episode_per_task))
            last_avg_ret = runner.train(n_epochs=1,
                                        batch_size=self.episode_per_task *
                                        self.max_path_length *
                                        self.meta_batch_size)
            assert last_avg_ret > -40

    def test_rl2_ppo_pendulum_meta_test(self):
        """Train RL2PPO with a MetaEvaluator attached for meta-testing."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            meta_evaluator = MetaEvaluator(
                test_task_sampler=self.tasks,
                n_exploration_traj=10,
                n_test_rollouts=10,
                max_path_length=self.max_path_length,
                n_test_tasks=1)
            algo = RL2PPO(rl2_max_path_length=self.max_path_length,
                          meta_batch_size=self.meta_batch_size,
                          task_sampler=self.tasks,
                          env_spec=self.env_spec,
                          policy=self.policy,
                          baseline=self.baseline,
                          discount=0.99,
                          gae_lambda=0.95,
                          lr_clip_range=0.2,
                          optimizer_args=dict(
                              batch_size=32,
                              max_epochs=10,
                          ),
                          stop_entropy_gradient=True,
                          entropy_method='max',
                          policy_ent_coeff=0.02,
                          center_adv=False,
                          max_path_length=self.max_path_length *
                          self.episode_per_task,
                          meta_evaluator=meta_evaluator,
                          n_epochs_per_eval=10)
            runner.setup(algo,
                         self.tasks.sample(self.meta_batch_size),
                         sampler_cls=LocalSampler,
                         n_workers=self.meta_batch_size,
                         worker_class=RL2Worker)
            last_avg_ret = runner.train(n_epochs=1,
                                        batch_size=self.episode_per_task *
                                        self.max_path_length *
                                        self.meta_batch_size)
            assert last_avg_ret > -40

    def test_rl2_ppo_pendulum_exploration_policy(self):
        """Exploration policy parameters should be settable and readable."""
        with LocalTFRunner(snapshot_config, sess=self.sess):
            algo = RL2PPO(rl2_max_path_length=self.max_path_length,
                          meta_batch_size=self.meta_batch_size,
                          task_sampler=self.tasks,
                          env_spec=self.env_spec,
                          policy=self.policy,
                          baseline=self.baseline,
                          discount=0.99,
                          gae_lambda=0.95,
                          lr_clip_range=0.2,
                          optimizer_args=dict(
                              batch_size=32,
                              max_epochs=10,
                          ),
                          stop_entropy_gradient=True,
                          entropy_method='max',
                          policy_ent_coeff=0.02,
                          center_adv=False,
                          max_path_length=self.max_path_length *
                          self.episode_per_task)
            exploration_policy = algo.get_exploration_policy()
            params = exploration_policy.get_param_values()
            new_params = np.zeros_like(params)
            exploration_policy.set_param_values(new_params)
            assert np.array_equal(new_params,
                                  exploration_policy.get_param_values())

    def test_rl2_ppo_pendulum_adapted_policy(self):
        """Adapted policy should round-trip (params, hidden) values."""
        with LocalTFRunner(snapshot_config, sess=self.sess):
            algo = RL2PPO(rl2_max_path_length=self.max_path_length,
                          meta_batch_size=self.meta_batch_size,
                          task_sampler=self.tasks,
                          env_spec=self.env_spec,
                          policy=self.policy,
                          baseline=self.baseline,
                          discount=0.99,
                          gae_lambda=0.95,
                          lr_clip_range=0.2,
                          optimizer_args=dict(
                              batch_size=32,
                              max_epochs=10,
                          ),
                          stop_entropy_gradient=True,
                          entropy_method='max',
                          policy_ent_coeff=0.02,
                          center_adv=False,
                          max_path_length=self.max_path_length *
                          self.episode_per_task)
            exploration_policy = algo.get_exploration_policy()
            # Adapt with no exploration trajectories; RL2 adapts via hidden
            # state, so an empty list is acceptable here.
            adapted_policy = algo.adapt_policy(exploration_policy, [])
            (params, hidden) = adapted_policy.get_param_values()
            expected_new_params = np.zeros_like(params)
            expected_hidden = np.zeros_like(hidden)
            adapted_policy.set_param_values(
                (expected_new_params, expected_hidden))
            (new_params, new_hidden) = adapted_policy.get_param_values()
            assert np.array_equal(expected_new_params, new_params)
            assert np.array_equal(expected_hidden, new_hidden)

    def test_rl2_ppo_pendulum_wrong_worker(self):
        """RL2PPO should reject flatten_input=False with a ValueError."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            with pytest.raises(ValueError):
                algo = RL2PPO(rl2_max_path_length=self.max_path_length,
                              meta_batch_size=self.meta_batch_size,
                              task_sampler=self.tasks,
                              env_spec=self.env_spec,
                              policy=self.policy,
                              baseline=self.baseline,
                              discount=0.99,
                              gae_lambda=0.95,
                              lr_clip_range=0.2,
                              optimizer_args=dict(
                                  batch_size=32,
                                  max_epochs=10,
                              ),
                              stop_entropy_gradient=True,
                              entropy_method='max',
                              policy_ent_coeff=0.02,
                              center_adv=False,
                              max_path_length=self.max_path_length *
                              self.episode_per_task,
                              flatten_input=False)
                runner.setup(algo,
                             self.tasks.sample(self.meta_batch_size),
                             sampler_cls=LocalSampler,
                             n_workers=self.meta_batch_size)
                runner.train(n_epochs=10,
                             batch_size=self.episode_per_task *
                             self.max_path_length * self.meta_batch_size)
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_rl2trpo.py | """
This script creates a test that fails when garage.tf.algos.RL2TRPO
performance is too low.
"""
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
from garage.envs import normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2TRPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.optimizers import ConjugateGradientOptimizer
from garage.tf.optimizers import FiniteDifferenceHvp
from garage.tf.optimizers import PenaltyLbfgsOptimizer
from garage.tf.policies import GaussianGRUPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
@pytest.mark.mujoco
class TestRL2TRPO(TfGraphTestCase):
    """Integration tests for RL2TRPO on HalfCheetahDirEnv meta-RL tasks."""

    def setup_method(self):
        """Create the shared task sampler, env spec, GRU policy and baseline."""
        super().setup_method()
        self.max_path_length = 100
        self.meta_batch_size = 10
        self.episode_per_task = 4
        # Sampler produces fresh RL2-wrapped, normalized HalfCheetahDir tasks.
        self.tasks = task_sampler.SetTaskSampler(lambda: RL2Env(env=normalize(
            HalfCheetahDirEnv())))
        self.env_spec = RL2Env(env=normalize(HalfCheetahDirEnv())).spec
        self.policy = GaussianGRUPolicy(env_spec=self.env_spec,
                                        hidden_dim=64,
                                        state_include_action=False)
        self.baseline = LinearFeatureBaseline(env_spec=self.env_spec)

    def test_rl2_trpo_pendulum(self):
        """Train RL2TRPO for one epoch and check the final average return."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = RL2TRPO(
                rl2_max_path_length=self.max_path_length,
                meta_batch_size=self.meta_batch_size,
                task_sampler=self.tasks,
                env_spec=self.env_spec,
                policy=self.policy,
                baseline=self.baseline,
                max_path_length=self.max_path_length * self.episode_per_task,
                discount=0.99,
                max_kl_step=0.01,
                optimizer=ConjugateGradientOptimizer,
                optimizer_args=dict(hvp_approach=FiniteDifferenceHvp(
                    base_eps=1e-5)))
            runner.setup(algo,
                         self.tasks.sample(self.meta_batch_size),
                         sampler_cls=LocalSampler,
                         n_workers=self.meta_batch_size,
                         worker_class=RL2Worker)
            last_avg_ret = runner.train(n_epochs=1,
                                        batch_size=self.episode_per_task *
                                        self.max_path_length *
                                        self.meta_batch_size)
            assert last_avg_ret > -40

    def test_rl2_trpo_pendulum_default_optimizer(self):
        """A hard KL constraint should select ConjugateGradientOptimizer."""
        with LocalTFRunner(snapshot_config, sess=self.sess):
            algo = RL2TRPO(rl2_max_path_length=self.max_path_length,
                           meta_batch_size=self.meta_batch_size,
                           task_sampler=self.tasks,
                           env_spec=self.env_spec,
                           policy=self.policy,
                           baseline=self.baseline,
                           kl_constraint='hard',
                           max_path_length=self.max_path_length *
                           self.episode_per_task,
                           discount=0.99,
                           max_kl_step=0.01)
            assert isinstance(algo._inner_algo._optimizer,
                              ConjugateGradientOptimizer)

    # NOTE(review): name says "ppo" but this exercises RL2TRPO's soft-KL
    # optimizer selection; consider renaming to
    # test_rl2_trpo_pendulum_default_optimizer2 in a follow-up.
    def test_ppo_pendulum_default_optimizer2(self):
        """A soft KL constraint should select PenaltyLbfgsOptimizer."""
        with LocalTFRunner(snapshot_config, sess=self.sess):
            algo = RL2TRPO(rl2_max_path_length=self.max_path_length,
                           meta_batch_size=self.meta_batch_size,
                           task_sampler=self.tasks,
                           env_spec=self.env_spec,
                           policy=self.policy,
                           baseline=self.baseline,
                           kl_constraint='soft',
                           max_path_length=self.max_path_length *
                           self.episode_per_task,
                           discount=0.99,
                           max_kl_step=0.01)
            assert isinstance(algo._inner_algo._optimizer,
                              PenaltyLbfgsOptimizer)

    def test_rl2_trpo_pendulum_invalid_kl_constraint(self):
        """An unknown kl_constraint value should raise ValueError."""
        with LocalTFRunner(snapshot_config, sess=self.sess):
            with pytest.raises(ValueError):
                RL2TRPO(rl2_max_path_length=self.max_path_length,
                        meta_batch_size=self.meta_batch_size,
                        task_sampler=self.tasks,
                        env_spec=self.env_spec,
                        policy=self.policy,
                        baseline=self.baseline,
                        kl_constraint='xyz',
                        max_path_length=self.max_path_length *
                        self.episode_per_task,
                        discount=0.99,
                        max_kl_step=0.01)
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_td3.py | """Create a test that fails when garage.tf.algos.TD3 performance is too low."""
import gym
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import AddGaussianNoise
from garage.replay_buffer import PathBuffer
from garage.tf.algos import TD3
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestTD3(TfGraphTestCase):
    """Integration tests for the TD3 algorithm."""

    @pytest.mark.mujoco_long
    def test_td3_pendulum(self):
        """TD3 should exceed a 400 average return on the pendulum task."""
        with LocalTFRunner(snapshot_config) as runner:
            pendulum = GarageEnv(gym.make('InvertedDoublePendulum-v2'))
            actor = ContinuousMLPPolicy(env_spec=pendulum.spec,
                                        hidden_sizes=[400, 300],
                                        hidden_nonlinearity=tf.nn.relu,
                                        output_nonlinearity=tf.nn.tanh)
            noisy_actor = AddGaussianNoise(pendulum.spec,
                                           actor,
                                           max_sigma=0.1,
                                           min_sigma=0.1)
            # TD3 uses twin critics to reduce overestimation bias.
            critic_one = ContinuousMLPQFunction(
                name='ContinuousMLPQFunction',
                env_spec=pendulum.spec,
                hidden_sizes=[400, 300],
                action_merge_layer=0,
                hidden_nonlinearity=tf.nn.relu)
            critic_two = ContinuousMLPQFunction(
                name='ContinuousMLPQFunction2',
                env_spec=pendulum.spec,
                hidden_sizes=[400, 300],
                action_merge_layer=0,
                hidden_nonlinearity=tf.nn.relu)
            transitions = PathBuffer(capacity_in_transitions=int(1e6))
            algo = TD3(env_spec=pendulum.spec,
                       policy=actor,
                       policy_lr=1e-3,
                       qf_lr=1e-3,
                       qf=critic_one,
                       qf2=critic_two,
                       replay_buffer=transitions,
                       steps_per_epoch=20,
                       target_update_tau=0.005,
                       n_train_steps=50,
                       discount=0.99,
                       smooth_return=False,
                       min_buffer_size=int(1e4),
                       buffer_batch_size=100,
                       policy_weight_decay=0.001,
                       qf_weight_decay=0.001,
                       exploration_policy=noisy_actor,
                       policy_optimizer=tf.compat.v1.train.AdamOptimizer,
                       qf_optimizer=tf.compat.v1.train.AdamOptimizer)
            runner.setup(algo, pendulum)
            mean_return = runner.train(n_epochs=10, batch_size=250)
            assert mean_return > 400
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_te.py | """This script tests Task Embedding algorithms."""
import akro
import numpy as np
import tensorflow as tf
from garage import InOutSpec
from garage.envs import GarageEnv, MultiEnvWrapper, PointEnv
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearMultiFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import TEPPO
from garage.tf.algos.te import TaskEmbeddingWorker
from garage.tf.embeddings import GaussianMLPEncoder
from garage.tf.policies import GaussianMLPTaskEmbeddingPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestTE(TfGraphTestCase):
    """Tests for Task Embedding (TE) workers and the TEPPO algorithm."""

    def setup_method(self):
        """Build a multi-task PointEnv, encoders, TE policy and baseline."""
        super().setup_method()

        def circle(r, n):
            """Generate n points on a circle of radius r.
            Args:
                r (float): Radius of the circle.
                n (int): Number of points to generate.
            Yields:
                tuple(float, float): Coordinate of a point.
            """
            for t in np.arange(0, 2 * np.pi, 2 * np.pi / n):
                yield r * np.sin(t), r * np.cos(t)

        N = 4
        goals = circle(3.0, N)
        # One PointEnv task per goal on the circle, keyed '1'..'4'.
        tasks = {
            str(i + 1): {
                'args': [],
                'kwargs': {
                    'goal': g,
                    'never_done': False,
                    'done_bonus': 0.0,
                }
            }
            for i, g in enumerate(goals)
        }
        latent_length = 1
        inference_window = 2
        self.batch_size = 100 * len(tasks)
        self.policy_ent_coeff = 2e-2
        self.encoder_ent_coeff = 2.2e-3
        self.inference_ce_coeff = 5e-2
        self.max_path_length = 100
        embedding_init_std = 1.0
        embedding_max_std = 2.0
        embedding_min_std = 0.38
        policy_init_std = 1.0
        policy_max_std = None
        policy_min_std = None
        task_names = sorted(tasks.keys())
        task_args = [tasks[t]['args'] for t in task_names]
        task_kwargs = [tasks[t]['kwargs'] for t in task_names]
        task_envs = [
            GarageEnv(PointEnv(*t_args, **t_kwargs))
            for t_args, t_kwargs in zip(task_args, task_kwargs)
        ]
        self.env = env = MultiEnvWrapper(task_envs,
                                         round_robin_strategy,
                                         mode='vanilla')
        # Latent space for task embeddings: a unit box of latent_length dims.
        latent_lb = np.zeros(latent_length, )
        latent_ub = np.ones(latent_length, )
        latent_space = akro.Box(latent_lb, latent_ub)
        # Trajectory space: a window of flattened observations, used by the
        # inference network to infer the latent from recent states.
        obs_lb, obs_ub = env.observation_space.bounds
        obs_lb_flat = env.observation_space.flatten(obs_lb)
        obs_ub_flat = env.observation_space.flatten(obs_ub)
        traj_lb = np.stack([obs_lb_flat] * inference_window)
        traj_ub = np.stack([obs_ub_flat] * inference_window)
        traj_space = akro.Box(traj_lb, traj_ub)
        task_embed_spec = InOutSpec(env.task_space, latent_space)
        traj_embed_spec = InOutSpec(traj_space, latent_space)
        # Inference network: trajectory window -> latent distribution.
        self.inference = GaussianMLPEncoder(
            name='inference',
            embedding_spec=traj_embed_spec,
            hidden_sizes=[20, 10],
            std_share_network=True,
            init_std=2.0,
            output_nonlinearity=tf.nn.tanh,
            min_std=embedding_min_std,
        )
        # Task encoder: one-hot task id -> latent distribution.
        task_encoder = GaussianMLPEncoder(
            name='embedding',
            embedding_spec=task_embed_spec,
            hidden_sizes=[20, 20],
            std_share_network=True,
            init_std=embedding_init_std,
            max_std=embedding_max_std,
            output_nonlinearity=tf.nn.tanh,
            min_std=embedding_min_std,
        )
        self.policy = GaussianMLPTaskEmbeddingPolicy(
            name='policy',
            env_spec=env.spec,
            encoder=task_encoder,
            hidden_sizes=[32, 16],
            std_share_network=True,
            max_std=policy_max_std,
            init_std=policy_init_std,
            min_std=policy_min_std,
        )
        self.baseline = LinearMultiFeatureBaseline(
            env_spec=env.spec, features=['observations', 'tasks', 'latents'])

    def test_te_worker(self):
        """TaskEmbeddingWorker rollouts should carry task and latent infos."""
        worker = TaskEmbeddingWorker(seed=1,
                                     max_path_length=100,
                                     worker_number=1)
        worker.update_env(self.env)
        worker.update_agent(self.policy)
        worker.start_rollout()
        while not worker.step_rollout():
            pass
        paths = worker.collect_rollout()
        # 4 tasks -> 4-dim one-hot; latent_length=1 -> 1-dim latent.
        assert 'task_onehot' in paths.env_infos.keys()
        assert paths.env_infos['task_onehot'][0].shape == (4, )
        assert 'latent' in paths.agent_infos.keys()
        assert paths.agent_infos['latent'][0].shape == (1, )

    def test_te_ppo(self):
        """TEPPO should run one training epoch without error."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = TEPPO(env_spec=self.env.spec,
                         policy=self.policy,
                         baseline=self.baseline,
                         inference=self.inference,
                         max_path_length=self.max_path_length,
                         discount=0.99,
                         lr_clip_range=0.2,
                         policy_ent_coeff=self.policy_ent_coeff,
                         encoder_ent_coeff=self.encoder_ent_coeff,
                         inference_ce_coeff=self.inference_ce_coeff,
                         use_softplus_entropy=True,
                         optimizer_args=dict(
                             batch_size=32,
                             max_epochs=10,
                         ),
                         inference_optimizer_args=dict(
                             batch_size=32,
                             max_epochs=10,
                         ),
                         center_adv=True,
                         stop_ce_gradient=True)
            runner.setup(algo,
                         self.env,
                         sampler_cls=LocalSampler,
                         sampler_args=None,
                         worker_class=TaskEmbeddingWorker)
            runner.train(n_epochs=1, batch_size=self.batch_size, plot=False)
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_tnpg.py | import gym
import pytest
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TNPG
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestTNPG(TfGraphTestCase):
    """Integration test for Truncated Natural Policy Gradient."""

    @pytest.mark.mujoco_long
    def test_tnpg_inverted_pendulum(self):
        """TNPG should exceed a 15 average return on InvertedPendulum-v2."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            pendulum = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
            gaussian_policy = GaussianMLPPolicy(name='policy',
                                                env_spec=pendulum.spec,
                                                hidden_sizes=(32, 32))
            value_fn = LinearFeatureBaseline(env_spec=pendulum.spec)
            algo = TNPG(env_spec=pendulum.spec,
                        policy=gaussian_policy,
                        baseline=value_fn,
                        max_path_length=100,
                        discount=0.99,
                        optimizer_args=dict(reg_coeff=5e-1))
            runner.setup(algo, pendulum)
            mean_return = runner.train(n_epochs=10, batch_size=10000)
            assert mean_return > 15
            pendulum.close()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_trpo.py | """
This script creates a test that fails when garage.tf.algos.TRPO performance is
too low.
"""
import gym
import pytest
import tensorflow as tf
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.experiment import snapshotter
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.baselines import GaussianCNNBaseline
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.optimizers import FiniteDifferenceHvp
from garage.tf.policies import CategoricalCNNPolicy
from garage.tf.policies import CategoricalGRUPolicy
from garage.tf.policies import CategoricalLSTMPolicy
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestTRPO(TfGraphTestCase):
    """Integration tests for TRPO on pendulum and CartPole environments."""

    def setup_method(self):
        """Create the shared env, Gaussian MLP policy and baseline."""
        super().setup_method()
        self.env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
        self.policy = GaussianMLPPolicy(
            env_spec=self.env.spec,
            hidden_sizes=(64, 64),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
        )
        self.baseline = GaussianMLPBaseline(
            env_spec=self.env.spec,
            regressor_args=dict(hidden_sizes=(32, 32)),
        )

    @pytest.mark.mujoco_long
    def test_trpo_pendulum(self):
        """Test TRPO with Pendulum environment."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = TRPO(env_spec=self.env.spec,
                        policy=self.policy,
                        baseline=self.baseline,
                        max_path_length=100,
                        discount=0.99,
                        gae_lambda=0.98,
                        policy_ent_coeff=0.0)
            runner.setup(algo, self.env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 40

    @pytest.mark.mujoco
    def test_trpo_unknown_kl_constraint(self):
        """TRPO should raise ValueError for an unknown kl_constraint."""
        with pytest.raises(ValueError, match='Invalid kl_constraint'):
            TRPO(
                env_spec=self.env.spec,
                policy=self.policy,
                baseline=self.baseline,
                max_path_length=100,
                discount=0.99,
                gae_lambda=0.98,
                policy_ent_coeff=0.0,
                kl_constraint='random kl_constraint',
            )

    @pytest.mark.mujoco_long
    def test_trpo_soft_kl_constraint(self):
        """Test TRPO with a soft KL constraint."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            algo = TRPO(env_spec=self.env.spec,
                        policy=self.policy,
                        baseline=self.baseline,
                        max_path_length=100,
                        discount=0.99,
                        gae_lambda=0.98,
                        policy_ent_coeff=0.0,
                        kl_constraint='soft')
            runner.setup(algo, self.env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 45

    @pytest.mark.mujoco_long
    def test_trpo_lstm_cartpole(self):
        """Train TRPO with a recurrent (LSTM) categorical policy."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('CartPole-v1')))
            policy = CategoricalLSTMPolicy(name='policy', env_spec=env.spec)
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            algo = TRPO(env_spec=env.spec,
                        policy=policy,
                        baseline=baseline,
                        max_path_length=100,
                        discount=0.99,
                        max_kl_step=0.01,
                        optimizer_args=dict(hvp_approach=FiniteDifferenceHvp(
                            base_eps=1e-5)))
            # NOTE(review): mutates module-level snapshotter state for this
            # test run only — confirm this is intentional and not leaked to
            # other tests.
            snapshotter.snapshot_dir = './'
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 60
            env.close()

    @pytest.mark.mujoco_long
    def test_trpo_gru_cartpole(self):
        """Train TRPO with a recurrent (GRU) categorical policy."""
        deterministic.set_seed(2)
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('CartPole-v1')))
            policy = CategoricalGRUPolicy(name='policy', env_spec=env.spec)
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            algo = TRPO(env_spec=env.spec,
                        policy=policy,
                        baseline=baseline,
                        max_path_length=100,
                        discount=0.99,
                        max_kl_step=0.01,
                        optimizer_args=dict(hvp_approach=FiniteDifferenceHvp(
                            base_eps=1e-5)))
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > 40
            env.close()

    def teardown_method(self):
        """Close the shared env before the base-class teardown."""
        self.env.close()
        super().teardown_method()
class TestTRPOCNNCubeCrash(TfGraphTestCase):
    """Integration test for TRPO with CNN policy/baseline on pixel input."""

    @pytest.mark.large
    def test_trpo_cnn_cubecrash(self):
        """TRPO with CNN networks should beat a -1.5 return on CubeCrash-v0."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('CubeCrash-v0')))
            policy = CategoricalCNNPolicy(env_spec=env.spec,
                                          filters=((32, (8, 8)), (64, (4, 4))),
                                          strides=(4, 2),
                                          padding='VALID',
                                          hidden_sizes=(32, 32))
            baseline = GaussianCNNBaseline(
                env_spec=env.spec,
                regressor_args=dict(filters=((32, (8, 8)), (64, (4, 4))),
                                    strides=(4, 2),
                                    padding='VALID',
                                    hidden_sizes=(32, 32),
                                    use_trust_region=True))
            algo = TRPO(env_spec=env.spec,
                        policy=policy,
                        baseline=baseline,
                        max_path_length=100,
                        discount=0.99,
                        gae_lambda=0.98,
                        max_kl_step=0.01,
                        policy_ent_coeff=0.0,
                        # Keep image observations unflattened for the CNNs.
                        flatten_input=False)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=2048)
            assert last_avg_ret > -1.5
            env.close()
| 6,691 | 36.595506 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/algos/test_vpg.py | import pytest
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import VPG
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestVPG(TfGraphTestCase):
    """Integration test for vanilla policy gradient."""

    @pytest.mark.large
    def test_vpg_cartpole(self):
        """VPG should exceed a 90 average return on CartPole-v1."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            cartpole = GarageEnv(env_name='CartPole-v1')
            categorical_policy = CategoricalMLPPolicy(name='policy',
                                                      env_spec=cartpole.spec,
                                                      hidden_sizes=(32, 32))
            value_fn = LinearFeatureBaseline(env_spec=cartpole.spec)
            algo = VPG(env_spec=cartpole.spec,
                       policy=categorical_policy,
                       baseline=value_fn,
                       max_path_length=100,
                       discount=0.99,
                       optimizer_args=dict(learning_rate=0.01, ))
            runner.setup(algo, cartpole)
            mean_return = runner.train(n_epochs=10, batch_size=10000)
            assert mean_return > 90
            cartpole.close()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/baselines/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/baselines/test_baselines.py | """
This script creates a test that fails when
garage.tf.baselines failed to initialize.
"""
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.baselines import ContinuousMLPBaseline
from garage.tf.baselines import GaussianMLPBaseline
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestTfBaselines(TfGraphTestCase):
    """Smoke tests that garage.tf.baselines construct without error."""

    def test_baseline(self):
        """Both TF baselines should build and expose their parameters."""
        # NOTE(review): env_spec is passed the env itself, not env.spec —
        # preserved as-is; confirm whether .spec was intended.
        dummy_env = GarageEnv(DummyBoxEnv())
        continuous_baseline = ContinuousMLPBaseline(env_spec=dummy_env)
        gaussian_baseline = GaussianMLPBaseline(env_spec=dummy_env)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        continuous_baseline.get_param_values()
        gaussian_baseline.get_param_values()
        dummy_env.close()
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/baselines/test_continuous_mlp_baseline.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.baselines import ContinuousMLPBaseline
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.regressors import SimpleMLPRegressor
class TestContinuousMLPBaseline(TfGraphTestCase):
    """Tests for ContinuousMLPBaseline with its regressor mocked out."""

    @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
    def test_fit(self, obs_dim):
        """fit() then predict() should reproduce the fitted returns."""
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        # Swap the real regressor for a lightweight stand-in.
        with mock.patch(('garage.tf.baselines.'
                         'continuous_mlp_baseline.'
                         'ContinuousMLPRegressor'),
                        new=SimpleMLPRegressor):
            cmb = ContinuousMLPBaseline(env_spec=box_env.spec)
        paths = [{
            'observations': [np.full(obs_dim, 1)],
            'returns': [1]
        }, {
            'observations': [np.full(obs_dim, 2)],
            'returns': [2]
        }]
        cmb.fit(paths)
        obs = {'observations': [np.full(obs_dim, 1), np.full(obs_dim, 2)]}
        prediction = cmb.predict(obs)
        assert np.array_equal(prediction, [1, 2])

    @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
    def test_param_values(self, obs_dim):
        """set_param_values() should copy parameters between baselines."""
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with mock.patch(('garage.tf.baselines.'
                         'continuous_mlp_baseline.'
                         'ContinuousMLPRegressor'),
                        new=SimpleMLPRegressor):
            cmb = ContinuousMLPBaseline(env_spec=box_env.spec)
            new_cmb = ContinuousMLPBaseline(env_spec=box_env.spec,
                                            name='ContinuousMLPBaseline2')
        # Manual change the parameter of ContinuousMLPBaseline
        with tf.compat.v1.variable_scope('ContinuousMLPBaseline2', reuse=True):
            return_var = tf.compat.v1.get_variable('SimpleMLPModel/return_var')
        return_var.load(1.0)
        old_param_values = cmb.get_param_values()
        new_param_values = new_cmb.get_param_values()
        assert not np.array_equal(old_param_values, new_param_values)
        new_cmb.set_param_values(old_param_values)
        new_param_values = new_cmb.get_param_values()
        assert np.array_equal(old_param_values, new_param_values)

    @pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
    def test_get_params_internal(self, obs_dim):
        """get_params_internal() should match the scope's trainable vars."""
        box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
        with mock.patch(('garage.tf.baselines.'
                         'continuous_mlp_baseline.'
                         'ContinuousMLPRegressor'),
                        new=SimpleMLPRegressor):
            cmb = ContinuousMLPBaseline(env_spec=box_env.spec)
            params_interal = cmb.get_params_internal()
            trainable_params = tf.compat.v1.trainable_variables(
                scope='ContinuousMLPBaseline')
            assert np.array_equal(params_interal, trainable_params)

    def test_is_pickleable(self):
        """Pickling then unpickling should preserve predictions."""
        box_env = GarageEnv(DummyBoxEnv(obs_dim=(1, )))
        with mock.patch(('garage.tf.baselines.'
                         'continuous_mlp_baseline.'
                         'ContinuousMLPRegressor'),
                        new=SimpleMLPRegressor):
            cmb = ContinuousMLPBaseline(env_spec=box_env.spec)
        obs = {'observations': [np.full(1, 1), np.full(1, 1)]}
        with tf.compat.v1.variable_scope('ContinuousMLPBaseline', reuse=True):
            return_var = tf.compat.v1.get_variable('SimpleMLPModel/return_var')
        return_var.load(1.0)
        prediction = cmb.predict(obs)
        h = pickle.dumps(cmb)
        # Unpickle into a fresh graph to prove full self-containment.
        with tf.compat.v1.Session(graph=tf.Graph()):
            cmb_pickled = pickle.loads(h)
            prediction2 = cmb_pickled.predict(obs)
            assert np.array_equal(prediction, prediction2)
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/baselines/test_gaussian_cnn_baseline.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.misc.tensor_utils import normalize_pixel_batch
from garage.tf.baselines import GaussianCNNBaseline
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
from tests.fixtures.regressors import SimpleGaussianCNNRegressor
class TestGaussianCNNBaseline(TfGraphTestCase):
@pytest.mark.parametrize('obs_dim', [[1, 1, 1], [2, 2, 2], [1, 1], [2, 2]])
def test_fit(self, obs_dim):
box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
with mock.patch(('garage.tf.baselines.'
'gaussian_cnn_baseline.'
'GaussianCNNRegressor'),
new=SimpleGaussianCNNRegressor):
gcb = GaussianCNNBaseline(env_spec=box_env.spec)
paths = [{
'observations': [np.full(obs_dim, 1)],
'returns': [1]
}, {
'observations': [np.full(obs_dim, 2)],
'returns': [2]
}]
gcb.fit(paths)
obs = {'observations': [np.full(obs_dim, 1), np.full(obs_dim, 2)]}
prediction = gcb.predict(obs)
assert np.array_equal(prediction, [1, 2])
@pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1, 1, 1], [2, 2, 2, 2]])
def test_invalid_obs_shape(self, obs_dim):
box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
with pytest.raises(ValueError):
GaussianCNNBaseline(env_spec=box_env.spec)
def test_obs_is_image(self):
env = GarageEnv(DummyDiscretePixelEnv(), is_image=True)
with mock.patch(('garage.tf.baselines.'
'gaussian_cnn_baseline.'
'GaussianCNNRegressor'),
new=SimpleGaussianCNNRegressor):
with mock.patch(
'garage.tf.baselines.'
'gaussian_cnn_baseline.'
'normalize_pixel_batch',
side_effect=normalize_pixel_batch) as npb:
gcb = GaussianCNNBaseline(env_spec=env.spec)
obs_dim = env.spec.observation_space.shape
paths = [{
'observations': [np.full(obs_dim, 1)],
'returns': [1]
}, {
'observations': [np.full(obs_dim, 2)],
'returns': [2]
}]
gcb.fit(paths)
observations = np.concatenate(
[p['observations'] for p in paths])
assert npb.call_count == 1, (
"Expected '%s' to have been called once. Called %s times."
% (npb._mock_name or 'mock', npb.call_count))
assert (npb.call_args_list[0][0][0] == observations).all()
obs = {
'observations': [np.full(obs_dim, 1),
np.full(obs_dim, 2)]
}
observations = obs['observations']
gcb.predict(obs)
assert npb.call_args_list[1][0][0] == observations
def test_obs_not_image(self):
env = GarageEnv(DummyDiscretePixelEnv(), is_image=False)
with mock.patch(('garage.tf.baselines.'
'gaussian_cnn_baseline.'
'GaussianCNNRegressor'),
new=SimpleGaussianCNNRegressor):
with mock.patch(
'garage.tf.baselines.'
'gaussian_cnn_baseline.'
'normalize_pixel_batch',
side_effect=normalize_pixel_batch) as npb:
gcb = GaussianCNNBaseline(env_spec=env.spec)
obs_dim = env.spec.observation_space.shape
paths = [{
'observations': [np.full(obs_dim, 1)],
'returns': [1]
}, {
'observations': [np.full(obs_dim, 2)],
'returns': [2]
}]
gcb.fit(paths)
obs = {
'observations': [np.full(obs_dim, 1),
np.full(obs_dim, 2)]
}
gcb.predict(obs)
assert not npb.called
@pytest.mark.parametrize('obs_dim', [[1, 1, 1], [2, 2, 2], [1, 1], [2, 2]])
def test_param_values(self, obs_dim):
box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
with mock.patch(('garage.tf.baselines.'
'gaussian_cnn_baseline.'
'GaussianCNNRegressor'),
new=SimpleGaussianCNNRegressor):
gcb = GaussianCNNBaseline(env_spec=box_env.spec)
new_gcb = GaussianCNNBaseline(env_spec=box_env.spec,
name='GaussianCNNBaseline2')
# Manual change the parameter of GaussianCNNBaseline
with tf.compat.v1.variable_scope('GaussianCNNBaseline', reuse=True):
return_var = tf.compat.v1.get_variable(
'SimpleGaussianCNNModel/return_var')
return_var.load(1.0)
old_param_values = gcb.get_param_values()
new_param_values = new_gcb.get_param_values()
assert not np.array_equal(old_param_values, new_param_values)
new_gcb.set_param_values(old_param_values)
new_param_values = new_gcb.get_param_values()
assert np.array_equal(old_param_values, new_param_values)
@pytest.mark.parametrize('obs_dim', [[1, 1, 1], [2, 2, 2], [1, 1], [2, 2]])
def test_get_params_internal(self, obs_dim):
box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
with mock.patch(('garage.tf.baselines.'
'gaussian_cnn_baseline.'
'GaussianCNNRegressor'),
new=SimpleGaussianCNNRegressor):
gcb = GaussianCNNBaseline(env_spec=box_env.spec,
regressor_args=dict())
params_interal = gcb.get_params_internal()
trainable_params = tf.compat.v1.trainable_variables(
scope='GaussianCNNBaseline')
assert np.array_equal(params_interal, trainable_params)
def test_is_pickleable(self):
box_env = GarageEnv(DummyBoxEnv(obs_dim=(1, 1)))
with mock.patch(('garage.tf.baselines.'
'gaussian_cnn_baseline.'
'GaussianCNNRegressor'),
new=SimpleGaussianCNNRegressor):
gcb = GaussianCNNBaseline(env_spec=box_env.spec)
obs = {'observations': [np.full((1, 1), 1), np.full((1, 1), 1)]}
with tf.compat.v1.variable_scope('GaussianCNNBaseline', reuse=True):
return_var = tf.compat.v1.get_variable(
'SimpleGaussianCNNModel/return_var')
return_var.load(1.0)
prediction = gcb.predict(obs)
h = pickle.dumps(gcb)
with tf.compat.v1.Session(graph=tf.Graph()):
gcb_pickled = pickle.loads(h)
prediction2 = gcb_pickled.predict(obs)
assert np.array_equal(prediction, prediction2)
| 7,255 | 39.99435 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/baselines/test_gaussian_mlp_baseline.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.baselines import GaussianMLPBaseline
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.regressors import SimpleGaussianMLPRegressor
class TestGaussianMLPBaseline(TfGraphTestCase):
@pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
def test_fit(self, obs_dim):
box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
with mock.patch(('garage.tf.baselines.'
'gaussian_mlp_baseline.'
'GaussianMLPRegressor'),
new=SimpleGaussianMLPRegressor):
gmb = GaussianMLPBaseline(env_spec=box_env.spec)
paths = [{
'observations': [np.full(obs_dim, 1)],
'returns': [1]
}, {
'observations': [np.full(obs_dim, 2)],
'returns': [2]
}]
gmb.fit(paths)
obs = {'observations': [np.full(obs_dim, 1), np.full(obs_dim, 2)]}
prediction = gmb.predict(obs)
assert np.array_equal(prediction, [1, 2])
@pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
def test_param_values(self, obs_dim):
box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
with mock.patch(('garage.tf.baselines.'
'gaussian_mlp_baseline.'
'GaussianMLPRegressor'),
new=SimpleGaussianMLPRegressor):
gmb = GaussianMLPBaseline(env_spec=box_env.spec)
new_gmb = GaussianMLPBaseline(env_spec=box_env.spec,
name='GaussianMLPBaseline2')
# Manual change the parameter of GaussianMLPBaseline
with tf.compat.v1.variable_scope('GaussianMLPBaseline', reuse=True):
return_var = tf.compat.v1.get_variable(
'SimpleGaussianMLPModel/return_var')
return_var.load(1.0)
old_param_values = gmb.get_param_values()
new_param_values = new_gmb.get_param_values()
assert not np.array_equal(old_param_values, new_param_values)
new_gmb.set_param_values(old_param_values)
new_param_values = new_gmb.get_param_values()
assert np.array_equal(old_param_values, new_param_values)
@pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1], [2, 2]])
def test_get_params_internal(self, obs_dim):
box_env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim))
with mock.patch(('garage.tf.baselines.'
'gaussian_mlp_baseline.'
'GaussianMLPRegressor'),
new=SimpleGaussianMLPRegressor):
gmb = GaussianMLPBaseline(env_spec=box_env.spec,
regressor_args=dict())
params_interal = gmb.get_params_internal()
trainable_params = tf.compat.v1.trainable_variables(
scope='GaussianMLPBaseline')
assert np.array_equal(params_interal, trainable_params)
def test_is_pickleable(self):
box_env = GarageEnv(DummyBoxEnv(obs_dim=(1, )))
with mock.patch(('garage.tf.baselines.'
'gaussian_mlp_baseline.'
'GaussianMLPRegressor'),
new=SimpleGaussianMLPRegressor):
gmb = GaussianMLPBaseline(env_spec=box_env.spec)
obs = {'observations': [np.full(1, 1), np.full(1, 1)]}
with tf.compat.v1.variable_scope('GaussianMLPBaseline', reuse=True):
return_var = tf.compat.v1.get_variable(
'SimpleGaussianMLPModel/return_var')
return_var.load(1.0)
prediction = gmb.predict(obs)
h = pickle.dumps(gmb)
with tf.compat.v1.Session(graph=tf.Graph()):
gmb_pickled = pickle.loads(h)
prediction2 = gmb_pickled.predict(obs)
assert np.array_equal(prediction, prediction2)
| 4,024 | 39.656566 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/distributions/test_diagonal_gaussian.py | import numpy as np
from garage.tf.distributions import DiagonalGaussian
def test_kl():
gaussian = DiagonalGaussian(dim=2)
dist1 = dict(mean=np.array([0, 0]), log_std=np.array([0, 0]))
dist2 = dict(mean=np.array([0, 0]), log_std=np.array([1, 1]))
dist3 = dict(mean=np.array([1, 1]), log_std=np.array([0, 0]))
assert np.isclose(gaussian.kl(dist1, dist1), 0)
assert np.isclose(gaussian.kl(dist1, dist2),
2 * (1 + np.e**2) / (2 * np.e**2))
assert np.isclose(gaussian.kl(dist3, dist1), 2 * 0.5)
def test_sample():
gaussian = DiagonalGaussian(dim=2)
dist = dict(mean=np.array([1, 1]), log_std=np.array([0, 0]))
samples = [gaussian.sample(dist) for _ in range(10000)]
assert np.isclose(np.mean(samples), 1, atol=0.1)
assert np.isclose(np.var(samples), 1, atol=0.1)
def test_sample_sym():
gaussian = DiagonalGaussian(dim=2)
dist = dict(mean=np.array([1., 1.], dtype=np.float32),
log_std=np.array([0., 0.], dtype=np.float32))
samples = [gaussian.sample_sym(dist).numpy() for _ in range(10000)]
assert np.isclose(np.mean(samples), 1, atol=0.1)
assert np.isclose(np.var(samples), 1, atol=0.1)
| 1,197 | 34.235294 | 71 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/embeddings/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/embeddings/test_gaussian_mlp_encoder.py | import pickle
from unittest import mock
# pylint: disable=wrong-import-order
import akro
import numpy as np
import pytest
import tensorflow as tf
from garage import InOutSpec
from garage.envs import GarageEnv
from garage.tf.embeddings import GaussianMLPEncoder
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.models import SimpleGaussianMLPModel
class TestGaussianMLPEncoder(TfGraphTestCase):
@pytest.mark.parametrize('obs_dim, embedding_dim', [
((1, ), (1, )),
((1, ), (2, )),
((2, ), (2, )),
((1, 1), (1, 1)),
((1, 1), (2, 2)),
((2, 2), (2, 2)),
])
def test_get_embedding(self, obs_dim, embedding_dim):
env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=embedding_dim))
embedding_spec = InOutSpec(input_space=env.spec.observation_space,
output_space=env.spec.action_space)
embedding = GaussianMLPEncoder(embedding_spec)
task_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
embedding.input_dim))
embedding.build(task_input, name='task_input')
env.reset()
obs, _, _, _ = env.step(1)
latent, _ = embedding.get_latent(obs)
latents, _ = embedding.get_latents([obs] * 5)
assert env.action_space.contains(latent)
for latent in latents:
assert env.action_space.contains(latent)
@pytest.mark.parametrize('obs_dim, embedding_dim', [
((1, ), (1, )),
((1, ), (2, )),
((2, ), (2, )),
((1, 1), (1, 1)),
((1, 1), (2, 2)),
((2, 2), (2, 2)),
])
def test_is_pickleable(self, obs_dim, embedding_dim):
env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=embedding_dim))
embedding_spec = InOutSpec(input_space=env.spec.observation_space,
output_space=env.spec.action_space)
embedding = GaussianMLPEncoder(embedding_spec)
env.reset()
obs, _, _, _ = env.step(1)
obs_dim = env.spec.observation_space.flat_dim
with tf.compat.v1.variable_scope('GaussianMLPEncoder/GaussianMLPModel',
reuse=True):
bias = tf.compat.v1.get_variable(
'dist_params/mean_network/hidden_0/bias')
# assign it to all one
bias.load(tf.ones_like(bias).eval())
output1 = self.sess.run(
[embedding.distribution.loc,
embedding.distribution.stddev()],
feed_dict={embedding.model.input: [[obs.flatten()]]})
p = pickle.dumps(embedding)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
embedding_pickled = pickle.loads(p)
output2 = sess.run(
[
embedding_pickled.distribution.loc,
embedding_pickled.distribution.stddev()
],
feed_dict={embedding_pickled.model.input: [[obs.flatten()]]})
assert np.array_equal(output1, output2)
def test_clone(self):
env = GarageEnv(DummyBoxEnv(obs_dim=(2, ), action_dim=(2, )))
embedding_spec = InOutSpec(input_space=env.spec.observation_space,
output_space=env.spec.action_space)
embedding = GaussianMLPEncoder(embedding_spec)
clone_embedding = embedding.clone(name='cloned')
assert clone_embedding.input_dim == embedding.input_dim
assert clone_embedding.output_dim == embedding.output_dim
def test_auxiliary(self):
input_space = akro.Box(np.array([-1, -1]), np.array([1, 1]))
latent_space = akro.Box(np.array([-2, -2, -2]), np.array([2, 2, 2]))
embedding_spec = InOutSpec(input_space=input_space,
output_space=latent_space)
embedding = GaussianMLPEncoder(embedding_spec,
hidden_sizes=[32, 32, 32])
# 9 Layers: (3 hidden + 1 output) * (1 weight + 1 bias) + 1 log_std
assert len(embedding.get_params()) == 9
assert len(embedding.get_global_vars()) == 9
assert embedding.distribution.loc.get_shape().as_list(
)[-1] == latent_space.shape[0]
assert embedding.input.shape.as_list() == [
None, None, input_space.shape[0]
]
assert (embedding.latent_mean.shape.as_list() == [
None, None, latent_space.shape[0]
])
assert (embedding.latent_std_param.shape.as_list() == [
None, 1, latent_space.shape[0]
])
# To increase coverage in embeddings/base.py
embedding.reset()
assert embedding.input_dim == embedding_spec.input_space.flat_dim
assert embedding.output_dim == embedding_spec.output_space.flat_dim
var_shapes = [
(2, 32),
(32, ), # input
(32, 32),
(32, ), # hidden 0
(32, 32),
(32, ), # hidden 1
(32, 3),
(3, ), # hidden 2
(3, )
] # log_std
assert sorted(embedding.get_param_shapes()) == sorted(var_shapes)
var_count = sum(list(map(np.prod, var_shapes)))
embedding.set_param_values(np.ones(var_count))
assert (embedding.get_param_values() == np.ones(var_count)).all()
assert (sorted(
map(np.shape, embedding.flat_to_params(
np.ones(var_count)))) == sorted(var_shapes))
| 5,614 | 37.724138 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/envs/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/envs/test_base.py | import pickle
import gym
import pytest
from garage.envs import GarageEnv
from tests.helpers import step_env_with_gym_quirks
class TestGarageEnv:
def test_is_pickleable(self):
env = GarageEnv(env_name='CartPole-v1')
round_trip = pickle.loads(pickle.dumps(env))
assert round_trip.spec == env.spec
assert round_trip.env.spec.id == env.env.spec.id
assert (round_trip.env.spec.max_episode_steps ==
env.env.spec.max_episode_steps)
@pytest.mark.nightly
@pytest.mark.parametrize('spec', list(gym.envs.registry.all()))
def test_all_gym_envs(self, spec):
if spec._env_name.startswith('Defender'):
pytest.skip(
'Defender-* envs bundled in atari-py 0.2.x don\'t load')
env = GarageEnv(spec.make())
step_env_with_gym_quirks(env, spec)
@pytest.mark.nightly
@pytest.mark.parametrize('spec', list(gym.envs.registry.all()))
def test_all_gym_envs_pickleable(self, spec):
if spec._env_name.startswith('Defender'):
pytest.skip(
'Defender-* envs bundled in atari-py 0.2.x don\'t load')
if spec.id == 'KellyCoinflipGeneralized-v0':
pytest.skip(
'KellyCoinflipGeneralized-v0\'s action space is random')
env = GarageEnv(env_name=spec.id)
step_env_with_gym_quirks(env,
spec,
n=1,
render=True,
serialize_env=True)
| 1,551 | 34.272727 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/experiment/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/experiment/test_local_tf_runner.py | import pytest
import ray
import tensorflow as tf
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler, RaySampler, singleton_pool
from garage.tf.algos import VPG
from garage.tf.plotter import Plotter
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.samplers import BatchSampler
from tests.fixtures import snapshot_config, TfGraphTestCase
from tests.fixtures.sampler import ray_session_fixture
class TestLocalRunner(TfGraphTestCase):
def test_session(self):
with LocalTFRunner(snapshot_config):
assert tf.compat.v1.get_default_session() is not None, (
'LocalTFRunner() should provide a default tf session.')
sess = tf.compat.v1.Session()
with LocalTFRunner(snapshot_config, sess=sess):
assert tf.compat.v1.get_default_session() is sess, (
'LocalTFRunner(sess) should use sess as default session.')
def test_singleton_pool(self):
max_cpus = 8
with LocalTFRunner(snapshot_config, max_cpus=max_cpus):
assert max_cpus == singleton_pool.n_parallel, (
'LocalTFRunner(max_cpu) should set up singleton_pool.')
def test_train(self):
with LocalTFRunner(snapshot_config) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(8, 8))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
runner.setup(algo, env)
runner.train(n_epochs=1, batch_size=100)
def test_external_sess(self):
with tf.compat.v1.Session() as sess:
with LocalTFRunner(snapshot_config, sess=sess):
pass
# sess should still be the default session here.
tf.no_op().run()
def test_set_plot(self):
with LocalTFRunner(snapshot_config) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(8, 8))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
runner.setup(algo, env)
runner.train(n_epochs=1, batch_size=100, plot=True)
assert isinstance(runner._plotter, Plotter), (
'self.plotter in LocalTFRunner should be set to Plotter.')
def test_call_train_before_set_up(self):
with pytest.raises(Exception):
with LocalTFRunner(snapshot_config) as runner:
runner.train(n_epochs=1, batch_size=100)
def test_call_save_before_set_up(self):
with pytest.raises(Exception):
with LocalTFRunner(snapshot_config) as runner:
runner.save(0)
def test_make_sampler_batch_sampler(self):
with LocalTFRunner(snapshot_config) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(8, 8))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
runner.setup(algo,
env,
sampler_cls=BatchSampler,
sampler_args=dict(n_envs=3))
assert isinstance(runner._sampler, BatchSampler)
runner.train(n_epochs=1, batch_size=10)
def test_make_sampler_local_sampler(self):
with LocalTFRunner(snapshot_config) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(8, 8))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
runner.setup(algo, env, sampler_cls=LocalSampler)
assert isinstance(runner._sampler, LocalSampler)
runner.train(n_epochs=1, batch_size=10)
def test_make_sampler_ray_sampler(self, ray_session_fixture):
del ray_session_fixture
assert ray.is_initialized()
with LocalTFRunner(snapshot_config) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(8, 8))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
runner.setup(algo, env, sampler_cls=RaySampler)
assert isinstance(runner._sampler, RaySampler)
runner.train(n_epochs=1, batch_size=10)
| 6,269 | 37.703704 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/misc/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/misc/test_tensor_utils.py | """
Test tf utility functions mainly in garage.tf.misc.tensor_utils
"""
import numpy as np
import tensorflow as tf
from garage.tf.misc.tensor_utils import compute_advantages, get_target_ops
from tests.fixtures import TfGraphTestCase
class TestTensorUtil(TfGraphTestCase):
"""Test class for tf utility functions."""
def test_compute_advantages(self):
"""Tests compute_advantages function in utils."""
discount = 1
gae_lambda = 1
max_len = 1
rewards = tf.compat.v1.placeholder(dtype=tf.float32,
name='reward',
shape=[None, None])
baselines = tf.compat.v1.placeholder(dtype=tf.float32,
name='baseline',
shape=[None, None])
adv = compute_advantages(discount, gae_lambda, max_len, baselines,
rewards)
# Set up inputs and outputs
rewards_val = np.ones(shape=[2, 1])
baselines_val = np.zeros(shape=[2, 1])
desired_val = np.array([1., 1.])
adv = self.sess.run(adv,
feed_dict={
rewards: rewards_val,
baselines: baselines_val,
})
assert np.array_equal(adv, desired_val)
def test_get_target_ops(self):
var = tf.compat.v1.get_variable('var', [1],
initializer=tf.constant_initializer(1))
target_var = tf.compat.v1.get_variable(
'target_var', [1], initializer=tf.constant_initializer(2))
self.sess.run(tf.compat.v1.global_variables_initializer())
assert target_var.eval() == 2
update_ops = get_target_ops([var], [target_var])
self.sess.run(update_ops)
assert target_var.eval() == 1
def test_get_target_ops_tau(self):
var = tf.compat.v1.get_variable('var', [1],
initializer=tf.constant_initializer(1))
target_var = tf.compat.v1.get_variable(
'target_var', [1], initializer=tf.constant_initializer(2))
self.sess.run(tf.compat.v1.global_variables_initializer())
assert target_var.eval() == 2
init_ops, update_ops = get_target_ops([var], [target_var], tau=0.2)
self.sess.run(update_ops)
assert np.allclose(target_var.eval(), 1.8)
self.sess.run(init_ops)
assert np.allclose(target_var.eval(), 1)
| 2,567 | 39.125 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_categorical_cnn_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.models import CategoricalCNNModel
from tests.fixtures import TfGraphTestCase
class TestCategoricalMLPModel(TfGraphTestCase):
def setup_method(self):
super().setup_method()
batch_size = 5
input_width = 10
input_height = 10
self._obs_input = np.ones(
(batch_size, 1, input_width, input_height, 3))
self._input_shape = (input_width, input_height, 3
) # height, width, channel
self._input_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, None) +
self._input_shape,
name='input')
def test_dist(self):
model = CategoricalCNNModel(output_dim=1,
filters=((5, (3, 3)), ),
strides=(1, ),
padding='VALID')
dist = model.build(self._input_ph).dist
assert isinstance(dist, tfp.distributions.OneHotCategorical)
def test_instantiate_with_different_name(self):
model = CategoricalCNNModel(output_dim=1,
filters=((5, (3, 3)), ),
strides=(1, ),
padding='VALID')
model.build(self._input_ph)
model.build(self._input_ph, name='another_model')
# yapf: disable
@pytest.mark.parametrize(
'output_dim, filters, strides, padding, hidden_sizes', [
(1, ((1, (1, 1)), ), (1, ), 'SAME', (1, )),
(1, ((3, (3, 3)), ), (2, ), 'VALID', (2, )),
(1, ((3, (3, 3)), ), (2, ), 'SAME', (3, )),
(2, ((3, (3, 3)), (32, (3, 3))), (2, 2), 'VALID', (1, 1)),
(3, ((3, (3, 3)), (32, (3, 3))), (2, 2), 'SAME', (2, 2)),
])
# yapf: enable
def test_is_pickleable(self, output_dim, filters, strides, padding,
hidden_sizes):
model = CategoricalCNNModel(output_dim=output_dim,
filters=filters,
strides=strides,
padding=padding,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=None,
hidden_w_init=tf.ones_initializer(),
output_w_init=tf.ones_initializer())
dist = model.build(self._input_ph).dist
# assign bias to all one
with tf.compat.v1.variable_scope('CategoricalCNNModel', reuse=True):
cnn_bias = tf.compat.v1.get_variable('CNNModel/cnn/h0/bias')
bias = tf.compat.v1.get_variable('MLPModel/mlp/hidden_0/bias')
bias.load(tf.ones_like(bias).eval())
cnn_bias.load(tf.ones_like(cnn_bias).eval())
output1 = self.sess.run(dist.probs,
feed_dict={self._input_ph: self._obs_input})
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None) +
self._input_shape)
model_pickled = pickle.loads(h)
dist2 = model_pickled.build(input_var).dist
output2 = sess.run(dist2.probs,
feed_dict={input_var: self._obs_input})
assert np.array_equal(output1, output2)
| 3,728 | 41.862069 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_categorical_gru_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.models import CategoricalGRUModel
from tests.fixtures import TfGraphTestCase
class TestCategoricalGRUModel(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 1
self.time_step = 1
self.feature_shape = 2
self.output_dim = 1
self.obs_inputs = np.full(
(self.batch_size, self.time_step, self.feature_shape), 1.)
self.obs_input = np.full((self.batch_size, self.feature_shape), 1.)
self.input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
self.step_input_var = tf.compat.v1.placeholder(
tf.float32, shape=(None, self.feature_shape), name='input')
def test_dist(self):
model = CategoricalGRUModel(output_dim=1, hidden_dim=1)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_hidden',
dtype=tf.float32)
dist = model.build(self.input_var, self.step_input_var,
step_hidden_var).dist
assert isinstance(dist, tfp.distributions.OneHotCategorical)
def test_output_nonlinearity(self):
model = CategoricalGRUModel(output_dim=1,
hidden_dim=4,
output_nonlinearity=lambda x: x / 2)
obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, None, 1))
step_obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))
step_hidden_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
obs = np.ones((1, 1, 1))
dist = model.build(obs_ph, step_obs_ph, step_hidden_ph).dist
probs = tf.compat.v1.get_default_session().run(dist.probs,
feed_dict={obs_ph: obs})
assert probs == [0.5]
@pytest.mark.parametrize('output_dim', [1, 2, 5, 10])
def test_output_normalized(self, output_dim):
model = CategoricalGRUModel(output_dim=output_dim, hidden_dim=4)
obs_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, output_dim))
step_obs_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, output_dim))
step_hidden_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
obs = np.ones((1, 1, output_dim))
dist = model.build(obs_ph, step_obs_ph, step_hidden_ph).dist
probs = tf.compat.v1.get_default_session().run(tf.reduce_sum(
dist.probs),
feed_dict={obs_ph: obs})
assert np.isclose(probs, 1.0)
def test_is_pickleable(self):
model = CategoricalGRUModel(output_dim=1, hidden_dim=1)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_hidden',
dtype=tf.float32)
network = model.build(self.input_var, self.step_input_var,
step_hidden_var)
dist = network.dist
# assign bias to all one
with tf.compat.v1.variable_scope('CategoricalGRUModel/gru',
reuse=True):
init_hidden = tf.compat.v1.get_variable('initial_hidden')
init_hidden.load(tf.ones_like(init_hidden).eval())
hidden = np.zeros((self.batch_size, 1))
outputs1 = self.sess.run(dist.probs,
feed_dict={self.input_var: self.obs_inputs})
output1 = self.sess.run(
[network.step_output, network.step_hidden],
# yapf: disable
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden
})
# yapf: enable
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
model_pickled = pickle.loads(h)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
step_input_var = tf.compat.v1.placeholder(
tf.float32, shape=(None, self.feature_shape), name='input')
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
1),
name='initial_hidden',
dtype=tf.float32)
network2 = model_pickled.build(input_var, step_input_var,
step_hidden_var)
dist2 = network2.dist
outputs2 = sess.run(dist2.probs,
feed_dict={input_var: self.obs_inputs})
output2 = sess.run(
[network2.step_output, network2.step_hidden],
# yapf: disable
feed_dict={
step_input_var: self.obs_input,
step_hidden_var: hidden
})
# yapf: enable
assert np.array_equal(outputs1, outputs2)
assert np.array_equal(output1, output2)
| 5,858 | 45.133858 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_categorical_lstm_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.models import CategoricalLSTMModel
from tests.fixtures import TfGraphTestCase
class TestCategoricalLSTMModel(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 1
self.time_step = 1
self.feature_shape = 2
self.output_dim = 1
self.obs_inputs = np.full(
(self.batch_size, self.time_step, self.feature_shape), 1.)
self.obs_input = np.full((self.batch_size, self.feature_shape), 1.)
self._input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
self._step_input_var = tf.compat.v1.placeholder(
tf.float32, shape=(None, self.feature_shape), name='input')
def test_dist(self):
model = CategoricalLSTMModel(output_dim=1, hidden_dim=1)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_cell',
dtype=tf.float32)
dist = model.build(self._input_var, self._step_input_var,
step_hidden_var, step_cell_var).dist
assert isinstance(dist, tfp.distributions.OneHotCategorical)
def test_output_nonlinearity(self):
model = CategoricalLSTMModel(output_dim=1,
hidden_dim=4,
output_nonlinearity=lambda x: x / 2)
obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, None, 1))
step_obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))
step_hidden_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
step_cell_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
obs = np.ones((1, 1, 1))
dist = model.build(obs_ph, step_obs_ph, step_hidden_ph,
step_cell_ph).dist
probs = tf.compat.v1.get_default_session().run(dist.probs,
feed_dict={obs_ph: obs})
assert probs == [0.5]
@pytest.mark.parametrize('output_dim', [1, 2, 5, 10])
def test_output_normalized(self, output_dim):
model = CategoricalLSTMModel(output_dim=output_dim, hidden_dim=4)
obs_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, output_dim))
step_obs_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, output_dim))
step_hidden_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
step_cell_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 4))
obs = np.ones((1, 1, output_dim))
dist = model.build(obs_ph, step_obs_ph, step_hidden_ph,
step_cell_ph).dist
probs = tf.compat.v1.get_default_session().run(tf.reduce_sum(
dist.probs),
feed_dict={obs_ph: obs})
assert np.isclose(probs, 1.0)
def test_is_pickleable(self):
model = CategoricalLSTMModel(output_dim=1, hidden_dim=1)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_cell',
dtype=tf.float32)
network = model.build(self._input_var, self._step_input_var,
step_hidden_var, step_cell_var)
dist = network.dist
# assign bias to all one
with tf.compat.v1.variable_scope('CategoricalLSTMModel/lstm',
reuse=True):
init_hidden = tf.compat.v1.get_variable('initial_hidden')
init_hidden.load(tf.ones_like(init_hidden).eval())
hidden = np.zeros((self.batch_size, 1))
cell = np.zeros((self.batch_size, 1))
outputs1 = self.sess.run(dist.probs,
feed_dict={self._input_var: self.obs_inputs})
output1 = self.sess.run(
[network.step_output, network.step_hidden, network.step_cell],
feed_dict={
self._step_input_var: self.obs_input,
step_hidden_var: hidden,
step_cell_var: cell
})
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
model_pickled = pickle.loads(h)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
step_input_var = tf.compat.v1.placeholder(
tf.float32, shape=(None, self.feature_shape), name='input')
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
1),
name='initial_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
1),
name='initial_cell',
dtype=tf.float32)
network2 = model_pickled.build(input_var, step_input_var,
step_hidden_var, step_cell_var)
dist2 = network2.dist
outputs2 = sess.run(dist2.probs,
feed_dict={input_var: self.obs_inputs})
output2 = sess.run(
[
network2.step_output, network2.step_hidden,
network2.step_cell
],
feed_dict={
step_input_var: self.obs_input,
step_hidden_var: hidden,
step_cell_var: cell
})
assert np.array_equal(outputs1, outputs2)
assert np.array_equal(output1, output2)
| 6,902 | 47.272727 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_categorical_mlp_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.models import CategoricalMLPModel
from tests.fixtures import TfGraphTestCase
class TestCategoricalMLPModel(TfGraphTestCase):
    """Unit tests for CategoricalMLPModel (MLP producing a OneHotCategorical)."""

    def setup_method(self):
        """Create a (None, 5) float placeholder and a matching all-ones input."""
        super().setup_method()
        self.input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        self.obs = np.ones((1, 5))

    def test_dist(self):
        """The built network exposes a tfp OneHotCategorical distribution."""
        model = CategoricalMLPModel(output_dim=1)
        dist = model.build(self.input_var).dist
        assert isinstance(dist, tfp.distributions.OneHotCategorical)

    @pytest.mark.parametrize('output_dim', [1, 2, 5, 10])
    def test_output_normalized(self, output_dim):
        """Category probabilities sum to 1 for any output dimension."""
        model = CategoricalMLPModel(output_dim=output_dim)
        obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, output_dim))
        obs = np.ones((1, output_dim))
        dist = model.build(obs_ph).dist
        # Sum over all category probabilities; should be (close to) 1.
        probs = tf.compat.v1.get_default_session().run(tf.reduce_sum(
            dist.probs),
                                                       feed_dict={obs_ph: obs})
        assert np.isclose(probs, 1.0)

    def test_output_nonlinearity(self):
        """A custom output nonlinearity is applied to the network output."""
        model = CategoricalMLPModel(output_dim=1,
                                    output_nonlinearity=lambda x: x / 2)
        obs_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))
        obs = np.ones((1, 1))
        dist = model.build(obs_ph).dist
        probs = tf.compat.v1.get_default_session().run(dist.probs,
                                                       feed_dict={obs_ph: obs})
        # With a single category halved by the nonlinearity, probs is [0.5].
        assert probs == [0.5]

    # yapf: disable
    @pytest.mark.parametrize('output_dim, hidden_sizes', [
        (1, (1, )),
        (1, (2, )),
        (2, (3, )),
        (2, (1, 1)),
        (3, (2, 2)),
    ])
    # yapf: enable
    def test_is_pickleable(self, output_dim, hidden_sizes):
        """Pickling then unpickling the model reproduces identical outputs."""
        model = CategoricalMLPModel(output_dim=output_dim,
                                    hidden_sizes=hidden_sizes,
                                    hidden_nonlinearity=None,
                                    hidden_w_init=tf.ones_initializer(),
                                    output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        # assign bias to all one
        with tf.compat.v1.variable_scope('CategoricalMLPModel/mlp',
                                         reuse=True):
            bias = tf.compat.v1.get_variable('hidden_0/bias')
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(dist.probs,
                                feed_dict={self.input_var: self.obs})
        h = pickle.dumps(model)
        # Rebuild the pickled model in a fresh graph/session and compare.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model_pickled = pickle.loads(h)
            dist2 = model_pickled.build(input_var).dist
            output2 = sess.run(dist2.probs, feed_dict={input_var: self.obs})
            assert np.array_equal(output1, output2)
| 3,067 | 37.835443 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_cnn.py | import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models.cnn import cnn
from garage.tf.models.cnn import cnn_with_max_pooling
from tests.fixtures import TfGraphTestCase
from tests.helpers import convolve
from tests.helpers import max_pooling
class TestCNN(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 5
self.input_width = 10
self.input_height = 10
self.obs_input = np.ones(
(self.batch_size, self.input_width, self.input_height, 3))
input_shape = self.obs_input.shape[1:] # height, width, channel
self._input_ph = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + input_shape,
name='input')
self.hidden_nonlinearity = tf.nn.relu
@pytest.mark.parametrize('filters, strides', [
(((32, (1, 1)), ), (1, )),
(((32, (3, 3)), ), (1, )),
(((32, (2, 3)), ), (1, )),
(((32, (3, 3)), ), (2, )),
(((32, (2, 3)), ), (2, )),
(((32, (1, 1)), (64, (1, 1))), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (1, 1)),
(((32, (2, 3)), (64, (3, 3))), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (2, 2)),
(((32, (2, 3)), (64, (3, 3))), (2, 2)),
])
def test_output_shape_same(self, filters, strides):
with tf.compat.v1.variable_scope('CNN'):
self.cnn = cnn(input_var=self._input_ph,
filters=filters,
strides=strides,
name='cnn',
padding='SAME',
hidden_w_init=tf.constant_initializer(1),
hidden_nonlinearity=self.hidden_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
result = self.sess.run(self.cnn,
feed_dict={self._input_ph: self.obs_input})
height_size = self.input_height
width_size = self.input_width
for stride in strides:
height_size = int((height_size + stride - 1) / stride)
width_size = int((width_size + stride - 1) / stride)
flatten_shape = width_size * height_size * filters[-1][0]
assert result.shape == (5, flatten_shape)
@pytest.mark.parametrize('filters, strides', [
(((32, (1, 1)), ), (1, )),
(((32, (3, 3)), ), (1, )),
(((32, (2, 3)), ), (1, )),
(((32, (3, 3)), ), (2, )),
(((32, (2, 3)), ), (2, )),
(((32, (1, 1)), (64, (1, 1))), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (1, 1)),
(((32, (2, 3)), (64, (3, 3))), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (2, 2)),
(((32, (2, 3)), (64, (3, 3))), (2, 2)),
])
def test_output_shape_valid(self, filters, strides):
with tf.compat.v1.variable_scope('CNN'):
self.cnn = cnn(input_var=self._input_ph,
filters=filters,
strides=strides,
name='cnn',
padding='VALID',
hidden_w_init=tf.constant_initializer(1),
hidden_nonlinearity=self.hidden_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
result = self.sess.run(self.cnn,
feed_dict={self._input_ph: self.obs_input})
height_size = self.input_height
width_size = self.input_width
for filter_iter, stride in zip(filters, strides):
height_size = int((height_size - filter_iter[1][0]) / stride) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
flatten_shape = height_size * width_size * filters[-1][0]
assert result.shape == (self.batch_size, flatten_shape)
@pytest.mark.parametrize('filters, in_channels, strides',
[(((32, (1, 1)), ), (3, ), (1, )),
(((32, (3, 3)), ), (3, ), (1, )),
(((32, (2, 3)), ), (3, ), (1, )),
(((32, (3, 3)), ), (3, ), (2, )),
(((32, (2, 3)), ), (3, ), (2, )),
(((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1)),
(((32, (2, 3)), (64, (3, 3))), (3, 32), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2)),
(((32, (2, 3)), (64, (3, 3))), (3, 32), (2, 2))])
def test_output_with_identity_filter(self, filters, in_channels, strides):
with tf.compat.v1.variable_scope('CNN'):
self.cnn = cnn(input_var=self._input_ph,
filters=filters,
strides=strides,
name='cnn1',
padding='VALID',
hidden_w_init=tf.constant_initializer(1),
hidden_nonlinearity=self.hidden_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
result = self.sess.run(self.cnn,
feed_dict={self._input_ph: self.obs_input})
filter_sum = 1
# filter value after 3 layers of conv
for filter_iter, in_channel in zip(filters, in_channels):
filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel
height_size = self.input_height
width_size = self.input_width
for filter_iter, stride in zip(filters, strides):
height_size = int((height_size - filter_iter[1][0]) / stride) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
flatten_shape = height_size * width_size * filters[-1][0]
# flatten
h_out = np.full((self.batch_size, flatten_shape),
filter_sum,
dtype=np.float32)
np.testing.assert_array_equal(h_out, result)
# yapf: disable
@pytest.mark.parametrize('filters, in_channels, strides',
[(((32, (1, 1)), ), (3, ), (1, )),
(((32, (3, 3)), ), (3, ), (1, )),
(((32, (2, 3)), ), (3, ), (1, )),
(((32, (3, 3)), ), (3, ), (2, )),
(((32, (2, 3)), ), (3, ), (2, )),
(((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1)),
(((32, (2, 3)), (64, (3, 3))), (3, 32), (1, 1)),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2)),
(((32, (2, 3)), (64, (3, 3))), (3, 32), (2, 2))])
# yapf: enable
def test_output_with_random_filter(self, filters, in_channels, strides):
# Build a cnn with random filter weights
with tf.compat.v1.variable_scope('CNN'):
self.cnn2 = cnn(input_var=self._input_ph,
filters=filters,
strides=strides,
name='cnn1',
padding='VALID',
hidden_nonlinearity=self.hidden_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
result = self.sess.run(self.cnn2,
feed_dict={self._input_ph: self.obs_input})
two_layer = len(filters) == 2
# get weight values
with tf.compat.v1.variable_scope('CNN', reuse=True):
h0_w = tf.compat.v1.get_variable('cnn1/h0/weight').eval()
h0_b = tf.compat.v1.get_variable('cnn1/h0/bias').eval()
if two_layer:
h1_w = tf.compat.v1.get_variable('cnn1/h1/weight').eval()
h1_b = tf.compat.v1.get_variable('cnn1/h1/bias').eval()
filter_weights = (h0_w, h1_w) if two_layer else (h0_w, )
filter_bias = (h0_b, h1_b) if two_layer else (h0_b, )
# convolution according to TensorFlow's approach
input_val = convolve(_input=self.obs_input,
filter_weights=filter_weights,
filter_bias=filter_bias,
strides=strides,
filters=filters,
in_channels=in_channels,
hidden_nonlinearity=self.hidden_nonlinearity)
# flatten
dense_out = input_val.reshape((self.batch_size, -1)).astype(np.float32)
np.testing.assert_array_almost_equal(dense_out, result)
# yapf: disable
@pytest.mark.parametrize(
'filters, in_channels, strides, pool_shape, pool_stride', [
(((32, (1, 1)), ), (3, ), (1, ), 1, 1),
(((32, (3, 3)), ), (3, ), (1, ), 1, 1),
(((32, (2, 3)), ), (3, ), (1, ), 1, 1),
(((32, (3, 3)), ), (3, ), (2, ), 2, 2),
(((32, (2, 3)), ), (3, ), (2, ), 2, 2),
(((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1), 1, 1),
(((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1), 1, 1),
(((32, (2, 3)), (64, (3, 3))), (3, 32), (1, 1), 1, 1)
])
# yapf: enable
def test_output_with_max_pooling(self, filters, in_channels, strides,
pool_shape, pool_stride):
# Build a cnn with random filter weights
with tf.compat.v1.variable_scope('CNN'):
self.cnn2 = cnn_with_max_pooling(
input_var=self._input_ph,
filters=filters,
strides=strides,
name='cnn1',
pool_shapes=(pool_shape, pool_shape),
pool_strides=(pool_stride, pool_stride),
padding='VALID',
hidden_w_init=tf.constant_initializer(1),
hidden_nonlinearity=self.hidden_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
result = self.sess.run(self.cnn2,
feed_dict={self._input_ph: self.obs_input})
two_layer = len(filters) == 2
# get weight values
with tf.compat.v1.variable_scope('CNN', reuse=True):
h0_w = tf.compat.v1.get_variable('cnn1/h0/weight').eval()
h0_b = tf.compat.v1.get_variable('cnn1/h0/bias').eval()
if two_layer:
h1_w = tf.compat.v1.get_variable('cnn1/h1/weight').eval()
h1_b = tf.compat.v1.get_variable('cnn1/h1/bias').eval()
filter_weights = (h0_w, h1_w) if two_layer else (h0_w, )
filter_bias = (h0_b, h1_b) if two_layer else (h0_b, )
input_val = self.obs_input
# convolution according to TensorFlow's approach
# and perform max pooling on each layer
for filter_iter, filter_weight, _filter_bias, in_channel in zip(
filters, filter_weights, filter_bias, in_channels):
input_val = convolve(_input=input_val,
filter_weights=(filter_weight, ),
filter_bias=(_filter_bias, ),
strides=strides,
filters=(filter_iter, ),
in_channels=(in_channel, ),
hidden_nonlinearity=self.hidden_nonlinearity)
# max pooling
input_val = max_pooling(_input=input_val,
pool_shape=pool_shape,
pool_stride=pool_stride)
# flatten
dense_out = input_val.reshape((self.batch_size, -1)).astype(np.float32)
np.testing.assert_array_equal(dense_out, result)
def test_invalid_padding(self):
with pytest.raises(ValueError):
with tf.compat.v1.variable_scope('CNN'):
self.cnn = cnn(input_var=self._input_ph,
filters=((32, (3, 3)), ),
strides=(1, ),
name='cnn',
padding='UNKNOWN')
def test_invalid_padding_max_pooling(self):
with pytest.raises(ValueError):
with tf.compat.v1.variable_scope('CNN'):
self.cnn = cnn_with_max_pooling(input_var=self._input_ph,
filters=((32, (3, 3)), ),
strides=(1, ),
name='cnn',
pool_shapes=(1, 1),
pool_strides=(1, 1),
padding='UNKNOWN')
| 12,923 | 44.992883 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_cnn_mlp_merge_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models import CNNMLPMergeModel
from tests.fixtures import TfGraphTestCase
class TestCNNMLPMergeModel(TfGraphTestCase):
    """Unit tests for CNNMLPMergeModel (CNN features merged with an action MLP)."""

    def setup_method(self):
        """Create image/action inputs and their placeholders."""
        super().setup_method()
        self.batch_size = 1
        self.input_width = 10
        self.input_height = 10
        self.obs_input = np.ones(
            (self.batch_size, self.input_width, self.input_height, 3))
        # skip batch size
        self.input_shape = (self.input_width, self.input_height, 3)
        self.obs_ph = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, ) +
                                               self.input_shape,
                                               name='input')
        self.action_input = np.ones((self.batch_size, 3))
        self.action_ph = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, ) +
                                                  self.action_input.shape[1:],
                                                  name='input')
        # NOTE(review): duplicate assignment — action_input is already set
        # identically above; harmless but likely unintended.
        self.action_input = np.ones((self.batch_size, 3))
        self.hidden_nonlinearity = tf.nn.relu

    # yapf: disable
    @pytest.mark.parametrize('filters, in_channels, strides, hidden_sizes', [
        (((32, (1, 1)), ), (3, ), (1, ), (1, )),  # noqa: E122
        (((32, (3, 3)), ), (3, ), (1, ), (2, )),
        (((32, (3, 3)), ), (3, ), (2, ), (3, )),
        (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1), (1, )),
        (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1), (2, )),
        (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2), (3, )),
    ])
    # yapf: enable
    def test_output_value(self, filters, in_channels, strides, hidden_sizes):
        """Model output matches a manual forward pass through the merge MLP."""
        model = CNNMLPMergeModel(filters=filters,
                                 strides=strides,
                                 hidden_sizes=hidden_sizes,
                                 action_merge_layer=1,
                                 name='cnn_mlp_merge_model1',
                                 padding='VALID',
                                 cnn_hidden_w_init=tf.constant_initializer(1),
                                 hidden_nonlinearity=self.hidden_nonlinearity)
        model_out = model.build(self.obs_ph, self.action_ph).outputs
        filter_sum = 1
        # filter value after 3 layers of conv
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        # flatten
        cnn_output = np.full((self.batch_size, flatten_shape),
                             filter_sum,
                             dtype=np.float32)
        with tf.compat.v1.variable_scope('cnn_mlp_merge_model1/MLPMergeModel',
                                         reuse=True):
            h0_w = tf.compat.v1.get_variable('mlp_concat/hidden_0/kernel')
            h0_b = tf.compat.v1.get_variable('mlp_concat/hidden_0/bias')
            out_w = tf.compat.v1.get_variable('mlp_concat/output/kernel')
            out_b = tf.compat.v1.get_variable('mlp_concat/output/bias')
        mlp_output = self.sess.run(model_out,
                                   feed_dict={
                                       self.obs_ph: self.obs_input,
                                       self.action_ph: self.action_input
                                   })
        # First layer
        h0_in = tf.matmul(cnn_output, h0_w) + h0_b
        h0_out = self.hidden_nonlinearity(h0_in)
        # output
        h1_in = tf.matmul(tf.concat([h0_out, self.action_input], 1),
                          out_w) + out_b
        # eval output
        out = self.sess.run(h1_in,
                            feed_dict={
                                self.obs_ph: self.obs_input,
                                self.action_ph: self.action_input
                            })
        np.testing.assert_array_equal(out, mlp_output)

    @pytest.mark.parametrize(
        'filters, in_channels, strides, pool_strides, pool_shapes',
        [
            (((32, (1, 1)), ), (3, ), (1, ), (1, 1), (1, 1)),  # noqa: E122
            (((32, (3, 3)), ), (3, ), (1, ), (2, 2), (1, 1)),
            (((32, (3, 3)), ), (3, ), (1, ), (1, 1), (2, 2)),
            (((32, (3, 3)), ), (3, ), (1, ), (2, 2), (2, 2)),
            (((32, (3, 3)), ), (3, ), (2, ), (1, 1), (2, 2)),
            (((32, (3, 3)), ), (3, ), (2, ), (2, 2), (2, 2)),
            (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1), (1, 1), (1, 1)),
            (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1), (1, 1), (1, 1)),
            (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2), (1, 1), (1, 1)),
        ])
    def test_output_value_max_pooling(self, filters, in_channels, strides,
                                      pool_strides, pool_shapes):
        """Same manual-forward check with max pooling enabled in the CNN."""
        model = CNNMLPMergeModel(filters=filters,
                                 strides=strides,
                                 name='cnn_mlp_merge_model2',
                                 padding='VALID',
                                 max_pooling=True,
                                 action_merge_layer=1,
                                 pool_strides=pool_strides,
                                 pool_shapes=pool_shapes,
                                 cnn_hidden_w_init=tf.constant_initializer(1),
                                 hidden_nonlinearity=self.hidden_nonlinearity)
        model_out = model.build(self.obs_ph, self.action_ph).outputs
        filter_sum = 1
        # filter value after 3 layers of conv
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel
        # Spatial size shrinks by conv (VALID) then by pooling at each layer.
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            height_size = int(
                (height_size - pool_shapes[0]) / pool_strides[0]) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
            width_size = int(
                (width_size - pool_shapes[1]) / pool_strides[1]) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        # flatten
        cnn_output = np.full((self.batch_size, flatten_shape),
                             filter_sum,
                             dtype=np.float32)
        # feed cnn output to MLPMergeModel
        with tf.compat.v1.variable_scope('cnn_mlp_merge_model2/MLPMergeModel',
                                         reuse=True):
            h0_w = tf.compat.v1.get_variable('mlp_concat/hidden_0/kernel')
            h0_b = tf.compat.v1.get_variable('mlp_concat/hidden_0/bias')
            out_w = tf.compat.v1.get_variable('mlp_concat/output/kernel')
            out_b = tf.compat.v1.get_variable('mlp_concat/output/bias')
        mlp_output = self.sess.run(model_out,
                                   feed_dict={
                                       self.obs_ph: self.obs_input,
                                       self.action_ph: self.action_input
                                   })
        # First layer
        h0_in = tf.matmul(cnn_output, h0_w) + h0_b
        h0_out = self.hidden_nonlinearity(h0_in)
        # output
        h1_in = tf.matmul(tf.concat([h0_out, self.action_input], 1),
                          out_w) + out_b
        # eval output
        out = self.sess.run(h1_in,
                            feed_dict={
                                self.obs_ph: self.obs_input,
                                self.action_ph: self.action_input
                            })
        np.testing.assert_array_equal(out, mlp_output)

    # yapf: disable
    @pytest.mark.parametrize('filters, strides', [
        (((32, (1, 1)), ), (1, )),  # noqa: E122
        (((32, (3, 3)), ), (1, )),
        (((32, (3, 3)), ), (2, )),
        (((32, (1, 1)), (64, (1, 1))), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (2, 2)),
    ])
    # yapf: enable
    def test_is_pickleable(self, filters, strides):
        """Pickling then unpickling the model reproduces identical outputs."""
        model = CNNMLPMergeModel(filters=filters,
                                 strides=strides,
                                 name='cnn_mlp_merge_model',
                                 padding='VALID',
                                 cnn_hidden_w_init=tf.constant_initializer(1),
                                 hidden_nonlinearity=None)
        outputs = model.build(self.obs_ph).outputs
        with tf.compat.v1.variable_scope(
                'cnn_mlp_merge_model/MLPMergeModel/mlp_concat', reuse=True):
            bias = tf.compat.v1.get_variable('output/bias')
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(outputs,
                                feed_dict={self.obs_ph: self.obs_input})
        h = pickle.dumps(model)
        # Rebuild the pickled model in a fresh graph/session and compare.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            input_ph = tf.compat.v1.placeholder(tf.float32,
                                                shape=(None, ) +
                                                self.input_shape,
                                                name='input')
            outputs = model_pickled.build(input_ph).outputs
            output2 = sess.run(outputs, feed_dict={input_ph: self.obs_input})
            assert np.array_equal(output1, output2)
| 9,887 | 42.946667 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_cnn_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models import CNNModel
from garage.tf.models import CNNModelWithMaxPooling
from tests.fixtures import TfGraphTestCase
class TestCNNModel(TfGraphTestCase):
    """Unit tests for CNNModel and CNNModelWithMaxPooling."""

    def setup_method(self):
        """Create a batch of all-ones images and the matching placeholder."""
        super().setup_method()
        self.batch_size = 5
        self.input_width = 10
        self.input_height = 10
        self.obs_input = np.ones(
            (self.batch_size, self.input_width, self.input_height, 3))
        # pylint: disable=unsubscriptable-object
        input_shape = self.obs_input.shape[1:]  # height, width, channel
        self._input_ph = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, ) + input_shape,
                                                  name='input')

    # yapf: disable
    @pytest.mark.parametrize('filters, in_channels, strides', [
        (((32, (1, 1)),), (3, ), (1, )),  # noqa: E122
        (((32, (3, 3)),), (3, ), (1, )),
        (((32, (3, 3)),), (3, ), (2, )),
        (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2)),
    ])
    # yapf: enable
    def test_output_value(self, filters, in_channels, strides):
        """All-ones weights on all-ones input yield a known constant output."""
        model = CNNModel(filters=filters,
                         strides=strides,
                         name='cnn_model',
                         padding='VALID',
                         hidden_w_init=tf.constant_initializer(1),
                         hidden_nonlinearity=None)
        outputs = model.build(self._input_ph).outputs
        output = self.sess.run(outputs,
                               feed_dict={self._input_ph: self.obs_input})
        filter_sum = 1
        # filter value after 3 layers of conv
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        # flatten
        expected_output = np.full((self.batch_size, flatten_shape),
                                  filter_sum,
                                  dtype=np.float32)
        assert np.array_equal(output, expected_output)

    # yapf: disable
    @pytest.mark.parametrize(
        'filters, in_channels, strides, pool_strides, pool_shapes',
        [
            (((32, (1, 1)), ), (3, ), (1, ), (1, 1), (1, 1)),  # noqa: E122
            (((32, (3, 3)), ), (3, ), (1, ), (2, 2), (1, 1)),
            (((32, (3, 3)), ), (3, ), (1, ), (1, 1), (2, 2)),
            (((32, (3, 3)), ), (3, ), (1, ), (2, 2), (2, 2)),
            (((32, (3, 3)), ), (3, ), (2, ), (1, 1), (2, 2)),
            (((32, (3, 3)), ), (3, ), (2, ), (2, 2), (2, 2)),
            (((32, (1, 1)), (64, (1, 1))), (3, 32), (1, 1), (1, 1), (1, 1)),
            (((32, (3, 3)), (64, (3, 3))), (3, 32), (1, 1), (1, 1), (1, 1)),
            (((32, (3, 3)), (64, (3, 3))), (3, 32), (2, 2), (1, 1), (1, 1)),
        ])
    # yapf: enable
    def test_output_value_max_pooling(self, filters, in_channels, strides,
                                      pool_strides, pool_shapes):
        """Same constant-output check with max pooling after each conv."""
        model = CNNModelWithMaxPooling(
            filters=filters,
            strides=strides,
            name='cnn_model',
            padding='VALID',
            pool_strides=pool_strides,
            pool_shapes=pool_shapes,
            hidden_w_init=tf.constant_initializer(1),
            hidden_nonlinearity=None)
        outputs = model.build(self._input_ph).outputs
        output = self.sess.run(outputs,
                               feed_dict={self._input_ph: self.obs_input})
        filter_sum = 1
        # filter value after 3 layers of conv
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_sum *= filter_iter[1][0] * filter_iter[1][1] * in_channel
        # Spatial size shrinks by conv (VALID) then by pooling at each layer.
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            height_size = int(
                (height_size - pool_shapes[0]) / pool_strides[0]) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
            width_size = int(
                (width_size - pool_shapes[1]) / pool_strides[1]) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        # flatten
        expected_output = np.full((self.batch_size, flatten_shape),
                                  filter_sum,
                                  dtype=np.float32)
        assert np.array_equal(output, expected_output)

    # yapf: disable
    @pytest.mark.parametrize('filters, strides', [
        (((32, (1, 1)),), (1, )),  # noqa: E122
        (((32, (3, 3)),), (1, )),
        (((32, (3, 3)),), (2, )),
        (((32, (1, 1)), (64, (1, 1))), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (1, 1)),
        (((32, (3, 3)), (64, (3, 3))), (2, 2)),
    ])
    # yapf: enable
    def test_is_pickleable(self, filters, strides):
        """Pickling then unpickling the model reproduces identical outputs."""
        model = CNNModel(filters=filters,
                         strides=strides,
                         name='cnn_model',
                         padding='VALID',
                         hidden_w_init=tf.constant_initializer(1),
                         hidden_nonlinearity=None)
        outputs = model.build(self._input_ph).outputs
        with tf.compat.v1.variable_scope('cnn_model/cnn/h0', reuse=True):
            bias = tf.compat.v1.get_variable('bias')
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(outputs,
                                feed_dict={self._input_ph: self.obs_input})
        h = pickle.dumps(model)
        # Rebuild the pickled model in a fresh graph/session and compare.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            # pylint: disable=unsubscriptable-object
            input_shape = self.obs_input.shape[1:]  # height, width, channel
            input_ph = tf.compat.v1.placeholder(tf.float32,
                                                shape=(None, ) + input_shape,
                                                name='input')
            outputs = model_pickled.build(input_ph).outputs
            output2 = sess.run(outputs, feed_dict={input_ph: self.obs_input})
            assert np.array_equal(output1, output2)
| 6,701 | 41.150943 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_gaussian_cnn_model.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models import GaussianCNNModel
from tests.fixtures import TfGraphTestCase
class TestGaussianCNNModel(TfGraphTestCase):
    def setup_method(self):
        """Create a batch of all-ones images and the matching placeholder."""
        super().setup_method()
        self.batch_size = 5
        self.input_width = 10
        self.input_height = 10
        self.obs = np.full(
            (self.batch_size, self.input_width, self.input_height, 3), 1)
        input_shape = self.obs.shape[1:]  # height, width, channel
        self._input_ph = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, ) + input_shape,
                                                  name='input')
    @mock.patch('tensorflow.random.normal')
    @pytest.mark.parametrize('filters, in_channels, strides, output_dim, '
                             'hidden_sizes',
                             [(((3, (1, 1)), ), (3, ), (1, ), 1, (1, )),
                              (((3, (3, 3)), ), (3, ), (1, ), 2, (2, )),
                              (((3, (3, 3)), ), (3, ), (2, ), 1, (1, 1)),
                              (((3, (1, 1)), (3, (1, 1))), (3, 3), (1, 1), 2,
                               (2, 2)),
                              (((3, (3, 3)), (3, (3, 3))), (3, 3), (2, 2), 2,
                               (2, 2))])
    def test_std_share_network_output_values(self, mock_normal, filters,
                                             in_channels, strides, output_dim,
                                             hidden_sizes):
        """Mean/std outputs match hand-computed values when std is shared."""
        # Fix the sampling noise so the sampled action is deterministic.
        mock_normal.return_value = 0.5
        model = GaussianCNNModel(filters=filters,
                                 strides=strides,
                                 padding='VALID',
                                 output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=True,
                                 hidden_nonlinearity=None,
                                 std_parameterization='exp',
                                 hidden_w_init=tf.constant_initializer(0.01),
                                 output_w_init=tf.constant_initializer(1))
        outputs = model.build(self._input_ph).outputs
        action, mean, log_std, std_param = self.sess.run(
            outputs[:-1], feed_dict={self._input_ph: self.obs})
        # Constant 0.01 weights on all-ones input give a closed-form output.
        filter_sum = 1
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_height = filter_iter[1][0]
            filter_width = filter_iter[1][1]
            filter_sum *= 0.01 * filter_height * filter_width * in_channel
        for _ in hidden_sizes:
            filter_sum *= 0.01
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        network_output = filter_sum * flatten_shape * np.prod(hidden_sizes)
        expected_mean = np.full((self.batch_size, output_dim),
                                network_output,
                                dtype=np.float32)
        expected_std_param = np.full((self.batch_size, output_dim),
                                     network_output,
                                     dtype=np.float32)
        expected_log_std = np.full((self.batch_size, output_dim),
                                   network_output,
                                   dtype=np.float32)
        assert np.allclose(mean, expected_mean)
        assert np.allclose(std_param, expected_std_param)
        assert np.allclose(log_std, expected_log_std)
        expected_action = 0.5 * np.exp(expected_log_std) + expected_mean
        assert np.allclose(action, expected_action, rtol=0, atol=0.1)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_std_share_network_shapes(self, output_dim, hidden_sizes):
        """Shared mean/std head has 2 * output_dim output units."""
        # should be 2 * output_dim
        model = GaussianCNNModel(filters=((3, (3, 3)), (6, (3, 3))),
                                 strides=[1, 1],
                                 padding='SAME',
                                 hidden_sizes=hidden_sizes,
                                 output_dim=output_dim,
                                 std_share_network=True)
        model.build(self._input_ph)
        with tf.compat.v1.variable_scope(model.name, reuse=True):
            std_share_output_weights = tf.compat.v1.get_variable(
                'dist_params/mean_std_network/output/kernel')
            std_share_output_bias = tf.compat.v1.get_variable(
                'dist_params/mean_std_network/output/bias')
        assert std_share_output_weights.shape[1] == output_dim * 2
        assert std_share_output_bias.shape == output_dim * 2
    @mock.patch('tensorflow.random.normal')
    @pytest.mark.parametrize('filters, in_channels, strides, output_dim, '
                             'hidden_sizes',
                             [(((3, (1, 1)), ), (3, ), (1, ), 1, (1, )),
                              (((3, (3, 3)), ), (3, ), (1, ), 2, (2, )),
                              (((3, (3, 3)), ), (3, ), (2, ), 1, (1, 1)),
                              (((3, (1, 1)), (3, (1, 1))), (3, 3), (1, 1), 2,
                               (2, 2)),
                              (((3, (3, 3)), (3, (3, 3))), (3, 3), (2, 2), 2,
                               (2, 2))])
    def test_without_std_share_network_output_values(self, mock_normal,
                                                     filters, in_channels,
                                                     strides, output_dim,
                                                     hidden_sizes):
        """With a separate parameterized std, log_std stays at log(init_std)."""
        # Fix the sampling noise so the sampled action is deterministic.
        mock_normal.return_value = 0.5
        model = GaussianCNNModel(filters=filters,
                                 strides=strides,
                                 padding='VALID',
                                 output_dim=output_dim,
                                 init_std=2,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=False,
                                 adaptive_std=False,
                                 hidden_nonlinearity=None,
                                 std_parameterization='exp',
                                 hidden_w_init=tf.constant_initializer(0.01),
                                 output_w_init=tf.constant_initializer(1))
        outputs = model.build(self._input_ph).outputs
        action, mean, log_std, std_param = self.sess.run(
            outputs[:-1], feed_dict={self._input_ph: self.obs})
        # Constant 0.01 weights on all-ones input give a closed-form mean.
        filter_sum = 1
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_height = filter_iter[1][0]
            filter_width = filter_iter[1][1]
            filter_sum *= 0.01 * filter_height * filter_width * in_channel
        for _ in hidden_sizes:
            filter_sum *= 0.01
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        network_output = filter_sum * flatten_shape * np.prod(hidden_sizes)
        expected_mean = np.full((self.batch_size, output_dim),
                                network_output,
                                dtype=np.float32)
        # init_std=2 with 'exp' parameterization stores log(2).
        expected_std_param = np.full((self.batch_size, output_dim),
                                     np.log(2),
                                     dtype=np.float32)
        expected_log_std = np.full((self.batch_size, output_dim),
                                   np.log(2),
                                   dtype=np.float32)
        assert np.allclose(mean, expected_mean)
        assert np.allclose(std_param, expected_std_param)
        assert np.allclose(log_std, expected_log_std)
        expected_action = 0.5 * np.exp(expected_log_std) + expected_mean
        assert np.allclose(action, expected_action, rtol=0, atol=0.1)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_without_std_share_network_shapes(self, output_dim, hidden_sizes):
        """Separate mean head and std parameter each have output_dim units."""
        model = GaussianCNNModel(filters=((3, (3, 3)), (6, (3, 3))),
                                 strides=[1, 1],
                                 padding='SAME',
                                 hidden_sizes=hidden_sizes,
                                 output_dim=output_dim,
                                 std_share_network=False,
                                 adaptive_std=False)
        model.build(self._input_ph)
        with tf.compat.v1.variable_scope(model.name, reuse=True):
            mean_output_weights = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/kernel')
            mean_output_bias = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/bias')
            log_std_output_weights = tf.compat.v1.get_variable(
                'dist_params/log_std_network/parameter')
        assert mean_output_weights.shape[1] == output_dim
        assert mean_output_bias.shape == output_dim
        assert log_std_output_weights.shape == output_dim
    @mock.patch('tensorflow.random.normal')
    @pytest.mark.parametrize('filters, in_channels, strides, output_dim, '
                             'hidden_sizes',
                             [(((3, (1, 1)), ), (3, ), (1, ), 1, (1, )),
                              (((3, (3, 3)), ), (3, ), (1, ), 2, (2, )),
                              (((3, (3, 3)), ), (3, ), (2, ), 1, (1, 1)),
                              (((3, (1, 1)), (3, (1, 1))), (3, 3), (1, 1), 2,
                               (2, 2)),
                              (((3, (3, 3)), (3, (3, 3))), (3, 3), (2, 2), 2,
                               (2, 2))])
    def test_adaptive_std_network_output_values(self, mock_normal, filters,
                                                in_channels, strides,
                                                output_dim, hidden_sizes):
        """With an adaptive std CNN mirroring the mean CNN, outputs match."""
        # Fix the sampling noise so the sampled action is deterministic.
        mock_normal.return_value = 0.5
        model = GaussianCNNModel(
            filters=filters,
            strides=strides,
            padding='VALID',
            output_dim=output_dim,
            hidden_sizes=hidden_sizes,
            std_share_network=False,
            adaptive_std=True,
            hidden_nonlinearity=None,
            std_hidden_nonlinearity=None,
            std_filters=filters,
            std_strides=strides,
            std_padding='VALID',
            std_hidden_sizes=hidden_sizes,
            hidden_w_init=tf.constant_initializer(0.01),
            output_w_init=tf.constant_initializer(1),
            std_hidden_w_init=tf.constant_initializer(0.01),
            std_output_w_init=tf.constant_initializer(1))
        outputs = model.build(self._input_ph).outputs
        action, mean, log_std, std_param = self.sess.run(
            outputs[:-1], feed_dict={self._input_ph: self.obs})
        # Both networks share identical constant weights, so mean == log_std.
        filter_sum = 1
        for filter_iter, in_channel in zip(filters, in_channels):
            filter_height = filter_iter[1][0]
            filter_width = filter_iter[1][1]
            filter_sum *= 0.01 * filter_height * filter_width * in_channel
        for _ in hidden_sizes:
            filter_sum *= 0.01
        height_size = self.input_height
        width_size = self.input_width
        for filter_iter, stride in zip(filters, strides):
            height_size = int((height_size - filter_iter[1][0]) / stride) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
        flatten_shape = height_size * width_size * filters[-1][0]
        network_output = filter_sum * flatten_shape * np.prod(hidden_sizes)
        expected_mean = np.full((self.batch_size, output_dim),
                                network_output,
                                dtype=np.float32)
        expected_std_param = np.full((self.batch_size, output_dim),
                                     network_output,
                                     dtype=np.float32)
        expected_log_std = np.full((self.batch_size, output_dim),
                                   network_output,
                                   dtype=np.float32)
        assert np.allclose(mean, expected_mean)
        assert np.allclose(std_param, expected_std_param)
        assert np.allclose(log_std, expected_log_std)
        expected_action = 0.5 * np.exp(expected_log_std) + expected_mean
        assert np.allclose(action, expected_action, rtol=0, atol=0.1)
    @pytest.mark.parametrize('output_dim, hidden_sizes, std_hidden_sizes',
                             [(1, (0, ), (0, )), (1, (1, ), (1, )),
                              (1, (2, ), (2, )), (2, (3, ), (3, )),
                              (2, (1, 1), (1, 1)), (3, (2, 2), (2, 2))])
    def test_adaptive_std_output_shape(self, output_dim, hidden_sizes,
                                       std_hidden_sizes):
        """Adaptive-std mean and log_std heads each have output_dim units."""
        model = GaussianCNNModel(
            filters=((3, (3, 3)), (6, (3, 3))),
            strides=[1, 1],
            padding='SAME',
            output_dim=output_dim,
            hidden_sizes=hidden_sizes,
            std_share_network=False,
            adaptive_std=True,
            hidden_nonlinearity=None,
            std_hidden_nonlinearity=None,
            std_filters=((3, (3, 3)), (6, (3, 3))),
            std_strides=[1, 1],
            std_padding='SAME',
            std_hidden_sizes=std_hidden_sizes,
            hidden_w_init=tf.constant_initializer(0.01),
            output_w_init=tf.constant_initializer(1),
            std_hidden_w_init=tf.constant_initializer(0.01),
            std_output_w_init=tf.constant_initializer(1))
        model.build(self._input_ph)
        with tf.compat.v1.variable_scope(model.name, reuse=True):
            mean_output_weights = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/kernel')
            mean_output_bias = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/bias')
            log_std_output_weights = tf.compat.v1.get_variable(
                'dist_params/log_std_network/output/kernel')
            log_std_output_bias = tf.compat.v1.get_variable(
                'dist_params/log_std_network/output/bias')
        assert mean_output_weights.shape[1] == output_dim
        assert mean_output_bias.shape == output_dim
        assert log_std_output_weights.shape[1] == output_dim
        assert log_std_output_bias.shape == output_dim
@pytest.mark.parametrize('output_dim, hidden_sizes',
[(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
(2, (1, 1)), (3, (2, 2))])
@mock.patch('tensorflow.random.normal')
def test_std_share_network_is_pickleable(self, mock_normal, output_dim,
hidden_sizes):
mock_normal.return_value = 0.5
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, 10, 10, 3))
model = GaussianCNNModel(filters=((3, (3, 3)), (6, (3, 3))),
strides=[1, 1],
padding='SAME',
hidden_sizes=hidden_sizes,
output_dim=output_dim,
std_share_network=True)
outputs = model.build(input_var).outputs
# get output bias
with tf.compat.v1.variable_scope('GaussianCNNModel', reuse=True):
bias = tf.compat.v1.get_variable(
'dist_params/mean_std_network/output/bias')
# assign it to all ones
bias.load(tf.ones_like(bias).eval())
output1 = self.sess.run(outputs[:-1], feed_dict={input_var: self.obs})
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, 10, 10, 3))
model_pickled = pickle.loads(h)
outputs = model_pickled.build(input_var).outputs
output2 = sess.run(outputs[:-1], feed_dict={input_var: self.obs})
assert np.array_equal(output1, output2)
@pytest.mark.parametrize('output_dim, hidden_sizes',
[(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
(2, (1, 1)), (3, (2, 2))])
@mock.patch('tensorflow.random.normal')
def test_without_std_share_network_is_pickleable(self, mock_normal,
output_dim, hidden_sizes):
mock_normal.return_value = 0.5
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, 10, 10, 3))
model = GaussianCNNModel(filters=((3, (3, 3)), (6, (3, 3))),
strides=[1, 1],
padding='SAME',
hidden_sizes=hidden_sizes,
output_dim=output_dim,
std_share_network=False,
adaptive_std=False)
outputs = model.build(input_var).outputs
# get output bias
with tf.compat.v1.variable_scope('GaussianCNNModel', reuse=True):
bias = tf.compat.v1.get_variable(
'dist_params/mean_network/output/bias')
# assign it to all ones
bias.load(tf.ones_like(bias).eval())
output1 = self.sess.run(outputs[:-1], feed_dict={input_var: self.obs})
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, 10, 10, 3))
model_pickled = pickle.loads(h)
outputs = model_pickled.build(input_var).outputs
output2 = sess.run(outputs[:-1], feed_dict={input_var: self.obs})
assert np.array_equal(output1, output2)
@pytest.mark.parametrize('output_dim, hidden_sizes, std_hidden_sizes',
[(1, (0, ), (0, )), (1, (1, ), (1, )),
(1, (2, ), (2, )), (2, (3, ), (3, )),
(2, (1, 1), (1, 1)), (3, (2, 2), (2, 2))])
@mock.patch('tensorflow.random.normal')
def test_adaptive_std_is_pickleable(self, mock_normal, output_dim,
hidden_sizes, std_hidden_sizes):
mock_normal.return_value = 0.5
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, 10, 10, 3))
model = GaussianCNNModel(filters=((3, (3, 3)), (6, (3, 3))),
strides=[1, 1],
padding='SAME',
output_dim=output_dim,
hidden_sizes=hidden_sizes,
std_share_network=False,
adaptive_std=True,
hidden_nonlinearity=None,
std_hidden_nonlinearity=None,
std_filters=((3, (3, 3)), (6, (3, 3))),
std_strides=[1, 1],
std_padding='SAME',
std_hidden_sizes=std_hidden_sizes,
hidden_w_init=tf.constant_initializer(1),
output_w_init=tf.constant_initializer(1),
std_hidden_w_init=tf.constant_initializer(1),
std_output_w_init=tf.constant_initializer(1))
outputs = model.build(input_var).outputs
# get output bias
with tf.compat.v1.variable_scope('GaussianCNNModel', reuse=True):
bias = tf.compat.v1.get_variable(
'dist_params/mean_network/output/bias')
# assign it to all ones
bias.load(tf.ones_like(bias).eval())
h = pickle.dumps(model)
output1 = self.sess.run(outputs[:-1], feed_dict={input_var: self.obs})
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, 10, 10, 3))
model_pickled = pickle.loads(h)
outputs = model_pickled.build(input_var).outputs
output2 = sess.run(outputs[:-1], feed_dict={input_var: self.obs})
assert np.array_equal(output1, output2)
@pytest.mark.parametrize('output_dim, hidden_sizes',
[(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
(2, (1, 1)), (3, (2, 2))])
@mock.patch('tensorflow.random.normal')
def test_softplus_output_values(self, mock_normal, output_dim,
hidden_sizes):
mock_normal.return_value = 0.5
filters = ((3, (3, 3)), (6, (3, 3)))
in_channels = [3, 3]
strides = [1, 1]
model = GaussianCNNModel(filters=filters,
strides=strides,
padding='VALID',
output_dim=output_dim,
init_std=2.0,
hidden_sizes=hidden_sizes,
std_share_network=False,
adaptive_std=False,
hidden_nonlinearity=None,
std_parameterization='softplus',
hidden_w_init=tf.constant_initializer(0.01),
output_w_init=tf.constant_initializer(1))
outputs = model.build(self._input_ph).outputs
action, mean, log_std, std_param = self.sess.run(
outputs[:-1], feed_dict={self._input_ph: self.obs})
filter_sum = 1
for filter_iter, in_channel in zip(filters, in_channels):
filter_height = filter_iter[1][0]
filter_width = filter_iter[1][1]
filter_sum *= 0.01 * filter_height * filter_width * in_channel
for _ in hidden_sizes:
filter_sum *= 0.01
height_size = self.input_height
width_size = self.input_width
for filter_iter, stride in zip(filters, strides):
height_size = int((height_size - filter_iter[1][0]) / stride) + 1
width_size = int((width_size - filter_iter[1][1]) / stride) + 1
flatten_shape = height_size * width_size * filters[-1][0]
network_output = filter_sum * flatten_shape * np.prod(hidden_sizes)
expected_mean = np.full((self.batch_size, output_dim),
network_output,
dtype=np.float32)
expected_std_param = np.full((self.batch_size, output_dim),
np.log(np.exp(2) - 1),
dtype=np.float32)
expected_log_std = np.log(np.log(1. + np.exp(expected_std_param)))
assert np.allclose(mean, expected_mean)
assert np.allclose(std_param, expected_std_param)
assert np.allclose(log_std, expected_log_std)
expected_action = 0.5 * np.exp(expected_log_std) + expected_mean
assert np.allclose(action, expected_action, rtol=0, atol=0.1)
@pytest.mark.parametrize('output_dim, hidden_sizes',
[(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
(2, (1, 1)), (3, (2, 2))])
def test_exp_min_std(self, output_dim, hidden_sizes):
filters = ((3, (3, 3)), (6, (3, 3)))
strides = [1, 1]
model = GaussianCNNModel(filters=filters,
strides=strides,
padding='VALID',
output_dim=output_dim,
init_std=2.0,
hidden_sizes=hidden_sizes,
std_share_network=False,
adaptive_std=False,
hidden_nonlinearity=None,
std_parameterization='exp',
min_std=10,
hidden_w_init=tf.constant_initializer(0.01),
output_w_init=tf.constant_initializer(1))
outputs = model.build(self._input_ph).outputs
_, _, log_std, std_param = self.sess.run(
outputs[:-1], feed_dict={self._input_ph: self.obs})
expected_log_std = np.full([1, output_dim], np.log(10))
expected_std_param = np.full([1, output_dim], np.log(10))
assert np.allclose(log_std, expected_log_std)
assert np.allclose(std_param, expected_std_param)
@pytest.mark.parametrize('output_dim, hidden_sizes',
[(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
(2, (1, 1)), (3, (2, 2))])
def test_exp_max_std(self, output_dim, hidden_sizes):
filters = ((3, (3, 3)), (6, (3, 3)))
strides = [1, 1]
model = GaussianCNNModel(filters=filters,
strides=strides,
padding='VALID',
output_dim=output_dim,
init_std=10.0,
hidden_sizes=hidden_sizes,
std_share_network=False,
adaptive_std=False,
hidden_nonlinearity=None,
std_parameterization='exp',
max_std=1.0,
hidden_w_init=tf.constant_initializer(0.01),
output_w_init=tf.constant_initializer(1))
outputs = model.build(self._input_ph).outputs
_, _, log_std, std_param = self.sess.run(
outputs[:-1], feed_dict={self._input_ph: self.obs})
expected_log_std = np.full([1, output_dim], np.log(1))
expected_std_param = np.full([1, output_dim], np.log(1))
assert np.allclose(log_std, expected_log_std)
assert np.allclose(std_param, expected_std_param)
@pytest.mark.parametrize('output_dim, hidden_sizes',
[(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
(2, (1, 1)), (3, (2, 2))])
def test_softplus_min_std(self, output_dim, hidden_sizes):
filters = ((3, (3, 3)), (6, (3, 3)))
strides = [1, 1]
model = GaussianCNNModel(filters=filters,
strides=strides,
padding='VALID',
output_dim=output_dim,
init_std=2.0,
hidden_sizes=hidden_sizes,
std_share_network=False,
adaptive_std=False,
hidden_nonlinearity=None,
std_parameterization='softplus',
min_std=10,
hidden_w_init=tf.constant_initializer(0.1),
output_w_init=tf.constant_initializer(1))
outputs = model.build(self._input_ph).outputs
_, _, log_std, std_param = self.sess.run(
outputs[:-1], feed_dict={self._input_ph: self.obs})
expected_log_std = np.full([1, output_dim], np.log(10))
expected_std_param = np.full([1, output_dim], np.log(np.exp(10) - 1))
assert np.allclose(log_std, expected_log_std)
assert np.allclose(std_param, expected_std_param)
@pytest.mark.parametrize('output_dim, hidden_sizes',
[(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
(2, (1, 1)), (3, (2, 2))])
def test_softplus_max_std(self, output_dim, hidden_sizes):
filters = ((3, (3, 3)), (6, (3, 3)))
strides = [1, 1]
model = GaussianCNNModel(filters=filters,
strides=strides,
padding='VALID',
output_dim=output_dim,
init_std=10.0,
hidden_sizes=hidden_sizes,
std_share_network=False,
adaptive_std=False,
hidden_nonlinearity=None,
std_parameterization='softplus',
max_std=1.0,
hidden_w_init=tf.constant_initializer(0.1),
output_w_init=tf.constant_initializer(1))
outputs = model.build(self._input_ph).outputs
_, _, log_std, std_param = self.sess.run(
outputs[:-1], feed_dict={self._input_ph: self.obs})
expected_log_std = np.full([1, output_dim], np.log(1))
expected_std_param = np.full([1, output_dim], np.log(np.exp(1) - 1))
assert np.allclose(log_std, expected_log_std, rtol=0, atol=0.0001)
assert np.allclose(std_param, expected_std_param, rtol=0, atol=0.0001)
def test_unknown_std_parameterization(self):
with pytest.raises(NotImplementedError):
_ = GaussianCNNModel(filters=(((3, 3), 3), ((3, 3), 6)),
strides=[1, 1],
padding='SAME',
hidden_sizes=(1, ),
output_dim=1,
std_parameterization='unknown')
| 30,504 | 48.201613 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_gaussian_gru_model.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.models import GaussianGRUModel
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_gru
class TestGaussianGRUModel(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 1
self.time_step = 2
self.feature_shape = 2
self.default_initializer = tf.constant_initializer(0.1)
self.obs_inputs = np.full(
(self.batch_size, self.time_step, self.feature_shape), 1.)
self.obs_input = np.full((self.batch_size, self.feature_shape), 1.)
self.input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
self.step_input_var = tf.compat.v1.placeholder(
tf.float32, shape=(None, self.feature_shape), name='step_input')
def test_dist(self):
model = GaussianGRUModel(output_dim=1, hidden_dim=1)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_hidden',
dtype=tf.float32)
dist = model.build(self.input_var, self.step_input_var,
step_hidden_var).dist
assert isinstance(dist, tfp.distributions.MultivariateNormalDiag)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_std_share_network_output_values(self, mock_normal, output_dim,
hidden_dim):
mock_normal.return_value = 0.5
model = GaussianGRUModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=True,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
(_, step_mean_var, step_log_std_var, step_hidden,
hidden_init_var) = model.build(self.input_var, self.step_input_var,
step_hidden_var).outputs
hidden1 = hidden2 = np.full((self.batch_size, hidden_dim),
hidden_init_var.eval())
for _ in range(self.time_step):
mean1, log_std1, hidden1 = self.sess.run(
[step_mean_var, step_log_std_var, step_hidden],
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden1
})
hidden2 = recurrent_step_gru(input_val=self.obs_input,
num_units=hidden_dim,
step_hidden=hidden2,
w_x_init=0.1,
w_h_init=0.1,
b_init=0.,
nonlinearity=None,
gate_nonlinearity=None)
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 0.1)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(mean1, output2)
assert np.allclose(log_std1, output2)
assert np.allclose(hidden1, hidden2)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
def test_std_share_network_shapes(self, output_dim, hidden_dim):
model = GaussianGRUModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=True,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
model.build(self.input_var, self.step_input_var, step_hidden_var)
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.compat.v1.variable_scope.
# A workaround is to access in tf.compat.v1.global_variables()
for var in tf.compat.v1.global_variables():
if 'output_layer/kernel' in var.name:
std_share_output_weights = var
if 'output_layer/bias' in var.name:
std_share_output_bias = var
assert std_share_output_weights.shape[1] == output_dim * 2
assert std_share_output_bias.shape == output_dim * 2
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim, init_std', [
(1, 1, 1),
(1, 1, 2),
(1, 2, 1),
(1, 2, 2),
(3, 3, 1),
(3, 3, 2),
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_without_std_share_network_output_values(self, mock_normal,
output_dim, hidden_dim,
init_std):
mock_normal.return_value = 0.5
model = GaussianGRUModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=False,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer,
init_std=init_std)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
(_, step_mean_var, step_log_std_var, step_hidden,
hidden_init_var) = model.build(self.input_var, self.step_input_var,
step_hidden_var).outputs
hidden1 = hidden2 = np.full((self.batch_size, hidden_dim),
hidden_init_var.eval())
for _ in range(self.time_step):
mean1, log_std1, hidden1 = self.sess.run(
[step_mean_var, step_log_std_var, step_hidden],
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden1
})
hidden2 = recurrent_step_gru(input_val=self.obs_input,
num_units=hidden_dim,
step_hidden=hidden2,
w_x_init=0.1,
w_h_init=0.1,
b_init=0.,
nonlinearity=None,
gate_nonlinearity=None)
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 0.1)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(mean1, output2)
expected_log_std = np.full((self.batch_size, output_dim),
np.log(init_std))
assert np.allclose(log_std1, expected_log_std)
assert np.allclose(hidden1, hidden2)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
def test_without_std_share_network_shapes(self, output_dim, hidden_dim):
model = GaussianGRUModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=False,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
model.build(self.input_var, self.step_input_var, step_hidden_var)
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.compat.v1.variable_scope.
# A workaround is to access in tf.compat.v1.global_variables()
for var in tf.compat.v1.global_variables():
if 'output_layer/kernel' in var.name:
std_share_output_weights = var
if 'output_layer/bias' in var.name:
std_share_output_bias = var
if 'log_std_param/parameter' in var.name:
log_std_param = var
assert std_share_output_weights.shape[1] == output_dim
assert std_share_output_bias.shape == output_dim
assert log_std_param.shape == output_dim
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_std_share_network_is_pickleable(self, mock_normal, output_dim,
hidden_dim):
mock_normal.return_value = 0.5
model = GaussianGRUModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=True,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
(dist, step_mean_var, step_log_std_var, step_hidden,
_) = model.build(self.input_var, self.step_input_var,
step_hidden_var).outputs
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.compat.v1.variable_scope.
# A workaround is to access in tf.compat.v1.global_variables()
for var in tf.compat.v1.global_variables():
if 'output_layer/bias' in var.name:
var.load(tf.ones_like(var).eval())
hidden = np.zeros((self.batch_size, hidden_dim))
outputs1 = self.sess.run([dist.loc, dist.scale.diag],
feed_dict={self.input_var: self.obs_inputs})
output1 = self.sess.run([step_mean_var, step_log_std_var, step_hidden],
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden
}) # noqa: E126
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
model_pickled = pickle.loads(h)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
step_input_var = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self.feature_shape),
name='step_input')
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='initial_hidden',
dtype=tf.float32)
(dist2, step_mean_var2, step_log_std_var2, step_hidden2,
_) = model_pickled.build(input_var, step_input_var,
step_hidden_var).outputs
outputs2 = sess.run([dist2.loc, dist2.scale.diag],
feed_dict={input_var: self.obs_inputs})
output2 = sess.run(
[step_mean_var2, step_log_std_var2, step_hidden2],
feed_dict={
step_input_var: self.obs_input,
step_hidden_var: hidden
})
assert np.array_equal(outputs1, outputs2)
assert np.array_equal(output1, output2)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_without_std_share_network_is_pickleable(self, mock_normal,
output_dim, hidden_dim):
mock_normal.return_value = 0.5
model = GaussianGRUModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=False,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
(dist, step_mean_var, step_log_std_var, step_hidden,
_) = model.build(self.input_var, self.step_input_var,
step_hidden_var).outputs
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.compat.v1.variable_scope.
# A workaround is to access in tf.compat.v1.global_variables()
for var in tf.compat.v1.global_variables():
if 'output_layer/bias' in var.name:
var.load(tf.ones_like(var).eval())
hidden = np.zeros((self.batch_size, hidden_dim))
outputs1 = self.sess.run([dist.loc, dist.scale.diag],
feed_dict={self.input_var: self.obs_inputs})
output1 = self.sess.run([step_mean_var, step_log_std_var, step_hidden],
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden
}) # noqa: E126
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
model_pickled = pickle.loads(h)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
step_input_var = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self.feature_shape),
name='step_input')
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='initial_hidden',
dtype=tf.float32)
(dist2, step_mean_var2, step_log_std_var2, step_hidden2,
_) = model_pickled.build(input_var, step_input_var,
step_hidden_var).outputs
outputs2 = sess.run([dist2.loc, dist2.scale.diag],
feed_dict={input_var: self.obs_inputs})
output2 = sess.run(
[step_mean_var2, step_log_std_var2, step_hidden2],
feed_dict={
step_input_var: self.obs_input,
step_hidden_var: hidden
})
assert np.array_equal(outputs1, outputs2)
assert np.array_equal(output1, output2)
| 18,127 | 47.084881 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_gaussian_lstm_model.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.models import GaussianLSTMModel
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_lstm
class TestGaussianLSTMModel(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 1
self.time_step = 2
self.feature_shape = 2
self.default_initializer = tf.constant_initializer(0.1)
self.obs_inputs = np.full(
(self.batch_size, self.time_step, self.feature_shape), 1.)
self.obs_input = np.full((self.batch_size, self.feature_shape), 1.)
self.input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
self.step_input_var = tf.compat.v1.placeholder(
tf.float32, shape=(None, self.feature_shape), name='step_input')
def test_dist(self):
model = GaussianLSTMModel(output_dim=1, hidden_dim=1)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_cell',
dtype=tf.float32)
dist = model.build(self.input_var, self.step_input_var,
step_hidden_var, step_cell_var).dist
assert isinstance(dist, tfp.distributions.MultivariateNormalDiag)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_std_share_network_output_values(self, mock_normal, output_dim,
hidden_dim):
mock_normal.return_value = 0.5
model = GaussianLSTMModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=True,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_cell',
dtype=tf.float32)
(_, step_mean_var, step_log_std_var, step_hidden, step_cell,
hidden_init_var, cell_init_var) = model.build(self.input_var,
self.step_input_var,
step_hidden_var,
step_cell_var).outputs
hidden1 = hidden2 = np.full((self.batch_size, hidden_dim),
hidden_init_var.eval())
cell1 = cell2 = np.full((self.batch_size, hidden_dim),
cell_init_var.eval())
for _ in range(self.time_step):
mean1, log_std1, hidden1, cell1 = self.sess.run(
[step_mean_var, step_log_std_var, step_hidden, step_cell],
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden1,
step_cell_var: cell1
})
hidden2, cell2 = recurrent_step_lstm(input_val=self.obs_input,
num_units=hidden_dim,
step_hidden=hidden2,
step_cell=cell2,
w_x_init=0.1,
w_h_init=0.1,
b_init=0.,
nonlinearity=None,
gate_nonlinearity=None)
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 0.1)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(mean1, output2)
assert np.allclose(log_std1, output2)
assert np.allclose(hidden1, hidden2)
assert np.allclose(cell1, cell2)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
def test_std_share_network_shapes(self, output_dim, hidden_dim):
model = GaussianLSTMModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=True,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_cell',
dtype=tf.float32)
model.build(self.input_var, self.step_input_var, step_hidden_var,
step_cell_var)
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.compat.v1.variable_scope.
# A workaround is to access in tf.compat.v1.global_variables()
for var in tf.compat.v1.global_variables():
if 'output_layer/kernel' in var.name:
std_share_output_weights = var
if 'output_layer/bias' in var.name:
std_share_output_bias = var
assert std_share_output_weights.shape[1] == output_dim * 2
assert std_share_output_bias.shape == output_dim * 2
@pytest.mark.parametrize('output_dim, hidden_dim, init_std', [(1, 1, 1),
(1, 1, 2),
(1, 2, 1),
(1, 2, 2),
(3, 3, 1),
(3, 3, 2)])
@mock.patch('tensorflow.random.normal')
def test_without_std_share_network_output_values(self, mock_normal,
output_dim, hidden_dim,
init_std):
mock_normal.return_value = 0.5
model = GaussianLSTMModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=False,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer,
init_std=init_std)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_cell',
dtype=tf.float32)
(_, step_mean_var, step_log_std_var, step_hidden, step_cell,
hidden_init_var, cell_init_var) = model.build(self.input_var,
self.step_input_var,
step_hidden_var,
step_cell_var).outputs
hidden1 = hidden2 = np.full((self.batch_size, hidden_dim),
hidden_init_var.eval())
cell1 = cell2 = np.full((self.batch_size, hidden_dim),
cell_init_var.eval())
for _ in range(self.time_step):
mean1, log_std1, hidden1, cell1 = self.sess.run(
[step_mean_var, step_log_std_var, step_hidden, step_cell],
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden1,
step_cell_var: cell1
})
hidden2, cell2 = recurrent_step_lstm(input_val=self.obs_input,
num_units=hidden_dim,
step_hidden=hidden2,
step_cell=cell2,
w_x_init=0.1,
w_h_init=0.1,
b_init=0.,
nonlinearity=None,
gate_nonlinearity=None)
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 0.1)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(mean1, output2)
expected_log_std = np.full((self.batch_size, output_dim),
np.log(init_std))
assert np.allclose(log_std1, expected_log_std)
assert np.allclose(hidden1, hidden2)
assert np.allclose(cell1, cell2)
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
def test_without_std_share_network_shapes(self, output_dim, hidden_dim):
model = GaussianLSTMModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=False,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_cell',
dtype=tf.float32)
model.build(self.input_var, self.step_input_var, step_hidden_var,
step_cell_var)
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.compat.v1.variable_scope.
# A workaround is to access in tf.compat.v1.global_variables()
for var in tf.compat.v1.global_variables():
if 'output_layer/kernel' in var.name:
std_share_output_weights = var
if 'output_layer/bias' in var.name:
std_share_output_bias = var
if 'log_std_param/parameter' in var.name:
log_std_param = var
assert std_share_output_weights.shape[1] == output_dim
assert std_share_output_bias.shape == output_dim
assert log_std_param.shape == output_dim
# yapf: disable
@pytest.mark.parametrize('output_dim, hidden_dim', [
(1, 1),
(2, 2),
(3, 3)
])
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_std_share_network_is_pickleable(self, mock_normal, output_dim,
hidden_dim):
mock_normal.return_value = 0.5
model = GaussianLSTMModel(output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=True,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='step_cell',
dtype=tf.float32)
(dist, step_mean_var, step_log_std_var, step_hidden, step_cell, _,
_) = model.build(self.input_var, self.step_input_var, step_hidden_var,
step_cell_var).outputs
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.compat.v1.variable_scope.
# A workaround is to access in tf.compat.v1.global_variables()
for var in tf.compat.v1.global_variables():
if 'output_layer/bias' in var.name:
var.load(tf.ones_like(var).eval())
hidden = np.zeros((self.batch_size, hidden_dim))
cell = np.zeros((self.batch_size, hidden_dim))
outputs1 = self.sess.run([dist.loc, dist.scale.diag],
feed_dict={self.input_var: self.obs_inputs})
output1 = self.sess.run(
[step_mean_var, step_log_std_var, step_hidden, step_cell],
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden,
step_cell_var: cell
})
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
model_pickled = pickle.loads(h)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
step_input_var = tf.compat.v1.placeholder(
tf.float32,
shape=(None, self.feature_shape),
name='step_input')
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='initial_hidden',
dtype=tf.float32)
step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
hidden_dim),
name='initial_cell',
dtype=tf.float32)
(dist2, step_mean_var2, step_log_std_var2, step_hidden2,
step_cell2, _, _) = model_pickled.build(input_var, step_input_var,
step_hidden_var,
step_cell_var).outputs
outputs2 = sess.run([dist2.loc, dist2.scale.diag],
feed_dict={input_var: self.obs_inputs})
output2 = sess.run(
[step_mean_var2, step_log_std_var2, step_hidden2, step_cell2],
feed_dict={
step_input_var: self.obs_input,
step_hidden_var: hidden,
step_cell_var: cell
})
assert np.array_equal(outputs1, outputs2)
assert np.array_equal(output1, output2)
    # yapf: disable
    @pytest.mark.parametrize('output_dim, hidden_dim', [
        (1, 1),
        (2, 2),
        (3, 3)
    ])
    # yapf: enable
    @mock.patch('tensorflow.random.normal')
    def test_without_std_share_network_is_pickleable(self, mock_normal,
                                                     output_dim, hidden_dim):
        """Pickling/unpickling a separate-std LSTM model preserves outputs.

        Builds the model, forces the output-layer bias to ones so weights are
        deterministic, records full-sequence and single-step outputs, then
        rebuilds from a pickle in a fresh graph/session and checks equality.
        """
        # Fix the sampling noise so outputs are reproducible across graphs.
        mock_normal.return_value = 0.5
        model = GaussianLSTMModel(output_dim=output_dim,
                                  hidden_dim=hidden_dim,
                                  std_share_network=False,
                                  hidden_nonlinearity=None,
                                  recurrent_nonlinearity=None,
                                  hidden_w_init=self.default_initializer,
                                  recurrent_w_init=self.default_initializer,
                                  output_w_init=self.default_initializer)
        step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                          hidden_dim),
                                                   name='step_hidden',
                                                   dtype=tf.float32)
        step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                        hidden_dim),
                                                 name='step_cell',
                                                 dtype=tf.float32)
        (dist, step_mean_var, step_log_std_var, step_hidden, step_cell, _,
         _) = model.build(self.input_var, self.step_input_var, step_hidden_var,
                          step_cell_var).outputs
        # output layer is a tf.keras.layers.Dense object,
        # which cannot be access by tf.compat.v1.variable_scope.
        # A workaround is to access in tf.compat.v1.global_variables()
        for var in tf.compat.v1.global_variables():
            if 'output_layer/bias' in var.name:
                var.load(tf.ones_like(var).eval())
        hidden = np.zeros((self.batch_size, hidden_dim))
        cell = np.zeros((self.batch_size, hidden_dim))
        # Reference outputs computed before pickling.
        outputs1 = self.sess.run([dist.loc, dist.scale.diag],
                                 feed_dict={self.input_var: self.obs_inputs})
        output1 = self.sess.run(
            [step_mean_var, step_log_std_var, step_hidden, step_cell],
            feed_dict={
                self.step_input_var: self.obs_input,
                step_hidden_var: hidden,
                step_cell_var: cell
            })
        h = pickle.dumps(model)
        # Rebuild the pickled model in a brand-new graph and session.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, None,
                                                        self.feature_shape),
                                                 name='input')
            step_input_var = tf.compat.v1.placeholder(
                tf.float32,
                shape=(None, self.feature_shape),
                name='step_input')
            step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                              hidden_dim),
                                                       name='initial_hidden',
                                                       dtype=tf.float32)
            step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                            hidden_dim),
                                                     name='initial_cell',
                                                     dtype=tf.float32)
            (dist2, step_mean_var2, step_log_std_var2, step_hidden2,
             step_cell2, _, _) = model_pickled.build(input_var, step_input_var,
                                                     step_hidden_var,
                                                     step_cell_var).outputs
            outputs2 = sess.run([dist2.loc, dist2.scale.diag],
                                feed_dict={input_var: self.obs_inputs})
            output2 = sess.run(
                [step_mean_var2, step_log_std_var2, step_hidden2, step_cell2],
                feed_dict={
                    step_input_var: self.obs_input,
                    step_hidden_var: hidden,
                    step_cell_var: cell
                })
            assert np.array_equal(outputs1, outputs2)
            assert np.array_equal(output1, output2)
| 22,399 | 50.494253 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_gaussian_mlp_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.models import GaussianMLPModel
from tests.fixtures import TfGraphTestCase
class TestGaussianMLPModel(TfGraphTestCase):
    def setup_method(self):
        """Create a shared (batch, time, 5) input placeholder and all-ones obs."""
        super().setup_method()
        self.input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, None, 5))
        self.obs = np.ones((1, 1, 5))
def test_dist(self):
model = GaussianMLPModel(output_dim=1)
dist = model.build(self.input_var).dist
assert isinstance(dist, tfp.distributions.MultivariateNormalDiag)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_std_share_network_output_values(self, output_dim, hidden_sizes):
        """With all-ones weights and no nonlinearity, mean == log_std == 5 * prod(hidden_sizes)."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=True,
                                 hidden_nonlinearity=None,
                                 std_parameterization='exp',
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        mean, log_std = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})
        # Input is ones of width 5, so a linear ones-weight stack multiplies
        # the activations by each hidden width in turn.
        expected_mean = np.full([1, 1, output_dim], 5 * np.prod(hidden_sizes))
        expected_log_std = np.full([1, 1, output_dim],
                                   5 * np.prod(hidden_sizes))
        assert np.array_equal(mean, expected_mean)
        assert np.array_equal(log_std, expected_log_std)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_std_share_network_shapes(self, output_dim, hidden_sizes):
        """A shared mean/std network has a single output head of width 2 * output_dim."""
        # should be 2 * output_dim
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=True)
        model.build(self.input_var)
        with tf.compat.v1.variable_scope(model.name, reuse=True):
            std_share_output_weights = tf.compat.v1.get_variable(
                'dist_params/mean_std_network/output/kernel')
            std_share_output_bias = tf.compat.v1.get_variable(
                'dist_params/mean_std_network/output/bias')
        assert std_share_output_weights.shape[1] == output_dim * 2
        assert std_share_output_bias.shape == output_dim * 2
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_without_std_share_network_output_values(self, output_dim,
                                                     hidden_sizes):
        """With a fixed (non-adaptive) std, log_std stays at log(init_std)."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 init_std=2,
                                 std_share_network=False,
                                 adaptive_std=False,
                                 hidden_nonlinearity=None,
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        mean, log_std = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})
        expected_mean = np.full([1, 1, output_dim], 5 * np.prod(hidden_sizes))
        expected_log_std = np.full([1, 1, output_dim], np.log(2.))
        assert np.array_equal(mean, expected_mean)
        assert np.allclose(log_std, expected_log_std)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_without_std_share_network_shapes(self, output_dim, hidden_sizes):
        """Separate networks: mean head is output_dim wide, std is a plain parameter."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=False,
                                 adaptive_std=False)
        model.build(self.input_var)
        with tf.compat.v1.variable_scope(model.name, reuse=True):
            mean_output_weights = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/kernel')
            mean_output_bias = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/bias')
            # Non-adaptive std is a single trainable parameter vector.
            log_std_output_weights = tf.compat.v1.get_variable(
                'dist_params/log_std_network/parameter')
        assert mean_output_weights.shape[1] == output_dim
        assert mean_output_bias.shape == output_dim
        assert log_std_output_weights.shape == output_dim
    @pytest.mark.parametrize('output_dim, hidden_sizes, std_hidden_sizes',
                             [(1, (0, ), (0, )), (1, (1, ), (1, )),
                              (1, (2, ), (2, )), (2, (3, ), (3, )),
                              (2, (1, 1), (1, 1)), (3, (2, 2), (2, 2))])
    def test_adaptive_std_network_output_values(self, output_dim, hidden_sizes,
                                                std_hidden_sizes):
        """Adaptive std: log_std is produced by its own all-ones MLP over the input."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 std_share_network=False,
                                 hidden_sizes=hidden_sizes,
                                 std_hidden_sizes=std_hidden_sizes,
                                 adaptive_std=True,
                                 hidden_nonlinearity=None,
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer(),
                                 std_hidden_nonlinearity=None,
                                 std_hidden_w_init=tf.ones_initializer(),
                                 std_output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        mean, log_std = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})
        expected_mean = np.full([1, 1, output_dim], 5 * np.prod(hidden_sizes))
        expected_log_std = np.full([1, 1, output_dim],
                                   5 * np.prod(std_hidden_sizes))
        assert np.array_equal(mean, expected_mean)
        assert np.array_equal(log_std, expected_log_std)
    @pytest.mark.parametrize('output_dim, hidden_sizes, std_hidden_sizes',
                             [(1, (0, ), (0, )), (1, (1, ), (1, )),
                              (1, (2, ), (2, )), (2, (3, ), (3, )),
                              (2, (1, 1), (1, 1)), (3, (2, 2), (2, 2))])
    def test_adaptive_std_output_shape(self, output_dim, hidden_sizes,
                                       std_hidden_sizes):
        """Adaptive std: both mean and log_std networks have output_dim-wide heads."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_hidden_sizes=std_hidden_sizes,
                                 std_share_network=False,
                                 adaptive_std=True)
        model.build(self.input_var)
        with tf.compat.v1.variable_scope(model.name, reuse=True):
            mean_output_weights = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/kernel')
            mean_output_bias = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/bias')
            log_std_output_weights = tf.compat.v1.get_variable(
                'dist_params/log_std_network/output/kernel')
            log_std_output_bias = tf.compat.v1.get_variable(
                'dist_params/log_std_network/output/bias')
        assert mean_output_weights.shape[1] == output_dim
        assert mean_output_bias.shape == output_dim
        assert log_std_output_weights.shape[1] == output_dim
        assert log_std_output_bias.shape == output_dim
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_std_share_network_is_pickleable(self, output_dim, hidden_sizes):
        """Pickling a shared-network model preserves mean/log_std outputs."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=True,
                                 hidden_nonlinearity=None,
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        # get output bias
        with tf.compat.v1.variable_scope('GaussianMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable(
                'dist_params/mean_std_network/output/bias')
            # assign it to all ones
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})
        h = pickle.dumps(model)
        # Rebuild from the pickle in a fresh graph and compare outputs.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, None, 5))
            model_pickled = pickle.loads(h)
            dist2 = model_pickled.build(input_var).dist
            output2 = sess.run(
                [dist2.loc, tf.math.log(dist2.stddev())],
                feed_dict={input_var: self.obs})
            assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_without_std_share_network_is_pickleable(self, output_dim,
                                                     hidden_sizes):
        """Pickling a separate-networks model preserves mean/log_std outputs."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=False,
                                 adaptive_std=False,
                                 hidden_nonlinearity=None,
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        # get output bias
        with tf.compat.v1.variable_scope('GaussianMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/bias')
            # assign it to all ones
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})
        h = pickle.dumps(model)
        # Rebuild from the pickle in a fresh graph and compare outputs.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, None, 5))
            model_pickled = pickle.loads(h)
            dist2 = model_pickled.build(input_var).dist
            output2 = sess.run(
                [dist2.loc, tf.math.log(dist2.stddev())],
                feed_dict={input_var: self.obs})
            assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('output_dim, hidden_sizes, std_hidden_sizes',
                             [(1, (0, ), (0, )), (1, (1, ), (1, )),
                              (1, (2, ), (2, )), (2, (3, ), (3, )),
                              (2, (1, 1), (1, 1)), (3, (2, 2), (2, 2))])
    def test_adaptive_std_is_pickleable(self, output_dim, hidden_sizes,
                                        std_hidden_sizes):
        """Pickling an adaptive-std model preserves mean/log_std outputs."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_hidden_sizes=std_hidden_sizes,
                                 std_share_network=False,
                                 adaptive_std=True,
                                 hidden_nonlinearity=None,
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer(),
                                 std_hidden_nonlinearity=None,
                                 std_hidden_w_init=tf.ones_initializer(),
                                 std_output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        # get output bias
        with tf.compat.v1.variable_scope('GaussianMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable(
                'dist_params/mean_network/output/bias')
            # assign it to all ones
            bias.load(tf.ones_like(bias).eval())
        h = pickle.dumps(model)
        output1 = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})
        # Rebuild from the pickle in a fresh graph and compare outputs.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, None, 5))
            model_pickled = pickle.loads(h)
            dist2 = model_pickled.build(input_var).dist
            output2 = sess.run(
                [dist2.loc, tf.math.log(dist2.stddev())],
                feed_dict={input_var: self.obs})
            assert np.array_equal(output1, output2)
    # pylint: disable=assignment-from-no-return
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_softplus_output_values(self, output_dim, hidden_sizes):
        """Softplus parameterization: std == softplus(param), param init'd so std starts at 2."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 hidden_nonlinearity=None,
                                 std_share_network=False,
                                 adaptive_std=False,
                                 init_std=2,
                                 std_parameterization='softplus',
                                 hidden_w_init=tf.ones_initializer(),
                                 output_w_init=tf.ones_initializer())
        dist = model.build(self.input_var).dist
        mean, log_std = self.sess.run(
            [dist.loc, tf.math.log(dist.stddev())],
            feed_dict={self.input_var: self.obs})
        expected_mean = np.full([1, 1, output_dim], 5 * np.prod(hidden_sizes))
        # Inverse-softplus of init_std, then forward softplus -> log.
        expected_std_param = np.full([1, 1, output_dim], np.log(np.exp(2) - 1))
        expected_log_std = np.log(np.log(1. + np.exp(expected_std_param)))
        assert np.array_equal(mean, expected_mean)
        assert np.allclose(log_std, expected_log_std)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_exp_min_std(self, output_dim, hidden_sizes):
        """min_std clamps a too-small init_std (exp parameterization)."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=False,
                                 init_std=1,
                                 min_std=10,
                                 std_parameterization='exp')
        dist = model.build(self.input_var).dist
        log_std = self.sess.run(tf.math.log(dist.stddev()),
                                feed_dict={self.input_var: self.obs})
        expected_log_std = np.full([1, 1, output_dim], np.log(10))
        assert np.allclose(log_std, expected_log_std)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_exp_max_std(self, output_dim, hidden_sizes):
        """max_std clamps a too-large init_std (exp parameterization)."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=False,
                                 init_std=10,
                                 max_std=1,
                                 std_parameterization='exp')
        dist = model.build(self.input_var).dist
        log_std = self.sess.run(tf.math.log(dist.stddev()),
                                feed_dict={self.input_var: self.obs})
        expected_log_std = np.full([1, 1, output_dim], np.log(1))
        assert np.allclose(log_std, expected_log_std)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_softplus_min_std(self, output_dim, hidden_sizes):
        """min_std clamps a too-small init_std (softplus parameterization)."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=False,
                                 init_std=1,
                                 min_std=10,
                                 std_parameterization='softplus')
        dist = model.build(self.input_var).dist
        log_std = self.sess.run(tf.math.log(dist.stddev()),
                                feed_dict={self.input_var: self.obs})
        expected_log_std = np.full([1, 1, output_dim], np.log(10))
        assert np.allclose(log_std, expected_log_std)
    @pytest.mark.parametrize('output_dim, hidden_sizes',
                             [(1, (0, )), (1, (1, )), (1, (2, )), (2, (3, )),
                              (2, (1, 1)), (3, (2, 2))])
    def test_softplus_max_std(self, output_dim, hidden_sizes):
        """max_std clamps a too-large init_std (softplus parameterization)."""
        model = GaussianMLPModel(output_dim=output_dim,
                                 hidden_sizes=hidden_sizes,
                                 std_share_network=False,
                                 init_std=10,
                                 max_std=1,
                                 std_parameterization='softplus')
        dist = model.build(self.input_var).dist
        log_std = self.sess.run(tf.math.log(dist.stddev()),
                                feed_dict={self.input_var: self.obs})
        expected_log_std = np.full([1, 1, output_dim], np.log(1))
        # This test fails just outside of the default absolute tolerance.
        assert np.allclose(log_std, expected_log_std, atol=1e-7)
def test_unknown_std_parameterization(self):
with pytest.raises(ValueError):
GaussianMLPModel(output_dim=1, std_parameterization='unknown')
| 18,970 | 48.923684 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_gru.py | import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models.gru import gru
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_gru
class TestGRU(TfGraphTestCase):
    def setup_method(self):
        """Create the step-state placeholder and a constant-weight GRU cell."""
        super().setup_method()
        self.batch_size = 2
        self.hidden_dim = 2
        self.step_hidden_var = tf.compat.v1.placeholder(
            shape=(self.batch_size, self.hidden_dim),
            name='initial_hidden',
            dtype=tf.float32)
        self.gru_cell = tf.keras.layers.GRUCell(
            units=self.hidden_dim,
            activation=tf.nn.tanh,
            kernel_initializer=tf.constant_initializer(1),
            recurrent_activation=tf.nn.sigmoid,
            recurrent_initializer=tf.constant_initializer(1),
            # NOTE(review): 'lstm_layer' looks copy-pasted from the LSTM test.
            # Nothing in this file looks the layer up by this name, so it is
            # harmless, but 'gru_layer' would be clearer.
            name='lstm_layer')
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim, '
                             'hidden_init', [
                                 (1, 1, 1, 0),  # noqa: E122
                                 (1, 1, 3, 0),
                                 (1, 3, 1, 0),
                                 (3, 1, 1, 0),
                                 (3, 3, 1, 0),
                                 (3, 3, 3, 0),
                                 (1, 1, 1, 0.5),
                                 (1, 1, 3, 0.5),
                                 (1, 3, 1, 0.5),
                                 (3, 1, 1, 0.5),
                                 (3, 3, 1, 0.5),
                                 (3, 3, 3, 0.5),
                             ])
    # yapf: enable
    def test_output_shapes(self, time_step, input_dim, output_dim,
                           hidden_init):
        """Step and full-sequence outputs of gru() have the expected shapes."""
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(
                all_input_var=input_var,
                name='gru',
                gru_cell=self.gru_cell,
                step_input_var=step_input_var,
                step_hidden_var=self.step_hidden_var,
                hidden_state_init=tf.constant_initializer(hidden_init),
                output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the gru cell
        outputs_t, output_t, h_t, hidden_init = self.gru
        hidden = np.full((self.batch_size, self.hidden_dim),
                         hidden_init.eval())
        for _ in range(time_step):
            output, hidden = self.sess.run([output_t, h_t],
                                           feed_dict={
                                               step_input_var: obs_input,
                                               self.step_hidden_var: hidden,
                                           })  # noqa: E126
            assert output.shape == (self.batch_size, output_dim)
            assert hidden.shape == (self.batch_size, self.hidden_dim)
        full_output = self.sess.run(outputs_t,
                                    feed_dict={input_var: obs_inputs})
        assert full_output.shape == (self.batch_size, time_step, output_dim)
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim, '
                             'hidden_init', [
                                 (1, 1, 1, 0),  # noqa: E122
                                 (1, 1, 3, 0),
                                 (1, 3, 1, 0),
                                 (3, 1, 1, 0),
                                 (3, 3, 1, 0),
                                 (3, 3, 3, 0),
                                 (1, 1, 1, 0.5),
                                 (1, 1, 3, 0.5),
                                 (1, 3, 1, 0.5),
                                 (3, 1, 1, 0.5),
                                 (3, 3, 1, 0.5),
                                 (3, 3, 3, 0.5),
                             ])
    # yapf: enable
    def test_output_value(self, time_step, input_dim, output_dim, hidden_init):
        """gru() step and sequence outputs match a NumPy reference GRU step."""
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(
                all_input_var=input_var,
                name='gru',
                gru_cell=self.gru_cell,
                step_input_var=step_input_var,
                step_hidden_var=self.step_hidden_var,
                hidden_state_init=tf.constant_initializer(hidden_init),
                output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the gru cell
        outputs_t, output_t, h_t, hidden_init = self.gru
        # hidden1 tracks the TF graph, hidden2 the NumPy reference.
        hidden1 = hidden2 = np.full((self.batch_size, self.hidden_dim),
                                    hidden_init.eval())
        for i in range(time_step):
            output1, hidden1 = self.sess.run([output_t, h_t],
                                             feed_dict={
                                                 step_input_var: obs_input,
                                                 self.step_hidden_var: hidden1
                                             })  # noqa: E126
            hidden2 = recurrent_step_gru(input_val=obs_input,
                                         num_units=self.hidden_dim,
                                         step_hidden=hidden2,
                                         w_x_init=1.,
                                         w_h_init=1.,
                                         b_init=0.,
                                         nonlinearity=np.tanh,
                                         gate_nonlinearity=lambda x: 1. /
                                         (1. + np.exp(-x)))
            output_nonlinearity = np.full(
                (np.prod(hidden2.shape[1:]), output_dim), 1.)
            output2 = np.matmul(hidden2, output_nonlinearity)
            assert np.allclose(output1, output2)
            assert np.allclose(hidden1, hidden2)
        full_output1 = self.sess.run(outputs_t,
                                     feed_dict={input_var: obs_inputs})
        # Re-run the NumPy reference over the whole sequence and stack states.
        hidden2 = np.full((self.batch_size, self.hidden_dim),
                          hidden_init.eval())
        stack_hidden = None
        for i in range(time_step):
            hidden2 = recurrent_step_gru(input_val=obs_inputs[:, i, :],
                                         num_units=self.hidden_dim,
                                         step_hidden=hidden2,
                                         w_x_init=1.,
                                         w_h_init=1.,
                                         b_init=0.,
                                         nonlinearity=np.tanh,
                                         gate_nonlinearity=lambda x: 1. /
                                         (1. + np.exp(-x)))
            if stack_hidden is None:
                stack_hidden = hidden2[:, np.newaxis, :]
            else:
                stack_hidden = np.concatenate(
                    (stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
        output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
                                      1.)
        full_output2 = np.matmul(stack_hidden, output_nonlinearity)
        assert np.allclose(full_output1, full_output2)
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim', [
        (1, 1, 1),
        (1, 1, 3),
        (1, 3, 1),
        (3, 1, 1),
        (3, 3, 1),
        (3, 3, 3),
    ])
    # yapf: enable
    def test_output_value_trainable_hidden_and_cell(self, time_step, input_dim,
                                                    output_dim):
        """With hidden_state_init_trainable, the init state is a trainable var and outputs still match NumPy."""
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(all_input_var=input_var,
                           name='gru',
                           gru_cell=self.gru_cell,
                           step_input_var=step_input_var,
                           step_hidden_var=self.step_hidden_var,
                           hidden_state_init_trainable=True,
                           output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the gru cell
        outputs_t, output_t, h_t, hidden_init = self.gru
        hidden = np.full((self.batch_size, self.hidden_dim),
                         hidden_init.eval())
        _, hidden = self.sess.run([output_t, h_t],
                                  feed_dict={
                                      step_input_var: obs_input,
                                      self.step_hidden_var: hidden,
                                  })  # noqa: E126
        # The initial hidden state must be registered as trainable.
        with tf.compat.v1.variable_scope('GRU/gru', reuse=True):
            hidden_init_var = tf.compat.v1.get_variable(name='initial_hidden')
            assert hidden_init_var in tf.compat.v1.trainable_variables()
        full_output1 = self.sess.run(outputs_t,
                                     feed_dict={input_var: obs_inputs})
        # NumPy reference pass over the whole sequence.
        hidden2 = np.full((self.batch_size, self.hidden_dim),
                          hidden_init.eval())
        stack_hidden = None
        for i in range(time_step):
            hidden2 = recurrent_step_gru(input_val=obs_inputs[:, i, :],
                                         num_units=self.hidden_dim,
                                         step_hidden=hidden2,
                                         w_x_init=1.,
                                         w_h_init=1.,
                                         b_init=0.,
                                         nonlinearity=np.tanh,
                                         gate_nonlinearity=lambda x: 1. /
                                         (1. + np.exp(-x)))
            if stack_hidden is None:
                stack_hidden = hidden2[:, np.newaxis, :]
            else:
                stack_hidden = np.concatenate(
                    (stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
        output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
                                      1.)
        full_output2 = np.matmul(stack_hidden, output_nonlinearity)
        assert np.allclose(full_output1, full_output2)
    def test_gradient_paths(self):
        """Gradients flow through the step path but not across step/sequence paths.

        tf.gradients returns [None] when there is no path; running [None]
        through sess.run raises TypeError, which is what the final three
        blocks assert.
        """
        time_step = 3
        input_dim = 2
        output_dim = 4
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(all_input_var=input_var,
                           name='gru',
                           gru_cell=self.gru_cell,
                           step_input_var=step_input_var,
                           step_hidden_var=self.step_hidden_var,
                           output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the gru cell
        outputs_t, output_t, h_t, hidden_init = self.gru
        hidden = np.full((self.batch_size, self.hidden_dim),
                         hidden_init.eval())
        # These gradients exist: step output depends on step inputs/state.
        grads_step_o_i = tf.gradients(output_t, step_input_var)
        grads_step_o_h = tf.gradients(output_t, self.step_hidden_var)
        grads_step_h = tf.gradients(h_t, step_input_var)
        self.sess.run([grads_step_o_i, grads_step_o_h, grads_step_h],
                      feed_dict={
                          step_input_var: obs_input,
                          self.step_hidden_var: hidden,
                      })  # noqa: E126
        # Cross-path gradients do not exist (sequence vs. step graphs).
        grads_step_o_i = tf.gradients(outputs_t, step_input_var)
        grads_step_o_h = tf.gradients(outputs_t, self.step_hidden_var)
        grads_step_h = tf.gradients(h_t, input_var)
        # No gradient flow
        with pytest.raises(TypeError):
            self.sess.run(grads_step_o_i,
                          feed_dict={
                              step_input_var: obs_input,
                              self.step_hidden_var: hidden,
                          })
        with pytest.raises(TypeError):
            self.sess.run(grads_step_o_h,
                          feed_dict={
                              step_input_var: obs_input,
                              self.step_hidden_var: hidden,
                          })
        with pytest.raises(TypeError):
            self.sess.run(grads_step_h, feed_dict={input_var: obs_inputs})
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim, '
                             'hidden_init', [
                                 (1, 1, 1, 0),  # noqa: E122
                                 (1, 1, 3, 0),
                                 (1, 3, 1, 0),
                                 (3, 1, 1, 0),
                                 (3, 3, 1, 0),
                                 (3, 3, 3, 0),
                                 (1, 1, 1, 0.5),
                                 (1, 1, 3, 0.5),
                                 (1, 3, 1, 0.5),
                                 (3, 1, 1, 0.5),
                                 (3, 3, 1, 0.5),
                                 (3, 3, 3, 0.5),
                             ])
    # yapf: enable
    def test_output_same_as_rnn(self, time_step, input_dim, output_dim,
                                hidden_init):
        """gru() agrees with tf.keras.layers.RNN driving the same cell."""
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, None, input_dim),
                                             name='input')
        step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, input_dim),
                                                  name='step_input')
        output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('GRU'):
            self.gru = gru(
                all_input_var=input_var,
                name='gru',
                gru_cell=self.gru_cell,
                step_input_var=step_input_var,
                step_hidden_var=self.step_hidden_var,
                hidden_state_init=tf.constant_initializer(hidden_init),
                output_nonlinearity_layer=output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Create a RNN and compute the entire outputs
        rnn_layer = tf.keras.layers.RNN(cell=self.gru_cell,
                                        return_sequences=True,
                                        return_state=True)
        # Set initial state to all 0s
        hidden_var = tf.compat.v1.get_variable(
            name='initial_hidden',
            shape=(self.batch_size, self.hidden_dim),
            initializer=tf.constant_initializer(hidden_init),
            trainable=False,
            dtype=tf.float32)
        outputs, hiddens = rnn_layer(input_var, initial_state=[hidden_var])
        outputs = output_nonlinearity(outputs)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        outputs, hiddens = self.sess.run([outputs, hiddens],
                                         feed_dict={input_var: obs_inputs})
        # Compute output by doing t step() on the gru cell
        hidden = np.full((self.batch_size, self.hidden_dim), hidden_init)
        _, output_t, hidden_t, _ = self.gru
        for i in range(time_step):
            output, hidden = self.sess.run([output_t, hidden_t],
                                           feed_dict={
                                               step_input_var: obs_input,
                                               self.step_hidden_var: hidden,
                                           })  # noqa: E126
            # The output from i-th timestep
            assert np.array_equal(output, outputs[:, i, :])
        assert np.array_equal(hidden, hiddens)
        # Also the full output from lstm
        full_outputs = self.sess.run(self.gru[0],
                                     feed_dict={input_var: obs_inputs})
        assert np.array_equal(outputs, full_outputs)
| 17,963 | 42.814634 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_gru_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models import GRUModel
from tests.fixtures import TfGraphTestCase
class TestGRUModel(TfGraphTestCase):
    def setup_method(self):
        """Create all-ones observations and input placeholders for GRUModel tests."""
        super().setup_method()
        self.batch_size = 1
        self.time_step = 1
        self.feature_shape = 2
        self.output_dim = 1
        self.obs_inputs = np.full(
            (self.batch_size, self.time_step, self.feature_shape), 1.)
        self.obs_input = np.full((self.batch_size, self.feature_shape), 1.)
        self.input_var = tf.compat.v1.placeholder(tf.float32,
                                                  shape=(None, None,
                                                         self.feature_shape),
                                                  name='input')
        # NOTE(review): this placeholder reuses name='input'; TF uniquifies
        # duplicate op names, so the step input actually becomes 'input_1'.
        self.step_input_var = tf.compat.v1.placeholder(
            tf.float32, shape=(None, self.feature_shape), name='input')
    @pytest.mark.parametrize('output_dim, hidden_dim', [(1, 1), (1, 2),
                                                        (3, 3)])
    def test_output_values(self, output_dim, hidden_dim):
        """With constant-ones weights and identity activations, each output equals hidden_dim * -2."""
        model = GRUModel(output_dim=output_dim,
                         hidden_dim=hidden_dim,
                         hidden_nonlinearity=None,
                         recurrent_nonlinearity=None,
                         hidden_w_init=tf.constant_initializer(1),
                         recurrent_w_init=tf.constant_initializer(1),
                         output_w_init=tf.constant_initializer(1))
        step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                          hidden_dim),
                                                   name='step_hidden',
                                                   dtype=tf.float32)
        outputs = model.build(self.input_var, self.step_input_var,
                              step_hidden_var).outputs
        output = self.sess.run(outputs[0],
                               feed_dict={self.input_var: self.obs_inputs})
        expected_output = np.full(
            [self.batch_size, self.time_step, output_dim], hidden_dim * -2)
        assert np.array_equal(output, expected_output)
def test_is_pickleable(self):
model = GRUModel(output_dim=1, hidden_dim=1)
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
name='step_hidden',
dtype=tf.float32)
network = model.build(self.input_var, self.step_input_var,
step_hidden_var)
# assign bias to all one
with tf.compat.v1.variable_scope('GRUModel/gru', reuse=True):
init_hidden = tf.compat.v1.get_variable('initial_hidden')
init_hidden.load(tf.ones_like(init_hidden).eval())
hidden = np.zeros((self.batch_size, 1))
outputs1 = self.sess.run(network.all_output,
feed_dict={self.input_var: self.obs_inputs})
output1 = self.sess.run(
[network.step_output, network.step_hidden],
# yapf: disable
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden
})
# yapf: enable
h = pickle.dumps(model)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
model_pickled = pickle.loads(h)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None,
self.feature_shape),
name='input')
step_input_var = tf.compat.v1.placeholder(
tf.float32, shape=(None, self.feature_shape), name='input')
step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
1),
name='initial_hidden',
dtype=tf.float32)
network2 = model_pickled.build(input_var, step_input_var,
step_hidden_var)
outputs2 = sess.run(network2.all_output,
feed_dict={input_var: self.obs_inputs})
output2 = sess.run(
[network2.step_output, network2.step_hidden],
# yapf: disable
feed_dict={
step_input_var: self.obs_input,
step_hidden_var: hidden
})
# yapf: enable
assert np.array_equal(outputs1, outputs2)
assert np.array_equal(output1, output2)
| 4,926 | 43.387387 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_lstm.py | import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models.lstm import lstm
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_lstm
class TestLSTM(TfGraphTestCase):
    def setup_method(self):
        """Create step-state placeholders and a constant-weight LSTM cell."""
        super().setup_method()
        self.batch_size = 2
        self.hidden_dim = 2
        self._step_hidden_var = tf.compat.v1.placeholder(
            shape=(self.batch_size, self.hidden_dim),
            name='initial_hidden',
            dtype=tf.float32)
        self._step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                              self.hidden_dim),
                                                       name='initial_cell',
                                                       dtype=tf.float32)
        self.lstm_cell = tf.keras.layers.LSTMCell(
            units=self.hidden_dim,
            activation=tf.nn.tanh,
            kernel_initializer=tf.constant_initializer(1),
            recurrent_activation=tf.nn.sigmoid,
            recurrent_initializer=tf.constant_initializer(1),
            name='lstm_layer')
    # yapf: disable
    @pytest.mark.parametrize('time_step, input_dim, output_dim, '
                             'hidden_init, cell_init', [
                                 (1, 1, 1, 0, 0),  # noqa: E122
                                 (1, 1, 3, 0, 0),
                                 (1, 3, 1, 0, 0),
                                 (3, 1, 1, 0, 0),
                                 (3, 3, 1, 0, 0),
                                 (3, 3, 3, 0, 0),
                                 (1, 1, 1, 0.5, 0.5),
                                 (1, 1, 3, 0.5, 0.5),
                                 (1, 3, 1, 0.5, 0.5),
                                 (3, 1, 1, 0.5, 0.5),
                                 (3, 3, 1, 0.5, 0.5),
                                 (3, 3, 3, 0.5, 0.5),
                             ])
    # yapf: enable
    def test_output_shapes(self, time_step, input_dim, output_dim, hidden_init,
                           cell_init):
        """Step and full-sequence outputs of lstm() have the expected shapes."""
        obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
        obs_input = np.full((self.batch_size, input_dim), 1.)
        _input_var = tf.compat.v1.placeholder(tf.float32,
                                              shape=(None, None, input_dim),
                                              name='input')
        _step_input_var = tf.compat.v1.placeholder(tf.float32,
                                                   shape=(None, input_dim),
                                                   name='input')
        _output_nonlinearity = tf.keras.layers.Dense(
            units=output_dim,
            activation=None,
            kernel_initializer=tf.constant_initializer(1))
        with tf.compat.v1.variable_scope('LSTM'):
            self.lstm = lstm(
                all_input_var=_input_var,
                name='lstm',
                lstm_cell=self.lstm_cell,
                step_input_var=_step_input_var,
                step_hidden_var=self._step_hidden_var,
                step_cell_var=self._step_cell_var,
                hidden_state_init=tf.constant_initializer(hidden_init),
                cell_state_init=tf.constant_initializer(cell_init),
                output_nonlinearity_layer=_output_nonlinearity)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        # Compute output by doing t step() on the lstm cell
        outputs_t, output_t, h_t, c_t, hidden_init, cell_init = self.lstm
        hidden = np.full((self.batch_size, self.hidden_dim),
                         hidden_init.eval())
        cell = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
        for _ in range(time_step):
            output, hidden, cell = self.sess.run(
                [output_t, h_t, c_t],
                feed_dict={
                    _step_input_var: obs_input,
                    self._step_hidden_var: hidden,
                    self._step_cell_var: cell
                })
            assert output.shape == (self.batch_size, output_dim)
            assert hidden.shape == (self.batch_size, self.hidden_dim)
            assert cell.shape == (self.batch_size, self.hidden_dim)
        full_output = self.sess.run(outputs_t,
                                    feed_dict={_input_var: obs_inputs})
        assert full_output.shape == (self.batch_size, time_step, output_dim)
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init, cell_init', [
(1, 1, 1, 0, 0), # noqa: E122
(1, 1, 3, 0, 0),
(1, 3, 1, 0, 0),
(3, 1, 1, 0, 0),
(3, 3, 1, 0, 0),
(3, 3, 3, 0, 0),
(1, 1, 1, 0.5, 0.5),
(1, 1, 3, 0.5, 0.5),
(1, 3, 1, 0.5, 0.5),
(3, 1, 1, 0.5, 0.5),
(3, 3, 1, 0.5, 0.5),
(3, 3, 3, 0.5, 0.5),
])
# yapf: enable
def test_output_value(self, time_step, input_dim, output_dim, hidden_init,
cell_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(
all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
hidden_state_init=tf.constant_initializer(hidden_init),
cell_state_init=tf.constant_initializer(cell_init),
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the lstm cell
outputs_t, output_t, h_t, c_t, hidden_init, cell_init = self.lstm
hidden1 = hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell1 = cell2 = np.full((self.batch_size, self.hidden_dim),
cell_init.eval())
for i in range(time_step):
output1, hidden1, cell1 = self.sess.run(
[output_t, h_t, c_t],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden1,
self._step_cell_var: cell1
})
hidden2, cell2 = recurrent_step_lstm(
input_val=obs_input,
num_units=self.hidden_dim,
step_hidden=hidden2,
step_cell=cell2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. / (1. + np.exp(-x)))
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 1.)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(output1, output2)
assert np.allclose(hidden1, hidden2)
assert np.allclose(cell1, cell2)
full_output1 = self.sess.run(outputs_t,
feed_dict={_input_var: obs_inputs})
hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell2 = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
stack_hidden = None
for i in range(time_step):
hidden2, cell2 = recurrent_step_lstm(
input_val=obs_inputs[:, i, :],
num_units=self.hidden_dim,
step_hidden=hidden2,
step_cell=cell2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. / (1. + np.exp(-x)))
if stack_hidden is None:
stack_hidden = hidden2[:, np.newaxis, :]
else:
stack_hidden = np.concatenate(
(stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
1.)
full_output2 = np.matmul(stack_hidden, output_nonlinearity)
assert np.allclose(full_output1, full_output2)
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim', [
(1, 1, 1),
(1, 1, 3),
(1, 3, 1),
(3, 1, 1),
(3, 3, 1),
(3, 3, 3),
])
# yapf: enable
def test_output_value_trainable_hidden_and_cell(self, time_step, input_dim,
output_dim):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
hidden_state_init_trainable=True,
cell_state_init_trainable=True,
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the lstm cell
outputs_t, _, h_t, c_t, hidden_init, cell_init = self.lstm
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
hidden, cell = self.sess.run(
[h_t, c_t],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
with tf.compat.v1.variable_scope('LSTM/lstm', reuse=True):
hidden_init_var = tf.compat.v1.get_variable(name='initial_hidden')
cell_init_var = tf.compat.v1.get_variable(name='initial_cell')
assert hidden_init_var in tf.compat.v1.trainable_variables()
assert cell_init_var in tf.compat.v1.trainable_variables()
full_output1 = self.sess.run(outputs_t,
feed_dict={_input_var: obs_inputs})
hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell2 = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
stack_hidden = None
for i in range(time_step):
hidden2, cell2 = recurrent_step_lstm(
input_val=obs_inputs[:, i, :],
num_units=self.hidden_dim,
step_hidden=hidden2,
step_cell=cell2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. / (1. + np.exp(-x)))
if stack_hidden is None:
stack_hidden = hidden2[:, np.newaxis, :]
else:
stack_hidden = np.concatenate(
(stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
1.)
full_output2 = np.matmul(stack_hidden, output_nonlinearity)
assert np.allclose(full_output1, full_output2)
def test_gradient_paths(self):
time_step = 3
input_dim = 2
output_dim = 4
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the lstm cell
outputs_t, output_t, h_t, c_t, hidden_init, cell_init = self.lstm
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
cell = np.full((self.batch_size, self.hidden_dim), cell_init.eval())
grads_step_o_i = tf.gradients(output_t, _step_input_var)
grads_step_o_h = tf.gradients(output_t, self._step_hidden_var)
grads_step_o_c = tf.gradients(output_t, self._step_cell_var)
grads_step_h = tf.gradients(h_t, _step_input_var)
grads_step_c = tf.gradients(c_t, _step_input_var)
self.sess.run(
[
grads_step_o_i, grads_step_o_h, grads_step_o_c, grads_step_h,
grads_step_c
],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
grads_step_o_i = tf.gradients(outputs_t, _step_input_var)
grads_step_o_h = tf.gradients(outputs_t, self._step_hidden_var)
grads_step_o_c = tf.gradients(outputs_t, self._step_cell_var)
grads_step_h = tf.gradients(h_t, _input_var)
grads_step_c = tf.gradients(c_t, _input_var)
# No gradient flow
with pytest.raises(TypeError):
self.sess.run(grads_step_o_i,
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
with pytest.raises(TypeError):
self.sess.run(grads_step_o_h,
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
with pytest.raises(TypeError):
self.sess.run(grads_step_o_c,
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
with pytest.raises(TypeError):
self.sess.run(grads_step_h, feed_dict={_input_var: obs_inputs})
with pytest.raises(TypeError):
self.sess.run(grads_step_c, feed_dict={_input_var: obs_inputs})
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init, cell_init', [
(1, 1, 1, 0, 0), # noqa: E122
(1, 1, 3, 0, 0),
(1, 3, 1, 0, 0),
(3, 1, 1, 0, 0),
(3, 3, 1, 0, 0),
(3, 3, 3, 0, 0),
(1, 1, 1, 0.5, 0.5),
(1, 1, 3, 0.5, 0.5),
(1, 3, 1, 0.5, 0.5),
(3, 1, 1, 0.5, 0.5),
(3, 3, 1, 0.5, 0.5),
(3, 3, 3, 0.5, 0.5),
])
# yapf: enable
def test_output_same_as_rnn(self, time_step, input_dim, output_dim,
hidden_init, cell_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
_step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='input')
_output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('LSTM'):
self.lstm = lstm(
all_input_var=_input_var,
name='lstm',
lstm_cell=self.lstm_cell,
step_input_var=_step_input_var,
step_hidden_var=self._step_hidden_var,
step_cell_var=self._step_cell_var,
hidden_state_init=tf.constant_initializer(hidden_init),
cell_state_init=tf.constant_initializer(cell_init),
output_nonlinearity_layer=_output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Create a RNN and compute the entire outputs
rnn_layer = tf.keras.layers.RNN(cell=self.lstm_cell,
return_sequences=True,
return_state=True)
# Set initial state to all 0s
hidden_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.batch_size, self.hidden_dim),
initializer=tf.constant_initializer(hidden_init),
trainable=False,
dtype=tf.float32)
cell_var = tf.compat.v1.get_variable(
name='initial_cell',
shape=(self.batch_size, self.hidden_dim),
initializer=tf.constant_initializer(cell_init),
trainable=False,
dtype=tf.float32)
outputs, hiddens, cells = rnn_layer(
_input_var, initial_state=[hidden_var, cell_var])
outputs = _output_nonlinearity(outputs)
self.sess.run(tf.compat.v1.global_variables_initializer())
outputs, hiddens, cells = self.sess.run(
[outputs, hiddens, cells], feed_dict={_input_var: obs_inputs})
# Compute output by doing t step() on the lstm cell
hidden = np.full((self.batch_size, self.hidden_dim), hidden_init)
cell = np.full((self.batch_size, self.hidden_dim), cell_init)
_, output_t, hidden_t, cell_t, _, _ = self.lstm
for i in range(time_step):
output, hidden, cell = self.sess.run(
[output_t, hidden_t, cell_t],
feed_dict={
_step_input_var: obs_input,
self._step_hidden_var: hidden,
self._step_cell_var: cell
})
# The output from i-th timestep
assert np.array_equal(output, outputs[:, i, :])
assert np.array_equal(hidden, hiddens)
assert np.array_equal(cell, cells)
# Also the full output from lstm
full_outputs = self.sess.run(self.lstm[0],
feed_dict={_input_var: obs_inputs})
assert np.array_equal(outputs, full_outputs)
| 20,605 | 42.381053 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_lstm_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models import LSTMModel
from tests.fixtures import TfGraphTestCase
class TestLSTMModel(TfGraphTestCase):
    """Tests for ``garage.tf.models.LSTMModel``.

    Checks deterministic output values with unit-initialized weights and
    that the model survives a pickle round-trip across sessions/graphs.
    """

    def setup_method(self):
        """Create all-ones inputs and the input placeholders."""
        super().setup_method()
        self.batch_size = 1
        self.time_step = 1
        self.feature_shape = 2
        self.output_dim = 1

        self.obs_inputs = np.full(
            (self.batch_size, self.time_step, self.feature_shape), 1.)
        self.obs_input = np.full((self.batch_size, self.feature_shape), 1.)

        self._input_var = tf.compat.v1.placeholder(tf.float32,
                                                   shape=(None, None,
                                                          self.feature_shape),
                                                   name='input')
        self._step_input_var = tf.compat.v1.placeholder(
            tf.float32, shape=(None, self.feature_shape), name='input')

    @pytest.mark.parametrize('output_dim, hidden_dim', [(1, 1), (1, 2),
                                                        (3, 3)])
    def test_output_values(self, output_dim, hidden_dim):
        """Full output equals the analytic value for unit weights.

        With all weights 1 and all nonlinearities disabled, every output
        element evaluates to ``hidden_dim * 8`` for this all-ones input.
        """
        model = LSTMModel(output_dim=output_dim,
                          hidden_dim=hidden_dim,
                          hidden_nonlinearity=None,
                          recurrent_nonlinearity=None,
                          hidden_w_init=tf.constant_initializer(1),
                          recurrent_w_init=tf.constant_initializer(1),
                          output_w_init=tf.constant_initializer(1))

        step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                          hidden_dim),
                                                   name='step_hidden',
                                                   dtype=tf.float32)
        step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                        hidden_dim),
                                                 name='step_cell',
                                                 dtype=tf.float32)
        outputs = model.build(self._input_var, self._step_input_var,
                              step_hidden_var, step_cell_var).outputs
        output = self.sess.run(outputs[0],
                               feed_dict={self._input_var: self.obs_inputs})
        expected_output = np.full(
            [self.batch_size, self.time_step, output_dim], hidden_dim * 8)
        assert np.array_equal(output, expected_output)

    def test_is_pickleable(self):
        """Unpickled model rebuilt in a fresh graph produces equal outputs."""
        model = LSTMModel(output_dim=1, hidden_dim=1)
        step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
                                                   name='step_hidden',
                                                   dtype=tf.float32)
        step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size, 1),
                                                 name='step_cell',
                                                 dtype=tf.float32)
        network = model.build(self._input_var, self._step_input_var,
                              step_hidden_var, step_cell_var)

        # assign bias to all one
        # (mutates a parameter so the round-trip check is non-trivial)
        with tf.compat.v1.variable_scope('LSTMModel/lstm', reuse=True):
            init_hidden = tf.compat.v1.get_variable('initial_hidden')

        init_hidden.load(tf.ones_like(init_hidden).eval())

        hidden = np.zeros((self.batch_size, 1))
        cell = np.zeros((self.batch_size, 1))

        outputs1 = self.sess.run(network.all_output,
                                 feed_dict={self._input_var: self.obs_inputs})
        output1 = self.sess.run(
            [network.step_output, network.step_hidden, network.step_cell],
            feed_dict={
                self._step_input_var: self.obs_input,
                step_hidden_var: hidden,
                step_cell_var: cell
            })

        h = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)

            input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, None,
                                                        self.feature_shape),
                                                 name='input')
            step_input_var = tf.compat.v1.placeholder(
                tf.float32, shape=(None, self.feature_shape), name='input')
            step_hidden_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                              1),
                                                       name='initial_hidden',
                                                       dtype=tf.float32)
            step_cell_var = tf.compat.v1.placeholder(shape=(self.batch_size,
                                                            1),
                                                     name='initial_cell',
                                                     dtype=tf.float32)

            network2 = model_pickled.build(input_var, step_input_var,
                                           step_hidden_var, step_cell_var)

            outputs2 = sess.run(network2.all_output,
                                feed_dict={input_var: self.obs_inputs})
            output2 = sess.run(
                [
                    network2.step_output, network2.step_hidden,
                    network2.step_cell
                ],
                feed_dict={
                    step_input_var: self.obs_input,
                    step_hidden_var: hidden,
                    step_cell_var: cell
                })
            assert np.array_equal(outputs1, outputs2)
            assert np.array_equal(output1, output2)
| 5,801 | 45.790323 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_mlp.py | import numpy as np
import tensorflow as tf
from garage.tf.models.mlp import mlp
from tests.fixtures import TfGraphTestCase
class TestMLP(TfGraphTestCase):
    """Tests for the ``garage.tf.models.mlp.mlp`` network builder.

    ``setup_method`` builds a default 2-hidden-layer MLP under the ``MLP``
    variable scope; the tests check variable sharing, output shape, output
    values against a hand-built graph, and layer normalization.
    """

    # pylint: disable=unsubscriptable-object
    def setup_method(self):
        """Build a default mlp and initialize its variables."""
        super().setup_method()
        self.obs_input = np.array([[1, 2, 3, 4]])
        input_shape = self.obs_input.shape[1:]  # 4
        self.hidden_nonlinearity = tf.nn.relu

        self._input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, ) + input_shape,
                                               name='input')
        self._output_shape = 2

        # We build a default mlp
        with tf.compat.v1.variable_scope('MLP'):
            self.mlp_f = mlp(input_var=self._input,
                             output_dim=self._output_shape,
                             hidden_sizes=(32, 32),
                             hidden_nonlinearity=self.hidden_nonlinearity,
                             name='mlp1')

        self.sess.run(tf.compat.v1.global_variables_initializer())

    def test_multiple_same_mlp(self):
        """Rebuilding with the same name reuses variables: outputs match."""
        # We create another mlp with the same name, trying to reuse it
        with tf.compat.v1.variable_scope('MLP', reuse=True):
            self.mlp_same_copy = mlp(
                input_var=self._input,
                output_dim=self._output_shape,
                hidden_sizes=(32, 32),
                hidden_nonlinearity=self.hidden_nonlinearity,
                name='mlp1')

        # We modify the weight of the default mlp and feed
        # The another mlp created should output the same result
        with tf.compat.v1.variable_scope('MLP', reuse=True):
            w = tf.compat.v1.get_variable('mlp1/hidden_0/kernel')
            self.sess.run(w.assign(w + 1))

        mlp_output = self.sess.run(self.mlp_f,
                                   feed_dict={self._input: self.obs_input})
        mlp_output2 = self.sess.run(
            self.mlp_same_copy, feed_dict={self._input: self.obs_input})

        np.testing.assert_array_almost_equal(mlp_output, mlp_output2)

    def test_different_mlp(self):
        """Rebuilding with a different name creates fresh variables."""
        # We create another mlp with different name
        with tf.compat.v1.variable_scope('MLP'):
            self.mlp_different_copy = mlp(
                input_var=self._input,
                output_dim=self._output_shape,
                hidden_sizes=(32, 32),
                hidden_nonlinearity=self.hidden_nonlinearity,
                name='mlp2')

        # Initialize the new mlp variables
        self.sess.run(tf.compat.v1.global_variables_initializer())

        # We modify the weight of the default mlp and feed
        # The another mlp created should output different result
        with tf.compat.v1.variable_scope('MLP', reuse=True):
            w = tf.compat.v1.get_variable('mlp1/hidden_0/kernel')
            self.sess.run(w.assign(w + 1))

        mlp_output = self.sess.run(self.mlp_f,
                                   feed_dict={self._input: self.obs_input})
        mlp_output2 = self.sess.run(
            self.mlp_different_copy,
            feed_dict={self._input: self.obs_input})

        # BUGFIX: the original line `np.not_equal(mlp_output, mlp_output2)`
        # computed an elementwise array and discarded it, so nothing was
        # asserted.  Assert inequality the same way TestMLPConcat does.
        assert not np.array_equal(mlp_output, mlp_output2)

    def test_output_shape(self):
        """The network's output has the configured dimensionality."""
        mlp_output = self.sess.run(self.mlp_f,
                                   feed_dict={self._input: self.obs_input})

        assert mlp_output.shape[1] == self._output_shape

    def test_output_value(self):
        """Output matches a manually composed matmul/relu graph."""
        with tf.compat.v1.variable_scope('MLP', reuse=True):
            h1_w = tf.compat.v1.get_variable('mlp1/hidden_0/kernel')
            h1_b = tf.compat.v1.get_variable('mlp1/hidden_0/bias')
            h2_w = tf.compat.v1.get_variable('mlp1/hidden_1/kernel')
            h2_b = tf.compat.v1.get_variable('mlp1/hidden_1/bias')
            out_w = tf.compat.v1.get_variable('mlp1/output/kernel')
            out_b = tf.compat.v1.get_variable('mlp1/output/bias')

        mlp_output = self.sess.run(self.mlp_f,
                                   feed_dict={self._input: self.obs_input})

        # First layer
        h2_in = tf.matmul(self._input, h1_w) + h1_b
        h2_in = self.hidden_nonlinearity(h2_in)

        # Second layer
        h3_in = tf.matmul(h2_in, h2_w) + h2_b
        h3_in = self.hidden_nonlinearity(h3_in)

        # Output layer
        h3_out = tf.matmul(h3_in, out_w) + out_b
        out = self.sess.run(h3_out, feed_dict={self._input: self.obs_input})

        np.testing.assert_array_equal(out, mlp_output)

    def test_layer_normalization(self):
        """Layer-normalized mlp matches a manual normalization graph."""
        # Create a mlp with layer normalization
        with tf.compat.v1.variable_scope('MLP'):
            self.mlp_f_w_n = mlp(input_var=self._input,
                                 output_dim=self._output_shape,
                                 hidden_sizes=(32, 32),
                                 hidden_nonlinearity=self.hidden_nonlinearity,
                                 name='mlp2',
                                 layer_normalization=True)

        # Initialize the new mlp variables
        self.sess.run(tf.compat.v1.global_variables_initializer())

        with tf.compat.v1.variable_scope('MLP', reuse=True):
            h1_w = tf.compat.v1.get_variable('mlp2/hidden_0/kernel')
            h1_b = tf.compat.v1.get_variable('mlp2/hidden_0/bias')
            h2_w = tf.compat.v1.get_variable('mlp2/hidden_1/kernel')
            h2_b = tf.compat.v1.get_variable('mlp2/hidden_1/bias')
            out_w = tf.compat.v1.get_variable('mlp2/output/kernel')
            out_b = tf.compat.v1.get_variable('mlp2/output/bias')

        # gamma/beta pairs created by the layer-normalization layers live
        # in the auto-uniquified 'MLP_1' scope.
        with tf.compat.v1.variable_scope('MLP_1', reuse=True) as vs:
            gamma_1, beta_1, gamma_2, beta_2 = vs.global_variables()

        # First layer
        y = tf.matmul(self._input, h1_w) + h1_b
        y = self.hidden_nonlinearity(y)
        mean, variance = tf.nn.moments(y, [1], keepdims=True)
        normalized_y = (y - mean) / tf.sqrt(variance + 1e-12)
        y_out = normalized_y * gamma_1 + beta_1

        # Second layer
        y = tf.matmul(y_out, h2_w) + h2_b
        y = self.hidden_nonlinearity(y)
        mean, variance = tf.nn.moments(y, [1], keepdims=True)
        normalized_y = (y - mean) / tf.sqrt(variance + 1e-12)
        y_out = normalized_y * gamma_2 + beta_2

        # Output layer
        y = tf.matmul(y_out, out_w) + out_b

        out = self.sess.run(y, feed_dict={self._input: self.obs_input})
        mlp_output = self.sess.run(self.mlp_f_w_n,
                                   feed_dict={self._input: self.obs_input})

        np.testing.assert_array_almost_equal(out, mlp_output, decimal=2)
| 6,649 | 40.823899 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_mlp_concat.py | import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models.mlp import mlp
from tests.fixtures import TfGraphTestCase
class TestMLPConcat(TfGraphTestCase):
    """Tests for ``mlp`` built with a second input concatenated mid-network.

    ``setup_method`` builds a default two-input MLP (``concat_layer=0``)
    under the ``MLP_Concat`` scope; the tests check variable sharing,
    output shape/values, layer normalization, and that ``concat_layer``
    widens exactly one layer's kernel.
    """

    # pylint: disable=unsubscriptable-object
    def setup_method(self):
        """Build the default two-input mlp and initialize its variables."""
        super(TestMLPConcat, self).setup_method()
        self.obs_input = np.array([[1, 2, 3, 4]])
        self.act_input = np.array([[1, 2, 3, 4]])
        input_shape_1 = self.obs_input.shape[1:]  # 4
        input_shape_2 = self.act_input.shape[1:]  # 4
        self.hidden_nonlinearity = tf.nn.relu

        self._obs_input = tf.compat.v1.placeholder(tf.float32,
                                                   shape=(None, ) +
                                                   input_shape_1,
                                                   name='input')

        self._act_input = tf.compat.v1.placeholder(tf.float32,
                                                   shape=(None, ) +
                                                   input_shape_2,
                                                   name='input')

        self._output_shape = 2

        # We build a default mlp
        with tf.compat.v1.variable_scope('MLP_Concat'):
            self.mlp_f = mlp(input_var=self._obs_input,
                             output_dim=self._output_shape,
                             hidden_sizes=(32, 32),
                             input_var2=self._act_input,
                             concat_layer=0,
                             hidden_nonlinearity=self.hidden_nonlinearity,
                             name='mlp1')

        self.sess.run(tf.compat.v1.global_variables_initializer())

    def test_multiple_same_mlp(self):
        """Rebuilding with the same name reuses variables: outputs match."""
        # We create another mlp with the same name, trying to reuse it
        with tf.compat.v1.variable_scope('MLP_Concat', reuse=True):
            self.mlp_same_copy = mlp(
                input_var=self._obs_input,
                output_dim=self._output_shape,
                hidden_sizes=(32, 32),
                input_var2=self._act_input,
                concat_layer=0,
                hidden_nonlinearity=self.hidden_nonlinearity,
                name='mlp1')

        # We modify the weight of the default mlp and feed
        # The another mlp created should output the same result
        with tf.compat.v1.variable_scope('MLP_Concat', reuse=True):
            w = tf.compat.v1.get_variable('mlp1/hidden_0/kernel')
            self.sess.run(w.assign(w + 1))

        mlp_output = self.sess.run(self.mlp_f,
                                   feed_dict={
                                       self._obs_input: self.obs_input,
                                       self._act_input: self.act_input
                                   })
        mlp_output2 = self.sess.run(self.mlp_same_copy,
                                    feed_dict={
                                        self._obs_input: self.obs_input,
                                        self._act_input: self.act_input
                                    })

        np.testing.assert_array_almost_equal(mlp_output, mlp_output2)

    def test_different_mlp(self):
        """Rebuilding with a different name creates fresh variables."""
        # We create another mlp with different name
        with tf.compat.v1.variable_scope('MLP_Concat'):
            self.mlp_different_copy = mlp(
                input_var=self._obs_input,
                output_dim=self._output_shape,
                hidden_sizes=(32, 32),
                input_var2=self._act_input,
                concat_layer=0,
                hidden_nonlinearity=self.hidden_nonlinearity,
                name='mlp2')

        # Initialize the new mlp variables
        self.sess.run(tf.compat.v1.global_variables_initializer())

        # We modify the weight of the default mlp and feed
        # The another mlp created should output different result
        with tf.compat.v1.variable_scope('MLP_Concat', reuse=True):
            w = tf.compat.v1.get_variable('mlp1/hidden_0/kernel')
            self.sess.run(w.assign(w + 1))

        mlp_output = self.sess.run(self.mlp_f,
                                   feed_dict={
                                       self._obs_input: self.obs_input,
                                       self._act_input: self.act_input
                                   })
        mlp_output2 = self.sess.run(self.mlp_different_copy,
                                    feed_dict={
                                        self._obs_input: self.obs_input,
                                        self._act_input: self.act_input
                                    })

        assert not np.array_equal(mlp_output, mlp_output2)

    def test_output_shape(self):
        """The network's output has the configured dimensionality."""
        mlp_output = self.sess.run(self.mlp_f,
                                   feed_dict={
                                       self._obs_input: self.obs_input,
                                       self._act_input: self.act_input
                                   })

        assert mlp_output.shape[1] == self._output_shape

    def test_output_value(self):
        """Output matches a manually composed concat/matmul/relu graph."""
        with tf.compat.v1.variable_scope('MLP_Concat', reuse=True):
            h1_w = tf.compat.v1.get_variable('mlp1/hidden_0/kernel')
            h1_b = tf.compat.v1.get_variable('mlp1/hidden_0/bias')
            h2_w = tf.compat.v1.get_variable('mlp1/hidden_1/kernel')
            h2_b = tf.compat.v1.get_variable('mlp1/hidden_1/bias')
            out_w = tf.compat.v1.get_variable('mlp1/output/kernel')
            out_b = tf.compat.v1.get_variable('mlp1/output/bias')

        mlp_output = self.sess.run(self.mlp_f,
                                   feed_dict={
                                       self._obs_input: self.obs_input,
                                       self._act_input: self.act_input
                                   })

        # First layer
        # (concat_layer=0, so both inputs are joined before layer 0)
        h2_in = tf.matmul(tf.concat([self._obs_input, self._act_input], 1),
                          h1_w) + h1_b
        h2_in = self.hidden_nonlinearity(h2_in)

        # Second layer
        h3_in = tf.matmul(h2_in, h2_w) + h2_b
        h3_in = self.hidden_nonlinearity(h3_in)

        # Output layer
        h3_out = tf.matmul(h3_in, out_w) + out_b
        out = self.sess.run(h3_out,
                            feed_dict={
                                self._obs_input: self.obs_input,
                                self._act_input: self.act_input
                            })

        np.testing.assert_array_equal(out, mlp_output)

    def test_layer_normalization(self):
        """Layer-normalized mlp matches a manual normalization graph."""
        # Create a mlp with layer normalization
        with tf.compat.v1.variable_scope('MLP_Concat'):
            self.mlp_f_w_n = mlp(input_var=self._obs_input,
                                 output_dim=self._output_shape,
                                 hidden_sizes=(32, 32),
                                 input_var2=self._act_input,
                                 concat_layer=0,
                                 hidden_nonlinearity=self.hidden_nonlinearity,
                                 name='mlp2',
                                 layer_normalization=True)

        # Initialize the new mlp variables
        self.sess.run(tf.compat.v1.global_variables_initializer())

        with tf.compat.v1.variable_scope('MLP_Concat', reuse=True):
            h1_w = tf.compat.v1.get_variable('mlp2/hidden_0/kernel')
            h1_b = tf.compat.v1.get_variable('mlp2/hidden_0/bias')
            h2_w = tf.compat.v1.get_variable('mlp2/hidden_1/kernel')
            h2_b = tf.compat.v1.get_variable('mlp2/hidden_1/bias')
            out_w = tf.compat.v1.get_variable('mlp2/output/kernel')
            out_b = tf.compat.v1.get_variable('mlp2/output/bias')

        # gamma/beta pairs created by the layer-normalization layers live
        # in the auto-uniquified 'MLP_Concat_1' scope.
        with tf.compat.v1.variable_scope('MLP_Concat_1', reuse=True) as vs:
            gamma_1, beta_1, gamma_2, beta_2 = vs.global_variables()

        # First layer
        y = tf.matmul(tf.concat([self._obs_input, self._act_input], 1),
                      h1_w) + h1_b
        y = self.hidden_nonlinearity(y)
        mean, variance = tf.nn.moments(y, [1], keepdims=True)
        normalized_y = (y - mean) / tf.sqrt(variance + 1e-12)
        y_out = normalized_y * gamma_1 + beta_1

        # Second layer
        y = tf.matmul(y_out, h2_w) + h2_b
        y = self.hidden_nonlinearity(y)
        mean, variance = tf.nn.moments(y, [1], keepdims=True)
        normalized_y = (y - mean) / tf.sqrt(variance + 1e-12)
        y_out = normalized_y * gamma_2 + beta_2

        # Output layer
        y = tf.matmul(y_out, out_w) + out_b

        out = self.sess.run(y,
                            feed_dict={
                                self._obs_input: self.obs_input,
                                self._act_input: self.act_input
                            })
        mlp_output = self.sess.run(self.mlp_f_w_n,
                                   feed_dict={
                                       self._obs_input: self.obs_input,
                                       self._act_input: self.act_input
                                   })

        np.testing.assert_array_almost_equal(out, mlp_output, decimal=2)

    @pytest.mark.parametrize('concat_idx', [2, 1, 0, -1, -2])
    def test_concat_layer(self, concat_idx):
        """concat_layer widens exactly the chosen layer's input kernel."""
        with tf.compat.v1.variable_scope('mlp_concat_test'):
            _ = mlp(input_var=self._obs_input,
                    output_dim=self._output_shape,
                    hidden_sizes=(64, 32),
                    input_var2=self._act_input,
                    concat_layer=concat_idx,
                    hidden_nonlinearity=self.hidden_nonlinearity,
                    name='mlp2')

        obs_input_size = self._obs_input.shape[1]
        act_input_size = self._act_input.shape[1]
        expected_units = [obs_input_size, 64, 32]
        # Only the concatenation layer receives the extra act input width.
        expected_units[concat_idx] += act_input_size

        actual_units = []
        with tf.compat.v1.variable_scope('mlp_concat_test', reuse=True):
            h1_w = tf.compat.v1.get_variable('mlp2/hidden_0/kernel')
            h2_w = tf.compat.v1.get_variable('mlp2/hidden_1/kernel')
            out_w = tf.compat.v1.get_variable('mlp2/output/kernel')
            actual_units.append(h1_w.shape[0])
            actual_units.append(h2_w.shape[0])
            actual_units.append(out_w.shape[0])

        assert np.array_equal(expected_units, actual_units)

    @pytest.mark.parametrize('concat_idx', [2, 1, 0, -1, -2])
    def test_invalid_concat_args(self, concat_idx):
        """Without input_var2, concat_layer is silently ignored."""
        with tf.compat.v1.variable_scope('mlp_concat_test'):
            _ = mlp(input_var=self._obs_input,
                    output_dim=self._output_shape,
                    hidden_sizes=(64, 32),
                    concat_layer=concat_idx,
                    hidden_nonlinearity=self.hidden_nonlinearity,
                    name='mlp_no_input2')

        obs_input_size = self._obs_input.shape[1]
        # concat_layer argument should be silently ignored.
        expected_units = [obs_input_size, 64, 32]

        actual_units = []
        with tf.compat.v1.variable_scope('mlp_concat_test', reuse=True):
            h1_w = tf.compat.v1.get_variable('mlp_no_input2/hidden_0/kernel')
            h2_w = tf.compat.v1.get_variable('mlp_no_input2/hidden_1/kernel')
            out_w = tf.compat.v1.get_variable('mlp_no_input2/output/kernel')
            actual_units.append(h1_w.shape[0])
            actual_units.append(h2_w.shape[0])
            actual_units.append(out_w.shape[0])

        assert np.array_equal(expected_units, actual_units)

    @pytest.mark.parametrize('concat_idx', [2, 1, 0, -1, -2])
    def test_no_hidden(self, concat_idx):
        """With no hidden layers, concat falls back to the input layer."""
        with tf.compat.v1.variable_scope('mlp_concat_test'):
            _ = mlp(input_var=self._obs_input,
                    output_dim=self._output_shape,
                    hidden_sizes=(),
                    input_var2=self._act_input,
                    concat_layer=concat_idx,
                    hidden_nonlinearity=self.hidden_nonlinearity,
                    name='mlp2')

        obs_input_size = self._obs_input.shape[1]
        act_input_size = self._act_input.shape[1]
        # concat_layer argument should be reset to point to input_var.
        expected_units = [obs_input_size]
        expected_units[0] += act_input_size

        actual_units = []
        with tf.compat.v1.variable_scope('mlp_concat_test', reuse=True):
            out_w = tf.compat.v1.get_variable('mlp2/output/kernel')
            actual_units.append(out_w.shape[0])

        assert np.array_equal(expected_units, actual_units)
| 12,567 | 42.944056 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_mlp_model.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models import MLPDuelingModel
from garage.tf.models import MLPMergeModel
from garage.tf.models import MLPModel
from tests.fixtures import TfGraphTestCase
class TestMLPModel(TfGraphTestCase):
    """Tests for MLPModel, MLPDuelingModel and MLPMergeModel outputs.

    All models are built with all-ones weight initializers and no hidden
    nonlinearity, so the expected output of a 5-dim all-ones input is
    exactly 5 * prod(hidden_sizes) per output unit.
    """
    def setup_method(self):
        super().setup_method()
        # (batch, 5) input placeholder shared by all tests in this class.
        self.input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        self.obs = np.ones((1, 5))
    # yapf: disable
    @pytest.mark.parametrize('output_dim, hidden_sizes', [
        (1, (0, )),
        (1, (1, )),
        (1, (2, )),
        (2, (3, )),
        (2, (1, 1)),
        (3, (2, 2)),
    ])
    # yapf: enable
    def test_output_values(self, output_dim, hidden_sizes):
        """MLPModel with all-ones weights produces the analytic output."""
        model = MLPModel(output_dim=output_dim,
                         hidden_sizes=hidden_sizes,
                         hidden_nonlinearity=None,
                         hidden_w_init=tf.ones_initializer(),
                         output_w_init=tf.ones_initializer())
        outputs = model.build(self.input_var).outputs
        output = self.sess.run(outputs, feed_dict={self.input_var: self.obs})
        # Linear network with all-ones weights: each layer multiplies the
        # per-unit sum by the previous layer's width.
        expected_output = np.full([1, output_dim], 5 * np.prod(hidden_sizes))
        assert np.array_equal(output, expected_output)
    # yapf: disable
    @pytest.mark.parametrize('output_dim, hidden_sizes', [
        (1, (0, )),
        (1, (1, )),
        (1, (2, )),
        (2, (3, )),
        (2, (1, 1)),
        (3, (2, 2)),
    ])
    # yapf: enable
    def test_output_values_dueling(self, output_dim, hidden_sizes):
        """MLPDuelingModel matches the same analytic output as MLPModel."""
        model = MLPDuelingModel(output_dim=output_dim,
                                hidden_sizes=hidden_sizes,
                                hidden_nonlinearity=None,
                                hidden_w_init=tf.ones_initializer(),
                                output_w_init=tf.ones_initializer())
        outputs = model.build(self.input_var).outputs
        output = self.sess.run(outputs, feed_dict={self.input_var: self.obs})
        expected_output = np.full([1, output_dim], 5 * np.prod(hidden_sizes))
        assert np.array_equal(output, expected_output)
    # yapf: disable
    @pytest.mark.parametrize('output_dim, hidden_sizes', [
        (1, (0, )),
        (1, (1, )),
        (1, (2, )),
        (2, (3, )),
        (2, (1, 1)),
        (3, (2, 2)),
    ])
    # yapf: enable
    def test_output_values_merging(self, output_dim, hidden_sizes):
        """MLPMergeModel concatenates two 5-dim inputs before the MLP."""
        model = MLPMergeModel(output_dim=output_dim,
                              hidden_sizes=hidden_sizes,
                              concat_layer=0,
                              hidden_nonlinearity=None,
                              hidden_w_init=tf.ones_initializer(),
                              output_w_init=tf.ones_initializer())
        input_var2 = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        obs2 = np.ones((1, 5))
        outputs = model.build(self.input_var, input_var2).outputs
        output = self.sess.run(outputs,
                               feed_dict={
                                   self.input_var: self.obs,
                                   input_var2: obs2
                               })
        # Two concatenated all-ones 5-dim inputs double the per-unit sum.
        expected_output = np.full([1, output_dim], 10 * np.prod(hidden_sizes))
        assert np.array_equal(output, expected_output)
    # yapf: disable
    @pytest.mark.parametrize('output_dim, hidden_sizes', [
        (1, (0, )),
        (1, (1, )),
        (1, (2, )),
        (2, (3, )),
        (2, (1, 1)),
        (3, (2, 2)),
    ])
    # yapf: enable
    def test_is_pickleable(self, output_dim, hidden_sizes):
        """A pickled model reproduces the same outputs in a fresh graph."""
        model = MLPModel(output_dim=output_dim,
                         hidden_sizes=hidden_sizes,
                         hidden_nonlinearity=None,
                         hidden_w_init=tf.ones_initializer(),
                         output_w_init=tf.ones_initializer())
        outputs = model.build(self.input_var).outputs
        # assign bias to all one
        with tf.compat.v1.variable_scope('MLPModel/mlp', reuse=True):
            bias = tf.compat.v1.get_variable('hidden_0/bias')
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(outputs, feed_dict={self.input_var: self.obs})
        h = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model_pickled = pickle.loads(h)
            outputs = model_pickled.build(input_var).outputs
            output2 = sess.run(outputs, feed_dict={input_var: self.obs})
            assert np.array_equal(output1, output2)
| 4,651 | 34.51145 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_model.py | import collections
import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
from tests.fixtures import TfGraphTestCase
class SimpleModel(Model):
    """Two-headed MLP model used as a fixture by the tests below."""

    def __init__(self, output_dim=2, hidden_sizes=(4, 4), name=None):
        super().__init__(name)
        self._output_dim = output_dim
        self._hidden_sizes = hidden_sizes

    def network_output_spec(self):
        """Name the two network outputs."""
        return ['state', 'action']

    # pylint: disable=arguments-differ
    def _build(self, obs_input, name=None):
        """Build a 'state' head and an 'action' head on top of obs_input."""
        del name
        # Head construction order matters: it fixes the TF variable-scope
        # names ('state' first, then 'action') that other tests rely on.
        heads = tuple(
            mlp(obs_input, self._output_dim, self._hidden_sizes, head_name)
            for head_name in ('state', 'action'))
        return heads
# This model doesn't implement network_output_spec
class SimpleModel2(Model):
    """Single-head MLP model that leaves its network outputs unnamed."""

    def __init__(self, output_dim=2, hidden_sizes=(4, 4), name=None):
        super().__init__(name)
        self._output_dim = output_dim
        self._hidden_sizes = hidden_sizes

    # pylint: disable=arguments-differ
    def _build(self, obs_input, name=None):
        """Build one MLP head (variable scope 'state') on obs_input."""
        del name
        return mlp(obs_input, self._output_dim, self._hidden_sizes, 'state')
class ComplicatedModel(Model):
    """Model composed of a SimpleModel feeding into a SimpleModel2."""

    def __init__(self, output_dim=2, name=None):
        super().__init__(name)
        self._output_dim = output_dim
        self._simple_model_1 = SimpleModel(output_dim=4)
        self._simple_model_2 = SimpleModel2(output_dim=output_dim,
                                            name='simple_model_2')

    def network_output_spec(self):
        """Name the single network output."""
        return ['action']

    # pylint: disable=arguments-differ
    def _build(self, obs_input, name=None):
        """Chain the submodels: obs -> first model's state head -> action."""
        del name
        state_head, _ = self._simple_model_1.build(obs_input).outputs
        return self._simple_model_2.build(state_head).outputs
# This model takes another model as constructor argument
class ComplicatedModel2(Model):
    """Model that wraps a caller-supplied parent model with an output head."""

    def __init__(self, parent_model, output_dim=2, name=None):
        super().__init__(name)
        self._output_dim = output_dim
        self._parent_model = parent_model
        self._output_model = SimpleModel2(output_dim=output_dim)

    def network_output_spec(self):
        """Name the single network output."""
        return ['action']

    # pylint: disable=arguments-differ
    def _build(self, obs_input, name=None):
        """Feed the parent model's first output into the output head."""
        del name
        parent_head, _ = self._parent_model.build(obs_input).outputs
        return self._output_model.build(parent_head).outputs
class TestModel(TfGraphTestCase):
    """Tests for garage.tf.models.Model build, naming and pickling."""
    def test_model_creation(self):
        """Building a model records a 'default' network with its outputs."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        model = SimpleModel(output_dim=2)
        outputs = model.build(input_var).outputs
        data = np.ones((3, 5))
        out, model_out = self.sess.run(
            [outputs, model._networks['default'].outputs],
            feed_dict={model._networks['default'].input: data})
        assert np.array_equal(out, model_out)
        # With no explicit name, the model is named after its class.
        assert model.name == type(model).__name__
    def test_model_creation_with_custom_name(self):
        """A custom model name and a custom network name are both honored."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        model = SimpleModel(output_dim=2, name='MySimpleModel')
        outputs = model.build(input_var, name='network_2').outputs
        data = np.ones((3, 5))
        result, result2 = self.sess.run(
            [outputs, model._networks['network_2'].outputs],
            feed_dict={model._networks['network_2'].input: data})
        assert np.array_equal(result, result2)
        assert model.name == 'MySimpleModel'
    def test_same_model_with_no_name(self):
        """Rebuilding under an already-used default name raises ValueError."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        another_input_var = tf.compat.v1.placeholder(tf.float32,
                                                     shape=(None, 5))
        model = SimpleModel(output_dim=2)
        model.build(input_var)
        with pytest.raises(ValueError):
            model.build(another_input_var)
        # A second model instance with the same class name also conflicts.
        model2 = SimpleModel(output_dim=2)
        with pytest.raises(ValueError):
            model2.build(another_input_var)
    def test_model_with_different_name(self):
        """Two networks of one model (default + named) share parameters."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        another_input_var = tf.compat.v1.placeholder(tf.float32,
                                                     shape=(None, 5))
        model = SimpleModel(output_dim=2)
        outputs_1 = model.build(input_var).outputs
        outputs_2 = model.build(another_input_var, name='network_2').outputs
        data = np.ones((3, 5))
        results_1, results_2 = self.sess.run([outputs_1, outputs_2],
                                             feed_dict={
                                                 input_var: data,
                                                 another_input_var: data
                                             }) # noqa: E126
        assert np.array_equal(results_1, results_2)
    def test_model_with_different_name_in_different_order(self):
        """Parameter sharing holds when the named network is built first."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        another_input_var = tf.compat.v1.placeholder(tf.float32,
                                                     shape=(None, 5))
        model = SimpleModel(output_dim=2)
        outputs_1 = model.build(input_var, name='network_1').outputs
        outputs_2 = model.build(another_input_var).outputs
        data = np.ones((3, 5))
        results_1, results_2 = self.sess.run([outputs_1, outputs_2],
                                             feed_dict={
                                                 input_var: data,
                                                 another_input_var: data
                                             }) # noqa: E126
        assert np.array_equal(results_1, results_2)
    def test_model_in_model(self):
        """A model that builds submodels internally still runs end-to-end."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        model = ComplicatedModel(output_dim=2)
        outputs = model.build(input_var).outputs
        data = np.ones((3, 5))
        out, model_out = self.sess.run(
            [outputs, model._networks['default'].outputs],
            feed_dict={model._networks['default'].input: data})
        assert np.array_equal(out, model_out)
    def test_model_as_constructor_argument(self):
        """A submodel passed via the constructor is built correctly."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        parent_model = SimpleModel(output_dim=4)
        model = ComplicatedModel2(parent_model=parent_model, output_dim=2)
        outputs = model.build(input_var).outputs
        data = np.ones((3, 5))
        out, model_out = self.sess.run(
            [outputs, model._networks['default'].outputs],
            feed_dict={model._networks['default'].input: data})
        assert np.array_equal(out, model_out)
    def test_model_is_pickleable(self):
        """Pickle round-trip preserves (modified) parameter values."""
        data = np.ones((3, 5))
        model = SimpleModel(output_dim=2)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model.build(input_var)
            # assign bias to all one
            with tf.compat.v1.variable_scope('SimpleModel/state', reuse=True):
                bias = tf.compat.v1.get_variable('hidden_0/bias')
                bias.load(tf.ones_like(bias).eval())
            results = sess.run(
                model._networks['default'].outputs,
                feed_dict={model._networks['default'].input: data})
        model_data = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model_pickled = pickle.loads(model_data)
            outputs = model_pickled.build(input_var).outputs
            results2 = sess.run(outputs, feed_dict={input_var: data})
        assert np.array_equal(results, results2)
    def test_model_pickle_without_building(self):
        """An unbuilt model can be pickled; its name survives the trip."""
        model = SimpleModel(output_dim=2)
        model_data = pickle.dumps(model)
        model_pickled = pickle.loads(model_data)
        assert np.array_equal(model.name, model_pickled.name)
    def test_complicated_model_is_pickleable(self):
        """Pickle round-trip of a nested model preserves outputs."""
        data = np.ones((3, 5))
        model = ComplicatedModel(output_dim=2)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            outputs = model.build(input_var).outputs
            # assign bias to all one
            with tf.compat.v1.variable_scope(
                    'ComplicatedModel/SimpleModel/state', reuse=True):
                bias = tf.compat.v1.get_variable('hidden_0/bias')
                bias.load(tf.ones_like(bias).eval())
            results = sess.run(
                outputs, feed_dict={model._networks['default'].input: data})
        model_data = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model_pickled = pickle.loads(model_data)
            model_pickled.build(input_var)
            results2 = sess.run(model_pickled._networks['default'].outputs,
                                feed_dict={input_var: data})
        assert np.array_equal(results, results2)
    def test_complicated_model2_is_pickleable(self):
        """Pickle round-trip of a constructor-injected submodel works too."""
        data = np.ones((3, 5))
        parent_model = SimpleModel(output_dim=4)
        model = ComplicatedModel2(parent_model=parent_model, output_dim=2)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            outputs = model.build(input_var).outputs
            # assign bias to all one
            with tf.compat.v1.variable_scope(
                    'ComplicatedModel2/SimpleModel/state', reuse=True):
                bias = tf.compat.v1.get_variable('hidden_0/bias')
                bias.load(tf.ones_like(bias).eval())
            results = sess.run(
                outputs, feed_dict={model._networks['default'].input: data})
        model_data = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
            model_pickled = pickle.loads(model_data)
            model_pickled.build(input_var)
            results2 = sess.run(model_pickled._networks['default'].outputs,
                                feed_dict={input_var: data})
        assert np.array_equal(results, results2)
    def test_simple_model_is_pickleable_with_same_parameters(self):
        """Explicitly-set parameter values survive the pickle round-trip."""
        model = SimpleModel(output_dim=2)
        with tf.compat.v1.Session(graph=tf.Graph()):
            state = tf.compat.v1.placeholder(shape=[None, 10, 5],
                                             dtype=tf.float32)
            model.build(state)
            # Zero then re-set to ones so the saved values are known exactly.
            model.parameters = {
                k: np.zeros_like(v)
                for k, v in model.parameters.items()
            }
            all_one = {k: np.ones_like(v) for k, v in model.parameters.items()}
            model.parameters = all_one
            h_data = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()):
            model_pickled = pickle.loads(h_data)
            state = tf.compat.v1.placeholder(shape=[None, 10, 5],
                                             dtype=tf.float32)
            model_pickled.build(state)
            np.testing.assert_equal(all_one, model_pickled.parameters)
    def test_simple_model_is_pickleable_with_missing_parameters(self):
        """Building with a missing pickled parameter warns instead of failing."""
        model = SimpleModel(output_dim=2)
        with tf.compat.v1.Session(graph=tf.Graph()):
            state = tf.compat.v1.placeholder(shape=[None, 10, 5],
                                             dtype=tf.float32)
            model.build(state)
            model.parameters = {
                k: np.zeros_like(v)
                for k, v in model.parameters.items()
            }
            all_one = {k: np.ones_like(v) for k, v in model.parameters.items()}
            model.parameters = all_one
            h_data = pickle.dumps(model)
        with tf.compat.v1.Session(graph=tf.Graph()):
            model_pickled = pickle.loads(h_data)
            state = tf.compat.v1.placeholder(shape=[None, 10, 5],
                                             dtype=tf.float32)
            # remove one of the parameters
            del model_pickled._default_parameters[
                'SimpleModel/state/hidden_0/kernel:0']
            with pytest.warns(UserWarning):
                model_pickled.build(state)
    def test_model_set_parameters(self):
        """Assigning one model's parameters to another copies all values."""
        model1 = SimpleModel(output_dim=2, name='model1')
        model2 = SimpleModel(output_dim=2, name='model2')
        input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 5))
        model1.build(input_var)
        model2.build(input_var)
        model1.parameters = model2.parameters
        # Sort both dicts so parameters are compared pairwise by key order.
        for m1, m2 in zip(
                collections.OrderedDict(sorted(
                    model1.parameters.items())).values(),
                collections.OrderedDict(sorted(
                    model2.parameters.items())).values()):
            assert np.array_equal(m1, m2)
| 13,214 | 39.045455 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/models/test_parameter.py | import numpy as np
import tensorflow as tf
from garage.tf.models.parameter import parameter
from garage.tf.models.parameter import recurrent_parameter
from tests.fixtures import TfGraphTestCase
class TestParameter(TfGraphTestCase):
    """Tests for parameter() and recurrent_parameter() broadcasting."""

    def setup_method(self):
        super().setup_method()
        # Batched (None, 2, 5) input and per-step (None, 5) input.
        self.input_vars = tf.compat.v1.placeholder(dtype=tf.float32,
                                                   shape=[None, 2, 5])
        self.step_input_vars = tf.compat.v1.placeholder(dtype=tf.float32,
                                                        shape=[None, 5])
        self.initial_params = np.array([48, 21, 33])
        self.data = np.zeros(shape=[5, 2, 5])
        self.step_data = np.zeros(shape=[5, 5])
        self.feed_dict = {self.input_vars: self.data,
                          self.step_input_vars: self.step_data}

    def test_param(self):
        """The parameter broadcasts over the batch dim of the input."""
        broadcast = parameter(input_var=self.input_vars,
                              length=3,
                              initializer=tf.constant_initializer(
                                  self.initial_params))
        self.sess.run(tf.compat.v1.global_variables_initializer())
        value = self.sess.run(broadcast, feed_dict=self.feed_dict)
        assert value.shape == (5, 3)
        assert np.all(value == self.initial_params)

    def test_recurrent_param(self):
        """The recurrent parameter broadcasts over batch and time dims."""
        broadcast, _ = recurrent_parameter(input_var=self.input_vars,
                                           step_input_var=self.step_input_vars,
                                           length=3,
                                           initializer=tf.constant_initializer(
                                               self.initial_params))
        self.sess.run(tf.compat.v1.global_variables_initializer())
        value = self.sess.run(broadcast, feed_dict=self.feed_dict)
        assert value.shape == (5, 2, 3)
        assert np.array_equal(value, np.full([5, 2, 3], self.initial_params))
| 1,901 | 38.625 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/optimizers/test_conjugate_gradient_optimizer.py | """Tests for garage.tf.optimizers.conjugateGradientOptimizer"""
import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.optimizers.conjugate_gradient_optimizer import (
cg, ConjugateGradientOptimizer, FiniteDifferenceHvp, PearlmutterHvp)
from garage.tf.policies import Policy
from tests.fixtures import TfGraphTestCase
class HelperPolicy(Policy):
    """Minimal policy exposing ``n_vars`` scalar variables for HVP tests."""

    def __init__(self, n_vars, name='OneParamPolicy'):
        super().__init__(name, None)
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            # Create the requested number of trainable scalar variables;
            # only their existence matters, not their references.
            for _ in range(n_vars):
                tf.Variable([0.])

    def get_action(self, observation):
        """Unused; present only to satisfy the Policy interface."""

    def get_actions(self, observations):
        """Unused; present only to satisfy the Policy interface."""
class TestConjugateGradientOptimizer(TfGraphTestCase):
    """Test class for ConjugateGradientOptimizer and HVP classes"""

    def test_cg(self):
        """Solve Ax = b using Conjugate gradient method."""
        mat = np.linspace(-np.pi, np.pi, 25).reshape((5, 5))
        # A^T A is positive semi-definite, which CG requires.
        mat = mat.T.dot(mat)
        rhs = np.linspace(-np.pi, np.pi, 5)
        solution = cg(mat.dot, rhs, cg_iters=5)
        assert np.allclose(mat.dot(solution), rhs)

    def test_pickleable(self):
        """The optimizer computes the same loss after a pickle round-trip."""
        policy = HelperPolicy(n_vars=1)
        var = policy.get_params()[0]
        coeff_val = np.array([5.0], dtype=np.float32)
        coeff = tf.constant(coeff_val)
        loss = coeff * (var**2)
        constraint = (loss, 0.0)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        opt = ConjugateGradientOptimizer()
        opt.update_opt(loss, policy, constraint, [coeff])
        opt.optimize([coeff_val])
        loss_before = opt.loss([coeff_val])
        opt = pickle.loads(pickle.dumps(opt))
        opt.update_opt(loss, policy, constraint, [coeff])
        loss_after = opt.loss([coeff_val])
        assert np.equal(loss_before, loss_after)
class TestPearlmutterHvp(TfGraphTestCase):
    """Test class for PearlmutterHvp"""
    def test_pearl_mutter_hvp_1x1(self):
        """Test Hessian-vector product for a function with one variable."""
        policy = HelperPolicy(n_vars=1)
        x = policy.get_params()[0]
        a_val = np.array([5.0])
        # The constant's graph value is 0.0; the actual coefficient a_val is
        # supplied at evaluation time through build_eval below — presumably
        # the HVP treats `a` as a fed input (see update_hvp's inputs arg).
        a = tf.constant([0.0])
        f = a * (x**2)
        # d^2/dx^2 (a * x^2) = 2a, so the HVP is (2a) * vector.
        expected_hessian = 2 * a_val
        vector = np.array([10.0])
        expected_hvp = expected_hessian * vector
        reg_coeff = 1e-5
        hvp = PearlmutterHvp()
        self.sess.run(tf.compat.v1.global_variables_initializer())
        hvp.update_hvp(f, policy, (a, ), reg_coeff)
        hx = hvp.build_eval(np.array([a_val]))
        computed_hvp = hx(vector)
        assert np.allclose(computed_hvp, expected_hvp)
    @pytest.mark.parametrize('a_val, b_val, x_val, y_val, vector', [
        (1.0, 1.0, 1.0, 1.0, [10.0, 20.0]),
        (5.0, 10.0, -2.0, 5.0, [0.0, -1.0]),
        (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]),
        (-2.2, -1.5, -12.3, 34.8, [2.2, 5.3]),
        (-1.5, 0.0, -0.002, 4.93, [0.1, -0.02]),
    ])
    def test_pearl_mutter_hvp_2x2(self, a_val, b_val, x_val, y_val, vector):
        """Test Hessian-vector product for a function with two variables."""
        a_val = [a_val]
        b_val = [b_val]
        vector = np.array([vector], dtype=np.float32)
        policy = HelperPolicy(n_vars=2)
        params = policy.get_params()
        x, y = params[0], params[1]
        a = tf.constant(a_val)
        b = tf.constant(b_val)
        f = a * (x**2) + b * (y**2)
        # Reference value: v @ H computed symbolically via compute_hessian.
        expected_hessian = compute_hessian(f, [x, y])
        expected_hvp = tf.matmul(vector, expected_hessian)
        reg_coeff = 1e-5
        hvp = PearlmutterHvp()
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.sess.run(x.assign([x_val]))
        self.sess.run(y.assign([y_val]))
        hvp.update_hvp(f, policy, (a, b), reg_coeff)
        hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
        hvp = hx(vector[0])
        expected_hvp = expected_hvp.eval()
        assert np.allclose(hvp, expected_hvp, atol=1e-6)
    @pytest.mark.parametrize('a_val, b_val, x_val, y_val, vector', [
        (1.0, 1.0, 1.0, 1.0, [10.0, 20.0]),
        (5.0, 10.0, -2.0, 5.0, [0.0, -1.0]),
        (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]),
        (-2.2, -1.5, -12.3, 34.8, [2.2, 5.3]),
        (-1.5, 0.0, -0.002, 4.93, [0.1, -0.02]),
    ])
    def test_pearl_mutter_hvp_2x2_non_diagonal(self, a_val, b_val, x_val,
                                               y_val, vector):
        """Test Hessian-vector product for a function with two variables whose Hessian
        is non-diagonal.
        """
        a_val = [a_val]
        b_val = [b_val]
        vector = np.array([vector], dtype=np.float32)
        policy = HelperPolicy(n_vars=2)
        params = policy.get_params()
        x, y = params[0], params[1]
        a = tf.constant(a_val)
        b = tf.constant(b_val)
        # Cross terms x^2*y and y^2*x make the off-diagonal Hessian nonzero.
        f = a * (x**3) + b * (y**3) + (x**2) * y + (y**2) * x
        expected_hessian = compute_hessian(f, [x, y])
        expected_hvp = tf.matmul(vector, expected_hessian)
        reg_coeff = 1e-5
        hvp = PearlmutterHvp()
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.sess.run(x.assign([x_val]))
        self.sess.run(y.assign([y_val]))
        hvp.update_hvp(f, policy, (a, b), reg_coeff)
        hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
        hvp = hx(vector[0])
        expected_hvp = expected_hvp.eval()
        assert np.allclose(hvp, expected_hvp)
    def test_pickleable(self):
        """The HVP evaluator gives identical results after pickling."""
        policy = HelperPolicy(n_vars=1)
        x = policy.get_params()[0]
        a_val = np.array([5.0])
        a = tf.constant([0.0])
        f = a * (x**2)
        vector = np.array([10.0])
        reg_coeff = 1e-5
        hvp = PearlmutterHvp()
        self.sess.run(tf.compat.v1.global_variables_initializer())
        hvp.update_hvp(f, policy, (a, ), reg_coeff)
        hx = hvp.build_eval(np.array([a_val]))
        before_pickle = hx(vector)
        hvp = pickle.loads(pickle.dumps(hvp))
        hvp.update_hvp(f, policy, (a, ), reg_coeff)
        after_pickle = hx(vector)
        assert np.equal(before_pickle, after_pickle)
class TestFiniteDifferenceHvp(TfGraphTestCase):
    """Test class for FiniteDifferenceHvp"""
    def test_finite_difference_hvp(self):
        """Test Hessian-vector product for a function with one variable."""
        policy = HelperPolicy(n_vars=1)
        x = policy.get_params()[0]
        a_val = np.array([5.0])
        # The constant's graph value is 0.0; a_val is supplied at evaluation
        # time via build_eval — presumably `a` is treated as a fed input.
        a = tf.constant([0.0])
        f = a * (x**2)
        # d^2/dx^2 (a * x^2) = 2a, so the HVP is (2a) * vector.
        expected_hessian = 2 * a_val
        vector = np.array([10.0])
        expected_hvp = expected_hessian * vector
        reg_coeff = 1e-5
        hvp = FiniteDifferenceHvp()
        self.sess.run(tf.compat.v1.global_variables_initializer())
        hvp.update_hvp(f, policy, (a, ), reg_coeff)
        hx = hvp.build_eval(np.array([a_val]))
        computed_hvp = hx(vector)
        assert np.allclose(computed_hvp, expected_hvp)
    @pytest.mark.parametrize('a_val, b_val, x_val, y_val, vector', [
        (1.0, 1.0, 1.0, 1.0, [10.0, 20.0]),
        (5.0, 10.0, -2.0, 5.0, [0.0, -1.0]),
        (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]),
        (-2.2, -1.5, -12.3, 34.8, [2.2, 5.3]),
        (-1.5, 0.0, -0.002, 4.93, [0.1, -0.02]),
    ])
    def test_finite_difference_hvp_2x2(self, a_val, b_val, x_val, y_val,
                                       vector):
        """Test Hessian-vector product for a function with two variables."""
        a_val = [a_val]
        b_val = [b_val]
        vector = np.array([vector], dtype=np.float32)
        policy = HelperPolicy(n_vars=2)
        params = policy.get_params()
        x, y = params[0], params[1]
        a = tf.constant(a_val)
        b = tf.constant(b_val)
        f = a * (x**2) + b * (y**2)
        # Reference value: v @ H computed symbolically via compute_hessian.
        expected_hessian = compute_hessian(f, [x, y])
        expected_hvp = tf.matmul(vector, expected_hessian)
        reg_coeff = 1e-8
        hvp = FiniteDifferenceHvp(base_eps=1.0)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.sess.run(x.assign([x_val]))
        self.sess.run(y.assign([y_val]))
        hvp.update_hvp(f, policy, (a, b), reg_coeff)
        hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
        hvp = hx(vector[0])
        expected_hvp = expected_hvp.eval()
        assert np.allclose(hvp, expected_hvp)
    @pytest.mark.parametrize('a_val, b_val, x_val, y_val, vector', [
        (1.0, 1.0, 1.0, 1.0, [10.0, 20.0]),
        (5.0, 10.0, -2.0, 5.0, [0.0, -1.0]),
        (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]),
        (-2.2, -1.5, -12.3, 34.8, [2.2, 5.3]),
        (-1.5, 0.0, -0.002, 4.93, [0.1, -0.02]),
    ])
    def test_finite_difference_hvp_2x2_non_diagonal(self, a_val, b_val, x_val,
                                                    y_val, vector):
        """Test Hessian-vector product for a function with two variables whose Hessian
        is non-diagonal.
        """
        a_val = [a_val]
        b_val = [b_val]
        vector = np.array([vector], dtype=np.float32)
        policy = HelperPolicy(n_vars=2)
        params = policy.get_params()
        x, y = params[0], params[1]
        a = tf.constant(a_val)
        b = tf.constant(b_val)
        # Cross terms x^2*y and y^2*x make the off-diagonal Hessian nonzero.
        f = a * (x**3) + b * (y**3) + (x**2) * y + (y**2) * x
        expected_hessian = compute_hessian(f, [x, y])
        expected_hvp = tf.matmul(vector, expected_hessian)
        reg_coeff = 1e-5
        hvp = FiniteDifferenceHvp(base_eps=1)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.sess.run(x.assign([x_val]))
        self.sess.run(y.assign([y_val]))
        hvp.update_hvp(f, policy, (a, b), reg_coeff)
        hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
        hvp = hx(vector[0])
        expected_hvp = expected_hvp.eval()
        assert np.allclose(hvp, expected_hvp)
    def test_pickleable(self):
        """The HVP evaluator gives identical results after pickling."""
        policy = HelperPolicy(n_vars=1)
        x = policy.get_params()[0]
        a_val = np.array([5.0])
        a = tf.constant([0.0])
        f = a * (x**2)
        vector = np.array([10.0])
        reg_coeff = 1e-5
        hvp = FiniteDifferenceHvp()
        self.sess.run(tf.compat.v1.global_variables_initializer())
        hvp.update_hvp(f, policy, (a, ), reg_coeff)
        hx = hvp.build_eval(np.array([a_val]))
        before_pickle = hx(vector)
        hvp = pickle.loads(pickle.dumps(hvp))
        hvp.update_hvp(f, policy, (a, ), reg_coeff)
        after_pickle = hx(vector)
        assert np.equal(before_pickle, after_pickle)
def compute_hessian(f, params):
    """Symbolically compute the Hessian of scalar ``f`` w.r.t. ``params``.

    Returns a (len(params), len(params)) tensor of second derivatives,
    built by nesting tf.gradients.
    """
    rows = []
    for var_i in params:
        row = []
        for var_j in params:
            second = tf.gradients(tf.gradients(f, var_j)[0], var_i)[0]
            # tf.gradients returns None for unconnected terms; treat as 0.
            row.append([0.] if second is None else second)
        rows.append(tf.convert_to_tensor(row))
    hessian = tf.convert_to_tensor(rows)
    return tf.reshape(hessian, (len(params), len(params)))
| 10,947 | 35.372093 | 86 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_categorical_cnn_policy.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import CategoricalCNNPolicy
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyDictEnv, DummyDiscretePixelEnv
class TestCategoricalCNNPolicyWithModel(TfGraphTestCase):
    """Tests for CategoricalCNNPolicy over a discrete pixel environment."""
    @pytest.mark.parametrize('filters, strides, padding, hidden_sizes', [
        (((3, (3, 3)), ), (1, ), 'VALID', (4, )),
        (((3, (3, 3)), (3, (3, 3))), (1, 1), 'VALID', (4, 4)),
        (((3, (3, 3)), (3, (3, 3))), (2, 2), 'SAME', (4, 4)),
    ])
    def test_get_action(self, filters, strides, padding, hidden_sizes):
        """Sampled actions (single and batched) lie in the action space."""
        env = GarageEnv(DummyDiscretePixelEnv())
        policy = CategoricalCNNPolicy(env_spec=env.spec,
                                      filters=filters,
                                      strides=strides,
                                      padding=padding,
                                      hidden_sizes=hidden_sizes)
        env.reset()
        obs, _, _, _ = env.step(1)
        action, _ = policy.get_action(obs)
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs, obs, obs])
        for action in actions:
            assert env.action_space.contains(action)
    @pytest.mark.parametrize('filters, strides, padding, hidden_sizes', [
        (((3, (3, 3)), ), (1, ), 'VALID', (4, )),
        (((3, (3, 3)), (3, (3, 3))), (1, 1), 'VALID', (4, 4)),
        (((3, (3, 3)), (3, (3, 3))), (2, 2), 'SAME', (4, 4)),
    ])
    def test_build(self, filters, strides, padding, hidden_sizes):
        """A second build() shares parameters with the default network."""
        env = GarageEnv(DummyDiscretePixelEnv())
        policy = CategoricalCNNPolicy(env_spec=env.spec,
                                      filters=filters,
                                      strides=strides,
                                      padding=padding,
                                      hidden_sizes=hidden_sizes)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None) +
                                               policy.input_dim)
        dist_sym = policy.build(state_input, name='dist_sym').dist
        output1 = self.sess.run([policy.distribution.probs],
                                feed_dict={policy.model.input: [[obs]]})
        output2 = self.sess.run([dist_sym.probs],
                                feed_dict={state_input: [[obs]]})
        assert np.array_equal(output1, output2)
    def test_is_pickleable(self):
        """A pickled policy reproduces the same action distribution."""
        env = GarageEnv(DummyDiscretePixelEnv())
        policy = CategoricalCNNPolicy(env_spec=env.spec,
                                      filters=((3, (32, 32)), ),
                                      strides=(1, ),
                                      padding='SAME',
                                      hidden_sizes=(4, ))
        env.reset()
        obs, _, _, _ = env.step(1)
        # Overwrite biases so nontrivial values must survive pickling.
        with tf.compat.v1.variable_scope(
                'CategoricalCNNPolicy/CategoricalCNNModel', reuse=True):
            cnn_bias = tf.compat.v1.get_variable('CNNModel/cnn/h0/bias')
            bias = tf.compat.v1.get_variable('MLPModel/mlp/hidden_0/bias')
            cnn_bias.load(tf.ones_like(cnn_bias).eval())
            bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(policy.distribution.probs,
                                feed_dict={policy.model.input: [[obs]]})
        p = pickle.dumps(policy)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run(policy_pickled.distribution.probs,
                               feed_dict={policy_pickled.model.input: [[obs]]})
            assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('filters, strides, padding, hidden_sizes', [
        (((3, (32, 32)), ), (1, ), 'VALID', (4, )),
    ])
    def test_does_not_support_dict_obs_space(self, filters, strides, padding,
                                             hidden_sizes):
        """Test that policy raises error if passed a dict obs space."""
        env = GarageEnv(DummyDictEnv(act_space_type='discrete'))
        with pytest.raises(ValueError):
            CategoricalCNNPolicy(env_spec=env.spec,
                                 filters=filters,
                                 strides=strides,
                                 padding=padding,
                                 hidden_sizes=hidden_sizes)
class TestCategoricalCNNPolicyImageObs(TfGraphTestCase):
    """Tests CategoricalCNNPolicy with an image observation space."""

    def setup_method(self):
        super().setup_method()
        self.env = GarageEnv(DummyDiscretePixelEnv(), is_image=True)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.env.reset()

    @pytest.mark.parametrize('filters, strides, padding, hidden_sizes', [
        (((3, (32, 32)), ), (1, ), 'VALID', (4, )),
    ])
    def test_obs_unflattened(self, filters, strides, padding, hidden_sizes):
        """get_action accepts a flattened image observation."""
        self.policy = CategoricalCNNPolicy(env_spec=self.env.spec,
                                           filters=filters,
                                           strides=strides,
                                           padding=padding,
                                           hidden_sizes=hidden_sizes)
        sample_obs = self.env.observation_space.sample()
        flat_obs = self.env.observation_space.flatten(sample_obs)
        action, _ = self.policy.get_action(flat_obs)
        self.env.step(action)
| 5,520 | 41.79845 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_categorical_gru_policy.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import CategoricalGRUPolicy
# yapf: disable
from tests.fixtures import TfGraphTestCase # noqa: I202
from tests.fixtures.envs.dummy import (DummyBoxEnv,
DummyDictEnv,
DummyDiscreteEnv)
# yapf: enable
class TestCategoricalGRUPolicy(TfGraphTestCase):
    """Tests for CategoricalGRUPolicy over discrete/dict environments."""
    def test_invalid_env(self):
        """A continuous (Box) action space must be rejected."""
        env = GarageEnv(DummyBoxEnv())
        with pytest.raises(ValueError):
            CategoricalGRUPolicy(env_spec=env.spec)
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), 1, 4),
        ((2, ), 2, 4),
        ((1, 1), 1, 4),
        ((2, 2), 2, 4),
    ])
    def test_get_action_state_include_action(self, obs_dim, action_dim,
                                             hidden_dim):
        """Sampled actions are valid when the state includes prev action."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalGRUPolicy(env_spec=env.spec,
                                      hidden_dim=hidden_dim,
                                      state_include_action=True)
        policy.reset()
        obs = env.reset()
        action, _ = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), 1, 4),
        ((2, ), 2, 4),
        ((1, 1), 1, 4),
        ((2, 2), 2, 4),
    ])
    def test_build_state_include_action(self, obs_dim, action_dim, hidden_dim):
        """A second build() matches the default network's probabilities."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalGRUPolicy(env_spec=env.spec,
                                      hidden_dim=hidden_dim,
                                      state_include_action=True)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        # With state_include_action, input = obs concatenated with a
        # zero previous-action vector.
        concat_obs = np.concatenate([obs.flatten(), np.zeros(action_dim)])
        output1 = self.sess.run(
            [policy.distribution.probs],
            feed_dict={policy.model.input: [[concat_obs], [concat_obs]]})
        output2 = self.sess.run(
            [dist_sym.probs],
            feed_dict={state_input: [[concat_obs], [concat_obs]]})
        assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), 1, 4),
        ((2, ), 2, 4),
        ((1, 1), 1, 4),
        ((2, 2), 2, 4),
    ])
    def test_build_state_not_include_action(self, obs_dim, action_dim,
                                            hidden_dim):
        """Same as above, but the input is the observation alone."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalGRUPolicy(env_spec=env.spec,
                                      hidden_dim=hidden_dim,
                                      state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        output1 = self.sess.run(
            [policy.distribution.probs],
            feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        output2 = self.sess.run(
            [dist_sym.probs],
            feed_dict={state_input: [[obs.flatten()], [obs.flatten()]]})
        assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim, obs_type', [
        ((1, ), 1, 4, 'discrete'),
        ((2, ), 2, 4, 'discrete'),
        ((1, 1), 1, 4, 'discrete'),
        ((2, 2), 2, 4, 'discrete'),
        ((1, ), 1, 4, 'dict'),
    ])
    def test_get_action(self, obs_dim, action_dim, hidden_dim, obs_type):
        """Sampled actions are valid for discrete and dict observations."""
        assert obs_type in ['discrete', 'dict']
        if obs_type == 'discrete':
            env = GarageEnv(
                DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        else:
            env = GarageEnv(
                DummyDictEnv(obs_space_type='box', act_space_type='discrete'))
        policy = CategoricalGRUPolicy(env_spec=env.spec,
                                      hidden_dim=hidden_dim,
                                      state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        if obs_type == 'discrete':
            obs = obs.flatten()
        action, _ = policy.get_action(obs)
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs])
        for action in actions:
            assert env.action_space.contains(action)
    def test_is_pickleable(self):
        """A pickled policy reproduces the same action distribution."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(1, ), action_dim=1))
        policy = CategoricalGRUPolicy(env_spec=env.spec,
                                      state_include_action=False)
        obs = env.reset()
        # Overwrite GRU weights so nontrivial values must survive pickling.
        policy.model._gru_cell.weights[0].load(
            tf.ones_like(policy.model._gru_cell.weights[0]).eval())
        output1 = self.sess.run(
            [policy.distribution.probs],
            feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        p = pickle.dumps(policy)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            # yapf: disable
            output2 = sess.run(
                [policy_pickled.distribution.probs],
                feed_dict={
                    policy_pickled.model.input: [[obs.flatten()],
                                                 [obs.flatten()]]
                })
            # yapf: enable
            assert np.array_equal(output1, output2)
    def test_state_info_specs(self):
        """No extra state info is tracked without state_include_action."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(10, ), action_dim=4))
        policy = CategoricalGRUPolicy(env_spec=env.spec,
                                      state_include_action=False)
        assert policy.state_info_specs == []
    def test_state_info_specs_with_state_include_action(self):
        """The previous action is tracked when state_include_action=True."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(10, ), action_dim=4))
        policy = CategoricalGRUPolicy(env_spec=env.spec,
                                      state_include_action=True)
        assert policy.state_info_specs == [('prev_action', (4, ))]
| 6,816 | 37.954286 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_categorical_lstm_policy.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import CategoricalLSTMPolicy
# yapf: disable
from tests.fixtures import TfGraphTestCase # noqa: I202
from tests.fixtures.envs.dummy import (DummyBoxEnv,
DummyDictEnv,
DummyDiscreteEnv)
# yapf: enable
class TestCategoricalLSTMPolicy(TfGraphTestCase):
    """Unit tests for CategoricalLSTMPolicy."""
    def test_invalid_env(self):
        """Constructing with a continuous (Box) action space raises."""
        env = GarageEnv(DummyBoxEnv())
        with pytest.raises(ValueError):
            CategoricalLSTMPolicy(env_spec=env.spec)
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim, obs_type', [
        ((1, ), 1, 4, 'discrete'),
        ((2, ), 2, 4, 'discrete'),
        ((1, 1), 1, 4, 'discrete'),
        ((2, 2), 2, 4, 'discrete'),
        ((1, ), 1, 4, 'dict'),
    ])
    def test_get_action_state_include_action(self, obs_dim, action_dim,
                                             hidden_dim, obs_type):
        """Sampled actions are valid when prev actions are fed as state."""
        assert obs_type in ['discrete', 'dict']
        if obs_type == 'discrete':
            env = GarageEnv(
                DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        else:
            env = GarageEnv(
                DummyDictEnv(obs_space_type='box', act_space_type='discrete'))
        policy = CategoricalLSTMPolicy(env_spec=env.spec,
                                       hidden_dim=hidden_dim,
                                       state_include_action=True)
        policy.reset()
        obs = env.reset()
        if obs_type == 'discrete':
            # Discrete observations are flattened by the test; dict
            # observations are passed through unchanged.
            obs = obs.flatten()
        action, _ = policy.get_action(obs)
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs])
        for action in actions:
            assert env.action_space.contains(action)
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), 1, 4),
        ((2, ), 2, 4),
        ((1, 1), 1, 4),
        ((2, 2), 2, 4),
    ])
    def test_get_action(self, obs_dim, action_dim, hidden_dim):
        """Sampled actions are valid without state_include_action."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalLSTMPolicy(env_spec=env.spec,
                                       hidden_dim=hidden_dim,
                                       state_include_action=False)
        policy.reset()
        obs = env.reset()
        action, _ = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), 1, 4),
        ((2, ), 2, 4),
        ((1, 1), 1, 4),
        ((2, 2), 2, 4),
    ])
    def test_build_state_include_action(self, obs_dim, action_dim, hidden_dim):
        """A rebuilt distribution matches the default one (with prev action)."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalLSTMPolicy(env_spec=env.spec,
                                       hidden_dim=hidden_dim,
                                       state_include_action=True)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        # With state_include_action the policy input is the observation
        # concatenated with the (zero-initialized) previous action.
        concat_obs = np.concatenate([obs.flatten(), np.zeros(action_dim)])
        output1 = self.sess.run(
            [policy.distribution.probs],
            feed_dict={policy.model.input: [[concat_obs], [concat_obs]]})
        output2 = self.sess.run(
            [dist_sym.probs],
            feed_dict={state_input: [[concat_obs], [concat_obs]]})
        assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), 1, 4),
        ((2, ), 2, 4),
        ((1, 1), 1, 4),
        ((2, 2), 2, 4),
    ])
    def test_build_state_not_include_action(self, obs_dim, action_dim,
                                            hidden_dim):
        """A rebuilt distribution matches the default one (no prev action)."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalLSTMPolicy(env_spec=env.spec,
                                       hidden_dim=hidden_dim,
                                       state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        output1 = self.sess.run(
            [policy.distribution.probs],
            feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        output2 = self.sess.run(
            [dist_sym.probs],
            feed_dict={state_input: [[obs.flatten()], [obs.flatten()]]})
        assert np.array_equal(output1, output2)
    def test_is_pickleable(self):
        """Pickling round-trips the policy, including learned parameters."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(1, ), action_dim=1))
        policy = CategoricalLSTMPolicy(env_spec=env.spec,
                                       state_include_action=False)
        policy.reset()
        obs = env.reset()
        # Overwrite the LSTM weights with ones so the comparison below is
        # against non-default parameter values.
        policy.model._lstm_cell.weights[0].load(
            tf.ones_like(policy.model._lstm_cell.weights[0]).eval())
        output1 = self.sess.run(
            [policy.distribution.probs],
            feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        p = pickle.dumps(policy)
        # Unpickle inside a fresh graph/session so the restored policy
        # cannot share state with the original graph.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run([policy_pickled.distribution.probs],
                               feed_dict={
                                   policy_pickled.model.input:
                                   [[obs.flatten()], [obs.flatten()]]
                               })  # noqa: E126
        assert np.array_equal(output1, output2)
    def test_state_info_specs(self):
        """No extra state specs when previous actions are not fed back."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(10, ), action_dim=4))
        policy = CategoricalLSTMPolicy(env_spec=env.spec,
                                       state_include_action=False)
        assert policy.state_info_specs == []
    def test_state_info_specs_with_state_include_action(self):
        """The previous action is tracked as extra state when requested."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(10, ), action_dim=4))
        policy = CategoricalLSTMPolicy(env_spec=env.spec,
                                       state_include_action=True)
        assert policy.state_info_specs == [('prev_action', (4, ))]
| 6,823 | 37.994286 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_categorical_mlp_policy.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import CategoricalMLPPolicy
# yapf: disable
from tests.fixtures import TfGraphTestCase # noqa:I202
from tests.fixtures.envs.dummy import (DummyBoxEnv,
DummyDictEnv,
DummyDiscreteEnv)
# yapf: enable
class TestCategoricalMLPPolicy(TfGraphTestCase):
    """Unit tests for CategoricalMLPPolicy."""
    def test_invalid_env(self):
        """Constructing with a continuous (Box) action space raises."""
        env = GarageEnv(DummyBoxEnv())
        with pytest.raises(ValueError):
            CategoricalMLPPolicy(env_spec=env.spec)
    @pytest.mark.parametrize('obs_dim, action_dim, obs_type', [
        ((1, ), 1, 'discrete'),
        ((2, ), 2, 'discrete'),
        ((1, 1), 1, 'discrete'),
        ((2, 2), 2, 'discrete'),
        ((1, ), 1, 'dict'),
    ])
    def test_get_action(self, obs_dim, action_dim, obs_type):
        """Sampled actions lie in the env's action space."""
        assert obs_type in ['discrete', 'dict']
        if obs_type == 'discrete':
            env = GarageEnv(
                DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        else:
            env = GarageEnv(
                DummyDictEnv(obs_space_type='box', act_space_type='discrete'))
        policy = CategoricalMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        if obs_type == 'discrete':
            # Discrete observations are flattened by the test; dict
            # observations are passed through unchanged.
            obs = obs.flatten()
        action, _ = policy.get_action(obs)
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs, obs, obs])
        for action in actions:
            assert env.action_space.contains(action)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), 1),
        ((2, ), 2),
        ((1, 1), 1),
        ((2, 2), 2),
    ])
    def test_build(self, obs_dim, action_dim):
        """A distribution rebuilt on a new input matches the default one."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        output1 = self.sess.run(
            [policy.distribution.probs],
            feed_dict={policy.model.input: [[obs.flatten()]]})
        output2 = self.sess.run([dist_sym.probs],
                                feed_dict={state_input: [[obs.flatten()]]})
        assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), 1),
        ((2, ), 2),
        ((1, 1), 1),
        ((2, 2), 2),
    ])
    def test_is_pickleable(self, obs_dim, action_dim):
        """Pickling round-trips the policy, including learned parameters."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        with tf.compat.v1.variable_scope(
                'CategoricalMLPPolicy/CategoricalMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable('mlp/hidden_0/bias')
        # assign it to all one
        bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(
            [policy.distribution.probs],
            feed_dict={policy.model.input: [[obs.flatten()]]})
        p = pickle.dumps(policy)
        # Unpickle inside a fresh graph/session so the restored policy
        # cannot share state with the original graph.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run(
                [policy_pickled.distribution.probs],
                feed_dict={policy_pickled.model.input: [[obs.flatten()]]})
        assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), 1),
        ((2, ), 2),
        ((1, 1), 1),
        ((2, 2), 2),
    ])
    def test_get_regularizable_vars(self, obs_dim, action_dim):
        """Regularizable vars exclude bias and output-layer variables."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalMLPPolicy(env_spec=env.spec)
        reg_vars = policy.get_regularizable_vars()
        assert len(reg_vars) == 2
        for var in reg_vars:
            assert ('bias' not in var.name) and ('output' not in var.name)
| 4,229 | 34.546218 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_categorical_policies.py | """
This script creates a unittest that tests Categorical policies in
garage.tf.policies.
"""
import gym
import pytest
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.optimizers import ConjugateGradientOptimizer
from garage.tf.optimizers import FiniteDifferenceHvp
from garage.tf.policies import CategoricalGRUPolicy
from garage.tf.policies import CategoricalLSTMPolicy
from garage.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
# All categorical policy variants exercised by the shared TRPO smoke test.
policies = [CategoricalGRUPolicy, CategoricalLSTMPolicy, CategoricalMLPPolicy]
class TestCategoricalPolicies(TfGraphTestCase):
    """Smoke tests: each categorical policy trains under TRPO."""
    @pytest.mark.parametrize('policy_cls', [*policies])
    def test_categorical_policies(self, policy_cls):
        """One epoch of TRPO on CartPole-v0 completes without raising."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('CartPole-v0')))
            policy = policy_cls(name='policy', env_spec=env.spec)
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            algo = TRPO(
                env_spec=env.spec,
                policy=policy,
                baseline=baseline,
                max_path_length=100,
                discount=0.99,
                max_kl_step=0.01,
                optimizer=ConjugateGradientOptimizer,
                # Finite-difference Hessian-vector products avoid exact
                # second derivatives in the conjugate gradient step.
                optimizer_args=dict(hvp_approach=FiniteDifferenceHvp(
                    base_eps=1e-5)),
            )
            runner.setup(algo, env)
            runner.train(n_epochs=1, batch_size=4000)
            env.close()
| 1,653 | 32.755102 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_continuous_mlp_policy.py | """Tests for garage.tf.policies.ContinuousMLPPolicy"""
import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import ContinuousMLPPolicy
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv, DummyDictEnv
from tests.fixtures.models import SimpleMLPModel
class TestContinuousMLPPolicy(TfGraphTestCase):
    """Test class for ContinuousMLPPolicy"""
    @pytest.mark.parametrize(
        'obs_dim, action_dim, obs_type',
        [
            # ((1, ), (1, ), 'box'),
            ((1, ), (2, ), 'box'),
            ((2, ), (2, ), 'box'),
            ((1, 1), (1, 1), 'box'),
            ((1, 1), (2, 2), 'box'),
            ((2, 2), (2, 2), 'box'),
            ((1, ), (1, ), 'dict'),
        ])
    def test_get_action(self, obs_dim, action_dim, obs_type):
        """Test get_action method

        SimpleMLPModel replaces the real MLP, so the deterministic output
        is a constant 0.5 for every action dimension.
        """
        assert obs_type in ['box', 'dict']
        if obs_type == 'box':
            env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim,
                                        action_dim=action_dim))
        else:
            env = GarageEnv(
                DummyDictEnv(obs_space_type='box', act_space_type='box'))
        with mock.patch(('garage.tf.policies.'
                         'continuous_mlp_policy.MLPModel'),
                        new=SimpleMLPModel):
            policy = ContinuousMLPPolicy(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        if obs_type == 'box':
            # Box observations are flattened by the test; dict observations
            # are passed through unchanged.
            obs = obs.flatten()
        action, _ = policy.get_action(obs)
        expected_action = np.full(action_dim, 0.5)
        assert env.action_space.contains(action)
        assert np.array_equal(action, expected_action)
        actions, _ = policy.get_actions([obs, obs, obs])
        for action in actions:
            assert env.action_space.contains(action)
            assert np.array_equal(action, expected_action)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    ])
    def test_get_action_sym(self, obs_dim, action_dim):
        """Test get_action_sym method"""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                         'continuous_mlp_policy.MLPModel'),
                        new=SimpleMLPModel):
            policy = ContinuousMLPPolicy(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        obs_dim = env.spec.observation_space.flat_dim
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, obs_dim))
        action_sym = policy.get_action_sym(state_input, name='action_sym')
        # SimpleMLPModel outputs a constant 0.5, so the symbolic path must
        # reproduce that same value.
        expected_action = np.full(action_dim, 0.5)
        action = self.sess.run(action_sym,
                               feed_dict={state_input: [obs.flatten()]})
        action = policy.action_space.unflatten(action)
        assert np.array_equal(action, expected_action)
        assert env.action_space.contains(action)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    ])
    def test_is_pickleable(self, obs_dim, action_dim):
        """Test if ContinuousMLPPolicy is pickleable"""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.policies.'
                         'continuous_mlp_policy.MLPModel'),
                        new=SimpleMLPModel):
            policy = ContinuousMLPPolicy(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        with tf.compat.v1.variable_scope('ContinuousMLPPolicy/MLPModel',
                                         reuse=True):
            return_var = tf.compat.v1.get_variable('return_var')
        # assign it to all one
        return_var.load(tf.ones_like(return_var).eval())
        output1 = self.sess.run(
            policy.model.outputs,
            feed_dict={policy.model.input: [obs.flatten()]})
        p = pickle.dumps(policy)
        # Unpickle inside a fresh graph/session so the restored policy
        # cannot share state with the original graph.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run(
                policy_pickled.model.outputs,
                feed_dict={policy_pickled.model.input: [obs.flatten()]})
        assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
    ])
    def test_get_regularizable_vars(self, obs_dim, action_dim):
        """Test get_regularizable_vars method"""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = ContinuousMLPPolicy(env_spec=env.spec)
        reg_vars = policy.get_regularizable_vars()
        assert len(reg_vars) == 2
        for var in reg_vars:
            assert ('bias' not in var.name) and ('output' not in var.name)
| 5,163 | 35.366197 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_gaussian_gru_policy.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import GaussianGRUPolicy
# yapf: disable
from tests.fixtures import TfGraphTestCase # noqa: I202
from tests.fixtures.envs.dummy import (DummyBoxEnv,
DummyDictEnv,
DummyDiscreteEnv)
# yapf: enable
class TestGaussianGRUPolicy(TfGraphTestCase):
    """Unit tests for GaussianGRUPolicy."""

    def test_invalid_env(self):
        """Constructing with a discrete action space raises."""
        env = GarageEnv(DummyDiscreteEnv())
        with pytest.raises(ValueError):
            GaussianGRUPolicy(env_spec=env.spec)

    # yapf: disable
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), (1, ), 4),
        ((2, ), (2, ), 4),
        ((1, 1), (1, ), 4),
        ((2, 2), (2, ), 4)
    ])
    # yapf: enable
    def test_get_action_state_include_action(self, obs_dim, action_dim,
                                             hidden_dim):
        """Sampled actions are valid when prev actions are fed as state."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianGRUPolicy(env_spec=env.spec,
                                   hidden_dim=hidden_dim,
                                   state_include_action=True)
        policy.reset()
        obs = env.reset()
        action, _ = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        policy.reset()
        actions, _ = policy.get_actions([obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)

    # yapf: disable
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), (1, ), 4),
        ((2, ), (2, ), 4),
        ((1, 1), (1, ), 4),
        ((2, 2), (2, ), 4)
    ])
    # yapf: enable
    def test_get_action(self, obs_dim, action_dim, hidden_dim):
        """Sampled actions are valid without state_include_action."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianGRUPolicy(env_spec=env.spec,
                                   hidden_dim=hidden_dim,
                                   state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        action, _ = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)

    def test_get_action_dict_space(self):
        """Dict observation spaces are handled without manual flattening."""
        env = GarageEnv(
            DummyDictEnv(obs_space_type='box', act_space_type='box'))
        policy = GaussianGRUPolicy(env_spec=env.spec,
                                   hidden_dim=4,
                                   state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        action, _ = policy.get_action(obs)
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs, obs])
        for action in actions:
            assert env.action_space.contains(action)

    # yapf: disable
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), (1, ), 4),
        ((2, ), (2, ), 4),
        ((1, 1), (1, ), 4),
        ((2, 2), (2, ), 4)
    ])
    # yapf: enable
    def test_build_state_include_action(self, obs_dim, action_dim, hidden_dim):
        """A rebuilt distribution matches the default one (with prev action)."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianGRUPolicy(env_spec=env.spec,
                                   hidden_dim=hidden_dim,
                                   state_include_action=True)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        # With state_include_action the policy input is the observation
        # concatenated with the (zero-initialized) previous action.
        concat_obs = np.concatenate([obs.flatten(), np.zeros(action_dim)])
        output1 = self.sess.run(
            [policy.distribution.loc],
            feed_dict={policy.model.input: [[concat_obs], [concat_obs]]})
        output2 = self.sess.run(
            [dist_sym.loc],
            feed_dict={state_input: [[concat_obs], [concat_obs]]})
        assert np.array_equal(output1, output2)

    # yapf: disable
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), (1, ), 4),
        ((2, ), (2, ), 4),
        ((1, 1), (1, ), 4),
        ((2, 2), (2, ), 4)
    ])
    # yapf: enable
    def test_build_state_not_include_action(self, obs_dim, action_dim,
                                            hidden_dim):
        """A rebuilt distribution matches the default one (no prev action)."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianGRUPolicy(env_spec=env.spec,
                                   hidden_dim=hidden_dim,
                                   state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        output1 = self.sess.run(
            [policy.distribution.loc],
            feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        output2 = self.sess.run(
            [dist_sym.loc],
            feed_dict={state_input: [[obs.flatten()], [obs.flatten()]]})
        assert np.array_equal(output1, output2)

    def test_is_pickleable(self):
        """Pickling round-trips the policy, including learned parameters."""
        env = GarageEnv(DummyBoxEnv(obs_dim=(1, ), action_dim=(1, )))
        policy = GaussianGRUPolicy(env_spec=env.spec,
                                   state_include_action=False)
        # Fix: previously env.reset() was called twice in a row; a single
        # reset is sufficient to obtain the initial observation.
        obs = env.reset()
        with tf.compat.v1.variable_scope('GaussianGRUPolicy/GaussianGRUModel',
                                         reuse=True):
            param = tf.compat.v1.get_variable(
                'dist_params/log_std_param/parameter')
        # Overwrite the log-std parameter with ones so the comparison below
        # is against non-default values.
        param.load(tf.ones_like(param).eval())
        output1 = self.sess.run(
            [policy.distribution.loc,
             policy.distribution.stddev()],
            feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        p = pickle.dumps(policy)
        # Unpickle inside a fresh graph/session so the restored policy
        # cannot share state with the original graph.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            # yapf: disable
            output2 = sess.run(
                [
                    policy_pickled.distribution.loc,
                    policy_pickled.distribution.stddev()
                ],
                feed_dict={
                    policy_pickled.model.input: [[obs.flatten()],
                                                 [obs.flatten()]]
                })
            # yapf: enable
        assert np.array_equal(output1, output2)

    def test_state_info_specs(self):
        """No extra state specs when previous actions are not fed back."""
        env = GarageEnv(DummyBoxEnv(obs_dim=(4, ), action_dim=(4, )))
        policy = GaussianGRUPolicy(env_spec=env.spec,
                                   state_include_action=False)
        assert policy.state_info_specs == []

    def test_state_info_specs_with_state_include_action(self):
        """The previous action is tracked as extra state when requested."""
        env = GarageEnv(DummyBoxEnv(obs_dim=(4, ), action_dim=(4, )))
        policy = GaussianGRUPolicy(env_spec=env.spec,
                                   state_include_action=True)
        assert policy.state_info_specs == [('prev_action', (4, ))]
| 7,502 | 37.086294 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_gaussian_lstm_policy.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import GaussianLSTMPolicy
# yapf: disable
from tests.fixtures import TfGraphTestCase # noqa: I202
from tests.fixtures.envs.dummy import (DummyBoxEnv,
DummyDictEnv,
DummyDiscreteEnv)
# yapf: enable
class TestGaussianLSTMPolicy(TfGraphTestCase):
    """Unit tests for GaussianLSTMPolicy."""

    def test_invalid_env(self):
        """Constructing with a discrete action space raises."""
        env = GarageEnv(DummyDiscreteEnv())
        with pytest.raises(ValueError):
            GaussianLSTMPolicy(env_spec=env.spec)

    # yapf: disable
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), (1, ), 4),
        ((2, ), (2, ), 4),
        ((1, 1), (1, ), 4),
        ((2, 2), (2, ), 4)
    ])
    # yapf: enable
    def test_get_action_state_include_action(self, obs_dim, action_dim,
                                             hidden_dim):
        """Sampled actions are valid when prev actions are fed as state."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianLSTMPolicy(env_spec=env.spec,
                                    hidden_dim=hidden_dim,
                                    state_include_action=True)
        policy.reset()
        obs = env.reset()
        action, _ = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        policy.reset()
        actions, _ = policy.get_actions([obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)

    def test_get_action_dict_space(self):
        """Dict observation spaces are handled without manual flattening."""
        env = GarageEnv(
            DummyDictEnv(obs_space_type='box', act_space_type='box'))
        policy = GaussianLSTMPolicy(env_spec=env.spec,
                                    hidden_dim=4,
                                    state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        action, _ = policy.get_action(obs)
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs, obs])
        for action in actions:
            assert env.action_space.contains(action)

    # yapf: disable
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), (1, ), 4),
        ((2, ), (2, ), 4),
        ((1, 1), (1, ), 4),
        ((2, 2), (2, ), 4)
    ])
    # yapf: enable
    def test_get_action(self, obs_dim, action_dim, hidden_dim):
        """Sampled actions are valid without state_include_action."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianLSTMPolicy(env_spec=env.spec,
                                    hidden_dim=hidden_dim,
                                    state_include_action=False)
        policy.reset()
        obs = env.reset()
        action, _ = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)

    # yapf: disable
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), (1, ), 4),
        ((2, ), (2, ), 4),
        ((1, 1), (1, ), 4),
        ((2, 2), (2, ), 4)
    ])
    # yapf: enable
    def test_build_state_include_action(self, obs_dim, action_dim, hidden_dim):
        """A rebuilt distribution matches the default one (with prev action)."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianLSTMPolicy(env_spec=env.spec,
                                    hidden_dim=hidden_dim,
                                    state_include_action=True)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        # With state_include_action the policy input is the observation
        # concatenated with the (zero-initialized) previous action.
        concat_obs = np.concatenate([obs.flatten(), np.zeros(action_dim)])
        output1 = self.sess.run(
            [policy.distribution.loc],
            feed_dict={policy.model.input: [[concat_obs], [concat_obs]]})
        output2 = self.sess.run(
            [dist_sym.loc],
            feed_dict={state_input: [[concat_obs], [concat_obs]]})
        assert np.array_equal(output1, output2)

    # yapf: disable
    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [
        ((1, ), (1, ), 4),
        ((2, ), (2, ), 4),
        ((1, 1), (1, ), 4),
        ((2, 2), (2, ), 4)
    ])
    # yapf: enable
    def test_build_state_not_include_action(self, obs_dim, action_dim,
                                            hidden_dim):
        """A rebuilt distribution matches the default one (no prev action)."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianLSTMPolicy(env_spec=env.spec,
                                    hidden_dim=hidden_dim,
                                    state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        output1 = self.sess.run(
            [policy.distribution.loc],
            feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        output2 = self.sess.run(
            [dist_sym.loc],
            feed_dict={state_input: [[obs.flatten()], [obs.flatten()]]})
        assert np.array_equal(output1, output2)

    def test_is_pickleable(self):
        """Pickling round-trips the policy, including learned parameters."""
        env = GarageEnv(DummyBoxEnv(obs_dim=(1, ), action_dim=(1, )))
        policy = GaussianLSTMPolicy(env_spec=env.spec,
                                    state_include_action=False)
        # Fix: previously env.reset() was called twice in a row; a single
        # reset is sufficient to obtain the initial observation.
        obs = env.reset()
        with tf.compat.v1.variable_scope(
                'GaussianLSTMPolicy/GaussianLSTMModel', reuse=True):
            param = tf.compat.v1.get_variable(
                'dist_params/log_std_param/parameter')
        # Overwrite the log-std parameter with ones so the comparison below
        # is against non-default values.
        param.load(tf.ones_like(param).eval())
        output1 = self.sess.run(
            [policy.distribution.loc,
             policy.distribution.stddev()],
            feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        p = pickle.dumps(policy)
        # Unpickle inside a fresh graph/session so the restored policy
        # cannot share state with the original graph.
        # yapf: disable
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run(
                [
                    policy_pickled.distribution.loc,
                    policy_pickled.distribution.stddev()
                ],
                feed_dict={
                    policy_pickled.model.input: [[obs.flatten()],
                                                 [obs.flatten()]]
                })
        assert np.array_equal(output1, output2)
        # yapf: enable

    def test_state_info_specs(self):
        """No extra state specs when previous actions are not fed back."""
        env = GarageEnv(DummyBoxEnv(obs_dim=(4, ), action_dim=(4, )))
        policy = GaussianLSTMPolicy(env_spec=env.spec,
                                    state_include_action=False)
        assert policy.state_info_specs == []

    def test_state_info_specs_with_state_include_action(self):
        """The previous action is tracked as extra state when requested."""
        env = GarageEnv(DummyBoxEnv(obs_dim=(4, ), action_dim=(4, )))
        policy = GaussianLSTMPolicy(env_spec=env.spec,
                                    state_include_action=True)
        assert policy.state_info_specs == [('prev_action', (4, ))]
| 7,481 | 37.173469 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_gaussian_mlp_policy.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import GaussianMLPPolicy
# yapf: disable
from tests.fixtures import TfGraphTestCase # noqa: I202
from tests.fixtures.envs.dummy import (DummyBoxEnv,
DummyDictEnv,
DummyDiscreteEnv)
# yapf: enable
class TestGaussianMLPPolicy(TfGraphTestCase):
    """Unit tests for GaussianMLPPolicy."""
    def test_invalid_env(self):
        """Constructing with a discrete action space raises."""
        env = GarageEnv(DummyDiscreteEnv())
        with pytest.raises(ValueError):
            GaussianMLPPolicy(env_spec=env.spec)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    ])
    def test_get_action(self, obs_dim, action_dim):
        """Sampled actions lie in the env's action space."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianMLPPolicy(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        action, _ = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions(
            [obs.flatten(), obs.flatten(),
             obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)
    def test_get_action_dict_space(self):
        """Dict observation spaces are handled without manual flattening."""
        env = GarageEnv(
            DummyDictEnv(obs_space_type='box', act_space_type='box'))
        policy = GaussianMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        action, _ = policy.get_action(obs)
        assert env.action_space.contains(action)
        actions, _ = policy.get_actions([obs, obs])
        for action in actions:
            assert env.action_space.contains(action)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    ])
    def test_build(self, obs_dim, action_dim):
        """A distribution rebuilt on a new input matches the default one."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32,
                                               shape=(None, None,
                                                      policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        output1 = self.sess.run(
            [policy.distribution.loc],
            feed_dict={policy.model.input: [[obs.flatten()]]})
        output2 = self.sess.run([dist_sym.loc],
                                feed_dict={state_input: [[obs.flatten()]]})
        assert np.array_equal(output1, output2)
    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((1, ), (2, )),
        ((2, ), (2, )),
        ((1, 1), (1, 1)),
        ((1, 1), (2, 2)),
        ((2, 2), (2, 2)),
    ])
    def test_is_pickleable(self, obs_dim, action_dim):
        """Pickling round-trips the policy, including learned parameters."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = GaussianMLPPolicy(env_spec=env.spec)
        obs = env.reset()
        with tf.compat.v1.variable_scope('GaussianMLPPolicy/GaussianMLPModel',
                                         reuse=True):
            bias = tf.compat.v1.get_variable(
                'dist_params/mean_network/hidden_0/bias')
        # assign it to all one
        bias.load(tf.ones_like(bias).eval())
        output1 = self.sess.run(
            [policy.distribution.loc,
             policy.distribution.stddev()],
            feed_dict={policy.model.input: [[obs.flatten()]]})
        p = pickle.dumps(policy)
        # Unpickle inside a fresh graph/session so the restored policy
        # cannot share state with the original graph.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run(
                [
                    policy_pickled.distribution.loc,
                    policy_pickled.distribution.stddev()
                ],
                feed_dict={policy_pickled.model.input: [[obs.flatten()]]})
        assert np.array_equal(output2, output1)
| 4,173 | 33.495868 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_gaussian_mlp_task_embedding_policy.py | from itertools import chain
import pickle
from unittest import mock
# pylint: disable=wrong-import-order
import akro
import numpy as np
import pytest
import tensorflow as tf
from garage import InOutSpec
from garage.envs import GarageEnv
from garage.tf.embeddings import GaussianMLPEncoder
from garage.tf.policies import GaussianMLPTaskEmbeddingPolicy
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv, DummyDictEnv
from tests.fixtures.models import SimpleGaussianMLPModel
class TestGaussianMLPTaskEmbeddingPolicy(TfGraphTestCase):
    @pytest.mark.parametrize('obs_dim', [(2, ), (2, 2)])
    @pytest.mark.parametrize('task_num', [1, 5])
    @pytest.mark.parametrize('latent_dim', [1, 5])
    @pytest.mark.parametrize('action_dim', [(2, ), (2, 2)])
    def test_get_action(self, obs_dim, task_num, latent_dim, action_dim):
        """Actions from latent, task one-hot, and augmented obs are valid."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(
            input_space=akro.Box(low=np.zeros(task_num),
                                 high=np.ones(task_num)),
            output_space=akro.Box(low=np.zeros(latent_dim),
                                  high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec)
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec,
                                                encoder=encoder)
        env.reset()
        obs, _, _, _ = env.step(1)
        latent = np.random.random((latent_dim, ))
        # One-hot task indicator with the first task active.
        task = np.zeros(task_num)
        task[0] = 1
        action1, _ = policy.get_action_given_latent(obs, latent)
        action2, _ = policy.get_action_given_task(obs, task)
        action3, _ = policy.get_action(np.concatenate([obs.flatten(), task]))
        assert env.action_space.contains(action1)
        assert env.action_space.contains(action2)
        assert env.action_space.contains(action3)
        obses, latents, tasks = [obs] * 3, [latent] * 3, [task] * 3
        aug_obses = [np.concatenate([obs.flatten(), task])] * 3
        action1n, _ = policy.get_actions_given_latents(obses, latents)
        action2n, _ = policy.get_actions_given_tasks(obses, tasks)
        action3n, _ = policy.get_actions(aug_obses)
        for action in chain(action1n, action2n, action3n):
            assert env.action_space.contains(action)
    def test_get_latent(self):
        """get_latent returns a latent (and its stats) of the encoder dim."""
        obs_dim, action_dim, task_num, latent_dim = (2, ), (2, ), 5, 2
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(
            input_space=akro.Box(low=np.zeros(task_num),
                                 high=np.ones(task_num)),
            output_space=akro.Box(low=np.zeros(latent_dim),
                                  high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec)
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec,
                                                encoder=encoder)
        task_id = 3
        task_onehot = np.zeros(task_num)
        task_onehot[task_id] = 1
        latent, latent_info = policy.get_latent(task_onehot)
        assert latent.shape == (latent_dim, )
        assert latent_info['mean'].shape == (latent_dim, )
        assert latent_info['log_std'].shape == (latent_dim, )
    def test_auxiliary(self):
        """Auxiliary properties are exposed correctly after policy.build()."""
        obs_dim, action_dim, task_num, latent_dim = (2, ), (2, ), 2, 2
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(
            input_space=akro.Box(low=np.zeros(task_num),
                                 high=np.ones(task_num)),
            output_space=akro.Box(low=np.zeros(latent_dim),
                                  high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec)
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec,
                                                encoder=encoder)
        # Build with explicit placeholders for observation and task inputs.
        obs_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, 2))
        task_input = tf.compat.v1.placeholder(tf.float32,
                                              shape=(None, None, 2))
        policy.build(obs_input, task_input)

        # Action distribution's last dim spans the action space; the encoder
        # and its spaces are exposed unchanged; the augmented observation
        # space is the env observation space plus the one-hot task.
        assert policy.distribution.loc.get_shape().as_list(
        )[-1] == env.action_space.flat_dim
        assert policy.encoder == encoder
        assert policy.latent_space.flat_dim == latent_dim
        assert policy.task_space.flat_dim == task_num
        assert (policy.augmented_observation_space.flat_dim ==
                env.observation_space.flat_dim + task_num)
        assert policy.encoder_distribution.loc.get_shape().as_list(
        )[-1] == latent_dim
def test_split_augmented_observation(self):
obs_dim, task_num = 3, 5
policy = mock.Mock(spec=GaussianMLPTaskEmbeddingPolicy)
policy.task_space = mock.Mock()
policy.task_space.flat_dim = task_num
policy.split_augmented_observation = \
GaussianMLPTaskEmbeddingPolicy.split_augmented_observation
obs = np.random.random(obs_dim)
task = np.random.random(task_num)
o, t = policy.split_augmented_observation(policy,
np.concatenate([obs, task]))
assert np.array_equal(obs, o)
assert np.array_equal(task, t)
def test_get_vars(self):
obs_dim, action_dim, task_num, latent_dim = (2, ), (2, ), 5, 2
env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
embedding_spec = InOutSpec(
input_space=akro.Box(low=np.zeros(task_num),
high=np.ones(task_num)),
output_space=akro.Box(low=np.zeros(latent_dim),
high=np.ones(latent_dim)))
encoder = GaussianMLPEncoder(embedding_spec, hidden_sizes=[32, 32, 32])
policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec,
encoder=encoder,
hidden_sizes=[32, 32, 32])
vars1 = sorted(policy.get_trainable_vars(), key=lambda v: v.name)
vars2 = sorted(policy.get_global_vars(), key=lambda v: v.name)
assert vars1 == vars2
# Two network. Each with 4 layers * (1 weight + 1 bias) + 1 log_std
assert len(vars1) == 2 * (4 * 2 + 1)
obs = np.random.random(obs_dim)
latent = np.random.random((latent_dim, ))
for var in vars1:
var.assign(np.ones(var.shape))
assert np.any(policy.get_action_given_latent(obs, latent) != 0)
for var in vars1:
var.assign(np.zeros(var.shape))
assert not np.all(policy.get_action_given_latent(obs, latent) == 0)
    def test_pickling(self):
        """Policy survives a pickle round-trip inside a fresh variable scope."""
        obs_dim, action_dim, task_num, latent_dim = (2, ), (2, ), 5, 2
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(
            input_space=akro.Box(low=np.zeros(task_num),
                                 high=np.ones(task_num)),
            output_space=akro.Box(low=np.zeros(latent_dim),
                                  high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec)
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec,
                                                encoder=encoder)

        pickled = pickle.dumps(policy)
        with tf.compat.v1.variable_scope('resumed'):
            unpickled = pickle.loads(pickled)
            # The unpickled policy carries its compiled sampling functions.
            assert hasattr(unpickled, '_f_dist_obs_latent')
            assert hasattr(unpickled, '_f_dist_obs_task')
    def test_does_not_support_non_box_obs_space(self):
        """Test that policy raises error if passed a dict obs space."""
        task_num, latent_dim = 5, 2
        env = GarageEnv(DummyDictEnv(act_space_type='box'))
        # NOTE(review): the match string concatenates without a space between
        # 'support' and 'non' — presumably mirrors the raised message; verify.
        with pytest.raises(ValueError,
                           match=('This task embedding policy does not support'
                                  'non akro.Box observation spaces.')):
            embedding_spec = InOutSpec(
                input_space=akro.Box(low=np.zeros(task_num),
                                     high=np.ones(task_num)),
                output_space=akro.Box(low=np.zeros(latent_dim),
                                      high=np.ones(latent_dim)))
            encoder = GaussianMLPEncoder(embedding_spec,
                                         hidden_sizes=[32, 32, 32])
            GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec,
                                           encoder=encoder,
                                           hidden_sizes=[32, 32, 32])
    def test_does_not_support_non_box_action_space(self):
        """Test that policy raises error if passed a discrete action space."""
        task_num, latent_dim = 5, 2
        env = GarageEnv(DummyDictEnv(act_space_type='discrete'))
        # NOTE(review): the match string concatenates without a space between
        # 'support' and 'non' — presumably mirrors the raised message; verify.
        with pytest.raises(ValueError,
                           match=('This task embedding policy does not support'
                                  'non akro.Box action spaces.')):
            embedding_spec = InOutSpec(
                input_space=akro.Box(low=np.zeros(task_num),
                                     high=np.ones(task_num)),
                output_space=akro.Box(low=np.zeros(latent_dim),
                                      high=np.ones(latent_dim)))
            encoder = GaussianMLPEncoder(embedding_spec,
                                         hidden_sizes=[32, 32, 32])
            GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec,
                                           encoder=encoder,
                                           hidden_sizes=[32, 32, 32])
| 9,691 | 46.048544 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_gaussian_policies.py | import gym
import pytest
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.optimizers import ConjugateGradientOptimizer
from garage.tf.optimizers import FiniteDifferenceHvp
from garage.tf.policies import GaussianGRUPolicy
from garage.tf.policies import GaussianLSTMPolicy
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
policies = [GaussianGRUPolicy, GaussianLSTMPolicy, GaussianMLPPolicy]
class TestGaussianPolicies(TfGraphTestCase):
    """Smoke test: each Gaussian policy variant trains one TRPO epoch."""

    @pytest.mark.parametrize('policy_cls', policies)
    def test_gaussian_policies(self, policy_cls):
        """Run a single short TRPO training epoch on Pendulum-v0."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(normalize(gym.make('Pendulum-v0')))

            policy = policy_cls(name='policy', env_spec=env.spec)

            baseline = LinearFeatureBaseline(env_spec=env.spec)

            algo = TRPO(
                env_spec=env.spec,
                policy=policy,
                baseline=baseline,
                max_path_length=100,
                discount=0.99,
                max_kl_step=0.01,
                # Finite-difference HVP avoids building second-order graphs.
                optimizer=ConjugateGradientOptimizer,
                optimizer_args=dict(hvp_approach=FiniteDifferenceHvp(
                    base_eps=1e-5)),
            )
            runner.setup(algo, env)
            runner.train(n_epochs=1, batch_size=4000)
            env.close()
| 1,531 | 33.818182 | 70 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_policies.py | import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.policies import CategoricalGRUPolicy
from garage.tf.policies import CategoricalLSTMPolicy
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.policies import GaussianGRUPolicy
from garage.tf.policies import GaussianLSTMPolicy
from garage.tf.policies import GaussianMLPPolicy
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv, DummyDiscreteEnv
class TestDiscretePolicies(TfGraphTestCase):
    """Smoke tests: discrete-action policies return an action for an obs."""

    def setup_method(self):
        super().setup_method()
        self.env = GarageEnv(DummyDiscreteEnv())

    def teardown_method(self):
        self.env.close()
        super().teardown_method()

    def test_categorial_gru_policy(self):
        """CategoricalGRUPolicy produces an action for an extreme obs."""
        # NOTE(review): the env itself is passed as env_spec (not env.spec);
        # presumably works because GarageEnv exposes the same spaces — verify.
        categorical_gru_policy = CategoricalGRUPolicy(
            env_spec=self.env, hidden_dim=1, state_include_action=False)
        categorical_gru_policy.reset()

        obs = self.env.observation_space.high
        assert categorical_gru_policy.get_action(obs)

    def test_categorical_lstm_policy(self):
        """CategoricalLSTMPolicy produces an action for an extreme obs."""
        categorical_lstm_policy = CategoricalLSTMPolicy(
            env_spec=self.env, hidden_dim=1, state_include_action=False)
        categorical_lstm_policy.reset()

        obs = self.env.observation_space.high
        assert categorical_lstm_policy.get_action(obs)

    def test_categorial_mlp_policy(self):
        """CategoricalMLPPolicy produces an action for an extreme obs."""
        categorical_mlp_policy = CategoricalMLPPolicy(env_spec=self.env,
                                                      hidden_sizes=(1, ))
        obs = self.env.observation_space.high
        assert categorical_mlp_policy.get_action(obs)
class TestContinuousPolicies(TfGraphTestCase):
    """Smoke tests: continuous-action policies return an action for an obs."""

    def setup_method(self):
        super().setup_method()
        self.env = GarageEnv(DummyBoxEnv())

    def teardown_method(self):
        self.env.close()
        super().teardown_method()

    def test_continuous_mlp_policy(self):
        """ContinuousMLPPolicy produces an action for an extreme obs."""
        # NOTE(review): the env itself is passed as env_spec (not env.spec);
        # presumably works because GarageEnv exposes the same spaces — verify.
        continuous_mlp_policy = ContinuousMLPPolicy(env_spec=self.env,
                                                    hidden_sizes=(1, ))
        obs = self.env.observation_space.high
        assert continuous_mlp_policy.get_action(obs)

    def test_gaussian_gru_policy(self):
        """GaussianGRUPolicy produces an action for an extreme obs."""
        gaussian_gru_policy = GaussianGRUPolicy(env_spec=self.env,
                                                hidden_dim=1,
                                                state_include_action=False)
        gaussian_gru_policy.reset()

        obs = self.env.observation_space.high
        assert gaussian_gru_policy.get_action(obs)

    def test_gaussian_lstm_policy(self):
        """GaussianLSTMPolicy produces an action for an extreme obs."""
        gaussian_lstm_policy = GaussianLSTMPolicy(env_spec=self.env,
                                                  hidden_dim=1,
                                                  state_include_action=False)
        gaussian_lstm_policy.reset()

        obs = self.env.observation_space.high
        assert gaussian_lstm_policy.get_action(obs)

    def test_gaussian_mlp_policy(self):
        """GaussianMLPPolicy produces an action for an extreme obs."""
        gaussian_mlp_policy = GaussianMLPPolicy(env_spec=self.env,
                                                hidden_sizes=(1, ))
        obs = self.env.observation_space.high
        assert gaussian_mlp_policy.get_action(obs)
| 3,262 | 36.505747 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/policies/test_qf_derived_policy.py | import pickle
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.envs.wrappers import AtariEnv
from garage.tf.policies import DiscreteQfDerivedPolicy
from garage.tf.q_functions import DiscreteCNNQFunction
# yapf: disable
from tests.fixtures import TfGraphTestCase # noqa: I202
from tests.fixtures.envs.dummy import (DummyDictEnv,
DummyDiscreteEnv,
DummyDiscretePixelEnvBaselines)
from tests.fixtures.q_functions import SimpleQFunction
# yapf: enable
class TestQfDerivedPolicy(TfGraphTestCase):
    """Tests for DiscreteQfDerivedPolicy with a simple Q-function."""

    def setup_method(self):
        super().setup_method()
        self.env = GarageEnv(DummyDiscreteEnv())
        self.qf = SimpleQFunction(self.env.spec)
        self.policy = DiscreteQfDerivedPolicy(env_spec=self.env.spec,
                                              qf=self.qf)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.env.reset()

    def test_discrete_qf_derived_policy(self):
        """get_action/get_actions return actions inside the action space."""
        obs, _, _, _ = self.env.step(1)
        action, _ = self.policy.get_action(obs)
        assert self.env.action_space.contains(action)
        actions, _ = self.policy.get_actions([obs])
        for action in actions:
            assert self.env.action_space.contains(action)

    def test_is_pickleable(self):
        """A pickled-then-unpickled policy chooses the same action."""
        with tf.compat.v1.variable_scope('SimpleQFunction/SimpleMLPModel',
                                         reuse=True):
            return_var = tf.compat.v1.get_variable('return_var')
            # assign it to all one
            return_var.load(tf.ones_like(return_var).eval())
        obs, _, _, _ = self.env.step(1)
        action1, _ = self.policy.get_action(obs)

        p = pickle.dumps(self.policy)
        with tf.compat.v1.Session(graph=tf.Graph()):
            policy_pickled = pickle.loads(p)
            action2, _ = policy_pickled.get_action(obs)
            assert action1 == action2

    def test_does_not_support_dict_obs_space(self):
        """Test that policy raises error if passed a dict obs space."""
        env = GarageEnv(DummyDictEnv(act_space_type='discrete'))
        with pytest.raises(ValueError):
            qf = SimpleQFunction(env.spec,
                                 name='does_not_support_dict_obs_space')
            DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)

    def test_invalid_action_spaces(self):
        """Test that policy raises error if passed a non-discrete action space."""
        env = GarageEnv(DummyDictEnv(act_space_type='box'))
        with pytest.raises(ValueError):
            qf = SimpleQFunction(env.spec)
            DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
class TestQfDerivedPolicyImageObs(TfGraphTestCase):
    """Tests for DiscreteQfDerivedPolicy over image observations."""

    def setup_method(self):
        super().setup_method()
        self.env = GarageEnv(AtariEnv(DummyDiscretePixelEnvBaselines()),
                             is_image=True)
        self.qf = DiscreteCNNQFunction(env_spec=self.env.spec,
                                       filters=((1, (1, 1)), ),
                                       strides=(1, ),
                                       dueling=False)
        self.policy = DiscreteQfDerivedPolicy(env_spec=self.env.spec,
                                              qf=self.qf)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.env.reset()

    def test_obs_unflattened(self):
        """Test if a flattened image obs is passed to get_action
        then it is unflattened.
        """
        obs = self.env.observation_space.sample()
        action, _ = self.policy.get_action(
            self.env.observation_space.flatten(obs))
        self.env.step(action)
| 3,698 | 38.351064 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/q_functions/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/q_functions/test_continuous_cnn_q_function.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.q_functions import ContinuousCNNQFunction
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyDictEnv
from tests.fixtures.envs.dummy import DummyDiscreteEnv
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
from tests.fixtures.models import SimpleCNNModel
from tests.fixtures.models import SimpleCNNModelWithMaxPooling
from tests.fixtures.models import SimpleMLPMergeModel
class TestContinuousCNNQFunction(TfGraphTestCase):
# yapf: disable
@pytest.mark.parametrize('filters, strides', [
(((5, (3, 3)), ), (1, )),
(((5, (3, 3)), ), (2, )),
(((5, (3, 3)), (5, (3, 3))), (1, 1))
])
# yapf: enable
def test_get_qval(self, filters, strides):
env = GarageEnv(DummyDiscretePixelEnv())
obs = env.reset()
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.MLPMergeModel'),
new=SimpleMLPMergeModel):
qf = ContinuousCNNQFunction(env_spec=env.spec,
filters=filters,
strides=strides)
action_dim = env.action_space.shape
obs, _, _, _ = env.step(1)
act = np.full(action_dim, 0.5)
expected_output = np.full((1, ), 0.5)
outputs = qf.get_qval([obs], [act])
assert np.array_equal(outputs[0], expected_output)
outputs = qf.get_qval([obs, obs, obs], [act, act, act])
for output in outputs:
assert np.array_equal(output, expected_output)
# make sure observations are unflattened
obs = env.observation_space.flatten(obs)
qf._f_qval = mock.MagicMock()
qf.get_qval([obs], [act])
unflattened_obs = qf._f_qval.call_args_list[0][0][0]
assert unflattened_obs.shape[1:] == env.spec.observation_space.shape
qf.get_qval([obs, obs], [act, act])
unflattened_obs = qf._f_qval.call_args_list[1][0][0]
assert unflattened_obs.shape[1:] == env.spec.observation_space.shape
# yapf: disable
@pytest.mark.parametrize('filters, strides, pool_strides, pool_shapes', [
(((5, (3, 3)), ), (1, ), (1, 1), (1, 1)),
(((5, (3, 3)), ), (2, ), (2, 2), (2, 2)),
(((5, (3, 3)), (5, (3, 3))), (1, 1), (1, 1), (1, 1)),
(((5, (3, 3)), (5, (3, 3))), (1, 1), (2, 2), (2, 2))
])
# yapf: enable
def test_get_qval_max_pooling(self, filters, strides, pool_strides,
pool_shapes):
env = GarageEnv(DummyDiscretePixelEnv())
obs = env.reset()
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.CNNModelWithMaxPooling'),
new=SimpleCNNModelWithMaxPooling):
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.MLPMergeModel'),
new=SimpleMLPMergeModel):
qf = ContinuousCNNQFunction(env_spec=env.spec,
filters=filters,
strides=strides,
max_pooling=True,
pool_strides=pool_strides,
pool_shapes=pool_shapes)
action_dim = env.action_space.shape
obs, _, _, _ = env.step(1)
act = np.full(action_dim, 0.5)
expected_output = np.full((1, ), 0.5)
outputs = qf.get_qval([obs], [act])
assert np.array_equal(outputs[0], expected_output)
outputs = qf.get_qval([obs, obs, obs], [act, act, act])
for output in outputs:
assert np.array_equal(output, expected_output)
# yapf: disable
@pytest.mark.parametrize('obs_dim', [
(1, ),
(1, 1, 1, 1),
(2, 2, 2, 2)])
# yapf: enable
def test_invalid_obs_dim(self, obs_dim):
with pytest.raises(ValueError):
env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim))
ContinuousCNNQFunction(env_spec=env.spec,
filters=((5, (3, 3)), ),
strides=(1, ))
def test_not_box(self):
with pytest.raises(ValueError):
dict_env = GarageEnv(DummyDictEnv())
ContinuousCNNQFunction(env_spec=dict_env.spec,
filters=((5, (3, 3)), ),
strides=(1, ))
def test_obs_is_image(self):
image_env = GarageEnv(DummyDiscretePixelEnv(), is_image=True)
with mock.patch(('tests.fixtures.models.SimpleCNNModel._build'),
autospec=True,
side_effect=SimpleCNNModel._build) as build:
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.MLPMergeModel'),
new=SimpleMLPMergeModel):
qf = ContinuousCNNQFunction(env_spec=image_env.spec,
filters=((5, (3, 3)), ),
strides=(1, ))
fake_obs = [
np.full(image_env.spec.observation_space.shape, 255)
]
# make sure image obses are normalized in _initialize()
# and get_qval
normalized_obs = build.call_args_list[0][0][1]
assert normalized_obs != qf.inputs[0]
assert (self.sess.run(normalized_obs,
feed_dict={qf.inputs[0]:
fake_obs}) == 1.).all()
# make sure image obses are normalized in get_qval_sim()
obs_dim = image_env.spec.observation_space.shape
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
obs_dim)
act_dim = image_env.spec.observation_space.shape
action_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
act_dim)
qf.get_qval_sym(state_input, action_input, name='another')
normalized_obs = build.call_args_list[1][0][1]
assert (self.sess.run(normalized_obs,
feed_dict={state_input:
fake_obs}) == 1.).all()
def test_obs_not_image(self):
env = GarageEnv(DummyDiscretePixelEnv())
with mock.patch(('tests.fixtures.models.SimpleCNNModel._build'),
autospec=True,
side_effect=SimpleCNNModel._build) as build:
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.MLPMergeModel'),
new=SimpleMLPMergeModel):
qf = ContinuousCNNQFunction(env_spec=env.spec,
filters=((5, (3, 3)), ),
strides=(1, ))
# ensure non-image obses are not normalized
# in _initialize() and get_qval()
normalized_obs = build.call_args_list[0][0][1]
assert normalized_obs == qf.inputs[0]
fake_obs = [
np.full(env.spec.observation_space.shape, 255.)
]
assert (self.sess.run(normalized_obs,
feed_dict={qf.inputs[0]:
fake_obs}) == 255.).all()
# ensure non-image obses are not normalized
# in get_qval_sym()
obs_dim = env.spec.observation_space.shape
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
obs_dim)
act_dim = env.spec.observation_space.shape
action_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) +
act_dim)
qf.get_qval_sym(state_input, action_input, name='another')
normalized_obs = build.call_args_list[1][0][1]
assert (self.sess.run(normalized_obs,
feed_dict={state_input:
fake_obs}) == 255.).all()
# yapf: disable
@pytest.mark.parametrize('filters, strides', [
(((5, (3, 3)), ), (1, )),
(((5, (3, 3)), ), (2, )),
(((5, (3, 3)), (5, (3, 3))), (1, 1))
])
# yapf: enable
def test_get_qval_sym(self, filters, strides):
env = GarageEnv(DummyDiscretePixelEnv())
obs = env.reset()
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.MLPMergeModel'),
new=SimpleMLPMergeModel):
qf = ContinuousCNNQFunction(env_spec=env.spec,
filters=filters,
strides=strides)
action_dim = env.action_space.shape
obs, _, _, _ = env.step(1)
act = np.full(action_dim, 0.5)
output1 = qf.get_qval([obs], [act])
input_var1 = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + obs.shape)
input_var2 = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + act.shape)
q_vals = qf.get_qval_sym(input_var1, input_var2, 'another')
output2 = self.sess.run(q_vals,
feed_dict={
input_var1: [obs],
input_var2: [act]
})
expected_output = np.full((1, ), 0.5)
assert np.array_equal(output1, output2)
assert np.array_equal(output2[0], expected_output)
# yapf: disable
@pytest.mark.parametrize('filters, strides', [
(((5, (3, 3)), ), (1, )),
(((5, (3, 3)), ), (2, )),
(((5, (3, 3)), (5, (3, 3))), (1, 1))
])
# yapf: enable
def test_is_pickleable(self, filters, strides):
env = GarageEnv(DummyDiscretePixelEnv())
obs = env.reset()
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.MLPMergeModel'),
new=SimpleMLPMergeModel):
qf = ContinuousCNNQFunction(env_spec=env.spec,
filters=filters,
strides=strides)
action_dim = env.action_space.shape
obs, _, _, _ = env.step(1)
act = np.full(action_dim, 0.5)
_, _ = qf.inputs
with tf.compat.v1.variable_scope(
'ContinuousCNNQFunction/CNNMLPMergeModel/SimpleMLPMergeModel',
reuse=True):
return_var = tf.compat.v1.get_variable('return_var')
# assign it to all one
return_var.load(tf.ones_like(return_var).eval())
output1 = qf.get_qval([obs], [act])
h_data = pickle.dumps(qf)
with tf.compat.v1.Session(graph=tf.Graph()):
qf_pickled = pickle.loads(h_data)
_, _ = qf_pickled.inputs
output2 = qf_pickled.get_qval([obs], [act])
assert np.array_equal(output1, output2)
# yapf: disable
@pytest.mark.parametrize('filters, strides', [
(((5, (3, 3)), ), (1, )),
(((5, (3, 3)), ), (2, )),
(((5, (3, 3)), (5, (3, 3))), (1, 1))
])
# yapf: enable
def test_clone(self, filters, strides):
env = GarageEnv(DummyDiscretePixelEnv())
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.models.'
'cnn_mlp_merge_model.MLPMergeModel'),
new=SimpleMLPMergeModel):
qf = ContinuousCNNQFunction(env_spec=env.spec,
filters=filters,
strides=strides)
qf_clone = qf.clone('another_qf')
# pylint: disable=protected-access
assert qf_clone._filters == qf._filters
assert qf_clone._strides == qf._strides
# pylint: enable=protected-access
for cloned_param, param in zip(qf_clone.model.parameters.values(),
qf.model.parameters.values()):
assert np.array_equal(cloned_param, param)
| 14,300 | 39.86 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/q_functions/test_continuous_mlp_q_function.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.q_functions import ContinuousMLPQFunction
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv, DummyDictEnv
from tests.fixtures.models import SimpleMLPMergeModel
class TestContinuousMLPQFunction(TfGraphTestCase):
    """Tests for ContinuousMLPQFunction using a simple stand-in model."""

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((2, ), (2, )),
        ((1, 1), (1, )),
        ((2, 2), (2, )),
    ])
    def test_q_vals(self, obs_dim, action_dim):
        """get_qval returns the stub model's constant value."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'continuous_mlp_q_function.MLPMergeModel'),
                        new=SimpleMLPMergeModel):
            qf = ContinuousMLPQFunction(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        obs = obs.flatten()
        act = np.full(action_dim, 0.5).flatten()
        expected_output = np.full((1, ), 0.5)

        outputs = qf.get_qval([obs], [act])
        assert np.array_equal(outputs[0], expected_output)

        outputs = qf.get_qval([obs, obs, obs], [act, act, act])
        for output in outputs:
            assert np.array_equal(output, expected_output)

    def test_q_vals_goal_conditioned(self):
        """get_qval handles dict (goal-conditioned) observations."""
        env = GarageEnv(DummyDictEnv())
        with mock.patch(('garage.tf.q_functions.'
                         'continuous_mlp_q_function.MLPMergeModel'),
                        new=SimpleMLPMergeModel):
            qf = ContinuousMLPQFunction(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        # Goal-conditioned obs: concatenate observation and goal components.
        obs = np.concatenate(
            (obs['observation'], obs['desired_goal'], obs['achieved_goal']),
            axis=-1)
        act = np.full((1, ), 0.5).flatten()
        expected_output = np.full((1, ), 0.5)

        outputs = qf.get_qval([obs], [act])
        assert np.array_equal(outputs[0], expected_output)

        outputs = qf.get_qval([obs, obs, obs], [act, act, act])
        for output in outputs:
            assert np.array_equal(output, expected_output)

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((2, ), (2, )),
        ((1, 1), (1, )),
        ((2, 2), (2, )),
    ])
    def test_output_shape(self, obs_dim, action_dim):
        """Q-value output has shape (batch, 1)."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'continuous_mlp_q_function.MLPMergeModel'),
                        new=SimpleMLPMergeModel):
            qf = ContinuousMLPQFunction(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        obs = obs.flatten()
        act = np.full(action_dim, 0.5).flatten()

        outputs = qf.get_qval([obs], [act])

        assert outputs.shape == (1, 1)

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((2, ), (2, )),
        ((1, 1), (1, )),
        ((2, 2), (2, )),
    ])
    def test_get_qval_sym(self, obs_dim, action_dim):
        """get_qval_sym builds a graph matching get_qval's output."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'continuous_mlp_q_function.MLPMergeModel'),
                        new=SimpleMLPMergeModel):
            qf = ContinuousMLPQFunction(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        obs = obs.flatten()
        act = np.full(action_dim, 0.5).flatten()

        output1 = qf.get_qval([obs], [act])

        input_var1 = tf.compat.v1.placeholder(tf.float32,
                                              shape=(None, obs.shape[0]))
        input_var2 = tf.compat.v1.placeholder(tf.float32,
                                              shape=(None, act.shape[0]))
        q_vals = qf.get_qval_sym(input_var1, input_var2, 'another')
        output2 = self.sess.run(q_vals,
                                feed_dict={
                                    input_var1: [obs],
                                    input_var2: [act]
                                })

        expected_output = np.full((1, ), 0.5)

        assert np.array_equal(output1, output2)
        assert np.array_equal(output2[0], expected_output)

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), (1, )),
        ((2, ), (2, )),
        ((1, 1), (1, )),
        ((2, 2), (2, )),
    ])
    def test_is_pickleable(self, obs_dim, action_dim):
        """A pickled-then-unpickled Q-function computes the same value."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'continuous_mlp_q_function.MLPMergeModel'),
                        new=SimpleMLPMergeModel):
            qf = ContinuousMLPQFunction(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        obs = obs.flatten()
        act = np.full(action_dim, 0.5).flatten()

        with tf.compat.v1.variable_scope(
                'ContinuousMLPQFunction/SimpleMLPMergeModel', reuse=True):
            return_var = tf.compat.v1.get_variable('return_var')
        # assign it to all one
        return_var.load(tf.ones_like(return_var).eval())

        output1 = qf.get_qval([obs], [act])

        h_data = pickle.dumps(qf)
        with tf.compat.v1.Session(graph=tf.Graph()):
            qf_pickled = pickle.loads(h_data)
            output2 = qf_pickled.get_qval([obs], [act])

        assert np.array_equal(output1, output2)
| 5,557 | 35.326797 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/q_functions/test_discrete_cnn_q_function.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.models import CNNModel
from garage.tf.q_functions import DiscreteCNNQFunction
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyDiscreteEnv
from tests.fixtures.envs.dummy import DummyDiscretePixelEnv
from tests.fixtures.models import SimpleCNNModel
from tests.fixtures.models import SimpleCNNModelWithMaxPooling
from tests.fixtures.models import SimpleMLPModel
class TestDiscreteCNNQFunction(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.env = GarageEnv(DummyDiscretePixelEnv())
self.obs = self.env.reset()
# yapf: disable
@pytest.mark.parametrize('filters, strides', [
(((5, (3, 3)), ), (1, )),
(((5, (3, 3)), ), (2, )),
(((5, (3, 3)), (5, (3, 3))), (1, 1)),
])
# yapf: enable
def test_get_action(self, filters, strides):
with mock.patch(('garage.tf.q_functions.'
'discrete_cnn_q_function.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.q_functions.'
'discrete_cnn_q_function.MLPModel'),
new=SimpleMLPModel):
qf = DiscreteCNNQFunction(env_spec=self.env.spec,
filters=filters,
strides=strides,
dueling=False)
action_dim = self.env.action_space.n
expected_output = np.full(action_dim, 0.5)
outputs = self.sess.run(qf.q_vals, feed_dict={qf.input: [self.obs]})
assert np.array_equal(outputs[0], expected_output)
outputs = self.sess.run(
qf.q_vals, feed_dict={qf.input: [self.obs, self.obs, self.obs]})
for output in outputs:
assert np.array_equal(output, expected_output)
@pytest.mark.parametrize('obs_dim', [[1], [2], [1, 1, 1, 1], [2, 2, 2, 2]])
def test_invalid_obs_shape(self, obs_dim):
boxEnv = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim))
with pytest.raises(ValueError):
DiscreteCNNQFunction(env_spec=boxEnv.spec,
filters=((5, (3, 3)), ),
strides=(2, ),
dueling=False)
def test_obs_is_image(self):
image_env = GarageEnv(DummyDiscretePixelEnv(), is_image=True)
with mock.patch(('garage.tf.models.'
'categorical_cnn_model.CNNModel._build'),
autospec=True,
side_effect=CNNModel._build) as build:
qf = DiscreteCNNQFunction(env_spec=image_env.spec,
filters=((5, (3, 3)), ),
strides=(2, ),
dueling=False)
normalized_obs = build.call_args_list[0][0][1]
input_ph = qf.input
assert input_ph != normalized_obs
fake_obs = [np.full(image_env.spec.observation_space.shape, 255)]
assert (self.sess.run(normalized_obs,
feed_dict={input_ph: fake_obs}) == 1.).all()
obs_dim = image_env.spec.observation_space.shape
state_input = tf.compat.v1.placeholder(tf.uint8,
shape=(None, ) + obs_dim)
qf.get_qval_sym(state_input, name='another')
normalized_obs = build.call_args_list[1][0][1]
fake_obs = [np.full(image_env.spec.observation_space.shape, 255)]
assert (self.sess.run(normalized_obs,
feed_dict={state_input:
fake_obs}) == 1.).all()
def test_obs_not_image(self):
env = self.env
with mock.patch(('garage.tf.models.'
'categorical_cnn_model.CNNModel._build'),
autospec=True,
side_effect=CNNModel._build) as build:
qf = DiscreteCNNQFunction(env_spec=env.spec,
filters=((5, (3, 3)), ),
strides=(2, ),
dueling=False)
normalized_obs = build.call_args_list[0][0][1]
input_ph = qf.input
assert input_ph == normalized_obs
fake_obs = [np.full(env.spec.observation_space.shape, 255)]
assert (self.sess.run(normalized_obs,
feed_dict={input_ph:
fake_obs}) == 255.).all()
obs_dim = env.spec.observation_space.shape
state_input = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + obs_dim)
qf.get_qval_sym(state_input, name='another')
normalized_obs = build.call_args_list[1][0][1]
fake_obs = [np.full(env.spec.observation_space.shape, 255)]
assert (self.sess.run(normalized_obs,
feed_dict={state_input:
fake_obs}) == 255).all()
# yapf: disable
@pytest.mark.parametrize('filters, strides', [
(((5, (3, 3)), ), (1, )),
(((5, (3, 3)), ), (2, )),
(((5, (3, 3)), (5, (3, 3))), (1, 1)),
])
# yapf: enable
def test_get_action_dueling(self, filters, strides):
with mock.patch(('garage.tf.q_functions.'
'discrete_cnn_q_function.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.q_functions.'
'discrete_cnn_q_function.MLPDuelingModel'),
new=SimpleMLPModel):
qf = DiscreteCNNQFunction(env_spec=self.env.spec,
filters=filters,
strides=strides,
dueling=True)
action_dim = self.env.action_space.n
expected_output = np.full(action_dim, 0.5)
outputs = self.sess.run(qf.q_vals, feed_dict={qf.input: [self.obs]})
assert np.array_equal(outputs[0], expected_output)
outputs = self.sess.run(
qf.q_vals, feed_dict={qf.input: [self.obs, self.obs, self.obs]})
for output in outputs:
assert np.array_equal(output, expected_output)
# yapf: disable
@pytest.mark.parametrize('filters, strides, pool_strides, pool_shapes', [
(((5, (3, 3)), ), (1, ), (1, 1), (1, 1)), # noqa: E122
(((5, (3, 3)), ), (2, ), (2, 2), (2, 2)), # noqa: E122
(((5, (3, 3)), (5, (3, 3))), (1, 1), (1, 1), (1, 1)), # noqa: E122
(((5, (3, 3)), (5, (3, 3))), (1, 1), (2, 2), (2, 2)) # noqa: E122
]) # noqa: E122
# yapf: enable
def test_get_action_max_pooling(self, filters, strides, pool_strides,
pool_shapes):
with mock.patch(('garage.tf.q_functions.'
'discrete_cnn_q_function.CNNModelWithMaxPooling'),
new=SimpleCNNModelWithMaxPooling):
with mock.patch(('garage.tf.q_functions.'
'discrete_cnn_q_function.MLPModel'),
new=SimpleMLPModel):
qf = DiscreteCNNQFunction(env_spec=self.env.spec,
filters=filters,
strides=strides,
max_pooling=True,
pool_strides=pool_strides,
pool_shapes=pool_shapes,
dueling=False)
action_dim = self.env.action_space.n
expected_output = np.full(action_dim, 0.5)
outputs = self.sess.run(qf.q_vals, feed_dict={qf.input: [self.obs]})
assert np.array_equal(outputs[0], expected_output)
outputs = self.sess.run(
qf.q_vals, feed_dict={qf.input: [self.obs, self.obs, self.obs]})
for output in outputs:
assert np.array_equal(output, expected_output)
# yapf: disable
@pytest.mark.parametrize('filters, strides', [
(((5, (3, 3)), ), (1, )),
(((5, (3, 3)), ), (2, )),
(((5, (3, 3)), (5, (3, 3))), (1, 1)),
])
# yapf: enable
def test_get_qval_sym(self, filters, strides):
with mock.patch(('garage.tf.q_functions.'
'discrete_cnn_q_function.CNNModel'),
new=SimpleCNNModel):
with mock.patch(('garage.tf.q_functions.'
'discrete_cnn_q_function.MLPModel'),
new=SimpleMLPModel):
qf = DiscreteCNNQFunction(env_spec=self.env.spec,
filters=filters,
strides=strides,
dueling=False)
output1 = self.sess.run(qf.q_vals, feed_dict={qf.input: [self.obs]})
obs_dim = self.env.observation_space.shape
action_dim = self.env.action_space.n
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, ) + obs_dim)
q_vals = qf.get_qval_sym(input_var, 'another')
output2 = self.sess.run(q_vals, feed_dict={input_var: [self.obs]})
expected_output = np.full(action_dim, 0.5)
assert np.array_equal(output1, output2)
assert np.array_equal(output2[0], expected_output)
    # yapf: disable
    @pytest.mark.parametrize('filters, strides', [
        (((5, (3, 3)), ), (1, )),
        (((5, (3, 3)), ), (2, )),
        (((5, (3, 3)), (5, (3, 3))), (1, 1)),
    ])
    # yapf: enable
    def test_is_pickleable(self, filters, strides):
        """Pickling the Q-function preserves its (mutated) parameters.

        A model variable is overwritten after construction so the test can
        tell that the pickle round-trip restores the current values rather
        than freshly initialized ones.
        """
        with mock.patch(('garage.tf.q_functions.'
                         'discrete_cnn_q_function.CNNModel'),
                        new=SimpleCNNModel):
            with mock.patch(('garage.tf.q_functions.'
                             'discrete_cnn_q_function.MLPModel'),
                            new=SimpleMLPModel):
                qf = DiscreteCNNQFunction(env_spec=self.env.spec,
                                          filters=filters,
                                          strides=strides,
                                          dueling=False)
        with tf.compat.v1.variable_scope(
                'DiscreteCNNQFunction/Sequential/SimpleMLPModel', reuse=True):
            return_var = tf.compat.v1.get_variable('return_var')
        # assign it to all one
        return_var.load(tf.ones_like(return_var).eval())
        output1 = self.sess.run(qf.q_vals, feed_dict={qf.input: [self.obs]})
        h_data = pickle.dumps(qf)
        # Unpickle into a brand-new graph/session to prove full independence
        # from the original graph.
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            qf_pickled = pickle.loads(h_data)
            output2 = sess.run(qf_pickled.q_vals,
                               feed_dict={qf_pickled.input: [self.obs]})
        assert np.array_equal(output1, output2)
| 11,327 | 43.423529 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/q_functions/test_discrete_mlp_q_function.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import GarageEnv
from garage.tf.q_functions.discrete_mlp_q_function import DiscreteMLPQFunction
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyDiscreteEnv
from tests.fixtures.models import SimpleMLPModel
class TestDiscreteMLPQFunction(TfGraphTestCase):
    """Tests for DiscreteMLPQFunction.

    Every test patches the underlying MLP model with SimpleMLPModel, a stub
    that outputs a constant 0.5 per action, so only the Q-function wiring is
    exercised.
    """

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), 1),
        ((2, ), 2),
        ((1, 1), 1),
        ((2, 2), 2),
    ])
    def test_get_action(self, obs_dim, action_dim):
        """Q-values equal the stub model's constant for single/batched obs."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'discrete_mlp_q_function.MLPModel'),
                        new=SimpleMLPModel):
            qf = DiscreteMLPQFunction(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        expected_output = np.full(action_dim, 0.5)
        outputs = self.sess.run(qf.q_vals, feed_dict={qf.input: [obs]})
        assert np.array_equal(outputs[0], expected_output)
        outputs = self.sess.run(qf.q_vals,
                                feed_dict={qf.input: [obs, obs, obs]})
        for output in outputs:
            assert np.array_equal(output, expected_output)

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), 1),
        ((2, ), 2),
        ((1, 1), 1),
        ((2, 2), 2),
    ])
    def test_output_shape(self, obs_dim, action_dim):
        """Non-dueling head emits one Q-value per action."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'discrete_mlp_q_function.MLPModel'),
                        new=SimpleMLPModel):
            qf = DiscreteMLPQFunction(env_spec=env.spec, dueling=False)
        env.reset()
        obs, _, _, _ = env.step(1)
        outputs = self.sess.run(qf.q_vals, feed_dict={qf.input: [obs]})
        assert outputs.shape == (1, action_dim)

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), 1),
        ((2, ), 2),
        ((1, 1), 1),
        ((2, 2), 2),
    ])
    def test_output_shape_dueling(self, obs_dim, action_dim):
        """Dueling head still emits one Q-value per action."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'discrete_mlp_q_function.MLPDuelingModel'),
                        new=SimpleMLPModel):
            qf = DiscreteMLPQFunction(env_spec=env.spec, dueling=True)
        env.reset()
        obs, _, _, _ = env.step(1)
        outputs = self.sess.run(qf.q_vals, feed_dict={qf.input: [obs]})
        assert outputs.shape == (1, action_dim)

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), 1),
        ((2, ), 2),
        ((1, 1), 1),
        ((2, 2), 2),
    ])
    def test_get_qval_sym(self, obs_dim, action_dim):
        """get_qval_sym rebuilds a graph matching the default q_vals."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'discrete_mlp_q_function.MLPModel'),
                        new=SimpleMLPModel):
            qf = DiscreteMLPQFunction(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        output1 = self.sess.run(qf.q_vals, feed_dict={qf.input: [obs]})
        input_var = tf.compat.v1.placeholder(tf.float32,
                                             shape=(None, ) + obs_dim)
        q_vals = qf.get_qval_sym(input_var, 'another')
        output2 = self.sess.run(q_vals, feed_dict={input_var: [obs]})
        expected_output = np.full(action_dim, 0.5)
        assert np.array_equal(output1, output2)
        assert np.array_equal(output2[0], expected_output)

    @pytest.mark.parametrize('obs_dim, action_dim', [
        ((1, ), 1),
        ((2, ), 2),
        ((1, 1), 1),
        ((2, 2), 2),
    ])
    def test_is_pickleable(self, obs_dim, action_dim):
        """Pickle round-trip preserves mutated model parameters."""
        env = GarageEnv(
            DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        with mock.patch(('garage.tf.q_functions.'
                         'discrete_mlp_q_function.MLPModel'),
                        new=SimpleMLPModel):
            qf = DiscreteMLPQFunction(env_spec=env.spec)
        env.reset()
        obs, _, _, _ = env.step(1)
        with tf.compat.v1.variable_scope('DiscreteMLPQFunction/SimpleMLPModel',
                                         reuse=True):
            return_var = tf.compat.v1.get_variable('return_var')
        # assign it to all one
        return_var.load(tf.ones_like(return_var).eval())
        output1 = self.sess.run(qf.q_vals, feed_dict={qf.input: [obs]})
        h_data = pickle.dumps(qf)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            qf_pickled = pickle.loads(h_data)
            output2 = sess.run(qf_pickled.q_vals,
                               feed_dict={qf_pickled.input: [obs]})
        assert np.array_equal(output1, output2)
| 5,051 | 35.085714 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/regressors/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/regressors/test_bernoulli_mlp_regressor.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.optimizers import ConjugateGradientOptimizer, LbfgsOptimizer
from garage.tf.regressors import BernoulliMLPRegressor
from tests.fixtures import TfGraphTestCase
def get_labels(input_shape, xs, output_dim):
    """Label a single sample for the synthetic classification tasks.

    For 1-D inputs the task is the sign of sin(xs); for 2-D inputs it is the
    XOR of the rounded coordinates.

    Args:
        input_shape (tuple): Either (1, ) or (2, ).
        xs: Scalar sample for (1, ); 2-element sequence for (2, ).
        output_dim (int): Label width; only consulted for (2, ).

    Returns:
        list or int: One-hot pair, or the raw XOR bit when output_dim == 1.

    Raises:
        ValueError: If input_shape is neither (1, ) nor (2, ).
    """
    if input_shape == (1, ):
        label = [0, 0]
        # [0, 1] if sign is positive else [1, 0]
        ys = 0 if np.sin(xs) <= 0 else 1
        label[ys] = 1
    elif input_shape == (2, ):
        ys = int(np.round(xs[0])) ^ int(np.round(xs[1]))
        if output_dim == 1:
            label = ys
        else:
            # [0, 1] if XOR is 1 else [1, 0]
            label = [0, 0]
            label[ys] = 1
    else:
        # Fail loudly instead of the UnboundLocalError the silent
        # fall-through used to produce.
        raise ValueError(
            'input_shape must be (1, ) or (2, ), got {}'.format(input_shape))
    return label
def get_train_data(input_shape, output_dim):
    """Build a labelled training set for the given input shape.

    Returns a tuple (observations, returns): observations stacks the raw
    inputs, and returns stacks their labels reshaped to (-1, output_dim).
    """
    if input_shape == (1, ):
        # Sign of sin function
        samples = np.linspace(-np.pi, np.pi, 1000)
        inputs = [[[x]] for x in samples]
    elif input_shape == (2, ):
        # Generate 1000 points with coordinates in [0, 1] for XOR data
        grid_x = np.linspace(0, 1, 100)
        grid_y = np.linspace(0, 1, 10)
        samples = np.dstack(np.meshgrid(grid_x, grid_y)).reshape(-1, 2)
        inputs = [[x] for x in samples]
    labels = [[get_labels(input_shape, x, output_dim)] for x in samples]
    observations = np.concatenate(inputs)
    returns = np.concatenate(labels).reshape((-1, output_dim))
    return observations, returns
def get_test_data(input_shape, output_dim):
    """Fixed held-out inputs with their expected labels.

    For (1, ) the points straddle zero for the sign-of-sin task; for (2, )
    the four XOR corners are used, with scalar labels when output_dim == 1.
    """
    if input_shape == (1, ):
        angles = [-np.pi / 2, -np.pi / 3, -np.pi / 4,
                  np.pi / 4, np.pi / 3, np.pi / 4]
        paths = {'observations': [[a] for a in angles]}
        expected = [[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]]
    elif input_shape == (2, ):
        paths = {'observations': [[0, 0], [0, 1], [1, 0], [1, 1]]}
        expected = ([[0], [1], [1], [0]] if output_dim == 1 else
                    [[1, 0], [0, 1], [0, 1], [1, 0]])
    return paths, expected
class TestBernoulliMLPRegressor(TfGraphTestCase):
    """Tests for BernoulliMLPRegressor on synthetic sign/XOR tasks."""

    # yapf: disable
    @pytest.mark.parametrize('input_shape, output_dim', [
        ((1, ), 2),
        ((2, ), 1),
        ((2, ), 2),
    ])
    # yapf: enable
    def test_fit_normalized(self, input_shape, output_dim):
        """Fitting with input normalization learns the task statistics."""
        bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                    output_dim=output_dim)
        observations, returns = get_train_data(input_shape, output_dim)
        for _ in range(150):
            bmr.fit(observations, returns)
        paths, expected = get_test_data(input_shape, output_dim)
        prediction = np.cast['int'](bmr.predict(paths['observations']))
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        # Normalization constants must track the training data statistics.
        x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(bmr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    # yapf: disable
    @pytest.mark.parametrize('input_shape, output_dim', [
        ((1, ), 2),
        ((2, ), 2),
        ((2, ), 1),
    ])
    # yapf: enable
    def test_fit_unnormalized(self, input_shape, output_dim):
        """With normalize_inputs=False the stats stay at identity values."""
        bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                    output_dim=output_dim,
                                    normalize_inputs=False)
        observations, returns = get_train_data(input_shape, output_dim)
        for _ in range(150):
            bmr.fit(observations, returns)
        paths, expected = get_test_data(input_shape, output_dim)
        prediction = np.cast['int'](bmr.predict(paths['observations']))
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
        x_mean_expected = np.zeros_like(x_mean)
        x_std = self.sess.run(bmr.model._networks['default'].x_std)
        x_std_expected = np.ones_like(x_std)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    # yapf: disable
    @pytest.mark.parametrize('input_shape, output_dim', [
        ((1, ), 2),
        ((2, ), 2),
        ((2, ), 1),
    ])
    # yapf: enable
    def test_fit_with_no_trust_region(self, input_shape, output_dim):
        """Fitting still converges when the trust region is disabled."""
        bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                    output_dim=output_dim,
                                    use_trust_region=False)
        observations, returns = get_train_data(input_shape, output_dim)
        for _ in range(150):
            bmr.fit(observations, returns)
        paths, expected = get_test_data(input_shape, output_dim)
        prediction = np.cast['int'](bmr.predict(paths['observations']))
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(bmr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    def test_sample_predict(self):
        """Empirical frequency of sampled labels approximates _f_prob."""
        n_sample = 100
        input_dim = 50
        output_dim = 1
        bmr = BernoulliMLPRegressor(input_shape=(input_dim, ),
                                    output_dim=output_dim)
        xs = np.random.random((input_dim, ))
        p = bmr._f_prob([xs])
        ys = bmr.sample_predict([xs] * n_sample)
        p_predict = np.count_nonzero(ys == 1) / n_sample
        assert np.real_if_close(p, p_predict)

    def test_predict_log_likelihood(self):
        """predict_log_likelihood matches the closed-form Bernoulli LL."""
        n_sample = 50
        input_dim = 50
        output_dim = 1
        bmr = BernoulliMLPRegressor(input_shape=(input_dim, ),
                                    output_dim=output_dim)
        xs = np.random.random((n_sample, input_dim))
        ys = np.random.randint(2, size=(n_sample, output_dim))
        p = bmr._f_prob(xs)
        ll = bmr.predict_log_likelihood(xs, ys)
        # Bernoulli log-likelihood: sum log(p*y + (1-p)*(1-y)).
        ll_true = np.sum(np.log(p * ys + (1 - p) * (1 - ys)), axis=-1)
        assert np.allclose(ll, ll_true)

    # yapf: disable
    @pytest.mark.parametrize('output_dim, input_shape', [
        (1, (1, 1)),
        (1, (2, 2)),
        (2, (3, 2)),
        (3, (2, 2)),
    ])
    # yapf: enable
    def test_log_likelihood_sym(self, output_dim, input_shape):
        """Symbolic log-likelihood agrees with the distribution's value."""
        bmr = BernoulliMLPRegressor(input_shape=(input_shape[1], ),
                                    output_dim=output_dim)
        new_xs_var = tf.compat.v1.placeholder(tf.float32, input_shape)
        new_ys_var = tf.compat.v1.placeholder(dtype=tf.float32,
                                              name='ys',
                                              shape=(None, output_dim))
        data = np.full(input_shape, 0.5)
        one_hot_label = np.zeros((input_shape[0], output_dim))
        one_hot_label[np.arange(input_shape[0]), 0] = 1
        p = bmr._f_prob(np.asarray(data))
        ll = bmr._dist.log_likelihood(np.asarray(one_hot_label), dict(p=p))
        outputs = bmr.log_likelihood_sym(new_xs_var, new_ys_var, name='ll_sym')
        ll_from_sym = self.sess.run(outputs,
                                    feed_dict={
                                        new_xs_var: data,
                                        new_ys_var: one_hot_label
                                    })
        assert np.allclose(ll, ll_from_sym, rtol=0, atol=1e-5)

    @mock.patch('tests.garage.tf.regressors.'
                'test_bernoulli_mlp_regressor.'
                'LbfgsOptimizer')
    @mock.patch('tests.garage.tf.regressors.'
                'test_bernoulli_mlp_regressor.'
                'ConjugateGradientOptimizer')
    def test_optimizer_args(self, mock_cg, mock_lbfgs):
        """Optimizer kwargs are forwarded to both optimizer constructors."""
        lbfgs_args = dict(max_opt_itr=25)
        cg_args = dict(cg_iters=15)
        bmr = BernoulliMLPRegressor(input_shape=(1, ),
                                    output_dim=2,
                                    optimizer=LbfgsOptimizer,
                                    optimizer_args=lbfgs_args,
                                    tr_optimizer=ConjugateGradientOptimizer,
                                    tr_optimizer_args=cg_args,
                                    use_trust_region=True)
        assert mock_lbfgs.return_value is bmr._optimizer
        assert mock_cg.return_value is bmr._tr_optimizer
        mock_lbfgs.assert_called_with(max_opt_itr=25)
        mock_cg.assert_called_with(cg_iters=15)

    def test_is_pickleable(self):
        """Pickle round-trip preserves predictions and parameters."""
        bmr = BernoulliMLPRegressor(input_shape=(1, ), output_dim=2)
        with tf.compat.v1.variable_scope(
                'BernoulliMLPRegressor/NormalizedInputMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable('mlp/hidden_0/bias')
            # Mutate a variable so restored-vs-fresh weights are
            # distinguishable.
            bias.load(tf.ones_like(bias).eval())
            bias1 = bias.eval()
        result1 = np.cast['int'](bmr.predict(np.ones((1, 1))))
        h = pickle.dumps(bmr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            bmr_pickled = pickle.loads(h)
            result2 = np.cast['int'](bmr_pickled.predict(np.ones((1, 1))))
            assert np.array_equal(result1, result2)
            with tf.compat.v1.variable_scope(
                    'BernoulliMLPRegressor/NormalizedInputMLPModel',
                    reuse=True):
                bias2 = tf.compat.v1.get_variable('mlp/hidden_0/bias').eval()
                assert np.array_equal(bias1, bias2)

    def test_is_pickleable2(self):
        """Pickle round-trip preserves the normalization variables too."""
        bmr = BernoulliMLPRegressor(input_shape=(1, ), output_dim=2)
        with tf.compat.v1.variable_scope(
                'BernoulliMLPRegressor/NormalizedInputMLPModel', reuse=True):
            x_mean = tf.compat.v1.get_variable('normalized_vars/x_mean')
            x_mean.load(tf.ones_like(x_mean).eval())
            x1 = bmr.model._networks['default'].x_mean.eval()
        h = pickle.dumps(bmr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            bmr_pickled = pickle.loads(h)
            x2 = bmr_pickled.model._networks['default'].x_mean.eval()
            assert np.array_equal(x1, x2)
| 10,547 | 36.272085 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/regressors/test_categorical_mlp_regressor.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from garage.tf.optimizers import ConjugateGradientOptimizer, LbfgsOptimizer
from garage.tf.regressors import CategoricalMLPRegressor
from tests.fixtures import TfGraphTestCase
def get_labels(input_shape, xs):
    """One-hot label for a sample: sign-of-sin for 1-D, XOR for 2-D input.

    Unrecognized shapes yield the all-zero pair [0, 0].
    """
    hot = None
    if input_shape == (1, ):
        hot = 0 if np.sin(xs) <= 0 else 1
    elif input_shape == (2, ):
        hot = int(np.round(xs[0])) ^ int(np.round(xs[1]))
    label = [0, 0]
    if hot is not None:
        label[hot] = 1
    return label
def get_train_data(input_shape):
    """Generate 1000 labelled samples as single-observation path dicts."""
    if input_shape == (1, ):
        # Evenly spaced angles for the sign-of-sin task.
        samples = np.linspace(-np.pi, np.pi, 1000)
        obs = [{
            'observations': [[angle]],
            'returns': [get_labels(input_shape, angle)]
        } for angle in samples]
    elif input_shape == (2, ):
        # Random points in the unit square for the XOR task.
        samples = [np.random.rand(2) for _ in range(1000)]
        obs = [{
            'observations': [point],
            'returns': [get_labels(input_shape, point)]
        } for point in samples]
    return obs
def get_test_data(input_shape):
    """Fixed evaluation inputs and their expected one-hot labels."""
    if input_shape == (1, ):
        # Three points with sin <= 0, three with sin > 0.
        angles = [-np.pi / 2, -np.pi / 3, -np.pi / 4,
                  np.pi / 4, np.pi / 3, np.pi / 4]
        paths = {'observations': [[a] for a in angles]}
        expected = [[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]]
    elif input_shape == (2, ):
        # The four XOR corners.
        paths = {'observations': [[0, 0], [0, 1], [1, 0], [1, 1]]}
        expected = [[1, 0], [0, 1], [0, 1], [1, 0]]
    return paths, expected
class TestCategoricalMLPRegressor(TfGraphTestCase):
    """Tests for CategoricalMLPRegressor on the sign/XOR tasks."""

    def test_dist(self):
        """The regressor's distribution is a OneHotCategorical."""
        cmr = CategoricalMLPRegressor(input_shape=(1, ), output_dim=2)
        dist = cmr._network.dist
        assert isinstance(dist, tfp.distributions.OneHotCategorical)

    @pytest.mark.parametrize('input_shape, output_dim', [((1, ), 2),
                                                         ((2, ), 2)])
    def test_fit_normalized(self, input_shape, output_dim):
        """Fitting with normalization learns the task and its statistics."""
        cmr = CategoricalMLPRegressor(input_shape=input_shape,
                                      output_dim=output_dim)
        obs = get_train_data(input_shape)
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        returns = returns.reshape((-1, 2))
        for _ in range(150):
            cmr.fit(observations, returns)
        paths, expected = get_test_data(input_shape)
        prediction = cmr.predict(paths['observations'])
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        # Normalization constants must track the training data statistics.
        x_mean = self.sess.run(cmr._network.x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(cmr._network.x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    @pytest.mark.parametrize('input_shape, output_dim', [((1, ), 2),
                                                         ((2, ), 2)])
    def test_fit_unnormalized(self, input_shape, output_dim):
        """With normalize_inputs=False the stats stay at identity values."""
        cmr = CategoricalMLPRegressor(input_shape=input_shape,
                                      output_dim=output_dim,
                                      normalize_inputs=False)
        obs = get_train_data(input_shape)
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        returns = returns.reshape((-1, 2))
        for _ in range(150):
            cmr.fit(observations, returns)
        paths, expected = get_test_data(input_shape)
        prediction = cmr.predict(paths['observations'])
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        x_mean = self.sess.run(cmr._network.x_mean)
        x_mean_expected = np.zeros_like(x_mean)
        x_std = self.sess.run(cmr._network.x_std)
        x_std_expected = np.ones_like(x_std)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    @pytest.mark.parametrize('input_shape, output_dim', [((1, ), 2),
                                                         ((2, ), 2)])
    def test_fit_without_initial_trust_region(self, input_shape, output_dim):
        """Fitting still converges with the trust region disabled."""
        cmr = CategoricalMLPRegressor(input_shape=input_shape,
                                      output_dim=output_dim,
                                      use_trust_region=False)
        obs = get_train_data(input_shape)
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        returns = returns.reshape((-1, 2))
        for _ in range(150):
            cmr.fit(observations, returns)
        paths, expected = get_test_data(input_shape)
        prediction = cmr.predict(paths['observations'])
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        x_mean = self.sess.run(cmr._network.x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(cmr._network.x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    @mock.patch('tests.garage.tf.regressors.'
                'test_categorical_mlp_regressor.'
                'LbfgsOptimizer')
    @mock.patch('tests.garage.tf.regressors.'
                'test_categorical_mlp_regressor.'
                'ConjugateGradientOptimizer')
    def test_optimizer_args(self, mock_cg, mock_lbfgs):
        """Optimizer kwargs are forwarded to both optimizer constructors."""
        lbfgs_args = dict(max_opt_itr=25)
        cg_args = dict(cg_iters=15)
        cmr = CategoricalMLPRegressor(input_shape=(1, ),
                                      output_dim=2,
                                      optimizer=LbfgsOptimizer,
                                      optimizer_args=lbfgs_args,
                                      tr_optimizer=ConjugateGradientOptimizer,
                                      tr_optimizer_args=cg_args,
                                      use_trust_region=True)
        assert mock_lbfgs.return_value is cmr._optimizer
        assert mock_cg.return_value is cmr._tr_optimizer
        mock_lbfgs.assert_called_with(max_opt_itr=25)
        mock_cg.assert_called_with(cg_iters=15)

    def test_is_pickleable(self):
        """Pickle round-trip preserves predictions."""
        cmr = CategoricalMLPRegressor(input_shape=(1, ), output_dim=2)
        with tf.compat.v1.variable_scope(
                'CategoricalMLPRegressor/CategoricalMLPRegressorModel',
                reuse=True):
            bias = tf.compat.v1.get_variable('mlp/hidden_0/bias')
            # Mutate a variable so restored-vs-fresh weights differ.
            bias.load(tf.ones_like(bias).eval())
        result1 = cmr.predict(np.ones((1, 1)))
        h = pickle.dumps(cmr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            cmr_pickled = pickle.loads(h)
            result2 = cmr_pickled.predict(np.ones((1, 1)))
            assert np.array_equal(result1, result2)

    def test_is_pickleable2(self):
        """Pickle round-trip preserves the normalization variables too."""
        cmr = CategoricalMLPRegressor(input_shape=(1, ), output_dim=2)
        with tf.compat.v1.variable_scope(
                'CategoricalMLPRegressor/CategoricalMLPRegressorModel',
                reuse=True):
            x_mean = tf.compat.v1.get_variable('normalized_vars/x_mean')
            x_mean.load(tf.ones_like(x_mean).eval())
            x1 = cmr.model._networks['default'].x_mean.eval()
        h = pickle.dumps(cmr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            cmr_pickled = pickle.loads(h)
            x2 = cmr_pickled.model._networks['default'].x_mean.eval()
            assert np.array_equal(x1, x2)
| 7,693 | 36.349515 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/regressors/test_continuous_mlp_regressor.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.optimizers import LbfgsOptimizer
from garage.tf.regressors import ContinuousMLPRegressor
from tests.fixtures import TfGraphTestCase
class TestContinuousMLPRegressor(TfGraphTestCase):
    """Tests for ContinuousMLPRegressor on a sin-curve regression task."""

    def test_fit_normalized(self):
        """Fitting with normalization approximates sin and tracks stats."""
        cmr = ContinuousMLPRegressor(input_shape=(1, ), output_dim=1)
        data = np.linspace(-np.pi, np.pi, 1000)
        obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        returns = returns.reshape((-1, 1))
        for _ in range(150):
            cmr.fit(observations, returns)
        paths = {
            'observations': [[-np.pi], [-np.pi / 2], [-np.pi / 4], [0],
                             [np.pi / 4], [np.pi / 2], [np.pi]]
        }
        prediction = cmr.predict(paths['observations'])
        # Values of sin at the probe points, to loose tolerance.
        expected = [[0], [-1], [-0.707], [0], [0.707], [1], [0]]
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        x_mean = self.sess.run(cmr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(cmr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    def test_fit_unnormalized(self):
        """With normalize_inputs=False the stats stay at identity values."""
        cmr = ContinuousMLPRegressor(input_shape=(1, ),
                                     output_dim=1,
                                     normalize_inputs=False)
        data = np.linspace(-np.pi, np.pi, 1000)
        obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        for _ in range(150):
            cmr.fit(observations, returns.reshape((-1, 1)))
        paths = {
            'observations': [[-np.pi], [-np.pi / 2], [-np.pi / 4], [0],
                             [np.pi / 4], [np.pi / 2], [np.pi]]
        }
        prediction = cmr.predict(paths['observations'])
        expected = [[0], [-1], [-0.707], [0], [0.707], [1], [0]]
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        x_mean = self.sess.run(cmr.model._networks['default'].x_mean)
        x_mean_expected = np.zeros_like(x_mean)
        x_std = self.sess.run(cmr.model._networks['default'].x_std)
        x_std_expected = np.ones_like(x_std)
        assert np.array_equal(x_mean, x_mean_expected)
        assert np.array_equal(x_std, x_std_expected)

    @pytest.mark.parametrize('output_dim, input_shape',
                             [(1, (1, )), (1, (2, )), (2, (3, )), (2, (1, 1)),
                              (3, (2, 2))])
    def test_predict_sym(self, output_dim, input_shape):
        """predict_sym builds a graph that matches _f_predict."""
        cmr = ContinuousMLPRegressor(input_shape=input_shape,
                                     output_dim=output_dim,
                                     optimizer=LbfgsOptimizer,
                                     optimizer_args=dict())
        new_input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, ) + input_shape)
        data = np.random.random(size=input_shape)
        outputs = cmr.predict_sym(new_input_var, name='y_hat_sym')
        y_hat_sym = self.sess.run(outputs, feed_dict={new_input_var: [data]})
        y_hat = cmr._f_predict([data])
        assert np.allclose(y_hat, y_hat_sym, rtol=0, atol=1e-5)

    def test_is_pickleable(self):
        """Pickle round-trip preserves predictions."""
        cmr = ContinuousMLPRegressor(input_shape=(1, ), output_dim=1)
        with tf.compat.v1.variable_scope(('ContinuousMLPRegressor/'
                                          'NormalizedInputMLPModel'),
                                         reuse=True):
            bias = tf.compat.v1.get_variable('mlp/hidden_0/bias')
            # Mutate a variable so restored-vs-fresh weights differ.
            bias.load(tf.ones_like(bias).eval())
        result1 = cmr.predict(np.ones((1, 1)))
        h = pickle.dumps(cmr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            cmr_pickled = pickle.loads(h)
            result2 = cmr_pickled.predict(np.ones((1, 1)))
            assert np.array_equal(result1, result2)
| 4,360 | 39.757009 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/regressors/test_gaussian_cnn_regressor.py | import pickle
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.optimizers import LbfgsOptimizer
from garage.tf.regressors import GaussianCNNRegressor
from tests.fixtures import TfGraphTestCase
def get_train_test_data():
    """Synthetic 10x10x3 'image' regression data: predict the mean of sin(x).

    Builds 110 linspace matrices; the first 100 (after sin) form the training
    set and the remaining 10 are held out for evaluation.

    Returns:
        tuple: ((observations, returns), (paths, expected)) where
            observations has shape (100, 10, 10, 3) and returns (100, 1).
    """
    matrices = [
        np.linspace(i - 0.5, i + 0.5, 300).reshape((10, 10, 3))
        for i in range(110)
    ]
    train_inputs = [np.sin(m) for m in matrices[:100]]
    observations = np.concatenate([[x] for x in train_inputs])
    returns = np.concatenate([[np.mean(x)] for x in train_inputs])
    returns = returns.reshape((-1, 1))
    test_inputs = [np.sin(m) for m in matrices[100:]]
    paths = {'observations': test_inputs}
    expected = [[np.mean(x)] for x in test_inputs]
    return (observations, returns), (paths, expected)
class TestGaussianCNNRegressor(TfGraphTestCase):
    """Tests for GaussianCNNRegressor on a mean-of-sin image task."""

    @pytest.mark.large
    def test_fit_normalized(self):
        """Fitting with normalization converges and tracks data stats."""
        gcr = GaussianCNNRegressor(input_shape=(10, 10, 3),
                                   filters=((3, (3, 3)), (6, (3, 3))),
                                   strides=(1, 1),
                                   padding='SAME',
                                   hidden_sizes=(32, ),
                                   output_dim=1,
                                   adaptive_std=False,
                                   use_trust_region=True)
        train_data, test_data = get_train_test_data()
        observations, returns = train_data
        for _ in range(20):
            gcr.fit(observations, returns)
        paths, expected = test_data
        prediction = gcr.predict(paths['observations'])
        # Mean absolute error across the held-out set.
        average_error = 0.0
        for i, exp in enumerate(expected):
            average_error += np.abs(exp - prediction[i])
        average_error /= len(expected)
        assert average_error <= 0.1
        # Input normalization constants track the training inputs.
        x_mean = self.sess.run(gcr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(gcr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)
        # Output normalization constants track the training targets.
        y_mean = self.sess.run(gcr.model._networks['default'].y_mean)
        y_mean_expected = np.mean(returns, axis=0, keepdims=True)
        y_std = self.sess.run(gcr.model._networks['default'].y_std)
        y_std_expected = np.std(returns, axis=0, keepdims=True)
        assert np.allclose(y_mean, y_mean_expected)
        assert np.allclose(y_std, y_std_expected)

    @pytest.mark.large
    def test_fit_unnormalized(self):
        """Without normalization the stats stay at identity values."""
        gcr = GaussianCNNRegressor(input_shape=(10, 10, 3),
                                   filters=((3, (3, 3)), (6, (3, 3))),
                                   strides=(1, 1),
                                   padding='SAME',
                                   hidden_sizes=(32, ),
                                   output_dim=1,
                                   adaptive_std=True,
                                   normalize_inputs=False,
                                   normalize_outputs=False)
        train_data, test_data = get_train_test_data()
        observations, returns = train_data
        for _ in range(30):
            gcr.fit(observations, returns)
        paths, expected = test_data
        prediction = gcr.predict(paths['observations'])
        average_error = 0.0
        for i, exp in enumerate(expected):
            average_error += np.abs(exp - prediction[i])
        average_error /= len(expected)
        assert average_error <= 0.1
        x_mean = self.sess.run(gcr.model._networks['default'].x_mean)
        x_mean_expected = np.zeros_like(x_mean)
        x_std = self.sess.run(gcr.model._networks['default'].x_std)
        x_std_expected = np.ones_like(x_std)
        assert np.array_equal(x_mean, x_mean_expected)
        assert np.array_equal(x_std, x_std_expected)
        y_mean = self.sess.run(gcr.model._networks['default'].y_mean)
        y_mean_expected = np.zeros_like(y_mean)
        y_std = self.sess.run(gcr.model._networks['default'].y_std)
        y_std_expected = np.ones_like(y_std)
        assert np.allclose(y_mean, y_mean_expected)
        assert np.allclose(y_std, y_std_expected)

    @pytest.mark.large
    def test_fit_smaller_subsample_factor(self):
        """Fitting on a 0.9 subsample of the data still converges."""
        gcr = GaussianCNNRegressor(input_shape=(10, 10, 3),
                                   filters=((3, (3, 3)), (6, (3, 3))),
                                   strides=(1, 1),
                                   padding='SAME',
                                   hidden_sizes=(32, ),
                                   output_dim=1,
                                   subsample_factor=0.9,
                                   adaptive_std=False)
        train_data, test_data = get_train_test_data()
        observations, returns = train_data
        for _ in range(20):
            gcr.fit(observations, returns)
        paths, expected = test_data
        prediction = gcr.predict(paths['observations'])
        average_error = 0.0
        for i, exp in enumerate(expected):
            average_error += np.abs(exp - prediction[i])
        average_error /= len(expected)
        assert average_error <= 0.1

    @pytest.mark.large
    def test_fit_without_trusted_region(self):
        """Fitting with the trust region disabled still converges."""
        gcr = GaussianCNNRegressor(input_shape=(10, 10, 3),
                                   filters=((3, (3, 3)), (6, (3, 3))),
                                   strides=(1, 1),
                                   padding='SAME',
                                   hidden_sizes=(32, ),
                                   output_dim=1,
                                   adaptive_std=False,
                                   use_trust_region=False)
        train_data, test_data = get_train_test_data()
        observations, returns = train_data
        for _ in range(20):
            gcr.fit(observations, returns)
        paths, expected = test_data
        prediction = gcr.predict(paths['observations'])
        average_error = 0.0
        for i, exp in enumerate(expected):
            average_error += np.abs(exp - prediction[i])
        average_error /= len(expected)
        assert average_error <= 0.1

    @pytest.mark.parametrize('output_dim', [(1), (2), (3)])
    def test_log_likelihood_sym(self, output_dim):
        """Symbolic log-likelihood agrees with the distribution's value."""
        input_shape = (28, 28, 3)
        gcr = GaussianCNNRegressor(input_shape=input_shape,
                                   filters=((3, (3, 3)), (6, (3, 3))),
                                   strides=(1, 1),
                                   padding='SAME',
                                   hidden_sizes=(32, ),
                                   output_dim=1,
                                   adaptive_std=False,
                                   use_trust_region=False)
        new_input_var = tf.compat.v1.placeholder(tf.float32,
                                                 shape=(None, ) + input_shape)
        new_ys_var = tf.compat.v1.placeholder(dtype=tf.float32,
                                              name='ys',
                                              shape=(None, output_dim))
        data = np.full(input_shape, 0.5)
        label = np.ones(output_dim)
        outputs = gcr.log_likelihood_sym(new_input_var,
                                         new_ys_var,
                                         name='ll_sym')
        ll_from_sym = self.sess.run(outputs,
                                    feed_dict={
                                        new_input_var: [data],
                                        new_ys_var: [label]
                                    })
        mean, log_std = gcr._f_pdists([data])
        ll = gcr.model._networks['default'].dist.log_likelihood(
            [label], dict(mean=mean, log_std=log_std))
        assert np.allclose(ll, ll_from_sym, rtol=0, atol=1e-5)

    @mock.patch('tests.garage.tf.regressors.'
                'test_gaussian_cnn_regressor.'
                'LbfgsOptimizer')
    def test_optimizer_args(self, mock_lbfgs):
        """Optimizer kwargs are forwarded to the optimizer constructor."""
        lbfgs_args = dict(max_opt_itr=25)
        gcr = GaussianCNNRegressor(input_shape=(10, 10, 3),
                                   filters=((3, (3, 3)), (6, (3, 3))),
                                   strides=(1, 1),
                                   padding='SAME',
                                   hidden_sizes=(32, ),
                                   output_dim=1,
                                   optimizer=LbfgsOptimizer,
                                   optimizer_args=lbfgs_args,
                                   use_trust_region=True)
        assert mock_lbfgs.return_value is gcr._optimizer
        mock_lbfgs.assert_called_with(max_opt_itr=25)

    def test_is_pickleable(self):
        """Pickle round-trip preserves predictions."""
        input_shape = (28, 28, 3)
        gcr = GaussianCNNRegressor(input_shape=input_shape,
                                   filters=((3, (3, 3)), (6, (3, 3))),
                                   strides=(1, 1),
                                   padding='SAME',
                                   hidden_sizes=(32, ),
                                   output_dim=1,
                                   adaptive_std=False,
                                   use_trust_region=False)
        with tf.compat.v1.variable_scope(
                'GaussianCNNRegressor/GaussianCNNRegressorModel', reuse=True):
            bias = tf.compat.v1.get_variable(
                'dist_params/mean_network/hidden_0/bias')
            # Mutate a variable so restored-vs-fresh weights differ.
            bias.load(tf.ones_like(bias).eval())
        result1 = gcr.predict([np.ones(input_shape)])
        h = pickle.dumps(gcr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            gcr_pickled = pickle.loads(h)
            result2 = gcr_pickled.predict([np.ones(input_shape)])
            assert np.array_equal(result1, result2)

    def test_is_pickleable2(self):
        """Pickle round-trip preserves the normalization variables too."""
        input_shape = (28, 28, 3)
        gcr = GaussianCNNRegressor(input_shape=input_shape,
                                   filters=((3, (3, 3)), (6, (3, 3))),
                                   strides=(1, 1),
                                   padding='SAME',
                                   hidden_sizes=(32, ),
                                   output_dim=1,
                                   adaptive_std=False,
                                   use_trust_region=False)
        with tf.compat.v1.variable_scope(
                'GaussianCNNRegressor/GaussianCNNRegressorModel', reuse=True):
            x_mean = tf.compat.v1.get_variable('normalized_vars/x_mean')
            x_mean.load(tf.ones_like(x_mean).eval())
            x1 = gcr.model._networks['default'].x_mean.eval()
        h = pickle.dumps(gcr)
        with tf.compat.v1.Session(graph=tf.Graph()):
            gcr_pickled = pickle.loads(h)
            x2 = gcr_pickled.model._networks['default'].x_mean.eval()
        assert np.array_equal(x1, x2)
| 11,010 | 40.2397 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/regressors/test_gaussian_mlp_regressor.py | import pickle
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.optimizers import PenaltyLbfgsOptimizer
from garage.tf.regressors import GaussianMLPRegressor
from tests.fixtures import TfGraphTestCase
class TestGaussianMLPRegressor(TfGraphTestCase):
    # unmarked to balance test jobs
    # @pytest.mark.large
    def test_fit_normalized(self):
        """Fitting with normalization approximates sin and tracks stats."""
        gmr = GaussianMLPRegressor(input_shape=(1, ), output_dim=1)
        data = np.linspace(-np.pi, np.pi, 1000)
        obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
        observations = np.concatenate([p['observations'] for p in obs])
        returns = np.concatenate([p['returns'] for p in obs])
        returns = returns.reshape((-1, 1))
        for _ in range(150):
            gmr.fit(observations, returns)
        paths = {
            'observations': [[-np.pi], [-np.pi / 2], [-np.pi / 4], [0],
                             [np.pi / 4], [np.pi / 2], [np.pi]]
        }
        prediction = gmr.predict(paths['observations'])
        # Values of sin at the probe points, to loose tolerance.
        expected = [[0], [-1], [-0.707], [0], [0.707], [1], [0]]
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)
        # Input and output normalization constants must track data stats.
        x_mean = self.sess.run(gmr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(gmr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)
        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)
        y_mean = self.sess.run(gmr.model._networks['default'].y_mean)
        y_mean_expected = np.mean(returns, axis=0, keepdims=True)
        y_std = self.sess.run(gmr.model._networks['default'].y_std)
        y_std_expected = np.std(returns, axis=0, keepdims=True)
        assert np.allclose(y_mean, y_mean_expected)
        assert np.allclose(y_std, y_std_expected)
# unmarked to balance test jobs
# @pytest.mark.large
def test_fit_unnormalized(self):
gmr = GaussianMLPRegressor(input_shape=(1, ),
output_dim=1,
subsample_factor=0.9,
normalize_inputs=False,
normalize_outputs=False)
data = np.linspace(-np.pi, np.pi, 1000)
obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
observations = np.concatenate([p['observations'] for p in obs])
returns = np.concatenate([p['returns'] for p in obs])
for _ in range(150):
gmr.fit(observations, returns.reshape((-1, 1)))
paths = {
'observations': [[-np.pi], [-np.pi / 2], [-np.pi / 4], [0],
[np.pi / 4], [np.pi / 2], [np.pi]]
}
prediction = gmr.predict(paths['observations'])
expected = [[0], [-1], [-0.707], [0], [0.707], [1], [0]]
assert np.allclose(prediction, expected, rtol=0, atol=0.1)
x_mean = self.sess.run(gmr.model._networks['default'].x_mean)
x_mean_expected = np.zeros_like(x_mean)
x_std = self.sess.run(gmr.model._networks['default'].x_std)
x_std_expected = np.ones_like(x_std)
assert np.array_equal(x_mean, x_mean_expected)
assert np.array_equal(x_std, x_std_expected)
y_mean = self.sess.run(gmr.model._networks['default'].y_mean)
y_mean_expected = np.zeros_like(y_mean)
y_std = self.sess.run(gmr.model._networks['default'].y_std)
y_std_expected = np.ones_like(y_std)
assert np.allclose(y_mean, y_mean_expected)
assert np.allclose(y_std, y_std_expected)
# unmarked to balance test jobs
# @pytest.mark.large
def test_fit_smaller_subsample_factor(self):
gmr = GaussianMLPRegressor(input_shape=(1, ),
output_dim=1,
subsample_factor=0.9)
data = np.linspace(-np.pi, np.pi, 1000)
obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
observations = np.concatenate([p['observations'] for p in obs])
returns = np.concatenate([p['returns'] for p in obs])
for _ in range(150):
gmr.fit(observations, returns.reshape((-1, 1)))
paths = {
'observations': [[-np.pi], [-np.pi / 2], [-np.pi / 4], [0],
[np.pi / 4], [np.pi / 2], [np.pi]]
}
prediction = gmr.predict(paths['observations'])
expected = [[0], [-1], [-0.707], [0], [0.707], [1], [0]]
assert np.allclose(prediction, expected, rtol=0, atol=0.1)
# unmarked to balance test jobs
# @pytest.mark.large
def test_fit_without_trusted_region(self):
gmr = GaussianMLPRegressor(input_shape=(1, ),
output_dim=1,
use_trust_region=False)
data = np.linspace(-np.pi, np.pi, 1000)
obs = [{'observations': [[x]], 'returns': [np.sin(x)]} for x in data]
observations = np.concatenate([p['observations'] for p in obs])
returns = np.concatenate([p['returns'] for p in obs])
for _ in range(150):
gmr.fit(observations, returns.reshape((-1, 1)))
paths = {
'observations': [[-np.pi], [-np.pi / 2], [-np.pi / 4], [0],
[np.pi / 4], [np.pi / 2], [np.pi]]
}
prediction = gmr.predict(paths['observations'])
expected = [[0], [-1], [-0.707], [0], [0.707], [1], [0]]
assert np.allclose(prediction, expected, rtol=0, atol=0.1)
def test_is_pickleable(self):
gmr = GaussianMLPRegressor(input_shape=(1, ), output_dim=1)
with tf.compat.v1.variable_scope(
'GaussianMLPRegressor/GaussianMLPRegressorModel', reuse=True):
bias = tf.compat.v1.get_variable(
'dist_params/mean_network/hidden_0/bias')
bias.load(tf.ones_like(bias).eval())
result1 = gmr.predict(np.ones((1, 1)))
h = pickle.dumps(gmr)
with tf.compat.v1.Session(graph=tf.Graph()):
gmr_pickled = pickle.loads(h)
result2 = gmr_pickled.predict(np.ones((1, 1)))
assert np.array_equal(result1, result2)
def test_is_pickleable2(self):
gmr = GaussianMLPRegressor(input_shape=(1, ), output_dim=1)
with tf.compat.v1.variable_scope(
'GaussianMLPRegressor/GaussianMLPRegressorModel', reuse=True):
x_mean = tf.compat.v1.get_variable('normalized_vars/x_mean')
x_mean.load(tf.ones_like(x_mean).eval())
x1 = gmr.model._networks['default'].x_mean.eval()
h = pickle.dumps(gmr)
with tf.compat.v1.Session(graph=tf.Graph()):
gmr_pickled = pickle.loads(h)
x2 = gmr_pickled.model._networks['default'].x_mean.eval()
assert np.array_equal(x1, x2)
def test_auxiliary(self):
gmr = GaussianMLPRegressor(input_shape=(1, ), output_dim=5)
assert gmr.vectorized
assert gmr.distribution.event_shape.as_list() == [5]
| 7,128 | 39.276836 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/samplers/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/samplers/test_ray_batched_sampler_tf.py | """
Test whether tensorflow session is properly created and destroyed.
Other features of ray sampler are tested in
tests/garage/sampler/test_ray_sampler.py
"""
from unittest.mock import Mock
import ray
from garage.envs import GarageEnv, GridWorldEnv
from garage.np.policies import ScriptedPolicy
from garage.sampler import RaySampler, WorkerFactory
# pylint: disable=unused-import
from tests.fixtures.sampler import ray_local_session_fixture
class TestRaySamplerTF():
    """Smoke test that RaySampler starts and shuts down under TF.

    Drives a scripted policy through the 4x4 gridworld::

        'SFFF'
        'FHFH'
        'FFFH'
        'HFFG'

    Action encoding: 0 left, 1 down, 2 right, 3 up, -1 no move.
    Cell legend: 'S' start, 'F'/'.' free space, 'W'/'x' wall,
    'H'/'o' hole (terminates episode), 'G' goal.
    Scripted action sequence: [2,2,1,0,3,1,1,1,2,2,1,1,1,2,2,1].
    """

    def setup_method(self):
        self.env = GarageEnv(GridWorldEnv(desc='4x4'))
        self.policy = ScriptedPolicy(
            scripted_actions=[2, 2, 1, 0, 3, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1])
        self.algo = Mock(env_spec=self.env.spec,
                         policy=self.policy,
                         max_path_length=16)

    def teardown_method(self):
        self.env.close()

    def test_ray_batch_sampler(self, ray_local_session_fixture):
        # The fixture is only needed for its side effect of starting ray.
        del ray_local_session_fixture
        assert ray.is_initialized()
        factory = WorkerFactory(seed=100,
                                max_path_length=self.algo.max_path_length)
        sampler = RaySampler(factory, self.policy, self.env)
        sampler.start_worker()
        sampler.shutdown_worker()
| 1,629 | 25.721311 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/samplers/test_task_embedding_worker.py | from unittest.mock import Mock
import numpy as np
from garage.envs import GarageEnv
from garage.tf.algos.te import TaskEmbeddingWorker
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestTaskEmbeddingWorker(TfGraphTestCase):
    """Tests for TaskEmbeddingWorker rollouts."""

    def test_task_embedding_worker(self):
        """Rollouts expose the task one-hot, latent and latent mean."""
        env = GarageEnv(DummyBoxEnv(obs_dim=(1, )))
        env.active_task_one_hot = np.array([1., 0., 0., 0.])
        env._active_task_one_hot = lambda: np.array([1., 0., 0., 0.])

        action = np.random.random(env.action_space.shape)
        latent = np.random.random(5)
        latent_info = dict(mean=np.random.random(5))
        agent_info = dict(dummy='dummy')

        # Stub out the policy: it emits the same latent and action on
        # every step.
        policy = Mock()
        policy.get_latent.return_value = (latent, latent_info)
        policy.latent_space.flatten.return_value = latent
        policy.get_action_given_latent.return_value = (action, agent_info)

        worker = TaskEmbeddingWorker(seed=1,
                                     max_path_length=100,
                                     worker_number=1)
        worker.update_agent(policy)
        worker.update_env(env)

        rollouts = worker.rollout()
        assert 'task_onehot' in rollouts.env_infos
        assert np.array_equal(rollouts.env_infos['task_onehot'][0],
                              env.active_task_one_hot)
        assert 'latent' in rollouts.agent_infos
        assert np.array_equal(rollouts.agent_infos['latent'][0], latent)
        assert 'latent_mean' in rollouts.agent_infos
        assert np.array_equal(rollouts.agent_infos['latent_mean'][0],
                              latent_info['mean'])
| 1,618 | 36.651163 | 69 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/samplers/test_tf_batch_sampler.py | import pytest
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import VPG
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.samplers import BatchSampler
from tests.fixtures import snapshot_config
class TestTFSampler:
    # NOTE: test_tf_batch_sampler passes when run in isolation but cannot
    # run on CI together with other tests, because TensorFlow is not
    # fork-safe.

    @pytest.mark.flaky
    def test_tf_batch_sampler(self):
        """BatchSampler collects at least one trajectory per worker."""
        n_workers = 8
        with LocalTFRunner(snapshot_config, max_cpus=n_workers) as runner:
            env = GarageEnv(env_name='CartPole-v1')
            policy = CategoricalMLPPolicy(name='policy',
                                          env_spec=env.spec,
                                          hidden_sizes=(32, 32))
            algo = VPG(env_spec=env.spec,
                       policy=policy,
                       baseline=LinearFeatureBaseline(env_spec=env.spec),
                       max_path_length=1,
                       discount=0.99)
            runner.setup(algo,
                         env,
                         sampler_cls=BatchSampler,
                         sampler_args={'n_envs': n_workers})

            try:
                runner.initialize_tf_vars()
            except BaseException:
                raise AssertionError(
                    'LocalRunner should be able to initialize tf variables.')

            runner._start_worker()
            paths = runner._sampler.obtain_samples(0,
                                                   batch_size=8,
                                                   whole_paths=True)
            assert len(paths) >= n_workers, (
                'BatchSampler should sample more than max_cpus={} '
                'trajectories'.format(n_workers))
| 1,960 | 34.654545 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/tf/samplers/test_tf_worker.py | import tensorflow as tf
from garage.experiment import LocalTFRunner
from garage.sampler import DefaultWorker
from garage.tf.samplers import TFWorkerWrapper
from tests.fixtures import snapshot_config
from tests.fixtures.envs.dummy import DummyBoxEnv
class TestTFWorker:
    """Tests for TFWorkerWrapper session handling."""

    def test_tf_worker_with_default_session(self):
        """The wrapper adopts the runner's default session."""
        with LocalTFRunner(snapshot_config):
            wrapper = TFWorkerWrapper()
            inner = DefaultWorker(seed=1,
                                  max_path_length=100,
                                  worker_number=1)
            inner.update_env(DummyBoxEnv())
            wrapper._inner_worker = inner
            wrapper.worker_init()
            assert wrapper._sess == tf.compat.v1.get_default_session()
        # Leaving the runner context closes the adopted session.
        assert wrapper._sess._closed

    def test_tf_worker_without_default_session(self):
        """Without a default session the wrapper creates and owns one."""
        wrapper = TFWorkerWrapper()
        inner = DefaultWorker(seed=1, max_path_length=100, worker_number=1)
        inner.update_env(DummyBoxEnv())
        wrapper._inner_worker = inner
        wrapper.worker_init()
        assert wrapper._sess == tf.compat.v1.get_default_session()
        wrapper.shutdown()
        assert wrapper._sess._closed
| 1,221 | 36.030303 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/test_functions.py | """Module to test garage.torch._functions."""
import numpy as np
import pytest
import torch
import torch.nn.functional as F
from garage.torch import compute_advantages, pad_to_last
from garage.torch import dict_np_to_torch, global_device
from garage.torch import product_of_gaussians, set_gpu_mode, torch_to_np
import garage.torch._functions as tu
from tests.fixtures import TfGraphTestCase
def stack(d, arr):
    """Return 'arr' repeated 'd' times along a new leading axis."""
    return np.stack([arr] * d, axis=0)
# Fixed 6-element sequences used as reward/baseline trajectories in
# TestTorchAlgoUtils.test_compute_advantages.
ONES = np.ones((6, ))
ZEROS = np.zeros((6, ))
ARRANGE = np.arange(6)
PI_DIGITS = np.array([3, 1, 4, 1, 5, 9])
FIBS = np.array([1, 1, 2, 3, 5, 8])

# Small float arrays of increasing rank for the pad_to_last tests.
nums_1d = np.arange(0, 4).astype(float)
nums_2d = np.arange(0, 4).astype(float).reshape(2, 2)
nums_3d = np.arange(0, 8).astype(float).reshape(2, 2, 2)
def test_utils_set_gpu_mode():
    """Check set_gpu_mode() wires up the expected global device."""
    if torch.cuda.is_available():
        set_gpu_mode(mode=True)
        assert global_device() == torch.device('cuda:0')
        assert tu._USE_GPU
        return
    # No CUDA available: forcing CPU must clear the GPU globals.
    set_gpu_mode(mode=False)
    assert global_device() == torch.device('cpu')
    assert not tu._USE_GPU
    assert not tu._GPU_ID
def test_torch_to_np():
    """torch_to_np converts each tensor in a tuple to an ndarray."""
    converted = torch_to_np((torch.zeros(1), torch.zeros(1)))
    for item in converted:
        assert isinstance(item, np.ndarray)
def test_dict_np_to_torch():
    """dict_np_to_torch converts dict values to tensors in place."""
    data = {'a': np.zeros(1), 'b': np.ones(1)}
    dict_np_to_torch(data)
    assert all(isinstance(value, torch.Tensor) for value in data.values())
def test_product_of_gaussians():
    """Product of k identical unit Gaussians has mean 1, variance 1/k."""
    num_factors = 5
    result = product_of_gaussians(torch.ones(num_factors),
                                  torch.ones(num_factors))
    assert result[0] == 1
    assert result[1] == 1 / num_factors
class TestTorchAlgoUtils(TfGraphTestCase):
    """Test class for torch algo utility functions."""

    # yapf: disable
    @pytest.mark.parametrize('discount', [1, 0.95])
    @pytest.mark.parametrize('num_trajs', [1, 5])
    @pytest.mark.parametrize('gae_lambda', [0, 0.5, 1])
    @pytest.mark.parametrize('rewards_traj, baselines_traj', [
        (ONES, ZEROS),
        (PI_DIGITS, ARRANGE),
        (ONES, FIBS),
    ])
    # yapf: enable
    def test_compute_advantages(self, num_trajs, discount, gae_lambda,
                                rewards_traj, baselines_traj):
        """Test compute_advantage function."""

        def get_advantage(discount, gae_lambda, rewards, baselines):
            # Reference GAE implementation, computed right-to-left:
            #   delta_t = r_t - V(s_t) + discount * V(s_{t+1})
            #   A_t     = delta_t + discount * gae_lambda * A_{t+1}
            # `acc` carries discount * gae_lambda * A_{t+1}; the
            # `if j else 0` skips the V(s_{t+1}) term at the last step.
            adv = torch.zeros(rewards.shape)
            for i in range(rewards.shape[0]):
                acc = 0
                for j in range(rewards.shape[1]):
                    acc = acc * discount * gae_lambda
                    acc += rewards[i][-j - 1] - baselines[i][-j - 1]
                    acc += discount * baselines[i][-j] if j else 0
                    adv[i][-j - 1] = acc
            return adv

        length = len(rewards_traj)

        # Replicate the single trajectory num_trajs times into a batch.
        rewards = torch.Tensor(stack(num_trajs, rewards_traj))
        baselines = torch.Tensor(stack(num_trajs, baselines_traj))
        expected_adv = get_advantage(discount, gae_lambda, rewards, baselines)
        computed_adv = compute_advantages(discount, gae_lambda, length,
                                          baselines, rewards)
        assert torch.allclose(expected_adv, computed_adv)

    def test_add_padding_last_1d(self):
        """Test pad_to_last function for 1d."""
        max_length = 10

        # F.pad's pad tuple is (left, right) for the last dimension.
        expected = F.pad(torch.Tensor(nums_1d),
                         (0, max_length - nums_1d.shape[-1]))

        tensor_padding = pad_to_last(nums_1d, total_length=max_length)
        assert expected.eq(tensor_padding).all()

        # For a 1d input, axis=0 is the same as the default (last) axis.
        tensor_padding = pad_to_last(nums_1d, total_length=10, axis=0)
        assert expected.eq(tensor_padding).all()

    def test_add_padding_last_2d(self):
        """Test pad_to_last function for 2d."""
        max_length = 10

        tensor_padding = pad_to_last(nums_2d, total_length=10)
        # Default axis pads the last dimension.
        expected = F.pad(torch.Tensor(nums_2d),
                         (0, max_length - nums_2d.shape[-1]))
        assert expected.eq(tensor_padding).all()

        tensor_padding = pad_to_last(nums_2d, total_length=10, axis=0)
        # F.pad pad pairs run from the last dimension backwards, so
        # (0, 0, 0, k) pads only dimension 0.
        expected = F.pad(torch.Tensor(nums_2d),
                         (0, 0, 0, max_length - nums_2d.shape[0]))
        assert expected.eq(tensor_padding).all()

        tensor_padding = pad_to_last(nums_2d, total_length=10, axis=1)
        expected = F.pad(torch.Tensor(nums_2d),
                         (0, max_length - nums_2d.shape[-1], 0, 0))
        assert expected.eq(tensor_padding).all()

    def test_add_padding_last_3d(self):
        """Test pad_to_last function for 3d."""
        max_length = 10

        tensor_padding = pad_to_last(nums_3d, total_length=10)
        expected = F.pad(torch.Tensor(nums_3d),
                         (0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))
        assert expected.eq(tensor_padding).all()

        tensor_padding = pad_to_last(nums_3d, total_length=10, axis=0)
        expected = F.pad(torch.Tensor(nums_3d),
                         (0, 0, 0, 0, 0, max_length - nums_3d.shape[0]))
        assert expected.eq(tensor_padding).all()

        tensor_padding = pad_to_last(nums_3d, total_length=10, axis=1)
        expected = F.pad(torch.Tensor(nums_3d),
                         (0, 0, 0, max_length - nums_3d.shape[-1], 0, 0))
        assert expected.eq(tensor_padding).all()

        tensor_padding = pad_to_last(nums_3d, total_length=10, axis=2)
        expected = F.pad(torch.Tensor(nums_3d),
                         (0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))
        assert expected.eq(tensor_padding).all()

    @pytest.mark.parametrize('nums', [nums_1d, nums_2d, nums_3d])
    def test_out_of_index_error(self, nums):
        """Test pad_to_last raises IndexError."""
        # axis == ndim is one past the last valid axis.
        with pytest.raises(IndexError):
            pad_to_last(nums, total_length=10, axis=len(nums.shape))
| 6,198 | 36.11976 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_ddpg.py | """This script creates a test that fails when DDPG performance is too low."""
import gym
import pytest
import torch
from torch.nn import functional as F # NOQA
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import PathBuffer
from garage.torch.algos import DDPG
from garage.torch.policies import DeterministicMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config
class TestDDPG:
    """Test class for DDPG.

    Both performance-regression tests build the same agent and training
    setup; only the environment and the acceptance threshold differ, so the
    shared pipeline lives in the private helper ``_train_ddpg``.
    """

    @staticmethod
    def _train_ddpg(env):
        """Run a short DDPG training on ``env``.

        Args:
            env (GarageEnv): Environment to train on.

        Returns:
            float: Last average return reported by the runner.
        """
        deterministic.set_seed(0)
        runner = LocalRunner(snapshot_config)
        policy = DeterministicMLPPolicy(env_spec=env.spec,
                                        hidden_sizes=[64, 64],
                                        hidden_nonlinearity=F.relu,
                                        output_nonlinearity=torch.tanh)
        exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
                                                       policy,
                                                       sigma=0.2)
        qf = ContinuousMLPQFunction(env_spec=env.spec,
                                    hidden_sizes=[64, 64],
                                    hidden_nonlinearity=F.relu)
        replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
        algo = DDPG(env_spec=env.spec,
                    policy=policy,
                    qf=qf,
                    replay_buffer=replay_buffer,
                    steps_per_epoch=20,
                    n_train_steps=50,
                    min_buffer_size=int(1e4),
                    exploration_policy=exploration_policy,
                    target_update_tau=1e-2,
                    discount=0.9)
        runner.setup(algo, env)
        return runner.train(n_epochs=10, batch_size=100)

    @pytest.mark.mujoco_long
    def test_ddpg_double_pendulum(self):
        """Test DDPG with Pendulum environment."""
        env = GarageEnv(gym.make('InvertedDoublePendulum-v2'))
        last_avg_ret = self._train_ddpg(env)
        assert last_avg_ret > 45
        env.close()

    @pytest.mark.mujoco_long
    def test_ddpg_pendulum(self):
        """Test DDPG with Pendulum environment.

        This environment has a [-3, 3] action_space bound.
        """
        env = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
        last_avg_ret = self._train_ddpg(env)
        assert last_avg_ret > 10
        env.close()
| 3,760 | 36.989899 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_maml.py | """Unit tests of MAML."""
from functools import partial
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
import torch
from garage.envs import GarageEnv, normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.sampler import LocalSampler, WorkerFactory
from garage.torch.algos import MAMLPPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@pytest.mark.mujoco
class TestMAML:
    """Test class for MAML."""

    def setup_method(self):
        """Setup method which is called before every test."""
        self.env = GarageEnv(
            normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
        self.policy = GaussianMLPPolicy(
            env_spec=self.env.spec,
            hidden_sizes=(64, 64),
            hidden_nonlinearity=torch.tanh,
            output_nonlinearity=None,
        )
        self.value_function = GaussianMLPValueFunction(env_spec=self.env.spec,
                                                       hidden_sizes=(32, 32))
        self.algo = MAMLPPO(env=self.env,
                            policy=self.policy,
                            value_function=self.value_function,
                            max_path_length=100,
                            meta_batch_size=5,
                            discount=0.99,
                            gae_lambda=1.,
                            inner_lr=0.1,
                            num_grad_updates=1)

    def teardown_method(self):
        """Teardown method which is called after every test."""
        self.env.close()

    @staticmethod
    def _set_params(v, m):
        """Set the parameters of a module to a value."""
        # Used with module.apply(); only affects Linear submodules.
        if isinstance(m, torch.nn.Linear):
            m.weight.data.fill_(v)
            m.bias.data.fill_(v)

    @staticmethod
    def _test_params(v, m):
        """Test if all parameters of a module equal to a value."""
        if isinstance(m, torch.nn.Linear):
            assert torch.all(torch.eq(m.weight.data, v))
            assert torch.all(torch.eq(m.bias.data, v))

    def test_get_exploration_policy(self):
        """Test if an independent copy of policy is returned."""
        # Mark the base policy, copy it, then mark the copy differently.
        self.policy.apply(partial(self._set_params, 0.1))
        adapt_policy = self.algo.get_exploration_policy()
        adapt_policy.apply(partial(self._set_params, 0.2))

        # Old policy should remain untouched
        self.policy.apply(partial(self._test_params, 0.1))
        adapt_policy.apply(partial(self._test_params, 0.2))

    def test_adapt_policy(self):
        """Test if policy can adapt to samples."""
        worker = WorkerFactory(seed=100, max_path_length=100)
        sampler = LocalSampler.from_worker_factory(worker, self.policy,
                                                   self.env)

        self.policy.apply(partial(self._set_params, 0.1))
        adapt_policy = self.algo.get_exploration_policy()
        trajs = sampler.obtain_samples(0, 100, adapt_policy)
        self.algo.adapt_policy(adapt_policy, trajs)

        # Old policy should remain untouched
        self.policy.apply(partial(self._test_params, 0.1))

        # Adapted policy should not be identical to old policy.
        # (for-else: fail only if no parameter tensor differs at all)
        for v1, v2 in zip(adapt_policy.parameters(), self.policy.parameters()):
            if v1.data.ne(v2.data).sum() > 0:
                break
        else:
            pytest.fail('Parameters of adapted policy should not be '
                        'identical to the old policy.')
| 3,881 | 37.435644 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_maml_ppo.py | """This script is a test that fails when MAML-TRPO performance is too low."""
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
import torch
from garage.envs import GarageEnv, normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import MAMLPPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from tests.fixtures import snapshot_config
@pytest.mark.mujoco
class TestMAMLPPO:
    """Test class for MAML-PPO."""

    def setup_method(self):
        """Build the half-cheetah-dir env, policy and value function."""
        self.env = GarageEnv(
            normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
        self.policy = GaussianMLPPolicy(
            env_spec=self.env.spec,
            hidden_sizes=(64, 64),
            hidden_nonlinearity=torch.tanh,
            output_nonlinearity=None,
        )
        self.value_function = GaussianMLPValueFunction(env_spec=self.env.spec,
                                                       hidden_sizes=(32, 32))

    def teardown_method(self):
        """Close the environment after each test."""
        self.env.close()

    def test_ppo_pendulum(self):
        """MAML-PPO clears a minimum return after a short training run."""
        deterministic.set_seed(0)
        episodes_per_task = 5
        path_len = 100
        runner = LocalRunner(snapshot_config)
        algo = MAMLPPO(env=self.env,
                       policy=self.policy,
                       value_function=self.value_function,
                       max_path_length=path_len,
                       meta_batch_size=5,
                       discount=0.99,
                       gae_lambda=1.,
                       inner_lr=0.1,
                       num_grad_updates=1)
        runner.setup(algo, self.env)
        last_avg_ret = runner.train(n_epochs=10,
                                    batch_size=episodes_per_task * path_len)
        assert last_avg_ret > -5
| 2,489 | 34.571429 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_maml_trpo.py | """This script is a test that fails when MAML-TRPO performance is too low."""
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
import torch
from garage.envs import GarageEnv, normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import LocalRunner
from garage.torch.algos import MAMLTRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from tests.fixtures import snapshot_config
from tests.fixtures.envs.dummy import DummyMultiTaskBoxEnv
@pytest.mark.mujoco
def test_maml_trpo_pendulum():
    """MAML-TRPO clears a minimum return on half-cheetah-dir."""
    env = GarageEnv(normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
    policy = GaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=(64, 64),
        hidden_nonlinearity=torch.tanh,
        output_nonlinearity=None,
    )
    value_function = GaussianMLPValueFunction(env_spec=env.spec,
                                              hidden_sizes=(32, 32))
    episodes_per_task = 5
    path_len = 100

    runner = LocalRunner(snapshot_config)
    algo = MAMLTRPO(env=env,
                    policy=policy,
                    value_function=value_function,
                    max_path_length=path_len,
                    meta_batch_size=5,
                    discount=0.99,
                    gae_lambda=1.,
                    inner_lr=0.1,
                    num_grad_updates=1)
    runner.setup(algo, env)
    last_avg_ret = runner.train(n_epochs=5,
                                batch_size=episodes_per_task * path_len)
    assert last_avg_ret > -5
    env.close()
def test_maml_trpo_dummy_named_env():
    """Smoke test with a dummy environment that exposes env_name."""
    env = GarageEnv(
        normalize(DummyMultiTaskBoxEnv(), expected_action_scale=10.))
    policy = GaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=(64, 64),
        hidden_nonlinearity=torch.tanh,
        output_nonlinearity=None,
    )
    value_function = GaussianMLPValueFunction(env_spec=env.spec,
                                              hidden_sizes=(32, 32))
    episodes_per_task = 2
    path_len = 100

    runner = LocalRunner(snapshot_config)
    algo = MAMLTRPO(env=env,
                    policy=policy,
                    value_function=value_function,
                    max_path_length=path_len,
                    meta_batch_size=5,
                    discount=0.99,
                    gae_lambda=1.,
                    inner_lr=0.1,
                    num_grad_updates=1)
    runner.setup(algo, env)
    runner.train(n_epochs=2, batch_size=episodes_per_task * path_len)
| 3,098 | 33.054945 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_maml_vpg.py | """This script is a test that fails when MAML-VPG performance is too low."""
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
import torch
from garage.envs import GarageEnv, normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import deterministic, LocalRunner, MetaEvaluator
from garage.experiment.task_sampler import SetTaskSampler
from garage.torch.algos import MAMLVPG
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from tests.fixtures import snapshot_config
@pytest.mark.mujoco
class TestMAMLVPG:
    """Test class for MAML-VPG."""

    def setup_method(self):
        """Build the half-cheetah-dir env, policy and value function."""
        self.env = GarageEnv(
            normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
        self.policy = GaussianMLPPolicy(
            env_spec=self.env.spec,
            hidden_sizes=(64, 64),
            hidden_nonlinearity=torch.tanh,
            output_nonlinearity=None,
        )
        self.value_function = GaussianMLPValueFunction(env_spec=self.env.spec,
                                                       hidden_sizes=(32, 32))

    def teardown_method(self):
        """Close the environment after each test."""
        self.env.close()

    def test_ppo_pendulum(self):
        """MAML-VPG clears a minimum return after a short training run."""
        deterministic.set_seed(0)
        episodes_per_task = 5
        path_len = 100
        task_sampler = SetTaskSampler(lambda: GarageEnv(
            normalize(HalfCheetahDirEnv(), expected_action_scale=10.)))
        meta_evaluator = MetaEvaluator(test_task_sampler=task_sampler,
                                       max_path_length=path_len,
                                       n_test_tasks=1,
                                       n_test_rollouts=10)
        runner = LocalRunner(snapshot_config)
        algo = MAMLVPG(env=self.env,
                       policy=self.policy,
                       value_function=self.value_function,
                       max_path_length=path_len,
                       meta_batch_size=5,
                       discount=0.99,
                       gae_lambda=1.,
                       inner_lr=0.1,
                       num_grad_updates=1,
                       meta_evaluator=meta_evaluator)
        runner.setup(algo, self.env)
        last_avg_ret = runner.train(n_epochs=10,
                                    batch_size=episodes_per_task * path_len)
        assert last_avg_ret > -5
| 3,003 | 36.55 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_mtsac.py | """Module for testing MTSAC."""
import numpy as np
import pytest
import torch
from torch.nn import functional as F
from garage.envs import GarageEnv, MultiEnvWrapper
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import deterministic, LocalRunner
from garage.replay_buffer import PathBuffer
from garage.sampler import LocalSampler
from garage.torch import global_device, set_gpu_mode
from garage.torch.algos import MTSAC
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config
@pytest.mark.mujoco
def test_mtsac_get_log_alpha(monkeypatch):
    """Check that the private function _get_log_alpha functions correctly.

    MTSAC uses disentangled (per-task) alphas, so _get_log_alpha must pick
    out, for each sample in a batch, the log-alpha entry that matches the
    task encoded in that sample's observation.
    """
    env_names = ['CartPole-v0', 'CartPole-v1']
    task_envs = [GarageEnv(env_name=name) for name in env_names]
    env = MultiEnvWrapper(task_envs, sample_strategy=round_robin_strategy)
    deterministic.set_seed(0)
    policy = TanhGaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=[1, 1],
        hidden_nonlinearity=torch.nn.ReLU,
        output_nonlinearity=None,
        min_std=np.exp(-20.),
        max_std=np.exp(2.),
    )
    qf1 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[1, 1],
                                 hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[1, 1],
                                 hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
    num_tasks = 2
    buffer_batch_size = 2
    mtsac = MTSAC(policy=policy,
                  qf1=qf1,
                  qf2=qf2,
                  gradient_steps_per_itr=150,
                  max_path_length=150,
                  eval_env=env,
                  env_spec=env.spec,
                  num_tasks=num_tasks,
                  steps_per_epoch=5,
                  replay_buffer=replay_buffer,
                  min_buffer_size=1e3,
                  target_update_tau=5e-3,
                  discount=0.99,
                  buffer_batch_size=buffer_batch_size)
    # Install distinct per-task log-alphas so the lookup is observable.
    monkeypatch.setattr(mtsac, '_log_alpha', torch.Tensor([1., 2.]))
    for i, _ in enumerate(env_names):
        # The round-robin wrapper presumably switches to the next task on
        # each reset, so iteration i expects log-alpha i + 1 per sample.
        obs = torch.Tensor([env.reset()] * buffer_batch_size)
        log_alpha = mtsac._get_log_alpha(dict(observation=obs))
        assert (log_alpha == torch.Tensor([i + 1, i + 1])).all().item()
        assert log_alpha.size() == torch.Size([mtsac._buffer_batch_size])
@pytest.mark.mujoco
def test_mtsac_get_log_alpha_incorrect_num_tasks(monkeypatch):
    """Check that if the num_tasks passed does not match the number of tasks
    in the environment, then the algorithm should raise an exception.

    MTSAC uses disentangled (per-task) alphas, so the one-hot task id in the
    observation must agree in length with the configured number of tasks.
    """
    env_names = ['CartPole-v0', 'CartPole-v1']
    task_envs = [GarageEnv(env_name=name) for name in env_names]
    env = MultiEnvWrapper(task_envs, sample_strategy=round_robin_strategy)
    deterministic.set_seed(0)
    policy = TanhGaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=[1, 1],
        hidden_nonlinearity=torch.nn.ReLU,
        output_nonlinearity=None,
        min_std=np.exp(-20.),
        max_std=np.exp(2.),
    )
    qf1 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[1, 1],
                                 hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[1, 1],
                                 hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
    buffer_batch_size = 2
    # num_tasks=4 deliberately disagrees with the 2 tasks in the wrapper.
    mtsac = MTSAC(policy=policy,
                  qf1=qf1,
                  qf2=qf2,
                  gradient_steps_per_itr=150,
                  max_path_length=150,
                  eval_env=env,
                  env_spec=env.spec,
                  num_tasks=4,
                  steps_per_epoch=5,
                  replay_buffer=replay_buffer,
                  min_buffer_size=1e3,
                  target_update_tau=5e-3,
                  discount=0.99,
                  buffer_batch_size=buffer_batch_size)
    monkeypatch.setattr(mtsac, '_log_alpha', torch.Tensor([1., 2.]))
    error_string = ('The number of tasks in the environment does '
                    'not match self._num_tasks. Are you sure that you passed '
                    'The correct number of tasks?')
    obs = torch.Tensor([env.reset()] * buffer_batch_size)
    with pytest.raises(ValueError, match=error_string):
        mtsac._get_log_alpha(dict(observation=obs))
@pytest.mark.mujoco
def test_mtsac_inverted_double_pendulum():
    """Performance regression test of MTSAC on 2 InvDoublePendulum envs.
    Trains for a few epochs and asserts that the average return is
    positive, i.e. the algorithm makes measurable learning progress.
    """
    # Two copies of the same task exercise the multi-task machinery
    # without changing the underlying control problem.
    env_names = ['InvertedDoublePendulum-v2', 'InvertedDoublePendulum-v2']
    task_envs = [GarageEnv(env_name=name) for name in env_names]
    env = MultiEnvWrapper(task_envs, sample_strategy=round_robin_strategy)
    test_envs = MultiEnvWrapper(task_envs,
                                sample_strategy=round_robin_strategy)
    deterministic.set_seed(0)
    runner = LocalRunner(snapshot_config=snapshot_config)
    policy = TanhGaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=[32, 32],
        hidden_nonlinearity=torch.nn.ReLU,
        output_nonlinearity=None,
        min_std=np.exp(-20.),
        max_std=np.exp(2.),
    )
    qf1 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[32, 32],
                                 hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[32, 32],
                                 hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
    num_tasks = 2
    buffer_batch_size = 128
    mtsac = MTSAC(policy=policy,
                  qf1=qf1,
                  qf2=qf2,
                  gradient_steps_per_itr=100,
                  max_path_length=100,
                  eval_env=test_envs,
                  env_spec=env.spec,
                  num_tasks=num_tasks,
                  steps_per_epoch=5,
                  replay_buffer=replay_buffer,
                  min_buffer_size=1e3,
                  target_update_tau=5e-3,
                  discount=0.99,
                  buffer_batch_size=buffer_batch_size)
    runner.setup(mtsac, env, sampler_cls=LocalSampler)
    ret = runner.train(n_epochs=8, batch_size=128, plot=False)
    # Deliberately loose threshold to keep the regression test stable.
    assert ret > 0
def test_to():
    """Test the torch function that moves modules to GPU.
    Test that the policy, both q-functions, and the per-task log_alpha
    tensor are moved to the GPU when one is available (and stay on the
    CPU otherwise).
    """
    env_names = ['CartPole-v0', 'CartPole-v1']
    task_envs = [GarageEnv(env_name=name) for name in env_names]
    env = MultiEnvWrapper(task_envs, sample_strategy=round_robin_strategy)
    deterministic.set_seed(0)
    # Tiny networks: only device placement matters in this test.
    policy = TanhGaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=[1, 1],
        hidden_nonlinearity=torch.nn.ReLU,
        output_nonlinearity=None,
        min_std=np.exp(-20.),
        max_std=np.exp(2.),
    )
    qf1 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[1, 1],
                                 hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[1, 1],
                                 hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
    num_tasks = 2
    buffer_batch_size = 2
    mtsac = MTSAC(policy=policy,
                  qf1=qf1,
                  qf2=qf2,
                  gradient_steps_per_itr=150,
                  max_path_length=150,
                  eval_env=env,
                  env_spec=env.spec,
                  num_tasks=num_tasks,
                  steps_per_epoch=5,
                  replay_buffer=replay_buffer,
                  min_buffer_size=1e3,
                  target_update_tau=5e-3,
                  discount=0.99,
                  buffer_batch_size=buffer_batch_size)
    set_gpu_mode(torch.cuda.is_available())
    mtsac.to()
    device = global_device()
    # Every learned module must live on the selected device.  (The
    # original test checked mtsac._qf2 twice; the duplicated loop was
    # removed and the modules are now iterated once each.)
    for module in (mtsac._qf1, mtsac._qf2, mtsac.policy):
        for param in module.parameters():
            assert param.device == device
    assert mtsac._log_alpha.device == device
@pytest.mark.mujoco
def test_fixed_alpha():
    """Test if using fixed_alpha ensures that alpha is non differentiable.
    With fixed_alpha=np.exp(0.5), the per-task log_alpha tensor must equal
    0.5 both before and after a training epoch, and automatic entropy
    tuning must be disabled.
    """
    env_names = ['InvertedDoublePendulum-v2', 'InvertedDoublePendulum-v2']
    task_envs = [GarageEnv(env_name=name) for name in env_names]
    env = MultiEnvWrapper(task_envs, sample_strategy=round_robin_strategy)
    test_envs = MultiEnvWrapper(task_envs,
                                sample_strategy=round_robin_strategy)
    deterministic.set_seed(0)
    runner = LocalRunner(snapshot_config=snapshot_config)
    policy = TanhGaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=[32, 32],
        hidden_nonlinearity=torch.nn.ReLU,
        output_nonlinearity=None,
        min_std=np.exp(-20.),
        max_std=np.exp(2.),
    )
    qf1 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[32, 32],
                                 hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[32, 32],
                                 hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
    num_tasks = 2
    buffer_batch_size = 128
    # fixed_alpha is given as exp(0.5), i.e. log_alpha should be 0.5.
    mtsac = MTSAC(policy=policy,
                  qf1=qf1,
                  qf2=qf2,
                  gradient_steps_per_itr=100,
                  max_path_length=100,
                  eval_env=test_envs,
                  env_spec=env.spec,
                  num_tasks=num_tasks,
                  steps_per_epoch=1,
                  replay_buffer=replay_buffer,
                  min_buffer_size=1e3,
                  target_update_tau=5e-3,
                  discount=0.99,
                  buffer_batch_size=buffer_batch_size,
                  fixed_alpha=np.exp(0.5))
    if torch.cuda.is_available():
        set_gpu_mode(True)
    else:
        set_gpu_mode(False)
    mtsac.to()
    # log_alpha must be 0.5 for every task before training...
    assert torch.allclose(torch.Tensor([0.5] * num_tasks),
                          mtsac._log_alpha.to('cpu'))
    runner.setup(mtsac, env, sampler_cls=LocalSampler)
    runner.train(n_epochs=1, batch_size=128, plot=False)
    # ...and unchanged after training, since alpha is fixed.
    assert torch.allclose(torch.Tensor([0.5] * num_tasks),
                          mtsac._log_alpha.to('cpu'))
    assert not mtsac._use_automatic_entropy_tuning
| 11,055 | 36.993127 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_pearl.py | """This script is a test that fails when PEARL performance is too low."""
import pickle
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
from metaworld.benchmarks import ML1 # noqa: I100, I202
from garage.envs import GarageEnv, normalize, PointEnv
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import PEARL
from garage.torch.algos.pearl import PEARLWorker
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import (ContextConditionedPolicy,
TanhGaussianMLPPolicy)
from garage.torch.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config
@pytest.mark.mujoco
class TestPEARL:
    """Test class for PEARL (probabilistic embeddings for meta-RL)."""
    @pytest.mark.large
    def test_pearl_ml1_push(self):
        """Test PEARL with ML1 Push environment.
        Runs one tiny epoch end-to-end (sampling, embedding, SAC updates)
        to make sure the full pipeline executes without error.
        """
        # Hyperparameters are deliberately tiny so the test runs quickly;
        # they are not tuned for learning performance.
        params = dict(seed=1,
                      num_epochs=1,
                      num_train_tasks=5,
                      num_test_tasks=1,
                      latent_size=7,
                      encoder_hidden_sizes=[10, 10, 10],
                      net_size=30,
                      meta_batch_size=16,
                      num_steps_per_epoch=40,
                      num_initial_steps=40,
                      num_tasks_sample=15,
                      num_steps_prior=15,
                      num_extra_rl_steps_posterior=15,
                      batch_size=256,
                      embedding_batch_size=8,
                      embedding_mini_batch_size=8,
                      max_path_length=50,
                      reward_scale=10.,
                      use_information_bottleneck=True,
                      use_next_obs_in_context=False,
                      use_gpu=False)
        net_size = params['net_size']
        set_seed(params['seed'])
        env_sampler = SetTaskSampler(lambda: GarageEnv(
            normalize(ML1.get_train_tasks('push-v1'))))
        env = env_sampler.sample(params['num_train_tasks'])
        test_env_sampler = SetTaskSampler(lambda: GarageEnv(
            normalize(ML1.get_test_tasks('push-v1'))))
        # The augmented spec appends the latent context to the observation.
        augmented_env = PEARL.augment_env_spec(env[0](), params['latent_size'])
        qf = ContinuousMLPQFunction(
            env_spec=augmented_env,
            hidden_sizes=[net_size, net_size, net_size])
        vf_env = PEARL.get_env_spec(env[0](), params['latent_size'], 'vf')
        vf = ContinuousMLPQFunction(
            env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
        inner_policy = TanhGaussianMLPPolicy(
            env_spec=augmented_env,
            hidden_sizes=[net_size, net_size, net_size])
        pearl = PEARL(
            env=env,
            policy_class=ContextConditionedPolicy,
            encoder_class=MLPEncoder,
            inner_policy=inner_policy,
            qf=qf,
            vf=vf,
            num_train_tasks=params['num_train_tasks'],
            num_test_tasks=params['num_test_tasks'],
            latent_dim=params['latent_size'],
            encoder_hidden_sizes=params['encoder_hidden_sizes'],
            test_env_sampler=test_env_sampler,
            meta_batch_size=params['meta_batch_size'],
            num_steps_per_epoch=params['num_steps_per_epoch'],
            num_initial_steps=params['num_initial_steps'],
            num_tasks_sample=params['num_tasks_sample'],
            num_steps_prior=params['num_steps_prior'],
            num_extra_rl_steps_posterior=params[
                'num_extra_rl_steps_posterior'],
            batch_size=params['batch_size'],
            embedding_batch_size=params['embedding_batch_size'],
            embedding_mini_batch_size=params['embedding_mini_batch_size'],
            max_path_length=params['max_path_length'],
            reward_scale=params['reward_scale'],
        )
        set_gpu_mode(params['use_gpu'], gpu_id=0)
        if params['use_gpu']:
            pearl.to()
        runner = LocalRunner(snapshot_config)
        # PEARLWorker accumulates the latent context during rollouts.
        runner.setup(
            algo=pearl,
            env=env[0](),
            sampler_cls=LocalSampler,
            sampler_args=dict(max_path_length=params['max_path_length']),
            n_workers=1,
            worker_class=PEARLWorker)
        runner.train(n_epochs=params['num_epochs'],
                     batch_size=params['batch_size'])
    def test_pickling(self):
        """Test pickle and unpickle.
        A round-tripped PEARL instance must retain its replay-buffer
        attributes and flag itself as resuming.
        """
        net_size = 10
        env_sampler = SetTaskSampler(PointEnv)
        env = env_sampler.sample(5)
        test_env_sampler = SetTaskSampler(PointEnv)
        augmented_env = PEARL.augment_env_spec(env[0](), 5)
        qf = ContinuousMLPQFunction(
            env_spec=augmented_env,
            hidden_sizes=[net_size, net_size, net_size])
        vf_env = PEARL.get_env_spec(env[0](), 5, 'vf')
        vf = ContinuousMLPQFunction(
            env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
        inner_policy = TanhGaussianMLPPolicy(
            env_spec=augmented_env,
            hidden_sizes=[net_size, net_size, net_size])
        pearl = PEARL(env=env,
                      inner_policy=inner_policy,
                      qf=qf,
                      vf=vf,
                      num_train_tasks=5,
                      num_test_tasks=5,
                      latent_dim=5,
                      encoder_hidden_sizes=[10, 10],
                      test_env_sampler=test_env_sampler)
        # This line is just to improve coverage
        pearl.to()
        pickled = pickle.dumps(pearl)
        unpickled = pickle.loads(pickled)
        assert hasattr(unpickled, '_replay_buffers')
        assert hasattr(unpickled, '_context_replay_buffers')
        assert unpickled._is_resuming
| 6,278 | 36.598802 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_pearl_worker.py | """This is a script to test PEARLWorker."""
import akro
import numpy as np
import torch.nn as nn
from torch.nn import functional as F # NOQA
from garage.envs import EnvSpec
from garage.envs import GarageEnv
from garage.torch.algos.pearl import PEARLWorker
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import ContextConditionedPolicy
from garage.torch.policies import TanhGaussianMLPPolicy
from tests.fixtures.envs.dummy import DummyBoxEnv
def test_methods():
    """Test PEARLWorker methods.
    Rolls out a context-conditioned policy with two worker configurations
    and checks the shapes of the collected trajectories (and, for the
    context-accumulating worker, of the accumulated context).
    """
    env_spec = GarageEnv(DummyBoxEnv())
    latent_dim = 5
    latent_space = akro.Box(low=-1,
                            high=1,
                            shape=(latent_dim, ),
                            dtype=np.float32)
    # add latent space to observation space to create a new space
    augmented_obs_space = akro.Tuple(
        (env_spec.observation_space, latent_space))
    augmented_env_spec = EnvSpec(augmented_obs_space, env_spec.action_space)
    obs_dim = int(np.prod(env_spec.observation_space.shape))
    action_dim = int(np.prod(env_spec.action_space.shape))
    reward_dim = 1
    # The context encoder consumes (obs, action, reward) transitions...
    encoder_input_dim = obs_dim + action_dim + reward_dim
    # ...and emits a latent mean and variance, hence 2 * latent_dim.
    encoder_output_dim = latent_dim * 2
    encoder_hidden_sizes = (3, 2, encoder_output_dim)
    # Constant (all-ones) weights keep the encoder output deterministic.
    context_encoder = MLPEncoder(input_dim=encoder_input_dim,
                                 output_dim=encoder_output_dim,
                                 hidden_nonlinearity=None,
                                 hidden_sizes=encoder_hidden_sizes,
                                 hidden_w_init=nn.init.ones_,
                                 output_w_init=nn.init.ones_)
    policy = TanhGaussianMLPPolicy(env_spec=augmented_env_spec,
                                   hidden_sizes=(3, 5, 7),
                                   hidden_nonlinearity=F.relu,
                                   output_nonlinearity=None)
    context_policy = ContextConditionedPolicy(latent_dim=latent_dim,
                                              context_encoder=context_encoder,
                                              policy=policy,
                                              use_information_bottleneck=True,
                                              use_next_obs=False)
    max_path_length = 20
    # Worker 1: default (stochastic) rollouts, no context accumulation.
    worker1 = PEARLWorker(seed=1,
                          max_path_length=max_path_length,
                          worker_number=1)
    worker1.update_agent(context_policy)
    worker1.update_env(env_spec)
    rollouts = worker1.rollout()
    assert rollouts.observations.shape == (max_path_length, obs_dim)
    assert rollouts.actions.shape == (max_path_length, action_dim)
    assert rollouts.rewards.shape == (max_path_length, )
    # Worker 2: deterministic actions, accumulating the context online.
    worker2 = PEARLWorker(seed=1,
                          max_path_length=max_path_length,
                          worker_number=1,
                          deterministic=True,
                          accum_context=True)
    worker2.update_agent(context_policy)
    worker2.update_env(env_spec)
    rollouts = worker2.rollout()
    # One (obs, action, reward) tuple is accumulated per step taken.
    assert context_policy.context.shape == (1, max_path_length,
                                            encoder_input_dim)
    assert rollouts.observations.shape == (max_path_length, obs_dim)
    assert rollouts.actions.shape == (max_path_length, action_dim)
    assert rollouts.rewards.shape == (max_path_length, )
| 3,383 | 40.268293 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_ppo.py | """This script creates a test that fails when PPO performance is too low."""
import gym
import pytest
import torch
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import PPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from tests.fixtures import snapshot_config
class TestPPO:
    """Regression tests for the PPO algorithm."""
    def setup_method(self):
        """Create a fresh env/policy/value-function triple for each test."""
        self.env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
        spec = self.env.spec
        self.policy = GaussianMLPPolicy(env_spec=spec,
                                        hidden_sizes=(64, 64),
                                        hidden_nonlinearity=torch.tanh,
                                        output_nonlinearity=None)
        self.value_function = GaussianMLPValueFunction(env_spec=spec)
    def teardown_method(self):
        """Release the environment created in setup_method."""
        self.env.close()
    @pytest.mark.mujoco
    def test_ppo_pendulum(self):
        """PPO must reach a positive average return on the pendulum."""
        deterministic.set_seed(0)
        ppo = PPO(env_spec=self.env.spec,
                  policy=self.policy,
                  value_function=self.value_function,
                  max_path_length=100,
                  discount=0.99,
                  gae_lambda=0.97,
                  lr_clip_range=2e-1)
        runner = LocalRunner(snapshot_config)
        runner.setup(ppo, self.env)
        last_avg_ret = runner.train(n_epochs=10, batch_size=100)
        assert last_avg_ret > 0
| 1,645 | 32.591837 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_sac.py | """Module for testing SAC loss functions."""
from unittest.mock import MagicMock
import gym
import numpy as np
import pytest
import torch
from torch.nn import functional as F
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.replay_buffer import PathBuffer
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import SAC
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config
class _MockDistribution:
def __init__(self, action):
self._action = action
def rsample_with_pre_tanh_value(self, **kwargs):
del kwargs
return self._action, self._action
def rsample(self, **kwargs):
del kwargs
return self._action, self._action
def log_prob(self, value, **kwargs):
del kwargs
del value
return torch.Tensor([10.])
class DummyActorPolicy:
    """Dummy Policy Network that always emits a fixed action.
    Args:
        action (float): The constant action value to emit.
    """
    def __init__(self, action=1.):
        self._action = action
    def __call__(self, observation):
        """Dummy forward operation. Returns a dummy distribution."""
        action = torch.Tensor([self._action])
        return _MockDistribution(action), {}
    def action(self, unused_observation):
        """Dummy action function. Always returns the fixed action."""
        del unused_observation
        # Bug fix: torch.Tensor() does not accept a ``dtype`` keyword and
        # raised TypeError here; torch.tensor() is the documented way to
        # build a tensor with an explicit dtype.
        action = torch.tensor([self._action], dtype=torch.float32)
        return action
    def parameters(self):
        """Mock Params function, returns all 0s."""
        return torch.zeros(5)
class DummyCriticNet:
    """Mock QFunction scoring Q(s, a) = max(s) + max(a)."""
    def __init__(self):
        pass
    def parameters(self):
        """Mock Params function, returns all 0s."""
        return torch.zeros(5)
    def __call__(self, observation, actions):
        """Return max(observation) + max(actions) along the last dim.
        Biggest state is best state; biggest action is best action.
        """
        value = torch.max(observation, dim=-1).values
        # Consistency fix: use torch's native ``dim`` keyword here too,
        # instead of the numpy-style ``axis`` alias used previously.
        q_value = torch.max(actions, dim=-1).values
        ret = value + q_value
        return ret
def testCriticLoss():
    """Test Sac Critic/QF loss.
    With DummyCriticNet (Q(s, a) = max(s) + max(a)) and DummyActorPolicy
    (always acts 1 with log-prob 10), the Bellman targets can be computed
    by hand and compared against SAC's critic objective.
    """
    # pylint: disable=no-member
    policy = DummyActorPolicy()
    sac = SAC(env_spec=None,
              policy=policy,
              qf1=DummyCriticNet(),
              qf2=DummyCriticNet(),
              replay_buffer=None,
              gradient_steps_per_itr=1,
              discount=0.9,
              buffer_batch_size=2,
              target_entropy=3.0,
              max_path_length=10,
              optimizer=MagicMock)
    observations = torch.FloatTensor([[1, 2], [3, 4]])
    actions = torch.FloatTensor([[5], [6]])
    rewards = torch.FloatTensor([10, 20])
    terminals = torch.Tensor([[0.], [0.]])
    next_observations = torch.FloatTensor([[5, 6], [7, 8]])
    samples_data = {
        'observation': observations,
        'action': actions,
        'reward': rewards,
        'terminal': terminals,
        'next_observation': next_observations
    }
    # Hand-computed r + gamma * (Q_target(s', a') - alpha * log_pi), e.g.
    # 10 + 0.9 * ((6 + 1) - 10) = 7.3 (assumes initial alpha == 1; verify
    # against SAC's default initial_log_entropy).
    td_targets = [7.3, 19.1]
    # Critic predictions max(obs) + max(action): [2 + 5, 4 + 6].
    pred_td_targets = [7., 10.]
    # Expected critic loss has factor of 2, for the two TD3 critics.
    expected_loss = 2 * F.mse_loss(torch.Tensor(td_targets),
                                   torch.Tensor(pred_td_targets))
    loss = sac._critic_objective(samples_data)
    assert np.all(np.isclose(np.sum(loss), expected_loss))
def testActorLoss():
    """Test Sac Actor/Policy loss.
    The dummy policy always acts 1 with log-prob 10 and the dummy critic
    scores Q(s, a) = max(s) + max(a), so the actor objective
    mean(alpha * log_pi - Q) can be computed by hand (alpha == 1 since
    initial_log_entropy=0).
    """
    # pylint: disable=no-member
    policy = DummyActorPolicy()
    sac = SAC(env_spec=None,
              policy=policy,
              qf1=DummyCriticNet(),
              qf2=DummyCriticNet(),
              replay_buffer=None,
              discount=1,
              buffer_batch_size=2,
              target_entropy=3.0,
              initial_log_entropy=0,
              optimizer=MagicMock,
              max_path_length=10,
              gradient_steps_per_itr=1)
    observations = torch.Tensor([[1., 2.], [3., 4.]])
    action_dists = policy(observations)[0]
    actions = torch.Tensor(action_dists.rsample_with_pre_tanh_value())
    samples_data = dict(observation=observations)
    log_pi = action_dists.log_prob(actions)
    # Batch mean of (1 * 10 - Q) with Q in {2 + 1, 4 + 1}.
    expected_loss = (2 * 10 - (2 + 1) - (4 + 1)) / 2
    loss = sac._actor_objective(samples_data, actions, log_pi)
    assert np.all(np.isclose(loss, expected_loss))
def testTemperatureLoss():
    """Test Sac temperature loss.
    With log-prob fixed at 10, target entropy 3.0 and log_alpha starting
    at 4.0, the temperature objective -log_alpha * (log_pi + target_entropy)
    evaluates to 4.0 * (-10 - 3).
    """
    # pylint: disable=no-member
    policy = DummyActorPolicy()
    sac = SAC(env_spec=None,
              policy=policy,
              qf1=DummyCriticNet(),
              qf2=DummyCriticNet(),
              replay_buffer=None,
              discount=1,
              buffer_batch_size=2,
              target_entropy=3.0,
              initial_log_entropy=4.0,
              optimizer=MagicMock,
              max_path_length=10,
              gradient_steps_per_itr=1)
    observations = torch.Tensor([[1., 2.], [3., 4.]])
    action_dists = policy(observations)[0]
    actions = action_dists.rsample_with_pre_tanh_value()
    log_pi = action_dists.log_prob(actions)
    samples_data = dict(observation=observations, action=actions)
    # -log_alpha * (log_pi + target_entropy) = -4 * (10 + 3) = -52.
    expected_loss = 4.0 * (-10 - 3)
    loss = sac._temperature_objective(log_pi, samples_data).item()
    assert np.all(np.isclose(loss, expected_loss))
@pytest.mark.mujoco
def test_sac_inverted_double_pendulum():
    """Test Sac performance on inverted pendulum.
    Trains for a few epochs, checks that automatic entropy tuning actually
    updated alpha, and that the return exceeds a fixed threshold.
    """
    # pylint: disable=unexpected-keyword-arg
    env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
    deterministic.set_seed(0)
    policy = TanhGaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=[32, 32],
        hidden_nonlinearity=torch.nn.ReLU,
        output_nonlinearity=None,
        min_std=np.exp(-20.),
        max_std=np.exp(2.),
    )
    qf1 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[32, 32],
                                 hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[32, 32],
                                 hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
    runner = LocalRunner(snapshot_config=snapshot_config)
    sac = SAC(env_spec=env.spec,
              policy=policy,
              qf1=qf1,
              qf2=qf2,
              gradient_steps_per_itr=100,
              max_path_length=100,
              replay_buffer=replay_buffer,
              min_buffer_size=1e3,
              target_update_tau=5e-3,
              discount=0.99,
              buffer_batch_size=64,
              reward_scale=1.,
              steps_per_epoch=2)
    runner.setup(sac, env, sampler_cls=LocalSampler)
    if torch.cuda.is_available():
        set_gpu_mode(True)
    else:
        set_gpu_mode(False)
    sac.to()
    ret = runner.train(n_epochs=12, batch_size=200, plot=False)
    # check that automatic entropy tuning is used
    assert sac._use_automatic_entropy_tuning
    # assert that there was a gradient properly connected to alpha
    # this doesn't verify that the path from the temperature objective is
    # correct.
    assert not torch.allclose(torch.Tensor([1.]), sac._log_alpha.to('cpu'))
    # check that policy is learning beyond predecided threshold
    assert ret > 80
@pytest.mark.mujoco
def test_fixed_alpha():
    """Test if using fixed_alpha ensures that alpha is non differentiable.
    With fixed_alpha=np.exp(0.5), log_alpha must still equal 0.5 after a
    training epoch and automatic entropy tuning must be off.
    """
    # pylint: disable=unexpected-keyword-arg
    env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
    deterministic.set_seed(0)
    policy = TanhGaussianMLPPolicy(
        env_spec=env.spec,
        hidden_sizes=[32, 32],
        hidden_nonlinearity=torch.nn.ReLU,
        output_nonlinearity=None,
        min_std=np.exp(-20.),
        max_std=np.exp(2.),
    )
    qf1 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[32, 32],
                                 hidden_nonlinearity=F.relu)
    qf2 = ContinuousMLPQFunction(env_spec=env.spec,
                                 hidden_sizes=[32, 32],
                                 hidden_nonlinearity=F.relu)
    replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
    runner = LocalRunner(snapshot_config=snapshot_config)
    # fixed_alpha is given as exp(0.5), i.e. log_alpha should be 0.5.
    sac = SAC(env_spec=env.spec,
              policy=policy,
              qf1=qf1,
              qf2=qf2,
              gradient_steps_per_itr=100,
              max_path_length=100,
              replay_buffer=replay_buffer,
              min_buffer_size=100,
              target_update_tau=5e-3,
              discount=0.99,
              buffer_batch_size=64,
              reward_scale=1.,
              steps_per_epoch=1,
              fixed_alpha=np.exp(0.5))
    runner.setup(sac, env, sampler_cls=LocalSampler)
    sac.to()
    runner.train(n_epochs=1, batch_size=100, plot=False)
    # Alpha is fixed, so training must not have moved log_alpha off 0.5.
    assert torch.allclose(torch.Tensor([0.5]), sac._log_alpha.cpu())
    assert not sac._use_automatic_entropy_tuning
| 9,098 | 32.7 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/tests/garage/torch/algos/test_trpo.py | """This script creates a test that fails when TRPO performance is too low."""
import gym
import pytest
import torch
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import TRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from tests.fixtures import snapshot_config
class TestTRPO:
    """Regression tests for the TRPO algorithm."""
    def setup_method(self):
        """Create a fresh env/policy/value-function triple for each test."""
        self.env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
        spec = self.env.spec
        self.policy = GaussianMLPPolicy(env_spec=spec,
                                        hidden_sizes=(64, 64),
                                        hidden_nonlinearity=torch.tanh,
                                        output_nonlinearity=None)
        self.value_function = GaussianMLPValueFunction(env_spec=spec)
    def teardown_method(self):
        """Release the environment created in setup_method."""
        self.env.close()
    @pytest.mark.mujoco
    def test_trpo_pendulum(self):
        """TRPO must reach a positive average return on the pendulum."""
        deterministic.set_seed(0)
        trpo = TRPO(env_spec=self.env.spec,
                    policy=self.policy,
                    value_function=self.value_function,
                    max_path_length=100,
                    discount=0.99,
                    gae_lambda=0.98)
        runner = LocalRunner(snapshot_config)
        runner.setup(trpo, self.env)
        last_avg_ret = runner.train(n_epochs=10, batch_size=100)
        assert last_avg_ret > 0
| 1,618 | 32.729167 | 78 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.