repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/ppo_pendulum.py | #!/usr/bin/env python3
"""This is an example to train a task with PPO algorithm.
Here it creates an InvertedDoublePendulum environment using gym and trains it with PPO for 1M steps.
Results:
AverageDiscountedReturn: 500
RiseTime: itr 40
"""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.policies import GaussianMLPPolicy
@wrap_experiment
def ppo_pendulum(ctxt=None, seed=1):
"""Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
regressor_args=dict(
hidden_sizes=(32, 32),
use_trust_region=True,
),
)
# NOTE: make sure when setting entropy_method to 'max', set
# center_adv to False and turn off policy gradient. See
# tf.algos.NPO for detailed documentation.
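        # A hedged reading of the settings below (tf.algos.NPO remains the
        # authoritative reference): entropy_method='max' treats the policy
        # entropy as a bonus folded into the returns (a maximum-entropy
        # objective) scaled by policy_ent_coeff, while stop_entropy_gradient
        # and center_adv=False keep that bonus from being differentiated
        # through or distorted by advantage centering.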
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
)
runner.setup(algo, env)
runner.train(n_epochs=120, batch_size=2048, plot=False)
ppo_pendulum(seed=1)
| 2,296 | 27.358025 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/reps_gym_cartpole.py | #!/usr/bin/env python3
"""This is an example to train a task with REPS algorithm.
Here it runs gym CartPole env with 100 iterations.
Results:
AverageReturn: 100 +/- 40
RiseTime: itr 10 +/- 5
"""
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import REPS
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def reps_gym_cartpole(ctxt=None, seed=1):
"""Train REPS with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(gym.make('CartPole-v0'))
policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=[32, 32])
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = REPS(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=4000, plot=False)
reps_gym_cartpole()
| 1,465 | 26.660377 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/resume_training.py | #!/usr/bin/env python3
"""This is an example to resume training programmatically."""
# pylint: disable=no-value-for-parameter
import click
from garage import wrap_experiment
from garage.experiment import LocalTFRunner
@click.command()
@click.option('--saved_dir',
required=True,
help='Path where snapshots are saved.')
@wrap_experiment
def resume_experiment(ctxt, saved_dir):
"""Resume a Tensorflow experiment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
saved_dir (str): Path where snapshots are saved.
"""
with LocalTFRunner(snapshot_config=ctxt) as runner:
runner.restore(from_dir=saved_dir)
runner.resume()
resume_experiment()
| 808 | 25.966667 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/rl2_ppo_halfcheetah.py | #!/usr/bin/env python3
"""Example script to run RL2 in HalfCheetah."""
# pylint: disable=no-value-for-parameter
import click
from garage import wrap_experiment
from garage.envs.mujoco.half_cheetah_vel_env import HalfCheetahVelEnv
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2PPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.policies import GaussianGRUPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--max_path_length', default=100)
@click.option('--meta_batch_size', default=10)
@click.option('--n_epochs', default=10)
@click.option('--episode_per_task', default=4)
@wrap_experiment
def rl2_ppo_halfcheetah(ctxt, seed, max_path_length, meta_batch_size, n_epochs,
episode_per_task):
"""Train PPO with HalfCheetah environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_path_length (int): Maximum length of a single rollout.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
episode_per_task (int): Number of training episode per task.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
tasks = task_sampler.SetTaskSampler(lambda: RL2Env(
env=HalfCheetahVelEnv()))
env_spec = RL2Env(env=HalfCheetahVelEnv()).spec
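        # RL2Env is assumed here to augment each observation with the previous
        # action, reward and terminal flag, which is what allows the recurrent
        # policy below to infer the hidden task (the target velocity) within a
        # trial.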
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
algo = RL2PPO(rl2_max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
max_path_length=max_path_length * episode_per_task)
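        # One RL2 "trial" concatenates episode_per_task episodes of the same
        # task, and the GRU keeps its hidden state across them; hence the
        # algorithm's max_path_length is the per-episode horizon multiplied by
        # episode_per_task.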
runner.setup(algo,
tasks.sample(meta_batch_size),
sampler_cls=LocalSampler,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_paths_per_trial=episode_per_task))
runner.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_path_length *
meta_batch_size)
rl2_ppo_halfcheetah()
| 3,326 | 37.686047 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/rl2_ppo_halfcheetah_meta_test.py | #!/usr/bin/env python3
"""Example script to run RL2PPO meta test in HalfCheetah."""
# pylint: disable=no-value-for-parameter
import click
from garage import wrap_experiment
from garage.envs.mujoco.half_cheetah_vel_env import HalfCheetahVelEnv
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.experiment.deterministic import set_seed
from garage.experiment.meta_evaluator import MetaEvaluator
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2PPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.policies import GaussianGRUPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--max_path_length', default=100)
@click.option('--meta_batch_size', default=10)
@click.option('--n_epochs', default=10)
@click.option('--episode_per_task', default=4)
@wrap_experiment
def rl2_ppo_halfcheetah_meta_test(ctxt, seed, max_path_length, meta_batch_size,
n_epochs, episode_per_task):
"""Perform meta-testing on RL2PPO with HalfCheetah environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_path_length (int): Maximum length of a single rollout.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
episode_per_task (int): Number of training episode per task.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
tasks = task_sampler.SetTaskSampler(lambda: RL2Env(
env=HalfCheetahVelEnv()))
env_spec = RL2Env(env=HalfCheetahVelEnv()).spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
meta_evaluator = MetaEvaluator(test_task_sampler=tasks,
n_exploration_traj=10,
n_test_rollouts=10,
max_path_length=max_path_length,
n_test_tasks=5)
algo = RL2PPO(rl2_max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
max_path_length=max_path_length * episode_per_task,
meta_evaluator=meta_evaluator,
n_epochs_per_eval=10)
runner.setup(algo,
tasks.sample(meta_batch_size),
sampler_cls=LocalSampler,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_paths_per_trial=episode_per_task))
runner.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_path_length *
meta_batch_size)
rl2_ppo_halfcheetah_meta_test()
| 3,859 | 39.631579 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/rl2_ppo_metaworld_ml10.py | #!/usr/bin/env python3
"""Example script to run RL2 in ML10."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
from garage import wrap_experiment
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2PPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.policies import GaussianGRUPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--max_path_length', default=150)
@click.option('--meta_batch_size', default=10)
@click.option('--n_epochs', default=10)
@click.option('--episode_per_task', default=10)
@wrap_experiment
def rl2_ppo_metaworld_ml10(ctxt, seed, max_path_length, meta_batch_size,
n_epochs, episode_per_task):
"""Train PPO with ML10 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_path_length (int): Maximum length of a single rollout.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
episode_per_task (int): Number of training episode per task.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
ml10_train_envs = [
RL2Env(mwb.ML10.from_task(task_name))
for task_name in mwb.ML10.get_train_tasks().all_task_names
]
tasks = task_sampler.EnvPoolSampler(ml10_train_envs)
tasks.grow_pool(meta_batch_size)
env_spec = ml10_train_envs[0].spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
algo = RL2PPO(rl2_max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
max_path_length=max_path_length * episode_per_task)
runner.setup(algo,
tasks.sample(meta_batch_size),
sampler_cls=LocalSampler,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_paths_per_trial=episode_per_task))
runner.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_path_length *
meta_batch_size)
rl2_ppo_metaworld_ml10()
| 3,437 | 37.2 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/rl2_ppo_metaworld_ml10_meta_test.py | #!/usr/bin/env python3
"""Example script to run RL2 in ML10 with meta-test."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
from garage import wrap_experiment
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.experiment.deterministic import set_seed
from garage.experiment.meta_evaluator import MetaEvaluator
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2PPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.policies import GaussianGRUPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--max_path_length', default=150)
@click.option('--meta_batch_size', default=10)
@click.option('--n_epochs', default=10)
@click.option('--episode_per_task', default=10)
@wrap_experiment
def rl2_ppo_metaworld_ml10_meta_test(ctxt, seed, max_path_length,
meta_batch_size, n_epochs,
episode_per_task):
"""Train PPO with ML10 environment with meta-test.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_path_length (int): Maximum length of a single rollout.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
episode_per_task (int): Number of training episode per task.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
ml10_train_envs = [
RL2Env(mwb.ML10.from_task(task_name))
for task_name in mwb.ML10.get_train_tasks().all_task_names
]
tasks = task_sampler.EnvPoolSampler(ml10_train_envs)
tasks.grow_pool(meta_batch_size)
ml10_test_envs = [
RL2Env(mwb.ML10.from_task(task_name))
for task_name in mwb.ML10.get_test_tasks().all_task_names
]
test_tasks = task_sampler.EnvPoolSampler(ml10_test_envs)
env_spec = ml10_train_envs[0].spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
meta_evaluator = MetaEvaluator(test_task_sampler=test_tasks,
n_exploration_traj=10,
n_test_rollouts=10,
max_path_length=max_path_length,
n_test_tasks=5)
algo = RL2PPO(rl2_max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
max_path_length=max_path_length * episode_per_task,
meta_evaluator=meta_evaluator,
n_epochs_per_eval=10)
runner.setup(algo,
tasks.sample(meta_batch_size),
sampler_cls=LocalSampler,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_paths_per_trial=episode_per_task))
runner.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_path_length *
meta_batch_size)
rl2_ppo_metaworld_ml10_meta_test()
| 4,231 | 38.924528 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/rl2_ppo_metaworld_ml1_push.py | #!/usr/bin/env python3
"""Example script to run RL2 in ML1."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
from garage import wrap_experiment
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2PPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.policies import GaussianGRUPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--max_path_length', default=150)
@click.option('--meta_batch_size', default=10)
@click.option('--n_epochs', default=10)
@click.option('--episode_per_task', default=10)
@wrap_experiment
def rl2_ppo_metaworld_ml1_push(ctxt, seed, max_path_length, meta_batch_size,
n_epochs, episode_per_task):
"""Train PPO with ML1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_path_length (int): Maximum length of a single rollout.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
episode_per_task (int): Number of training episode per task.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
tasks = task_sampler.SetTaskSampler(lambda: RL2Env(
env=mwb.ML1.get_train_tasks('push-v1')))
env_spec = RL2Env(env=mwb.ML1.get_train_tasks('push-v1')).spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
algo = RL2PPO(rl2_max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
max_path_length=max_path_length * episode_per_task)
runner.setup(algo,
tasks.sample(meta_batch_size),
sampler_cls=LocalSampler,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_paths_per_trial=episode_per_task))
runner.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_path_length *
meta_batch_size)
rl2_ppo_metaworld_ml1_push()
| 3,327 | 37.697674 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/rl2_ppo_metaworld_ml45.py | #!/usr/bin/env python3
"""Example script to run RL2 in ML45."""
# pylint: disable=no-value-for-parameter, wrong-import-order
import click
import metaworld.benchmarks as mwb
from garage import wrap_experiment
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2PPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.policies import GaussianGRUPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--max_path_length', default=150)
@click.option('--meta_batch_size', default=50)
@click.option('--n_epochs', default=10)
@click.option('--episode_per_task', default=10)
@wrap_experiment
def rl2_ppo_metaworld_ml45(ctxt, seed, max_path_length, meta_batch_size,
n_epochs, episode_per_task):
"""Train PPO with ML45 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_path_length (int): Maximum length of a single rollout.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
episode_per_task (int): Number of training episode per task.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
ml45_train_tasks = mwb.ML45.get_train_tasks()
ml45_train_envs = [
RL2Env(mwb.ML45.from_task(task_name))
for task_name in ml45_train_tasks.all_task_names
]
tasks = task_sampler.EnvPoolSampler(ml45_train_envs)
tasks.grow_pool(meta_batch_size)
env_spec = ml45_train_envs[0].spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
algo = RL2PPO(rl2_max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
max_path_length=max_path_length * episode_per_task)
runner.setup(algo,
tasks.sample(meta_batch_size),
sampler_cls=LocalSampler,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_paths_per_trial=episode_per_task))
runner.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_path_length *
meta_batch_size)
rl2_ppo_metaworld_ml45()
| 3,502 | 37.076087 | 74 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/rl2_trpo_halfcheetah.py | #!/usr/bin/env python3
"""Example script to run RL2 in HalfCheetah."""
# pylint: disable=no-value-for-parameter
import click
from garage import wrap_experiment
from garage.envs.mujoco.half_cheetah_vel_env import HalfCheetahVelEnv
from garage.experiment import LocalTFRunner
from garage.experiment import task_sampler
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2TRPO
from garage.tf.algos.rl2 import RL2Env
from garage.tf.algos.rl2 import RL2Worker
from garage.tf.optimizers import ConjugateGradientOptimizer
from garage.tf.optimizers import FiniteDifferenceHvp
from garage.tf.policies import GaussianGRUPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--max_path_length', default=100)
@click.option('--meta_batch_size', default=10)
@click.option('--n_epochs', default=10)
@click.option('--episode_per_task', default=4)
@wrap_experiment
def rl2_trpo_halfcheetah(ctxt, seed, max_path_length, meta_batch_size,
n_epochs, episode_per_task):
"""Train TRPO with HalfCheetah environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
max_path_length (int): Maximum length of a single rollout.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
episode_per_task (int): Number of training episode per task.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
tasks = task_sampler.SetTaskSampler(lambda: RL2Env(
env=HalfCheetahVelEnv()))
env_spec = RL2Env(env=HalfCheetahVelEnv()).spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
algo = RL2TRPO(rl2_max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
max_path_length=max_path_length * episode_per_task,
discount=0.99,
max_kl_step=0.01,
optimizer=ConjugateGradientOptimizer,
optimizer_args=dict(hvp_approach=FiniteDifferenceHvp(
base_eps=1e-5)))
runner.setup(algo,
tasks.sample(meta_batch_size),
sampler_cls=LocalSampler,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_paths_per_trial=episode_per_task))
runner.train(n_epochs=n_epochs,
batch_size=episode_per_task * max_path_length *
meta_batch_size)
rl2_trpo_halfcheetah()
| 3,266 | 38.841463 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/td3_pendulum.py | #!/usr/bin/env python3
"""This is an example to train a task with TD3 algorithm.
Here, we create a gym environment InvertedDoublePendulum and train it with TD3 for 1M steps.
Results:
AverageReturn: 250
RiseTime: epoch 499
"""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.exploration_policies import AddGaussianNoise
from garage.replay_buffer import PathBuffer
from garage.tf.algos import TD3
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
@wrap_experiment(snapshot_mode='last')
def td3_pendulum(ctxt=None, seed=1):
"""Wrap TD3 training task in the run_task function.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(gym.make('InvertedDoublePendulum-v2'))
policy = ContinuousMLPPolicy(env_spec=env.spec,
hidden_sizes=[400, 300],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddGaussianNoise(env.spec,
policy,
max_sigma=0.1,
min_sigma=0.1)
qf = ContinuousMLPQFunction(name='ContinuousMLPQFunction',
env_spec=env.spec,
hidden_sizes=[400, 300],
action_merge_layer=0,
hidden_nonlinearity=tf.nn.relu)
qf2 = ContinuousMLPQFunction(name='ContinuousMLPQFunction2',
env_spec=env.spec,
hidden_sizes=[400, 300],
action_merge_layer=0,
hidden_nonlinearity=tf.nn.relu)
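        # TD3 trains two Q-functions with identical architectures and uses the
        # smaller of their target values when bootstrapping, which curbs the
        # overestimation bias of single-critic DDPG; that is why both qf and
        # qf2 are constructed here.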
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
td3 = TD3(env_spec=env.spec,
policy=policy,
policy_lr=1e-4,
qf_lr=1e-3,
qf=qf,
qf2=qf2,
replay_buffer=replay_buffer,
target_update_tau=1e-2,
steps_per_epoch=20,
n_train_steps=1,
smooth_return=False,
discount=0.99,
buffer_batch_size=100,
min_buffer_size=1e4,
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer)
runner.setup(td3, env)
runner.train(n_epochs=500, batch_size=250)
td3_pendulum(seed=1)
| 3,172 | 35.471264 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/te_ppo_metaworld_ml1_push.py | #!/usr/bin/env python3
"""This is an example to train Task Embedding PPO with PointEnv."""
# pylint: disable=no-value-for-parameter
import click
from metaworld.benchmarks import ML1
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs.multi_env_wrapper import MultiEnvWrapper
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearMultiFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import TEPPO
from garage.tf.algos.te import TaskEmbeddingWorker
from garage.tf.embeddings import GaussianMLPEncoder
from garage.tf.policies import GaussianMLPTaskEmbeddingPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--n_epochs', default=600)
@click.option('--batch_size_per_task', default=1024)
@wrap_experiment
def te_ppo_ml1_push(ctxt, seed, n_epochs, batch_size_per_task):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
"""
set_seed(seed)
envs = [GarageEnv(normalize(ML1.get_train_tasks('push-v1')))]
env = MultiEnvWrapper(envs, mode='del-onehot')
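    # mode='del-onehot' is assumed to remove the one-hot task indicator from
    # the environment observations, since the task-embedding policy conditions
    # on its learned latent rather than the raw task index.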
latent_length = 2
inference_window = 6
batch_size = batch_size_per_task
policy_ent_coeff = 2e-2
encoder_ent_coeff = 2e-4
inference_ce_coeff = 5e-2
max_path_length = 100
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = None
policy_min_std = None
with LocalTFRunner(snapshot_config=ctxt) as runner:
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 10),
std_share_network=True,
init_std=2.0,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
inference=inference,
max_path_length=max_path_length,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
center_adv=True,
stop_ce_gradient=True)
runner.setup(algo,
env,
sampler_cls=LocalSampler,
sampler_args=None,
worker_class=TaskEmbeddingWorker)
runner.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
te_ppo_ml1_push()
| 4,620 | 33.744361 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/te_ppo_metaworld_mt10.py | #!/usr/bin/env python3
"""This is an example to train Task Embedding PPO with PointEnv."""
# pylint: disable=no-value-for-parameter
import click
from metaworld.benchmarks import MT10
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs.multi_env_wrapper import MultiEnvWrapper
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearMultiFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import TEPPO
from garage.tf.algos.te import TaskEmbeddingWorker
from garage.tf.embeddings import GaussianMLPEncoder
from garage.tf.policies import GaussianMLPTaskEmbeddingPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--n_epochs', default=600)
@click.option('--batch_size_per_task', default=1024)
@wrap_experiment
def te_ppo_mt10(ctxt, seed, n_epochs, batch_size_per_task):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
"""
set_seed(seed)
tasks = MT10.get_train_tasks().all_task_names
envs = [normalize(GarageEnv(MT10.from_task(task))) for task in tasks]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='del-onehot')
latent_length = 4
inference_window = 6
batch_size = batch_size_per_task * len(tasks)
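    # MT10 contributes ten tasks, so with the defaults this is
    # 1024 * 10 = 10,240 transitions per training epoch.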
policy_ent_coeff = 2e-2
encoder_ent_coeff = 2e-4
inference_ce_coeff = 5e-2
max_path_length = 100
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = None
policy_min_std = None
with LocalTFRunner(snapshot_config=ctxt) as runner:
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 10),
std_share_network=True,
init_std=2.0,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
inference=inference,
max_path_length=max_path_length,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
center_adv=True,
stop_ce_gradient=True)
runner.setup(algo,
env,
sampler_cls=LocalSampler,
sampler_args=None,
worker_class=TaskEmbeddingWorker)
runner.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
te_ppo_mt10()
| 4,837 | 34.313869 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/te_ppo_metaworld_mt50.py | #!/usr/bin/env python3
"""This is an example to train Task Embedding PPO with PointEnv."""
# pylint: disable=no-value-for-parameter
import click
from metaworld.benchmarks import MT50
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs.multi_env_wrapper import MultiEnvWrapper
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearMultiFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import TEPPO
from garage.tf.algos.te import TaskEmbeddingWorker
from garage.tf.embeddings import GaussianMLPEncoder
from garage.tf.policies import GaussianMLPTaskEmbeddingPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--n_epochs', default=600)
@click.option('--batch_size_per_task', default=1024)
@wrap_experiment
def te_ppo_mt50(ctxt, seed, n_epochs, batch_size_per_task):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
"""
set_seed(seed)
tasks = MT50.get_train_tasks().all_task_names
envs = [normalize(GarageEnv(MT50.from_task(task))) for task in tasks]
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='del-onehot')
latent_length = 6
inference_window = 6
batch_size = batch_size_per_task * len(tasks)
policy_ent_coeff = 2e-2
encoder_ent_coeff = 2e-4
inference_ce_coeff = 5e-2
max_path_length = 100
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = None
policy_min_std = None
with LocalTFRunner(snapshot_config=ctxt) as runner:
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 10),
std_share_network=True,
init_std=2.0,
output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
inference=inference,
max_path_length=max_path_length,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_epochs=10,
),
center_adv=True,
stop_ce_gradient=True)
runner.setup(algo,
env,
sampler_cls=LocalSampler,
sampler_args=None,
worker_class=TaskEmbeddingWorker)
runner.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
te_ppo_mt50()
| 4,837 | 34.313869 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/te_ppo_point.py | #!/usr/bin/env python3
"""This is an example to train Task Embedding PPO with PointEnv."""
# pylint: disable=no-value-for-parameter
import click
import numpy as np
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, PointEnv
from garage.envs.multi_env_wrapper import MultiEnvWrapper
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearMultiFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import TEPPO
from garage.tf.algos.te import TaskEmbeddingWorker
from garage.tf.embeddings import GaussianMLPEncoder
from garage.tf.policies import GaussianMLPTaskEmbeddingPolicy
def circle(r, n):
"""Generate n points on a circle of radius r.
Args:
r (float): Radius of the circle.
n (int): Number of points to generate.
Yields:
tuple(float, float): Coordinate of a point.
"""
for t in np.arange(0, 2 * np.pi, 2 * np.pi / n):
yield r * np.sin(t), r * np.cos(t)
N = 4
goals = circle(3.0, N)
TASKS = {
str(i + 1): {
'args': [],
'kwargs': {
'goal': g,
'never_done': False,
'done_bonus': 10.0,
}
}
for i, g in enumerate(goals)
}
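# With the defaults above (r=3.0, N=4) the generator yields the goals (0, 3),
# (3, 0), (0, -3) and (-3, 0) (up to floating-point error), so TASKS maps the
# keys '1'..'4' to four evenly spaced points on the circle.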
@click.command()
@click.option('--seed', default=1)
@click.option('--n_epochs', default=600)
@click.option('--batch_size_per_task', default=1024)
@wrap_experiment
def te_ppo_pointenv(ctxt, seed, n_epochs, batch_size_per_task):
"""Train Task Embedding PPO with PointEnv.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
n_epochs (int): Total number of epochs for training.
batch_size_per_task (int): Batch size of samples for each task.
"""
set_seed(seed)
tasks = TASKS
latent_length = 2
inference_window = 6
batch_size = batch_size_per_task * len(TASKS)
policy_ent_coeff = 1e-3
encoder_ent_coeff = 1e-3
inference_ce_coeff = 5e-2
max_path_length = 100
embedding_init_std = 0.1
embedding_max_std = 0.2
embedding_min_std = 1e-6
policy_init_std = 1.0
policy_max_std = 2.0
policy_min_std = None
task_names = sorted(tasks.keys())
task_args = [tasks[t]['args'] for t in task_names]
task_kwargs = [tasks[t]['kwargs'] for t in task_names]
with LocalTFRunner(snapshot_config=ctxt) as runner:
task_envs = [
GarageEnv(PointEnv(*t_args, **t_kwargs))
for t_args, t_kwargs in zip(task_args, task_kwargs)
]
env = MultiEnvWrapper(task_envs, round_robin_strategy, mode='vanilla')
task_embed_spec = TEPPO.get_encoder_spec(env.task_space,
latent_dim=latent_length)
task_encoder = GaussianMLPEncoder(
name='embedding',
embedding_spec=task_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=embedding_init_std,
max_std=embedding_max_std,
output_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
traj_embed_spec = TEPPO.get_infer_spec(
env.spec,
latent_dim=latent_length,
inference_window_size=inference_window)
inference = GaussianMLPEncoder(
name='inference',
embedding_spec=traj_embed_spec,
hidden_sizes=(20, 20),
std_share_network=True,
init_std=0.1,
output_nonlinearity=tf.nn.tanh,
std_output_nonlinearity=tf.nn.tanh,
min_std=embedding_min_std,
)
policy = GaussianMLPTaskEmbeddingPolicy(
name='policy',
env_spec=env.spec,
encoder=task_encoder,
hidden_sizes=(32, 16),
std_share_network=True,
max_std=policy_max_std,
init_std=policy_init_std,
min_std=policy_min_std,
)
baseline = LinearMultiFeatureBaseline(
env_spec=env.spec, features=['observations', 'tasks', 'latents'])
algo = TEPPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
inference=inference,
max_path_length=max_path_length,
discount=0.99,
lr_clip_range=0.2,
policy_ent_coeff=policy_ent_coeff,
encoder_ent_coeff=encoder_ent_coeff,
inference_ce_coeff=inference_ce_coeff,
use_softplus_entropy=True,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
inference_optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
center_adv=True,
stop_ce_gradient=True)
runner.setup(algo,
env,
sampler_cls=LocalSampler,
sampler_args=None,
worker_class=TaskEmbeddingWorker)
runner.train(n_epochs=n_epochs, batch_size=batch_size, plot=False)
te_ppo_pointenv()
| 5,676 | 31.44 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/trpo_cartpole.py | #!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm.
Here it runs CartPole-v1 environment with 100 iterations.
Results:
AverageReturn: 100
RiseTime: itr 13
"""
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def trpo_cartpole(ctxt=None, seed=1):
"""Train TRPO with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=4000)
trpo_cartpole()
| 1,537 | 28.576923 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/trpo_cartpole_batch_sampler.py | #!/usr/bin/env python3
"""This is an example to train a task with parallel sampling."""
import click
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.samplers import BatchSampler
@click.command()
@click.option('--batch_size', type=int, default=4000)
@click.option('--max_path_length', type=int, default=100)
@wrap_experiment
def trpo_cartpole_batch_sampler(ctxt=None,
seed=1,
batch_size=4000,
max_path_length=100):
"""Train TRPO with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
max_path_length (int): Number of timesteps to truncate paths to.
"""
set_seed(seed)
n_envs = batch_size // max_path_length
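    # With the default options this is 4000 // 100 = 40 parallel environments,
    # enough for every environment to contribute one full-length path per
    # batch.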
with LocalTFRunner(ctxt, max_cpus=n_envs) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=max_path_length,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo=algo,
env=env,
sampler_cls=BatchSampler,
sampler_args={'n_envs': n_envs})
runner.train(n_epochs=100, batch_size=4000, plot=False)
trpo_cartpole_batch_sampler()
| 2,140 | 34.098361 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/trpo_cartpole_recurrent.py | #!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm.
It uses an LSTM-based recurrent policy.
Here it runs CartPole-v1 environment with 100 iterations.
Results:
AverageReturn: 100
RiseTime: itr 13
"""
# pylint: disable=no-value-for-parameter
import click
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.optimizers import ConjugateGradientOptimizer
from garage.tf.optimizers import FiniteDifferenceHvp
from garage.tf.policies import CategoricalLSTMPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--n_epochs', default=100)
@click.option('--batch_size', default=4000)
@click.option('--plot', default=False)
@wrap_experiment
def trpo_cartpole_recurrent(ctxt, seed, n_epochs, batch_size, plot):
"""Train TRPO with a recurrent policy on CartPole.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
        seed (int): Used to seed the random number generator to produce
            determinism.
        n_epochs (int): Number of epochs for training.
batch_size (int): Batch size used for training.
plot (bool): Whether to plot or not.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalLSTMPolicy(name='policy', env_spec=env.spec)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01,
optimizer=ConjugateGradientOptimizer,
optimizer_args=dict(hvp_approach=FiniteDifferenceHvp(
base_eps=1e-5)))
runner.setup(algo, env)
runner.train(n_epochs=n_epochs, batch_size=batch_size, plot=plot)
trpo_cartpole_recurrent()
| 2,240 | 31.955882 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/trpo_cubecrash.py | #!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm.
Here it runs CubeCrash-v0 environment with 100 iterations.
"""
import click
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.tf.algos import TRPO
from garage.tf.baselines import GaussianCNNBaseline
from garage.tf.policies import CategoricalCNNPolicy
@click.command()
@click.option('--batch_size', type=int, default=4000)
@wrap_experiment
def trpo_cubecrash(ctxt=None, seed=1, batch_size=4000):
"""Train TRPO with CubeCrash-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make('CubeCrash-v0')))
policy = CategoricalCNNPolicy(env_spec=env.spec,
filters=((32, (8, 8)), (64, (4, 4))),
strides=(4, 2),
padding='VALID',
hidden_sizes=(32, 32))
baseline = GaussianCNNBaseline(
env_spec=env.spec,
regressor_args=dict(filters=((32, (8, 8)), (64, (4, 4))),
strides=(4, 2),
padding='VALID',
hidden_sizes=(32, 32),
use_trust_region=True))
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
flatten_input=False)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=batch_size)
trpo_cubecrash()
| 2,245 | 34.09375 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/trpo_gym_tf_cartpole.py | #!/usr/bin/env python3
"""An example to train a task with TRPO algorithm."""
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def trpo_gym_tf_cartpole(ctxt=None, seed=1):
"""Train TRPO with CartPole-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(gym.make('CartPole-v0'))
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=200,
discount=0.99,
max_kl_step=0.01,
)
runner.setup(algo, env)
runner.train(n_epochs=120, batch_size=4000)
trpo_gym_tf_cartpole()
| 1,441 | 28.428571 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/trpo_swimmer.py | #!/usr/bin/env python3
"""An example to train a task with TRPO algorithm."""
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import GaussianMLPPolicy
@wrap_experiment
def trpo_swimmer(ctxt=None, seed=1, batch_size=4000):
"""Train TRPO with Swimmer-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(gym.make('Swimmer-v2'))
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=500,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo, env)
runner.train(n_epochs=40, batch_size=batch_size)
trpo_swimmer()
| 1,425 | 30 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/trpo_swimmer_ray_sampler.py | #!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm.
Uses the Ray sampler instead of the on-policy vectorized sampler.
Here it runs Swimmer-v2 environment with 40 iterations.
"""
import gym
import ray
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import RaySampler
from garage.tf.algos import TRPO
from garage.tf.policies import GaussianMLPPolicy
@wrap_experiment
def trpo_swimmer_ray_sampler(ctxt=None, seed=1):
"""tf_trpo_swimmer.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
# Since this is an example, we are running ray in a reduced state.
# One can comment this line out in order to run ray at full capacity
ray.init(memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_webui=False)
with LocalTFRunner(snapshot_config=ctxt) as runner:
set_seed(seed)
env = GarageEnv(gym.make('Swimmer-v2'))
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=500,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo,
env,
sampler_cls=RaySampler,
sampler_args={'seed': seed})
runner.train(n_epochs=40, batch_size=4000)
trpo_swimmer_ray_sampler(seed=100)
| 1,969 | 30.269841 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/trpois_inverted_pendulum.py | #!/usr/bin/env python3
"""Example using TRPO with ISSampler.
Iterations alternate between live and importance sampled iterations.
"""
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import ISSampler
from garage.tf.algos import TRPO
from garage.tf.policies import GaussianMLPPolicy
@wrap_experiment
def trpois_inverted_pendulum(ctxt=None, seed=1):
"""Train TRPO on InvertedPendulum-v2 with importance sampling.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo,
env,
sampler_cls=ISSampler,
sampler_args=dict(n_backtrack=1))
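        # n_backtrack=1 is read here as reusing samples from only the most
        # recent live iteration during the importance-sampled iterations (an
        # assumption about ISSampler's parameter rather than documented
        # behaviour).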
runner.train(n_epochs=200, batch_size=4000)
trpois_inverted_pendulum()
| 1,617 | 30.115385 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/vpg_cartpole.py | #!/usr/bin/env python3
"""This is an example to train a task with VPG algorithm.
Here it runs CartPole-v1 environment with 100 iterations.
Results:
AverageReturn: 100
RiseTime: itr 16
"""
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import VPG
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def vpg_cartpole(ctxt=None, seed=1):
"""Train VPG with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
optimizer_args=dict(learning_rate=0.01, ))
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=10000)
vpg_cartpole()
| 1,568 | 29.173077 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/vpgis_inverted_pendulum.py | #!/usr/bin/env python3
"""Example using VPG with ISSampler.
Iterations alternate between live and importance sampled iterations.
"""
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import ISSampler
from garage.tf.algos import VPG
from garage.tf.policies import GaussianMLPPolicy
@wrap_experiment
def vpgis_inverted_pendulum(ctxt=None, seed=1):
"""Train TRPO with InvertedPendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01,
)
runner.setup(algo,
env,
sampler_cls=ISSampler,
sampler_args=dict(n_backtrack=1))
runner.train(n_epochs=40, batch_size=4000)
vpgis_inverted_pendulum()
| 1,583 | 28.333333 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/ddpg_pendulum.py | #!/usr/bin/env python3
"""This is an example to train a task with DDPG algorithm written in PyTorch.
Here it creates a gym environment InvertedDoublePendulum and uses DDPG with
1M steps.
"""
import gym
import torch
from torch.nn import functional as F
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import PathBuffer
from garage.torch.algos import DDPG
from garage.torch.policies import DeterministicMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@wrap_experiment(snapshot_mode='last')
def ddpg_pendulum(ctxt=None, seed=1, lr=1e-4):
"""Train DDPG with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
lr (float): Learning rate for policy optimization.
"""
set_seed(seed)
runner = LocalRunner(ctxt)
env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
policy = DeterministicMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=F.relu,
output_nonlinearity=torch.tanh)
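    # Ornstein-Uhlenbeck noise adds temporally correlated exploration, which
    # tends to work well for continuous-control tasks like this one.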
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec, policy, sigma=0.2)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
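    # The policy optimizer is given as an (optimizer class, kwargs) tuple:
    # Adagrad with a decaying learning rate, while the Q-function below uses
    # plain Adam.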
policy_optimizer = (torch.optim.Adagrad, {'lr': lr, 'lr_decay': 0.99})
ddpg = DDPG(env_spec=env.spec,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
steps_per_epoch=20,
n_train_steps=50,
min_buffer_size=int(1e4),
exploration_policy=exploration_policy,
target_update_tau=1e-2,
discount=0.9,
policy_optimizer=policy_optimizer,
qf_optimizer=torch.optim.Adam)
runner.setup(algo=ddpg, env=env)
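    # 500 epochs x 20 cycles per epoch x 100-step batches works out to
    # roughly the 1M environment steps mentioned in the module docstring.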
runner.train(n_epochs=500, batch_size=100)
ddpg_pendulum()
| 2,496 | 33.205479 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/maml_ppo_half_cheetah_dir.py | #!/usr/bin/env python3
"""This is an example to train MAML-VPG on HalfCheetahDirEnv environment."""
# pylint: disable=no-value-for-parameter
import click
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import LocalRunner, MetaEvaluator
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.torch.algos import MAMLPPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=300)
@click.option('--rollouts_per_task', default=40)
@click.option('--meta_batch_size', default=20)
@wrap_experiment(snapshot_mode='all')
def maml_ppo_half_cheetah_dir(ctxt, seed, epochs, rollouts_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
rollouts_per_task (int): Number of rollouts per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
env = GarageEnv(normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
max_path_length = 100
task_sampler = SetTaskSampler(lambda: GarageEnv(
normalize(HalfCheetahDirEnv(), expected_action_scale=10.)))
meta_evaluator = MetaEvaluator(test_task_sampler=task_sampler,
max_path_length=max_path_length,
n_test_tasks=1,
n_test_rollouts=10)
runner = LocalRunner(ctxt)
algo = MAMLPPO(env=env,
policy=policy,
value_function=value_function,
max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
runner.setup(algo, env)
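    # The training batch is sized as rollouts_per_task rollouts of up to
    # max_path_length steps for each sampled task.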
runner.train(n_epochs=epochs,
batch_size=rollouts_per_task * max_path_length)
maml_ppo_half_cheetah_dir()
| 3,034 | 36.012195 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/maml_trpo_half_cheetah_dir.py | #!/usr/bin/env python3
"""This is an example to train MAML-VPG on HalfCheetahDirEnv environment."""
# pylint: disable=no-value-for-parameter
import click
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import LocalRunner, MetaEvaluator
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.torch.algos import MAMLTRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=300)
@click.option('--rollouts_per_task', default=40)
@click.option('--meta_batch_size', default=20)
@wrap_experiment(snapshot_mode='all')
def maml_trpo_half_cheetah_dir(ctxt, seed, epochs, rollouts_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
rollouts_per_task (int): Number of rollouts per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
env = GarageEnv(normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
max_path_length = 100
task_sampler = SetTaskSampler(lambda: GarageEnv(
normalize(HalfCheetahDirEnv(), expected_action_scale=10.)))
meta_evaluator = MetaEvaluator(test_task_sampler=task_sampler,
max_path_length=max_path_length,
n_test_tasks=1,
n_test_rollouts=10)
runner = LocalRunner(ctxt)
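    # gae_lambda=1. disables GAE smoothing (plain discounted-return
    # advantages); adaptation uses a single inner gradient step of size
    # inner_lr, as in the standard MAML setup.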
algo = MAMLTRPO(env=env,
policy=policy,
value_function=value_function,
max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
runner.setup(algo, env)
runner.train(n_epochs=epochs,
batch_size=rollouts_per_task * max_path_length)
maml_trpo_half_cheetah_dir()
| 3,048 | 36.182927 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/maml_trpo_metaworld_ml10.py | #!/usr/bin/env python3
"""This is an example to train MAML-TRPO on ML10 environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner, MetaEvaluator
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import EnvPoolSampler
from garage.torch.algos import MAMLTRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=300)
@click.option('--rollouts_per_task', default=10)
@click.option('--meta_batch_size', default=20)
@wrap_experiment(snapshot_mode='all')
def maml_trpo_metaworld_ml10(ctxt, seed, epochs, rollouts_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
rollouts_per_task (int): Number of rollouts per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
env = GarageEnv(
normalize(mwb.ML10.get_train_tasks(), expected_action_scale=10.))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(100, 100),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
max_path_length = 100
test_task_names = mwb.ML10.get_test_tasks().all_task_names
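    # Build one environment per held-out ML10 task so that meta-evaluation
    # covers every test task.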
test_tasks = [
GarageEnv(
normalize(mwb.ML10.from_task(task), expected_action_scale=10.))
for task in test_task_names
]
test_sampler = EnvPoolSampler(test_tasks)
meta_evaluator = MetaEvaluator(test_task_sampler=test_sampler,
max_path_length=max_path_length,
n_test_tasks=len(test_task_names))
runner = LocalRunner(ctxt)
algo = MAMLTRPO(env=env,
policy=policy,
value_function=value_function,
max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
runner.setup(algo, env)
runner.train(n_epochs=epochs,
batch_size=rollouts_per_task * max_path_length)
maml_trpo_metaworld_ml10()
| 3,142 | 35.126437 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/maml_trpo_metaworld_ml1_push.py | #!/usr/bin/env python3
"""This is an example to train MAML-TRPO on ML1 Push environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner, MetaEvaluator
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.torch.algos import MAMLTRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=300)
@click.option('--rollouts_per_task', default=10)
@click.option('--meta_batch_size', default=20)
@wrap_experiment(snapshot_mode='all')
def maml_trpo_metaworld_ml1_push(ctxt, seed, epochs, rollouts_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
rollouts_per_task (int): Number of rollouts per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
env = GarageEnv(
normalize(mwb.ML1.get_train_tasks('push-v1'),
expected_action_scale=10.))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(100, 100),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
max_path_length = 100
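    # ML1 test tasks are held-out goal variations of the same push task; the
    # sampler builds fresh test environments on demand.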
test_sampler = SetTaskSampler(lambda: GarageEnv(
normalize(mwb.ML1.get_test_tasks('push-v1'))))
meta_evaluator = MetaEvaluator(test_task_sampler=test_sampler,
max_path_length=max_path_length)
runner = LocalRunner(ctxt)
algo = MAMLTRPO(env=env,
policy=policy,
value_function=value_function,
max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
runner.setup(algo, env)
runner.train(n_epochs=epochs,
batch_size=rollouts_per_task * max_path_length)
maml_trpo_metaworld_ml1_push()
| 2,957 | 35.073171 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/maml_trpo_metaworld_ml45.py | #!/usr/bin/env python3
"""This is an example to train MAML-TRPO on ML45 environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner, MetaEvaluator
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import EnvPoolSampler
from garage.np.baselines import LinearFeatureBaseline
from garage.torch.algos import MAMLTRPO
from garage.torch.policies import GaussianMLPPolicy
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=300)
@click.option('--rollouts_per_task', default=10)
@click.option('--meta_batch_size', default=20)
@wrap_experiment(snapshot_mode='all')
def maml_trpo_metaworld_ml45(ctxt, seed, epochs, rollouts_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
rollouts_per_task (int): Number of rollouts per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
env = GarageEnv(
normalize(mwb.ML45.get_train_tasks(), expected_action_scale=10.))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(100, 100),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
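    # Unlike the other MAML examples, this script uses a linear feature
    # baseline as the value function, presumably to keep per-task fitting
    # cheap across 45 training tasks.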
value_function = LinearFeatureBaseline(env_spec=env.spec)
max_path_length = 100
test_task_names = mwb.ML45.get_test_tasks().all_task_names
test_tasks = [
GarageEnv(
normalize(mwb.ML45.from_task(task), expected_action_scale=10.))
for task in test_task_names
]
test_sampler = EnvPoolSampler(test_tasks)
meta_evaluator = MetaEvaluator(test_task_sampler=test_sampler,
max_path_length=max_path_length,
n_test_tasks=len(test_task_names))
runner = LocalRunner(ctxt)
algo = MAMLTRPO(env=env,
policy=policy,
value_function=value_function,
max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
runner.setup(algo, env)
runner.train(n_epochs=epochs,
batch_size=rollouts_per_task * max_path_length)
maml_trpo_metaworld_ml45()
| 2,908 | 33.630952 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/maml_vpg_half_cheetah_dir.py | #!/usr/bin/env python3
"""This is an example to train MAML-VPG on HalfCheetahDirEnv environment."""
# pylint: disable=no-value-for-parameter
import click
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import LocalRunner, MetaEvaluator
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.torch.algos import MAMLVPG
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=300)
@click.option('--rollouts_per_task', default=40)
@click.option('--meta_batch_size', default=20)
@wrap_experiment(snapshot_mode='all')
def maml_vpg_half_cheetah_dir(ctxt, seed, epochs, rollouts_per_task,
meta_batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
rollouts_per_task (int): Number of rollouts per epoch per task
for training.
meta_batch_size (int): Number of tasks sampled per batch.
"""
set_seed(seed)
env = GarageEnv(normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
max_path_length = 100
task_sampler = SetTaskSampler(lambda: GarageEnv(
normalize(HalfCheetahDirEnv(), expected_action_scale=10.)))
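    # HalfCheetahDirEnv only varies the target running direction, so a single
    # sampled test task with several rollouts is used for meta-evaluation.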
meta_evaluator = MetaEvaluator(test_task_sampler=task_sampler,
max_path_length=max_path_length,
n_test_tasks=1,
n_test_rollouts=10)
runner = LocalRunner(ctxt)
algo = MAMLVPG(env=env,
policy=policy,
value_function=value_function,
max_path_length=max_path_length,
meta_batch_size=meta_batch_size,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1,
meta_evaluator=meta_evaluator)
runner.setup(algo, env)
runner.train(n_epochs=epochs,
batch_size=rollouts_per_task * max_path_length)
maml_vpg_half_cheetah_dir()
| 3,034 | 36.012195 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mtppo_metaworld_ml1_push.py | #!/usr/bin/env python3
"""This is an example to train PPO on ML1 Push environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import PPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@wrap_experiment(snapshot_mode='all')
def mtppo_metaworld_ml1_push(ctxt, seed, epochs, batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
"""
set_seed(seed)
env = GarageEnv(normalize(mwb.ML1.get_train_tasks('push-v1')))
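    # hidden_sizes is left unspecified here, so the policy and value function
    # fall back to garage's defaults.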
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=128,
discount=0.99,
gae_lambda=0.95,
center_adv=True,
lr_clip_range=0.2)
runner = LocalRunner(ctxt)
runner.setup(algo, env)
runner.train(n_epochs=epochs, batch_size=batch_size)
mtppo_metaworld_ml1_push()
| 2,049 | 32.064516 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mtppo_metaworld_mt10.py | #!/usr/bin/env python3
"""This is an example to train PPO on MT10 environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import psutil
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, MultiEnvWrapper, normalize
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import PPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@click.option('--n_worker', default=psutil.cpu_count(logical=False))
@wrap_experiment(snapshot_mode='all')
def mtppo_metaworld_mt10(ctxt, seed, epochs, batch_size, n_worker):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_worker (int): The number of workers the sampler should use.
"""
set_seed(seed)
tasks = mwb.MT10.get_train_tasks().all_task_names
envs = []
for task in tasks:
envs.append(normalize(GarageEnv(mwb.MT10.from_task(task))))
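    # MultiEnvWrapper presents the ten task environments as a single
    # environment; round_robin_strategy cycles through the tasks in turn.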
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=128,
discount=0.99,
gae_lambda=0.95,
center_adv=True,
lr_clip_range=0.2)
runner = LocalRunner(ctxt)
runner.setup(algo, env, n_workers=n_worker)
runner.train(n_epochs=epochs, batch_size=batch_size)
mtppo_metaworld_mt10()
| 2,630 | 34.554054 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mtppo_metaworld_mt50.py | #!/usr/bin/env python3
"""This is an example to train PPO on MT50 environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import psutil
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, MultiEnvWrapper, normalize
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import PPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@click.option('--n_worker', default=psutil.cpu_count(logical=False))
@wrap_experiment(snapshot_mode='all')
def mtppo_metaworld_mt50(ctxt, seed, epochs, batch_size, n_worker):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_worker (int): The number of workers the sampler should use.
"""
set_seed(seed)
tasks = mwb.MT50.get_train_tasks().all_task_names
envs = []
for task in tasks:
envs.append(normalize(GarageEnv(mwb.MT50.from_task(task))))
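    # mode='vanilla' leaves observations as-is (no one-hot task id appended),
    # so the policy is not explicitly told which of the 50 tasks is active.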
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=128,
discount=0.99,
gae_lambda=0.95,
center_adv=True,
lr_clip_range=0.2)
runner = LocalRunner(ctxt)
runner.setup(algo, env, n_workers=n_worker)
runner.train(n_epochs=epochs, batch_size=batch_size)
mtppo_metaworld_mt50()
| 2,630 | 34.554054 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mtsac_metaworld_ml1_pick_place.py | #!/usr/bin/env python3
"""MTSAC implementation based on Metaworld. Benchmarked on ML1.
This experiment shows how MTSAC adapts to 50 environments of the same type,
where each environment has a different goal variation.
https://arxiv.org/pdf/1910.10897.pdf
"""
import pickle
import click
import metaworld.benchmarks as mwb
import numpy as np
from torch import nn
from torch.nn import functional as F
from garage import wrap_experiment
from garage.envs import GarageEnv, MultiEnvWrapper, normalize
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import deterministic, LocalRunner
from garage.replay_buffer import PathBuffer
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import MTSAC
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--seed', 'seed', type=int, default=1)
@click.option('--gpu', '_gpu', type=int, default=None)
@wrap_experiment(snapshot_mode='none')
def mtsac_metaworld_ml1_pick_place(ctxt=None, seed=1, _gpu=None):
"""Train MTSAC with the ML1 pick-place-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
_gpu (int): The ID of the gpu to be used (used on multi-gpu machines).
"""
deterministic.set_seed(seed)
runner = LocalRunner(ctxt)
train_envs = []
test_envs = []
env_names = []
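    # Build 50 instances of the ML1 pick-place task; the matching test
    # environments are deep copies of the training instances.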
for i in range(50):
train_env = GarageEnv(
normalize(mwb.ML1.get_train_tasks('pick-place-v1'),
normalize_reward=True))
test_env = pickle.loads(pickle.dumps(train_env))
env_names.append('pick_place_{}'.format(i))
train_envs.append(train_env)
test_envs.append(test_env)
ml1_train_envs = MultiEnvWrapper(train_envs,
sample_strategy=round_robin_strategy,
env_names=env_names)
ml1_test_envs = MultiEnvWrapper(test_envs,
sample_strategy=round_robin_strategy,
env_names=env_names)
policy = TanhGaussianMLPPolicy(
env_spec=ml1_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=ml1_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=ml1_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
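    # Derive the epoch schedule from the 10M-step budget so that evaluation
    # happens roughly num_evaluation_points times over training
    # (epochs * epoch_cycles * batch_size approximately equals timesteps).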
timesteps = 10000000
batch_size = int(150 * ml1_train_envs.num_tasks)
num_evaluation_points = 500
epochs = timesteps // batch_size
epoch_cycles = epochs // num_evaluation_points
epochs = epochs // epoch_cycles
mtsac = MTSAC(policy=policy,
qf1=qf1,
qf2=qf2,
gradient_steps_per_itr=150,
max_path_length=150,
max_eval_path_length=150,
eval_env=ml1_test_envs,
env_spec=ml1_train_envs.spec,
num_tasks=50,
steps_per_epoch=epoch_cycles,
replay_buffer=replay_buffer,
min_buffer_size=1500,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=1280)
if _gpu is not None:
set_gpu_mode(True, _gpu)
mtsac.to()
runner.setup(algo=mtsac,
env=ml1_train_envs,
sampler_cls=LocalSampler,
n_workers=1)
runner.train(n_epochs=epochs, batch_size=batch_size)
mtsac_metaworld_ml1_pick_place()
| 4,154 | 35.769912 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mtsac_metaworld_mt10.py | #!/usr/bin/env python3
"""MTSAC implementation based on Metaworld. Benchmarked on MT10.
https://arxiv.org/pdf/1910.10897.pdf
"""
import click
import metaworld.benchmarks as mwb
import numpy as np
from torch import nn
from torch.nn import functional as F
from garage import wrap_experiment
from garage.envs import GarageEnv, MultiEnvWrapper, normalize
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import deterministic, LocalRunner
from garage.replay_buffer import PathBuffer
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import MTSAC
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--seed', 'seed', type=int, default=1)
@click.option('--gpu', '_gpu', type=int, default=None)
@wrap_experiment(snapshot_mode='none')
def mtsac_metaworld_mt10(ctxt=None, seed=1, _gpu=None):
"""Train MTSAC with MT10 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
_gpu (int): The ID of the gpu to be used (used on multi-gpu machines).
"""
deterministic.set_seed(seed)
runner = LocalRunner(ctxt)
task_names = mwb.MT10.get_train_tasks().all_task_names
train_envs = []
test_envs = []
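    # Reward normalization is applied only to the training copies; evaluation
    # environments keep Metaworld's raw reward scale.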
for task_name in task_names:
train_env = normalize(GarageEnv(mwb.MT10.from_task(task_name)),
normalize_reward=True)
test_env = normalize(GarageEnv(mwb.MT10.from_task(task_name)))
train_envs.append(train_env)
test_envs.append(test_env)
mt10_train_envs = MultiEnvWrapper(train_envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
mt10_test_envs = MultiEnvWrapper(test_envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = TanhGaussianMLPPolicy(
env_spec=mt10_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=mt10_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=mt10_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
timesteps = int(20e6)
batch_size = int(150 * mt10_train_envs.num_tasks)
epochs = 250
epoch_cycles = timesteps // (epochs * batch_size)
mtsac = MTSAC(policy=policy,
qf1=qf1,
qf2=qf2,
gradient_steps_per_itr=150,
max_path_length=150,
max_eval_path_length=150,
eval_env=mt10_test_envs,
env_spec=mt10_train_envs.spec,
num_tasks=10,
steps_per_epoch=epoch_cycles,
replay_buffer=replay_buffer,
min_buffer_size=1500,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=1280)
if _gpu is not None:
set_gpu_mode(True, _gpu)
mtsac.to()
runner.setup(algo=mtsac,
env=mt10_train_envs,
sampler_cls=LocalSampler,
n_workers=1)
runner.train(n_epochs=epochs, batch_size=batch_size)
mtsac_metaworld_mt10()
| 3,899 | 36.5 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mtsac_metaworld_mt50.py | #!/usr/bin/env python3
"""MTSAC implementation based on Metaworld. Benchmarked on MT50.
https://arxiv.org/pdf/1910.10897.pdf
"""
import click
import metaworld.benchmarks as mwb
import numpy as np
from torch import nn
from torch.nn import functional as F
from garage import wrap_experiment
from garage.envs import GarageEnv, MultiEnvWrapper, normalize
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import deterministic, LocalRunner
from garage.replay_buffer import PathBuffer
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import MTSAC
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--seed', 'seed', type=int, default=1)
@click.option('--use_gpu', 'use_gpu', type=bool, default=False)
@click.option('--gpu', '_gpu', type=int, default=0)
@wrap_experiment(snapshot_mode='none')
def mtsac_metaworld_mt50(ctxt=None, seed=1, use_gpu=False, _gpu=0):
"""Train MTSAC with MT50 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
        use_gpu (bool): Used to enable usage of GPU in training.
_gpu (int): The ID of the gpu (used on multi-gpu machines).
"""
deterministic.set_seed(seed)
runner = LocalRunner(ctxt)
task_names = mwb.MT50.get_train_tasks().all_task_names
train_envs = []
test_envs = []
for task_name in task_names:
train_env = normalize(GarageEnv(mwb.MT50.from_task(task_name)),
normalize_reward=True)
test_env = normalize(GarageEnv(mwb.MT50.from_task(task_name)))
train_envs.append(train_env)
test_envs.append(test_env)
mt50_train_envs = MultiEnvWrapper(train_envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
mt50_test_envs = MultiEnvWrapper(test_envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = TanhGaussianMLPPolicy(
env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=mt50_train_envs.spec,
hidden_sizes=[400, 400, 400],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6), )
timesteps = 100000000
batch_size = int(150 * mt50_train_envs.num_tasks)
num_evaluation_points = 500
epochs = timesteps // batch_size
epoch_cycles = epochs // num_evaluation_points
epochs = epochs // epoch_cycles
mtsac = MTSAC(policy=policy,
qf1=qf1,
qf2=qf2,
gradient_steps_per_itr=150,
max_path_length=150,
max_eval_path_length=150,
eval_env=mt50_test_envs,
env_spec=mt50_train_envs.spec,
num_tasks=50,
steps_per_epoch=epoch_cycles,
replay_buffer=replay_buffer,
min_buffer_size=7500,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=6400)
set_gpu_mode(use_gpu, _gpu)
mtsac.to()
runner.setup(algo=mtsac,
env=mt50_train_envs,
sampler_cls=LocalSampler,
n_workers=1)
runner.train(n_epochs=epochs, batch_size=batch_size)
mtsac_metaworld_mt50()
| 4,087 | 36.851852 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mttrpo_metaworld_ml1_push.py | #!/usr/bin/env python3
"""This is an example to train TRPO on ML1 Push environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import TRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@wrap_experiment(snapshot_mode='all')
def mttrpo_metaworld_ml1_push(ctxt, seed, epochs, batch_size):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
"""
set_seed(seed)
env = GarageEnv(normalize(mwb.ML1.get_train_tasks('push-v1')))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=128,
discount=0.99,
gae_lambda=0.95)
runner = LocalRunner(ctxt)
runner.setup(algo, env)
runner.train(n_epochs=epochs, batch_size=batch_size)
mttrpo_metaworld_ml1_push()
| 2,093 | 32.774194 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mttrpo_metaworld_mt10.py | #!/usr/bin/env python3
"""This is an example to train TRPO on MT10 environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import psutil
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, MultiEnvWrapper, normalize
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import TRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@click.option('--n_worker', default=psutil.cpu_count(logical=False))
@wrap_experiment(snapshot_mode='all')
def mttrpo_metaworld_mt10(ctxt, seed, epochs, batch_size, n_worker):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_worker (int): The number of workers the sampler should use.
"""
set_seed(seed)
tasks = mwb.MT10.get_train_tasks().all_task_names
envs = []
for task in tasks:
envs.append(normalize(GarageEnv(mwb.MT10.from_task(task))))
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=128,
discount=0.99,
gae_lambda=0.95)
runner = LocalRunner(ctxt)
runner.setup(algo, env, n_workers=n_worker)
runner.train(n_epochs=epochs, batch_size=batch_size)
mttrpo_metaworld_mt10()
| 2,574 | 34.763889 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/mttrpo_metaworld_mt50.py | #!/usr/bin/env python3
"""This is an example to train TRPO on MT50 environment."""
# pylint: disable=no-value-for-parameter
import click
import metaworld.benchmarks as mwb
import psutil
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, MultiEnvWrapper, normalize
from garage.envs.multi_env_wrapper import round_robin_strategy
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import TRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@click.option('--n_worker', default=psutil.cpu_count(logical=False))
@wrap_experiment(snapshot_mode='all')
def mttrpo_metaworld_mt50(ctxt, seed, epochs, batch_size, n_worker):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
epochs (int): Number of training epochs.
batch_size (int): Number of environment steps in one batch.
n_worker (int): The number of workers the sampler should use.
"""
set_seed(seed)
tasks = mwb.MT50.get_train_tasks().all_task_names
envs = []
for task in tasks:
envs.append(normalize(GarageEnv(mwb.MT50.from_task(task))))
env = MultiEnvWrapper(envs,
sample_strategy=round_robin_strategy,
mode='vanilla')
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=128,
discount=0.99,
gae_lambda=0.95)
runner = LocalRunner(ctxt)
runner.setup(algo, env, n_workers=n_worker)
runner.train(n_epochs=epochs, batch_size=batch_size)
mttrpo_metaworld_mt50()
| 2,574 | 34.763889 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/pearl_half_cheetah_vel.py | #!/usr/bin/env python3
"""PEARL HalfCheetahVel example."""
import click
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs.mujoco import HalfCheetahVelEnv
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import PEARL
from garage.torch.algos.pearl import PEARLWorker
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import ContextConditionedPolicy
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--num_epochs', default=500)
@click.option('--num_train_tasks', default=100)
@click.option('--num_test_tasks', default=30)
@click.option('--encoder_hidden_size', default=200)
@click.option('--net_size', default=300)
@click.option('--num_steps_per_epoch', default=2000)
@click.option('--num_initial_steps', default=2000)
@click.option('--num_steps_prior', default=400)
@click.option('--num_extra_rl_steps_posterior', default=600)
@click.option('--batch_size', default=256)
@click.option('--embedding_batch_size', default=100)
@click.option('--embedding_mini_batch_size', default=100)
@click.option('--max_path_length', default=200)
@wrap_experiment
def pearl_half_cheetah_vel(ctxt=None,
seed=1,
num_epochs=500,
num_train_tasks=100,
num_test_tasks=30,
latent_size=5,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=2000,
num_initial_steps=2000,
num_tasks_sample=5,
num_steps_prior=400,
num_extra_rl_steps_posterior=600,
batch_size=256,
embedding_batch_size=100,
embedding_mini_batch_size=100,
max_path_length=200,
reward_scale=5.,
use_gpu=False):
"""Train PEARL with HalfCheetahVel environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks for testing.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_path_length (int): Maximum path length.
        reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
# create multi-task environment and sample tasks
env_sampler = SetTaskSampler(lambda: GarageEnv(
normalize(HalfCheetahVelEnv())))
env = env_sampler.sample(num_train_tasks)
test_env_sampler = SetTaskSampler(lambda: GarageEnv(
normalize(HalfCheetahVelEnv())))
runner = LocalRunner(ctxt)
# instantiate networks
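    # The policy and Q-functions act on observations augmented with the
    # latent context z, hence the latent-augmented env specs below.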
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=num_train_tasks,
num_test_tasks=num_test_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
max_path_length=max_path_length,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
runner.setup(algo=pearl,
env=env[0](),
sampler_cls=LocalSampler,
sampler_args=dict(max_path_length=max_path_length),
n_workers=1,
worker_class=PEARLWorker)
runner.train(n_epochs=num_epochs, batch_size=batch_size)
pearl_half_cheetah_vel()
| 6,501 | 40.679487 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/pearl_metaworld_ml10.py | #!/usr/bin/env python3
"""PEARL ML10 example."""
import click
import metaworld.benchmarks as mwb
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import EnvPoolSampler
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import PEARL
from garage.torch.algos.pearl import PEARLWorker
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import ContextConditionedPolicy
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--num_epochs', default=1000)
@click.option('--num_train_tasks', default=10)
@click.option('--num_test_tasks', default=5)
@click.option('--encoder_hidden_size', default=200)
@click.option('--net_size', default=300)
@click.option('--num_steps_per_epoch', default=4000)
@click.option('--num_initial_steps', default=4000)
@click.option('--num_steps_prior', default=750)
@click.option('--num_extra_rl_steps_posterior', default=750)
@click.option('--batch_size', default=256)
@click.option('--embedding_batch_size', default=64)
@click.option('--embedding_mini_batch_size', default=64)
@click.option('--max_path_length', default=150)
@wrap_experiment
def pearl_metaworld_ml10(ctxt=None,
seed=1,
num_epochs=1000,
num_train_tasks=10,
num_test_tasks=5,
latent_size=7,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=4000,
num_initial_steps=4000,
num_tasks_sample=15,
num_steps_prior=750,
num_extra_rl_steps_posterior=750,
batch_size=256,
embedding_batch_size=64,
embedding_mini_batch_size=64,
max_path_length=150,
reward_scale=10.,
use_gpu=False):
"""Train PEARL with ML10 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks for testing.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_path_length (int): Maximum path length.
        reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
# create multi-task environment and sample tasks
ML_train_envs = [
GarageEnv(normalize(mwb.ML10.from_task(task_name)))
for task_name in mwb.ML10.get_train_tasks().all_task_names
]
ML_test_envs = [
GarageEnv(normalize(mwb.ML10.from_task(task_name)))
for task_name in mwb.ML10.get_test_tasks().all_task_names
]
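    # EnvPoolSampler draws tasks from the fixed pools above; grow_pool makes
    # sure each pool holds at least the requested number of tasks.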
env_sampler = EnvPoolSampler(ML_train_envs)
env_sampler.grow_pool(num_train_tasks)
env = env_sampler.sample(num_train_tasks)
test_env_sampler = EnvPoolSampler(ML_test_envs)
test_env_sampler.grow_pool(num_test_tasks)
runner = LocalRunner(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=num_train_tasks,
num_test_tasks=num_test_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
max_path_length=max_path_length,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
runner.setup(algo=pearl,
env=env[0](),
sampler_cls=LocalSampler,
sampler_args=dict(max_path_length=max_path_length),
n_workers=1,
worker_class=PEARLWorker)
runner.train(n_epochs=num_epochs, batch_size=batch_size)
pearl_metaworld_ml10()
| 6,732 | 39.317365 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/pearl_metaworld_ml1_push.py | #!/usr/bin/env python3
"""PEARL ML1 example."""
import click
import metaworld.benchmarks as mwb
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import SetTaskSampler
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import PEARL
from garage.torch.algos.pearl import PEARLWorker
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import ContextConditionedPolicy
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--num_epochs', default=1000)
@click.option('--num_train_tasks', default=50)
@click.option('--num_test_tasks', default=10)
@click.option('--encoder_hidden_size', default=200)
@click.option('--net_size', default=300)
@click.option('--num_steps_per_epoch', default=4000)
@click.option('--num_initial_steps', default=4000)
@click.option('--num_steps_prior', default=750)
@click.option('--num_extra_rl_steps_posterior', default=750)
@click.option('--batch_size', default=256)
@click.option('--embedding_batch_size', default=64)
@click.option('--embedding_mini_batch_size', default=64)
@click.option('--max_path_length', default=150)
@wrap_experiment
def pearl_metaworld_ml1_push(ctxt=None,
seed=1,
num_epochs=1000,
num_train_tasks=50,
num_test_tasks=10,
latent_size=7,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=4000,
num_initial_steps=4000,
num_tasks_sample=15,
num_steps_prior=750,
num_extra_rl_steps_posterior=750,
batch_size=256,
embedding_batch_size=64,
embedding_mini_batch_size=64,
max_path_length=150,
reward_scale=10.,
use_gpu=False):
"""Train PEARL with ML1 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks for testing.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_path_length (int): Maximum path length.
        reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
# create multi-task environment and sample tasks
env_sampler = SetTaskSampler(lambda: GarageEnv(
normalize(mwb.ML1.get_train_tasks('push-v1'))))
env = env_sampler.sample(num_train_tasks)
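    # Test tasks are drawn from ML1's held-out goal variations of push.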
test_env_sampler = SetTaskSampler(lambda: GarageEnv(
normalize(mwb.ML1.get_test_tasks('push-v1'))))
runner = LocalRunner(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=num_train_tasks,
num_test_tasks=num_test_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
max_path_length=max_path_length,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
runner.setup(algo=pearl,
env=env[0](),
sampler_cls=LocalSampler,
sampler_args=dict(max_path_length=max_path_length),
n_workers=1,
worker_class=PEARLWorker)
runner.train(n_epochs=num_epochs, batch_size=batch_size)
pearl_metaworld_ml1_push()
| 6,536 | 40.636943 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/pearl_metaworld_ml45.py | #!/usr/bin/env python3
"""PEARL ML45 example."""
import click
import metaworld.benchmarks as mwb
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.experiment.task_sampler import EnvPoolSampler
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import PEARL
from garage.torch.algos.pearl import PEARLWorker
from garage.torch.embeddings import MLPEncoder
from garage.torch.policies import ContextConditionedPolicy
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--num_epochs', default=1000)
@click.option('--num_train_tasks', default=45)
@click.option('--num_test_tasks', default=5)
@click.option('--encoder_hidden_size', default=200)
@click.option('--net_size', default=300)
@click.option('--num_steps_per_epoch', default=4000)
@click.option('--num_initial_steps', default=4000)
@click.option('--num_steps_prior', default=750)
@click.option('--num_extra_rl_steps_posterior', default=750)
@click.option('--batch_size', default=256)
@click.option('--embedding_batch_size', default=64)
@click.option('--embedding_mini_batch_size', default=64)
@click.option('--max_path_length', default=150)
@wrap_experiment
def pearl_metaworld_ml45(ctxt=None,
seed=1,
num_epochs=1000,
num_train_tasks=45,
num_test_tasks=5,
latent_size=7,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=4000,
num_initial_steps=4000,
num_tasks_sample=15,
num_steps_prior=750,
num_extra_rl_steps_posterior=750,
batch_size=256,
embedding_batch_size=64,
embedding_mini_batch_size=64,
max_path_length=150,
reward_scale=10.,
use_gpu=False):
"""Train PEARL with ML45 environments.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks for testing.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_path_length (int): Maximum path length.
        reward_scale (float): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
# create multi-task environment and sample tasks
ml45_train_envs = [
GarageEnv(normalize(mwb.ML45.from_task(task_name)))
for task_name in mwb.ML45.get_train_tasks().all_task_names
]
ml45_test_envs = [
GarageEnv(normalize(mwb.ML45.from_task(task_name)))
for task_name in mwb.ML45.get_test_tasks().all_task_names
]
env_sampler = EnvPoolSampler(ml45_train_envs)
env_sampler.grow_pool(num_train_tasks)
env = env_sampler.sample(num_train_tasks)
test_env_sampler = EnvPoolSampler(ml45_test_envs)
test_env_sampler.grow_pool(num_test_tasks)
runner = LocalRunner(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=num_train_tasks,
num_test_tasks=num_test_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
max_path_length=max_path_length,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
runner.setup(algo=pearl,
env=env[0](),
sampler_cls=LocalSampler,
sampler_args=dict(max_path_length=max_path_length),
n_workers=1,
worker_class=PEARLWorker)
runner.train(n_epochs=num_epochs, batch_size=batch_size)
pearl_metaworld_ml45()
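# Usage sketch: the function is exposed as a click command, so the defaults
# above can be overridden from the shell (the values below are illustrative):
#     python pearl_metaworld_ml45.py --num_epochs 500 --batch_size 128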
| 6,740 | 39.365269 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/ppo_pendulum.py | #!/usr/bin/env python3
"""This is an example to train a task with PPO algorithm (PyTorch).
Here it runs InvertedDoublePendulum-v2 environment with 100 iterations.
"""
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import PPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@wrap_experiment
def ppo_pendulum(ctxt=None, seed=1):
"""Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
env = GarageEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=100,
discount=0.99,
center_adv=False)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=10000)
ppo_pendulum(seed=1)
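# Rough budget: runner.train(n_epochs=100, batch_size=10000) collects about
# 10,000 transitions per epoch, i.e. on the order of 1e6 environment steps
# over the whole run.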
| 1,794 | 31.636364 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/resume_training.py | #!/usr/bin/env python3
"""This is an example to resume training programmatically."""
# pylint: disable=no-value-for-parameter
import click
from garage import wrap_experiment
from garage.experiment import LocalRunner
@click.command()
@click.option('--saved_dir',
required=True,
help='Path where snapshots are saved.')
@wrap_experiment
def resume_experiment(ctxt, saved_dir):
"""Resume a PyTorch experiment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
saved_dir (str): Path where snapshots are saved.
"""
runner = LocalRunner(snapshot_config=ctxt)
runner.restore(from_dir=saved_dir)
runner.resume()
resume_experiment()
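# Usage sketch (the directory below is illustrative; pass whichever log dir
# holds the snapshots you want to resume from):
#     python resume_training.py --saved_dir data/local/experiment/ppo_pendulum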
| 786 | 25.233333 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/sac_half_cheetah_batch.py | #!/usr/bin/env python3
"""This is an example to train a task with SAC algorithm written in PyTorch."""
import gym
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.replay_buffer import PathBuffer
from garage.sampler import LocalSampler
from garage.torch import set_gpu_mode
from garage.torch.algos import SAC
from garage.torch.policies import TanhGaussianMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
@wrap_experiment(snapshot_mode='none')
def sac_half_cheetah_batch(ctxt=None, seed=1):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
deterministic.set_seed(seed)
runner = LocalRunner(snapshot_config=ctxt)
env = GarageEnv(normalize(gym.make('HalfCheetah-v2')))
policy = TanhGaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=nn.ReLU,
output_nonlinearity=None,
min_std=np.exp(-20.),
max_std=np.exp(2.),
)
qf1 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu)
qf2 = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[256, 256],
hidden_nonlinearity=F.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
sac = SAC(env_spec=env.spec,
policy=policy,
qf1=qf1,
qf2=qf2,
gradient_steps_per_itr=1000,
max_path_length=1000,
max_eval_path_length=1000,
replay_buffer=replay_buffer,
min_buffer_size=1e4,
target_update_tau=5e-3,
discount=0.99,
buffer_batch_size=256,
reward_scale=1.,
steps_per_epoch=1)
if torch.cuda.is_available():
set_gpu_mode(True)
else:
set_gpu_mode(False)
sac.to()
runner.setup(algo=sac, env=env, sampler_cls=LocalSampler)
runner.train(n_epochs=1000, batch_size=1000)
s = np.random.randint(0, 1000)
sac_half_cheetah_batch(seed=521)
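# With steps_per_epoch=1, gradient_steps_per_itr=1000 and
# runner.train(n_epochs=1000, batch_size=1000), this run collects roughly 1e6
# transitions (matching the replay buffer capacity above) and performs on the
# order of 1e6 gradient steps.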
| 2,564 | 31.0625 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/trpo_pendulum.py | #!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm (PyTorch).
Here it runs InvertedDoublePendulum-v2 environment with 100 iterations.
"""
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import TRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@wrap_experiment
def trpo_pendulum(ctxt=None, seed=1):
"""Train TRPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
env = GarageEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=100,
discount=0.99,
center_adv=False)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=1024)
trpo_pendulum(seed=1)
| 1,804 | 31.818182 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/trpo_pendulum_ray_sampler.py | #!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm (PyTorch).
Uses Ray sampler instead of OnPolicyVectorizedSampler.
Here it runs InvertedDoublePendulum-v2 environment with 100 iterations.
"""
import numpy as np
import ray
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import deterministic, LocalRunner
from garage.sampler import RaySampler
from garage.torch.algos import TRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@wrap_experiment(snapshot_mode='none')
def trpo_pendulum_ray_sampler(ctxt=None, seed=1):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
# Since this is an example, we are running ray in a reduced state.
# One can comment this line out in order to run ray at full capacity
ray.init(memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_webui=False)
deterministic.set_seed(seed)
env = GarageEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=100,
discount=0.99,
center_adv=False)
runner.setup(algo, env, sampler_cls=RaySampler)
runner.train(n_epochs=100, batch_size=1024)
s = np.random.randint(0, 1000)
trpo_pendulum_ray_sampler(seed=s)
| 2,324 | 34.227273 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/torch/vpg_pendulum.py | #!/usr/bin/env python3
"""This is an example to train a task with VPG algorithm (PyTorch).
Here it runs InvertedDoublePendulum-v2 environment with 100 iterations.
Results:
AverageReturn: 450 - 650
"""
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalRunner
from garage.experiment.deterministic import set_seed
from garage.torch.algos import VPG
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@wrap_experiment
def vpg_pendulum(ctxt=None, seed=1):
"""Train PPO with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
env = GarageEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = VPG(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=100,
discount=0.99,
center_adv=False)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=10000)
vpg_pendulum()
| 1,827 | 30.517241 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/__init__.py | """Garage Base."""
from garage._dtypes import InOutSpec
from garage._dtypes import TimeStep
from garage._dtypes import TimeStepBatch
from garage._dtypes import TrajectoryBatch
from garage._functions import _Default
from garage._functions import log_multitask_performance
from garage._functions import log_performance
from garage._functions import make_optimizer
from garage.experiment.experiment import wrap_experiment
__all__ = [
'_Default', 'make_optimizer', 'wrap_experiment', 'TimeStep',
'TrajectoryBatch', 'log_multitask_performance', 'log_performance',
'InOutSpec', 'TimeStepBatch'
]
| 603 | 34.529412 | 70 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/_dtypes.py | """Data types for agent-based learning."""
import collections
import akro
import numpy as np
from garage.misc import tensor_utils
class TrajectoryBatch(
collections.namedtuple('TrajectoryBatch', [
'env_spec',
'observations',
'last_observations',
'actions',
'rewards',
'terminals',
'env_infos',
'agent_infos',
'lengths',
])):
# pylint: disable=missing-return-doc, missing-return-type-doc, missing-param-doc, missing-type-doc # noqa: E501
r"""A tuple representing a batch of whole trajectories.
Data type for on-policy algorithms.
A :class:`TrajectoryBatch` represents a batch of whole trajectories
produced when one or more agents interacts with one or more environments.
+-----------------------+-------------------------------------------------+
| Symbol | Description |
+=======================+=================================================+
| :math:`N` | Trajectory index dimension |
+-----------------------+-------------------------------------------------+
| :math:`[T]` | Variable-length time dimension of each |
| | trajectory |
+-----------------------+-------------------------------------------------+
| :math:`S^*` | Single-step shape of a time-series tensor |
+-----------------------+-------------------------------------------------+
| :math:`N \bullet [T]` | A dimension computed by flattening a |
| | variable-length time dimension :math:`[T]` into |
| | a single batch dimension with length |
| | :math:`sum_{i \in N} [T]_i` |
+-----------------------+-------------------------------------------------+
Attributes:
env_spec (garage.envs.EnvSpec): Specification for the environment from
which this data was sampled.
observations (numpy.ndarray): A numpy array of shape
:math:`(N \bullet [T], O^*)` containing the (possibly
multi-dimensional) observations for all time steps in this batch.
These must conform to :obj:`env_spec.observation_space`.
last_observations (numpy.ndarray): A numpy array of shape
:math:`(N, O^*)` containing the last observation of each
trajectory. This is necessary since there are one more
observations than actions every trajectory.
actions (numpy.ndarray): A numpy array of shape
:math:`(N \bullet [T], A^*)` containing the (possibly
multi-dimensional) actions for all time steps in this batch. These
must conform to :obj:`env_spec.action_space`.
rewards (numpy.ndarray): A numpy array of shape
:math:`(N \bullet [T])` containing the rewards for all time steps
in this batch.
terminals (numpy.ndarray): A boolean numpy array of shape
:math:`(N \bullet [T])` containing the termination signals for all
time steps in this batch.
        env_infos (dict): A dict of numpy arrays containing arbitrary
            environment state information. Each value of this dict should be
            a numpy array of shape :math:`(N \bullet [T])` or
            :math:`(N \bullet [T], S^*)`.
        agent_infos (dict): A dict of numpy arrays containing arbitrary agent
            state information. Each value of this dict should be a numpy
            array of shape :math:`(N \bullet [T])` or
            :math:`(N \bullet [T], S^*)`. For example, this may contain the
            hidden states from an RNN policy.
lengths (numpy.ndarray): An integer numpy array of shape :math:`(N,)`
containing the length of each trajectory in this batch. This may be
used to reconstruct the individual trajectories.
Raises:
ValueError: If any of the above attributes do not conform to their
prescribed types and shapes.
"""
__slots__ = ()
def __new__(cls, env_spec, observations, last_observations, actions,
rewards, terminals, env_infos, agent_infos,
lengths): # noqa: D102
# pylint: disable=too-many-branches
first_observation = observations[0]
first_action = actions[0]
inferred_batch_size = lengths.sum()
# lengths
if len(lengths.shape) != 1:
raise ValueError(
'Lengths tensor must be a tensor of shape (N,), but got a '
'tensor of shape {} instead'.format(lengths.shape))
if not (lengths.dtype.kind == 'u' or lengths.dtype.kind == 'i'):
raise ValueError(
'Lengths tensor must have an integer dtype, but got dtype {} '
'instead.'.format(lengths.dtype))
# observations
if not env_spec.observation_space.contains(first_observation):
# Discrete actions can be either in the space normally, or one-hot
# encoded.
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
first_observation.shape):
raise ValueError('observations should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
first_observation.shape))
else:
raise ValueError(
'observations must conform to observation_space {}, but '
'got data with shape {} instead.'.format(
env_spec.observation_space, first_observation))
if observations.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of observations to be length {}, '
'but got length {} instead.'.format(inferred_batch_size,
observations.shape[0]))
# observations
if not env_spec.observation_space.contains(last_observations[0]):
# Discrete actions can be either in the space normally, or one-hot
# encoded.
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
last_observations[0].shape):
raise ValueError('last_observations should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
last_observations[0].shape))
else:
raise ValueError(
'last_observations must conform to observation_space {}, '
'but got data with shape {} instead.'.format(
env_spec.observation_space, last_observations[0]))
if last_observations.shape[0] != len(lengths):
raise ValueError(
'Expected batch dimension of last_observations to be length '
'{}, but got length {} instead.'.format(
len(lengths), last_observations.shape[0]))
# actions
if not env_spec.action_space.contains(first_action):
# Discrete actions can be either in the space normally, or one-hot
# encoded.
if isinstance(env_spec.action_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.action_space.flat_dim != np.prod(
first_action.shape):
raise ValueError('actions should have the same '
'dimensionality as the action_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.action_space.flat_dim,
first_action.shape))
else:
raise ValueError(
'actions must conform to action_space {}, but got data '
'with shape {} instead.'.format(env_spec.action_space,
first_action))
if actions.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of actions to be length {}, but got '
'length {} instead.'.format(inferred_batch_size,
actions.shape[0]))
# rewards
if rewards.shape != (inferred_batch_size, ):
raise ValueError(
'Rewards tensor must have shape {}, but got shape {} '
'instead.'.format(inferred_batch_size, rewards.shape))
# terminals
if terminals.shape != (inferred_batch_size, ):
raise ValueError(
'terminals tensor must have shape {}, but got shape {} '
'instead.'.format(inferred_batch_size, terminals.shape))
if terminals.dtype != np.bool:
raise ValueError(
'terminals tensor must be dtype np.bool, but got tensor '
'of dtype {} instead.'.format(terminals.dtype))
# env_infos
for key, val in env_infos.items():
if not isinstance(val, (dict, np.ndarray)):
raise ValueError(
'Each entry in env_infos must be a numpy array or '
'dictionary, but got key {} with value type {} instead.'.
format(key, type(val)))
if (isinstance(val, np.ndarray)
and val.shape[0] != inferred_batch_size):
                if not (val.shape[0] == len(lengths)
                        and sum([len(v) for v in val]) == inferred_batch_size):
raise ValueError(
'Each entry in env_infos must have a batch dimension of '
'length {}, but got key {} with batch size {} instead.'.
format(inferred_batch_size, key, val.shape[0]))
# agent_infos
for key, val in agent_infos.items():
if not isinstance(val, (dict, np.ndarray)):
raise ValueError(
                    'Each entry in agent_infos must be a numpy array or '
                    'dictionary, but got key {} with value type {} '
                    'instead.'.format(key, type(val)))
if (isinstance(val, np.ndarray)
and val.shape[0] != inferred_batch_size):
raise ValueError(
'Each entry in agent_infos must have a batch dimension of '
'length {}, but got key {} with batch size {} instead.'.
format(inferred_batch_size, key, val.shape[0]))
return super().__new__(TrajectoryBatch, env_spec, observations,
last_observations, actions, rewards, terminals,
env_infos, agent_infos, lengths)
@classmethod
def concatenate(cls, *batches):
"""Create a TrajectoryBatch by concatenating TrajectoryBatches.
Args:
batches (list[TrajectoryBatch]): Batches to concatenate.
Returns:
TrajectoryBatch: The concatenation of the batches.
"""
if __debug__:
for b in batches:
assert (set(b.env_infos.keys()) == set(
batches[0].env_infos.keys()))
assert (set(b.agent_infos.keys()) == set(
batches[0].agent_infos.keys()))
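        # Concatenates env_info values across batches. If the per-path arrays
        # disagree in rank or trailing shape (ragged data such as
        # variable-length renders), the elements are packed into a 1-D object
        # array so concatenation still succeeds; otherwise a plain
        # np.concatenate is used.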
def _concatenate_env_info(x):
if not isinstance(x[0], np.ndarray):
return np.concatenate(x)
all_ndims = set([i.ndim for i in x])
if len(all_ndims) != 1 or len(set([i.shape[1:] for i in x])) != 1:
#assert 1 in all_ndims
#assert np.object in [i.dtype for i in x]
res = np.empty(sum(len(i) for i in x), np.object)
idx = 0
for i in x:
for j in i:
res[idx] = j
idx += 1
return res
return np.concatenate(x)
env_infos = {
#k: np.concatenate([b.env_infos[k] for b in batches])
k: _concatenate_env_info([b.env_infos[k] for b in batches])
for k in batches[0].env_infos.keys()
}
agent_infos = {
k: np.concatenate([b.agent_infos[k] for b in batches])
for k in batches[0].agent_infos.keys()
}
return cls(
batches[0].env_spec,
np.concatenate([batch.observations for batch in batches]),
np.concatenate([batch.last_observations for batch in batches]),
np.concatenate([batch.actions for batch in batches]),
np.concatenate([batch.rewards for batch in batches]),
np.concatenate([batch.terminals for batch in batches]), env_infos,
agent_infos, np.concatenate([batch.lengths for batch in batches]))
def split(self):
"""Split a TrajectoryBatch into a list of TrajectoryBatches.
The opposite of concatenate.
Returns:
list[TrajectoryBatch]: A list of TrajectoryBatches, with one
trajectory per batch.
"""
trajectories = []
start = 0
for i, length in enumerate(self.lengths):
stop = start + length
traj = TrajectoryBatch(env_spec=self.env_spec,
observations=self.observations[start:stop],
last_observations=np.asarray(
[self.last_observations[i]]),
actions=self.actions[start:stop],
rewards=self.rewards[start:stop],
terminals=self.terminals[start:stop],
env_infos=tensor_utils.slice_nested_dict(
self.env_infos, start, stop),
agent_infos=tensor_utils.slice_nested_dict(
self.agent_infos, start, stop),
lengths=np.asarray([length]))
trajectories.append(traj)
start = stop
return trajectories
def to_trajectory_list(self):
"""Convert the batch into a list of dictionaries.
Returns:
list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys:
* observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*) (the unflattened state
space of the current environment). observations[i] was
used by the agent to choose actions[i].
* next_observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*). next_observations[i] was
observed by the agent after taking actions[i].
* actions (np.ndarray): Non-flattened array of actions. Should
have shape (T, S^*) (the unflattened action space of the
current environment).
* rewards (np.ndarray): Array of rewards of shape (T,) (1D
array of length timesteps).
* dones (np.ndarray): Array of dones of shape (T,) (1D array
of length timesteps).
* agent_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `env_info` arrays.
"""
start = 0
trajectories = []
for i, length in enumerate(self.lengths):
stop = start + length
trajectories.append({
'observations':
self.observations[start:stop],
'next_observations':
np.concatenate((self.observations[1 + start:stop],
[self.last_observations[i]])),
'actions':
self.actions[start:stop],
'rewards':
self.rewards[start:stop],
'env_infos':
{k: v[start:stop]
for (k, v) in self.env_infos.items()},
'agent_infos':
{k: v[start:stop]
for (k, v) in self.agent_infos.items()},
'dones':
self.terminals[start:stop]
})
start = stop
return trajectories
@classmethod
def from_trajectory_list(cls, env_spec, paths):
"""Create a TrajectoryBatch from a list of trajectories.
Args:
env_spec (garage.envs.EnvSpec): Specification for the environment
from which this data was sampled.
paths (list[dict[str, np.ndarray or dict[str, np.ndarray]]]): Keys:
* observations (np.ndarray): Non-flattened array of
observations. Typically has shape (T, S^*) (the unflattened
state space of the current environment). observations[i]
was used by the agent to choose actions[i]. observations
may instead have shape (T + 1, S^*).
* next_observations (np.ndarray): Non-flattened array of
observations. Has shape (T, S^*). next_observations[i] was
observed by the agent after taking actions[i]. Optional.
Note that to ensure all information from the environment
was preserved, observations[i] should have shape (T + 1,
S^*), or this key should be set. However, this method is
lenient and will "duplicate" the last observation if the
original last observation has been lost.
* actions (np.ndarray): Non-flattened array of actions. Should
have shape (T, S^*) (the unflattened action space of the
current environment).
* rewards (np.ndarray): Array of rewards of shape (T,) (1D
array of length timesteps).
                * dones (np.ndarray): Array of dones of shape (T,) (1D array
of length timesteps).
* agent_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `agent_info` arrays.
* env_infos (dict[str, np.ndarray]): Dictionary of stacked,
non-flattened `env_info` arrays.
"""
lengths = np.asarray([len(p['rewards']) for p in paths])
if all(
len(path['observations']) == length + 1
for (path, length) in zip(paths, lengths)):
last_observations = np.asarray(
[p['observations'][-1] for p in paths])
observations = np.concatenate(
[p['observations'][:-1] for p in paths])
else:
# The number of observations and timesteps must match.
observations = np.concatenate([p['observations'] for p in paths])
if paths[0].get('next_observations') is not None:
last_observations = np.asarray(
[p['next_observations'][-1] for p in paths])
else:
last_observations = np.asarray(
[p['observations'][-1] for p in paths])
stacked_paths = tensor_utils.concat_tensor_dict_list(paths)
return cls(env_spec=env_spec,
observations=observations,
last_observations=last_observations,
actions=stacked_paths['actions'],
rewards=stacked_paths['rewards'],
terminals=stacked_paths['dones'],
env_infos=stacked_paths['env_infos'],
agent_infos=stacked_paths['agent_infos'],
lengths=lengths)
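# Usage sketch (hypothetical `env` and `paths` gathered by a sampler; shown as
# a comment so that importing this module stays side-effect free):
#     batch = TrajectoryBatch.from_trajectory_list(env.spec, paths)
#     for traj in batch.split():  # one TrajectoryBatch per trajectory
#         print(traj.lengths, traj.rewards.sum())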
class TimeStep(
collections.namedtuple('TimeStep', [
'env_spec',
'observation',
'action',
'reward',
'next_observation',
'terminal',
'env_info',
'agent_info',
])):
# pylint: disable=missing-return-doc, missing-return-type-doc, missing-param-doc, missing-type-doc # noqa: E501
r"""A tuple representing a single TimeStep.
A :class:`TimeStep` represents a single sample when an agent interacts with
an environment.
Attributes:
env_spec (garage.envs.EnvSpec): Specification for the environment from
which this data was sampled.
        observation (numpy.ndarray): A numpy array of shape :math:`(O^*)`
            containing the observation for this time step in the
            environment. These must conform to
            :obj:`env_spec.observation_space`.
        action (numpy.ndarray): A numpy array of shape :math:`(A^*)`
            containing the action for this time step. These must conform
            to :obj:`env_spec.action_space`.
        reward (float): A float representing the reward for taking the action
            given the observation, at this time step.
        next_observation (numpy.ndarray): A numpy array of shape :math:`(O^*)`
            containing the observation observed after taking the action at
            this time step. These must conform to
            :obj:`env_spec.observation_space`.
        terminal (bool): The termination signal for this time step.
        env_info (dict): A dict of arbitrary environment state information.
        agent_info (dict): A dict of arbitrary agent state information. For
            example, this may contain the hidden states from an RNN policy.
Raises:
ValueError: If any of the above attributes do not conform to their
prescribed types and shapes.
"""
def __new__(cls, env_spec, observation, action, reward, next_observation,
terminal, env_info, agent_info): # noqa: D102
# pylint: disable=too-many-branches
# observation
if not env_spec.observation_space.contains(observation):
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
observation.shape):
raise ValueError('observation should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
observation.shape))
else:
raise ValueError(
'observation must conform to observation_space {}, '
'but got data with shape {} instead.'.format(
env_spec.observation_space, observation))
if not env_spec.observation_space.contains(next_observation):
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
next_observation.shape):
raise ValueError('next_observation should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
next_observation.shape))
else:
raise ValueError(
'next_observation must conform to observation_space {}, '
'but got data with shape {} instead.'.format(
env_spec.observation_space, next_observation))
# action
if not env_spec.action_space.contains(action):
if isinstance(env_spec.action_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.action_space.flat_dim != np.prod(action.shape):
raise ValueError('action should have the same '
'dimensionality as the action_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.action_space.flat_dim,
action.shape))
else:
raise ValueError('action must conform to action_space {}, '
'but got data with shape {} instead.'.format(
env_spec.action_space, action))
if not isinstance(agent_info, dict):
raise ValueError('agent_info must be type {}, but got type {} '
'instead.'.format(dict, type(agent_info)))
if not isinstance(env_info, dict):
raise ValueError('env_info must be type {}, but got type {} '
'instead.'.format(dict, type(env_info)))
# rewards
if not isinstance(reward, float):
raise ValueError('reward must be type {}, but got type {} '
'instead.'.format(float, type(reward)))
if not isinstance(terminal, bool):
raise ValueError(
'terminal must be dtype bool, but got dtype {} instead.'.
format(type(terminal)))
return super().__new__(TimeStep, env_spec, observation, action, reward,
next_observation, terminal, env_info,
agent_info)
class InOutSpec:
"""Describes the input and output spaces of a primitive or module.
Args:
input_space (akro.Space): Input space of a module.
output_space (akro.Space): Output space of a module.
"""
def __init__(self, input_space, output_space):
self._input_space = input_space
self._output_space = output_space
@property
def input_space(self):
"""Get input space of the module.
Returns:
akro.Space: Input space of the module.
"""
return self._input_space
@property
def output_space(self):
"""Get output space of the module.
Returns:
akro.Space: Output space of the module.
"""
return self._output_space
class TimeStepBatch(
collections.namedtuple('TimeStepBatch', [
'env_spec',
'observations',
'actions',
'rewards',
'next_observations',
'terminals',
'env_infos',
'agent_infos',
])):
# pylint: disable=missing-param-doc, missing-type-doc
"""A tuple representing a batch of TimeSteps.
Data type for off-policy algorithms, imitation learning and batch-RL.
Attributes:
env_spec (garage.envs.EnvSpec): Specification for the environment from
which this data was sampled.
observations (numpy.ndarray): Non-flattened array of observations.
Typically has shape (batch_size, S^*) (the unflattened state space
of the current environment).
actions (numpy.ndarray): Non-flattened array of actions. Should
have shape (batch_size, S^*) (the unflattened action space of the
current environment).
rewards (numpy.ndarray): Array of rewards of shape (batch_size,) (1D
array of length batch_size).
        next_observations (numpy.ndarray): Non-flattened array of next
            observations. Has shape (batch_size, S^*). next_observations[i]
            was observed by the agent after taking actions[i].
        terminals (numpy.ndarray): A boolean numpy array of shape
            (batch_size,) containing the termination signals for all
            transitions in this batch.
        env_infos (dict): A dict of arbitrary environment state
            information.
agent_infos (dict): A dict of arbitrary agent state information. For
example, this may contain the hidden states from an RNN policy.
Raises:
ValueError: If any of the above attributes do not conform to their
prescribed types and shapes.
"""
__slots__ = ()
def __new__(cls, env_spec, observations, actions, rewards,
next_observations, terminals, env_infos,
agent_infos): # noqa: D102
# pylint: disable=missing-return-doc, missing-return-type-doc,
# pylint: disable=too-many-branches
inferred_batch_size = len(terminals)
if inferred_batch_size < 1:
raise ValueError(
'Expected batch dimension of terminals to be greater than 1, '
'but got length {} instead.'.format(inferred_batch_size))
first_observation = observations[0]
first_action = actions[0]
# observation
if not env_spec.observation_space.contains(first_observation):
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
first_observation.shape):
raise ValueError('observations should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
first_observation.shape))
else:
raise ValueError(
'observations must conform to observation_space {}, '
'but got data with shape {} instead.'.format(
env_spec.observation_space, first_observation.shape))
if observations.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of observations to be length {}, '
'but got length {} instead.'.format(inferred_batch_size,
observations.shape[0]))
# next_observation
if not env_spec.observation_space.contains(next_observations[0]):
if isinstance(env_spec.observation_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.observation_space.flat_dim != np.prod(
next_observations[0].shape):
raise ValueError('next_observations should have the same '
'dimensionality as the observation_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.observation_space.flat_dim,
next_observations[0].shape))
else:
raise ValueError(
'next_observations must conform to observation_space {}, '
'but got data with shape {} instead.'.format(
env_spec.observation_space,
                        next_observations[0].shape))
if next_observations.shape[0] != inferred_batch_size:
raise ValueError(
                'Expected batch dimension of next_observations to be length '
                '{}, but got length {} instead.'.format(
                    inferred_batch_size, next_observations.shape[0]))
# action
if not env_spec.action_space.contains(first_action):
if isinstance(env_spec.action_space,
(akro.Box, akro.Discrete, akro.Dict)):
if env_spec.action_space.flat_dim != np.prod(
first_action.shape):
raise ValueError('actions should have the same '
'dimensionality as the action_space '
'({}), but got data with shape {} '
'instead'.format(
env_spec.action_space.flat_dim,
first_action.shape))
else:
raise ValueError('actions must conform to action_space {}, '
'but got data with shape {} instead.'.format(
env_spec.action_space,
first_action.shape))
if actions.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of actions to be length {}, but got '
'length {} instead.'.format(inferred_batch_size,
actions.shape[0]))
# rewards
if rewards.shape[0] != inferred_batch_size:
raise ValueError(
'Expected batch dimension of rewards to be length {}, but got '
'length {} instead.'.format(inferred_batch_size,
rewards.shape[0]))
# terminals
if terminals.dtype != np.bool:
raise ValueError(
'terminals tensor must be dtype np.bool, but got tensor '
'of dtype {} instead.'.format(terminals.dtype))
# env_infos
for key, val in env_infos.items():
if not isinstance(val, (dict, np.ndarray)):
raise ValueError(
'Each entry in env_infos must be a numpy array or '
'dictionary, but got key {} with value type {} '
'instead.'.format(key, type(val)))
if (isinstance(val, np.ndarray)
and val.shape[0] != inferred_batch_size):
raise ValueError(
'Each entry in env_infos must have a batch dimension '
'of '
'length {}, but got key {} with batch size {} instead.'.
format(inferred_batch_size, key, val.shape[0]))
# agent_infos
for key, val in agent_infos.items():
if not isinstance(val, (dict, np.ndarray)):
raise ValueError(
                    'Each entry in agent_infos must be a numpy array or '
                    'dictionary, but got key {} with value type {} '
                    'instead.'.format(key, type(val)))
if (isinstance(val, np.ndarray)
and val.shape[0] != inferred_batch_size):
raise ValueError(
'Each entry in agent_infos must have a batch '
'dimension of '
'length {}, but got key {} with batch size {} instead.'.
format(inferred_batch_size, key, val.shape[0]))
return super().__new__(TimeStepBatch, env_spec, observations, actions,
rewards, next_observations, terminals,
env_infos, agent_infos)
@classmethod
def concatenate(cls, *batches):
"""Create a TimeStepBatch by concatenating TimeStepBatches.
Args:
batches (list[TimeStepBatch]): Batches to concatenate.
Returns:
TimeStepBatch: The concatenation of the batches.
Raises:
ValueError: If no TimeStepBatches are provided.
"""
if len(batches) < 1:
raise ValueError('Please provide at least one TimeStepBatch to '
'concatenate')
env_infos = {
k: np.concatenate([b.env_infos[k] for b in batches])
for k in batches[0].env_infos.keys()
}
agent_infos = {
k: np.concatenate([b.agent_infos[k] for b in batches])
for k in batches[0].agent_infos.keys()
}
return cls(
batches[0].env_spec,
np.concatenate([batch.observations for batch in batches]),
np.concatenate([batch.actions for batch in batches]),
np.concatenate([batch.rewards for batch in batches]),
np.concatenate([batch.next_observations for batch in batches]),
np.concatenate([batch.terminals for batch in batches]), env_infos,
agent_infos)
def split(self):
"""Split a TimeStepBatch into a list of TimeStepBatches.
The opposite of concatenate.
Returns:
list[TimeStepBatch]: A list of TimeStepBatches, with one
TimeStep per TimeStepBatch.
"""
time_steps = []
for i in range(len(self.terminals)):
time_step = TimeStepBatch(
env_spec=self.env_spec,
observations=np.asarray([self.observations[i]]),
actions=np.asarray([self.actions[i]]),
rewards=np.asarray([self.rewards[i]]),
next_observations=np.asarray([self.next_observations[i]]),
terminals=np.asarray([self.terminals[i]]),
env_infos={
k: np.asarray([v[i]])
for (k, v) in self.env_infos.items()
},
agent_infos={
k: np.asarray([v[i]])
for (k, v) in self.agent_infos.items()
},
)
time_steps.append(time_step)
return time_steps
def to_time_step_list(self):
"""Convert the batch into a list of dictionaries.
        This breaks the TimeStepBatch object into a list of single
        time step sample dictionaries. len(terminals) (i.e., the number of
        discrete time steps) dictionaries are returned.
Returns:
list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys:
                observations (numpy.ndarray): Non-flattened array of
                    observations. Typically has shape (batch_size, S^*) (the
                    unflattened state space of the current environment).
                actions (numpy.ndarray): Non-flattened array of actions.
                    Should have shape (batch_size, S^*) (the unflattened
                    action space of the current environment).
                rewards (numpy.ndarray): Array of rewards of shape
                    (batch_size,) (1D array of length batch_size).
                next_observations (numpy.ndarray): Non-flattened array of
                    next observations. Has shape (batch_size, S^*).
                    next_observations[i] was observed by the agent after
                    taking actions[i].
                terminals (numpy.ndarray): A boolean numpy array of shape
                    (batch_size,) containing the termination signals for all
                    transitions in this batch.
                env_infos (dict): A dict of arbitrary environment state
                    information.
                agent_infos (dict): A dict of arbitrary agent state
                    information. For example, this may contain the hidden
                    states from an RNN policy.
"""
samples = []
for i in range(len(self.terminals)):
samples.append({
'observations':
np.asarray([self.observations[i]]),
'actions':
np.asarray([self.actions[i]]),
'rewards':
np.asarray([self.rewards[i]]),
'next_observations':
np.asarray([self.next_observations[i]]),
'terminals':
np.asarray([self.terminals[i]]),
'env_infos':
{k: np.asarray([v[i]])
for (k, v) in self.env_infos.items()},
'agent_infos':
{k: np.asarray([v[i]])
for (k, v) in self.agent_infos.items()},
})
return samples
@classmethod
def from_time_step_list(cls, env_spec, ts_samples):
"""Create a TimeStepBatch from a list of time step dictionaries.
Args:
env_spec (garage.envs.EnvSpec): Specification for the environment
from which this data was sampled.
ts_samples (list[dict[str, np.ndarray or dict[str, np.ndarray]]]):
keys:
* observations (numpy.ndarray): Non-flattened array of
observations.
Typically has shape (batch_size, S^*) (the unflattened
state space of the current environment).
* actions (numpy.ndarray): Non-flattened array of actions.
Should have shape (batch_size, S^*) (the unflattened action
space of the current environment).
* rewards (numpy.ndarray): Array of rewards of shape (
batch_size,) (1D array of length batch_size).
                * next_observations (numpy.ndarray): Non-flattened array of
                    next observations. Has shape (batch_size, S^*).
                    next_observations[i] was observed by the agent after
                    taking actions[i].
                * terminals (numpy.ndarray): A boolean numpy array of shape
                    (batch_size,) containing the termination signals for all
                    transitions in this batch.
                * env_infos (dict): A dict of arbitrary environment state
                    information.
* agent_infos (dict): A dict of arbitrary agent
state information. For example, this may contain the
hidden states from an RNN policy.
Returns:
TimeStepBatch: The concatenation of samples.
Raises:
ValueError: If no dicts are provided.
"""
if len(ts_samples) < 1:
raise ValueError('Please provide at least one dict')
ts_batches = [
TimeStepBatch(env_spec=env_spec,
observations=sample['observations'],
actions=sample['actions'],
rewards=sample['rewards'],
next_observations=sample['next_observations'],
terminals=sample['terminals'],
env_infos=sample['env_infos'],
agent_infos=sample['agent_infos'])
for sample in ts_samples
]
return TimeStepBatch.concatenate(*ts_batches)
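# Usage sketch (hypothetical `env_spec` and `samples`, where `samples` is a
# list of dicts shaped as documented in from_time_step_list above):
#     batch = TimeStepBatch.from_time_step_list(env_spec, samples)
#     merged = TimeStepBatch.concatenate(batch, batch)
#     singles = merged.split()  # one TimeStepBatch per transition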
| 43,670 | 45.458511 | 116 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/_functions.py | """Functions exposed directly in the garage namespace."""
from collections import defaultdict
from dowel import tabular
import numpy as np
import garage
from garage.misc.tensor_utils import discount_cumsum
class _Default: # pylint: disable=too-few-public-methods
"""A wrapper class to represent default arguments.
Args:
val (object): Argument value.
"""
def __init__(self, val):
self.val = val
def make_optimizer(optimizer_type, module=None, **kwargs):
"""Create an optimizer for pyTorch & tensorflow algos.
Args:
optimizer_type (Union[type, tuple[type, dict]]): Type of optimizer.
This can be an optimizer type such as 'torch.optim.Adam' or a
tuple of type and dictionary, where dictionary contains arguments
to initialize the optimizer e.g. (torch.optim.Adam, {'lr' : 1e-3})
        module (optional): If the optimizer type is a `torch.optimizer`,
            the `torch.nn.Module` whose parameters need to be optimized
            must be specified.
kwargs (dict): Other keyword arguments to initialize optimizer. This
is not used when `optimizer_type` is tuple.
Returns:
torch.optim.Optimizer: Constructed optimizer.
Raises:
ValueError: Raises value error when `optimizer_type` is tuple, and
non-default argument is passed in `kwargs`.
"""
if isinstance(optimizer_type, tuple):
opt_type, opt_args = optimizer_type
for name, arg in kwargs.items():
if not isinstance(arg, _Default):
                raise ValueError(
                    'Should not specify {} and explicit optimizer args at '
                    'the same time'.format(name))
if module is not None:
return opt_type(module.parameters(), **opt_args)
else:
return opt_type(**opt_args)
opt_args = {
k: v.val if isinstance(v, _Default) else v
for k, v in kwargs.items()
}
if module is not None:
return optimizer_type(module.parameters(), **opt_args)
else:
return optimizer_type(**opt_args)
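# Usage sketch (assumes torch is importable and `policy` is a torch.nn.Module;
# both forms below mirror the docstring above):
#     opt = make_optimizer(torch.optim.Adam, module=policy, lr=_Default(1e-3))
#     opt = make_optimizer((torch.optim.Adam, dict(lr=1e-3)), module=policy)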
def log_multitask_performance(itr, batch, discount, name_map=None):
r"""Log performance of trajectories from multiple tasks.
Args:
itr (int): Iteration number to be logged.
batch (garage.TrajectoryBatch): Batch of trajectories. The trajectories
should have either the "task_name" or "task_id" `env_infos`. If the
"task_name" is not present, then `name_map` is required, and should
map from task id's to task names.
discount (float): Discount used in computing returns.
name_map (dict[int, str] or None): Mapping from task id's to task
names. Optional if the "task_name" environment info is present.
Note that if provided, all tasks listed in this map will be logged,
even if there are no trajectories present for them.
Returns:
numpy.ndarray: Undiscounted returns averaged across all tasks. Has
shape :math:`(N \bullet [T])`.
"""
traj_by_name = defaultdict(list)
for trajectory in batch.split():
task_name = '__unnamed_task__'
if 'task_name' in trajectory.env_infos:
task_name = trajectory.env_infos['task_name'][0]
elif 'task_id' in trajectory.env_infos:
name_map = {} if name_map is None else name_map
task_id = trajectory.env_infos['task_id'][0]
task_name = name_map.get(task_id, 'Task #{}'.format(task_id))
traj_by_name[task_name].append(trajectory)
if name_map is None:
task_names = traj_by_name.keys()
else:
task_names = name_map.values()
for task_name in task_names:
if task_name in traj_by_name:
trajectories = traj_by_name[task_name]
log_performance(itr,
garage.TrajectoryBatch.concatenate(*trajectories),
discount,
prefix=task_name)
else:
with tabular.prefix(task_name + '/'):
tabular.record('Iteration', itr)
tabular.record('NumTrajs', 0)
tabular.record('AverageDiscountedReturn', np.nan)
tabular.record('AverageReturn', np.nan)
tabular.record('StdReturn', np.nan)
tabular.record('MaxReturn', np.nan)
tabular.record('MinReturn', np.nan)
tabular.record('CompletionRate', np.nan)
tabular.record('SuccessRate', np.nan)
return log_performance(itr, batch, discount=discount, prefix='Average')
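# Usage sketch (hypothetical `batch` whose env_infos carry a 'task_id' key):
#     returns = log_multitask_performance(
#         itr, batch, discount=0.99, name_map={0: 'push', 1: 'reach'})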
def log_performance(itr, batch, discount, prefix='Evaluation'):
"""Evaluate the performance of an algorithm on a batch of trajectories.
Args:
itr (int): Iteration number.
batch (TrajectoryBatch): The trajectories to evaluate with.
discount (float): Discount value, from algorithm's property.
prefix (str): Prefix to add to all logged keys.
Returns:
numpy.ndarray: Undiscounted returns.
"""
returns = []
undiscounted_returns = []
completion = []
success = []
for trajectory in batch.split():
returns.append(discount_cumsum(trajectory.rewards, discount))
undiscounted_returns.append(sum(trajectory.rewards))
completion.append(float(trajectory.terminals.any()))
if 'success' in trajectory.env_infos:
success.append(float(trajectory.env_infos['success'].any()))
average_discounted_return = np.mean([rtn[0] for rtn in returns])
with tabular.prefix(prefix + '/'):
tabular.record('Iteration', itr)
tabular.record('NumTrajs', len(returns))
tabular.record('AverageDiscountedReturn', average_discounted_return)
tabular.record('AverageReturn', np.mean(undiscounted_returns))
tabular.record('StdReturn', np.std(undiscounted_returns))
tabular.record('MaxReturn', np.max(undiscounted_returns))
tabular.record('MinReturn', np.min(undiscounted_returns))
tabular.record('CompletionRate', np.mean(completion))
if success:
tabular.record('SuccessRate', np.mean(success))
return undiscounted_returns
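# Usage sketch (hypothetical `batch` of evaluation rollouts):
#     undiscounted = log_performance(itr, batch, discount=0.99,
#                                    prefix='Evaluation')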
| 6,267 | 37.691358 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/__init__.py | """Garage wrappers for gym environments."""
from garage.envs.env_spec import EnvSpec
from garage.envs.garage_env import GarageEnv
from garage.envs.grid_world_env import GridWorldEnv
from garage.envs.multi_env_wrapper import MultiEnvWrapper
from garage.envs.normalized_env import normalize
from garage.envs.point_env import PointEnv
from garage.envs.step import Step
from garage.envs.task_onehot_wrapper import TaskOnehotWrapper
__all__ = [
'GarageEnv',
'Step',
'EnvSpec',
'GridWorldEnv',
'MultiEnvWrapper',
'normalize',
'PointEnv',
'TaskOnehotWrapper',
]
| 589 | 25.818182 | 61 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/env_spec.py | """EnvSpec class."""
from garage import InOutSpec
class EnvSpec(InOutSpec):
"""Describes the action and observation spaces of an environment.
Args:
observation_space (akro.Space): The observation space of the env.
action_space (akro.Space): The action space of the env.
"""
def __init__(self, observation_space, action_space):
super().__init__(action_space, observation_space)
@property
def action_space(self):
"""Get action space.
Returns:
akro.Space: Action space of the env.
"""
return self.input_space
@property
def observation_space(self):
"""Get observation space of the env.
Returns:
akro.Space: Observation space.
"""
return self.output_space
@action_space.setter
def action_space(self, action_space):
"""Set action space of the env.
Args:
action_space (akro.Space): Action space.
"""
self._input_space = action_space
@observation_space.setter
def observation_space(self, observation_space):
"""Set observation space of the env.
Args:
observation_space (akro.Space): Observation space.
"""
self._output_space = observation_space
def __eq__(self, other):
"""See :meth:`object.__eq__`.
Args:
other (EnvSpec): :class:`~EnvSpec` to compare with.
Returns:
bool: Whether these :class:`~EnvSpec` instances are equal.
"""
return (self.observation_space == other.observation_space
and self.action_space == other.action_space)
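# Usage sketch (akro is already imported elsewhere in this package):
#     import akro
#     spec = EnvSpec(observation_space=akro.Box(low=-1., high=1., shape=(3,)),
#                    action_space=akro.Discrete(2))
#     assert spec.observation_space.flat_dim == 3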
| 1,679 | 23 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/garage_env.py | """Wrapper class that converts gym.Env into GarageEnv."""
import copy
import akro
import gym
from garage.envs.env_spec import EnvSpec
# The gym environments using one of the packages in the following lists as
# entry points don't close their viewer windows.
KNOWN_GYM_NOT_CLOSE_VIEWER = [
# Please keep alphabetized
'gym.envs.atari',
'gym.envs.box2d',
'gym.envs.classic_control'
]
KNOWN_GYM_NOT_CLOSE_MJ_VIEWER = [
# Please keep alphabetized
'gym.envs.mujoco',
'gym.envs.robotics'
]
class GarageEnv(gym.Wrapper):
"""Returns an abstract Garage wrapper class for gym.Env.
In order to provide pickling (serialization) and parameterization
for gym.Envs, they must be wrapped with a GarageEnv. This ensures
compatibility with existing samplers and checkpointing when the
envs are passed internally around garage.
Furthermore, classes inheriting from GarageEnv should silently
convert action_space and observation_space from gym.Spaces to
akro.spaces.
Args:
env (gym.Env): An env that will be wrapped
        env_name (str): If the env_name is specified, a gym environment
with that name will be created. If such an environment does not
exist, a `gym.error` is thrown.
is_image (bool): True if observations contain pixel values,
false otherwise. Setting this to true converts a gym.Spaces.Box
obs space to an akro.Image and normalizes pixel values.
"""
def __init__(self, env=None, env_name='', is_image=False):
# Needed for deserialization
self._env_name = env_name
self._env = env
if env_name:
super().__init__(gym.make(env_name))
else:
super().__init__(env)
self.action_space = akro.from_gym(self.env.action_space)
self.observation_space = akro.from_gym(self.env.observation_space,
is_image=is_image)
self.__spec = EnvSpec(action_space=self.action_space,
observation_space=self.observation_space)
@property
def spec(self):
"""Return the environment specification.
This property needs to exist, since it's defined as a property in
gym.Wrapper in a way that makes it difficult to overwrite.
Returns:
            garage.envs.env_spec.EnvSpec: The environment specification.
"""
return self.__spec
def close(self):
"""Close the wrapped env."""
self._close_viewer_window()
self.env.close()
def _close_viewer_window(self):
"""Close viewer window.
Unfortunately, some gym environments don't close the viewer windows
properly, which leads to "out of memory" issues when several of
these environments are tested one after the other.
This method searches for the viewer object of type MjViewer, Viewer
or SimpleImageViewer, based on environment, and if the environment
is wrapped in other environment classes, it performs depth search
in those as well.
This method can be removed once OpenAI solves the issue.
"""
# We need to do some strange things here to fix-up flaws in gym
# pylint: disable=import-outside-toplevel
if self.env.spec:
if any(package in getattr(self.env.spec, 'entry_point', '')
for package in KNOWN_GYM_NOT_CLOSE_MJ_VIEWER):
# This import is not in the header to avoid a MuJoCo dependency
# with non-MuJoCo environments that use this base class.
try:
from mujoco_py.mjviewer import MjViewer
import glfw
except ImportError:
# If we can't import mujoco_py, we must not have an
# instance of a class that we know how to close here.
return
if (hasattr(self.env, 'viewer')
and isinstance(self.env.viewer, MjViewer)):
glfw.destroy_window(self.env.viewer.window)
elif any(package in getattr(self.env.spec, 'entry_point', '')
for package in KNOWN_GYM_NOT_CLOSE_VIEWER):
if hasattr(self.env, 'viewer'):
from gym.envs.classic_control.rendering import (
Viewer, SimpleImageViewer)
if (isinstance(self.env.viewer,
(SimpleImageViewer, Viewer))):
self.env.viewer.close()
def reset(self, **kwargs):
"""Call reset on wrapped env.
This method is necessary to suppress a deprecated warning
thrown by gym.Wrapper.
Args:
kwargs: Keyword args
Returns:
object: The initial observation.
"""
return self.env.reset(**kwargs)
def step(self, action):
"""Call step on wrapped env.
This method is necessary to suppress a deprecated warning
thrown by gym.Wrapper.
Args:
action (object): An action provided by the agent.
Returns:
object: Agent's observation of the current environment
float : Amount of reward returned after previous action
bool : Whether the episode has ended, in which case further step()
calls will return undefined results
dict: Contains auxiliary diagnostic information (helpful for
debugging, and sometimes learning)
"""
observation, reward, done, info = self.env.step(action)
# gym envs that are wrapped in TimeLimit wrapper modify
# the done/termination signal to be true whenever a time
# limit expiration occurs. The following statement sets
# the done signal to be True only if caused by an
# environment termination, and not a time limit
# termination. The time limit termination signal
# will be saved inside env_infos as
# 'GarageEnv.TimeLimitTerminated'
if 'TimeLimit.truncated' in info:
info['GarageEnv.TimeLimitTerminated'] = info['TimeLimit.truncated']
if info['TimeLimit.truncated']:
done = False
else:
info['TimeLimit.truncated'] = False
info['GarageEnv.TimeLimitTerminated'] = False
return observation, reward, done, info
def __getstate__(self):
"""See `Object.__getstate__.
Returns:
dict: The instance’s dictionary to be pickled.
"""
# the viewer object is not pickleable
# we first make a copy of the viewer
env = self.env
# get the inner env if it is a gym.Wrapper
if issubclass(env.__class__, gym.Wrapper):
env = env.unwrapped
if 'viewer' in env.__dict__:
_viewer = env.viewer
# remove the viewer and make a copy of the state
env.viewer = None
state = copy.deepcopy(self.__dict__)
# assign the viewer back to self.__dict__
env.viewer = _viewer
# the returned state doesn't have the viewer
return state
return self.__dict__
def __setstate__(self, state):
"""See `Object.__setstate__.
Args:
state (dict): Unpickled state of this object.
"""
self.__init__(state['_env'], state['_env_name'])
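# A minimal usage sketch added for illustration.  It assumes gym's
# 'CartPole-v0' id is available in the local install; any registered
# environment id would work the same way.
if __name__ == '__main__':
    demo_env = GarageEnv(env_name='CartPole-v0')
    first_obs = demo_env.reset()
    # The converted akro spaces still support sampling like gym spaces.
    obs, reward, done, info = demo_env.step(demo_env.action_space.sample())
    print(demo_env.spec.observation_space, reward, done)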
| 7,503 | 35.784314 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/grid_world_env.py | import gym
import numpy as np
from garage.envs.step import Step
MAPS = {
'chain': ['GFFFFFFFFFFFFFSFFFFFFFFFFFFFG'],
'4x4_safe': [
'SFFF',
'FWFW',
'FFFW',
'WFFG'
],
'4x4': [
'SFFF',
'FHFH',
'FFFH',
'HFFG'
],
'8x8': [
'SFFFFFFF',
'FFFFFFFF',
'FFFHFFFF',
'FFFFFHFF',
'FFFHFFFF',
'FHHFFFHF',
'FHFFHFHF',
'FFFHFFFG'
],
} # yapf: disable
class GridWorldEnv(gym.Env):
"""
| 'S' : starting point
| 'F' or '.': free space
| 'W' or 'x': wall
| 'H' or 'o': hole (terminates episode)
| 'G' : goal
"""
def __init__(self, desc='4x4'):
if isinstance(desc, str):
desc = MAPS[desc]
desc = np.array(list(map(list, desc)))
desc[desc == '.'] = 'F'
desc[desc == 'o'] = 'H'
desc[desc == 'x'] = 'W'
self.desc = desc
self.n_row, self.n_col = desc.shape
(start_x, ), (start_y, ) = np.nonzero(desc == 'S')
self.start_state = start_x * self.n_col + start_y
self.state = None
self.domain_fig = None
def reset(self):
self.state = self.start_state
return self.state
@staticmethod
def action_from_direction(d):
"""
Return the action corresponding to the given direction. This is a
helper method for debugging and testing purposes.
:return: the action index corresponding to the given direction
"""
return dict(left=0, down=1, right=2, up=3)[d]
def step(self, action):
"""
action map:
0: left
1: down
2: right
3: up
        :param action: an integer in [0, 3] encoding the action
:return:
"""
possible_next_states = self.get_possible_next_states(
self.state, action)
probs = [x[1] for x in possible_next_states]
next_state_idx = np.random.choice(len(probs), p=probs)
next_state = possible_next_states[next_state_idx][0]
next_x = next_state // self.n_col
next_y = next_state % self.n_col
next_state_type = self.desc[next_x, next_y]
if next_state_type == 'H':
done = True
reward = 0
elif next_state_type in ['F', 'S']:
done = False
reward = 0
elif next_state_type == 'G':
done = True
reward = 1
else:
raise NotImplementedError
self.state = next_state
return Step(observation=self.state, reward=reward, done=done)
def get_possible_next_states(self, state, action):
"""
Given the state and action, return a list of possible next states and
their probabilities. Only next states with nonzero probabilities will
be returned
:param state: start state
:param action: action
:return: a list of pairs (s', p(s'|s,a))
"""
# assert self.observation_space.contains(state)
# assert self.action_space.contains(action)
x = state // self.n_col
y = state % self.n_col
coords = np.array([x, y])
increments = np.array([[0, -1], [1, 0], [0, 1], [-1, 0]])
next_coords = np.clip(coords + increments[action], [0, 0],
[self.n_row - 1, self.n_col - 1])
next_state = next_coords[0] * self.n_col + next_coords[1]
state_type = self.desc[x, y]
next_state_type = self.desc[next_coords[0], next_coords[1]]
if next_state_type == 'W' or state_type == 'H' or state_type == 'G':
return [(state, 1.)]
else:
return [(next_state, 1.)]
@property
def action_space(self):
return gym.spaces.Discrete(4)
@property
def observation_space(self):
return gym.spaces.Discrete(self.n_row * self.n_col)
def render(self, mode='human'):
pass
def log_diagnostics(self, paths):
pass
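# A minimal usage sketch added for illustration: step through the default
# 4x4 map with a fixed direction sequence.  Nothing here is required by the
# class; it only demonstrates the reset()/step() interface.
if __name__ == '__main__':
    demo_env = GridWorldEnv(desc='4x4')
    state = demo_env.reset()
    for direction in ('down', 'down', 'right', 'right'):
        action = GridWorldEnv.action_from_direction(direction)
        transition = demo_env.step(action)
        print(transition.observation, transition.reward, transition.done)
        if transition.done:
            break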
| 4,028 | 26.979167 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/multi_env_wrapper.py | """A wrapper env that handles multiple tasks from different envs.
Useful while training multi-task reinforcement learning algorithms.
It provides observations augmented with one-hot representation of tasks.
"""
import random
import akro
import gym
import numpy as np
from garage.envs.garage_env import GarageEnv
def round_robin_strategy(num_tasks, last_task=None):
"""A function for sampling tasks in round robin fashion.
Args:
num_tasks (int): Total number of tasks.
last_task (int): Previously sampled task.
Returns:
int: task id.
"""
if last_task is None:
return 0
return (last_task + 1) % num_tasks
def uniform_random_strategy(num_tasks, _):
"""A function for sampling tasks uniformly at random.
Args:
num_tasks (int): Total number of tasks.
_ (object): Ignored by this sampling strategy.
Returns:
int: task id.
"""
return random.randint(0, num_tasks - 1)
class MultiEnvWrapper(gym.Wrapper):
"""A wrapper class to handle multiple environments.
This wrapper adds an integer 'task_id' to env_info every timestep.
Args:
envs (list(gym.Env)):
A list of objects implementing gym.Env.
sample_strategy (function(int, int)):
Sample strategy to be used when sampling a new task.
        mode (str): A string from 'vanilla', 'add-onehot' and 'del-onehot'.
The type of observation to use.
- 'vanilla' provides the observation as it is.
Use case: metaworld environments with MT* algorithms,
gym environments with Task Embedding.
- 'add-onehot' will append an one-hot task id to observation.
Use case: gym environments with MT* algorithms.
- 'del-onehot' assumes an one-hot task id is appended to
observation, and it excludes that.
Use case: metaworld environments with Task Embedding.
env_names (list(str)): The names of the environments corresponding to
envs. The index of an env_name must correspond to the index of the
corresponding env in envs. An env_name in env_names must be unique.
"""
def __init__(self,
envs,
sample_strategy=uniform_random_strategy,
mode='add-onehot',
env_names=None):
assert mode in ['vanilla', 'add-onehot', 'del-onehot']
self._sample_strategy = sample_strategy
self._num_tasks = len(envs)
self._active_task_index = None
self._observation_space = None
self._mode = mode
for i, env in enumerate(envs):
if not isinstance(env, GarageEnv):
envs[i] = GarageEnv(env)
super().__init__(envs[0])
if env_names is not None:
assert isinstance(env_names, list), 'env_names must be a list'
            msg = ('env_names are not unique or there is not an env_name '
                   'corresponding to each env in envs')
assert len(set(env_names)) == len(envs), msg
self._env_names = env_names
self._task_envs = []
for env in envs:
if (env.observation_space.shape !=
self.env.observation_space.shape):
raise ValueError(
'Observation space of all envs should be same.')
if env.action_space.shape != self.env.action_space.shape:
raise ValueError('Action space of all envs should be same.')
self._task_envs.append(env)
self.env.spec.observation_space = self.observation_space
self._spec = self.env.spec
@property
def spec(self):
"""Describes the action and observation spaces of the wrapped envs.
Returns:
garage.envs.EnvSpec: the action and observation spaces of the
wrapped environments.
"""
return self._spec
@property
def num_tasks(self):
"""Total number of tasks.
Returns:
int: number of tasks.
"""
return len(self._task_envs)
@property
def task_space(self):
"""Task Space.
Returns:
akro.Box: Task space.
"""
one_hot_ub = np.ones(self.num_tasks)
one_hot_lb = np.zeros(self.num_tasks)
return akro.Box(one_hot_lb, one_hot_ub)
@property
def active_task_index(self):
"""Index of active task env.
Returns:
int: Index of active task.
"""
        if hasattr(self.env, 'active_task_index'):
            return self.env.active_task_index
        return self._active_task_index
@property
def observation_space(self):
"""Observation space.
Returns:
akro.Box: Observation space.
"""
if self._mode == 'vanilla':
return self.env.observation_space
elif self._mode == 'add-onehot':
task_lb, task_ub = self.task_space.bounds
env_lb, env_ub = self._observation_space.bounds
return akro.Box(np.concatenate([env_lb, task_lb]),
np.concatenate([env_ub, task_ub]))
else: # self._mode == 'del-onehot'
env_lb, env_ub = self._observation_space.bounds
num_tasks = self._num_tasks
return akro.Box(env_lb[:-num_tasks], env_ub[:-num_tasks])
@observation_space.setter
def observation_space(self, observation_space):
"""Observation space setter.
Args:
observation_space (akro.Box): Observation space.
"""
self._observation_space = observation_space
def _active_task_one_hot(self):
"""One-hot representation of active task.
Returns:
numpy.ndarray: one-hot representation of active task
"""
one_hot = np.zeros(self.task_space.shape)
index = self.active_task_index or 0
one_hot[index] = self.task_space.high[index]
return one_hot
def reset(self, **kwargs):
"""Sample new task and call reset on new task env.
Args:
kwargs (dict): Keyword arguments to be passed to gym.Env.reset
Returns:
numpy.ndarray: active task one-hot representation + observation
"""
self._active_task_index = self._sample_strategy(
self._num_tasks, self._active_task_index)
self.env = self._task_envs[self._active_task_index]
obs = self.env.reset(**kwargs)
if self._mode == 'vanilla':
return obs
elif self._mode == 'add-onehot':
return np.concatenate([obs, self._active_task_one_hot()])
else: # self._mode == 'del-onehot'
return obs[:-self._num_tasks]
def step(self, action):
"""gym.Env step for the active task env.
Args:
            action (object): object to be passed in gym.Env.step(action)
Returns:
object: agent's observation of the current environment
float: amount of reward returned after previous action
bool: whether the episode has ended
dict: contains auxiliary diagnostic information
"""
obs, reward, done, info = self.env.step(action)
if self._mode == 'add-onehot':
obs = np.concatenate([obs, self._active_task_one_hot()])
elif self._mode == 'del-onehot':
obs = obs[:-self._num_tasks]
if 'task_id' not in info:
info['task_id'] = self._active_task_index
if self._env_names is not None:
info['task_name'] = self._env_names[self._active_task_index]
return obs, reward, done, info
def close(self):
"""Close all task envs."""
for env in self._task_envs:
env.close()
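# A minimal usage sketch added for illustration.  PointEnv is only an
# assumed example of two tasks with identical spaces; with the default
# 'add-onehot' mode every observation ends with a length-2 task one-hot.
if __name__ == '__main__':
    from garage.envs.point_env import PointEnv
    demo_env = MultiEnvWrapper(
        [PointEnv(goal=(1., 1.)), PointEnv(goal=(-1., 1.))],
        sample_strategy=round_robin_strategy)
    obs = demo_env.reset()  # point observation + task one-hot
    obs, reward, done, info = demo_env.step(demo_env.action_space.sample())
    print(info['task_id'])
    demo_env.close()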
| 7,899 | 31.113821 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/normalized_env.py | """An environment wrapper that normalizes action, observation and reward."""
import akro
import gym
import gym.spaces
import gym.spaces.utils
import numpy as np
from garage.envs import EnvSpec
from garagei.envs.akro_wrapper import AkroWrapperTrait
class NormalizedEnv(AkroWrapperTrait, gym.Wrapper):
"""An environment wrapper for normalization.
This wrapper normalizes action, and optionally observation and reward.
Args:
env (garage.envs.GarageEnv): An environment instance.
scale_reward (float): Scale of environment reward.
normalize_obs (bool): If True, normalize observation.
normalize_reward (bool): If True, normalize reward. scale_reward is
applied after normalization.
expected_action_scale (float): Assuming action falls in the range of
[-expected_action_scale, expected_action_scale] when normalize it.
flatten_obs (bool): Flatten observation if True.
obs_alpha (float): Update rate of moving average when estimating the
mean and variance of observations.
reward_alpha (float): Update rate of moving average when estimating the
mean and variance of rewards.
"""
def __init__(
self,
env,
scale_reward=1.,
normalize_obs=False,
normalize_reward=False,
expected_action_scale=1.,
flatten_obs=True,
obs_alpha=0.001,
reward_alpha=0.001,
):
super().__init__(env)
self._scale_reward = scale_reward
self._normalize_obs = normalize_obs
self._normalize_reward = normalize_reward
self._expected_action_scale = expected_action_scale
self._flatten_obs = flatten_obs
self._obs_alpha = obs_alpha
flat_obs_dim = gym.spaces.utils.flatdim(env.observation_space)
self._obs_mean = np.zeros(flat_obs_dim)
self._obs_var = np.ones(flat_obs_dim)
self._reward_alpha = reward_alpha
self._reward_mean = 0.
self._reward_var = 1.
if isinstance(self.env.action_space, gym.spaces.Box):
self.action_space = akro.Box(low=-self._expected_action_scale,
high=self._expected_action_scale,
shape=self.env.action_space.shape)
else:
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
#@property
#def spec(self):
# return EnvSpec(action_space=self.action_space,
# observation_space=self.observation_space)
def _update_obs_estimate(self, obs):
flat_obs = gym.spaces.utils.flatten(self.env.observation_space, obs)
self._obs_mean = (
1 - self._obs_alpha) * self._obs_mean + self._obs_alpha * flat_obs
self._obs_var = (
1 - self._obs_alpha) * self._obs_var + self._obs_alpha * np.square(
flat_obs - self._obs_mean)
def _update_reward_estimate(self, reward):
self._reward_mean = (1 - self._reward_alpha) * \
self._reward_mean + self._reward_alpha * reward
self._reward_var = (
1 - self._reward_alpha
) * self._reward_var + self._reward_alpha * np.square(
reward - self._reward_mean)
def _apply_normalize_obs(self, obs):
"""Compute normalized observation.
Args:
obs (np.ndarray): Observation.
Returns:
np.ndarray: Normalized observation.
"""
self._update_obs_estimate(obs)
flat_obs = gym.spaces.utils.flatten(self.env.observation_space, obs)
normalized_obs = (flat_obs -
self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8)
if not self._flatten_obs:
normalized_obs = gym.spaces.utils.unflatten(
self.env.observation_space, normalized_obs)
return normalized_obs
def _apply_normalize_reward(self, reward):
"""Compute normalized reward.
Args:
reward (float): Reward.
Returns:
float: Normalized reward.
"""
self._update_reward_estimate(reward)
return reward / (np.sqrt(self._reward_var) + 1e-8)
def reset(self, **kwargs):
"""Reset environment.
Args:
**kwargs: Additional parameters for reset.
        Returns:
            np.ndarray: The initial observation, normalized if
                `normalize_obs` is enabled.
"""
ret = self.env.reset(**kwargs)
if self._normalize_obs:
return self._apply_normalize_obs(ret)
else:
return ret
def step(self, action):
"""Feed environment with one step of action and get result.
Args:
action (np.ndarray): An action fed to the environment.
Returns:
tuple:
* observation (np.ndarray): The observation of the environment.
* reward (float): The reward acquired at this time step.
* done (boolean): Whether the environment was completed at this
time step.
* infos (dict): Environment-dependent additional information.
"""
if isinstance(self.env.action_space, gym.spaces.Box):
# rescale the action when the bounds are not inf
lb, ub = self.env.action_space.low, self.env.action_space.high
            if np.all(lb != -np.inf) and np.all(ub != np.inf):
scaled_action = lb + (action + self._expected_action_scale) * (
0.5 * (ub - lb) / self._expected_action_scale)
scaled_action = np.clip(scaled_action, lb, ub)
else:
scaled_action = action
else:
scaled_action = action
next_obs, reward, done, info = self.env.step(scaled_action)
if self._normalize_obs:
next_obs = self._apply_normalize_obs(next_obs)
if self._normalize_reward:
reward = self._apply_normalize_reward(reward)
return next_obs, reward * self._scale_reward, done, info
normalize = NormalizedEnv
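# A minimal usage sketch added for illustration.  'Pendulum-v0' is only an
# assumed example id; any Box-action gym environment works.  The running
# statistics are updated online, so early samples are only roughly
# normalized.
if __name__ == '__main__':
    demo_env = normalize(gym.make('Pendulum-v0'),
                         normalize_obs=True,
                         normalize_reward=True,
                         scale_reward=0.1)
    obs = demo_env.reset()
    for _ in range(5):
        # Actions are taken in [-1, 1] and rescaled to the true bounds.
        obs, reward, done, info = demo_env.step(
            demo_env.action_space.sample())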
| 6,471 | 34.756906 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/point_env.py | """Simple 2D environment containing a point and a goal location."""
import gym
import numpy as np
from garage.envs.step import Step
class PointEnv(gym.Env):
"""A simple 2D point environment.
Attributes:
observation_space (gym.spaces.Box): The observation space
action_space (gym.spaces.Box): The action space
Args:
goal (np.ndarray): A 2D array representing the goal position
arena_size (float): The size of arena where the point is constrained
within (-arena_size, arena_size) in each dimension
done_bonus (float): A numerical bonus added to the reward
            once the point has reached the goal
never_done (bool): Never send a `done` signal, even if the
agent achieves the goal
"""
def __init__(
self,
goal=np.array((1., 1.), dtype=np.float32),
arena_size=5.,
done_bonus=0.,
never_done=False,
):
goal = np.array(goal, dtype=np.float32)
self._goal = goal
self._done_bonus = done_bonus
self._never_done = never_done
self._arena_size = arena_size
assert ((goal >= -arena_size) & (goal <= arena_size)).all()
self._point = np.zeros_like(self._goal)
self._task = {'goal': self._goal}
@property
def observation_space(self):
"""gym.spaces.Box: The observation space."""
return gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=(3, ),
dtype=np.float32)
@property
def action_space(self):
"""gym.spaces.Box: The action space."""
return gym.spaces.Box(low=-0.1,
high=0.1,
shape=(2, ),
dtype=np.float32)
def reset(self):
"""Reset the environment.
Returns:
np.ndarray: Observation of the environment.
"""
self._point = np.zeros_like(self._goal)
dist = np.linalg.norm(self._point - self._goal)
return np.concatenate([self._point, (dist, )])
def step(self, action):
"""Step the environment state.
Args:
action (np.ndarray): The action to take in the environment.
Returns:
np.ndarray: Observation. The observation of the environment.
float: Reward. The reward acquired at this time step.
            boolean: Done. Whether the environment was completed at this
                time step. True when the goal is reached, unless
                `never_done` is set.
"""
# enforce action space
a = action.copy() # NOTE: we MUST copy the action before modifying it
a = np.clip(a, self.action_space.low, self.action_space.high)
self._point = np.clip(self._point + a, -self._arena_size,
self._arena_size)
dist = np.linalg.norm(self._point - self._goal)
succ = dist < np.linalg.norm(self.action_space.low)
# dense reward
reward = -dist
# done bonus
if succ:
reward += self._done_bonus
# sometimes we don't want to terminate
done = succ and not self._never_done
obs = np.concatenate([self._point, (dist, )])
return Step(obs, reward, done, task=self._task, success=succ)
def render(self, mode='human'):
"""Draw the environment.
Not implemented.
Args:
mode (str): Ignored.
"""
# pylint: disable=no-self-use
def sample_tasks(self, num_tasks):
"""Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, np.ndarray]]: A list of "tasks", where each task is
a dictionary containing a single key, "goal", mapping to a
point in 2D space.
"""
goals = np.random.uniform(-2, 2, size=(num_tasks, 2))
tasks = [{'goal': goal} for goal in goals]
return tasks
def set_task(self, task):
"""Reset with a task.
Args:
task (dict[str, np.ndarray]): A task (a dictionary containing a
single key, "goal", which should be a point in 2D space).
"""
self._task = task
self._goal = task['goal']
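# A minimal usage sketch added for illustration: sample goal tasks and run a
# short random rollout on one of them.
if __name__ == '__main__':
    demo_env = PointEnv(goal=(1., 1.), never_done=True)
    demo_env.set_task(demo_env.sample_tasks(2)[0])
    obs = demo_env.reset()
    for _ in range(10):
        obs, reward, done, info = demo_env.step(
            demo_env.action_space.sample())
    print(obs, reward, info['success'])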
| 4,375 | 29.601399 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/step.py | """Environment step data type."""
import collections
def Step(observation, reward, done, **kwargs): # noqa: N802
"""Create a namedtuple from the results of environment.step(action).
Provides the option to put extra diagnostic info in the kwargs (if it
exists) without demanding an explicit positional argument.
Args:
observation (object): Agent's observation of the current environment
reward (float) : Amount of reward returned after previous action
done (bool): Whether the episode has ended, in which case further
step() calls will return undefined results
kwargs: Keyword args
Returns:
collections.namedtuple: A named tuple of the arguments.
"""
return _Step(observation, reward, done, kwargs)
_Step = collections.namedtuple('Step',
['observation', 'reward', 'done', 'info'])
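# A minimal usage sketch added for illustration: any extra keyword arguments
# are collected into the `info` field of the returned namedtuple.
if __name__ == '__main__':
    step = Step(observation=[0., 0.], reward=1.0, done=False, success=False)
    print(step.observation, step.reward, step.done, step.info['success'])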
| 900 | 32.37037 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/task_onehot_wrapper.py | """Wrapper for appending one-hot task encodings to individual task envs.
See `~TaskOnehotWrapper.wrap_env_list` for the main way of using this module.
"""
import akro
import gym
import numpy as np
from garage.envs.env_spec import EnvSpec
class TaskOnehotWrapper(gym.Wrapper):
"""Append a one-hot task representation to an environment.
See TaskOnehotWrapper.wrap_env_list for the recommended way of creating
this class.
Args:
env (gym.Env): The environment to wrap.
task_index (int): The index of this task among the tasks.
n_total_tasks (int): The number of total tasks.
"""
def __init__(self, env, task_index, n_total_tasks):
assert 0 <= task_index < n_total_tasks
super().__init__(env)
self._task_index = task_index
self._n_total_tasks = n_total_tasks
env_lb = self.env.observation_space.low
env_ub = self.env.observation_space.high
one_hot_ub = np.ones(self._n_total_tasks)
one_hot_lb = np.zeros(self._n_total_tasks)
self.observation_space = akro.Box(np.concatenate([env_lb, one_hot_lb]),
np.concatenate([env_ub, one_hot_ub]))
self.__spec = EnvSpec(action_space=self.action_space,
observation_space=self.observation_space)
@property
def spec(self):
"""Return the environment specification.
Returns:
            garage.envs.env_spec.EnvSpec: The environment specification.
"""
return self.__spec
def reset(self, **kwargs):
"""Sample new task and call reset on new task env.
Args:
kwargs (dict): Keyword arguments to be passed to env.reset
Returns:
numpy.ndarray: active task one-hot representation + observation
"""
return self._obs_with_one_hot(self.env.reset(**kwargs))
def step(self, action):
"""gym.Env step for the active task env.
Args:
action (np.ndarray): Action performed by the agent in the
environment.
Returns:
tuple:
np.ndarray: Agent's observation of the current environment.
float: Amount of reward yielded by previous action.
bool: True iff the episode has ended.
dict[str, np.ndarray]: Contains auxiliary diagnostic
information about this time-step.
"""
obs, reward, done, info = self.env.step(action)
oh_obs = self._obs_with_one_hot(obs)
info['task_id'] = self._task_index
return oh_obs, reward, done, info
def _obs_with_one_hot(self, obs):
"""Concatenate observation and task one-hot.
Args:
obs (numpy.ndarray): observation
Returns:
numpy.ndarray: observation + task one-hot.
"""
one_hot = np.zeros(self._n_total_tasks)
one_hot[self._task_index] = 1.0
return np.concatenate([obs, one_hot])
@classmethod
def wrap_env_list(cls, envs):
"""Wrap a list of environments, giving each environment a one-hot.
This is the primary way of constructing instances of this class.
It's mostly useful when training multi-task algorithms using a
multi-task aware sampler.
For example:
'''
.. code-block:: python
envs = get_mt10_envs()
wrapped = TaskOnehotWrapper.wrap_env_list(envs)
sampler = runner.make_sampler(LocalSampler, env=wrapped)
'''
Args:
envs (list[gym.Env]): List of environments to wrap. Note that the
order these environments are passed in determines the value of
their one-hot encoding. It is essential that this list is
always in the same order, or the resulting encodings will be
inconsistent.
Returns:
list[TaskOnehotWrapper]: The wrapped environments.
"""
n_total_tasks = len(envs)
wrapped = []
for i, env in enumerate(envs):
wrapped.append(cls(env, task_index=i, n_total_tasks=n_total_tasks))
return wrapped
@classmethod
def wrap_env_cons_list(cls, env_cons):
"""Wrap a list of environment constructors, giving each a one-hot.
This function is useful if you want to avoid constructing any
environments in the main experiment process, and are using a multi-task
aware remote sampler (i.e. `~RaySampler`).
For example:
'''
.. code-block:: python
env_constructors = get_mt10_env_cons()
wrapped = TaskOnehotWrapper.wrap_env_cons_list(env_constructors)
env_updates = [NewEnvUpdate(wrapped_con)
for wrapped_con in wrapped]
sampler = runner.make_sampler(RaySampler, env=env_updates)
'''
Args:
            env_cons (list[Callable[gym.Env]]): List of environment
                constructors to wrap. Note that the order these constructors
                are passed in determines the value of their one-hot
                encoding. It is essential
that this list is always in the same order, or the resulting
encodings will be inconsistent.
Returns:
list[Callable[TaskOnehotWrapper]]: The wrapped environments.
"""
n_total_tasks = len(env_cons)
wrapped = []
for i, con in enumerate(env_cons):
# Manually capture this value of i by introducing a new scope.
wrapped.append(lambda i=i, con=con: cls(
con(), task_index=i, n_total_tasks=n_total_tasks))
return wrapped
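# A minimal usage sketch added for illustration.  PointEnv is only an
# assumed example of a Box-observation environment; any such env works.
if __name__ == '__main__':
    from garage.envs.point_env import PointEnv
    wrapped_envs = TaskOnehotWrapper.wrap_env_list(
        [PointEnv(goal=(1., 1.)), PointEnv(goal=(-1., 1.))])
    first = wrapped_envs[0]
    obs = first.reset()  # point observation followed by the one-hot [1, 0]
    obs, reward, done, info = first.step(first.action_space.sample())
    print(info['task_id'])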
| 5,737 | 32.952663 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/dm_control/__init__.py | """
Wrappers for the DeepMind Control Suite.
See https://github.com/deepmind/dm_control
"""
try:
import dm_control # noqa: F401
except ImportError:
raise ImportError("To use garage's dm_control wrappers, please install "
'garage[dm_control].')
from garage.envs.dm_control.dm_control_viewer import DmControlViewer
from garage.envs.dm_control.dm_control_env import DmControlEnv # noqa: I100
__all__ = ['DmControlViewer', 'DmControlEnv']
| 470 | 28.4375 | 76 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/dm_control/dm_control_env.py | from dm_control import suite
from dm_control.rl.control import flatten_observation
from dm_env import StepType
import gym
import numpy as np
from garage.envs import Step
from garage.envs.dm_control.dm_control_viewer import DmControlViewer
class DmControlEnv(gym.Env):
"""
Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_
"""
def __init__(self, env, name=None):
self._name = name or type(env.task).__name__
self._env = env
self._viewer = None
@classmethod
def from_suite(cls, domain_name, task_name):
return cls(suite.load(domain_name, task_name),
name='{}.{}'.format(domain_name, task_name))
def step(self, action):
time_step = self._env.step(action)
return Step(
flatten_observation(time_step.observation)['observations'],
time_step.reward, time_step.step_type == StepType.LAST,
**time_step.observation)
def reset(self):
time_step = self._env.reset()
return flatten_observation(time_step.observation)['observations']
def render(self, mode='human'):
# pylint: disable=inconsistent-return-statements
if mode == 'human':
if not self._viewer:
title = 'dm_control {}'.format(self._name)
self._viewer = DmControlViewer(title=title)
self._viewer.launch(self._env)
self._viewer.render()
return None
elif mode == 'rgb_array':
return self._env.physics.render()
else:
raise NotImplementedError
def close(self):
if self._viewer:
self._viewer.close()
self._env.close()
self._viewer = None
self._env = None
def _flat_shape(self, observation):
        return sum(int(np.prod(v.shape)) for v in observation.values())
@property
def action_space(self):
action_spec = self._env.action_spec()
if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or
np.inf in action_spec.maximum):
return gym.spaces.Discrete(np.prod(action_spec.shape))
else:
return gym.spaces.Box(action_spec.minimum,
action_spec.maximum,
dtype=np.float32)
@property
def observation_space(self):
flat_dim = self._flat_shape(self._env.observation_spec())
return gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=[flat_dim],
dtype=np.float32)
def __getstate__(self):
d = self.__dict__.copy()
d['_viewer'] = None
return d
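# A minimal usage sketch added for illustration; it needs the optional
# dm_control dependency and assumes the standard 'cartpole'/'swingup' task.
if __name__ == '__main__':
    demo_env = DmControlEnv.from_suite('cartpole', 'swingup')
    obs = demo_env.reset()
    obs, reward, done, info = demo_env.step(demo_env.action_space.sample())
    demo_env.close()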
| 2,773 | 32.02381 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/dm_control/dm_control_viewer.py | """ Wrapper for the dm_control viewer which allows single-stepping """
import dm_control.viewer.application as dm_viewer_app
import glfw
class DmControlViewer(dm_viewer_app.Application):
def render(self):
# Don't try to render into closed windows
if not self._window._context:
return
self._render_once()
# Just keep rendering if we're paused, but hold onto control flow
while self._pause_subject.value:
self._render_once()
def _render_once(self):
# See https://github.com/deepmind/dm_control/blob/92f9913013face0468442cd0964d5973ea2089ea/dm_control/viewer/gui/glfw_gui.py#L280 # noqa: E501
window = self._window
tick_func = self._tick_func
if (window._context
and not glfw.window_should_close(window._context.window)):
pixels = tick_func()
with window._context.make_current() as ctx:
ctx.call(window._update_gui_on_render_thread,
window._context.window, pixels)
window._mouse.process_events()
window._keyboard.process_events()
else:
window.close()
def launch(self, environment_loader, policy=None):
# See https://github.com/deepmind/dm_control/blob/92f9913013face0468442cd0964d5973ea2089ea/dm_control/viewer/application.py#L304 # noqa: E501
if environment_loader is None:
raise ValueError('"environment_loader" argument is required.')
if callable(environment_loader):
self._environment_loader = environment_loader
else:
self._environment_loader = lambda: environment_loader
self._policy = policy
self._load_environment(zoom_to_scene=True)
def tick():
self._viewport.set_size(*self._window.shape)
self._tick()
return self._renderer.pixels
self._tick_func = tick
# Start unpaused
self._pause_subject.value = False
def close(self):
self._window.close()
| 2,059 | 35.785714 | 151 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/mujoco/__init__.py | """Garage wrappers for mujoco based gym environments."""
try:
import mujoco_py # noqa: F401
except Exception as e:
raise e
from garage.envs.mujoco.half_cheetah_dir_env import HalfCheetahDirEnv
from garage.envs.mujoco.half_cheetah_vel_env import HalfCheetahVelEnv
__all__ = [
'HalfCheetahDirEnv',
'HalfCheetahVelEnv',
]
| 338 | 23.214286 | 69 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/mujoco/half_cheetah_dir_env.py | """Variant of the HalfCheetahEnv with different target directions."""
import numpy as np
from garage.envs.mujoco.half_cheetah_env_meta_base import HalfCheetahEnvMetaBase # noqa: E501
class HalfCheetahDirEnv(HalfCheetahEnvMetaBase):
"""Half-cheetah environment with target direction, as described in [1].
The code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/half_cheetah_env_rand_direc.py
The half-cheetah follows the dynamics from MuJoCo [2], and receives at each
time step a reward composed of a control cost and a reward equal to its
velocity in the target direction. The tasks are generated by sampling the
target directions from a Bernoulli distribution on {-1, 1} with parameter
0.5 (-1: backward, +1: forward).
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] Emanuel Todorov, Tom Erez, Yuval Tassa, "MuJoCo: A physics engine for
model-based control", 2012
(https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)
Args:
task (dict or None):
direction (float): Target direction, either -1 or 1.
"""
def __init__(self, task=None):
super().__init__(task or {'direction': 1.})
def step(self, action):
"""Take one step in the environment.
Equivalent to step in HalfCheetahEnv, but with different rewards.
Args:
action (np.ndarray): The action to take in the environment.
Returns:
tuple:
* observation (np.ndarray): The observation of the environment.
* reward (float): The reward acquired at this time step.
* done (boolean): Whether the environment was completed at this
time step. Always False for this environment.
* infos (dict):
* reward_forward (float): Reward for moving, ignoring the
control cost.
* reward_ctrl (float): The reward for acting i.e. the
control cost (always negative).
* task_dir (float): Target direction. 1.0 for forwards,
-1.0 for backwards.
"""
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
forward_vel = (xposafter - xposbefore) / self.dt
forward_reward = self._task['direction'] * forward_vel
ctrl_cost = 0.5 * 1e-1 * np.sum(np.square(action))
observation = self._get_obs()
reward = forward_reward - ctrl_cost
done = False
infos = dict(reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
task_dir=self._task['direction'])
return observation, reward, done, infos
def sample_tasks(self, num_tasks):
"""Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, float]]: A list of "tasks," where each task is a
dictionary containing a single key, "direction", mapping to -1
or 1.
"""
directions = (
2 * self.np_random.binomial(1, p=0.5, size=(num_tasks, )) - 1)
tasks = [{'direction': direction} for direction in directions]
return tasks
def set_task(self, task):
"""Reset with a task.
Args:
task (dict[str, float]): A task (a dictionary containing a single
key, "direction", mapping to -1 or 1).
"""
self._task = task
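# A minimal usage sketch added for illustration; it requires the optional
# mujoco_py dependency and a working MuJoCo install.
if __name__ == '__main__':
    demo_env = HalfCheetahDirEnv()
    demo_env.set_task(demo_env.sample_tasks(2)[0])
    obs = demo_env.reset()
    obs, reward, done, info = demo_env.step(demo_env.action_space.sample())
    print(info['task_dir'], reward)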
| 3,802 | 37.03 | 132 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/mujoco/half_cheetah_env_meta_base.py | """Base class of HalfCheetah meta-environments."""
from gym.envs.mujoco import HalfCheetahEnv as HalfCheetahEnv_
import numpy as np
class HalfCheetahEnvMetaBase(HalfCheetahEnv_):
"""Base class of HalfCheetah meta-environments.
Code is adapted from
https://github.com/tristandeleu/pytorch-maml-rl/blob/493e677e724aa67a531250b0e215c8dbc9a7364a/maml_rl/envs/mujoco/half_cheetah.py
Which was in turn adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/half_cheetah_env_rand.py
Args:
task (dict): Subclass specific task information.
"""
def __init__(self, task):
self._task = task
super().__init__()
def _get_obs(self):
"""Get a low-dimensional observation of the state.
Returns:
np.ndarray: Contains the flattened angle quaternion, angular
velocity quaternion, and cartesian position.
"""
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
self.get_body_com('torso').flat,
]).astype(np.float32).flatten()
def viewer_setup(self):
"""Start the viewer."""
camera_id = self.model.camera_name2id('track')
self.viewer.cam.type = 2
self.viewer.cam.fixedcamid = camera_id
self.viewer.cam.distance = self.model.stat.extent * 0.35
# Hide the overlay
        # This code was inherited, so we'll ignore this access violation for
# now.
# pylint: disable=protected-access
self.viewer._hide_overlay = True
def __getstate__(self):
"""See `Object.__getstate__.
Returns:
dict: The instance’s dictionary to be pickled.
"""
return dict(task=self._task)
def __setstate__(self, state):
"""See `Object.__setstate__.
Args:
state (dict): Unpickled state of this object.
"""
self.__init__(task=state['task'])
| 2,020 | 29.164179 | 133 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/mujoco/half_cheetah_vel_env.py | """Variant of the HalfCheetahEnv with different target velocity."""
import numpy as np
from garage.envs.mujoco.half_cheetah_env_meta_base import HalfCheetahEnvMetaBase # noqa: E501
class HalfCheetahVelEnv(HalfCheetahEnvMetaBase):
"""Half-cheetah environment with target velocity, as described in [1].
The code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/half_cheetah_env_rand.py
The half-cheetah follows the dynamics from MuJoCo [2], and receives at each
time step a reward composed of a control cost and a penalty equal to the
difference between its current velocity and the target velocity. The tasks
are generated by sampling the target velocities from the uniform
distribution on [0, 2].
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] Emanuel Todorov, Tom Erez, Yuval Tassa, "MuJoCo: A physics engine for
model-based control", 2012
(https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)
Args:
task (dict or None):
velocity (float): Target velocity, usually between 0 and 2.
"""
def __init__(self, task=None):
super().__init__(task or {'velocity': 0.})
def step(self, action):
"""Take one step in the environment.
Equivalent to step in HalfCheetahEnv, but with different rewards.
Args:
action (np.ndarray): The action to take in the environment.
Returns:
tuple:
* observation (np.ndarray): The observation of the environment.
* reward (float): The reward acquired at this time step.
* done (boolean): Whether the environment was completed at this
time step. Always False for this environment.
* infos (dict):
* reward_forward (float): Reward for moving, ignoring the
control cost.
* reward_ctrl (float): The reward for acting i.e. the
control cost (always negative).
* task_vel (float): Target velocity.
Usually between 0 and 2.
"""
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
forward_vel = (xposafter - xposbefore) / self.dt
forward_reward = -1.0 * abs(forward_vel - self._task['velocity'])
ctrl_cost = 0.5 * 1e-1 * np.sum(np.square(action))
observation = self._get_obs()
reward = forward_reward - ctrl_cost
done = False
infos = dict(reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
task_vel=self._task['velocity'])
return observation, reward, done, infos
def sample_tasks(self, num_tasks):
"""Sample a list of `num_tasks` tasks.
Args:
num_tasks (int): Number of tasks to sample.
Returns:
list[dict[str, float]]: A list of "tasks," where each task is a
dictionary containing a single key, "velocity", mapping to a
value between 0 and 2.
"""
velocities = self.np_random.uniform(0.0, 2.0, size=(num_tasks, ))
tasks = [{'velocity': velocity} for velocity in velocities]
return tasks
def set_task(self, task):
"""Reset with a task.
Args:
task (dict[str, float]): A task (a dictionary containing a single
key, "velocity", usually between 0 and 2).
"""
self._task = task
| 3,771 | 37.10101 | 126 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/__init__.py | """gym.Env wrappers.
Used to transform an environment in a modular way.
It is also possible to apply multiple wrappers at the same
time.
Example:
StackFrames(GrayScale(gym.make('env')))
"""
from garage.envs.wrappers.atari_env import AtariEnv
from garage.envs.wrappers.clip_reward import ClipReward
from garage.envs.wrappers.episodic_life import EpisodicLife
from garage.envs.wrappers.fire_reset import FireReset
from garage.envs.wrappers.grayscale import Grayscale
from garage.envs.wrappers.max_and_skip import MaxAndSkip
from garage.envs.wrappers.noop import Noop
from garage.envs.wrappers.resize import Resize
from garage.envs.wrappers.stack_frames import StackFrames
__all__ = [
'AtariEnv', 'ClipReward', 'EpisodicLife', 'FireReset', 'Grayscale',
'MaxAndSkip', 'Noop', 'Resize', 'StackFrames'
]
| 814 | 31.6 | 71 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/atari_env.py | """Episodic life wrapper for gym.Env."""
import gym
import numpy as np
class AtariEnv(gym.Wrapper):
"""Atari environment wrapper for gym.Env.
This wrapper convert the observations returned from baselines wrapped
environment, which is a LazyFrames object into numpy arrays.
Args:
env (gym.Env): The environment to be wrapped.
"""
def __init__(self, env):
super().__init__(env)
def step(self, action):
"""gym.Env step function."""
obs, reward, done, info = self.env.step(action)
return np.asarray(obs), reward, done, info
def reset(self, **kwargs):
"""gym.Env reset function."""
return np.asarray(self.env.reset())
| 708 | 25.259259 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/clip_reward.py | """Clip reward for gym.Env."""
import gym
import numpy as np
class ClipReward(gym.Wrapper):
"""Clip the reward by its sign."""
def step(self, ac):
"""gym.Env step function."""
obs, reward, done, info = self.env.step(ac)
return obs, np.sign(reward), done, info
def reset(self):
"""gym.Env reset."""
return self.env.reset()
| 378 | 21.294118 | 51 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/episodic_life.py | """Episodic life wrapper for gym.Env."""
import gym
class EpisodicLife(gym.Wrapper):
"""Episodic life wrapper for gym.Env.
    This wrapper makes the episode end when a life is lost, but only resets
    the underlying environment when all lives are lost.
Args:
env: The environment to be wrapped.
"""
def __init__(self, env):
super().__init__(env)
self._lives = 0
self._was_real_done = True
def step(self, action):
"""gym.Env step function."""
obs, reward, done, info = self.env.step(action)
self._was_real_done = done
lives = self.env.unwrapped.ale.lives()
if lives < self._lives and lives > 0:
done = True
self._lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""
gym.Env reset function.
        Reset the underlying environment only when all lives are lost.
"""
if self._was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step
obs, _, _, _ = self.env.step(0)
self._lives = self.env.unwrapped.ale.lives()
return obs
| 1,112 | 24.883721 | 70 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/fire_reset.py | """Fire reset wrapper for gym.Env."""
import gym
class FireReset(gym.Wrapper):
"""Fire reset wrapper for gym.Env.
Take action "fire" on reset.
Args:
env (gym.Env): The environment to be wrapped.
"""
def __init__(self, env):
super().__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (
'Only use fire reset wrapper for suitable environment!')
assert len(env.unwrapped.get_action_meanings()) >= 3, (
'Only use fire reset wrapper for suitable environment!')
def step(self, action):
"""gym.Env step function."""
return self.env.step(action)
def reset(self, **kwargs):
"""gym.Env reset function."""
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
obs = self.env.reset(**kwargs)
return obs
| 884 | 26.65625 | 68 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/grayscale.py | """Grayscale wrapper for gym.Env."""
import warnings
import gym
import gym.spaces
import numpy as np
from skimage import color
from skimage import img_as_ubyte
class Grayscale(gym.Wrapper):
"""Grayscale wrapper for gym.Env, converting frames to grayscale.
Only works with gym.spaces.Box environment with 2D RGB frames.
The last dimension (RGB) of environment observation space will be removed.
Example:
env = gym.make('Env')
# env.observation_space = (100, 100, 3)
env_wrapped = Grayscale(gym.make('Env'))
# env.observation_space = (100, 100)
Args:
env: gym.Env to wrap.
Raises:
ValueError: If observation space shape is not 3
or environment is not gym.spaces.Box.
"""
def __init__(self, env):
if not isinstance(env.observation_space, gym.spaces.Box):
raise ValueError(
'Grayscale only works with gym.spaces.Box environment.')
if len(env.observation_space.shape) != 3:
raise ValueError('Grayscale only works with 2D RGB images')
super().__init__(env)
_low = env.observation_space.low.flatten()[0]
_high = env.observation_space.high.flatten()[0]
assert _low == 0
assert _high == 255
self._observation_space = gym.spaces.Box(
_low,
_high,
shape=env.observation_space.shape[:-1],
dtype=np.uint8)
@property
def observation_space(self):
"""gym.Env observation space."""
return self._observation_space
@observation_space.setter
def observation_space(self, observation_space):
self._observation_space = observation_space
def _observation(self, obs):
with warnings.catch_warnings():
"""
Suppressing warning for possible precision loss
when converting from float64 to uint8
"""
warnings.simplefilter('ignore')
return img_as_ubyte(color.rgb2gray((obs)))
def reset(self):
"""gym.Env reset function."""
return self._observation(self.env.reset())
def step(self, action):
"""gym.Env step function."""
obs, reward, done, info = self.env.step(action)
return self._observation(obs), reward, done, info
| 2,319 | 28.367089 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/max_and_skip.py | """Max and Skip wrapper for gym.Env."""
import gym
import numpy as np
class MaxAndSkip(gym.Wrapper):
"""Max and skip wrapper for gym.Env.
    It returns only every `skip`-th frame. Actions are repeated and rewards
    are summed over the skipped frames.
    It also takes the element-wise maximum over the last two consecutive
    frames, which helps the algorithm deal with certain Atari games that
    only render their sprites every other game frame.
Args:
env: The environment to be wrapped.
        skip: The environment only returns every `skip`-th frame.
"""
def __init__(self, env, skip=4):
super().__init__(env)
self._obs_buffer = np.zeros((2, ) + env.observation_space.shape,
dtype=np.uint8)
self._skip = skip
def step(self, action):
"""
gym.Env step.
Repeat action, sum reward, and max over last two observations.
"""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
elif i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self):
"""gym.Env reset."""
return self.env.reset()
| 1,515 | 28.72549 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/noop.py | """Noop wrapper for gym.Env."""
import gym
import numpy as np
class Noop(gym.Wrapper):
"""Noop wrapper for gym.Env.
    It samples initial states by taking a random number of no-ops on reset.
No-op is assumed to be action 0.
Args:
env (gym.Env): The environment to be wrapped.
noop_max (int): Maximum number no-op to be performed on reset.
"""
def __init__(self, env, noop_max=30):
super().__init__(env)
self._noop_max = noop_max
self._noop_action = 0
assert noop_max > 0, 'noop_max should be larger than 0!'
assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (
"No-op should be the 0-th action but it's not in {}!".format(env))
def step(self, action):
"""gym.Env step function."""
return self.env.step(action)
def reset(self, **kwargs):
"""gym.Env reset function."""
obs = self.env.reset(**kwargs)
noops = np.random.randint(1, self._noop_max + 1)
for _ in range(noops):
obs, _, done, _ = self.step(self._noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
| 1,172 | 29.868421 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/resize.py | """Resize wrapper for gym.Env."""
import warnings
import gym
import gym.spaces
import numpy as np
from skimage import img_as_ubyte
from skimage.transform import resize
class Resize(gym.Wrapper):
"""gym.Env wrapper for resizing frame to (width, height).
Only works with gym.spaces.Box environment with 2D single channel frames.
Example:
| env = gym.make('Env')
| # env.observation_space = (100, 100)
| env_wrapped = Resize(gym.make('Env'), width=64, height=64)
| # env.observation_space = (64, 64)
Args:
env: gym.Env to wrap.
width: resized frame width.
height: resized frame height.
Raises:
ValueError: If observation space shape is not 2
or environment is not gym.spaces.Box.
"""
def __init__(self, env, width, height):
if not isinstance(env.observation_space, gym.spaces.Box):
raise ValueError('Resize only works with Box environment.')
if len(env.observation_space.shape) != 2:
raise ValueError('Resize only works with 2D single channel image.')
super().__init__(env)
_low = env.observation_space.low.flatten()[0]
_high = env.observation_space.high.flatten()[0]
self._dtype = env.observation_space.dtype
self._observation_space = gym.spaces.Box(_low,
_high,
shape=[width, height],
dtype=self._dtype)
self._width = width
self._height = height
@property
def observation_space(self):
"""gym.Env observation space."""
return self._observation_space
@observation_space.setter
def observation_space(self, observation_space):
self._observation_space = observation_space
def _observation(self, obs):
with warnings.catch_warnings():
"""
Suppressing warnings for
1. possible precision loss when converting from float64 to uint8
2. anti-aliasing will be enabled by default in skimage 0.15
"""
warnings.simplefilter('ignore')
obs = resize(obs, (self._width, self._height)) # now it's float
if self._dtype == np.uint8:
obs = img_as_ubyte(obs)
return obs
def reset(self):
"""gym.Env reset function."""
return self._observation(self.env.reset())
def step(self, action):
"""gym.Env step function."""
obs, reward, done, info = self.env.step(action)
return self._observation(obs), reward, done, info
| 2,671 | 31.192771 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/envs/wrappers/stack_frames.py | """Stack frames wrapper for gym.Env."""
from collections import deque
import gym
import gym.spaces
import numpy as np
class StackFrames(gym.Wrapper):
"""gym.Env wrapper to stack multiple frames.
Useful for training feed-forward agents on dynamic games.
Only works with gym.spaces.Box environment with 2D single channel frames.
Args:
env: gym.Env to wrap.
n_frames: number of frames to stack.
Raises:
ValueError: If observation space shape is not 2 or
environment is not gym.spaces.Box.
"""
def __init__(self, env, n_frames):
if not isinstance(env.observation_space, gym.spaces.Box):
raise ValueError('Stack frames only works with gym.spaces.Box '
'environment.')
if len(env.observation_space.shape) != 2:
raise ValueError(
'Stack frames only works with 2D single channel images')
super().__init__(env)
self._n_frames = n_frames
self._frames = deque(maxlen=n_frames)
new_obs_space_shape = env.observation_space.shape + (n_frames, )
_low = env.observation_space.low.flatten()[0]
_high = env.observation_space.high.flatten()[0]
self._observation_space = gym.spaces.Box(
_low,
_high,
shape=new_obs_space_shape,
dtype=env.observation_space.dtype)
@property
def observation_space(self):
"""gym.Env observation space."""
return self._observation_space
@observation_space.setter
def observation_space(self, observation_space):
self._observation_space = observation_space
def _stack_frames(self):
return np.stack(self._frames, axis=2)
def reset(self):
"""gym.Env reset function."""
observation = self.env.reset()
self._frames.clear()
        for _ in range(self._n_frames):
self._frames.append(observation)
return self._stack_frames()
def step(self, action):
"""gym.Env step function."""
new_observation, reward, done, info = self.env.step(action)
self._frames.append(new_observation)
return self._stack_frames(), reward, done, info
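# A minimal usage sketch added for illustration: a typical Atari
# preprocessing chain built from the wrappers in this package.
# 'PongNoFrameskip-v4' is only an assumed example id and needs gym's atari
# extras installed.
if __name__ == '__main__':
    from garage.envs.wrappers import (EpisodicLife, Grayscale, MaxAndSkip,
                                      Noop, Resize)
    demo_env = gym.make('PongNoFrameskip-v4')
    demo_env = Noop(demo_env, noop_max=30)
    demo_env = MaxAndSkip(demo_env, skip=4)
    demo_env = EpisodicLife(demo_env)
    demo_env = Grayscale(demo_env)
    demo_env = Resize(demo_env, width=84, height=84)
    demo_env = StackFrames(demo_env, n_frames=4)
    obs = demo_env.reset()
    print(obs.shape)  # expected: (84, 84, 4)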
| 2,228 | 28.72 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/__init__.py | """Experiment functions."""
from garage.experiment.experiment import run_experiment
from garage.experiment.experiment import to_local_command
from garage.experiment.local_runner import LocalRunner
from garage.experiment.local_tf_runner import LocalTFRunner
from garage.experiment.meta_evaluator import MetaEvaluator
from garage.experiment.snapshotter import SnapshotConfig, Snapshotter
from garage.experiment.task_sampler import TaskSampler
__all__ = [
'run_experiment',
'to_local_command',
'LocalRunner',
'LocalTFRunner',
'MetaEvaluator',
'Snapshotter',
'SnapshotConfig',
'TaskSampler',
]
| 623 | 30.2 | 69 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/deterministic.py | """Utilities for ensuring that experiments are deterministic."""
import random
import sys
import warnings
import numpy as np
seed_ = None
seed_stream_ = None
def set_seed(seed):
"""Set the process-wide random seed.
Args:
seed (int): A positive integer
"""
seed %= 4294967294
# pylint: disable=global-statement
global seed_
global seed_stream_
seed_ = seed
random.seed(seed)
np.random.seed(seed)
if 'tensorflow' in sys.modules:
import tensorflow as tf # pylint: disable=import-outside-toplevel
tf.compat.v1.set_random_seed(seed)
try:
# pylint: disable=import-outside-toplevel
import tensorflow_probability as tfp
seed_stream_ = tfp.util.SeedStream(seed_, salt='garage')
except ImportError:
pass
if 'torch' in sys.modules:
warnings.warn(
            'Enabling deterministic mode in PyTorch can have a performance '
'impact when using GPU.')
import torch # pylint: disable=import-outside-toplevel
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_seed():
"""Get the process-wide random seed.
Returns:
int: The process-wide random seed
"""
return seed_
def get_tf_seed_stream():
"""Get the pseudo-random number generator (PRNG) for TensorFlow ops.
Returns:
int: A seed generated by a PRNG with fixed global seed.
"""
if seed_stream_ is None:
set_seed(0)
return seed_stream_() % 4294967294
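# A minimal usage sketch added for illustration: seed the process-wide
# generators once, at the start of an experiment, before building
# environments or networks.
if __name__ == '__main__':
    set_seed(1)
    print(get_seed(), np.random.uniform())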
| 1,613 | 23.830769 | 77 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/experiment.py | """Tools for running experiments with Garage."""
import base64
import collections
import datetime
import enum
import functools
import gc
import inspect
import json
import os
import os.path as osp
import pathlib
import pickle
import re
import subprocess
import warnings
import cloudpickle
import dateutil.tz
import dowel
from dowel import logger
import dowel_wrapper
import __main__ as main # noqa: I100
exp_count = 0
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
EIGHT_MEBIBYTES = 8 * 2**20
def run_experiment(method_call=None,
batch_tasks=None,
exp_prefix='experiment',
exp_name=None,
log_dir=None,
script='garage.experiment.experiment_wrapper',
python_command='python',
dry=False,
env=None,
variant=None,
force_cpu=False,
pre_commands=None,
**kwargs):
# pylint: disable=missing-raises-doc,too-many-branches,global-statement
"""Serialize the method call and run the experiment using specified mode.
Args:
method_call (callable): A method call.
batch_tasks (list[dict]): A batch of method calls.
exp_prefix (str): Name prefix for the experiment.
exp_name (str): Name of the experiment.
log_dir (str): Log directory for the experiment.
script (str): The name of the entrance point python script.
python_command (str): Python command to run the experiment.
dry (bool): Whether to do a dry-run, which only prints the
commands without executing them.
env (dict): Extra environment variables.
variant (dict): If provided, should be a dictionary of parameters.
force_cpu (bool): Whether to set all GPU devices invisible
to force use CPU.
pre_commands (str): Pre commands to run the experiment.
kwargs (dict): Additional parameters.
"""
warnings.warn(
DeprecationWarning(
'run_experiment is deprecated, and will be removed in the next '
'release. Please use wrap_experiment instead.'))
if method_call is None and batch_tasks is None:
raise Exception(
'Must provide at least either method_call or batch_tasks')
for task in (batch_tasks or [method_call]):
if not hasattr(task, '__call__'):
raise ValueError('batch_tasks should be callable')
# ensure variant exists
if variant is None:
variant = dict()
if batch_tasks is None:
batch_tasks = [
dict(kwargs,
pre_commands=pre_commands,
method_call=method_call,
exp_name=exp_name,
log_dir=log_dir,
env=env,
variant=variant)
]
global exp_count
if force_cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
for task in batch_tasks:
call = task.pop('method_call')
data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
task['args_data'] = data
exp_count += 1
if task.get('exp_name', None) is None:
task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp,
exp_count)
if task.get('log_dir', None) is None:
task['log_dir'] = (
'{log_dir}/local/{exp_prefix}/{exp_name}'.format(
log_dir=osp.join(os.getcwd(), 'data'),
exp_prefix=exp_prefix.replace('_', '-'),
exp_name=task['exp_name']))
if task.get('variant', None) is not None:
variant = task.pop('variant')
if 'exp_name' not in variant:
variant['exp_name'] = task['exp_name']
task['variant_data'] = base64.b64encode(
pickle.dumps(variant)).decode('utf-8')
elif 'variant' in task:
del task['variant']
task['env'] = task.get('env', dict()) or dict()
task['env']['GARAGE_FORCE_CPU'] = str(force_cpu)
for task in batch_tasks:
env = task.pop('env', None)
command = to_local_command(task,
python_command=python_command,
script=script)
print(command)
if dry:
return
try:
if env is None:
env = dict()
subprocess.run(command,
shell=True,
env=dict(os.environ, **env),
check=True)
except Exception as e:
print(e)
raise
# Matches any character that is *not* shell-safe (note the leading '^' inside
# the character class); _shellquote() only skips quoting when no unsafe
# character is present.
_find_unsafe = re.compile(r'[^a-zA-Z0-9_@%+=:,./-]').search
def _shellquote(s):
"""Return a shell-escaped version of the string *s*.
Args:
s (str): String to shell quote.
Returns:
str: The shell-quoted string.
"""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
"""Return a shell-escaped version of v.
Args:
v (object): object to shell quote
Returns:
str: The shell-quoted string.
"""
if v is None:
return ''
elif isinstance(v, list):
return ' '.join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
def to_local_command(
params,
python_command='python',
script='garage.experiment.experiment_wrapper'): # noqa: D103,E501
# noqa:E501 ; pylint: disable=eval-used,missing-return-doc,missing-return-type-doc,missing-function-docstring
command = python_command + ' -m ' + script
garage_env = eval(os.environ.get('GARAGE_ENV', '{}'))
for k, v in garage_env.items():
command = '{}={} '.format(k, v) + command
pre_commands = params.pop('pre_commands', None)
post_commands = params.pop('post_commands', None)
if pre_commands is not None or post_commands is not None:
print('Not executing the pre_commands: ', pre_commands,
', nor post_commands: ', post_commands)
for k, v in params.items():
if isinstance(v, dict):
for nk, nv in v.items():
if str(nk) == '_name':
command += ' --{} {}'.format(k, _to_param_val(nv))
else:
command += \
' --{}_{} {}'.format(k, nk, _to_param_val(nv))
else:
command += ' --{} {}'.format(k, _to_param_val(v))
return command
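# Illustrative usage sketch (editor-added; not part of the original module).
# The experiment parameters below are hypothetical and exist only to show the
# shape of the generated shell command.
def _example_to_local_command():  # pragma: no cover
    params = {'exp_name': 'demo_exp', 'seed': 1, 'snapshot_mode': 'last'}
    # Produces something like:
    #   python -m garage.experiment.experiment_wrapper --exp_name demo_exp \
    #       --seed 1 --snapshot_mode last
    return to_local_command(params)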
def _make_sequential_log_dir(log_dir):
"""Creates log_dir, appending a number if necessary.
Attempts to create the directory `log_dir`. If it already exists, appends
"_1". If that already exists, appends "_2" instead, etc.
Args:
log_dir (str): The log directory to attempt to create.
Returns:
str: The log directory actually created.
"""
i = 0
while True:
try:
if i == 0:
os.makedirs(log_dir)
else:
possible_log_dir = '{}_{}'.format(log_dir, i)
os.makedirs(possible_log_dir)
log_dir = possible_log_dir
return log_dir
except FileExistsError:
i += 1
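# Illustrative usage sketch (editor-added).  The path below is hypothetical;
# calling the helper twice with the same path demonstrates the numeric
# suffixing described in the docstring.
def _example_sequential_log_dir():  # pragma: no cover
    first = _make_sequential_log_dir('data/local/demo')   # 'data/local/demo'
    second = _make_sequential_log_dir('data/local/demo')  # 'data/local/demo_1'
    return first, second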
def _make_experiment_signature(function):
"""Generate an ExperimentTemplate's signature from its function.
Checks that the first parameter is named ctxt and removes it from the
signature. Makes all other parameters keyword only.
Args:
function (callable[ExperimentContext, ...]): The wrapped function.
Returns:
inspect.Signature: The signature of the ExperimentTemplate.
Raises:
ValueError: If the wrapped function's first parameter is not 'ctxt'.
"""
func_sig = inspect.signature(function)
new_params = []
saw_first_param = False
for param in func_sig.parameters.values():
if not saw_first_param:
# Don't output it to the experiment params, since it will contain
# the context.
if param.name != 'ctxt':
raise ValueError(
'Experiment functions should have a first '
"parameter named 'ctxt' instead of {!r}".format(
param.name))
saw_first_param = True
else:
new_params.append(
inspect.Parameter(name=param.name,
kind=inspect.Parameter.KEYWORD_ONLY,
default=param.default,
annotation=param.annotation))
if not saw_first_param:
raise ValueError(
'Experiment functions should have a first parameter '
"named 'ctxt', but {!r} has no parameters".format(function))
return inspect.Signature(new_params,
return_annotation=func_sig.return_annotation)
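# Illustrative sketch (editor-added).  Shows the signature transformation for
# a hypothetical experiment function: the leading 'ctxt' parameter is removed
# and the remaining parameters become keyword-only.
def _example_experiment_signature():  # pragma: no cover
    def my_experiment(ctxt, seed, lr=0.5):
        return ctxt, seed, lr
    sig = _make_experiment_signature(my_experiment)
    return str(sig)  # '(*, seed, lr=0.5)'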
class ExperimentContext:
"""Context in which an experiment is being run.
Currently, this class implements the same interface as SnapshotConfig, but
it will be extended in the future.
Args:
snapshot_dir (str): The full directory to put snapshots in.
snapshot_mode (str): Policy for which snapshots to keep (or make at
all). Can be either "all" (all iterations will be saved), "last"
(only the last iteration will be saved), "gap" (every snapshot_gap
iterations are saved), or "none" (do not save snapshots).
snapshot_gap (int): Gap between snapshot iterations. Waits this number
of iterations before taking another snapshot.
"""
# pylint: disable=too-few-public-methods
def __init__(self, *, snapshot_dir, snapshot_mode, snapshot_gap):
self.snapshot_dir = snapshot_dir
self.snapshot_mode = snapshot_mode
self.snapshot_gap = snapshot_gap
def get_git_commit_hash():
    """Return the hash of the current git HEAD as a string."""
    p = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
    git_commit, _ = p.communicate()
    git_commit = git_commit.strip().decode('utf-8')
    return git_commit
def save_git_diff_to_file(git_diff_file_path):
    """Write the output of `git diff --patch HEAD` to the given file path."""
    with open(git_diff_file_path, 'w') as git_diff_file:
        p = subprocess.Popen(['git', 'diff', '--patch', 'HEAD'],
                             stdout=git_diff_file)
        p.wait()
class ExperimentTemplate:
"""Creates experiment log directories and runs an experiment.
This class should only be created by calling garage.wrap_experiment.
Generally, it's used as a decorator like this:
@wrap_experiment(snapshot_mode='all')
def my_experiment(ctxt, seed, lr=0.5):
...
my_experiment(seed=1)
Even though this class could be implemented as a closure in
    wrap_experiment(), it's more readable (and easier to pickle) when
    implemented as a class.
Note that the full path that will be created is
f'{data}/local/{prefix}/{name}'.
Args:
function (callable or None): The experiment function to wrap.
log_dir (str or None): The full log directory to log to. Will be
computed from `name` if omitted.
name (str or None): The name of this experiment template. Will be
filled from the wrapped function's name if omitted.
prefix (str): Directory under data/local in which to place the
experiment directory.
snapshot_mode (str): Policy for which snapshots to keep (or make at
all). Can be either "all" (all iterations will be saved), "last"
(only the last iteration will be saved), "gap" (every snapshot_gap
iterations are saved), or "none" (do not save snapshots).
snapshot_gap (int): Gap between snapshot iterations. Waits this number
of iterations before taking another snapshot.
archive_launch_repo (bool): Whether to save an archive of the
repository containing the launcher script. This is a potentially
expensive operation which is useful for ensuring reproducibility.
name_parameters (str or None): Parameters to insert into the experiment
name. Should be either None (the default), 'all' (all parameters
will be used), or 'passed' (only passed parameters will be used).
The used parameters will be inserted in the order they appear in
the function definition.
use_existing_dir (bool): If true, (re)use the directory for this
experiment, even if it already contains data.
"""
# pylint: disable=too-few-public-methods
def __init__(self, *, function, log_dir, name, prefix, snapshot_mode,
snapshot_gap, archive_launch_repo, name_parameters,
use_existing_dir):
self.function = function
self.log_dir = log_dir
self.name = name
self.prefix = prefix
self.snapshot_mode = snapshot_mode
self.snapshot_gap = snapshot_gap
self.archive_launch_repo = archive_launch_repo
self.name_parameters = name_parameters
self.use_existing_dir = use_existing_dir
if self.function is not None:
self._update_wrap_params()
def _update_wrap_params(self):
"""Update self to "look like" the wrapped funciton.
Mostly, this involves creating a function signature for the
ExperimentTemplate that looks like the wrapped function, but with the
first argument (ctxt) excluded, and all other arguments required to be
keyword only.
"""
functools.update_wrapper(self, self.function)
self.__signature__ = _make_experiment_signature(self.function)
@classmethod
def _augment_name(cls, options, name, params):
"""Augment the experiment name with parameters.
Args:
options (dict): Options to `wrap_experiment` itself. See the
function documentation for details.
name (str): Name without parameter names.
params (dict): Dictionary of parameters.
Raises:
ValueError: If self.name_parameters is not set to None, "passed",
or "all".
Returns:
str: Returns the augmented name.
"""
name_parameters = collections.OrderedDict()
if options['name_parameters'] == 'passed':
for param in options['signature'].parameters.values():
try:
name_parameters[param.name] = params[param.name]
except KeyError:
pass
elif options['name_parameters'] == 'all':
for param in options['signature'].parameters.values():
name_parameters[param.name] = params.get(
param.name, param.default)
elif options['name_parameters'] is not None:
raise ValueError('wrap_experiment.name_parameters should be set '
'to one of None, "passed", or "all"')
param_str = '_'.join('{}={}'.format(k, v)
for (k, v) in name_parameters.items())
if param_str:
return '{}_{}'.format(name, param_str)
else:
return name
def _get_options(self, *args):
"""Get the options for wrap_experiment.
This method combines options passed to `wrap_experiment` itself and to
the wrapped experiment.
Args:
args (list[dict]): Unnamed arguments to the wrapped experiment. May
be an empty list or a list containing a single dictionary.
Raises:
ValueError: If args contains more than one value, or the value is
not a dictionary containing at most the same keys as are
arguments to `wrap_experiment`.
Returns:
dict: The final options.
"""
options = dict(name=self.name,
function=self.function,
prefix=self.prefix,
name_parameters=self.name_parameters,
log_dir=self.log_dir,
archive_launch_repo=self.archive_launch_repo,
snapshot_gap=self.snapshot_gap,
snapshot_mode=self.snapshot_mode,
use_existing_dir=self.use_existing_dir,
signature=self.__signature__)
if args:
if len(args) == 1 and isinstance(args[0], dict):
for k in args[0]:
if k not in options:
raise ValueError('Unknown key {} in wrap_experiment '
'options'.format(k))
options.update(args[0])
else:
raise ValueError('garage.experiment currently only supports '
'keyword arguments')
return options
@classmethod
def _make_context(cls, options, **kwargs):
"""Make a context from the template information and variant args.
Currently, all arguments should be keyword arguments.
Args:
options (dict): Options to `wrap_experiment` itself. See the
function documentation for details.
kwargs (dict): Keyword arguments for the wrapped function. Will be
logged to `variant.json`
Returns:
ExperimentContext: The created experiment context.
"""
name = options['name']
if name is None:
name = options['function'].__name__
name = cls._augment_name(options, name, kwargs)
log_dir = options['log_dir']
if log_dir is None:
log_dir = ('{data}/local/{prefix}/{name}'.format(
data=os.path.join(os.getcwd(), 'data'),
prefix=options['prefix'],
name=name))
if options['use_existing_dir']:
os.makedirs(log_dir, exist_ok=True)
else:
log_dir = _make_sequential_log_dir(log_dir)
tabular_log_file = os.path.join(log_dir, 'progress.csv')
text_log_file = os.path.join(log_dir, 'debug.log')
variant_log_file = os.path.join(log_dir, 'variant.json')
metadata_log_file = os.path.join(log_dir, 'metadata.json')
tb_dir = os.path.join(log_dir, 'tb')
tabular_log_file_eval = os.path.join(log_dir, 'progress_eval.csv')
text_log_file_eval = os.path.join(log_dir, 'debug_eval.log')
tb_dir_eval = os.path.join(log_dir, 'tb_eval')
tb_dir_plot = os.path.join(log_dir, 'tb_plot')
dump_json(variant_log_file, kwargs)
git_root_path, metadata = get_metadata()
dump_json(metadata_log_file, metadata)
if git_root_path and options['archive_launch_repo']:
make_launcher_archive(git_root_path=git_root_path, log_dir=log_dir)
logger.add_output(dowel.TextOutput(text_log_file))
logger.add_output(dowel.CsvOutput(tabular_log_file))
logger.add_output(
dowel.TensorBoardOutput(tb_dir, x_axis='TotalEnvSteps'))
logger.add_output(dowel.StdOutput())
dowel_eval = dowel_wrapper.get_dowel('eval')
logger_eval = dowel_eval.logger
logger_eval.add_output(dowel_eval.TextOutput(text_log_file_eval))
logger_eval.add_output(dowel_eval.CsvOutput(tabular_log_file_eval))
logger_eval.add_output(
dowel_eval.TensorBoardOutput(tb_dir_eval, x_axis='TotalEnvSteps'))
logger_eval.add_output(dowel_eval.StdOutput())
dowel_plot = dowel_wrapper.get_dowel('plot')
logger_plot = dowel_plot.logger
logger_plot.add_output(
dowel_plot.TensorBoardOutput(tb_dir_plot, x_axis='TotalEnvSteps'))
logger.push_prefix('[{}] '.format(name))
logger.log('Logging to {}'.format(log_dir))
git_commit = get_git_commit_hash()
logger.log('Git commit: {}'.format(git_commit))
git_diff_file_path = os.path.join(log_dir, 'git_diff_{}.patch'.format(git_commit))
save_git_diff_to_file(git_diff_file_path)
return ExperimentContext(snapshot_dir=log_dir,
snapshot_mode=options['snapshot_mode'],
snapshot_gap=options['snapshot_gap'])
def __call__(self, *args, **kwargs):
"""Wrap a function to turn it into an ExperimentTemplate.
        Note that this docstring will be overridden to match the function's
docstring on the ExperimentTemplate once a function is passed in.
Args:
args (list): If no function has been set yet, must be a list
containing a single callable. If the function has been set, may
be a single value, a dictionary containing overrides for the
original arguments to `wrap_experiment`.
kwargs (dict): Arguments passed onto the wrapped function.
Returns:
object: The returned value of the wrapped function.
Raises:
ValueError: If not passed a single callable argument.
"""
if self.function is None:
if len(args) != 1 or len(kwargs) != 0 or not callable(args[0]):
raise ValueError('Please apply the result of '
'wrap_experiment() to a single function')
# Apply ourselves as a decorator
self.function = args[0]
self._update_wrap_params()
return self
else:
ctxt = self._make_context(self._get_options(*args), **kwargs)
result = self.function(ctxt, **kwargs)
logger.remove_all()
logger.pop_prefix()
gc.collect() # See dowel issue #44
return result
def wrap_experiment(function=None,
*,
log_dir=None,
prefix='experiment',
name=None,
snapshot_mode='last',
snapshot_gap=1,
archive_launch_repo=True,
name_parameters=None,
use_existing_dir=False):
"""Decorate a function to turn it into an ExperimentTemplate.
When invoked, the wrapped function will receive an ExperimentContext, which
will contain the log directory into which the experiment should log
information.
    This decorator can be invoked in two different ways.
Without arguments, like this:
@wrap_experiment
def my_experiment(ctxt, seed, lr=0.5):
...
Or with arguments:
@wrap_experiment(snapshot_mode='all')
def my_experiment(ctxt, seed, lr=0.5):
...
All arguments must be keyword arguments.
Args:
function (callable or None): The experiment function to wrap.
log_dir (str or None): The full log directory to log to. Will be
computed from `name` if omitted.
name (str or None): The name of this experiment template. Will be
filled from the wrapped function's name if omitted.
prefix (str): Directory under data/local in which to place the
experiment directory.
snapshot_mode (str): Policy for which snapshots to keep (or make at
all). Can be either "all" (all iterations will be saved), "last"
(only the last iteration will be saved), "gap" (every snapshot_gap
iterations are saved), or "none" (do not save snapshots).
snapshot_gap (int): Gap between snapshot iterations. Waits this number
of iterations before taking another snapshot.
archive_launch_repo (bool): Whether to save an archive of the
repository containing the launcher script. This is a potentially
expensive operation which is useful for ensuring reproducibility.
name_parameters (str or None): Parameters to insert into the experiment
name. Should be either None (the default), 'all' (all parameters
will be used), or 'passed' (only passed parameters will be used).
The used parameters will be inserted in the order they appear in
the function definition.
use_existing_dir (bool): If true, (re)use the directory for this
experiment, even if it already contains data.
Returns:
callable: The wrapped function.
"""
return ExperimentTemplate(function=function,
log_dir=log_dir,
prefix=prefix,
name=name,
snapshot_mode=snapshot_mode,
snapshot_gap=snapshot_gap,
archive_launch_repo=archive_launch_repo,
name_parameters=name_parameters,
use_existing_dir=use_existing_dir)
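# Illustrative usage sketch (editor-added).  'my_experiment' and its arguments
# are hypothetical; the sketch shows both documented call styles: plain
# keyword arguments, and a single dict of overrides for the wrap_experiment
# options (handled by ExperimentTemplate._get_options).
def _example_wrap_experiment_usage():  # pragma: no cover
    @wrap_experiment(name_parameters='passed')
    def my_experiment(ctxt, seed=1):
        return ctxt.snapshot_dir
    # Creates e.g. data/local/experiment/my_experiment_seed=2
    my_experiment(seed=2)
    # Override wrap_experiment options at call time with a single dict.
    my_experiment({'snapshot_mode': 'all'}, seed=3)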
def dump_json(filename, data):
"""Dump a dictionary to a file in JSON format.
Args:
filename(str): Filename for the file.
data(dict): Data to save to file.
"""
pathlib.Path(os.path.dirname(filename)).mkdir(parents=True, exist_ok=True)
with open(filename, 'w') as f:
json.dump(data, f, indent=2, sort_keys=True, cls=LogEncoder)
def get_metadata():
"""Get metadata about the main script.
The goal of this function is to capture the additional information needed
to re-run an experiment, assuming that the launcher script that started the
experiment is located in a clean git repository.
Returns:
tuple[str, dict[str, str]]:
* Absolute path to root directory of launcher's git repo.
          * Dictionary containing:
* githash (str): Hash of the git revision of the repo the
experiment was started from. "-dirty" will be appended to this
string if the repo has uncommitted changes. May not be present
if the main script is not in a git repo.
* launcher (str): Relative path to the main script from the base of
the repo the experiment was started from. If the main script
was not started from a git repo, this will instead be an
absolute path to the main script.
"""
main_file = getattr(main, '__file__', None)
if not main_file:
return None, {}
main_file_path = os.path.abspath(main_file)
try:
git_root_path = subprocess.check_output(
('git', 'rev-parse', '--show-toplevel'),
cwd=os.path.dirname(main_file_path),
stderr=subprocess.DEVNULL)
git_root_path = git_root_path.strip()
except subprocess.CalledProcessError:
# This file is always considered not to exist.
git_root_path = ''
# We check that the path exists since in old versions of git the above
# rev-parse command silently exits with 0 when run outside of a git repo.
if not os.path.exists(git_root_path):
return None, {
'launcher': main_file_path,
}
launcher_path = os.path.relpath(bytes(main_file_path, encoding='utf8'),
git_root_path)
git_hash = subprocess.check_output(('git', 'rev-parse', 'HEAD'),
cwd=git_root_path)
git_hash = git_hash.decode('utf-8').strip()
git_status = subprocess.check_output(('git', 'status', '--short'),
cwd=git_root_path)
git_status = git_status.decode('utf-8').strip()
if git_status != '':
git_hash = git_hash + '-dirty'
return git_root_path, {
'githash': git_hash,
'launcher': launcher_path.decode('utf-8'),
}
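# Illustrative sketch (editor-added).  The values in the comments are
# hypothetical; the actual hash and launcher path depend on the repository
# the main script was started from.
def _example_get_metadata():  # pragma: no cover
    git_root, metadata = get_metadata()
    # git_root -> b'/home/user/my-repo' (or None outside a git repo)
    # metadata -> {'githash': '0a1b2c3...-dirty',
    #              'launcher': 'examples/launch_experiment.py'}
    return git_root, metadata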
def make_launcher_archive(*, git_root_path, log_dir):
"""Saves an archive of the launcher's git repo to the log directory.
Args:
git_root_path (str): Absolute path to git repo to archive.
log_dir (str): Absolute path to the log directory.
"""
git_files = subprocess.check_output(
('git', 'ls-files', '--others', '--exclude-standard', '--cached',
'-z'),
cwd=git_root_path).strip()
repo_size = 0
files_to_archive = []
for f in git_files.split(b'\0'):
try:
file_size = os.stat(os.path.join(git_root_path, f)).st_size
repo_size += file_size
if file_size < EIGHT_MEBIBYTES:
files_to_archive.append(f)
except FileNotFoundError:
pass
if repo_size >= EIGHT_MEBIBYTES:
warnings.warn('Archiving a launch repo larger than 8MiB. This may be '
'slow. Set archive_launch_repo=False in wrap_experiment '
'to disable this behavior.')
archive_path = os.path.join(log_dir, 'launch_archive.tar.xz')
subprocess.run(('tar', '--null', '--files-from', '-', '--xz', '--create',
'--file', archive_path),
input=b'\0'.join(files_to_archive),
cwd=git_root_path,
check=True)
class LogEncoder(json.JSONEncoder):
"""Encoder to be used as cls in json.dump."""
def default(self, o):
"""Perform JSON encoding.
Args:
o (object): Object to encode.
Returns:
str: Object encoded in JSON.
"""
        # The 'method-hidden' warning is a pylint false positive for
        # JSONEncoder.default overrides, so it is disabled here.
        # pylint: disable=method-hidden
if isinstance(o, type):
return {'$class': o.__module__ + '.' + o.__name__}
elif isinstance(o, enum.Enum):
return {
'$enum':
o.__module__ + '.' + o.__class__.__name__ + '.' + o.name
}
elif callable(o):
return {'$function': o.__module__ + '.' + o.__name__}
return json.JSONEncoder.default(self, o)
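# Illustrative sketch (editor-added).  Demonstrates how LogEncoder serialises
# values that the standard JSONEncoder rejects; the enum below is made up.
def _example_log_encoder():  # pragma: no cover
    class Color(enum.Enum):
        RED = 1
    payload = {'encoder': LogEncoder, 'writer': dump_json, 'color': Color.RED}
    # -> {"encoder": {"$class": "...LogEncoder"},
    #     "writer": {"$function": "...dump_json"},
    #     "color": {"$enum": "...Color.RED"}}
    return json.dumps(payload, cls=LogEncoder)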
| 30,140 | 36.582294 | 113 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/experiment_wrapper.py | #!/usr/bin/env python3
"""Run an experiment triggered by `run_experiment()` in `experiment.py`."""
import argparse
import ast
import base64
import datetime
import gc
import json
import os
import pathlib
import sys
import uuid
import cloudpickle
import dateutil.tz
import dowel
from dowel import logger
import psutil
import garage.experiment
import garage.plotter
import garage.tf.plotter
# pylint: disable=too-many-statements
def run_experiment(argv):
"""Run experiment.
Args:
argv (list[str]): Command line arguments.
Raises:
BaseException: Propagate any exception in the experiment.
"""
now = datetime.datetime.now(dateutil.tz.tzlocal())
# avoid name clashes when running distributed jobs
rand_id = str(uuid.uuid4())[:5]
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S_%f_%Z')
default_exp_name = 'experiment_%s_%s' % (timestamp, rand_id)
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name',
type=str,
default=default_exp_name,
help='Name of the experiment.')
parser.add_argument('--log_dir',
type=str,
default=None,
help='Path to save the log and iteration snapshot.')
parser.add_argument('--snapshot_mode',
type=str,
default='last',
help='Mode to save the snapshot. Can be either "all" '
'(all iterations will be saved), "last" (only '
                             'the last iteration will be saved), "gap" (every '
'`snapshot_gap` iterations are saved), or "none" '
'(do not save snapshots)')
parser.add_argument('--snapshot_gap',
type=int,
default=1,
help='Gap between snapshot iterations.')
parser.add_argument(
'--resume_from_dir',
type=str,
default=None,
help='Directory of the pickle file to resume experiment from.')
parser.add_argument('--resume_from_epoch',
type=str,
default=None,
help='Index of iteration to restore from. '
'Can be "first", "last" or a number. '
'Not applicable when snapshot_mode="last"')
parser.add_argument('--tabular_log_file',
type=str,
default='progress.csv',
help='Name of the tabular log file (in csv).')
parser.add_argument('--text_log_file',
type=str,
default='debug.log',
help='Name of the text log file (in pure text).')
parser.add_argument('--tensorboard_step_key',
type=str,
default=None,
help='Name of the step key in tensorboard_summary.')
parser.add_argument('--params_log_file',
type=str,
default='params.json',
help='Name of the parameter log file (in json).')
parser.add_argument('--variant_log_file',
type=str,
default='variant.json',
help='Name of the variant log file (in json).')
parser.add_argument('--plot',
type=ast.literal_eval,
default=False,
help='Whether to plot the iteration results')
parser.add_argument(
'--log_tabular_only',
type=ast.literal_eval,
default=False,
help='Print only the tabular log information (in a horizontal format)')
parser.add_argument('--seed',
type=int,
default=None,
help='Random seed for numpy')
parser.add_argument('--args_data',
type=str,
help='Pickled data for objects')
parser.add_argument('--variant_data',
type=str,
help='Pickled data for variant configuration')
args = parser.parse_args(argv[1:])
if args.seed is not None:
garage.experiment.deterministic.set_seed(args.seed)
if args.log_dir is None:
log_dir = os.path.join(os.path.join(os.getcwd(), 'data'),
args.exp_name)
else:
log_dir = args.log_dir
tabular_log_file = os.path.join(log_dir, args.tabular_log_file)
text_log_file = os.path.join(log_dir, args.text_log_file)
params_log_file = os.path.join(log_dir, args.params_log_file)
if args.variant_data is not None:
variant_data = cloudpickle.loads(base64.b64decode(args.variant_data))
variant_log_file = os.path.join(log_dir, args.variant_log_file)
garage.experiment.experiment.dump_json(variant_log_file, variant_data)
else:
variant_data = None
log_parameters(params_log_file, args)
logger.add_output(dowel.TextOutput(text_log_file))
logger.add_output(dowel.CsvOutput(tabular_log_file))
logger.add_output(dowel.TensorBoardOutput(log_dir, x_axis='TotalEnvSteps'))
logger.add_output(dowel.StdOutput())
logger.push_prefix('[%s] ' % args.exp_name)
snapshot_config = \
garage.experiment.SnapshotConfig(snapshot_dir=log_dir,
snapshot_mode=args.snapshot_mode,
snapshot_gap=args.snapshot_gap)
method_call = cloudpickle.loads(base64.b64decode(args.args_data))
try:
method_call(snapshot_config, variant_data, args.resume_from_dir,
args.resume_from_epoch)
except BaseException:
children = garage.plotter.Plotter.get_plotters()
children += garage.tf.plotter.Plotter.get_plotters()
child_proc_shutdown(children)
raise
logger.remove_all()
logger.pop_prefix()
gc.collect() # See dowel issue #44
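# Illustrative sketch (editor-added).  Shows the kind of argv this wrapper
# expects, as generated by garage.experiment.experiment.to_local_command();
# the base64 payload is a placeholder, not a real pickle.
def _example_wrapper_argv():  # pragma: no cover
    return [
        'experiment_wrapper.py', '--exp_name', 'demo_exp', '--seed', '1',
        '--args_data', '<base64-encoded cloudpickle of the method call>'
    ]  # run_experiment(_example_wrapper_argv()) would decode and run the call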
def child_proc_shutdown(children):
"""Shut down children processes.
Args:
children (list[garage.plotter.Plotter]): Instances of plotter to
shutdown.
"""
run_exp_proc = psutil.Process()
alive = run_exp_proc.children(recursive=True)
    # Build a filtered list instead of calling alive.remove() while iterating
    # over `alive`, which would skip elements.
    alive = [
        proc for proc in alive
        if not any('multiprocessing.semaphore_tracker' in cmd
                   for cmd in proc.cmdline())
    ]
for c in children:
c.close()
max_retries = 5
for _ in range(max_retries):
_, alive = psutil.wait_procs(alive, 1.0)
if not alive:
break
if alive:
error_msg = ''
for child in alive:
error_msg += '{}\n'.format(
str(
child.as_dict(
attrs=['ppid', 'pid', 'name', 'status', 'cmdline'])))
error_msg = ("The following processes didn't die after the shutdown "
'of run_experiment:\n') + error_msg
error_msg += ('This is a sign of an unclean shutdown. Please reopen '
'the following issue\nwith a detailed description '
'of how the error was produced:\n')
error_msg += ('https://github.com/rlworkgroup/garage/issues/120')
print(error_msg)
def log_parameters(log_file, args):
"""Log parameters to file.
Args:
log_file (str): Log filename.
args (argparse.Namespace): Parsed command line arguments.
"""
log_params = {}
for param_name, param_value in args.__dict__.items():
log_params[param_name] = param_value
if args.args_data is not None:
log_params['json_args'] = dict()
pathlib.Path(os.path.dirname(log_file)).mkdir(parents=True, exist_ok=True)
with open(log_file, 'w') as f:
json.dump(log_params,
f,
indent=2,
sort_keys=True,
cls=garage.experiment.experiment.LogEncoder)
garage.experiment.experiment.dump_json(log_file, log_params)
if __name__ == '__main__':
run_experiment(sys.argv)
| 8,231 | 34.482759 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/local_runner.py | """Provides algorithms with access to most of garage's features."""
import copy
import os
import time
import cloudpickle
from dowel import logger, tabular
import psutil
from garage.experiment.deterministic import get_seed, set_seed
from garage.experiment.snapshotter import Snapshotter
from garage.sampler import parallel_sampler
from garage.sampler.sampler_deprecated import BaseSampler
# This is avoiding a circular import
from garage.sampler.default_worker import DefaultWorker # noqa: I100
from garage.sampler.worker_factory import WorkerFactory
import dowel_wrapper
class ExperimentStats:
# pylint: disable=too-few-public-methods
"""Statistics of a experiment.
Args:
        total_epoch (int): Total epochs.
total_itr (int): Total Iterations.
total_env_steps (int): Total environment steps collected.
last_path (list[dict]): Last sampled paths.
"""
def __init__(self, total_epoch, total_itr, total_env_steps, last_path):
self.total_epoch = total_epoch
self.total_itr = total_itr
self.total_env_steps = total_env_steps
self.last_path = last_path
class SetupArgs:
# pylint: disable=too-few-public-methods
"""Arguments to setup a runner.
Args:
sampler_cls (garage.sampler.Sampler): A sampler class.
sampler_args (dict): Arguments to be passed to sampler constructor.
seed (int): Random seed.
"""
def __init__(self, sampler_cls, sampler_args, seed):
self.sampler_cls = sampler_cls
self.sampler_args = sampler_args
self.seed = seed
class TrainArgs:
# pylint: disable=too-few-public-methods
"""Arguments to call train() or resume().
Args:
n_epochs (int): Number of epochs.
batch_size (int): Number of environment steps in one batch.
plot (bool): Visualize policy by doing rollout after each epoch.
store_paths (bool): Save paths in snapshot.
pause_for_plot (bool): Pause for plot.
start_epoch (int): The starting epoch. Used for resume().
"""
def __init__(self, n_epochs, batch_size, plot, store_paths, pause_for_plot,
start_epoch):
self.n_epochs = n_epochs
self.batch_size = batch_size
self.plot = plot
self.store_paths = store_paths
self.pause_for_plot = pause_for_plot
self.start_epoch = start_epoch
class LocalRunner:
"""Base class of local runner.
    Use Runner.setup(algo, env) to set up algorithm and environment for runner
and Runner.train() to start training.
Args:
snapshot_config (garage.experiment.SnapshotConfig): The snapshot
configuration used by LocalRunner to create the snapshotter.
If None, it will create one with default settings.
max_cpus (int): The maximum number of parallel sampler workers.
Note:
For the use of any TensorFlow environments, policies and algorithms,
please use LocalTFRunner().
Examples:
| # to train
| runner = LocalRunner()
| env = Env(...)
| policy = Policy(...)
| algo = Algo(
| env=env,
| policy=policy,
| ...)
| runner.setup(algo, env)
| runner.train(n_epochs=100, batch_size=4000)
| # to resume immediately.
| runner = LocalRunner()
| runner.restore(resume_from_dir)
| runner.resume()
| # to resume with modified training arguments.
| runner = LocalRunner()
| runner.restore(resume_from_dir)
| runner.resume(n_epochs=20)
"""
def __init__(self, snapshot_config, max_cpus=1):
self._snapshotter = Snapshotter(snapshot_config.snapshot_dir,
snapshot_config.snapshot_mode,
snapshot_config.snapshot_gap)
parallel_sampler.initialize(max_cpus)
seed = get_seed()
if seed is not None:
parallel_sampler.set_seed(seed)
self._has_setup = False
self._plot = False
self._setup_args = None
self._train_args = None
self._stats = ExperimentStats(total_itr=0,
total_env_steps=0,
total_epoch=0,
last_path=None)
self._algo = None
self._env = None
self._sampler = None
self._plotter = None
self._start_time = None
self._itr_start_time = None
self.step_itr = None
self.step_path = None
# only used for off-policy algorithms
self.enable_logging = True
self._n_workers = None
self._worker_class = None
self._worker_args = None
def make_sampler(self,
sampler_cls,
*,
seed=None,
n_workers=psutil.cpu_count(logical=False),
max_path_length=None,
worker_class=DefaultWorker,
sampler_args=None,
worker_args=None):
"""Construct a Sampler from a Sampler class.
Args:
sampler_cls (type): The type of sampler to construct.
seed (int): Seed to use in sampler workers.
max_path_length (int): Maximum path length to be sampled by the
sampler. Paths longer than this will be truncated.
n_workers (int): The number of workers the sampler should use.
worker_class (type): Type of worker the Sampler should use.
sampler_args (dict or None): Additional arguments that should be
passed to the sampler.
worker_args (dict or None): Additional arguments that should be
passed to the sampler.
Raises:
ValueError: If `max_path_length` isn't passed and the algorithm
doesn't contain a `max_path_length` field, or if the algorithm
doesn't have a policy field.
Returns:
sampler_cls: An instance of the sampler class.
"""
if not hasattr(self._algo, 'policy'):
raise ValueError('If the runner is used to construct a sampler, '
'the algorithm must have a `policy` field.')
if max_path_length is None:
if hasattr(self._algo, 'max_path_length'):
max_path_length = self._algo.max_path_length
else:
raise ValueError('If `sampler_cls` is specified in '
'runner.setup, the algorithm must have '
'a `max_path_length` field.')
if seed is None:
seed = get_seed()
if sampler_args is None:
sampler_args = {}
if worker_args is None:
worker_args = {}
if issubclass(sampler_cls, BaseSampler):
return sampler_cls(self._algo, self._env, **sampler_args)
else:
return sampler_cls.from_worker_factory(WorkerFactory(
seed=seed,
max_path_length=max_path_length,
n_workers=n_workers,
worker_class=worker_class,
worker_args=worker_args),
agents=self._algo.policy,
envs=self._env)
def setup(self,
algo,
env,
sampler_cls=None,
sampler_args=None,
n_workers=psutil.cpu_count(logical=False),
worker_class=None,
worker_args=None):
"""Set up runner for algorithm and environment.
This method saves algo and env within runner and creates a sampler.
Note:
After setup() is called all variables in session should have been
initialized. setup() respects existing values in session so
policy weights can be loaded before setup().
Args:
algo (garage.np.algos.RLAlgorithm): An algorithm instance.
            env (garage.envs.GarageEnv): An environment instance.
sampler_cls (garage.sampler.Sampler): A sampler class.
sampler_args (dict): Arguments to be passed to sampler constructor.
n_workers (int): The number of workers the sampler should use.
worker_class (type): Type of worker the sampler should use.
worker_args (dict or None): Additional arguments that should be
passed to the worker.
Raises:
ValueError: If sampler_cls is passed and the algorithm doesn't
contain a `max_path_length` field.
"""
self._algo = algo
self._env = env
self._n_workers = n_workers
self._worker_class = worker_class
if sampler_args is None:
sampler_args = {}
if sampler_cls is None:
sampler_cls = getattr(algo, 'sampler_cls', None)
if worker_class is None:
worker_class = getattr(algo, 'worker_cls', DefaultWorker)
if worker_args is None:
worker_args = {}
self._worker_args = worker_args
if sampler_cls is None:
self._sampler = None
else:
self._sampler = self.make_sampler(sampler_cls,
sampler_args=sampler_args,
n_workers=n_workers,
worker_class=worker_class,
worker_args=worker_args)
self._has_setup = True
self._setup_args = SetupArgs(sampler_cls=sampler_cls,
sampler_args=sampler_args,
seed=get_seed())
def _start_worker(self):
"""Start Plotter and Sampler workers."""
if isinstance(self._sampler, BaseSampler):
self._sampler.start_worker()
if self._plot:
# pylint: disable=import-outside-toplevel
from garage.plotter import Plotter
self._plotter = Plotter()
self._plotter.init_plot(self.get_env_copy(), self._algo.policy)
def _shutdown_worker(self):
"""Shutdown Plotter and Sampler workers."""
if self._sampler is not None:
self._sampler.shutdown_worker()
if self._plot:
self._plotter.close()
def obtain_samples(self,
itr,
batch_size=None,
agent_update=None,
env_update=None):
"""Obtain one batch of samples.
Args:
itr (int): Index of iteration (epoch).
batch_size (int): Number of steps in batch.
This is a hint that the sampler may or may not respect.
agent_update (object): Value which will be passed into the
`agent_update_fn` before doing rollouts. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update (object): Value which will be passed into the
`env_update_fn` before doing rollouts. If a list is passed in,
it must have length exactly `factory.n_workers`, and will be
spread across the workers.
Raises:
ValueError: Raised if the runner was initialized without a sampler,
or batch_size wasn't provided here or to train.
Returns:
list[dict]: One batch of samples.
"""
if self._sampler is None:
raise ValueError('Runner was not initialized with `sampler_cls`. '
'Either provide `sampler_cls` to runner.setup, '
' or set `algo.sampler_cls`.')
if batch_size is None and self._train_args.batch_size is None:
raise ValueError('Runner was not initialized with `batch_size`. '
'Either provide `batch_size` to runner.train, '
' or pass `batch_size` to runner.obtain_samples.')
paths = None
if isinstance(self._sampler, BaseSampler):
paths = self._sampler.obtain_samples(
itr, (batch_size or self._train_args.batch_size))
else:
if agent_update is None:
agent_update = self._algo.policy.get_param_values()
paths = self._sampler.obtain_samples(
itr, (batch_size or self._train_args.batch_size),
agent_update=agent_update,
env_update=env_update)
paths = paths.to_trajectory_list()
self._stats.total_env_steps += sum([len(p['rewards']) for p in paths])
return paths
def save(self, epoch):
"""Save snapshot of current batch.
Args:
epoch (int): Epoch.
Raises:
NotSetupError: if save() is called before the runner is set up.
"""
if not self._has_setup:
            raise NotSetupError('Use setup() to set up runner before saving.')
logger.log('Saving snapshot...')
params = dict()
# Save arguments
params['setup_args'] = self._setup_args
params['train_args'] = self._train_args
params['stats'] = self._stats
# Save states
params['env'] = self._env
params['algo'] = self._algo
params['n_workers'] = self._n_workers
params['worker_class'] = self._worker_class
params['worker_args'] = self._worker_args
self._snapshotter.save_itr_params(epoch, params)
logger.log('Saved')
def restore(self, from_dir, from_epoch='last'):
"""Restore experiment from snapshot.
Args:
from_dir (str): Directory of the pickle file
to resume experiment from.
from_epoch (str or int): The epoch to restore from.
Can be 'first', 'last' or a number.
Not applicable when snapshot_mode='last'.
Returns:
TrainArgs: Arguments for train().
"""
saved = self._snapshotter.load(from_dir, from_epoch)
self._setup_args = saved['setup_args']
self._train_args = saved['train_args']
self._stats = saved['stats']
set_seed(self._setup_args.seed)
self.setup(env=saved['env'],
algo=saved['algo'],
sampler_cls=self._setup_args.sampler_cls,
sampler_args=self._setup_args.sampler_args,
n_workers=saved['n_workers'],
worker_class=saved['worker_class'],
worker_args=saved['worker_args'])
n_epochs = self._train_args.n_epochs
last_epoch = self._stats.total_epoch
last_itr = self._stats.total_itr
total_env_steps = self._stats.total_env_steps
batch_size = self._train_args.batch_size
store_paths = self._train_args.store_paths
pause_for_plot = self._train_args.pause_for_plot
fmt = '{:<20} {:<15}'
logger.log('Restore from snapshot saved in %s' %
self._snapshotter.snapshot_dir)
logger.log(fmt.format('-- Train Args --', '-- Value --'))
logger.log(fmt.format('n_epochs', n_epochs))
logger.log(fmt.format('last_epoch', last_epoch))
logger.log(fmt.format('batch_size', batch_size))
logger.log(fmt.format('store_paths', store_paths))
logger.log(fmt.format('pause_for_plot', pause_for_plot))
logger.log(fmt.format('-- Stats --', '-- Value --'))
logger.log(fmt.format('last_itr', last_itr))
logger.log(fmt.format('total_env_steps', total_env_steps))
self._train_args.start_epoch = last_epoch + 1
return copy.copy(self._train_args)
def log_diagnostics(self, pause_for_plot=False):
"""Log diagnostics.
Args:
pause_for_plot (bool): Pause for plot.
"""
logger.log('Time %.2f s' % (time.time() - self._start_time))
logger.log('EpochTime %.2f s' % (time.time() - self._itr_start_time))
tabular.record('TotalEnvSteps', self._stats.total_env_steps)
logger.log(tabular)
if self._plot:
self._plotter.update_plot(self._algo.policy,
self._algo.max_path_length)
if pause_for_plot:
                input('Plotting evaluation run: Press Enter to continue...')
def train(self,
n_epochs,
batch_size=None,
plot=False,
store_paths=False,
pause_for_plot=False):
"""Start training.
Args:
n_epochs (int): Number of epochs.
batch_size (int or None): Number of environment steps in one batch.
plot (bool): Visualize policy by doing rollout after each epoch.
store_paths (bool): Save paths in snapshot.
pause_for_plot (bool): Pause for plot.
Raises:
NotSetupError: If train() is called before setup().
Returns:
float: The average return in last epoch cycle.
"""
if not self._has_setup:
            raise NotSetupError('Use setup() to set up runner before training.')
# Save arguments for restore
self._train_args = TrainArgs(n_epochs=n_epochs,
batch_size=batch_size,
plot=plot,
store_paths=store_paths,
pause_for_plot=pause_for_plot,
start_epoch=0)
self._plot = plot
average_return = self._algo.train(self)
self._shutdown_worker()
return average_return
def step_epochs(self):
"""Step through each epoch.
This function returns a magic generator. When iterated through, this
generator automatically performs services such as snapshotting and log
management. It is used inside train() in each algorithm.
The generator initializes two variables: `self.step_itr` and
`self.step_path`. To use the generator, these two have to be
updated manually in each epoch, as the example shows below.
Yields:
int: The next training epoch.
Examples:
for epoch in runner.step_epochs():
runner.step_path = runner.obtain_samples(...)
self.train_once(...)
runner.step_itr += 1
"""
self._start_worker()
self._start_time = time.time()
self.step_itr = self._stats.total_itr
self.step_path = None
# Used by integration tests to ensure examples can run one epoch.
n_epochs = int(
os.environ.get('GARAGE_EXAMPLE_TEST_N_EPOCHS',
self._train_args.n_epochs))
logger.log('Obtaining samples...')
for epoch in range(self._train_args.start_epoch, n_epochs):
self._itr_start_time = time.time()
with logger.prefix('epoch #%d | ' % epoch):
yield epoch
save_path = (self.step_path
if self._train_args.store_paths else None)
self._stats.last_path = save_path
self._stats.total_epoch = epoch
self._stats.total_itr = self.step_itr
self.save(epoch)
if self.enable_logging:
self.log_diagnostics(self._train_args.pause_for_plot)
logger.dump_all(self.step_itr)
tabular.clear()
def resume(self,
n_epochs=None,
batch_size=None,
plot=None,
store_paths=None,
pause_for_plot=None):
"""Resume from restored experiment.
This method provides the same interface as train().
If not specified, an argument will default to the
saved arguments from the last call to train().
Args:
n_epochs (int): Number of epochs.
batch_size (int): Number of environment steps in one batch.
plot (bool): Visualize policy by doing rollout after each epoch.
store_paths (bool): Save paths in snapshot.
pause_for_plot (bool): Pause for plot.
Raises:
NotSetupError: If resume() is called before restore().
Returns:
float: The average return in last epoch cycle.
"""
if self._train_args is None:
raise NotSetupError('You must call restore() before resume().')
self._train_args.n_epochs = n_epochs or self._train_args.n_epochs
self._train_args.batch_size = batch_size or self._train_args.batch_size
if plot is not None:
self._train_args.plot = plot
if store_paths is not None:
self._train_args.store_paths = store_paths
if pause_for_plot is not None:
self._train_args.pause_for_plot = pause_for_plot
average_return = self._algo.train(self)
self._shutdown_worker()
return average_return
def get_env_copy(self):
"""Get a copy of the environment.
Returns:
            garage.envs.GarageEnv: An environment instance.
"""
return cloudpickle.loads(cloudpickle.dumps(self._env))
@property
def total_env_steps(self):
"""Total environment steps collected.
Returns:
int: Total environment steps collected.
"""
return self._stats.total_env_steps
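# Illustrative sketch (editor-added).  A minimal, hypothetical algorithm
# skeleton showing how LocalRunner pipelines sampling and training through
# step_epochs()/obtain_samples(); it is not a real garage algorithm.
class _ExampleAlgo:  # pragma: no cover
    max_path_length = 100
    def __init__(self, policy, sampler_cls=None):
        self.policy = policy
        self.sampler_cls = sampler_cls
    def train(self, runner):
        for _ in runner.step_epochs():
            runner.step_path = runner.obtain_samples(runner.step_itr)
            # ...update self.policy from runner.step_path here...
            runner.step_itr += 1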
class NotSetupError(Exception):
"""Raise when an experiment is about to run without setup."""
| 21,957 | 34.879085 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/local_tf_runner.py | """The local runner for TensorFlow algorithms.
A runner setup context for algorithms during initialization and
pipelines data between sampler and algorithm during training.
"""
from dowel import logger
import psutil
from garage.experiment import LocalRunner
from garage.sampler import DefaultWorker
tf = False
TFWorkerClassWrapper = False
try:
import tensorflow as tf
from garage.tf.samplers import TFWorkerClassWrapper # noqa: E501; pylint: disable=ungrouped-imports
except ImportError:
pass
class LocalTFRunner(LocalRunner):
"""This class implements a local runner for TensorFlow algorithms.
A local runner provides a default TensorFlow session using python context.
This is useful for those experiment components (e.g. policy) that require a
TensorFlow session during construction.
    Use Runner.setup(algo, env) to set up algorithm and environment for runner
and Runner.train() to start training.
Args:
snapshot_config (garage.experiment.SnapshotConfig): The snapshot
configuration used by LocalRunner to create the snapshotter.
If None, it will create one with default settings.
max_cpus (int): The maximum number of parallel sampler workers.
sess (tf.Session): An optional TensorFlow session.
A new session will be created immediately if not provided.
Note:
The local runner will set up a joblib task pool of size max_cpus
possibly later used by BatchSampler. If BatchSampler is not used,
the processes in the pool will remain dormant.
This setup is required to use TensorFlow in a multiprocess
environment before a TensorFlow session is created
because TensorFlow is not fork-safe. See
https://github.com/tensorflow/tensorflow/issues/2448.
        When resuming via command line, new snapshots will be
        saved into the SAME directory if not specified.
        When resuming programmatically, the snapshot directory should be
        specified manually or through the run_experiment() interface.
Examples:
# to train
with LocalTFRunner() as runner:
env = gym.make('CartPole-v1')
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32))
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=4000)
# to resume immediately.
with LocalTFRunner() as runner:
runner.restore(resume_from_dir)
runner.resume()
# to resume with modified training arguments.
with LocalTFRunner() as runner:
runner.restore(resume_from_dir)
runner.resume(n_epochs=20)
"""
def __init__(self, snapshot_config, sess=None, max_cpus=1):
super().__init__(snapshot_config=snapshot_config, max_cpus=max_cpus)
self.sess = sess or tf.compat.v1.Session()
self.sess_entered = False
def __enter__(self):
"""Set self.sess as the default session.
Returns:
LocalTFRunner: This local runner.
"""
if tf.compat.v1.get_default_session() is not self.sess:
self.sess.__enter__()
self.sess_entered = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Leave session.
Args:
exc_type (str): Type.
exc_val (object): Value.
exc_tb (object): Traceback.
"""
if tf.compat.v1.get_default_session(
) is self.sess and self.sess_entered:
self.sess.__exit__(exc_type, exc_val, exc_tb)
self.sess_entered = False
def make_sampler(self,
sampler_cls,
*,
seed=None,
n_workers=psutil.cpu_count(logical=False),
max_path_length=None,
worker_class=DefaultWorker,
sampler_args=None,
worker_args=None):
"""Construct a Sampler from a Sampler class.
Args:
sampler_cls (type): The type of sampler to construct.
seed (int): Seed to use in sampler workers.
max_path_length (int): Maximum path length to be sampled by the
sampler. Paths longer than this will be truncated.
n_workers (int): The number of workers the sampler should use.
worker_class (type): Type of worker the sampler should use.
sampler_args (dict or None): Additional arguments that should be
passed to the sampler.
worker_args (dict or None): Additional arguments that should be
passed to the worker.
Returns:
sampler_cls: An instance of the sampler class.
"""
# pylint: disable=useless-super-delegation
return super().make_sampler(
sampler_cls,
seed=seed,
n_workers=n_workers,
max_path_length=max_path_length,
worker_class=TFWorkerClassWrapper(worker_class),
sampler_args=sampler_args,
worker_args=worker_args)
def setup(self,
algo,
env,
sampler_cls=None,
sampler_args=None,
n_workers=psutil.cpu_count(logical=False),
worker_class=DefaultWorker,
worker_args=None):
"""Set up runner and sessions for algorithm and environment.
This method saves algo and env within runner and creates a sampler,
and initializes all uninitialized variables in session.
Note:
After setup() is called all variables in session should have been
initialized. setup() respects existing values in session so
policy weights can be loaded before setup().
Args:
algo (garage.np.algos.RLAlgorithm): An algorithm instance.
            env (garage.envs.GarageEnv): An environment instance.
sampler_cls (garage.sampler.Sampler): A sampler class.
sampler_args (dict): Arguments to be passed to sampler constructor.
n_workers (int): The number of workers the sampler should use.
worker_class (type): Type of worker the sampler should use.
worker_args (dict or None): Additional arguments that should be
passed to the worker.
"""
self.initialize_tf_vars()
logger.log(self.sess.graph)
super().setup(algo, env, sampler_cls, sampler_args, n_workers,
worker_class, worker_args)
def _start_worker(self):
"""Start Plotter and Sampler workers."""
self._sampler.start_worker()
if self._plot:
# pylint: disable=import-outside-toplevel
from garage.tf.plotter import Plotter
self._plotter = Plotter(self.get_env_copy(),
self._algo.policy,
sess=tf.compat.v1.get_default_session())
self._plotter.start()
def initialize_tf_vars(self):
"""Initialize all uninitialized variables in session."""
with tf.name_scope('initialize_tf_vars'):
uninited_set = [
e.decode() for e in self.sess.run(
tf.compat.v1.report_uninitialized_variables())
]
self.sess.run(
tf.compat.v1.variables_initializer([
v for v in tf.compat.v1.global_variables()
if v.name.split(':')[0] in uninited_set
]))
class __FakeLocalTFRunner:
# noqa: E501; pylint: disable=missing-param-doc,too-few-public-methods,no-method-argument
"""Raises an ImportError for environments without TensorFlow."""
def __init__(*args, **kwargs):
raise ImportError(
'LocalTFRunner requires TensorFlow. To use it, please install '
'TensorFlow.')
if not tf:
LocalTFRunner = __FakeLocalTFRunner # noqa: F811
| 8,263 | 36.058296 | 104 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/meta_evaluator.py | """Evaluator which tests Meta-RL algorithms on test environments."""
from dowel import logger, tabular
from garage import log_multitask_performance, TrajectoryBatch
from garage.experiment.deterministic import get_seed
from garage.sampler import DefaultWorker
from garage.sampler import LocalSampler
from garage.sampler import WorkerFactory
class MetaEvaluator:
"""Evaluates Meta-RL algorithms on test environments.
Args:
test_task_sampler (garage.experiment.TaskSampler): Sampler for test
tasks. To demonstrate the effectiveness of a meta-learning method,
these should be different from the training tasks.
max_path_length (int): Maximum path length used for evaluation
trajectories.
n_test_tasks (int or None): Number of test tasks to sample each time
evaluation is performed. Note that tasks are sampled "without
replacement". If None, is set to `test_task_sampler.n_tasks`.
n_exploration_traj (int): Number of trajectories to gather from the
exploration policy before requesting the meta algorithm to produce
an adapted policy.
n_test_rollouts (int): Number of rollouts to use for each adapted
policy. The adapted policy should forget previous rollouts when
`.reset()` is called.
prefix (str): Prefix to use when logging. Defaults to MetaTest. For
example, this results in logging the key 'MetaTest/SuccessRate'.
If not set to `MetaTest`, it should probably be set to `MetaTrain`.
test_task_names (list[str]): List of task names to test. Should be in
an order consistent with the `task_id` env_info, if that is
present.
worker_class (type): Type of worker the Sampler should use.
worker_args (dict or None): Additional arguments that should be
passed to the worker.
"""
# pylint: disable=too-few-public-methods
def __init__(self,
*,
test_task_sampler,
max_path_length,
n_exploration_traj=10,
n_test_tasks=None,
n_test_rollouts=1,
prefix='MetaTest',
test_task_names=None,
worker_class=DefaultWorker,
worker_args=None):
self._test_task_sampler = test_task_sampler
self._worker_class = worker_class
if worker_args is None:
self._worker_args = {}
else:
self._worker_args = worker_args
if n_test_tasks is None:
n_test_tasks = test_task_sampler.n_tasks
self._n_test_tasks = n_test_tasks
self._n_test_rollouts = n_test_rollouts
self._n_exploration_traj = n_exploration_traj
self._max_path_length = max_path_length
self._eval_itr = 0
self._prefix = prefix
self._test_task_names = test_task_names
self._test_sampler = None
def evaluate(self, algo, test_rollouts_per_task=None):
"""Evaluate the Meta-RL algorithm on the test tasks.
Args:
algo (garage.np.algos.MetaRLAlgorithm): The algorithm to evaluate.
test_rollouts_per_task (int or None): Number of rollouts per task.
"""
if test_rollouts_per_task is None:
test_rollouts_per_task = self._n_test_rollouts
adapted_trajectories = []
        logger.log('Sampling for adaptation and meta-testing...')
if self._test_sampler is None:
self._test_sampler = LocalSampler.from_worker_factory(
WorkerFactory(seed=get_seed(),
max_path_length=self._max_path_length,
n_workers=1,
worker_class=self._worker_class,
worker_args=self._worker_args),
agents=algo.get_exploration_policy(),
envs=self._test_task_sampler.sample(1))
for env_up in self._test_task_sampler.sample(self._n_test_tasks):
policy = algo.get_exploration_policy()
traj = TrajectoryBatch.concatenate(*[
self._test_sampler.obtain_samples(self._eval_itr, 1, policy,
env_up)
for _ in range(self._n_exploration_traj)
])
adapted_policy = algo.adapt_policy(policy, traj)
adapted_traj = self._test_sampler.obtain_samples(
self._eval_itr, test_rollouts_per_task * self._max_path_length,
adapted_policy)
adapted_trajectories.append(adapted_traj)
logger.log('Finished meta-testing...')
if self._test_task_names is not None:
name_map = dict(enumerate(self._test_task_names))
else:
name_map = None
with tabular.prefix(self._prefix + '/' if self._prefix else ''):
log_multitask_performance(
self._eval_itr,
TrajectoryBatch.concatenate(*adapted_trajectories),
getattr(algo, 'discount', 1.0),
name_map=name_map)
self._eval_itr += 1
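# Illustrative usage sketch (editor-added).  'task_sampler' and 'meta_algo'
# are hypothetical objects supplied by the surrounding experiment; the numbers
# are placeholders.
def _example_meta_evaluator(task_sampler, meta_algo):  # pragma: no cover
    evaluator = MetaEvaluator(test_task_sampler=task_sampler,
                              max_path_length=200,
                              n_test_tasks=5,
                              n_test_rollouts=2)
    # Typically called once per training epoch; results are logged under the
    # 'MetaTest/' prefix.
    evaluator.evaluate(meta_algo)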
| 5,196 | 42.672269 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/snapshotter.py | """Defines SnapshotConfig and Snapshotter."""
import collections
import errno
import os
import pathlib
import cloudpickle
import joblib
SnapshotConfig = collections.namedtuple(
'SnapshotConfig', ['snapshot_dir', 'snapshot_mode', 'snapshot_gap'])
class Snapshotter:
"""Snapshotter snapshots training data.
When training, it saves data to binary files. When resuming,
it loads from saved data.
Args:
snapshot_dir (str): Path to save the log and iteration snapshot.
snapshot_mode (str): Mode to save the snapshot. Can be either "all"
(all iterations will be saved), "last" (only the last iteration
will be saved), "gap" (every snapshot_gap iterations are saved),
or "none" (do not save snapshots).
snapshot_gap (int): Gap between snapshot iterations. Wait this number
of iterations before taking another snapshot.
"""
def __init__(self,
snapshot_dir=os.path.join(os.getcwd(),
'data/local/experiment'),
snapshot_mode='last',
snapshot_gap=1):
self._snapshot_dir = snapshot_dir
self._snapshot_mode = snapshot_mode
self._snapshot_gap = snapshot_gap
pathlib.Path(snapshot_dir).mkdir(parents=True, exist_ok=True)
@property
def snapshot_dir(self):
"""Return the directory of snapshot.
Returns:
str: The directory of snapshot
"""
return self._snapshot_dir
@property
def snapshot_mode(self):
"""Return the type of snapshot.
Returns:
str: The type of snapshot. Can be "all", "last" or "gap"
"""
return self._snapshot_mode
@property
def snapshot_gap(self):
"""Return the gap number of snapshot.
Returns:
int: The gap number of snapshot.
"""
return self._snapshot_gap
def save_itr_params(self, itr, params):
"""Save the parameters if at the right iteration.
Args:
itr (int): Number of iterations. Used as the index of snapshot.
params (obj): Content of snapshot to be saved.
Raises:
ValueError: If snapshot_mode is not one of "all", "last" or "gap".
"""
file_name = None
if self._snapshot_mode == 'all':
file_name = os.path.join(self._snapshot_dir, 'itr_%d.pkl' % itr)
elif self._snapshot_mode == 'last':
# override previous params
file_name = os.path.join(self._snapshot_dir, 'params.pkl')
elif self._snapshot_mode == 'gap':
if itr % self._snapshot_gap == 0:
file_name = os.path.join(self._snapshot_dir,
'itr_%d.pkl' % itr)
elif self._snapshot_mode == 'gap_and_last':
if itr % self._snapshot_gap == 0:
file_name = os.path.join(self._snapshot_dir,
'itr_%d.pkl' % itr)
file_name_last = os.path.join(self._snapshot_dir, 'params.pkl')
with open(file_name_last, 'wb') as file:
cloudpickle.dump(params, file)
elif self._snapshot_mode == 'none':
pass
else:
raise ValueError('Invalid snapshot mode {}'.format(
self._snapshot_mode))
if file_name:
with open(file_name, 'wb') as file:
cloudpickle.dump(params, file)
def load(self, load_dir, itr='last'):
# pylint: disable=no-self-use
"""Load one snapshot of parameters from disk.
Args:
load_dir (str): Directory of the cloudpickle file
to resume experiment from.
itr (int or string): Iteration to load.
Can be an integer, 'last' or 'first'.
Returns:
dict: Loaded snapshot.
Raises:
ValueError: If itr is neither an integer nor
one of ("last", "first").
FileNotFoundError: If the snapshot file is not found in load_dir.
NotAFileError: If the snapshot exists but is not a file.
"""
if isinstance(itr, int) or itr.isdigit():
load_from_file = os.path.join(load_dir, 'itr_{}.pkl'.format(itr))
else:
if itr not in ('last', 'first'):
raise ValueError(
"itr should be an integer or 'last' or 'first'")
load_from_file = os.path.join(load_dir, 'params.pkl')
if not os.path.isfile(load_from_file):
files = [f for f in os.listdir(load_dir) if f.endswith('.pkl')]
if not files:
raise FileNotFoundError(errno.ENOENT,
os.strerror(errno.ENOENT),
'*.pkl file in', load_dir)
files.sort()
load_from_file = files[0] if itr == 'first' else files[-1]
load_from_file = os.path.join(load_dir, load_from_file)
if not os.path.isfile(load_from_file):
            raise NotAFileError('Snapshot is not a file: ', load_from_file)
with open(load_from_file, 'rb') as file:
return joblib.load(file)
class NotAFileError(Exception):
"""Raise when the snapshot is not a file."""
| 5,388 | 33.107595 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/experiment/task_sampler.py | """Efficient and general interfaces for sampling tasks for Meta-RL."""
import abc
import copy
import math
import numpy as np
from garage.sampler.env_update import (ExistingEnvUpdate, NewEnvUpdate,
SetTaskUpdate)
def _sample_indices(n_to_sample, n_available_tasks, with_replacement):
"""Select indices of tasks to sample.
Args:
n_to_sample (int): Number of environments to sample. May be greater
than n_available_tasks.
n_available_tasks (int): Number of available tasks. Task indices will
be selected in the range [0, n_available_tasks).
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
np.ndarray[int]: Array of task indices.
"""
if with_replacement:
return np.random.randint(n_available_tasks, size=n_to_sample)
else:
blocks = []
for _ in range(math.ceil(n_to_sample / n_available_tasks)):
s = np.arange(n_available_tasks)
np.random.shuffle(s)
blocks.append(s)
return np.concatenate(blocks)[:n_to_sample]
class TaskSampler(abc.ABC):
"""Class for sampling batches of tasks, represented as `~EnvUpdate`s.
Attributes:
n_tasks (int or None): Number of tasks, if known and finite.
"""
@abc.abstractmethod
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
@property
def n_tasks(self):
"""int or None: The number of tasks if known and finite."""
return None
class ConstructEnvsSampler(TaskSampler):
"""TaskSampler where each task has its own constructor.
Generally, this is used when the different tasks are completely different
environments.
Args:
env_constructors (list[Callable[gym.Env]]): Callables that produce
environments (for example, environment types).
"""
def __init__(self, env_constructors):
self._env_constructors = env_constructors
@property
def n_tasks(self):
"""int: the number of tasks."""
return len(self._env_constructors)
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
return [
NewEnvUpdate(self._env_constructors[i]) for i in _sample_indices(
n_tasks, len(self._env_constructors), with_replacement)
]
class SetTaskSampler(TaskSampler):
"""TaskSampler where the environment can sample "task objects".
This is used for environments that implement `sample_tasks` and `set_task`.
For example, :py:class:`~HalfCheetahVelEnv`, as implemented in Garage.
Args:
env_constructor (Callable[gym.Env]): Callable that produces
an environment (for example, an environment type).
"""
def __init__(self, env_constructor):
self._env_constructor = env_constructor
self._env = env_constructor()
@property
def n_tasks(self):
"""int or None: The number of tasks if known and finite."""
return getattr(self._env, 'num_tasks', None)
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Note that if more tasks are sampled than exist, then tasks may
repeat, but only after every environment has been included at
least once in this batch. Ignored for continuous task spaces.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
return [
SetTaskUpdate(self._env_constructor, task)
for task in self._env.sample_tasks(n_tasks)
]
class EnvPoolSampler(TaskSampler):
"""TaskSampler that samples from a finite pool of environments.
This can be used with any environments, but is generally best when using
in-process samplers with environments that are expensive to construct.
Args:
envs (list[gym.Env]): List of environments to use as a pool.
"""
def __init__(self, envs):
self._envs = envs
@property
def n_tasks(self):
"""int: the number of tasks."""
return len(self._envs)
def sample(self, n_tasks, with_replacement=False):
"""Sample a list of environment updates.
Args:
n_tasks (int): Number of updates to sample.
with_replacement (bool): Whether tasks can repeat when sampled.
Since this cannot be easily implemented for an object pool,
setting this to True results in ValueError.
Raises:
ValueError: If the number of requested tasks is larger than the
pool, or with_replacement is set.
Returns:
list[EnvUpdate]: Batch of sampled environment updates, which, when
invoked on environments, will configure them with new tasks.
See :py:class:`~EnvUpdate` for more information.
"""
if n_tasks > len(self._envs):
raise ValueError('Cannot sample more environments than are '
'present in the pool. If more tasks are needed, '
'call grow_pool to copy random existing tasks.')
if with_replacement:
raise ValueError('EnvPoolSampler cannot meaningfully sample with '
'replacement.')
envs = list(self._envs)
np.random.shuffle(envs)
return [ExistingEnvUpdate(env) for env in envs[:n_tasks]]
def grow_pool(self, new_size):
"""Increase the size of the pool by copying random tasks in it.
Note that this only copies the tasks already in the pool, and cannot
create new original tasks in any way.
Args:
            new_size (int): Size the pool should be after growing.
"""
if new_size <= len(self._envs):
return
to_copy = _sample_indices(new_size - len(self._envs),
len(self._envs),
with_replacement=False)
for idx in to_copy:
self._envs.append(copy.deepcopy(self._envs[idx]))
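# --- Usage sketch (not part of the original garage module) ---
# Illustrates ConstructEnvsSampler with two stock gym environments; this
# assumes gym is installed and provides 'CartPole-v0' and 'MountainCar-v0'.
if __name__ == '__main__':
    import gym
    sampler = ConstructEnvsSampler([
        lambda: gym.make('CartPole-v0'),
        lambda: gym.make('MountainCar-v0'),
    ])
    # Without replacement, every task appears at least once before repeats.
    updates = sampler.sample(4)
    print(sampler.n_tasks, len(updates))  # -> 2 4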
| 7,992 | 34.683036 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/misc/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/misc/tensor_utils.py | """Utility functions for tensors."""
import numpy as np
import scipy.signal
def discount_cumsum(x, discount):
"""Discounted cumulative sum.
See https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html#difference-equation-filtering # noqa: E501
Here, we have y[t] - discount*y[t+1] = x[t]
or rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]
Args:
        x (np.ndarray): Input.
discount (float): Discount factor.
Returns:
        np.ndarray: Discounted cumulative sum.
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1],
axis=0)[::-1]
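# --- Worked example (not part of the original garage module) ---
# For x = [1, 1, 1] and discount = 0.9, the recursion
# y[t] = x[t] + 0.9 * y[t+1] gives [2.71, 1.9, 1.0]. The helper below is an
# illustrative self-check only and can be called manually.
def _discount_cumsum_example():
    """Return the discounted cumulative sum of a tiny hand-checked input."""
    result = discount_cumsum(np.array([1., 1., 1.]), 0.9)
    assert np.allclose(result, [2.71, 1.9, 1.])
    return result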
def explained_variance_1d(ypred, y, valids=None):
"""Explained variation for 1D inputs.
It is the proportion of the variance in one variable that is explained or
predicted from another variable.
Args:
ypred (np.ndarray): Sample data from the first variable.
Shape: :math:`(N, max_path_length)`.
y (np.ndarray): Sample data from the second variable.
Shape: :math:`(N, max_path_length)`.
valids (np.ndarray): Optional argument. Array indicating valid indices.
If None, it assumes the entire input array are valid.
Shape: :math:`(N, max_path_length)`.
Returns:
float: The explained variance.
"""
if valids is not None:
ypred = ypred[valids.astype(np.bool)]
y = y[valids.astype(np.bool)]
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
if np.isclose(vary, 0):
if np.var(ypred) > 0:
return 0
return 1
return 1 - np.var(y - ypred) / (vary + 1e-8)
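# --- Worked example (not part of the original garage module) ---
# A perfect prediction explains all of the variance (value 1.0), while a
# constant prediction of the mean explains almost none of it (value ~0.0).
# The sample values below are arbitrary and used only for illustration.
def _explained_variance_example():
    """Return explained variance for a perfect and a constant predictor."""
    y = np.array([1., 2., 3., 4.])
    perfect = explained_variance_1d(y.copy(), y)
    constant = explained_variance_1d(np.full_like(y, y.mean()), y)
    return perfect, constant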
def flatten_tensors(tensors):
"""Flatten a list of tensors.
Args:
tensors (list[numpy.ndarray]): List of tensors to be flattened.
Returns:
numpy.ndarray: Flattened tensors.
"""
if tensors:
return np.concatenate([np.reshape(x, [-1]) for x in tensors])
return np.asarray([])
def unflatten_tensors(flattened, tensor_shapes):
"""Unflatten a flattened tensors into a list of tensors.
Args:
flattened (numpy.ndarray): Flattened tensors.
tensor_shapes (tuple): Tensor shapes.
Returns:
list[numpy.ndarray]: Unflattened list of tensors.
"""
tensor_sizes = list(map(np.prod, tensor_shapes))
indices = np.cumsum(tensor_sizes)[:-1]
return [
np.reshape(pair[0], pair[1])
for pair in zip(np.split(flattened, indices), tensor_shapes)
]
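# --- Worked example (not part of the original garage module) ---
# flatten_tensors and unflatten_tensors are inverses of each other as long
# as the original shapes are supplied; the shapes below are arbitrary.
def _flatten_round_trip_example():
    """Round-trip two small tensors through flatten/unflatten."""
    tensors = [np.zeros((2, 3)), np.arange(4.)]
    flat = flatten_tensors(tensors)  # shape (10,)
    restored = unflatten_tensors(flat, [(2, 3), (4,)])
    assert all(np.array_equal(a, b) for a, b in zip(tensors, restored))
    return restored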
def pad_tensor(x, max_len, mode='zero'):
"""Pad tensors.
Args:
x (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
mode (str): If 'last', pad with the last element, otherwise pad with 0.
Returns:
numpy.ndarray: Padded tensor.
"""
padding = np.zeros_like(x[0])
if mode == 'last':
padding = x[-1]
return np.concatenate(
[x, np.tile(padding, (max_len - len(x), ) + (1, ) * np.ndim(x[0]))])
def pad_tensor_n(xs, max_len):
"""Pad array of tensors.
Args:
xs (numpy.ndarray): Tensors to be padded.
max_len (int): Maximum length.
Returns:
numpy.ndarray: Padded tensor.
"""
ret = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
for idx, x in enumerate(xs):
ret[idx][:len(x)] = x
return ret
def pad_tensor_dict(tensor_dict, max_len, mode='zero'):
"""Pad dictionary of tensors.
Args:
tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
max_len (int): Maximum length.
mode (str): If 'last', pad with the last element, otherwise pad with 0.
Returns:
dict[numpy.ndarray]: Padded tensor.
"""
keys = list(tensor_dict.keys())
ret = dict()
for k in keys:
if isinstance(tensor_dict[k], dict):
ret[k] = pad_tensor_dict(tensor_dict[k], max_len, mode=mode)
else:
ret[k] = pad_tensor(tensor_dict[k], max_len, mode=mode)
return ret
def stack_tensor_dict_list(tensor_dict_list):
"""Stack a list of dictionaries of {tensors or dictionary of tensors}.
Args:
tensor_dict_list (dict[list]): a list of dictionaries of {tensors or
dictionary of tensors}.
Return:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = stack_tensor_dict_list(dict_list)
else:
v = np.array(dict_list)
ret[k] = v
return ret
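# --- Worked example (not part of the original garage module) ---
# Stacks two per-step agent-info dicts (with one nested dict) into arrays;
# the keys and values are made up purely for illustration.
def _stack_tensor_dict_list_example():
    """Stack two tiny agent-info dicts and return the result."""
    infos = [
        dict(mean=np.array([0.0]), dist=dict(std=np.array([1.0]))),
        dict(mean=np.array([0.5]), dist=dict(std=np.array([2.0]))),
    ]
    stacked = stack_tensor_dict_list(infos)
    # stacked['mean'] has shape (2, 1); stacked['dist']['std'] likewise.
    return stacked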
def stack_and_pad_tensor_dict_list(tensor_dict_list, max_len):
"""Stack and pad array of list of tensors.
Input paths are a list of N dicts, each with values of shape
:math:`(D, S^*)`. This function stack and pad the values with the input
key with max_len, so output will be shape :math:`(N, D, S^*)`.
Args:
tensor_dict_list (list[dict]): List of dict to be stacked and padded.
Value of each dict will be shape of :math:`(D, S^*)`.
max_len (int): Maximum length for padding.
Returns:
dict: a dictionary of {stacked tensors or dictionary of
stacked tensors}. Shape: :math:`(N, D, S^*)`
where N is the len of input paths.
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = stack_and_pad_tensor_dict_list(dict_list, max_len)
else:
v = pad_tensor_n(np.array(dict_list), max_len)
ret[k] = v
return ret
def concat_tensor_dict_list(tensor_dict_list):
"""Concatenate dictionary of list of tensor.
Args:
tensor_dict_list (dict[list]): a list of dictionaries of {tensors or
dictionary of tensors}.
Return:
        dict: a dictionary of {concatenated tensors or dictionary of
            concatenated tensors}
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
dict_list = [x[k] if k in x else [] for x in tensor_dict_list]
if isinstance(example, dict):
v = concat_tensor_dict_list(dict_list)
else:
v = np.concatenate(dict_list, axis=0)
ret[k] = v
return ret
def split_tensor_dict_list(tensor_dict):
"""Split dictionary of list of tensor.
Args:
tensor_dict (dict[numpy.ndarray]): a dictionary of {tensors or
dictionary of tensors}.
Return:
        list[dict]: a list of dictionaries, where the i-th dictionary
            contains the i-th element of every tensor (or nested
            dictionary of tensors)
"""
keys = list(tensor_dict.keys())
ret = None
for k in keys:
vals = tensor_dict[k]
if isinstance(vals, dict):
vals = split_tensor_dict_list(vals)
if ret is None:
ret = [{k: v} for v in vals]
else:
for v, cur_dict in zip(vals, ret):
cur_dict[k] = v
return ret
def truncate_tensor_dict(tensor_dict, truncated_len):
"""Truncate dictionary of list of tensor.
Args:
tensor_dict (dict[numpy.ndarray]): a dictionary of {tensors or
dictionary of tensors}.
truncated_len (int): Length to truncate.
Return:
        dict: a dictionary of {truncated tensors or dictionary of
            truncated tensors}
"""
ret = dict()
for k, v in tensor_dict.items():
if isinstance(v, dict):
ret[k] = truncate_tensor_dict(v, truncated_len)
else:
ret[k] = v[:truncated_len]
return ret
def normalize_pixel_batch(observations):
"""Normalize the observations (images).
Normalize pixel values to be between [0, 1].
Args:
observations (numpy.ndarray): Observations from environment.
            Observations should be unflattened and should contain pixel
values.
Returns:
numpy.ndarray: Normalized observations.
"""
return [obs.astype(np.float32) / 255.0 for obs in observations]
def slice_nested_dict(dict_or_array, start, stop):
"""Slice a dictionary containing arrays (or dictionaries).
This function is primarily intended for un-batching env_infos and
action_infos.
Args:
dict_or_array (dict[str, dict or np.ndarray] or np.ndarray): A nested
dictionary should only contain dictionaries and numpy arrays
(recursively).
start (int): First index to be included in the slice.
stop (int): First index to be excluded from the slice. In other words,
these are typical python slice indices.
Returns:
dict or np.ndarray: The input, but sliced.
"""
if isinstance(dict_or_array, dict):
return {
k: slice_nested_dict(v, start, stop)
for (k, v) in dict_or_array.items()
}
else:
# It *should* be a numpy array (unless someone ignored the type
# signature).
return dict_or_array[start:stop]
def rrse(actual, predicted):
"""Root Relative Squared Error.
Args:
actual (np.ndarray): The actual value.
predicted (np.ndarray): The predicted value.
Returns:
float: The root relative square error between the actual and the
predicted value.
"""
return np.sqrt(
np.sum(np.square(actual - predicted)) /
np.sum(np.square(actual - np.mean(actual))))
def sliding_window(t, window, smear=False):
"""Create a sliding window over a tensor.
Args:
t (np.ndarray): A tensor to create sliding window from,
with shape :math:`(N, D)`, where N is the length of a trajectory,
D is the dimension of each step in trajectory.
        window (int): Window size, must be no greater than N.
smear (bool): If true, copy the last window so that N windows are
generated.
Returns:
np.ndarray: All windows generate over t, with shape :math:`(M, W, D)`,
            where W is the window size. If smear is False, M is :math:`N-W+1`,
otherwise M is N.
Raises:
ValueError: If window size is larger than the input tensor.
"""
if window > t.shape[0]:
raise ValueError('`window` must be <= `t.shape[0]`')
if window == t.shape[0]:
return np.stack([t] * window)
# The stride trick works only on the last dimension of an ndarray, so we
# operate on the transpose, which reverses the dimensions of t.
t_T = t.T
shape = t_T.shape[:-1] + (t_T.shape[-1] - window, window)
strides = t_T.strides + (t_T.strides[-1], )
t_T_win = np.lib.stride_tricks.as_strided(t_T,
shape=shape,
strides=strides)
# t_T_win has shape (d_k, d_k-1, ..., (n - window_size), window_size)
# To arrive at the final shape, we first transpose the result to arrive at
    # (window_size, (n - window_size), d_1, ..., d_k), then swap the first two
# axes
t_win = np.swapaxes(t_T_win.T, 0, 1)
# Optionally smear the last element to preserve the first dimension
if smear:
t_win = pad_tensor(t_win, t.shape[0], mode='last')
return t_win
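# --- Usage sketch (not part of the original garage module) ---
# Runs sliding_window on a toy 5-step trajectory with feature dimension 2;
# the numbers are arbitrary and the shapes are printed rather than asserted.
if __name__ == '__main__':
    traj = np.arange(10.).reshape(5, 2)  # (N=5, D=2)
    print(sliding_window(traj, window=3).shape)
    print(sliding_window(traj, window=3, smear=True).shape)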
| 11,535 | 28.808786 | 115 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/__init__.py | """Reinforcement Learning Algorithms which use NumPy as a numerical backend."""
from garage.np._functions import obtain_evaluation_samples
from garage.np._functions import paths_to_tensors
from garage.np._functions import samples_to_tensors
__all__ = [
'obtain_evaluation_samples', 'paths_to_tensors', 'samples_to_tensors'
]
| 330 | 35.777778 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/_functions.py | """Utility functions for NumPy-based Reinforcement learning algorithms."""
import numpy as np
from garage._dtypes import TrajectoryBatch
from garage.misc import tensor_utils
from garage.sampler.utils import rollout
def samples_to_tensors(paths):
"""Return processed sample data based on the collected paths.
Args:
paths (list[dict]): A list of collected paths.
Returns:
dict: Processed sample data, with keys
* undiscounted_returns (list[float])
* success_history (list[float])
* complete (list[bool])
"""
success_history = [
path['success_count'] / path['running_length'] for path in paths
]
undiscounted_returns = [path['undiscounted_return'] for path in paths]
# check if the last path is complete
complete = [path['dones'][-1] for path in paths]
samples_data = dict(undiscounted_returns=undiscounted_returns,
success_history=success_history,
complete=complete)
return samples_data
def obtain_evaluation_samples(policy, env, max_path_length=1000,
num_trajs=100):
"""Sample the policy for num_trajs trajectories and return average values.
Args:
policy (garage.Policy): Policy to use as the actor when
gathering samples.
        env (garage.envs.GarageEnv): The environment used to obtain
trajectories.
max_path_length (int): Maximum path length. The episode will
terminate when length of trajectory reaches max_path_length.
num_trajs (int): Number of trajectories.
Returns:
TrajectoryBatch: Evaluation trajectories, representing the best
current performance of the algorithm.
"""
paths = []
# Use a finite length rollout for evaluation.
for _ in range(num_trajs):
path = rollout(env,
policy,
max_path_length=max_path_length,
deterministic=True)
paths.append(path)
return TrajectoryBatch.from_trajectory_list(env.spec, paths)
def paths_to_tensors(paths, max_path_length, baseline_predictions, discount):
"""Return processed sample data based on the collected paths.
Args:
paths (list[dict]): A list of collected paths.
max_path_length (int): Maximum length of a single rollout.
        baseline_predictions (numpy.ndarray): Predicted value of GAE
(Generalized Advantage Estimation) Baseline.
discount (float): Environment reward discount.
Returns:
        dict: Processed sample data, with keys
* observations (numpy.ndarray): Padded array of the observations of
the environment
            * actions (numpy.ndarray): Padded array of the actions fed to
the environment
* rewards (numpy.ndarray): Padded array of the acquired rewards
* agent_infos (dict): a dictionary of {stacked tensors or
dictionary of stacked tensors}
* env_infos (dict): a dictionary of {stacked tensors or
dictionary of stacked tensors}
            * valids (numpy.ndarray): Padded array of the validity information
"""
baselines = []
returns = []
for idx, path in enumerate(paths):
# baselines
path['baselines'] = baseline_predictions[idx]
baselines.append(path['baselines'])
# returns
path['returns'] = tensor_utils.discount_cumsum(path['rewards'],
discount)
returns.append(path['returns'])
obs = [path['observations'] for path in paths]
obs = tensor_utils.pad_tensor_n(obs, max_path_length)
actions = [path['actions'] for path in paths]
actions = tensor_utils.pad_tensor_n(actions, max_path_length)
rewards = [path['rewards'] for path in paths]
rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
agent_infos = [path['agent_infos'] for path in paths]
agent_infos = tensor_utils.stack_tensor_dict_list([
tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos
])
env_infos = [path['env_infos'] for path in paths]
env_infos = tensor_utils.stack_tensor_dict_list(
[tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos])
valids = [np.ones_like(path['returns']) for path in paths]
valids = tensor_utils.pad_tensor_n(valids, max_path_length)
samples_data = dict(observations=obs,
actions=actions,
rewards=rewards,
agent_infos=agent_infos,
env_infos=env_infos,
valids=valids)
return samples_data
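# --- Usage sketch (not part of the original garage module) ---
# Builds one tiny fake path by hand (3 steps, 2-D observations, 1-D actions)
# and shows the padded tensors produced by paths_to_tensors; every value is
# made up purely for illustration.
if __name__ == '__main__':
    path = dict(
        observations=np.zeros((3, 2)),
        actions=np.zeros((3, 1)),
        rewards=np.ones(3),
        agent_infos=dict(mean=np.zeros((3, 1))),
        env_infos=dict(done=np.array([False, False, True])),
    )
    out = paths_to_tensors([path],
                           max_path_length=5,
                           baseline_predictions=[np.zeros(3)],
                           discount=0.99)
    print(out['observations'].shape)  # -> (1, 5, 2), padded to max_path_length
    print(out['valids'].sum())  # -> 3.0 valid (unpadded) steps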
| 4,795 | 34.791045 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/algos/__init__.py | """Reinforcement learning algorithms which use NumPy as a numerical backend."""
from garage.np.algos.cem import CEM
from garage.np.algos.cma_es import CMAES
from garage.np.algos.meta_rl_algorithm import MetaRLAlgorithm
from garage.np.algos.nop import NOP
from garage.np.algos.rl_algorithm import RLAlgorithm
__all__ = [
'RLAlgorithm',
'CEM',
'CMAES',
'MetaRLAlgorithm',
'NOP',
]
| 400 | 25.733333 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/algos/cem.py | """Cross Entropy Method."""
import collections
from dowel import logger, tabular
import numpy as np
from garage import log_performance, TrajectoryBatch
from garage.np import paths_to_tensors
from garage.np.algos.rl_algorithm import RLAlgorithm
from garage.tf.samplers import BatchSampler
class CEM(RLAlgorithm):
"""Cross Entropy Method.
    CEM works by iteratively optimizing a Gaussian distribution over
    policy parameters.
In each epoch, CEM does the following:
    1. Sample n_samples policies from a Gaussian distribution with
mean cur_mean and std cur_std.
2. Do rollouts for each policy.
3. Update cur_mean and cur_std by doing Maximum Likelihood Estimation
over the n_best top policies in terms of return.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.np.policies.Policy): Action policy.
baseline(garage.np.baselines.Baseline): Baseline for GAE
(Generalized Advantage Estimation).
n_samples (int): Number of policies sampled in one epoch.
discount (float): Environment reward discount.
max_path_length (int): Maximum length of a single rollout.
        best_frac (float): Fraction of the sampled policies, ranked by
            return, that is used to refit the parameter distribution.
init_std (float): Initial std for policy param distribution.
extra_std (float): Decaying std added to param distribution.
extra_decay_time (float): Epochs that it takes to decay extra std.
"""
def __init__(self,
env_spec,
policy,
baseline,
n_samples,
discount=0.99,
max_path_length=500,
init_std=1,
best_frac=0.05,
extra_std=1.,
extra_decay_time=100):
self.policy = policy
self.max_path_length = max_path_length
self.sampler_cls = BatchSampler
self._best_frac = best_frac
self._baseline = baseline
self._init_std = init_std
self._extra_std = extra_std
self._extra_decay_time = extra_decay_time
self._episode_reward_mean = collections.deque(maxlen=100)
self._env_spec = env_spec
self._discount = discount
self._n_samples = n_samples
self._cur_std = None
self._cur_mean = None
self._cur_params = None
self._all_returns = None
self._all_params = None
self._n_best = None
self._n_params = None
def _sample_params(self, epoch):
"""Return sample parameters.
Args:
epoch (int): Epoch number.
Returns:
np.ndarray: A numpy array of parameter values.
"""
extra_var_mult = max(1.0 - epoch / self._extra_decay_time, 0)
sample_std = np.sqrt(
np.square(self._cur_std) +
np.square(self._extra_std) * extra_var_mult)
return np.random.standard_normal(
self._n_params) * sample_std + self._cur_mean
def train(self, runner):
"""Initialize variables and start training.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
# epoch-wise
self._cur_std = self._init_std
self._cur_mean = self.policy.get_param_values()
# epoch-cycle-wise
self._cur_params = self._cur_mean
self._all_returns = []
self._all_params = [self._cur_mean.copy()]
# constant
self._n_best = int(self._n_samples * self._best_frac)
assert self._n_best >= 1, (
'n_samples is too low. Make sure that n_samples * best_frac >= 1')
self._n_params = len(self._cur_mean)
# start actual training
last_return = None
for _ in runner.step_epochs():
for _ in range(self._n_samples):
runner.step_path = runner.obtain_samples(runner.step_itr)
last_return = self.train_once(runner.step_itr,
runner.step_path)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
float: The average return of epoch cycle.
"""
# -- Stage: Calculate baseline
if hasattr(self._baseline, 'predict_n'):
baseline_predictions = self._baseline.predict_n(paths)
else:
baseline_predictions = [
self._baseline.predict(path) for path in paths
]
# -- Stage: Pre-process samples based on collected paths
samples_data = paths_to_tensors(paths, self.max_path_length,
baseline_predictions, self._discount)
# -- Stage: Run and calculate performance of the algorithm
undiscounted_returns = log_performance(
itr,
TrajectoryBatch.from_trajectory_list(self._env_spec, paths),
discount=self._discount)
self._episode_reward_mean.extend(undiscounted_returns)
tabular.record('Extras/EpisodeRewardMean',
np.mean(self._episode_reward_mean))
samples_data['average_return'] = np.mean(undiscounted_returns)
epoch = itr // self._n_samples
i_sample = itr - epoch * self._n_samples
tabular.record('Epoch', epoch)
tabular.record('# Sample', i_sample)
# -- Stage: Process samples_data
rtn = samples_data['average_return']
self._all_returns.append(samples_data['average_return'])
# -- Stage: Update policy distribution.
if (itr + 1) % self._n_samples == 0:
avg_rtns = np.array(self._all_returns)
best_inds = np.argsort(-avg_rtns)[:self._n_best]
best_params = np.array(self._all_params)[best_inds]
# MLE of normal distribution
self._cur_mean = best_params.mean(axis=0)
self._cur_std = best_params.std(axis=0)
self.policy.set_param_values(self._cur_mean)
# Clear for next epoch
rtn = max(self._all_returns)
self._all_returns.clear()
self._all_params.clear()
# -- Stage: Generate a new policy for next path sampling
self._cur_params = self._sample_params(itr)
self._all_params.append(self._cur_params.copy())
self.policy.set_param_values(self._cur_params)
logger.log(tabular)
return rtn
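# --- Standalone sketch of the CEM update (not part of this module) ---
# The loop below mirrors steps 1-3 from the class docstring on a toy
# 2-parameter objective instead of policy rollouts; all constants are
# arbitrary and chosen only for illustration.
if __name__ == '__main__':
    target = np.array([1.0, -2.0])
    cur_mean, cur_std = np.zeros(2), np.ones(2)
    n_samples, best_frac = 50, 0.2
    for _ in range(20):
        # 1. Sample candidate parameter vectors from N(cur_mean, cur_std).
        params = (np.random.standard_normal((n_samples, 2)) * cur_std
                  + cur_mean)
        # 2. "Rollout": here the return is just the negative squared error.
        returns = -np.sum((params - target) ** 2, axis=1)
        # 3. Refit the distribution to the best fraction of candidates (MLE).
        best = params[np.argsort(-returns)[:int(n_samples * best_frac)]]
        cur_mean, cur_std = best.mean(axis=0), best.std(axis=0)
    print(cur_mean)  # approaches `target`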
| 6,825 | 34.73822 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/algos/cma_es.py | """Covariance Matrix Adaptation Evolution Strategy."""
import collections
import cma
from dowel import logger, tabular
import numpy as np
from garage import log_performance, TrajectoryBatch
from garage.np import paths_to_tensors
from garage.np.algos.rl_algorithm import RLAlgorithm
from garage.tf.samplers import BatchSampler
class CMAES(RLAlgorithm):
"""Covariance Matrix Adaptation Evolution Strategy.
Note:
The CMA-ES method can hardly learn a successful policy even for
        simple tasks. It is still maintained here only for consistency
        with the original rllab paper.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.np.policies.Policy): Action policy.
baseline (garage.np.baselines.Baseline): Baseline for GAE
(Generalized Advantage Estimation).
n_samples (int): Number of policies sampled in one epoch.
discount (float): Environment reward discount.
max_path_length (int): Maximum length of a single rollout.
sigma0 (float): Initial std for param distribution.
"""
def __init__(self,
env_spec,
policy,
baseline,
n_samples,
discount=0.99,
max_path_length=500,
sigma0=1.):
self.policy = policy
self.max_path_length = max_path_length
self.sampler_cls = BatchSampler
self._env_spec = env_spec
self._discount = discount
self._sigma0 = sigma0
self._n_samples = n_samples
self._baseline = baseline
self._episode_reward_mean = collections.deque(maxlen=100)
self._es = None
self._all_params = None
self._cur_params = None
self._all_returns = None
def _sample_params(self):
"""Return sample parameters.
Returns:
np.ndarray: A numpy array of parameter values.
"""
return self._es.ask()
def train(self, runner):
"""Initialize variables and start training.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
Returns:
float: The average return in last epoch cycle.
"""
init_mean = self.policy.get_param_values()
self._es = cma.CMAEvolutionStrategy(init_mean, self._sigma0,
{'popsize': self._n_samples})
self._all_params = self._sample_params()
self._cur_params = self._all_params[0]
self.policy.set_param_values(self._cur_params)
self._all_returns = []
# start actual training
last_return = None
for _ in runner.step_epochs():
for _ in range(self._n_samples):
runner.step_path = runner.obtain_samples(runner.step_itr)
last_return = self.train_once(runner.step_itr,
runner.step_path)
runner.step_itr += 1
return last_return
def train_once(self, itr, paths):
"""Perform one step of policy optimization given one batch of samples.
Args:
itr (int): Iteration number.
paths (list[dict]): A list of collected paths.
Returns:
float: The average return in last epoch cycle.
"""
# -- Stage: Calculate baseline
if hasattr(self._baseline, 'predict_n'):
baseline_predictions = self._baseline.predict_n(paths)
else:
baseline_predictions = [
self._baseline.predict(path) for path in paths
]
# -- Stage: Pre-process samples based on collected paths
samples_data = paths_to_tensors(paths, self.max_path_length,
baseline_predictions, self._discount)
# -- Stage: Run and calculate performance of the algorithm
undiscounted_returns = log_performance(
itr,
TrajectoryBatch.from_trajectory_list(self._env_spec, paths),
discount=self._discount)
self._episode_reward_mean.extend(undiscounted_returns)
tabular.record('Extras/EpisodeRewardMean',
np.mean(self._episode_reward_mean))
samples_data['average_return'] = np.mean(undiscounted_returns)
epoch = itr // self._n_samples
i_sample = itr - epoch * self._n_samples
tabular.record('Epoch', epoch)
tabular.record('# Sample', i_sample)
rtn = samples_data['average_return']
self._all_returns.append(samples_data['average_return'])
if (itr + 1) % self._n_samples == 0:
avg_rtns = np.array(self._all_returns)
self._es.tell(self._all_params, -avg_rtns)
self.policy.set_param_values(self._es.best.get()[0])
# Clear for next epoch
rtn = max(self._all_returns)
self._all_returns.clear()
self._all_params = self._sample_params()
self._cur_params = self._all_params[(i_sample + 1) % self._n_samples]
self.policy.set_param_values(self._cur_params)
logger.log(tabular)
return rtn
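# --- Standalone sketch of the ask/tell loop (not part of this module) ---
# Shows the same `cma` calls this class relies on (ask, tell, best.get)
# applied to a toy quadratic instead of policy returns; assumes the `cma`
# package is installed, and all constants below are arbitrary.
if __name__ == '__main__':
    es = cma.CMAEvolutionStrategy(np.zeros(2), 1.0, {'popsize': 10})
    for _ in range(20):
        candidates = es.ask()  # sample a population of parameter vectors
        costs = [np.sum((np.asarray(x) - 3.0) ** 2) for x in candidates]
        es.tell(candidates, costs)  # CMA-ES minimizes the reported costs
    print(es.best.get()[0])  # roughly [3., 3.]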
| 5,339 | 33.230769 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/algos/meta_rl_algorithm.py | """Interface of Meta-RL Algorithms."""
import abc
from garage.np.algos.rl_algorithm import RLAlgorithm
class MetaRLAlgorithm(RLAlgorithm, abc.ABC):
"""Base class for Meta-RL Algorithms."""
@abc.abstractmethod
def get_exploration_policy(self):
"""Return a policy used before adaptation to a specific task.
Each time it is retrieved, this policy should only be evaluated in one
task.
Returns:
garage.Policy: The policy used to obtain samples that are later
used for meta-RL adaptation.
"""
@abc.abstractmethod
def adapt_policy(self, exploration_policy, exploration_trajectories):
"""Produce a policy adapted for a task.
Args:
exploration_policy (garage.Policy): A policy which was returned
from get_exploration_policy(), and which generated
exploration_trajectories by interacting with an environment.
The caller may not use this object after passing it into this
method.
exploration_trajectories (garage.TrajectoryBatch): Trajectories to
adapt to, generated by exploration_policy exploring the
environment.
Returns:
garage.Policy: A policy adapted to the task represented by the
exploration_trajectories.
"""
| 1,387 | 32.047619 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/algos/nop.py | """NOP (no optimization performed) policy search algorithm."""
from garage.np.algos.rl_algorithm import RLAlgorithm
class NOP(RLAlgorithm):
"""NOP (no optimization performed) policy search algorithm."""
def init_opt(self):
"""Initialize the optimization procedure."""
def optimize_policy(self, paths):
"""Optimize the policy using the samples.
Args:
paths (list[dict]): A list of collected paths.
"""
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
"""
| 798 | 27.535714 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/algos/rl_algorithm.py | """Interface of RLAlgorithm."""
import abc
class RLAlgorithm(abc.ABC):
"""Base class for all the algorithms.
Note:
        If the field sampler_cls exists, it will be used by
        LocalRunner.setup to initialize a sampler.
"""
# pylint: disable=too-few-public-methods
@abc.abstractmethod
def train(self, runner):
"""Obtain samplers and start actual training for each epoch.
Args:
runner (LocalRunner): LocalRunner is passed to give algorithm
the access to runner.step_epochs(), which provides services
such as snapshotting and sampler control.
"""
| 646 | 23.884615 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/src/garage/np/baselines/__init__.py | """Baselines (value functions) which use NumPy as a numerical backend."""
from garage.np.baselines.baseline import Baseline
from garage.np.baselines.linear_feature_baseline import LinearFeatureBaseline
from garage.np.baselines.linear_multi_feature_baseline import (
LinearMultiFeatureBaseline)
from garage.np.baselines.zero_baseline import ZeroBaseline
__all__ = [
'Baseline', 'LinearFeatureBaseline', 'LinearMultiFeatureBaseline',
'ZeroBaseline'
]
| 462 | 37.583333 | 77 | py |