repo (string, 2-152 chars, nullable) | file (string, 15-239 chars) | code (string, 0-58.4M chars) | file_length (int64, 0-58.4M) | avg_line_length (float64, 0-1.81M) | max_line_length (int64, 0-12.7M) | extension_type (string, 364 classes) |
---|---|---|---|---|---|---|
signal_transformer | signal_transformer-master/training/training.py | import os
from argparse import ArgumentParser
import tensorflow as tf
from tensorflow_addons.optimizers import AdamW
import yaml
from utils.dataset import load_dataset
from utils.experiment import ExperimentHandler, restore_from_checkpoint_latest, ds_tqdm
from utils.device import allow_memory_growth
from model.signal_transformer import SignalTransformer
from loss.triplet_loss import batch_all_triplet_loss
def main(args):
# load data
train_ds, val_ds = load_dataset(args.dataset_path, args.batch_size)
model = SignalTransformer(num_signals=6,
num_layers=args.num_encoder_layers,
d_model=args.d_model,
num_heads=args.num_heads,
dff=args.dff,
latent_vector_size=args.lv_size,
input_signal_length=160)
model.warmup()
model.summary()
# prepare optimization method and helpers
eta_f = tf.keras.optimizers.schedules.ExponentialDecay(args.eta, args.eta_decay_steps, args.eta_beta)
wd_f = tf.keras.experimental.CosineDecay(args.weight_decay, args.weight_decay_steps, args.weight_decay_alpha)
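# note: the schedules above are applied manually inside the training loop; eta and wd are
# tf.Variables so that eta.assign()/wd.assign() can update the AdamW optimizer in place
# (tensorflow_addons' AdamW takes weight_decay first, then learning_rate)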
eta = tf.Variable(args.eta)
wd = tf.Variable(args.weight_decay)
optimizer = AdamW(wd, eta)
experiment_handler = ExperimentHandler(
args.working_path, args.out_name,
model=model,
optimizer=optimizer
)
# restore if provided
if args.restore_path is not None:
restore_from_checkpoint_latest(
path=args.restore_path,
model=model,
optimizer=optimizer
)
def query(signals, positions, training):
latent_vectors = model(signals, tf.convert_to_tensor(training))
triplet_loss, fraction_tl = batch_all_triplet_loss(positions, latent_vectors,
margin=args.margin,
dist_threshold=args.dist_threshold)
return triplet_loss, latent_vectors, fraction_tl
def train_step_fn(signals, positions):
with tf.GradientTape() as tape:
loss, latent_vectors, fraction_tl = query(signals, positions, True)
total_loss = tf.reduce_mean(loss)
t_vars = model.trainable_variables
grads = tape.gradient(total_loss, t_vars)
optimizer.apply_gradients(zip(grads, t_vars))
return loss, latent_vectors, fraction_tl
# run training and validation
epoch = 0
train_step, val_step = 0, 0
best_result = None
late_stop = 0
mean_loss = tf.metrics.Mean('triplet_loss')
mean_fraction_valid_triplets = tf.metrics.Mean('fraction_valid_triplets')
while True:
experiment_handler.log_training()
mean_loss.reset_states()
mean_fraction_valid_triplets.reset_states()
for i, signals, positions in ds_tqdm('Train', train_ds, epoch, args.batch_size):
eta.assign(eta_f(train_step))
wd.assign(wd_f(train_step))
loss, latent_vectors, fraction_tl = train_step_fn(signals, positions)
mean_loss(loss)
mean_fraction_valid_triplets(fraction_tl)
if train_step % args.log_interval == 0:
tf.summary.scalar('info/eta', eta, step=train_step)
tf.summary.scalar('info/weight_decay', wd, step=train_step)
tf.summary.scalar('metrics/triplet_loss', tf.reduce_mean(loss), step=train_step)
tf.summary.scalar('metrics/fraction_valid_triplets', fraction_tl, step=train_step)
train_step += 1
result_loss = mean_loss.result()
result_fraction_valid_triplets = mean_fraction_valid_triplets.result()
tf.summary.scalar('epoch/triplet_loss', result_loss, step=epoch)
tf.summary.scalar('epoch/fraction_valid_triplets', result_fraction_valid_triplets, step=epoch)
experiment_handler.save_last()
experiment_handler.flush()
experiment_handler.log_validation()
mean_loss.reset_states()
mean_fraction_valid_triplets.reset_states()
for i, signals, positions in ds_tqdm('Validation', val_ds, epoch, args.batch_size):
loss, latent_vectors, fraction_tl = query(signals, positions, False)
mean_loss(loss)
mean_fraction_valid_triplets(fraction_tl)
if val_step % args.log_interval == 0:
tf.summary.scalar('metrics/triplet_loss', tf.reduce_mean(loss), step=val_step)
tf.summary.scalar('metrics/fraction_valid_triplets', fraction_tl, step=val_step)
val_step += 1
result_loss = mean_loss.result()
result_fraction_valid_triplets = mean_fraction_valid_triplets.result()
tf.summary.scalar('epoch/triplet_loss', result_loss, step=epoch)
tf.summary.scalar('epoch/fraction_valid_triplets', result_fraction_valid_triplets, step=epoch)
experiment_handler.flush()
if best_result is None or result_loss < best_result:
experiment_handler.save_best()
model.save_weights(os.path.join(experiment_handler.export_model_path, 'model'))
best_result = result_loss
late_stop = 0
elif epoch >= args.num_epochs > 0:
late_stop += 1
if late_stop > args.late_stop_threshold:
break
else:
late_stop = 0
epoch += 1
if __name__ == '__main__':
print(os.getcwd())
parser = ArgumentParser()
parser.add_argument('--dataset-path', type=str, required=True)
parser.add_argument('--working-path', type=str, default='./workspace')
parser.add_argument('--restore-path', type=str)
parser.add_argument('--log-interval', type=int, default=1)
parser.add_argument('--batch-size', type=int, required=True)
parser.add_argument('--eta', type=float, default=5e-4)
parser.add_argument('--eta-decay-steps', type=int, default=100)
parser.add_argument('--eta-beta', type=float, default=0.99)
parser.add_argument('--num-epochs', type=int, default=-1)
parser.add_argument('--late-stop-threshold', type=int, default=-1)
parser.add_argument('--out-name', type=str, required=True)
parser.add_argument('--weight-decay', type=float, default=2e-4)
parser.add_argument('--weight-decay-steps', type=float, default=20000)
parser.add_argument('--weight-decay-alpha', type=float, default=1e-3)
parser.add_argument('--allow-memory-growth', action='store_true', default=False)
parser.add_argument('--dist-threshold', type=float, default=0.25)
parser.add_argument('--margin', type=float, default=0.1)
parser.add_argument('--num-encoder-layers', type=int, default=8)
parser.add_argument('--d-model', type=int, default=16)
parser.add_argument('--num-heads', type=int, default=8)
parser.add_argument('--dff', type=int, default=2048)
parser.add_argument('--lv-size', type=int, default=256)
args, _ = parser.parse_known_args()
config = {
'dist_threshold': args.dist_threshold,
'margin': args.margin,
'num_encoder_layers': args.num_encoder_layers,
'd_model': args.d_model,
'num_heads': args.num_heads,
'dff': args.dff,
'lv_size': args.lv_size
}
os.mkdir(os.path.join(args.working_path, args.out_name))
with open(os.path.join(args.working_path, args.out_name, 'model_config.yaml'), 'w') as outfile:
yaml.dump(config, outfile, default_flow_style=False)
if args.allow_memory_growth:
allow_memory_growth()
main(args)
| 7,668 | 38.530928 | 113 | py |
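The `loss/triplet_loss.py` module imported by `training.py` above is not included in this excerpt. As a rough sketch inferred from the call site (an assumption, not the repository's actual implementation; the helper name `_pairwise_distances` and the exact masking and reduction are mine), a batch-all triplet loss that uses a position-distance threshold to define positives could look like this:
```
import tensorflow as tf

def _pairwise_distances(x):
    # Euclidean distances between all row pairs of x, shape (B, B)
    dot = tf.matmul(x, x, transpose_b=True)
    sq = tf.linalg.diag_part(dot)
    d2 = tf.maximum(sq[:, None] - 2.0 * dot + sq[None, :], 0.0)
    return tf.sqrt(d2 + 1e-12)

def batch_all_triplet_loss(positions, embeddings, margin=0.1, dist_threshold=0.25):
    emb_d = _pairwise_distances(embeddings)   # distances in latent space
    pos_d = _pairwise_distances(positions)    # distances in physical (OptiTrack) space
    batch = tf.shape(embeddings)[0]
    # anchors and positives are samples recorded close to each other in space
    is_pos = tf.cast(pos_d < dist_threshold, tf.float32) * (1.0 - tf.eye(batch))
    is_neg = tf.cast(pos_d >= dist_threshold, tf.float32)
    # loss[a, p, n] = d(a, p) - d(a, n) + margin over every (anchor, positive, negative)
    triplet = emb_d[:, :, None] - emb_d[:, None, :] + margin
    mask = is_pos[:, :, None] * is_neg[:, None, :]
    triplet = tf.maximum(triplet * mask, 0.0)
    num_active = tf.reduce_sum(tf.cast(triplet > 1e-16, tf.float32))
    loss = tf.reduce_sum(triplet) / (num_active + 1e-16)
    fraction_active = num_active / (tf.reduce_sum(mask) + 1e-16)
    return loss, fraction_active
```
The scalar return value is consistent with how `training.py` consumes it (`tf.reduce_mean(loss)` is a no-op on a scalar), but the real module may reduce or mask differently.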
signal_transformer | signal_transformer-master/utils/__init__.py | | 0 | 0 | 0 | py |
signal_transformer | signal_transformer-master/utils/dataset.py | import tensorflow as tf
import pickle
def tf_dataset(dataset, start_cut=0, cut_length=160):
def generator():
for step in dataset:
s = step['signal'][start_cut:(start_cut + cut_length)]
signal = tf.convert_to_tensor(s, tf.float32)
position = tf.convert_to_tensor(step['position_optitrack'], tf.float32)
yield signal, position
return tf.data.Dataset.from_generator(
generator=generator,
output_types=(tf.float32, tf.float32),
output_shapes=(tf.TensorShape([None, 6]), tf.TensorShape([3]))
)
def load_dataset(path, batch_size):
with open(path, 'rb') as fp:
dataset = pickle.load(fp)
# load data
train_ds = tf_dataset(dataset['train_ds'], 0, 160) \
.shuffle(512) \
.padded_batch(batch_size, ([None, 6], [3]))
val_ds = tf_dataset(dataset['val_ds'], 0, 160) \
.shuffle(512) \
.padded_batch(batch_size, ([None, 6], [3]))
return train_ds, val_ds
| 997 | 29.242424 | 83 | py |
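A hypothetical smoke test (not part of the repository) that builds a tiny synthetic dataset in the structure `tf_dataset` expects; the record keys `signal` and `position_optitrack` are taken from the code above, while the array sizes are made up for illustration:
```
import numpy as np
import tensorflow as tf

dummy = [
    {'signal': np.random.randn(200, 6).astype(np.float32),
     'position_optitrack': np.random.randn(3).astype(np.float32)}
    for _ in range(4)
]
ds = tf_dataset(dummy, start_cut=0, cut_length=160).batch(2)
for signal, position in ds.take(1):
    print(signal.shape, position.shape)  # expected: (2, 160, 6) (2, 3)
```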
signal_transformer | signal_transformer-master/utils/device.py | import tensorflow as tf
def allow_memory_growth():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
| 600 | 36.5625 | 81 | py |
signal_transformer | signal_transformer-master/utils/experiment.py | import io
import os
import zipfile
import requests
import tensorflow as tf
from tqdm import tqdm
class ExperimentHandler(object):
def __init__(self, working_path, out_name, max_to_keep=3, **objects_to_save):
super(ExperimentHandler, self).__init__()
# prepare log writers
train_log_path = _get_or_create_dir(working_path, out_name, 'logs', 'train')
val_log_path = _get_or_create_dir(working_path, out_name, 'logs', 'val')
self.export_model_path = _get_or_create_dir(working_path, out_name, 'export_model')
self.extras_path = _get_or_create_dir(working_path, out_name, 'extras')
self.train_writer = tf.summary.create_file_writer(train_log_path)
self.val_writer = tf.summary.create_file_writer(val_log_path)
# prepare checkpoints
self.last_path = _get_or_create_dir(working_path, out_name, 'checkpoints', 'last')
self.best_path = _get_or_create_dir(working_path, out_name, 'checkpoints', 'best')
self.checkpoint_last, self.checkpoint_manager_last = _prepare_checkpoint_manager(
self.last_path, max_to_keep,
**objects_to_save
)
self.checkpoint_best, self.checkpoint_manager_best = _prepare_checkpoint_manager(
self.best_path, max_to_keep,
**objects_to_save
)
def log_training(self):
self.train_writer.set_as_default()
def log_validation(self):
self.val_writer.set_as_default()
def flush(self):
self.train_writer.flush()
self.val_writer.flush()
def save_last(self):
self.checkpoint_manager_last.save()
def save_best(self):
self.checkpoint_manager_best.save()
def restore_best(self):
self.checkpoint_best.restore(self.checkpoint_manager_best.latest_checkpoint)
def restore(self, path):
self.checkpoint_last.restore(tf.train.latest_checkpoint(path)).assert_consumed()
def restore_from_checkpoint(path, **kwargs):
checkpoint = tf.train.Checkpoint(**kwargs)
return checkpoint.restore(path)
def restore_from_checkpoint_latest(path, **kwargs):
return restore_from_checkpoint(tf.train.latest_checkpoint(path), **kwargs)
def _prepare_checkpoint_manager(path, max_to_keep, **kwargs):
checkpoint = tf.train.Checkpoint(**kwargs)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint=checkpoint,
directory=path,
max_to_keep=max_to_keep
)
return checkpoint, checkpoint_manager
def _get_or_create_dir(*paths):
join_path = os.path.join(*paths)
if not os.path.exists(join_path):
os.makedirs(join_path)
return join_path
def _tqdm_template(t, i, s=None):
if s is None:
bar = '%s epoch %d | Elapsed: {elapsed} | Rate: {rate_fmt} | Inverted Rate: {rate_inv_fmt}' % (t, i)
else:
bar = '%s epoch %d | {l_bar}{bar} %s' % (t, i, '| Remaining: {remaining} | Inverted Rate: {rate_inv_fmt}')
return tqdm(ncols=80, total=s, bar_format=bar)
def ds_tqdm(title, ds, i, batch_size, ds_size=None):
with _tqdm_template(title, i, ds_size) as pbar:
for i, data in enumerate(ds):
if not isinstance(data, tuple):
data = (data,)
yield (i,) + data
pbar.update(batch_size)
| 3,291 | 30.961165 | 114 | py |
CSD-manipulation | CSD-manipulation-master/README.md | # Controllability-Aware Unsupervised Skill Discovery
## Overview
This is the official implementation of [**Controllability-aware Skill Discovery** (**CSD**)](https://arxiv.org/abs/2302.05103) on manipulation environments (Fetch and Kitchen).
The codebase is based on the implementation of [MUSIC](https://github.com/ruizhaogit/music).
We refer to http://github.com/seohongpark/CSD-locomotion for the implementation of CSD on locomotion environments.
Please visit [our project page](https://seohong.me/projects/csd/) for videos.
## Installation
```
conda create --name csd-manipulation python=3.8
conda activate csd-manipulation
pip install -r requirements.txt
```
## Examples
FetchPush (2-D continuous skills)
```
python train.py --run_group Exp --env_name FetchPush-v1 --n_epochs 1002 --num_cpu 1 --logging True --note DIAYN --hidden 256 --layers 2 --skill_type continuous --num_skills 2 --n_cycles 40 --policy_save_interval 500 --plot_freq 25 --plot_repeats 4 --max_path_length 50 --n_batches 10 --rollout_batch_size 2 --sk_clip 0 --et_clip 1 --seed 0 --buffer_size 100000 --polyak 0.995 --algo_name csd --inner 1 --algo csd --dual_reg 1 --dual_lam_opt adam --dual_dist s2_from_s --dual_init_lambda 3000 --dual_slack 1e-06 --train_start_epoch 50 --sk_r_scale 500 --et_r_scale 0.02
```
FetchSlide (2-D continuous skills)
```
python train.py --run_group Exp --env_name FetchSlide-v1 --n_epochs 1002 --num_cpu 1 --logging True --note DIAYN --hidden 256 --layers 2 --skill_type continuous --num_skills 2 --n_cycles 40 --policy_save_interval 500 --plot_freq 25 --plot_repeats 4 --max_path_length 50 --n_batches 10 --rollout_batch_size 2 --sk_clip 0 --et_clip 1 --seed 0 --buffer_size 100000 --polyak 0.995 --algo_name csd --inner 1 --algo csd --dual_reg 1 --dual_lam_opt adam --dual_dist s2_from_s --dual_init_lambda 3000 --dual_slack 1e-06 --train_start_epoch 50 --sk_r_scale 500 --et_r_scale 0.02
```
FetchPickAndPlace (3-D continuous skills)
```
python train.py --run_group Exp --env_name FetchPickAndPlace-v1 --n_epochs 1002 --num_cpu 1 --logging True --note DIAYN --hidden 256 --layers 2 --skill_type continuous --num_skills 3 --n_cycles 40 --policy_save_interval 500 --plot_freq 25 --plot_repeats 4 --max_path_length 50 --n_batches 10 --rollout_batch_size 2 --sk_clip 0 --et_clip 1 --seed 0 --buffer_size 100000 --polyak 0.995 --algo_name csd --inner 1 --algo csd --dual_reg 1 --dual_lam_opt adam --dual_dist s2_from_s --dual_init_lambda 3000 --dual_slack 1e-06 --train_start_epoch 50 --sk_r_scale 500 --et_r_scale 0.02
```
Kitchen (2-D continuous skills)
```
python train.py --run_group Exp --env_name Kitchen --n_epochs 502 --num_cpu 1 --logging True --note DIAYN --hidden 256 --layers 2 --skill_type continuous --num_skills 2 --n_cycles 40 --policy_save_interval 500 --plot_freq 25 --plot_repeats 4 --max_path_length 50 --n_batches 10 --rollout_batch_size 2 --sk_clip 0 --et_clip 1 --seed 0 --buffer_size 100000 --polyak 0.995 --n_random_trajectories 50 --algo_name csd --inner 1 --algo csd --dual_reg 1 --dual_lam_opt adam --dual_dist s2_from_s --dual_init_lambda 3000 --dual_slack 1e-06 --train_start_epoch 50 --sk_r_scale 500 --et_r_scale 0.02
```
Kitchen (16 discrete skills)
```
python train.py --run_group Exp --env_name Kitchen --n_epochs 502 --num_cpu 1 --logging True --note DIAYN --hidden 256 --layers 2 --skill_type discrete --num_skills 16 --n_cycles 40 --policy_save_interval 500 --plot_freq 25 --plot_repeats 4 --max_path_length 50 --n_batches 10 --rollout_batch_size 2 --sk_clip 0 --et_clip 1 --seed 0 --buffer_size 100000 --polyak 0.995 --n_random_trajectories 50 --algo_name csd --inner 1 --algo csd --dual_reg 1 --dual_lam_opt adam --dual_dist s2_from_s --dual_init_lambda 3000 --dual_slack 1e-06 --train_start_epoch 50 --sk_r_scale 500 --et_r_scale 0.02
```
## Licence
MIT
| 3,808 | 81.804348 | 589 | md |
CSD-manipulation | CSD-manipulation-master/save_weight.py | import os
import pickle
import subprocess
import click
import tensorflow as tf
from baselines.her.util import save_weight
@click.command()
@click.option('--policy_file', type=str, default=None)
@click.option('--run_group', type=str, default=None)
@click.option('--epoch', type=int, default=None)
def main(policy_file, run_group, epoch):
import glob
tf.compat.v1.disable_eager_execution()
if policy_file is not None:
policy_file = glob.glob(policy_file)[0]
base = os.path.splitext(policy_file)[0]
with open(policy_file, 'rb') as f:
pretrain = pickle.load(f)
pretrain_weights = save_weight(pretrain.sess)
output_file = open(base + '_weight.pkl', 'wb')
pickle.dump(pretrain_weights, output_file)
output_file.close()
else:
runs = glob.glob(f'logs/{run_group}*/*')
print(runs)
for run in sorted(runs):
policy_file = f'{run}/policy_{epoch}.pkl'
print(policy_file)
subprocess.Popen(['python', 'save_weight.py', f'--policy_file={policy_file}'])
if __name__ == '__main__':
main()
| 1,125 | 28.631579 | 90 | py |
CSD-manipulation | CSD-manipulation-master/train.py | import os
import pathlib
import sys
import time
from collections import defaultdict
import click
import gym
import numpy as np
import json
from mpi4py import MPI
from baselines import logger
from baselines.common import set_global_seeds
from baselines.common.mpi_moments import mpi_moments
import baselines.her.experiment.config as config
from baselines.her.rollout import RolloutWorker
from baselines.her.util import mpi_fork, snn
import os.path as osp
import tempfile
import datetime
from baselines.her.util import (dumpJson, loadJson, save_video, save_weight, load_weight)
import pickle
import tensorflow as tf
import wandb
from utils import FigManager, plot_trajectories, setup_evaluation, record_video, draw_2d_gaussians, RunningMeanStd, \
get_option_colors
g_start_time = int(datetime.datetime.now().timestamp())
def mpi_average(value):
if value == []:
value = [0.]
if not isinstance(value, list):
value = [value]
return mpi_moments(np.array(value))[0]
def sample_skill(num_skills, rollout_batch_size, use_skill_n=None, skill_type='discrete'):
# sample skill z
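# discrete skills: draw an integer skill index and return it with its one-hot encoding;
# continuous skills: the index slot is unused (zeros) and the skill is a standard-normal vector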
if skill_type == 'discrete':
z_s = np.random.randint(0, num_skills, rollout_batch_size)
if use_skill_n:
use_skill_n = use_skill_n - 1
z_s.fill(use_skill_n)
z_s_onehot = np.zeros([rollout_batch_size, num_skills])
z_s = np.array(z_s).reshape(rollout_batch_size, 1)
for i in range(rollout_batch_size):
z_s_onehot[i, z_s[i]] = 1
return z_s, z_s_onehot
else:
z_s = np.zeros((rollout_batch_size, 1))
z_s_onehot = np.random.randn(rollout_batch_size, num_skills)
return z_s, z_s_onehot
def iod_eval(eval_dir, env_name, evaluator, video_evaluator, num_skills, skill_type, plot_repeats, epoch, goal_generation, n_random_trajectories):
if goal_generation == 'Zero':
generated_goal = np.zeros(evaluator.g.shape)
else:
generated_goal = False
if env_name != 'Maze':
# Video eval
if skill_type == 'discrete':
video_eval_options = np.eye(num_skills)
if num_skills == 1:
video_eval_options = np.ones((9, 1))
else:
if num_skills == 2:
video_eval_options = []
for dist in [4.5]:
for angle in [3, 2, 1, 4]:
video_eval_options.append([dist * np.cos(angle * np.pi / 4), dist * np.sin(angle * np.pi / 4)])
video_eval_options.append([0, 0])
for dist in [4.5]:
for angle in [0, 5, 6, 7]:
video_eval_options.append([dist * np.cos(angle * np.pi / 4), dist * np.sin(angle * np.pi / 4)])
video_eval_options = np.array(video_eval_options)
elif num_skills <= 5:
video_eval_options = []
for dist in [-4.5, -2.25, 2.25, 4.5]:
for dim in range(num_skills):
cur_option = [0] * num_skills
cur_option[dim] = dist
video_eval_options.append(cur_option)
video_eval_options.append([0.] * num_skills)
video_eval_options = np.array(video_eval_options)
else:
video_eval_options = np.random.randn(9, num_skills) * 4.5 / 1.25
# Record each option twice
video_eval_options = np.repeat(video_eval_options, 2, axis=0)
if skill_type == 'continuous':
video_eval_options = video_eval_options / 4.5 * 1.25
video_evaluator.clear_history()
video_evaluator.render = 'rgb_array'
i = 0
imgss = []
while i < len(video_eval_options):
z = video_eval_options[i:i + video_evaluator.rollout_batch_size]
if len(z) != video_evaluator.rollout_batch_size:
remainder = video_evaluator.rollout_batch_size - z.shape[0]
z = np.concatenate([z, np.zeros((remainder, z.shape[1]))], axis=0)
else:
remainder = 0
imgs, _ = video_evaluator.generate_rollouts(generated_goal=generated_goal, z_s_onehot=z)
for j in range(len(imgs) - remainder):
imgss.append(imgs[j])
# filename = eval_dir + f'/videos/video_epoch_{epoch}_skill_{z[j]}.avi'
# save_video(imgs[j], filename)
i += video_evaluator.rollout_batch_size
video_evaluator.render = False
filename = eval_dir + f'/videos/video_epoch_{epoch}.mp4'
record_video(filename, imgss)
label = 'video'
logger.record_tabular(label, (filename, label))
# Plot eval
if skill_type == 'discrete':
eval_options = np.eye(num_skills)
colors = np.arange(0, num_skills)
eval_options = eval_options.repeat(plot_repeats, axis=0)
colors = colors.repeat(plot_repeats, axis=0)
num_evals = len(eval_options)
eval_option_colors = []
from matplotlib import cm
cmap = 'tab10' if num_skills <= 10 else 'tab20'
for i in range(num_evals):
eval_option_colors.extend([cm.get_cmap(cmap)(colors[i])[:3]])
eval_option_colors = np.array(eval_option_colors)
random_eval_options = eval_options
random_eval_option_colors = eval_option_colors
else:
random_eval_options = np.random.randn(n_random_trajectories, num_skills)
random_eval_option_colors = get_option_colors(random_eval_options * 2)
for cur_type in ['Random']:
grips = []
achs = []
xzs = []
yzs = []
xyzs = []
obs = []
options = []
infos = defaultdict(list)
evaluator.clear_history()
i = 0
num_trajs = len(random_eval_options)
cur_colors = random_eval_option_colors
while i < num_trajs:
z = random_eval_options[i:i + evaluator.rollout_batch_size]
if len(z) != evaluator.rollout_batch_size:
remainder = evaluator.rollout_batch_size - z.shape[0]
z = np.concatenate([z, np.zeros((remainder, z.shape[1]))], axis=0)
else:
remainder = 0
rollouts = evaluator.generate_rollouts(generated_goal=generated_goal, z_s_onehot=z)
grip_coords = rollouts['o'][:, :, 0:2]
if 'Kitchen' in env_name:
target_coords = [23, 24, 25]
else:
target_coords = [3, 4, 5]
ach_coords = rollouts['o'][:, :, [target_coords[0], target_coords[1]]]
xz_coords = rollouts['o'][:, :, [target_coords[0], target_coords[2]]]
yz_coords = rollouts['o'][:, :, [target_coords[1], target_coords[2]]]
xyz_coords = rollouts['o'][:, :, target_coords]
ob = rollouts['o'][:, :, :]
grips.extend(grip_coords[:evaluator.rollout_batch_size - remainder])
achs.extend(ach_coords[:evaluator.rollout_batch_size - remainder])
xzs.extend(xz_coords[:evaluator.rollout_batch_size - remainder])
yzs.extend(yz_coords[:evaluator.rollout_batch_size - remainder])
xyzs.extend(xyz_coords[:evaluator.rollout_batch_size - remainder])
obs.extend(ob[:evaluator.rollout_batch_size - remainder])
options.extend(z[:evaluator.rollout_batch_size - remainder])
if 'Kitchen' in env_name:
for key, val in rollouts.items():
if not key.startswith('info_Task'):
continue
infos[key].extend(val[:, :, 0].max(axis=1))
i += evaluator.rollout_batch_size
for label, trajs in [(f'EvalOp__TrajPlotWithCFrom{cur_type}', achs), (f'EvalOp__GripPlotWithCFrom{cur_type}', grips),
(f'EvalOp__XzPlotWithCFrom{cur_type}', xzs), (f'EvalOp__YzPlotWithCFrom{cur_type}', yzs)]:
with FigManager(label, epoch, eval_dir) as fm:
if 'Fetch' in env_name:
plot_axis = [0, 2, 0, 2]
elif env_name == 'Maze':
plot_axis = [-2, 2, -2, 2]
elif 'Kitchen' in env_name:
plot_axis = [-3, 3, -3, 3]
else:
plot_axis = None
plot_trajectories(
trajs, cur_colors, plot_axis=plot_axis, ax=fm.ax
)
if cur_type == 'Random':
coords = np.concatenate(xyzs, axis=0)
coords = coords * 10
uniq_coords = np.unique(np.floor(coords), axis=0)
uniq_xy_coords = np.unique(np.floor(coords[:, :2]), axis=0)
logger.record_tabular('Fetch/NumTrajs', len(xyzs))
logger.record_tabular('Fetch/AvgTrajLen', len(coords) / len(xyzs) - 1)
logger.record_tabular('Fetch/NumCoords', len(coords))
logger.record_tabular('Fetch/NumUniqueXYZCoords', len(uniq_coords))
logger.record_tabular('Fetch/NumUniqueXYCoords', len(uniq_xy_coords))
if 'Kitchen' in env_name:
for key, val in infos.items():
logger.record_tabular(f'Kitchen/{key[9:]}', np.minimum(1., np.max(val)))
def train(
logdir, policy, rollout_worker, env_name,
evaluator, video_evaluator, n_epochs, train_start_epoch, n_test_rollouts, n_cycles, n_batches, policy_save_interval,
save_policies, num_cpu, collect_data, collect_video, goal_generation, num_skills, use_skill_n, batch_size,
sk_r_scale,
skill_type, plot_freq, plot_repeats, n_random_trajectories, sk_clip, et_clip, done_ground,
**kwargs
):
rank = MPI.COMM_WORLD.Get_rank()
latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pkl')
best_policy_path = os.path.join(logger.get_dir(), 'policy_best.pkl')
periodic_policy_path = os.path.join(logger.get_dir(), 'policy_{}.pkl')
restore_info_path = os.path.join(logger.get_dir(), 'restore_info.pkl')
with open(restore_info_path, 'wb') as f:
pickle.dump(dict(
dimo=policy.dimo,
dimz=policy.dimz,
dimg=policy.dimg,
dimu=policy.dimu,
hidden=policy.hidden,
layers=policy.layers,
), f)
logger.info("Training...")
best_success_rate = -1
t = 1
start_time = time.time()
cur_time = time.time()
for epoch in range(n_epochs):
# train
episodes = []
rollout_worker.clear_history()
for cycle in range(n_cycles):
z_s, z_s_onehot = sample_skill(num_skills, rollout_worker.rollout_batch_size, use_skill_n, skill_type=skill_type)
if goal_generation == 'Zero':
generated_goal = np.zeros(rollout_worker.g.shape)
else:
generated_goal = False
if train_start_epoch <= epoch:
episode = rollout_worker.generate_rollouts(generated_goal=generated_goal, z_s_onehot=z_s_onehot)
else:
episode = rollout_worker.generate_rollouts(generated_goal=generated_goal, z_s_onehot=z_s_onehot, random_action=True)
episodes.append(episode)
policy.store_episode(episode)
for batch in range(n_batches):
t = epoch
if train_start_epoch <= epoch:
policy.train(t)
# train skill discriminator
if sk_r_scale > 0:
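# sample a random (episode, timestep) minibatch of (o, z, o', u) tuples from the replay buffer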
o_s = policy.buffer.buffers['o'][0: policy.buffer.current_size]
o2_s = policy.buffer.buffers['o'][0: policy.buffer.current_size][:, 1:, :]
z_s = policy.buffer.buffers['z'][0: policy.buffer.current_size]
u_s = policy.buffer.buffers['u'][0: policy.buffer.current_size]
T = z_s.shape[-2]
episode_idxs = np.random.randint(0, policy.buffer.current_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
o_s_batch = o_s[episode_idxs, t_samples]
o2_s_batch = o2_s[episode_idxs, t_samples]
z_s_batch = z_s[episode_idxs, t_samples]
u_s_batch = u_s[episode_idxs, t_samples]
if train_start_epoch <= epoch:
policy.train_sk(o_s_batch, z_s_batch, o2_s_batch, u_s_batch)
if policy.dual_dist != 'l2':
add_dict = dict()
policy.train_sk_dist(o_s_batch, z_s_batch, o2_s_batch, add_dict)
# #
if train_start_epoch <= epoch:
policy.update_target_net()
if collect_data and (rank == 0):
dumpJson(logdir, episodes, epoch, rank)
if plot_freq != 0 and epoch % plot_freq == 0:
iod_eval(logdir, env_name, evaluator, video_evaluator, num_skills, skill_type, plot_repeats, epoch, goal_generation, n_random_trajectories)
# test
evaluator.clear_history()
for _ in range(n_test_rollouts):
z_s, z_s_onehot = sample_skill(num_skills, evaluator.rollout_batch_size, use_skill_n, skill_type=skill_type)
evaluator.generate_rollouts(generated_goal=False, z_s_onehot=z_s_onehot)
# record logs
logger.record_tabular('time/total_time', time.time() - start_time)
logger.record_tabular('time/epoch_time', time.time() - cur_time)
cur_time = time.time()
logger.record_tabular('epoch', epoch)
for key, val in evaluator.logs('test'):
logger.record_tabular(key, mpi_average(val))
if n_cycles != 0:
for key, val in rollout_worker.logs('train'):
logger.record_tabular(key, mpi_average(val))
for key, val in policy.logs(is_policy_training=(train_start_epoch <= epoch)):
logger.record_tabular(key, mpi_average(val))
logger.record_tabular('best_success_rate', best_success_rate)
if rank == 0:
logger.dump_tabular()
# save the policy if it's better than the previous ones
success_rate = mpi_average(evaluator.current_success_rate())
if rank == 0 and success_rate >= best_success_rate and save_policies:
best_success_rate = success_rate
logger.info('New best success rate: {}. Saving policy to {} ...'.format(best_success_rate, best_policy_path))
evaluator.save_policy(best_policy_path)
evaluator.save_policy(latest_policy_path)
if rank == 0 and policy_save_interval > 0 and epoch % policy_save_interval == 0 and save_policies:
policy_path = periodic_policy_path.format(epoch)
logger.info('Saving periodic policy to {} ...'.format(policy_path))
evaluator.save_policy(policy_path)
# make sure that different threads have different seeds
local_uniform = np.random.uniform(size=(1,))
root_uniform = local_uniform.copy()
MPI.COMM_WORLD.Bcast(root_uniform, root=0)
if rank != 0:
assert local_uniform[0] != root_uniform[0]
def launch(
run_group, env_name, n_epochs, train_start_epoch, num_cpu, seed, replay_strategy, policy_save_interval, clip_return, binding, logging,
num_skills, version, n_cycles, note, skill_type, plot_freq, plot_repeats, n_random_trajectories,
sk_r_scale, et_r_scale, sk_clip, et_clip, done_ground,
max_path_length, hidden, layers, rollout_batch_size, n_batches, polyak, spectral_normalization,
dual_reg, dual_init_lambda, dual_lam_opt, dual_slack, dual_dist,
inner, algo, random_eps, noise_eps, lr, sk_lam_lr, buffer_size, algo_name,
load_weight, override_params={}, save_policies=True,
):
tf.compat.v1.disable_eager_execution()
# Fork for multi-CPU MPI implementation.
if num_cpu > 1:
whoami = mpi_fork(num_cpu, binding)
if whoami == 'parent':
sys.exit(0)
import baselines.common.tf_util as U
U.single_threaded_session().__enter__()
rank = MPI.COMM_WORLD.Get_rank()
# Configure logging
if logging:
logdir = ''
logdir += f'logs/{run_group}/'
logdir += f'sd{seed:03d}_'
if 'SLURM_JOB_ID' in os.environ:
logdir += f's_{os.environ["SLURM_JOB_ID"]}.'
if 'SLURM_PROCID' in os.environ:
logdir += f'{os.environ["SLURM_PROCID"]}.'
if 'SLURM_RESTART_COUNT' in os.environ:
logdir += f'rs_{os.environ["SLURM_RESTART_COUNT"]}.'
logdir += f'{g_start_time}_'
logdir += str(env_name)
logdir += '_ns' + str(num_skills)
logdir += '_sn' + str(spectral_normalization)
logdir += '_dr' + str(dual_reg)
logdir += '_in' + str(inner)
logdir += '_sk' + str(sk_r_scale)
logdir += '_et' + str(et_r_scale)
else:
logdir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
if rank == 0:
if logdir or logger.get_dir() is None:
logger.configure(dir=logdir)
else:
logger.configure() # use temp folder for other rank
logdir = logger.get_dir()
assert logdir is not None
os.makedirs(logdir, exist_ok=True)
# Seed everything.
rank_seed = seed + 1000000 * rank
set_global_seeds(rank_seed)
# Prepare params.
params = config.DEFAULT_PARAMS
params['env_name'] = env_name
params['seed'] = seed
params['replay_strategy'] = replay_strategy
params['binding'] = binding
params['max_timesteps'] = n_epochs * params['n_cycles'] * params['n_batches'] * num_cpu
params['version'] = version
params['n_cycles'] = n_cycles
params['num_cpu'] = num_cpu
params['note'] = note or params['note']
if note:
with open('params/'+note+'.json', 'r') as file:
override_params = json.loads(file.read())
params.update(**override_params)
##########################################
params['num_skills'] = num_skills
params['skill_type'] = skill_type
params['plot_freq'] = plot_freq
params['plot_repeats'] = plot_repeats
params['n_random_trajectories'] = n_random_trajectories
if sk_r_scale is not None:
params['sk_r_scale'] = sk_r_scale
if et_r_scale is not None:
params['et_r_scale'] = et_r_scale
params['sk_clip'] = sk_clip
params['et_clip'] = et_clip
params['done_ground'] = done_ground
params['max_path_length'] = max_path_length
params['hidden'] = hidden
params['layers'] = layers
params['rollout_batch_size'] = rollout_batch_size
params['n_batches'] = n_batches
params['polyak'] = polyak
params['spectral_normalization'] = spectral_normalization
params['dual_reg'] = dual_reg
params['dual_init_lambda'] = dual_init_lambda
params['dual_lam_opt'] = dual_lam_opt
params['dual_slack'] = dual_slack
params['dual_dist'] = dual_dist
params['inner'] = inner
params['algo'] = algo
params['random_eps'] = random_eps
params['noise_eps'] = noise_eps
params['lr'] = lr
params['sk_lam_lr'] = sk_lam_lr
params['buffer_size'] = buffer_size
params['algo_name'] = algo_name
params['train_start_epoch'] = train_start_epoch
if load_weight is not None:
params['load_weight'] = load_weight
if params['load_weight']:
if type(params['load_weight']) is list:
params['load_weight'] = params['load_weight'][seed]
import glob
base = os.path.splitext(params['load_weight'])[0]
policy_path = base + '_weight.pkl'
policy_path = glob.glob(policy_path)[0]
policy_weight_file = open(policy_path, 'rb')
pretrain_weights = pickle.load(policy_weight_file)
policy_weight_file.close()
else:
pretrain_weights = None
if env_name in config.DEFAULT_ENV_PARAMS:
params.update(config.DEFAULT_ENV_PARAMS[env_name]) # merge env-specific parameters in
with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
json.dump(params, f)
params = config.prepare_params(params)
exp_name = logdir.split('/')[-1]
if 'WANDB_API_KEY' in os.environ:
wandb.init(project="", entity="", group=run_group, name=exp_name, config=params) # Fill out this
def make_env():
if env_name == 'Maze':
from envs.maze_env import MazeEnv
env = MazeEnv(n=max_path_length)
elif env_name == 'Kitchen':
from d4rl_alt.kitchen.kitchen_envs import KitchenMicrowaveKettleLightTopLeftBurnerV0Custom
from gym.wrappers.time_limit import TimeLimit
env = KitchenMicrowaveKettleLightTopLeftBurnerV0Custom(control_mode='end_effector')
max_episode_steps = max_path_length
env = TimeLimit(env, max_episode_steps=max_episode_steps)
else:
env = gym.make(env_name)
if 'max_path_length' in params:
env = env.env
from gym.wrappers.time_limit import TimeLimit
max_episode_steps = params['max_path_length']
env = TimeLimit(env, max_episode_steps=max_episode_steps)
return env
params['make_env'] = make_env
##########################################################
config.log_params(params, logger=logger)
dims = config.configure_dims(params)
policy = config.configure_ddpg(dims=dims, params=params, pretrain_weights=pretrain_weights, clip_return=clip_return)
render = False
if params['collect_video']:
render = 'rgb_array'
rollout_params = {
'exploit': False,
'use_target_net': False,
'use_demo_states': True,
'compute_Q': False,
'T': params['T'],
'render': render,
}
eval_params = {
'exploit': True,
'use_target_net': params['test_with_polyak'],
'use_demo_states': False,
'compute_Q': True,
'T': params['T'],
}
for name in ['T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:
rollout_params[name] = params[name]
eval_params[name] = params[name]
rollout_worker = RolloutWorker(make_env, policy, dims, logger, **rollout_params)
rollout_worker.seed(rank_seed)
evaluator = RolloutWorker(make_env, policy, dims, logger, **eval_params)
evaluator.seed(rank_seed)
video_evaluator = RolloutWorker(make_env, policy, dims, logger, **dict(eval_params, rollout_batch_size=1))
video_evaluator.seed(rank_seed)
train(
logdir=logdir, policy=policy, rollout_worker=rollout_worker, env_name=env_name,
evaluator=evaluator, video_evaluator=video_evaluator, n_epochs=n_epochs, train_start_epoch=train_start_epoch, n_test_rollouts=params['n_test_rollouts'], n_cycles=params['n_cycles'], n_batches=params['n_batches'], policy_save_interval=policy_save_interval, save_policies=save_policies, num_cpu=num_cpu, collect_data=params['collect_data'], collect_video=params['collect_video'], goal_generation=params['goal_generation'], num_skills=params['num_skills'], use_skill_n=params['use_skill_n'], batch_size=params['_batch_size'], sk_r_scale=params['sk_r_scale'],
skill_type=params['skill_type'], plot_freq=params['plot_freq'], plot_repeats=params['plot_repeats'], n_random_trajectories=params['n_random_trajectories'], sk_clip=params['sk_clip'], et_clip=params['et_clip'], done_ground=params['done_ground'],
)
@click.command()
@click.option('--run_group', type=str, default='EXP')
@click.option('--env_name', type=click.Choice(['FetchPush-v1', 'FetchSlide-v1', 'FetchPickAndPlace-v1', 'Maze', 'Kitchen']))
@click.option('--n_epochs', type=int, default=50, help='the number of training epochs to run')
@click.option('--train_start_epoch', type=int, default=0)
@click.option('--num_cpu', type=int, default=1, help='the number of CPU cores to use (using MPI)')
@click.option('--seed', type=int, default=0, help='the random seed used to seed both the environment and the training code')
@click.option('--policy_save_interval', type=int, default=1, help='the interval with which policy pickles are saved. If set to 0, only the best and latest policy will be pickled.')
@click.option('--n_cycles', type=int, default=50, help='n_cycles')
@click.option('--replay_strategy', type=click.Choice(['future', 'final', 'none']), default='future', help='replay strategy to be used.')
@click.option('--clip_return', type=int, default=0, help='whether or not returns should be clipped')
@click.option('--binding', type=click.Choice(['none', 'core']), default='core', help='configure mpi using bind-to none or core.')
@click.option('--logging', type=bool, default=False, help='whether or not logging')
@click.option('--num_skills', type=int, default=5)
@click.option('--version', type=int, default=0, help='version')
@click.option('--note', type=str, default=None, help='unique notes')
@click.option('--skill_type', type=str, default='discrete')
@click.option('--plot_freq', type=int, default=1)
@click.option('--plot_repeats', type=int, default=1)
@click.option('--n_random_trajectories', type=int, default=200)
@click.option('--sk_r_scale', type=float, default=None)
@click.option('--et_r_scale', type=float, default=None)
@click.option('--sk_clip', type=int, default=1)
@click.option('--et_clip', type=int, default=1)
@click.option('--done_ground', type=int, default=0)
@click.option('--max_path_length', type=int, default=50)
@click.option('--hidden', type=int, default=256)
@click.option('--layers', type=int, default=3)
@click.option('--rollout_batch_size', type=int, default=2)
@click.option('--n_batches', type=int, default=40)
@click.option('--polyak', type=float, default=0.95)
@click.option('--spectral_normalization', type=int, default=0)
@click.option('--dual_reg', type=int, default=0)
@click.option('--dual_init_lambda', type=float, default=1)
@click.option('--dual_lam_opt', type=str, default='adam')
@click.option('--dual_slack', type=float, default=0.)
@click.option('--dual_dist', type=str, default='l2')
@click.option('--inner', type=int, default=0)
@click.option('--algo', type=str, default='csd')
@click.option('--random_eps', type=float, default=0.3)
@click.option('--noise_eps', type=float, default=0.2)
@click.option('--lr', type=float, default=0.001)
@click.option('--sk_lam_lr', type=float, default=0.001)
@click.option('--buffer_size', type=int, default=1000000)
@click.option('--algo_name', type=str, default=None) # Only for logging, not used
@click.option('--load_weight', type=str, default=None)
def main(**kwargs):
launch(**kwargs)
if __name__ == '__main__':
main()
| 26,726 | 43.031301 | 563 | py |
CSD-manipulation | CSD-manipulation-master/utils.py | from matplotlib import figure
import pathlib
import numpy as np
from matplotlib.patches import Ellipse
from baselines import logger
from moviepy import editor as mpy
class FigManager:
def __init__(self, label, epoch, eval_dir):
self.label = label
self.epoch = epoch
self.fig = figure.Figure()
self.ax = self.fig.add_subplot()
self.eval_dir = eval_dir
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
plot_path = (pathlib.Path(self.eval_dir)
/ 'plots'
/ f'{self.label}_{self.epoch}.png')
plot_path.parent.mkdir(parents=True, exist_ok=True)
self.fig.savefig(plot_path, dpi=300)
logger.record_tabular(self.label, (plot_path, self.label))
def get_2d_colors(points, min_point, max_point):
points = np.array(points)
min_point = np.array(min_point)
max_point = np.array(max_point)
colors = (points - min_point) / (max_point - min_point)
colors = np.hstack((
colors,
(2 - np.sum(colors, axis=1, keepdims=True)) / 2,
))
colors = np.clip(colors, 0, 1)
colors = np.c_[colors, np.full(len(colors), 0.8)]
return colors
def get_option_colors(options):
num_options = options.shape[0]
dim_option = options.shape[1]
if dim_option <= 2:
# Use a predefined option color scheme
if dim_option == 1:
options = np.hstack((options, options))
option_colors = get_2d_colors(options, (-4, -4), (4, 4))
else:
if dim_option > 3 and num_options >= 3:
from sklearn import decomposition
pca = decomposition.PCA(n_components=3)
# Add random noises to break symmetry.
pca_options = np.vstack((options, np.random.randn(dim_option, dim_option)))
pca.fit(pca_options)
option_colors = np.array(pca.transform(options))
elif dim_option > 3 and num_options < 3:
option_colors = options[:, :3]
elif dim_option == 3:
option_colors = options
# max_colors = np.max(option_colors, axis=0)
# min_colors = np.min(option_colors, axis=0)
max_colors = np.array([4] * 3)
min_colors = np.array([-4] * 3)
if all((max_colors - min_colors) > 0):
option_colors = (option_colors - min_colors) / (max_colors - min_colors)
option_colors = np.clip(option_colors, 0, 1)
option_colors = np.c_[option_colors, np.full(len(option_colors), 0.8)]
return option_colors
def setup_evaluation(num_eval_options, num_eval_trajectories_per_option, num_skills):
dim_option = num_skills
if dim_option == 2:
# If dim_option is 2, use predefined options for evaluation.
eval_options = [[0, 0]]
for dist in [1.5, 3.0, 4.5, 0.75, 2.25, 3.75]:
for angle in [0, 4, 2, 6, 1, 5, 3, 7]:
eval_options.append([dist * np.cos(angle * np.pi / 4), dist * np.sin(angle * np.pi / 4)])
eval_options = eval_options[:num_eval_options]
eval_options = np.array(eval_options)
else:
eval_options = [[0] * dim_option]
for dist in [1.5, -1.5, 3.0, -3.0, 4.5, -4.5]:
for dim in range(dim_option):
cur_option = [0] * dim_option
cur_option[dim] = dist
eval_options.append(cur_option)
eval_options = eval_options[:num_eval_options]
eval_options = np.array(eval_options)
eval_options = np.repeat(eval_options, num_eval_trajectories_per_option, axis=0)
eval_option_colors = get_option_colors(eval_options)
return eval_options, eval_option_colors
def plot_trajectory(trajectory, color, ax):
ax.plot(trajectory[:, 0], trajectory[:, 1], color=color, linewidth=0.7)
def plot_trajectories(trajectories, colors, plot_axis, ax):
"""Plot trajectories onto given ax."""
square_axis_limit = 0.0
for trajectory, color in zip(trajectories, colors):
trajectory = np.array(trajectory)
plot_trajectory(trajectory, color, ax)
square_axis_limit = max(square_axis_limit, np.max(np.abs(trajectory[:, :2])))
square_axis_limit = square_axis_limit * 1.2
if plot_axis == 'free':
return
if plot_axis is None:
plot_axis = [-square_axis_limit, square_axis_limit, -square_axis_limit, square_axis_limit]
if plot_axis is not None:
ax.axis(plot_axis)
ax.set_aspect('equal')
else:
ax.axis('scaled')
def draw_2d_gaussians(means, stddevs, colors, ax, fill=False, alpha=0.8, use_adaptive_axis=False, draw_unit_gaussian=True, plot_axis=None):
means = np.clip(means, -1000, 1000)
stddevs = np.clip(stddevs, -1000, 1000)
square_axis_limit = 2.0
if draw_unit_gaussian:
ellipse = Ellipse(xy=(0, 0), width=2, height=2,
edgecolor='r', lw=1, facecolor='none', alpha=0.5)
ax.add_patch(ellipse)
for mean, stddev, color in zip(means, stddevs, colors):
if len(mean) == 1:
mean = np.concatenate([mean, [0.]])
stddev = np.concatenate([stddev, [0.1]])
ellipse = Ellipse(xy=mean, width=stddev[0] * 2, height=stddev[1] * 2,
edgecolor=color, lw=1, facecolor='none' if not fill else color, alpha=alpha)
ax.add_patch(ellipse)
square_axis_limit = max(
square_axis_limit,
np.abs(mean[0] + stddev[0]),
np.abs(mean[0] - stddev[0]),
np.abs(mean[1] + stddev[1]),
np.abs(mean[1] - stddev[1]),
)
square_axis_limit = square_axis_limit * 1.2
ax.axis('scaled')
if plot_axis is None:
if use_adaptive_axis:
ax.set_xlim(-square_axis_limit, square_axis_limit)
ax.set_ylim(-square_axis_limit, square_axis_limit)
else:
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
else:
ax.axis(plot_axis)
def prepare_video(v, n_cols=None):
orig_ndim = v.ndim
if orig_ndim == 4:
v = v[None, ]
#b, t, c, h, w = v.shape
_, t, c, h, w = v.shape
if v.dtype == np.uint8:
v = np.float32(v) / 255.
def is_power2(num):
return num != 0 and ((num & (num - 1)) == 0)
# pad to nearest power of 2, all at once
# if not is_power2(v.shape[0]):
# len_addition = int(2**v.shape[0].bit_length() - v.shape[0])
# v = np.concatenate(
# (v, np.zeros(shape=(len_addition, t, c, h, w))), axis=0)
# n_rows = 2**((b.bit_length() - 1) // 2)
if n_cols is None:
if v.shape[0] <= 3:
n_cols = v.shape[0]
elif v.shape[0] <= 9:
n_cols = 3
elif v.shape[0] <= 18:
n_cols = 6
else:
n_cols = 8
if v.shape[0] % n_cols != 0:
len_addition = n_cols - v.shape[0] % n_cols
v = np.concatenate(
(v, np.zeros(shape=(len_addition, t, c, h, w))), axis=0)
n_rows = v.shape[0] // n_cols
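# tile the batch into an (n_rows x n_cols) grid of clips: output shape (t, n_rows*h, n_cols*w, c)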
v = np.reshape(v, newshape=(n_rows, n_cols, t, c, h, w))
v = np.transpose(v, axes=(2, 0, 4, 1, 5, 3))
v = np.reshape(v, newshape=(t, n_rows * h, n_cols * w, c))
return v
def save_video(path, tensor, fps=15, n_cols=None):
def _to_uint8(t):
# If user passes in uint8, then we don't need to rescale by 255
if t.dtype != np.uint8:
t = (t * 255.0).astype(np.uint8)
return t
if tensor.dtype in [np.object]:
tensor = [_to_uint8(prepare_video(t, n_cols)) for t in tensor]
else:
tensor = prepare_video(tensor, n_cols)
tensor = _to_uint8(tensor)
# Encode sequence of images into gif string
clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
plot_path = pathlib.Path(path)
plot_path.parent.mkdir(parents=True, exist_ok=True)
clip.write_videofile(str(plot_path), audio=False, verbose=False, logger=None)
def record_video(path, trajectories, n_cols=None):
renders = []
for trajectory in trajectories:
render = trajectory.transpose(0, 3, 1, 2).astype(np.uint8)
renders.append(render)
max_length = max([len(render) for render in renders])
for i, render in enumerate(renders):
renders[i] = np.concatenate([render, np.zeros((max_length - render.shape[0], *render.shape[1:]), dtype=render.dtype)], axis=0)
renders = np.array(renders)
save_video(path, renders, n_cols=n_cols)
class RunningMeanStd(object):
def __init__(self, epsilon=1e-5, shape=()):
self.mean = np.zeros(shape, np.float32)
self.var = np.ones(shape, np.float32)
self.count = epsilon
def update(self, arr: np.ndarray) -> None:
batch_mean = np.mean(arr, axis=0)
batch_var = np.var(arr, axis=0)
batch_count = arr.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean: np.ndarray, batch_var: np.ndarray, batch_count: int) -> None:
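# combine the running moments with the batch moments via the parallel (Chan et al.) update,
# so mean/var track those of all data seen so far (up to the epsilon-count initialization)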
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * self.count
m_b = batch_var * batch_count
m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = m_2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
| 9,494 | 34.297398 | 139 | py |
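A short, hypothetical sanity check (not part of the repository) for `RunningMeanStd`: updating with two batches should reproduce the moments of the concatenated data, up to the small bias introduced by the epsilon-count initialization.
```
import numpy as np

rms = RunningMeanStd(shape=(3,))
a = np.random.randn(100, 3)
b = np.random.randn(50, 3) + 1.0
rms.update(a)
rms.update(b)
full = np.concatenate([a, b], axis=0)
assert np.allclose(rms.mean, full.mean(axis=0), atol=1e-4)
assert np.allclose(rms.var, full.var(axis=0), atol=1e-3)
```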
CSD-manipulation | CSD-manipulation-master/baselines/__init__.py | | 0 | 0 | 0 | py |
CSD-manipulation | CSD-manipulation-master/baselines/logger.py | import io
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
import matplotlib.pyplot as plt
import wandb
from PIL import Image
import numpy as np
import tensorflow as tf
LOG_OUTPUT_FORMATS = ['stdout', 'log', 'csv', 'tensorboard', 'wandb']
LOG_OUTPUT_FORMATS_MPI = ['log']
# Also valid: json, tensorboard
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, seq):
for arg in seq:
self.file.write(arg)
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 0
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
# self.writer = tf.summary.create_file_writer(dir)
self.writer = tf.compat.v1.summary.FileWriter(path)
def writekvs(self, kvs):
def summary_val(k, v):
if isinstance(v, tuple):
if 'video' in k:
return None
plot_path, label = v
image = Image.open(str(plot_path)).resize((800, 600))
b = io.BytesIO()
image.save(b, format='PNG')
b = b.getvalue()
return tf.compat.v1.Summary.Value(
tag=f'{label}',
image=tf.compat.v1.Summary.Image(encoded_image_string=b)
)
else:
try:
kwargs = {'tag': k, 'simple_value': float(v)}
except (TypeError, ValueError):
return None
return tf.compat.v1.Summary.Value(**kwargs)
values = []
for k, v in kvs.items():
value = summary_val(k, v)
if value is None:
continue
values.append(value)
summary = tf.compat.v1.Summary(value=values)
self.writer.add_summary(summary, self.step)
self.writer.flush()
self.step += 1
def close(self):
if self.writer:
self.writer.close()
self.writer = None
class WandbOutputFormat(KVWriter):
"""
Dumps key/value pairs to Weights & Biases (wandb).
"""
def __init__(self):
if 'WANDB_API_KEY' in os.environ:
self.wandb = True
else:
self.wandb = False
self.step = 0
def writekvs(self, kvs):
if not self.wandb:
return
for k, v in kvs.items():
if isinstance(v, tuple):
if 'video' in k:
video_path, label = v
wandb.log({label: wandb.Video(video_path)}, step=self.step)
else:
plot_path, label = v
image = Image.open(str(plot_path)).resize((800, 600))
wandb.log({label: wandb.Image(image)}, step=self.step)
else:
if isinstance(v, str):
table = wandb.Table(columns=[k])
table.add_data(v)
wandb.log({k: table}, step=self.step)
elif isinstance(v, np.ndarray) and v.size > 1:
data = [[i, val] for i, val in enumerate(v)]
table = wandb.Table(data=data, columns=['idx', 'val'])
wandb.log({k: wandb.plot.bar(table, 'idx', 'val', k)}, step=self.step)
else:
wandb.log({k: v}, step=self.step)
self.step += 1
def close(self):
if not self.wandb:
return
wandb.finish()
def make_output_format(format, ev_dir, log_suffix=''):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
elif format == 'json':
return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
elif format == 'csv':
return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
elif format == 'wandb':
return WandbOutputFormat()
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
Logger.CURRENT.logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
Logger.CURRENT.logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
"""
Logger.CURRENT.dumpkvs()
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
class ProfileKV:
"""
Usage:
with logger.ProfileKV("interesting_scope"):
code
"""
def __init__(self, n):
self.n = "wait_" + n
def __enter__(self):
self.t1 = time.time()
def __exit__(self, type, value, traceback):
Logger.CURRENT.name2val[self.n] += time.time() - self.t1
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with ProfileKV(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
if val is None:
self.name2val[key] = None
return
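# incremental average: new_mean = (old_mean * cnt + val) / (cnt + 1)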
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.level == DISABLED: return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
self.name2cnt.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
Logger.DEFAULT = Logger.CURRENT = Logger(dir=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(dir=None, format_strs=None):
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
log_suffix = ''
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()
if rank > 0:
log_suffix = "-rank%03i" % rank
if format_strs is None:
strs, strs_mpi = os.getenv('OPENAI_LOG_FORMAT'), os.getenv('OPENAI_LOG_FORMAT_MPI')
format_strs = strs_mpi if rank>0 else strs
if format_strs is not None:
format_strs = format_strs.split(',')
else:
format_strs = LOG_OUTPUT_FORMATS_MPI if rank>0 else LOG_OUTPUT_FORMATS
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s'%dir)
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv_mean("b", -22.5)
logkv_mean("b", -44.4)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see b = 33.3")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.compat.v1.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx,tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step-1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| 16,175 | 28.735294 | 122 | py |
CSD-manipulation | CSD-manipulation-master/baselines/results_plotter.py | import numpy as np
import matplotlib
matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
from baselines.bench.monitor import load_results
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
y = ts.r.values
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
y = ts.r.values
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
y = ts.r.values
else:
raise NotImplementedError
return x, y
def plot_curves(xy_list, xaxis, title):
plt.figure(figsize=(8,2))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i]
plt.scatter(x, y, s=2)
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean)  # so this returns the average of the last EPISODES_WINDOW episodes
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel("Episode Rewards")
plt.tight_layout()
def plot_results(dirs, num_timesteps, xaxis, task_name):
tslist = []
for dir in dirs:
ts = load_results(dir)
ts = ts[ts.l.cumsum() <= num_timesteps]
tslist.append(ts)
xy_list = [ts2xy(ts, xaxis) for ts in tslist]
plot_curves(xy_list, xaxis, task_name)
# Example usage in jupyter-notebook
# from baselines import log_viewer
# %matplotlib inline
# log_viewer.plot_results(["./log"], 10e6, log_viewer.X_TIMESTEPS, "Breakout")
# Here ./log is a directory containing the monitor.csv files
def main():
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs = '*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
parser.add_argument('--xaxis', help = 'Variable on X-axis', default = X_TIMESTEPS)
parser.add_argument('--task_name', help = 'Title of plot', default = 'Breakout')
args = parser.parse_args()
args.dirs = [os.path.abspath(dir) for dir in args.dirs]
plot_results(args.dirs, args.num_timesteps, args.xaxis, args.task_name)
plt.show()
if __name__ == '__main__':
main() | 3,080 | 34.413793 | 115 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/__init__.py | # flake8: noqa F403
from baselines.common.console_util import *
from baselines.common.dataset import Dataset
from baselines.common.math_util import *
from baselines.common.misc_util import *
| 191 | 31 | 44 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/atari_wrappers.py | import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert, sometimes we stay in the lives == 0 condition for a few frames,
# so it's important to keep lives > 0 so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def make_atari(env_id):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
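# Minimal usage sketch: chaining the helpers above gives the standard
# DeepMind-style preprocessing. Assumes an Atari-capable gym install and the
# id 'PongNoFrameskip-v4'; any NoFrameskip id should work the same way.
def _example_deepmind_pipeline():
    env = make_atari('PongNoFrameskip-v4')        # NoopReset + frame skip/max-pool
    env = wrap_deepmind(env, frame_stack=True)    # episodic life, 84x84 grayscale, clipped rewards, 4-frame stack
    obs = env.reset()
    # LazyFrames materialise into a real array only when explicitly converted
    return np.array(obs).shape                    # (84, 84, 4)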
| 8,037 | 33.059322 | 112 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/cg.py | import numpy as np
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
"""
Conjugate gradient: iteratively solves f_Ax(x) = b, where f_Ax computes the
matrix-vector product A @ x for a symmetric positive-definite A (Demmel p 312).
"""
p = b.copy()
r = b.copy()
x = np.zeros_like(b)
rdotr = r.dot(r)
fmtstr = "%10i %10.3g %10.3g"
titlestr = "%10s %10s %10s"
if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))
for i in range(cg_iters):
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
z = f_Ax(p)
v = rdotr / p.dot(z)
x += v*p
r -= v*z
newrdotr = r.dot(r)
mu = newrdotr/rdotr
p = r + mu*p
rdotr = newrdotr
if rdotr < residual_tol:
break
if callback is not None:
callback(x)
if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) # pylint: disable=W0631
return x | 896 | 25.382353 | 88 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/cmd_util.py | """
Helpers for scripts like run_atari.py.
"""
import os
import gym
from gym.wrappers import FlattenDictWrapper
from baselines import logger
from baselines.bench import Monitor
from baselines.common import set_global_seeds
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
def make_atari_env(env_id, num_env, seed, wrapper_kwargs=None, start_index=0):
"""
Create a wrapped, monitored SubprocVecEnv for Atari.
"""
if wrapper_kwargs is None: wrapper_kwargs = {}
def make_env(rank): # pylint: disable=C0111
def _thunk():
env = make_atari(env_id)
env.seed(seed + rank)
env = Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
return wrap_deepmind(env, **wrapper_kwargs)
return _thunk
set_global_seeds(seed)
return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
def make_mujoco_env(env_id, seed):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
set_global_seeds(seed)
env = gym.make(env_id)
env = Monitor(env, logger.get_dir())
env.seed(seed)
return env
def make_robotics_env(env_id, seed, rank=0):
"""
Create a wrapped, monitored gym.Env for the goal-based robotics tasks.
"""
set_global_seeds(seed)
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(
env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
info_keywords=('is_success',))
env.seed(seed)
return env
def arg_parser():
"""
Create an empty argparse.ArgumentParser.
"""
import argparse
return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
def atari_arg_parser():
"""
Create an argparse.ArgumentParser for run_atari.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(10e6))
return parser
def mujoco_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
return parser
def robotics_arg_parser():
"""
Create an argparse.ArgumentParser for the robotics experiments.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
return parser
| 2,940 | 32.420455 | 94 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/console_util.py | from __future__ import print_function
from contextlib import contextmanager
import numpy as np
import time
# ================================================================
# Misc
# ================================================================
def fmt_row(width, row, header=False):
out = " | ".join(fmt_item(x, width) for x in row)
if header: out = out + "\n" + "-"*len(out)
return out
def fmt_item(x, l):
if isinstance(x, np.ndarray):
assert x.ndim==0
x = x.item()
if isinstance(x, (float, np.float32, np.float64)):
v = abs(x)
if (v < 1e-4 or v > 1e+4) and v > 0:
rep = "%7.2e" % x
else:
rep = "%7.5f" % x
else: rep = str(x)
return " "*(l - len(rep)) + rep
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
MESSAGE_DEPTH = 0
@contextmanager
def timed(msg):
global MESSAGE_DEPTH #pylint: disable=W0603
print(colorize('\t'*MESSAGE_DEPTH + '=: ' + msg, color='magenta'))
tstart = time.time()
MESSAGE_DEPTH += 1
yield
MESSAGE_DEPTH -= 1
print(colorize('\t'*MESSAGE_DEPTH + "done in %.3f seconds"%(time.time() - tstart), color='magenta'))
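# Minimal usage sketch: `timed` brackets a block with colored start/end banners,
# and fmt_row/fmt_item right-align a row of mixed values for quick console tables.
def _example_console_util():
    with timed("toy computation"):
        header = fmt_row(10, ["iter", "loss"], header=True)
        row = fmt_row(10, [1, 0.12345])
    return header + "\n" + row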
| 1,504 | 24.083333 | 104 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/dataset.py | import numpy as np
class Dataset(object):
def __init__(self, data_map, deterministic=False, shuffle=True):
self.data_map = data_map
self.deterministic = deterministic
self.enable_shuffle = shuffle
self.n = next(iter(data_map.values())).shape[0]
self._next_id = 0
self.shuffle()
def shuffle(self):
if self.deterministic:
return
perm = np.arange(self.n)
np.random.shuffle(perm)
for key in self.data_map:
self.data_map[key] = self.data_map[key][perm]
self._next_id = 0
def next_batch(self, batch_size):
if self._next_id >= self.n and self.enable_shuffle:
self.shuffle()
cur_id = self._next_id
cur_batch_size = min(batch_size, self.n - self._next_id)
self._next_id += cur_batch_size
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][cur_id:cur_id+cur_batch_size]
return data_map
def iterate_once(self, batch_size):
if self.enable_shuffle: self.shuffle()
while self._next_id <= self.n - batch_size:
yield self.next_batch(batch_size)
self._next_id = 0
def subset(self, num_elements, deterministic=True):
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][:num_elements]
return Dataset(data_map, deterministic)
def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both'
arrays = tuple(map(np.asarray, arrays))
n = arrays[0].shape[0]
assert all(a.shape[0] == n for a in arrays[1:])
inds = np.arange(n)
if shuffle: np.random.shuffle(inds)
sections = np.arange(0, n, batch_size)[1:] if num_batches is None else num_batches
for batch_inds in np.array_split(inds, sections):
if include_final_partial_batch or len(batch_inds) == batch_size:
yield tuple(a[batch_inds] for a in arrays)
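# Minimal usage sketch: Dataset serves aligned minibatches from a dict of
# equally sized arrays, while iterbatches does the same for a tuple of arrays.
def _example_minibatching():
    obs = np.arange(10).reshape(10, 1)
    acs = np.arange(10)
    d = Dataset(dict(ob=obs, ac=acs), shuffle=True)
    batch = d.next_batch(4)                               # dict with 4 aligned 'ob'/'ac' rows
    chunks = list(iterbatches((obs, acs), batch_size=4, shuffle=False))
    return batch, len(chunks)                             # 3 chunks: sizes 4, 4, 2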
| 2,132 | 33.967213 | 110 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/distributions.py | import tensorflow as tf
import numpy as np
import baselines.common.tf_util as U
from baselines.a2c.utils import fc
from tensorflow.python.ops import math_ops
class Pd(object):
"""
A particular probability distribution
"""
def flatparam(self):
raise NotImplementedError
def mode(self):
raise NotImplementedError
def neglogp(self, x):
# Usually it's easier to define the negative logprob
raise NotImplementedError
def kl(self, other):
raise NotImplementedError
def entropy(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def logp(self, x):
return - self.neglogp(x)
class PdType(object):
"""
Parametrized family of probability distributions
"""
def pdclass(self):
raise NotImplementedError
def pdfromflat(self, flat):
return self.pdclass()(flat)
def pdfromlatent(self, latent_vector):
raise NotImplementedError
def param_shape(self):
raise NotImplementedError
def sample_shape(self):
raise NotImplementedError
def sample_dtype(self):
raise NotImplementedError
def param_placeholder(self, prepend_shape, name=None):
return tf.compat.v1.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name)
def sample_placeholder(self, prepend_shape, name=None):
return tf.compat.v1.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name)
class CategoricalPdType(PdType):
def __init__(self, ncat):
self.ncat = ncat
def pdclass(self):
return CategoricalPd
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
pdparam = fc(latent_vector, 'pi', self.ncat, init_scale=init_scale, init_bias=init_bias)
return self.pdfromflat(pdparam), pdparam
def param_shape(self):
return [self.ncat]
def sample_shape(self):
return []
def sample_dtype(self):
return tf.int32
class MultiCategoricalPdType(PdType):
def __init__(self, nvec):
self.ncats = nvec
def pdclass(self):
return MultiCategoricalPd
def pdfromflat(self, flat):
return MultiCategoricalPd(self.ncats, flat)
def param_shape(self):
return [sum(self.ncats)]
def sample_shape(self):
return [len(self.ncats)]
def sample_dtype(self):
return tf.int32
class DiagGaussianPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return DiagGaussianPd
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
mean = fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
logstd = tf.compat.v1.get_variable(name='logstd', shape=[1, self.size], initializer=tf.compat.v1.zeros_initializer())
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
return self.pdfromflat(pdparam), mean
def param_shape(self):
return [2*self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.float32
class BernoulliPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return BernoulliPd
def param_shape(self):
return [self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.int32
# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
# def __init__(self, logits):
# self.logits = logits
# self.ps = tf.nn.softmax(logits)
# @classmethod
# def fromflat(cls, flat):
# return cls(flat)
# def flatparam(self):
# return self.logits
# def mode(self):
# return U.argmax(self.logits, axis=-1)
# def logp(self, x):
# return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
# def kl(self, other):
# return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
# - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def entropy(self):
# return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def sample(self):
# u = tf.random_uniform(tf.shape(self.logits))
# return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
class CategoricalPd(Pd):
def __init__(self, logits):
self.logits = logits
def flatparam(self):
return self.logits
def mode(self):
return tf.argmax(input=self.logits, axis=-1)
def neglogp(self, x):
# return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
# Note: we can't use sparse_softmax_cross_entropy_with_logits because
# the implementation does not allow second-order derivatives...
one_hot_actions = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
return tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits,
labels=tf.stop_gradient(one_hot_actions))
def kl(self, other):
a0 = self.logits - tf.reduce_max(input_tensor=self.logits, axis=-1, keepdims=True)
a1 = other.logits - tf.reduce_max(input_tensor=other.logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = tf.reduce_sum(input_tensor=ea0, axis=-1, keepdims=True)
z1 = tf.reduce_sum(input_tensor=ea1, axis=-1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(input_tensor=p0 * (a0 - tf.math.log(z0) - a1 + tf.math.log(z1)), axis=-1)
def entropy(self):
a0 = self.logits - tf.reduce_max(input_tensor=self.logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(input_tensor=ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(input_tensor=p0 * (tf.math.log(z0) - a0), axis=-1)
def sample(self):
u = tf.random.uniform(tf.shape(input=self.logits))
return tf.argmax(input=self.logits - tf.math.log(-tf.math.log(u)), axis=-1)
@classmethod
def fromflat(cls, flat):
return cls(flat)
class MultiCategoricalPd(Pd):
def __init__(self, nvec, flat):
self.flat = flat
self.categoricals = list(map(CategoricalPd, tf.split(flat, nvec, axis=-1)))
def flatparam(self):
return self.flat
def mode(self):
return tf.cast(tf.stack([p.mode() for p in self.categoricals], axis=-1), tf.int32)
def neglogp(self, x):
return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x, axis=-1))])
def kl(self, other):
return tf.add_n([p.kl(q) for p, q in zip(self.categoricals, other.categoricals)])
def entropy(self):
return tf.add_n([p.entropy() for p in self.categoricals])
def sample(self):
return tf.cast(tf.stack([p.sample() for p in self.categoricals], axis=-1), tf.int32)
@classmethod
def fromflat(cls, flat):
raise NotImplementedError
class DiagGaussianPd(Pd):
def __init__(self, flat):
self.flat = flat
mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat)
self.mean = mean
self.logstd = logstd
self.std = tf.exp(logstd)
def flatparam(self):
return self.flat
def mode(self):
return self.mean
def neglogp(self, x):
return 0.5 * tf.reduce_sum(input_tensor=tf.square((x - self.mean) / self.std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.cast(tf.shape(input=x)[-1], dtype=tf.float32) \
+ tf.reduce_sum(input_tensor=self.logstd, axis=-1)
def kl(self, other):
assert isinstance(other, DiagGaussianPd)
return tf.reduce_sum(input_tensor=other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1)
def entropy(self):
return tf.reduce_sum(input_tensor=self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)
def sample(self):
return self.mean + self.std * tf.random.normal(tf.shape(input=self.mean))
@classmethod
def fromflat(cls, flat):
return cls(flat)
class BernoulliPd(Pd):
def __init__(self, logits):
self.logits = logits
self.ps = tf.sigmoid(logits)
def flatparam(self):
return self.logits
def mode(self):
return tf.round(self.ps)
def neglogp(self, x):
return tf.reduce_sum(input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(x, dtype=tf.float32)), axis=-1)
def kl(self, other):
return tf.reduce_sum(input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits, labels=self.ps), axis=-1) - tf.reduce_sum(input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)
def entropy(self):
return tf.reduce_sum(input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)
def sample(self):
u = tf.random.uniform(tf.shape(input=self.ps))
return tf.cast(math_ops.less(u, self.ps), dtype=tf.float32)
@classmethod
def fromflat(cls, flat):
return cls(flat)
def make_pdtype(ac_space):
from gym import spaces
if isinstance(ac_space, spaces.Box):
assert len(ac_space.shape) == 1
return DiagGaussianPdType(ac_space.shape[0])
elif isinstance(ac_space, spaces.Discrete):
return CategoricalPdType(ac_space.n)
elif isinstance(ac_space, spaces.MultiDiscrete):
return MultiCategoricalPdType(ac_space.nvec)
elif isinstance(ac_space, spaces.MultiBinary):
return BernoulliPdType(ac_space.n)
else:
raise NotImplementedError
def shape_el(v, i):
maybe = v.get_shape()[i]
if maybe is not None:
return maybe
else:
return tf.shape(input=v)[i]
@U.in_session
def test_probtypes():
np.random.seed(0)
pdparam_diag_gauss = np.array([-.2, .3, .4, -.5, .1, -.5, .1, 0.8])
diag_gauss = DiagGaussianPdType(pdparam_diag_gauss.size // 2) #pylint: disable=E1101
validate_probtype(diag_gauss, pdparam_diag_gauss)
pdparam_categorical = np.array([-.2, .3, .5])
categorical = CategoricalPdType(pdparam_categorical.size) #pylint: disable=E1101
validate_probtype(categorical, pdparam_categorical)
nvec = [1,2,3]
pdparam_multicategorical = np.array([-.2, .3, .5, .1, 1, -.1])
multicategorical = MultiCategoricalPdType(nvec) #pylint: disable=E1101
validate_probtype(multicategorical, pdparam_multicategorical)
pdparam_bernoulli = np.array([-.2, .3, .5])
bernoulli = BernoulliPdType(pdparam_bernoulli.size) #pylint: disable=E1101
validate_probtype(bernoulli, pdparam_bernoulli)
def validate_probtype(probtype, pdparam):
N = 100000
# Check to see if mean negative log likelihood == differential entropy
Mval = np.repeat(pdparam[None, :], N, axis=0)
M = probtype.param_placeholder([N])
X = probtype.sample_placeholder([N])
pd = probtype.pdfromflat(M)
calcloglik = U.function([X, M], pd.logp(X))
calcent = U.function([M], pd.entropy())
Xval = tf.compat.v1.get_default_session().run(pd.sample(), feed_dict={M:Mval})
logliks = calcloglik(Xval, Mval)
entval_ll = - logliks.mean() #pylint: disable=E1101
entval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
entval = calcent(Mval).mean() #pylint: disable=E1101
assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas
# Check to see if kldiv[p,q] = - ent[p] - E_p[log q]
M2 = probtype.param_placeholder([N])
pd2 = probtype.pdfromflat(M2)
q = pdparam + np.random.randn(pdparam.size) * 0.1
Mval2 = np.repeat(q[None, :], N, axis=0)
calckl = U.function([M, M2], pd.kl(pd2))
klval = calckl(Mval, Mval2).mean() #pylint: disable=E1101
logliks = calcloglik(Xval, Mval2)
klval_ll = - entval - logliks.mean() #pylint: disable=E1101
klval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas
print('ok on', probtype, pdparam)
| 12,186 | 38.312903 | 243 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/filters.py | from .running_stat import RunningStat
from collections import deque
import numpy as np
class Filter(object):
def __call__(self, x, update=True):
raise NotImplementedError
def reset(self):
pass
class IdentityFilter(Filter):
def __call__(self, x, update=True):
return x
class CompositionFilter(Filter):
def __init__(self, fs):
self.fs = fs
def __call__(self, x, update=True):
for f in self.fs:
x = f(x)
return x
def output_shape(self, input_space):
out = input_space.shape
for f in self.fs:
out = f.output_shape(out)
return out
class ZFilter(Filter):
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
if update: self.rs.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std+1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def output_shape(self, input_space):
return input_space.shape
class AddClock(Filter):
def __init__(self):
self.count = 0
def reset(self):
self.count = 0
def __call__(self, x, update=True):
return np.append(x, self.count/100.0)
def output_shape(self, input_space):
return (input_space.shape[0]+1,)
class FlattenFilter(Filter):
def __call__(self, x, update=True):
return x.ravel()
def output_shape(self, input_space):
return (int(np.prod(input_space.shape)),)
class Ind2OneHotFilter(Filter):
def __init__(self, n):
self.n = n
def __call__(self, x, update=True):
out = np.zeros(self.n)
out[x] = 1
return out
def output_shape(self, input_space):
return (input_space.n,)
class DivFilter(Filter):
def __init__(self, divisor):
self.divisor = divisor
def __call__(self, x, update=True):
return x / self.divisor
def output_shape(self, input_space):
return input_space.shape
class StackFilter(Filter):
def __init__(self, length):
self.stack = deque(maxlen=length)
def reset(self):
self.stack.clear()
def __call__(self, x, update=True):
self.stack.append(x)
while len(self.stack) < self.stack.maxlen:
self.stack.append(x)
return np.concatenate(self.stack, axis=-1)
def output_shape(self, input_space):
return input_space.shape[:-1] + (input_space.shape[-1] * self.stack.maxlen,)
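# Minimal usage sketch: compose a clipped z-normalizer with a flattener. The
# ZFilter updates its running mean/std on every call unless update=False is passed.
def _example_filter_pipeline(shape=(2, 3)):
    pipeline = CompositionFilter([ZFilter(shape, clip=5.0), FlattenFilter()])
    y = pipeline(np.ones(shape))      # normalized, then raveled to shape (6,)
    return y.shape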
| 2,742 | 26.707071 | 84 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/math_util.py | import numpy as np
import scipy.signal
def discount(x, gamma):
"""
computes discounted sums along 0th dimension of x.
inputs
------
x: ndarray
gamma: float
outputs
-------
y: ndarray with same shape as x, satisfying
y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
where k = len(x) - t - 1
"""
assert x.ndim >= 1
return scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary
def explained_variance_2d(ypred, y):
assert y.ndim == 2 and ypred.ndim == 2
vary = np.var(y, axis=0)
out = 1 - np.var(y-ypred, axis=0)/vary  # per-column explained variance
out[vary < 1e-10] = 0
return out
def ncc(ypred, y):
return np.corrcoef(ypred, y)[1,0]
def flatten_arrays(arrs):
return np.concatenate([arr.flat for arr in arrs])
def unflatten_vector(vec, shapes):
i=0
arrs = []
for shape in shapes:
size = np.prod(shape)
arr = vec[i:i+size].reshape(shape)
arrs.append(arr)
i += size
return arrs
def discount_with_boundaries(X, New, gamma):
"""
X: 2d array of floats, time x features
New: 2d array of bools, indicating when a new episode has started
"""
Y = np.zeros_like(X)
T = X.shape[0]
Y[T-1] = X[T-1]
for t in range(T-2, -1, -1):
Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1])
return Y
def test_discount_with_boundaries():
gamma=0.9
x = np.array([1.0, 2.0, 3.0, 4.0], 'float32')
starts = [1.0, 0.0, 0.0, 1.0]
y = discount_with_boundaries(x, starts, gamma)
assert np.allclose(y, [
1 + gamma * 2 + gamma**2 * 3,
2 + gamma * 3,
3,
4
]) | 2,093 | 23.635294 | 75 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/misc_util.py | import gym
import numpy as np
import os
import pickle
import random
import tempfile
import zipfile
def zipsame(*seqs):
L = len(seqs[0])
assert all(len(seq) == L for seq in seqs[1:])
return zip(*seqs)
def unpack(seq, sizes):
"""
Unpack 'seq' into a sequence of lists, with lengths specified by 'sizes'.
None = just one bare element, not a list
Example:
unpack([1,2,3,4,5,6], [3,None,2]) -> ([1,2,3], 4, [5,6])
"""
seq = list(seq)
it = iter(seq)
assert sum(1 if s is None else s for s in sizes) == len(seq), "Trying to unpack %s into %s" % (seq, sizes)
for size in sizes:
if size is None:
yield it.__next__()
else:
li = []
for _ in range(size):
li.append(it.__next__())
yield li
class EzPickle(object):
"""Objects that are pickled and unpickled via their constructor
arguments.
Example usage:
class Dog(Animal, EzPickle):
def __init__(self, furcolor, tailkind="bushy"):
Animal.__init__()
EzPickle.__init__(furcolor, tailkind)
...
When this object is unpickled, a new Dog will be constructed by passing the provided
furcolor and tailkind into the constructor. However, philosophers are still not sure
whether it is still the same dog.
This is generally needed only for environments which wrap C/C++ code, such as MuJoCo
and Atari.
"""
def __init__(self, *args, **kwargs):
self._ezpickle_args = args
self._ezpickle_kwargs = kwargs
def __getstate__(self):
return {"_ezpickle_args": self._ezpickle_args, "_ezpickle_kwargs": self._ezpickle_kwargs}
def __setstate__(self, d):
out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"])
self.__dict__.update(out.__dict__)
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.compat.v1.set_random_seed(i)
np.random.seed(i)
random.seed(i)
def pretty_eta(seconds_left):
"""Print the number of seconds in human readable format.
Examples:
2 days
2 hours and 37 minutes
less than a minute
Parameters
----------
seconds_left: int
Number of seconds to be converted to the ETA
Returns
-------
eta: str
String representing the pretty ETA.
"""
minutes_left = seconds_left // 60
seconds_left %= 60
hours_left = minutes_left // 60
minutes_left %= 60
days_left = hours_left // 24
hours_left %= 24
def helper(cnt, name):
return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else ''))
if days_left > 0:
msg = helper(days_left, 'day')
if hours_left > 0:
msg += ' and ' + helper(hours_left, 'hour')
return msg
if hours_left > 0:
msg = helper(hours_left, 'hour')
if minutes_left > 0:
msg += ' and ' + helper(minutes_left, 'minute')
return msg
if minutes_left > 0:
return helper(minutes_left, 'minute')
return 'less than a minute'
class RunningAvg(object):
def __init__(self, gamma, init_value=None):
"""Keep a running estimate of a quantity. This is a bit like mean
but more sensitive to recent changes.
Parameters
----------
gamma: float
Must be between 0 and 1, where 0 is the most sensitive to recent
changes.
init_value: float or None
Initial value of the estimate. If None, it will be set on the first update.
"""
self._value = init_value
self._gamma = gamma
def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val
def __float__(self):
"""Get the current estimate"""
return self._value
def boolean_flag(parser, name, default=False, help=None):
"""Add a boolean flag to argparse parser.
Parameters
----------
parser: argparse.Parser
parser to add the flag to
name: str
--<name> will enable the flag, while --no-<name> will disable it
default: bool or None
default value of the flag
help: str
help string for the flag
"""
dest = name.replace('-', '_')
parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help)
parser.add_argument("--no-" + name, action="store_false", dest=dest)
def get_wrapper_by_name(env, classname):
"""Given an a gym environment possibly wrapped multiple times, returns a wrapper
of class named classname or raises ValueError if no such wrapper was applied
Parameters
----------
env: gym.Env of gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname
"""
currentenv = env
while True:
if classname == currentenv.class_name():
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named %s" % classname)
def relatively_safe_pickle_dump(obj, path, compression=False):
"""This is just like regular pickle dump, except from the fact that failure cases are
different:
- It's never possible that we end up with a pickle in corrupted state.
- If a there was a different file at the path, that file will remain unchanged in the
even of failure (provided that filesystem rename is atomic).
- it is sometimes possible that we end up with useless temp file which needs to be
deleted manually (it will be removed automatically on the next function call)
The indended use case is periodic checkpoints of experiment state, such that we never
corrupt previous checkpoints if the current one fails.
Parameters
----------
obj: object
object to pickle
path: str
path to the output file
compression: bool
if true pickle will be compressed
"""
temp_storage = path + ".relatively_safe"
if compression:
# Using gzip here would be simpler, but the size is limited to 2GB
with tempfile.NamedTemporaryFile() as uncompressed_file:
pickle.dump(obj, uncompressed_file)
uncompressed_file.file.flush()
with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip:
myzip.write(uncompressed_file.name, "data")
else:
with open(temp_storage, "wb") as f:
pickle.dump(obj, f)
os.rename(temp_storage, path)
def pickle_load(path, compression=False):
"""Unpickle a possible compressed pickle.
Parameters
----------
path: str
path to the output file
compression: bool
if true assumes that pickle was compressed when created and attempts decompression.
Returns
-------
obj: object
the unpickled object
"""
if compression:
with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
with myzip.open("data") as f:
return pickle.load(f)
else:
with open(path, "rb") as f:
return pickle.load(f)
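# Minimal usage sketch: write a checkpoint atomically and read it back. The
# temp-file-plus-rename dance above is what protects previously saved checkpoints.
def _example_safe_checkpoint(path="/tmp/example_checkpoint.pkl"):
    state = {"step": 42, "theta": [0.1, 0.2]}
    relatively_safe_pickle_dump(state, path, compression=False)
    return pickle_load(path, compression=False)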
| 7,603 | 28.359073 | 110 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/mpi_adam.py | from mpi4py import MPI
import baselines.common.tf_util as U
import tensorflow as tf
import numpy as np
class MpiAdam(object):
def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None):
self.var_list = var_list
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.scale_grad_by_procs = scale_grad_by_procs
size = sum(U.numel(v) for v in var_list)
self.m = np.zeros(size, 'float32')
self.v = np.zeros(size, 'float32')
self.t = 0
self.setfromflat = U.SetFromFlat(var_list)
self.getflat = U.GetFlat(var_list)
self.comm = MPI.COMM_WORLD if comm is None else comm
def update(self, localg, stepsize):
if self.t % 100 == 0:
self.check_synced()
localg = localg.astype('float32')
globalg = np.zeros_like(localg)
self.comm.Allreduce(localg, globalg, op=MPI.SUM)
if self.scale_grad_by_procs:
globalg /= self.comm.Get_size()
self.t += 1
a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)
self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon)
self.setfromflat(self.getflat() + step)
def sync(self):
theta = self.getflat()
self.comm.Bcast(theta, root=0)
self.setfromflat(theta)
def check_synced(self):
if self.comm.Get_rank() == 0: # this is root
theta = self.getflat()
self.comm.Bcast(theta, root=0)
else:
thetalocal = self.getflat()
thetaroot = np.empty_like(thetalocal)
self.comm.Bcast(thetaroot, root=0)
assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
@U.in_session
def test_MpiAdam():
np.random.seed(0)
tf.compat.v1.set_random_seed(0)
a = tf.Variable(np.random.randn(3).astype('float32'))
b = tf.Variable(np.random.randn(2,5).astype('float32'))
loss = tf.reduce_sum(input_tensor=tf.square(a)) + tf.reduce_sum(input_tensor=tf.sin(b))
stepsize = 1e-2
update_op = tf.compat.v1.train.AdamOptimizer(stepsize).minimize(loss)
do_update = U.function([], loss, updates=[update_op])
tf.compat.v1.get_default_session().run(tf.compat.v1.global_variables_initializer())
for i in range(10):
print(i,do_update())
tf.compat.v1.set_random_seed(0)
tf.compat.v1.get_default_session().run(tf.compat.v1.global_variables_initializer())
var_list = [a,b]
lossandgrad = U.function([], [loss, U.flatgrad(loss, var_list)], updates=[update_op])
adam = MpiAdam(var_list)
for i in range(10):
l,g = lossandgrad()
adam.update(g, stepsize)
print(i,l) | 2,882 | 35.493671 | 112 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/mpi_fork.py | import os, subprocess, sys
def mpi_fork(n, bind_to_core=False):
"""Re-launches the current script with workers
Returns "parent" for original parent, "child" for MPI children
"""
if n<=1:
return "child"
if os.getenv("IN_MPI") is None:
env = os.environ.copy()
env.update(
MKL_NUM_THREADS="1",
OMP_NUM_THREADS="1",
IN_MPI="1"
)
args = ["mpirun", "-np", str(n), "--oversubscribe"]
if bind_to_core:
args += ["-bind-to", "core"]
args += [sys.executable] + sys.argv
subprocess.check_call(args, env=env)
return "parent"
else:
return "child"
| 687 | 27.666667 | 66 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/mpi_moments.py | from mpi4py import MPI
import numpy as np
from baselines.common import zipsame
def mpi_mean(x, axis=0, comm=None, keepdims=False):
x = np.asarray(x)
assert x.ndim > 0
if comm is None: comm = MPI.COMM_WORLD
xsum = x.sum(axis=axis, keepdims=keepdims)
n = xsum.size
localsum = np.zeros(n+1, x.dtype)
localsum[:n] = xsum.ravel()
localsum[n] = x.shape[axis]
globalsum = np.zeros_like(localsum)
comm.Allreduce(localsum, globalsum, op=MPI.SUM)
return globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n]
def mpi_moments(x, axis=0, comm=None, keepdims=False):
x = np.asarray(x)
assert x.ndim > 0
mean, count = mpi_mean(x, axis=axis, comm=comm, keepdims=True)
sqdiffs = np.square(x - mean)
meansqdiff, count1 = mpi_mean(sqdiffs, axis=axis, comm=comm, keepdims=True)
assert count1 == count
std = np.sqrt(meansqdiff)
if not keepdims:
newshape = mean.shape[:axis] + mean.shape[axis+1:]
mean = mean.reshape(newshape)
std = std.reshape(newshape)
return mean, std, count
def test_runningmeanstd():
import subprocess
subprocess.check_call(['mpirun', '-np', '3',
'python','-c',
'from baselines.common.mpi_moments import _helper_runningmeanstd; _helper_runningmeanstd()'])
def _helper_runningmeanstd():
comm = MPI.COMM_WORLD
np.random.seed(0)
for (triple,axis) in [
((np.random.randn(3), np.random.randn(4), np.random.randn(5)),0),
((np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),0),
((np.random.randn(2,3), np.random.randn(2,4), np.random.randn(2,4)),1),
]:
x = np.concatenate(triple, axis=axis)
ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]
ms2 = mpi_moments(triple[comm.Get_rank()],axis=axis)
for (a1,a2) in zipsame(ms1, ms2):
print(a1, a2)
assert np.allclose(a1, a2)
print("ok!")
| 1,963 | 31.196721 | 101 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/mpi_running_mean_std.py | from mpi4py import MPI
import tensorflow as tf, baselines.common.tf_util as U, numpy as np
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-2, shape=()):
self._sum = tf.compat.v1.get_variable(
dtype=tf.float64,
shape=shape,
initializer=tf.compat.v1.constant_initializer(0.0),
name="runningsum", trainable=False)
self._sumsq = tf.compat.v1.get_variable(
dtype=tf.float64,
shape=shape,
initializer=tf.compat.v1.constant_initializer(epsilon),
name="runningsumsq", trainable=False)
self._count = tf.compat.v1.get_variable(
dtype=tf.float64,
shape=(),
initializer=tf.compat.v1.constant_initializer(epsilon),
name="count", trainable=False)
self.shape = shape
self.mean = tf.cast(self._sum / self._count, dtype=tf.float32)
self.std = tf.sqrt( tf.maximum( tf.cast(self._sumsq / self._count, dtype=tf.float32) - tf.square(self.mean) , 1e-2 ))
newsum = tf.compat.v1.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
newsumsq = tf.compat.v1.placeholder(shape=self.shape, dtype=tf.float64, name='var')
newcount = tf.compat.v1.placeholder(shape=[], dtype=tf.float64, name='count')
self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
updates=[tf.compat.v1.assign_add(self._sum, newsum),
tf.compat.v1.assign_add(self._sumsq, newsumsq),
tf.compat.v1.assign_add(self._count, newcount)])
def update(self, x):
x = x.astype('float64')
n = int(np.prod(self.shape))
totalvec = np.zeros(n*2+1, 'float64')
addvec = np.concatenate([x.sum(axis=0).ravel(), np.square(x).sum(axis=0).ravel(), np.array([len(x)],dtype='float64')])
MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:2*n].reshape(self.shape), totalvec[2*n])
@U.in_session
def test_runningmeanstd():
for (x1, x2, x3) in [
(np.random.randn(3), np.random.randn(4), np.random.randn(5)),
(np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),
]:
rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
U.initialize()
x = np.concatenate([x1, x2, x3], axis=0)
ms1 = [x.mean(axis=0), x.std(axis=0)]
rms.update(x1)
rms.update(x2)
rms.update(x3)
ms2 = [rms.mean.eval(), rms.std.eval()]
assert np.allclose(ms1, ms2)
@U.in_session
def test_dist():
np.random.seed(0)
p1,p2,p3=(np.random.randn(3,1), np.random.randn(4,1), np.random.randn(5,1))
q1,q2,q3=(np.random.randn(6,1), np.random.randn(7,1), np.random.randn(8,1))
# p1,p2,p3=(np.random.randn(3), np.random.randn(4), np.random.randn(5))
# q1,q2,q3=(np.random.randn(6), np.random.randn(7), np.random.randn(8))
comm = MPI.COMM_WORLD
assert comm.Get_size()==2
if comm.Get_rank()==0:
x1,x2,x3 = p1,p2,p3
elif comm.Get_rank()==1:
x1,x2,x3 = q1,q2,q3
else:
assert False
rms = RunningMeanStd(epsilon=0.0, shape=(1,))
U.initialize()
rms.update(x1)
rms.update(x2)
rms.update(x3)
bigvec = np.concatenate([p1,p2,p3,q1,q2,q3])
def checkallclose(x,y):
print(x,y)
return np.allclose(x,y)
assert checkallclose(
bigvec.mean(axis=0),
rms.mean.eval(),
)
assert checkallclose(
bigvec.std(axis=0),
rms.std.eval(),
)
if __name__ == "__main__":
# Run with mpirun -np 2 python <filename>
test_dist()
| 3,777 | 33.981481 | 126 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/mpi_sgd.py | from mpi4py import MPI
import baselines.common.tf_util as U
import tensorflow as tf
import numpy as np
class MpiSgd(object):
def __init__(self, var_list, *, scale_grad_by_procs=True, comm=None):
self.var_list = var_list
self.scale_grad_by_procs = scale_grad_by_procs
size = sum(U.numel(v) for v in var_list)
self.t = 0
self.setfromflat = U.SetFromFlat(var_list)
self.getflat = U.GetFlat(var_list)
self.comm = MPI.COMM_WORLD if comm is None else comm
def update(self, localg, stepsize):
if self.t % 100 == 0:
self.check_synced()
localg = localg.astype('float32')
globalg = np.zeros_like(localg)
self.comm.Allreduce(localg, globalg, op=MPI.SUM)
if self.scale_grad_by_procs:
globalg /= self.comm.Get_size()
self.t += 1
step = (- stepsize) * globalg
self.setfromflat(self.getflat() + step)
def sync(self):
theta = self.getflat()
self.comm.Bcast(theta, root=0)
self.setfromflat(theta)
def check_synced(self):
if self.comm.Get_rank() == 0: # this is root
theta = self.getflat()
self.comm.Bcast(theta, root=0)
else:
thetalocal = self.getflat()
thetaroot = np.empty_like(thetalocal)
self.comm.Bcast(thetaroot, root=0)
assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
| 1,451 | 32.767442 | 75 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/runners.py | import numpy as np
from abc import ABC, abstractmethod
class AbstractEnvRunner(ABC):
def __init__(self, *, env, model, nsteps):
self.env = env
self.model = model
nenv = env.num_envs
self.batch_ob_shape = (nenv*nsteps,) + env.observation_space.shape
self.obs = np.zeros((nenv,) + env.observation_space.shape, dtype=model.train_model.X.dtype.name)
self.obs[:] = env.reset()
self.nsteps = nsteps
self.states = model.initial_state
self.dones = [False for _ in range(nenv)]
@abstractmethod
def run(self):
raise NotImplementedError
| 620 | 31.684211 | 104 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/running_mean_std.py | import numpy as np
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * (self.count)
m_b = batch_var * (batch_count)
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
def test_runningmeanstd():
for (x1, x2, x3) in [
(np.random.randn(3), np.random.randn(4), np.random.randn(5)),
(np.random.randn(3,2), np.random.randn(4,2), np.random.randn(5,2)),
]:
rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
x = np.concatenate([x1, x2, x3], axis=0)
ms1 = [x.mean(axis=0), x.var(axis=0)]
rms.update(x1)
rms.update(x2)
rms.update(x3)
ms2 = [rms.mean, rms.var]
assert np.allclose(ms1, ms2)
| 1,618 | 33.446809 | 97 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/running_stat.py | import numpy as np
# http://www.johndcook.com/blog/standard_deviation/
class RunningStat(object):
def __init__(self, shape):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM)/self._n
self._S[...] = self._S + (x - oldM)*(x - self._M)
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S/(self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
def test_running_stat():
for shp in ((), (3,), (3,4)):
li = []
rs = RunningStat(shp)
for _ in range(5):
val = np.random.randn(*shp)
rs.push(val)
li.append(val)
m = np.mean(li, axis=0)
assert np.allclose(rs.mean, m)
v = np.square(m) if (len(li) == 1) else np.var(li, ddof=1, axis=0)
assert np.allclose(rs.var, v)
| 1,320 | 27.106383 | 78 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/schedules.py | """This file is used for specifying various schedules that evolve over
time throughout the execution of the algorithm, such as:
- learning rate for the optimizer
- exploration epsilon for the epsilon greedy exploration strategy
- beta parameter for prioritized experience replay
Each schedule has a function `value(t)` which returns the current value
of the parameter given the timestep t of the optimization procedure.
"""
class Schedule(object):
def value(self, t):
"""Value of the schedule at time t"""
raise NotImplementedError()
class ConstantSchedule(object):
def __init__(self, value):
"""Value remains constant over time.
Parameters
----------
value: float
Constant value of the schedule
"""
self._v = value
def value(self, t):
"""See Schedule.value"""
return self._v
def linear_interpolation(l, r, alpha):
return l + alpha * (r - l)
class PiecewiseSchedule(object):
def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
"""Piecewise schedule.
endpoints: [(int, int)]
list of pairs `(time, value)` meaning that the schedule should output
`value` when `t==time`. All the values for time must be sorted in
an increasing order. When t is between two times, e.g. `(time_a, value_a)`
and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs
`interpolation(value_a, value_b, alpha)` where alpha is a fraction of
time passed between `time_a` and `time_b` for time `t`.
interpolation: lambda float, float, float: float
a function that takes value to the left and to the right of t according
to the `endpoints`. Alpha is the fraction of distance from left endpoint to
right endpoint that t has covered. See linear_interpolation for example.
outside_value: float
if the value is requested outside of all the intervals specified in
`endpoints`, this value is returned. If None, the value of the last
endpoint is returned instead.
"""
idxes = [e[0] for e in endpoints]
assert idxes == sorted(idxes)
self._interpolation = interpolation
self._outside_value = outside_value
self._endpoints = endpoints
def value(self, t):
"""See Schedule.value"""
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
if self._outside_value is not None:
# assert self._outside_value is not None
return self._outside_value
else:
return self._endpoints[-1][1]
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
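# Minimal usage sketch: anneal an exploration epsilon linearly over the first
# 10k steps, and drop a learning rate at a fixed breakpoint.
def _example_schedules():
    eps = LinearSchedule(schedule_timesteps=10000, final_p=0.1, initial_p=1.0)
    lr = PiecewiseSchedule([(0, 1e-3), (10000, 1e-4)], outside_value=1e-4)
    return eps.value(5000), lr.value(20000)   # (0.55, 1e-4)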
| 3,812 | 36.019417 | 90 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/segment_tree.py | import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max); it must be
            associative and have `neutral_element` as its identity over the set
            of possible values for array elements (i.e. form a monoid)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
        self.operation(arr[start], operation(arr[start+1], operation(... arr[end-1])))
Parameters
----------
start: int
beginning of the subsequence
        end: int
            end of the subsequence (exclusive)
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - i]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
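# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# A SumSegmentTree over unnormalized priorities supports O(log capacity) proportional
# sampling via find_prefixsum_idx, which is how prioritized replay buffers use it.
# The priority values below are arbitrary examples.
if __name__ == '__main__':
    import random
    priorities = SumSegmentTree(capacity=4)
    priorities[0], priorities[1], priorities[2], priorities[3] = 1.0, 2.0, 3.0, 4.0
    total = priorities.sum()  # 10.0
    idx = priorities.find_prefixsum_idx(random.random() * total)
    print(total, idx)  # index i is drawn with probability priorities[i] / total
    lows = MinSegmentTree(capacity=4)
    lows[0], lows[1], lows[2], lows[3] = 1.0, 2.0, 3.0, 4.0
    print(lows.min())  # 1.0 (used e.g. to bound importance-sampling weights)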
| 4,899 | 32.561644 | 109 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/tf_util.py | import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(pred=tf.cast(condition, 'bool'),
true_fn=lambda: then_expression,
false_fn=lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.compat.v1.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.compat.v1.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.compat.v1.InteractiveSession(config=tf_config, graph=graph)
else:
return tf.compat.v1.Session(config=tf_config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.compat.v1.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.compat.v1.global_variables()) - ALREADY_INITIALIZED
tf.compat.v1.get_default_session().run(tf.compat.v1.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.compat.v1.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.compat.v1.get_variable("W", filter_shape, dtype, tf.compat.v1.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.compat.v1.get_variable("b", [1, 1, 1, num_filters], initializer=tf.compat.v1.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.compat.v1.summary.image(summary_tag,
tf.transpose(a=tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
perm=[2, 0, 1, 3]),
                             max_outputs=10)
return tf.nn.conv2d(input=x, filters=w, strides=stride_shape, padding=pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
        x = tf.compat.v1.placeholder(tf.int32, (), name="x")
        y = tf.compat.v1.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
    outputs: [tf.Tensor] or tf.Tensor
        list of outputs or a single output to be returned from function. Returned
        value will also have the same structure.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.compat.v1.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(ys=loss, xs=var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.compat.v1.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.compat.v1.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.compat.v1.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.compat.v1.get_default_session().run(self.op)
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
assert dtype1 == dtype and shape1 == shape
return out
else:
out = tf.compat.v1.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/biases" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params*1e-6))
| 11,046 | 38.173759 | 132 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/tests/test_schedules.py | import numpy as np
from baselines.common.schedules import ConstantSchedule, PiecewiseSchedule
def test_piecewise_schedule():
ps = PiecewiseSchedule([(-5, 100), (5, 200), (10, 50), (100, 50), (200, -50)], outside_value=500)
assert np.isclose(ps.value(-10), 500)
assert np.isclose(ps.value(0), 150)
assert np.isclose(ps.value(5), 200)
assert np.isclose(ps.value(9), 80)
assert np.isclose(ps.value(50), 50)
assert np.isclose(ps.value(80), 50)
assert np.isclose(ps.value(150), 0)
assert np.isclose(ps.value(175), -25)
assert np.isclose(ps.value(201), 500)
assert np.isclose(ps.value(500), 500)
assert np.isclose(ps.value(200 - 1e-10), -50)
def test_constant_schedule():
cs = ConstantSchedule(5)
for i in range(-100, 100):
assert np.isclose(cs.value(i), 5)
| 823 | 29.518519 | 101 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/tests/test_segment_tree.py | import numpy as np
from baselines.common.segment_tree import SumSegmentTree, MinSegmentTree
def test_tree_set():
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[3] = 3.0
assert np.isclose(tree.sum(), 4.0)
assert np.isclose(tree.sum(0, 2), 0.0)
assert np.isclose(tree.sum(0, 3), 1.0)
assert np.isclose(tree.sum(2, 3), 1.0)
assert np.isclose(tree.sum(2, -1), 1.0)
assert np.isclose(tree.sum(2, 4), 4.0)
def test_tree_set_overlap():
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[2] = 3.0
assert np.isclose(tree.sum(), 3.0)
assert np.isclose(tree.sum(2, 3), 3.0)
assert np.isclose(tree.sum(2, -1), 3.0)
assert np.isclose(tree.sum(2, 4), 3.0)
assert np.isclose(tree.sum(1, 2), 0.0)
def test_prefixsum_idx():
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[3] = 3.0
assert tree.find_prefixsum_idx(0.0) == 2
assert tree.find_prefixsum_idx(0.5) == 2
assert tree.find_prefixsum_idx(0.99) == 2
assert tree.find_prefixsum_idx(1.01) == 3
assert tree.find_prefixsum_idx(3.00) == 3
assert tree.find_prefixsum_idx(4.00) == 3
def test_prefixsum_idx2():
tree = SumSegmentTree(4)
tree[0] = 0.5
tree[1] = 1.0
tree[2] = 1.0
tree[3] = 3.0
assert tree.find_prefixsum_idx(0.00) == 0
assert tree.find_prefixsum_idx(0.55) == 1
assert tree.find_prefixsum_idx(0.99) == 1
assert tree.find_prefixsum_idx(1.51) == 2
assert tree.find_prefixsum_idx(3.00) == 3
assert tree.find_prefixsum_idx(5.50) == 3
def test_max_interval_tree():
tree = MinSegmentTree(4)
tree[0] = 1.0
tree[2] = 0.5
tree[3] = 3.0
assert np.isclose(tree.min(), 0.5)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 0.5)
assert np.isclose(tree.min(0, -1), 0.5)
assert np.isclose(tree.min(2, 4), 0.5)
assert np.isclose(tree.min(3, 4), 3.0)
tree[2] = 0.7
assert np.isclose(tree.min(), 0.7)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 0.7)
assert np.isclose(tree.min(0, -1), 0.7)
assert np.isclose(tree.min(2, 4), 0.7)
assert np.isclose(tree.min(3, 4), 3.0)
tree[2] = 4.0
assert np.isclose(tree.min(), 1.0)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 1.0)
assert np.isclose(tree.min(0, -1), 1.0)
assert np.isclose(tree.min(2, 4), 3.0)
assert np.isclose(tree.min(2, 3), 4.0)
assert np.isclose(tree.min(2, -1), 4.0)
assert np.isclose(tree.min(3, 4), 3.0)
if __name__ == '__main__':
test_tree_set()
test_tree_set_overlap()
test_prefixsum_idx()
test_prefixsum_idx2()
test_max_interval_tree()
| 2,691 | 24.884615 | 72 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/tests/test_tf_util.py | # tests for tf_util
import tensorflow as tf
from baselines.common.tf_util import (
function,
initialize,
single_threaded_session
)
def test_function():
with tf.Graph().as_default():
x = tf.compat.v1.placeholder(tf.int32, (), name="x")
y = tf.compat.v1.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(2, 2) == 10
def test_multikwargs():
with tf.Graph().as_default():
x = tf.compat.v1.placeholder(tf.int32, (), name="x")
with tf.compat.v1.variable_scope("other"):
x2 = tf.compat.v1.placeholder(tf.int32, (), name="x")
z = 3 * x + 2 * x2
lin = function([x, x2], z, givens={x2: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(2, 2) == 10
if __name__ == '__main__':
test_function()
test_multikwargs()
| 1,050 | 24.634146 | 65 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/vec_env/__init__.py | from abc import ABC, abstractmethod
from baselines import logger
class AlreadySteppingError(Exception):
"""
Raised when an asynchronous step is running while
step_async() is called again.
"""
def __init__(self):
msg = 'already running an async step'
Exception.__init__(self, msg)
class NotSteppingError(Exception):
"""
Raised when an asynchronous step is not running but
step_wait() is called.
"""
def __init__(self):
msg = 'not running an async step'
Exception.__init__(self, msg)
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
@abstractmethod
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def render(self):
logger.warn('Render not defined for %s'%self)
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
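# --- Illustrative sketch (added for clarity; not part of the original module). ---
# A minimal VecEnvWrapper subclass only has to provide reset() and step_wait();
# step_async(), close() and render() are forwarded to the wrapped VecEnv by the base
# class. The class below is a hypothetical example, not used anywhere in baselines.
class _ExampleRewardScaleWrapper(VecEnvWrapper):
    def __init__(self, venv, scale=0.1):
        VecEnvWrapper.__init__(self, venv)
        self.scale = scale
    def reset(self):
        return self.venv.reset()
    def step_wait(self):
        obs, rews, dones, infos = self.venv.step_wait()
        return obs, rews * self.scale, dones, infos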
| 3,378 | 25.606299 | 90 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/vec_env/dummy_vec_env.py | import numpy as np
from gym import spaces
from collections import OrderedDict
from . import VecEnv
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
if isinstance(obs_space, spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
for key, box in obs_space.spaces.items():
assert isinstance(box, spaces.Box)
shapes[key] = box.shape
dtypes[key] = box.dtype
self.keys.append(key)
else:
box = obs_space
assert isinstance(box, spaces.Box)
self.keys = [None]
shapes, dtypes = { None: box.shape }, { None: box.dtype }
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
        self.buf_dones = np.zeros((self.num_envs,), dtype=bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
for e in range(self.num_envs):
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(self.actions[e])
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def close(self):
return
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
if self.keys==[None]:
return self.buf_obs[None]
else:
return self.buf_obs
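# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Assumes the `gym` package (classic 4-tuple step API) and the 'CartPole-v1' id are
# available; DummyVecEnv just steps the wrapped environments sequentially in-process.
if __name__ == '__main__':
    import gym
    venv = DummyVecEnv([lambda: gym.make('CartPole-v1') for _ in range(2)])
    obs = venv.reset()  # shape (num_envs, obs_dim)
    actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
    obs, rews, dones, infos = venv.step(actions)
    print(obs.shape, rews.shape, dones.shape)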
| 2,294 | 34.307692 | 111 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/vec_env/subproc_vec_env.py | import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
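# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Assumes the `gym` package (classic 4-tuple step API) and the 'CartPole-v1' id are
# available. The __main__ guard matters here: with spawn-style multiprocessing the worker
# processes re-import this module, so environments must not be constructed at import time.
if __name__ == '__main__':
    import gym
    venv = SubprocVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
    obs = venv.reset()
    actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
    obs, rews, dones, infos = venv.step(actions)
    print(obs.shape, rews.shape)
    venv.close()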
| 2,864 | 33.107143 | 97 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/vec_env/vec_frame_stack.py | from baselines.common.vec_env import VecEnvWrapper
import numpy as np
from gym import spaces
class VecFrameStack(VecEnvWrapper):
"""
Vectorized environment base class
"""
def __init__(self, venv, nstack):
self.venv = venv
self.nstack = nstack
wos = venv.observation_space # wrapped ob space
low = np.repeat(wos.low, self.nstack, axis=-1)
high = np.repeat(wos.high, self.nstack, axis=-1)
self.stackedobs = np.zeros((venv.num_envs,)+low.shape, low.dtype)
observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1)
for (i, new) in enumerate(news):
if new:
self.stackedobs[i] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs, rews, news, infos
def reset(self):
"""
Reset all environments
"""
obs = self.venv.reset()
self.stackedobs[...] = 0
self.stackedobs[..., -obs.shape[-1]:] = obs
return self.stackedobs
def close(self):
self.venv.close()
| 1,319 | 32.846154 | 94 | py |
CSD-manipulation | CSD-manipulation-master/baselines/common/vec_env/vec_normalize.py | from baselines.common.vec_env import VecEnvWrapper
from baselines.common.running_mean_std import RunningMeanStd
import numpy as np
class VecNormalize(VecEnvWrapper):
"""
Vectorized environment base class
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
VecEnvWrapper.__init__(self, venv)
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step_wait(self):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
"""
Reset all environments
"""
obs = self.venv.reset()
return self._obfilt(obs)
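# Note (added for clarity, not part of the original module): rewards are scaled by the
# standard deviation of the running *discounted return* (self.ret = gamma * self.ret + r),
# not of the raw rewards, which keeps the effective reward magnitude roughly O(1) while
# preserving its sign; observations are standardized per dimension and then clipped.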
| 1,679 | 34 | 120 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/__init__.py | 0 | 0 | 0 | py |
|
CSD-manipulation | CSD-manipulation-master/baselines/her/actor_critic.py | import tensorflow as tf
from baselines.her.util import store_args, nn
import numpy as np
EPS = 1e-8
LOG_STD_MAX = 2
LOG_STD_MIN = -5
def gaussian_likelihood(x, mu, log_std):
pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi))
return tf.reduce_sum(input_tensor=pre_sum, axis=1)
def clip_but_pass_gradient(x, l=-1., u=1.):
clip_up = tf.cast(x > u, tf.float32)
clip_low = tf.cast(x < l, tf.float32)
return x + tf.stop_gradient((u - x)*clip_up + (l - x)*clip_low)
def mlp_gaussian_policy(x, act_dim, hidden, layers):
net = nn(x, [hidden] * (layers+1))
mu = tf.compat.v1.layers.dense(net, act_dim, activation=None)
log_std = tf.compat.v1.layers.dense(net, act_dim, activation=tf.tanh)
log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1)
std = tf.exp(log_std)
pi = mu + tf.random.normal(tf.shape(input=mu)) * std
logp_pi = gaussian_likelihood(pi, mu, log_std)
return mu, pi, logp_pi
def apply_squashing_func(mu, pi, logp_pi):
mu = tf.tanh(mu)
pi = tf.tanh(pi)
logp_pi -= tf.reduce_sum(input_tensor=tf.math.log(clip_but_pass_gradient(1 - pi**2, l=0, u=1) + 1e-6), axis=1)
return mu, pi, logp_pi
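# Note (added for clarity, not in the original): apply_squashing_func implements the
# change-of-variables correction for a tanh-squashed Gaussian policy (as in SAC):
# log pi(u) = log N(x; mu, std) - sum_i log(1 - tanh(x_i)^2), which is the term
# subtracted from logp_pi above (with a small constant added for numerical stability).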
class ActorCritic:
@store_args
def __init__(self, inputs_tf, dimo, dimg, dimu, max_u, o_stats, g_stats, hidden, layers, sac, **kwargs):
"""The actor-critic network and related training code.
Args:
inputs_tf (dict of tensors): all necessary inputs for the network: the
observation (o), the goal (g), and the action (u)
dimo (int): the dimension of the observations
dimg (int): the dimension of the goals
dimu (int): the dimension of the actions
max_u (float): the maximum magnitude of actions; action outputs will be scaled
accordingly
o_stats (baselines.her.Normalizer): normalizer for observations
g_stats (baselines.her.Normalizer): normalizer for goals
hidden (int): number of hidden units that should be used in hidden layers
            layers (int): number of hidden layers
            sac (bool): if True, build a squashed-Gaussian (SAC-style) stochastic
                policy; if False, build a deterministic tanh policy as in DDPG
        """
self.o_tf = inputs_tf['o']
self.z_tf = inputs_tf['z']
self.g_tf = inputs_tf['g']
self.u_tf = inputs_tf['u']
# Prepare inputs for actor and critic.
o = self.o_stats.normalize(self.o_tf)
g = self.g_stats.normalize(self.g_tf)
z = self.z_tf
input_pi = tf.concat(axis=1, values=[o, z, g]) # for actor
# policy net
if sac:
with tf.compat.v1.variable_scope('pi'):
mu, pi, logp_pi = mlp_gaussian_policy(input_pi, self.dimu, self.hidden, self.layers)
mu, pi, self.logp_pi_tf = apply_squashing_func(mu, pi, logp_pi)
# make sure actions are in correct range
self.mu_tf = mu * self.max_u
self.pi_tf = pi * self.max_u
self.neg_logp_pi_tf = - self.logp_pi_tf
else: # ddpg
with tf.compat.v1.variable_scope('pi'):
self.pi_tf = self.max_u * tf.tanh(nn(
input_pi, [self.hidden] * self.layers + [self.dimu]))
# Q value net
with tf.compat.v1.variable_scope('Q'):
# for policy training
input_Q = tf.concat(axis=1, values=[o, z, g, self.pi_tf / self.max_u])
self.Q_pi_tf = nn(input_Q, [self.hidden] * self.layers + [1])
# for critic training
input_Q = tf.concat(axis=1, values=[o, z, g, self.u_tf / self.max_u])
self._input_Q = input_Q # exposed for tests
self.Q_tf = nn(input_Q, [self.hidden] * self.layers + [1], reuse=True)
| 3,727 | 39.967033 | 114 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/ddpg.py | from collections import OrderedDict, defaultdict
import numpy as np
import tensorflow as tf
# from tensorflow.contrib.staging import StagingArea
from tensorflow.python.ops.data_flow_ops import StagingArea
from baselines import logger
from baselines.her.util import (
import_function, store_args, flatten_grads, transitions_in_episode_batch, save_weight, load_weight)
from baselines.her.normalizer import Normalizer
from baselines.her.replay_buffer import ReplayBuffer
from baselines.common.mpi_adam import MpiAdam
from baselines.common.mpi_sgd import MpiSgd
import baselines.common.tf_util as U
import json
from collections import deque
def dims_to_shapes(input_dims):
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
class DDPG(object):
@store_args
def __init__(
self, input_dims, buffer_size, hidden, layers, network_class_actor_critic, network_class_discriminator,
polyak, batch_size, Q_lr, pi_lr, sk_lr, r_scale, sk_r_scale, et_r_scale, norm_eps,
norm_clip, max_u, action_l2, clip_obs, scope, T, rollout_batch_size, subtract_goals, relative_goals,
clip_pos_returns, clip_return, sample_transitions, gamma, env_name, max_timesteps, pretrain_weights,
finetune_pi, sac, reuse=False, history_len=10000,
skill_type='discrete', sk_clip=1, et_clip=1, done_ground=0, obj_prior=0, spectral_normalization=0,
dual_reg=0, dual_init_lambda=1., dual_lam_opt='adam', dual_slack=0., dual_dist='l2',
inner=0, algo='csd', sk_lam_lr=0.001,
**kwargs
):
if self.clip_return is None:
self.clip_return = np.inf
self.create_actor_critic = import_function(self.network_class_actor_critic)
self.create_discriminator = import_function(self.network_class_discriminator)
input_shapes = dims_to_shapes(self.input_dims)
self.dimo = self.input_dims['o']
self.dimz = self.input_dims['z']
self.dimg = self.input_dims['g']
self.dimu = self.input_dims['u']
self.skill_type = skill_type
self.sk_clip = sk_clip
self.et_clip = et_clip
self.done_ground = done_ground
self.obj_prior = obj_prior
self.spectral_normalization = spectral_normalization
self.dual_reg = dual_reg
self.dual_init_lambda = dual_init_lambda
self.dual_lam_opt = dual_lam_opt
self.dual_slack = dual_slack
self.dual_dist = dual_dist
self.inner = inner
self.algo = algo
self.sk_lam_lr = sk_lam_lr
self.env_name = env_name
# Prepare staging area for feeding data to the model.
stage_shapes = OrderedDict()
for key in sorted(self.input_dims.keys()):
if key.startswith('info_'):
continue
stage_shapes[key] = (None, *input_shapes[key])
for key in ['o', 'g']:
stage_shapes[key + '_2'] = stage_shapes[key]
stage_shapes['r'] = (None,)
stage_shapes['w'] = (None,)
stage_shapes['s'] = (None,)
stage_shapes['s_w'] = ()
stage_shapes['r_w'] = ()
stage_shapes['e_w'] = ()
stage_shapes['myd'] = (None,)
self.stage_shapes = stage_shapes
# Create network.
with tf.compat.v1.variable_scope(self.scope):
self.staging_tf = StagingArea(
dtypes=[tf.float32 for _ in self.stage_shapes.keys()],
shapes=list(self.stage_shapes.values()))
self.buffer_ph_tf = [
tf.compat.v1.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]
self.stage_op = self.staging_tf.put(self.buffer_ph_tf)
self._create_network(pretrain_weights, reuse=reuse)
# Configure the replay buffer.
buffer_shapes = {key: (self.T if key != 'o' else self.T+1, *input_shapes[key])
for key, val in input_shapes.items()}
buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)
buffer_shapes['ag'] = (self.T+1, self.dimg)
buffer_shapes['myr'] = (self.T,)
buffer_shapes['myd'] = (self.T,)
buffer_shapes['myv'] = (self.T,)
buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size
self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
self.gl_r_history = deque(maxlen=history_len)
self.sk_r_history = deque(maxlen=history_len)
self.et_r_history = deque(maxlen=history_len)
self.logp_current = 0
self.finetune_pi = finetune_pi
self.info_history = defaultdict(lambda: deque(maxlen=history_len))
def _random_action(self, n):
return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.dimu))
def _preprocess_og(self, o, ag, g):
if self.relative_goals:
g_shape = g.shape
g = g.reshape(-1, self.dimg)
ag = ag.reshape(-1, self.dimg)
g = self.subtract_goals(g, ag)
g = g.reshape(*g_shape)
o = np.clip(o, -self.clip_obs, self.clip_obs)
g = np.clip(g, -self.clip_obs, self.clip_obs)
return o, g
def get_actions(self, o, z, ag, g, noise_eps=0., random_eps=0., use_target_net=False, compute_Q=False, exploit=False):
o, g = self._preprocess_og(o, ag, g)
policy = self.target if use_target_net else self.main
# values to compute
if self.sac:
vals = [policy.mu_tf]
else:
vals = [policy.pi_tf]
if compute_Q:
vals += [policy.Q_pi_tf]
feed = {
policy.o_tf: o.reshape(-1, self.dimo),
policy.z_tf: z.reshape(-1, self.dimz),
policy.g_tf: g.reshape(-1, self.dimg),
policy.u_tf: np.zeros((o.size // self.dimo, self.dimu), dtype=np.float32)
}
ret = self.sess.run(vals, feed_dict=feed)
# action postprocessing
u = ret[0]
noise = noise_eps * self.max_u * np.random.randn(*u.shape) # gaussian noise
u += noise
u = np.clip(u, -self.max_u, self.max_u)
u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u) # eps-greedy
if u.shape[0] == 1:
u = u[0]
u = u.copy()
ret[0] = u
if len(ret) == 1:
return ret[0]
else:
return ret
def store_episode(self, episode_batch, update_stats=True):
"""
episode_batch: array of batch_size x (T or T+1) x dim_key
'o' is of size T+1, others are of size T
"""
episode_batch['s'] = np.empty([episode_batch['o'].shape[0], 1])
# #
self.buffer.store_episode(episode_batch, self)
if update_stats:
# add transitions to normalizer
episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
transitions = self.sample_transitions(self, False, episode_batch, num_normalizing_transitions, 0, 0)
o, o_2, g, ag = transitions['o'], transitions['o_2'], transitions['g'], transitions['ag']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
self.o_stats.update(transitions['o'])
self.g_stats.update(transitions['g'])
self.o_stats.recompute_stats()
self.g_stats.recompute_stats()
def get_current_buffer_size(self):
return self.buffer.get_current_size()
def _sync_optimizers(self):
self.Q_adam.sync()
self.pi_adam.sync()
self.sk_adam.sync()
if self.dual_reg:
self.sk_dual_opt.sync()
if self.dual_dist != 'l2':
self.sk_dist_adam.sync()
def _grads_sk(self, o_s_batch, z_s_batch, o2_s_batch, u_s_batch):
run_list = [self.main_ir.sk_tf, self.sk_grad_tf]
if self.dual_reg:
run_list.extend([self.main_ir.sk_lambda_tf, self.sk_dual_grad_tf])
result = self.sess.run(run_list, feed_dict={
self.main_ir.o_tf: o_s_batch, self.main_ir.z_tf: z_s_batch, self.main_ir.o2_tf: o2_s_batch,
self.main_ir.u_tf: u_s_batch, self.main_ir.is_training: True,
})
return result
def _grads_sk_dist(self, o_s_batch, z_s_batch, o2_s_batch, add_dict):
feed_dict = {self.main_ir.o_tf: o_s_batch, self.main_ir.z_tf: z_s_batch, self.main_ir.o2_tf: o2_s_batch, self.main_ir.is_training: True}
if self.dual_dist == 's2_from_s':
sk_dist, sk_dist_grad, sk_cst_dist = self.sess.run([self.main_ir.sk_dist_tf, self.sk_dist_grad_tf, self.main_ir.cst_dist], feed_dict=feed_dict)
self.info_history['sk_cst_dist'].extend(sk_cst_dist)
self.info_history['sk_dist'].extend(sk_dist)
return sk_dist, sk_dist_grad
def _grads(self):
critic_loss, actor_loss, Q_grad, pi_grad, neg_logp_pi, e_w, log_et_r_scale = self.sess.run([
self.Q_loss_tf,
self.pi_loss_tf,
self.Q_grad_tf,
self.pi_grad_tf,
self.main.neg_logp_pi_tf,
self.e_w_tf,
self.log_et_r_scale_tf,
])
return critic_loss, actor_loss, Q_grad, pi_grad, neg_logp_pi, e_w, log_et_r_scale
def _update(self, Q_grad, pi_grad):
self.Q_adam.update(Q_grad, self.Q_lr)
self.pi_adam.update(pi_grad, self.pi_lr)
def sample_batch(self, ir, t):
transitions = self.buffer.sample(self, ir, self.batch_size, self.sk_r_scale, t)
weights = np.ones_like(transitions['r']).copy()
if ir:
if self.sk_clip:
self.sk_r_history.extend(((np.clip(self.sk_r_scale * transitions['s'], *(-1, 0)))*1.00).tolist())
else:
self.sk_r_history.extend(((self.sk_r_scale * transitions['s']) * 1.00).tolist())
self.gl_r_history.extend(self.r_scale * transitions['r'])
o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']
ag, ag_2 = transitions['ag'], transitions['ag_2']
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
transitions['o_2'], transitions['g_2'] = self._preprocess_og(o_2, ag_2, g)
transitions['w'] = weights.flatten().copy() # note: ordered dict
transitions_batch = [transitions[key] for key in self.stage_shapes.keys()]
return transitions_batch
def stage_batch(self, ir, t, batch=None):
if batch is None:
batch = self.sample_batch(ir, t)
assert len(self.buffer_ph_tf) == len(batch)
self.sess.run(self.stage_op, feed_dict=dict(zip(self.buffer_ph_tf, batch)))
def run_sk(self, o, z, o2=None, u=None):
feed_dict = {self.main_ir.o_tf: o, self.main_ir.z_tf: z, self.main_ir.o2_tf: o2, self.main_ir.u_tf: u, self.main_ir.is_training: True}
if self.dual_reg:
sk_r, cst_twoside, cst_oneside = self.sess.run([self.main_ir.sk_r_tf, self.main_ir.cst_twoside, self.main_ir.cst_oneside], feed_dict=feed_dict)
self.info_history['cst_twoside'].extend(cst_twoside)
self.info_history['cst_oneside'].extend(cst_oneside)
else:
sk_r = self.sess.run(self.main_ir.sk_r_tf, feed_dict=feed_dict)
return sk_r
def train_sk(self, o_s_batch, z_s_batch, o2_s_batch, u_s_batch, stage=True):
result = self._grads_sk(o_s_batch, z_s_batch, o2_s_batch, u_s_batch)
if self.dual_reg:
sk, sk_grad, sk_lambda, sk_dual_grad = result
self.sk_dual_opt.update(sk_dual_grad, self.sk_lam_lr)
else:
sk, sk_grad = result
self.sk_adam.update(sk_grad, self.sk_lr)
return -sk.mean()
def train_sk_dist(self, o_s_batch, z_s_batch, o2_s_batch, add_dict, stage=True):
sk_dist, sk_dist_grad = self._grads_sk_dist(o_s_batch, z_s_batch, o2_s_batch, add_dict)
self.sk_dist_adam.update(sk_dist_grad, self.sk_lr)
return -sk_dist.mean()
def train(self, t, stage=True):
if not self.buffer.current_size==0:
if stage:
self.stage_batch(ir=True, t=t)
critic_loss, actor_loss, Q_grad, pi_grad, neg_logp_pi, e_w, log_et_r_scale = self._grads()
self.info_history['critic_loss'].extend([critic_loss] * neg_logp_pi.shape[0])
self.info_history['actor_loss'].extend([actor_loss] * neg_logp_pi.shape[0])
self._update(Q_grad, pi_grad)
et_r_scale = np.exp(log_et_r_scale)
if self.et_clip:
self.et_r_history.extend((( np.clip((et_r_scale * neg_logp_pi), *(-1, 0))) * e_w ).tolist())
else:
self.et_r_history.extend((( et_r_scale * neg_logp_pi) * e_w ).tolist())
self.et_r_scale_current = et_r_scale
self.logp_current = -neg_logp_pi.mean()
return critic_loss, actor_loss
def _init_target_net(self):
self.sess.run(self.init_target_net_op)
def update_target_net(self):
self.sess.run(self.update_target_net_op)
def clear_buffer(self):
self.buffer.clear_buffer()
def _vars(self, scope):
res = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope + '/' + scope)
assert len(res) > 0
return res
def _global_vars(self, scope):
res = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=self.scope + '/' + scope)
return res
def _create_network(self, pretrain_weights, reuse=False):
if self.sac:
logger.info("Creating a SAC agent with action space %d x %s..." % (self.dimu, self.max_u))
else:
logger.info("Creating a DDPG agent with action space %d x %s..." % (self.dimu, self.max_u))
self.sess = tf.compat.v1.get_default_session()
if self.sess is None:
self.sess = tf.compat.v1.InteractiveSession()
# running averages
with tf.compat.v1.variable_scope('o_stats') as vs:
if reuse:
vs.reuse_variables()
self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)
with tf.compat.v1.variable_scope('g_stats') as vs:
if reuse:
vs.reuse_variables()
self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)
# mini-batch sampling.
batch = self.staging_tf.get()
batch_tf = OrderedDict([(key, batch[i])
for i, key in enumerate(self.stage_shapes.keys())])
batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])
batch_tf['w'] = tf.reshape(batch_tf['w'], [-1, 1])
batch_tf['s'] = tf.reshape(batch_tf['s'], [-1, 1])
batch_tf['myd'] = tf.reshape(batch_tf['myd'], [-1, 1])
self.o_tau_tf = tf.compat.v1.placeholder(tf.float32, shape=(None, None, self.dimo))
# networks
with tf.compat.v1.variable_scope('main') as vs:
if reuse:
vs.reuse_variables()
self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)
vs.reuse_variables()
with tf.compat.v1.variable_scope('target') as vs:
if reuse:
vs.reuse_variables()
target_batch_tf = batch_tf.copy()
target_batch_tf['o'] = batch_tf['o_2']
target_batch_tf['g'] = batch_tf['g_2']
self.target = self.create_actor_critic(
target_batch_tf, net_type='target', **self.__dict__)
vs.reuse_variables()
assert len(self._vars("main")) == len(self._vars("target"))
# intrinsic reward (ir) network for mutual information
with tf.compat.v1.variable_scope('ir') as vs:
if reuse:
vs.reuse_variables()
self.main_ir = self.create_discriminator(batch_tf, net_type='ir', **self.__dict__)
vs.reuse_variables()
# loss functions
sk_grads_tf = tf.gradients(ys=tf.reduce_mean(input_tensor=self.main_ir.sk_tf), xs=self._vars('ir/skill_ds'))
assert len(self._vars('ir/skill_ds')) == len(sk_grads_tf)
self.sk_grads_vars_tf = zip(sk_grads_tf, self._vars('ir/skill_ds')) # Seems not used
self.sk_grad_tf = flatten_grads(grads=sk_grads_tf, var_list=self._vars('ir/skill_ds'))
self.sk_adam = MpiAdam(self._vars('ir/skill_ds'), scale_grad_by_procs=False)
if self.dual_reg:
sk_dual_grads_tf = tf.gradients(ys=tf.reduce_mean(input_tensor=self.main_ir.sk_lambda_tf), xs=self._vars('ir/skill_dual'))
assert len(self._vars('ir/skill_dual')) == len(sk_dual_grads_tf)
self.sk_dual_grad_tf = flatten_grads(grads=sk_dual_grads_tf, var_list=self._vars('ir/skill_dual'))
if self.dual_lam_opt == 'adam':
self.sk_dual_opt = MpiAdam(self._vars('ir/skill_dual'), scale_grad_by_procs=False)
else:
self.sk_dual_opt = MpiSgd(self._vars('ir/skill_dual'), scale_grad_by_procs=False)
if self.dual_dist != 'l2':
sk_dist_grads_tf = tf.gradients(ys=tf.reduce_mean(input_tensor=self.main_ir.sk_dist_tf), xs=self._vars('ir/skill_dist'))
assert len(self._vars('ir/skill_dist')) == len(sk_dist_grads_tf)
self.sk_dist_grad_tf = flatten_grads(grads=sk_dist_grads_tf, var_list=self._vars('ir/skill_dist'))
self.sk_dist_adam = MpiAdam(self._vars('ir/skill_dist'), scale_grad_by_procs=False)
target_Q_pi_tf = self.target.Q_pi_tf
clip_range = (-self.clip_return, self.clip_return if self.clip_pos_returns else np.inf)
self.e_w_tf = batch_tf['e_w']
if not self.sac:
self.main.neg_logp_pi_tf = tf.zeros(1)
et_r_scale_init = tf.constant_initializer(np.log(self.et_r_scale))
self.log_et_r_scale_tf = tf.compat.v1.get_variable('alpha/log_et_r_scale', (), tf.float32, initializer=et_r_scale_init)
et_r_scale = tf.exp(self.log_et_r_scale_tf)
target_tf = tf.clip_by_value(self.r_scale * batch_tf['r'] * batch_tf['r_w']
+ (tf.clip_by_value( self.sk_r_scale * batch_tf['s'], *(-1, 0)) if self.sk_clip else self.sk_r_scale * batch_tf['s']) * batch_tf['s_w']
+ (tf.clip_by_value( et_r_scale * self.main.neg_logp_pi_tf, *(-1, 0)) if self.et_clip else et_r_scale * self.main.neg_logp_pi_tf) * self.e_w_tf
+ (self.gamma * target_Q_pi_tf * (1 - batch_tf['myd']) if self.done_ground else self.gamma * target_Q_pi_tf), *clip_range)
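        # Note (added for clarity, not in the original): the Bellman target combines the
        # scaled environment reward, the skill-discriminator intrinsic reward `s`, an
        # entropy bonus -log pi weighted by the scheduled e_w, and the bootstrapped
        # target Q value (masked by `myd` when done_ground is set), all clipped to clip_range.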
self.td_error_tf = tf.stop_gradient(target_tf) - self.main.Q_tf
self.errors_tf = tf.square(self.td_error_tf)
self.errors_tf = tf.reduce_mean(input_tensor=batch_tf['w'] * self.errors_tf)
self.Q_loss_tf = tf.reduce_mean(input_tensor=self.errors_tf)
self.pi_loss_tf = -tf.reduce_mean(input_tensor=self.main.Q_pi_tf)
self.pi_loss_tf += self.action_l2 * tf.reduce_mean(input_tensor=tf.square(self.main.pi_tf / self.max_u))
Q_grads_tf = tf.gradients(ys=self.Q_loss_tf, xs=self._vars('main/Q'))
pi_grads_tf = tf.gradients(ys=self.pi_loss_tf, xs=self._vars('main/pi'))
assert len(self._vars('main/Q')) == len(Q_grads_tf)
assert len(self._vars('main/pi')) == len(pi_grads_tf)
self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))
self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))
self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))
self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))
# optimizers
self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)
self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)
self.main_vars = self._vars('main/Q') + self._vars('main/pi')
self.target_vars = self._vars('target/Q') + self._vars('target/pi')
# polyak averaging
self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')
self.init_target_net_op = list(
map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))
self.update_target_net_op = list(
map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))
# initialize all variables
tf.compat.v1.variables_initializer(self._global_vars('')).run()
if pretrain_weights:
load_weight(self.sess, pretrain_weights, [''])
self._sync_optimizers()
# if pretrain_weights and self.finetune_pi:
# load_weight(self.sess, pretrain_weights, ['target'])
# else:
# self._init_target_net()
def logs(self, prefix='', is_policy_training=True):
logs = []
logs += [('stats_o/mean', np.mean(self.sess.run([self.o_stats.mean])))]
logs += [('stats_o/std', np.mean(self.sess.run([self.o_stats.std])))]
logs += [('stats_g/mean', np.mean(self.sess.run([self.g_stats.mean])))]
logs += [('stats_g/std', np.mean(self.sess.run([self.g_stats.std])))]
if is_policy_training:
logs += [('sk_reward/mean', np.mean(self.sk_r_history))]
logs += [('sk_reward/std', np.std(self.sk_r_history))]
logs += [('sk_reward/max', np.max(self.sk_r_history))]
logs += [('sk_reward/min', np.min(self.sk_r_history))]
logs += [('et_reward/mean', np.mean(self.et_r_history))]
logs += [('et_reward/std', np.std(self.et_r_history))]
logs += [('et_reward/max', np.max(self.et_r_history))]
logs += [('et_reward/min', np.min(self.et_r_history))]
logs += [('et_train/logp', self.logp_current)]
logs += [('et_train/et_r_scale', self.et_r_scale_current)]
logs += [('gl_reward/mean', np.mean(self.gl_r_history))]
logs += [('gl_reward/std', np.std(self.gl_r_history))]
logs += [('gl_reward/max', np.max(self.gl_r_history))]
logs += [('gl_reward/min', np.min(self.gl_r_history))]
logs += [('loss/actor_loss', np.mean(self.info_history['actor_loss']))]
logs += [('loss/critic_loss', np.mean(self.info_history['critic_loss']))]
if self.dual_reg:
logs += [('sk_dual/sk_lambda', np.exp(self.sess.run(self.main_ir.log_sk_lambda)))]
logs += [('sk_dual/cst_twoside_mean', np.mean(self.info_history['cst_twoside']))]
logs += [('sk_dual/cst_oneside_mean', np.mean(self.info_history['cst_oneside']))]
if self.dual_reg and self.dual_dist != 'l2':
logs += [('sk_dual/sk_cst_dist', np.mean(self.info_history['sk_cst_dist']))]
logs += [('sk_dual/sk_dist', np.mean(self.info_history['sk_dist']))]
        if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
def __getstate__(self):
"""Our policies can be loaded from pkl, but after unpickling you cannot continue training.
"""
excluded_subnames = ['_tf', '_op', '_vars', '_adam', '_sgd', 'buffer', 'sess', '_stats',
'main', 'target', 'lock', 'sample_transitions',
'stage_shapes', 'create_actor_critic', 'create_discriminator', '_history']
state = {k: v for k, v in self.__dict__.items() if all([not subname in k for subname in excluded_subnames])}
state['buffer_size'] = self.buffer_size
state['tf'] = self.sess.run([x for x in self._global_vars('') if 'buffer' not in x.name])
return state
def __setstate__(self, state):
if 'sample_transitions' not in state:
# We don't need this for playing the policy.
state['sample_transitions'] = None
if 'env_name' not in state:
state['env_name'] = 'FetchPickAndPlace-v1'
if 'network_class_discriminator' not in state:
state['network_class_discriminator'] = 'baselines.her.discriminator:Discriminator'
if 'sk_r_scale' not in state:
state['sk_r_scale'] = 1
if 'sk_lr' not in state:
state['sk_lr'] = 0.001
if 'et_r_scale' not in state:
state['et_r_scale'] = 1
if 'finetune_pi' not in state:
state['finetune_pi'] = None
if 'load_weight' not in state:
state['load_weight'] = None
if 'pretrain_weights' not in state:
state['pretrain_weights'] = None
if 'sac' not in state:
state['sac'] = None
self.__init__(**state)
# set up stats (they are overwritten in __init__)
for k, v in state.items():
if k[-6:] == '_stats':
self.__dict__[k] = v
# load TF variables
vars = [x for x in self._global_vars('') if 'buffer' not in x.name]
assert(len(vars) == len(state["tf"]))
node = [tf.compat.v1.assign(var, val) for var, val in zip(vars, state["tf"])]
self.sess.run(node)
| 25,444 | 45.860037 | 180 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/discriminator.py | import tensorflow as tf
from baselines.her.normalizer import Normalizer
from baselines.her.util import store_args, nn, snn
import numpy as np
class Discriminator:
@store_args
def __init__(self, inputs_tf, dimo, dimz, dimg, dimu, max_u, o_stats, g_stats, hidden, layers, env_name, **kwargs):
"""The discriminator network and related training code.
Args:
inputs_tf (dict of tensors): all necessary inputs for the network: the
observation (o), the goal (g), and the action (u)
dimo (int): the dimension of the observations
dimg (int): the dimension of the goals
dimu (int): the dimension of the actions
max_u (float): the maximum magnitude of actions; action outputs will be scaled
accordingly
o_stats (baselines.her.Normalizer): normalizer for observations
g_stats (baselines.her.Normalizer): normalizer for goals
hidden (int): number of hidden units that should be used in hidden layers
layers (int): number of hidden layers
"""
self.o_tf = tf.compat.v1.placeholder(tf.float32, shape=(None, self.dimo))
self.o2_tf = tf.compat.v1.placeholder(tf.float32, shape=(None, self.dimo))
self.z_tf = tf.compat.v1.placeholder(tf.float32, shape=(None, self.dimz))
self.g_tf = tf.compat.v1.placeholder(tf.float32, shape=(None, self.dimg))
self.u_tf = tf.compat.v1.placeholder(tf.float32, shape=(None, self.dimu))
self.is_training = tf.compat.v1.placeholder(tf.bool, shape=())
o_tau_tf = self.o_tau_tf
o_tf = self.o_tf
o2_tf = self.o2_tf
obs_focus = o_tf
obs2_focus = o2_tf
if self.dual_reg:
with tf.compat.v1.variable_scope('skill_dual', reuse=tf.compat.v1.AUTO_REUSE):
sk_lambda_init = tf.constant_initializer(np.log(self.dual_init_lambda))
self.log_sk_lambda = tf.compat.v1.get_variable('lam/log_sk_lambda', (), tf.float32, initializer=sk_lambda_init)
with tf.compat.v1.variable_scope('skill_dist', reuse=tf.compat.v1.AUTO_REUSE):
if self.dual_dist == 'l2':
pass
elif self.dual_dist == 's2_from_s':
dims2 = obs2_focus.shape[1]
self.s2_mean_tf = snn(obs_focus, [int(self.hidden / 2)] * self.layers + [dims2], name='s2_mean', is_training=self.is_training)
self.s2_log_std_tf = snn(obs_focus, [int(self.hidden / 2)] * self.layers + [dims2], name='s2_log_std', is_training=self.is_training)
self.s2_clamped_log_std_tf = tf.clip_by_value(self.s2_log_std_tf, -13.8155, 1000)
self.s2_clamped_std_tf = tf.exp(self.s2_clamped_log_std_tf)
# Predict delta_s
self.sk_dist_tf = tf.math.reduce_sum(input_tensor=(tf.abs(self.s2_mean_tf - (obs2_focus - obs_focus)) / self.s2_clamped_std_tf) ** 2 + self.s2_clamped_std_tf, axis=1)
with tf.compat.v1.variable_scope('skill_ds', reuse=tf.compat.v1.AUTO_REUSE):
if self.skill_type == 'discrete':
eye_z = tf.tile(tf.expand_dims(tf.eye(self.dimz), 0), [tf.shape(input=obs_focus)[0], 1, 1])
self.mean_tf = snn(obs_focus, [int(self.hidden / 2)] * self.layers + [self.dimz], name='mean', sn=self.spectral_normalization, is_training=self.is_training)
self.mean2_tf = snn(obs2_focus, [int(self.hidden / 2)] * self.layers + [self.dimz], name='mean', sn=self.spectral_normalization, is_training=self.is_training)
self.mean_diff_tf = self.mean2_tf - self.mean_tf
mean_diff_tf = tf.expand_dims(self.mean_diff_tf, 1)
logits = tf.math.reduce_sum(input_tensor=eye_z * mean_diff_tf, axis=2)
masks = self.z_tf * self.dimz / (self.dimz - 1) - 1 / (self.dimz - 1)
self.sk_tf = -tf.reduce_sum(input_tensor=logits * masks, axis=1)
self.sk_r_tf = -1 * self.sk_tf
else:
self.mean_tf = snn(obs_focus, [int(self.hidden / 2)] * self.layers + [self.dimz], name='mean', sn=self.spectral_normalization, is_training=self.is_training)
self.mean2_tf = snn(obs2_focus, [int(self.hidden / 2)] * self.layers + [self.dimz], name='mean', sn=self.spectral_normalization, is_training=self.is_training)
self.mean_diff_tf = self.mean2_tf - self.mean_tf
self.sk_tf = -tf.math.reduce_sum(input_tensor=self.mean_diff_tf * self.z_tf, axis=1)
self.sk_r_tf = -1 * self.sk_tf
if self.dual_reg:
x = obs_focus
y = obs2_focus
phi_x = self.mean_tf
phi_y = self.mean2_tf
if self.dual_dist == 'l2':
self.cst_dist = tf.reduce_mean(tf.square(y - x), axis=1)
elif self.dual_dist == 's2_from_s':
self.scaling_factor = 1. / (self.s2_clamped_std_tf)
self.geo_mean = tf.exp(tf.reduce_mean(tf.math.log(self.scaling_factor), axis=1, keepdims=True))
self.normalized_scaling_factor = (self.scaling_factor / self.geo_mean) ** 2
self.cst_dist = tf.reduce_mean(tf.abs(self.s2_mean_tf - (obs2_focus - obs_focus)) ** 2 * self.normalized_scaling_factor, axis=1)
self.phi_dist = tf.reduce_mean(tf.square(phi_y - phi_x), axis=1)
self.cst_twoside = self.cst_dist - self.phi_dist
self.cst_oneside = tf.minimum(self.dual_slack, self.cst_twoside)
self.cst_penalty = -tf.stop_gradient(tf.exp(self.log_sk_lambda)) * self.cst_oneside
self.sk_lambda_tf = self.log_sk_lambda * tf.stop_gradient(self.cst_oneside)
self.sk_tf = self.sk_tf + self.cst_penalty
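# Note (added for clarity, not in the original): with dual_reg the skill loss sk_tf
# (whose negative is the intrinsic reward) is optimized under the constraint that the
# squared feature difference phi_dist stays below the state-space distance cst_dist
# selected by dual_dist. cst_oneside caps the credit for over-satisfying that constraint
# at dual_slack, and exp(log_sk_lambda) acts as a Lagrange multiplier whose update comes
# from sk_lambda_tf via a separate optimizer.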
| 5,880 | 60.260417 | 186 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/her.py | import numpy as np
import random
from baselines.common.schedules import PiecewiseSchedule
from scipy.stats import rankdata
def make_sample_her_transitions(replay_strategy, replay_k, reward_fun, et_w_schedule):
if (replay_strategy == 'future') or (replay_strategy == 'final'):
future_p = 1 - (1. / (1 + replay_k))
else:
future_p = 0
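    # With replay_k relabeled goals per original transition, a sampled transition
    # is relabeled with probability replay_k / (replay_k + 1); e.g. replay_k = 4
    # gives future_p = 0.8, while replay_k = 0 disables goal relabeling.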
et_w_scheduler = PiecewiseSchedule(endpoints=et_w_schedule)
def _sample_her_transitions(ddpg, ir, episode_batch, batch_size_in_transitions, sk_r_scale, t):
"""episode_batch is {key: array(buffer_size x T x dim_key)}
"""
T = episode_batch['u'].shape[1]
rollout_batch_size = episode_batch['u'].shape[0]
batch_size = batch_size_in_transitions
# Select which episodes and time steps to use.
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
# t_samples = np.random.randint(T, size=batch_size)
t_samples = []
for i in range(batch_size):
max_t = episode_batch['myv'][episode_idxs[i]].sum().astype(int)
t_sample = np.random.randint(max_t)
t_samples.append(t_sample)
t_samples = np.array(t_samples)
# calculate intrinsic rewards
sk_trans = np.zeros([episode_idxs.shape[0], 1])
if ir:
o_curr = episode_batch['o'][episode_idxs, t_samples].copy()
o_curr = np.reshape(o_curr, (o_curr.shape[0], 1, o_curr.shape[-1]))
o_next = episode_batch['o'][episode_idxs, t_samples+1].copy()
o_next = np.reshape(o_next, (o_next.shape[0], 1, o_next.shape[-1]))
o = episode_batch['o'][episode_idxs, t_samples].copy()
o2 = episode_batch['o_2'][episode_idxs, t_samples].copy()
z = episode_batch['z'][episode_idxs, t_samples].copy()
u = episode_batch['u'][episode_idxs, t_samples].copy()
if sk_r_scale > 0:
sk_r = ddpg.run_sk(o, z, o2, u)
sk_trans = sk_r.copy()
# #
transitions = {}
for key in episode_batch.keys():
if not (key == 's' or key == 'p'):
transitions[key] = episode_batch[key][episode_idxs, t_samples].copy()
else:
transitions[key] = episode_batch[key][episode_idxs].copy()
transitions['s'] = transitions['s'].flatten().copy()
her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
future_offset = future_offset.astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]
if replay_strategy == 'final':
future_t[:] = T
future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
transitions['g'][her_indexes] = future_ag
info = {}
for key, value in transitions.items():
if key.startswith('info_'):
info[key.replace('info_', '')] = value
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
reward_params['info'] = info
# transitions['r'] = reward_fun(**reward_params)
transitions['r'] = transitions['myr']
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
for k in transitions.keys()}
if ir:
transitions['s'] = sk_trans.flatten().copy()
transitions['s_w'] = 1.0
transitions['r_w'] = 1.0
transitions['e_w'] = et_w_scheduler.value(t)
assert(transitions['u'].shape[0] == batch_size_in_transitions)
return transitions
return _sample_her_transitions
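# Minimal usage sketch (illustrative values only; `ddpg` is a trained agent and
# `buffer` follows the {key: array(buffer_size x T x dim_key)} layout documented above):
#   sample_fn = make_sample_her_transitions('future', replay_k=4,
#                                           reward_fun=env.compute_reward,
#                                           et_w_schedule=[(0, 0.2), (200, 0.2)])
#   batch = sample_fn(ddpg, ir=True, episode_batch=buffer,
#                     batch_size_in_transitions=256, sk_r_scale=1.0, t=0)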
| 3,679 | 38.569892 | 99 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/normalizer.py | import threading
import numpy as np
from mpi4py import MPI
import tensorflow as tf
from baselines.her.util import reshape_for_broadcasting
class Normalizer:
def __init__(self, size, eps=1e-2, default_clip_range=np.inf, sess=None):
"""A normalizer that ensures that observations are approximately distributed according to
a standard Normal distribution (i.e. have mean zero and variance one).
Args:
size (int): the size of the observation to be normalized
eps (float): a small constant that avoids underflows
default_clip_range (float): normalized observations are clipped to be in
[-default_clip_range, default_clip_range]
sess (object): the TensorFlow session to be used
"""
self.size = size
self.eps = eps
self.default_clip_range = default_clip_range
self.sess = sess if sess is not None else tf.compat.v1.get_default_session()
self.local_sum = np.zeros(self.size, np.float32)
self.local_sumsq = np.zeros(self.size, np.float32)
self.local_count = np.zeros(1, np.float32)
self.sum_tf = tf.compat.v1.get_variable(
initializer=tf.compat.v1.zeros_initializer(), shape=self.local_sum.shape, name='sum',
trainable=False, dtype=tf.float32)
self.sumsq_tf = tf.compat.v1.get_variable(
initializer=tf.compat.v1.zeros_initializer(), shape=self.local_sumsq.shape, name='sumsq',
trainable=False, dtype=tf.float32)
self.count_tf = tf.compat.v1.get_variable(
initializer=tf.compat.v1.ones_initializer(), shape=self.local_count.shape, name='count',
trainable=False, dtype=tf.float32)
self.mean = tf.compat.v1.get_variable(
initializer=tf.compat.v1.zeros_initializer(), shape=(self.size,), name='mean',
trainable=False, dtype=tf.float32)
self.std = tf.compat.v1.get_variable(
initializer=tf.compat.v1.ones_initializer(), shape=(self.size,), name='std',
trainable=False, dtype=tf.float32)
self.count_pl = tf.compat.v1.placeholder(name='count_pl', shape=(1,), dtype=tf.float32)
self.sum_pl = tf.compat.v1.placeholder(name='sum_pl', shape=(self.size,), dtype=tf.float32)
self.sumsq_pl = tf.compat.v1.placeholder(name='sumsq_pl', shape=(self.size,), dtype=tf.float32)
self.update_op = tf.group(
self.count_tf.assign_add(self.count_pl),
self.sum_tf.assign_add(self.sum_pl),
self.sumsq_tf.assign_add(self.sumsq_pl)
)
self.recompute_op = tf.group(
tf.compat.v1.assign(self.mean, self.sum_tf / self.count_tf),
tf.compat.v1.assign(self.std, tf.sqrt(tf.maximum(
tf.square(self.eps),
self.sumsq_tf / self.count_tf - tf.square(self.sum_tf / self.count_tf)
))),
)
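        # recompute_op refreshes mean = sum / count and
        # std = sqrt(max(eps^2, E[x^2] - E[x]^2)) from the accumulated moments.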
self.lock = threading.Lock()
def update(self, v):
v = v.reshape(-1, self.size)
with self.lock:
self.local_sum += v.sum(axis=0)
self.local_sumsq += (np.square(v)).sum(axis=0)
self.local_count[0] += v.shape[0]
def normalize(self, v, clip_range=None, zero_mean=True):
if clip_range is None:
clip_range = self.default_clip_range
mean = reshape_for_broadcasting(self.mean, v)
std = reshape_for_broadcasting(self.std, v)
if zero_mean:
return tf.clip_by_value((v - mean) / std, -clip_range, clip_range)
else:
return tf.clip_by_value(v / (mean + 1e-8), -clip_range, clip_range)
def denormalize(self, v):
mean = reshape_for_broadcasting(self.mean, v)
std = reshape_for_broadcasting(self.std, v)
return mean + v * std
def _mpi_average(self, x):
buf = np.zeros_like(x)
MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
buf /= MPI.COMM_WORLD.Get_size()
return buf
def synchronize(self, local_sum, local_sumsq, local_count, root=None):
local_sum[...] = self._mpi_average(local_sum)
local_sumsq[...] = self._mpi_average(local_sumsq)
local_count[...] = self._mpi_average(local_count)
return local_sum, local_sumsq, local_count
def recompute_stats(self):
with self.lock:
# Copy over results.
local_count = self.local_count.copy()
local_sum = self.local_sum.copy()
local_sumsq = self.local_sumsq.copy()
# Reset.
self.local_count[...] = 0
self.local_sum[...] = 0
self.local_sumsq[...] = 0
# We perform the synchronization outside of the lock to keep the critical section as short
# as possible.
synced_sum, synced_sumsq, synced_count = self.synchronize(
local_sum=local_sum, local_sumsq=local_sumsq, local_count=local_count)
self.sess.run(self.update_op, feed_dict={
self.count_pl: synced_count,
self.sum_pl: synced_sum,
self.sumsq_pl: synced_sumsq,
})
self.sess.run(self.recompute_op)
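# Usage sketch for Normalizer (assumes an active tf.compat.v1 session and MPI;
# values are illustrative only):
#   norm = Normalizer(size=10)
#   norm.update(np.random.randn(256, 10))  # accumulate local sums / sums of squares
#   norm.recompute_stats()                 # MPI-average them and refresh mean/std
#   o_norm = norm.normalize(obs_tf)        # clipped, standardized observations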
class IdentityNormalizer:
def __init__(self, size, std=1.):
self.size = size
self.mean = tf.zeros(self.size, tf.float32)
self.std = std * tf.ones(self.size, tf.float32)
def update(self, x):
pass
def normalize(self, x, clip_range=None):
return x / self.std
def denormalize(self, x):
return self.std * x
def synchronize(self):
pass
def recompute_stats(self):
pass
| 5,600 | 37.895833 | 103 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/replay_buffer.py | import threading
import numpy as np
class ReplayBuffer:
def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions):
"""Creates a replay buffer.
Args:
buffer_shapes (dict of ints): the shape for all buffers that are used in the replay
buffer
size_in_transitions (int): the size of the buffer, measured in transitions
T (int): the time horizon for episodes
sample_transitions (function): a function that samples from the replay buffer
"""
self.buffer_shapes = buffer_shapes
self.size = size_in_transitions // T
self.T = T
self.sample_transitions = sample_transitions
self.buffers = {key: np.empty([self.size, *shape])
for key, shape in buffer_shapes.items()}
# add key for intrinsic rewards
self.buffers['s'] = np.empty([self.size, 1])
# memory management
self.current_size = 0
self.n_transitions_stored = 0
self.lock = threading.Lock()
@property
def full(self):
with self.lock:
return self.current_size == self.size
def sample(self, ddpg, ir, batch_size, sk_r_scale, t):
"""Returns a dict {key: array(batch_size x shapes[key])}
"""
buffers = {}
with self.lock:
assert self.current_size > 0
for key in self.buffers.keys():
buffers[key] = self.buffers[key][:self.current_size]
buffers['o_2'] = buffers['o'][:, 1:, :]
buffers['ag_2'] = buffers['ag'][:, 1:, :]
transitions = self.sample_transitions(ddpg, ir, buffers, batch_size, sk_r_scale, t)
for key in (['r', 'o_2', 'ag_2'] + list(self.buffers.keys())):
if not (key == 's' or key == 'p'):
assert key in transitions, "key %s missing from transitions" % key
return transitions
def store_episode(self, episode_batch, ddpg):
"""episode_batch: array(batch_size x (T or T+1) x dim_key)
"""
batch_sizes = [len(episode_batch[key]) for key in episode_batch.keys()]
assert np.all(np.array(batch_sizes) == batch_sizes[0])
batch_size = batch_sizes[0]
with self.lock:
idxs = self._get_storage_idx(batch_size)
# load inputs into buffers
for key in self.buffers.keys():
self.buffers[key][idxs] = episode_batch[key]
self.n_transitions_stored += batch_size * self.T
def get_current_episode_size(self):
with self.lock:
return self.current_size
def get_current_size(self):
with self.lock:
return self.current_size * self.T
def get_transitions_stored(self):
with self.lock:
return self.n_transitions_stored
def clear_buffer(self):
with self.lock:
self.current_size = 0
def _get_storage_idx(self, inc=None):
inc = inc or 1 # size increment
assert inc <= self.size, "Batch committed to replay is too large!"
# go consecutively until you hit the end, and then go randomly.
if self.current_size+inc <= self.size:
idx = np.arange(self.current_size, self.current_size+inc)
elif self.current_size < self.size:
overflow = inc - (self.size - self.current_size)
idx_a = np.arange(self.current_size, self.size)
idx_b = np.random.randint(0, self.current_size, overflow)
idx = np.concatenate([idx_a, idx_b])
else:
idx = np.random.randint(0, self.size, inc)
# update replay size
self.current_size = min(self.size, self.current_size+inc)
if inc == 1:
idx = idx[0]
return idx
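# Storage behaviour: episodes are written consecutively until the buffer is full;
# afterwards new episodes overwrite uniformly random slots, so `full` stays True
# while old data is gradually replaced.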
| 3,789 | 33.770642 | 95 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/rollout.py | from collections import deque
import numpy as np
import pickle
from mujoco_py import MujocoException
from baselines.her.util import convert_episode_to_batch_major, store_args
class RolloutWorker:
@store_args
def __init__(self, make_env, policy, dims, logger, T, rollout_batch_size=1,
exploit=False, use_target_net=False, compute_Q=False, noise_eps=0,
random_eps=0, history_len=100, render=False, **kwargs):
"""Rollout worker generates experience by interacting with one or many environments.
Args:
make_env (function): a factory function that creates a new instance of the environment
when called
policy (object): the policy that is used to act
dims (dict of ints): the dimensions for observations (o), goals (g), and actions (u)
logger (object): the logger that is used by the rollout worker
rollout_batch_size (int): the number of parallel rollouts that should be used
exploit (boolean): whether or not to exploit, i.e. to act optimally according to the
current policy without any exploration
use_target_net (boolean): whether or not to use the target net for rollouts
compute_Q (boolean): whether or not to compute the Q values alongside the actions
noise_eps (float): scale of the additive Gaussian noise
random_eps (float): probability of selecting a completely random action
history_len (int): length of history for statistics smoothing
render (boolean): whether or not to render the rollouts
"""
self.envs = [make_env() for _ in range(rollout_batch_size)]
assert self.T > 0
self.info_keys = [key.replace('info_', '') for key in dims.keys() if key.startswith('info_')]
self.success_history = deque(maxlen=history_len)
self.Q_history = deque(maxlen=history_len)
self.episode_length_history = deque(maxlen=history_len)
self.once_success_history = deque(maxlen=history_len)
self.return_history = deque(maxlen=history_len)
self.n_episodes = 0
self.g = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # goals
self.initial_o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations
self.initial_ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals
self.reset_all_rollouts()
self.clear_history()
def reset_rollout(self, i, generated_goal):
"""Resets the `i`-th rollout environment, re-samples a new goal, and updates the `initial_o`
and `g` arrays accordingly.
"""
obs = self.envs[i].reset()
if isinstance(obs, dict):
self.g[i] = obs['desired_goal']
if isinstance(generated_goal, np.ndarray):
self.g[i] = self.envs[i].env.goal = generated_goal[i].copy()
self.initial_o[i] = obs['observation']
self.initial_ag[i] = obs['achieved_goal']
else:
self.g[i] = np.zeros_like(self.g[i])
self.initial_o[i] = obs
self.initial_ag[i] = np.zeros_like(self.initial_ag[i])
def reset_all_rollouts(self, generated_goal=False):
"""Resets all `rollout_batch_size` rollout workers.
"""
for i in range(self.rollout_batch_size):
self.reset_rollout(i, generated_goal)
def generate_rollouts(self, generated_goal=False, z_s_onehot=False, random_action=False):
"""Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current
policy acting on it accordingly.
"""
self.reset_all_rollouts(generated_goal)
# compute observations
o = np.empty((self.rollout_batch_size, self.dims['o']), np.float32) # observations
ag = np.empty((self.rollout_batch_size, self.dims['g']), np.float32) # achieved goals
o[:] = self.initial_o
ag[:] = self.initial_ag
z = z_s_onehot.copy() # selected skills
# generate episodes
obs, zs, achieved_goals, acts, goals, successes = [], [], [], [], [], []
rewards, dones, valids = [], [], []
HW = 200
if self.render == 'rgb_array':
imgs = np.empty([self.rollout_batch_size, self.T, HW, HW, 3])
elif self.render == 'human':
imgs = np.empty([self.rollout_batch_size, self.T, 992, 1648, 3])
info_values = [np.empty((self.T, self.rollout_batch_size, self.dims['info_' + key]), np.float32) for key in self.info_keys]
Qs = []
cur_valid = np.ones(self.rollout_batch_size)
lengths = np.full(self.rollout_batch_size, -1)
once_successes = np.full(self.rollout_batch_size, 0)
returns = np.zeros(self.rollout_batch_size)
for t in range(self.T):
policy_output = self.policy.get_actions(
o, z, ag, self.g,
compute_Q=self.compute_Q,
noise_eps=self.noise_eps if not self.exploit else 0.,
random_eps=(self.random_eps if not self.exploit else 0.) if not random_action else 1.,
use_target_net=self.use_target_net,
exploit=self.exploit,
)
if self.compute_Q:
u, Q = policy_output
Qs.append(Q)
else:
u = policy_output
if u.ndim == 1:
# The non-batched case should still have a reasonable shape.
u = u.reshape(1, -1)
o_new = np.empty((self.rollout_batch_size, self.dims['o']))
ag_new = np.empty((self.rollout_batch_size, self.dims['g']))
success = np.zeros(self.rollout_batch_size)
cur_reward = np.zeros(self.rollout_batch_size)
cur_done = np.zeros(self.rollout_batch_size)
# compute new states and observations
for i in range(self.rollout_batch_size):
try:
curr_o_new, reward, done, info = self.envs[i].step(u[i])
if 'is_success' in info:
success[i] = info['is_success']
cur_reward[i] = reward
cur_done[i] = done
if (done or t == self.T - 1) and lengths[i] == -1:
if 'cur_step' in info:
lengths[i] = info['cur_step']
else:
lengths[i] = t + 1
if success[i] > 0:
once_successes[i] = 1
if cur_valid[i]:
returns[i] += reward
if isinstance(curr_o_new, dict):
o_new[i] = curr_o_new['observation']
ag_new[i] = curr_o_new['achieved_goal']
for idx, key in enumerate(self.info_keys):
info_values[idx][t, i] = info[key]
else:
o_new[i] = curr_o_new
ag_new[i] = np.zeros_like(ag_new[i])
if self.render:
if self.render == 'rgb_array':
imgs[i][t] = self.envs[i].render(mode='rgb_array', width=HW, height=HW)
elif self.render == 'human':
imgs[i][t] = self.envs[i].render()
except MujocoException as e:
return self.generate_rollouts()
if np.isnan(o_new).any():
self.logger.warning('NaN caught during rollout generation. Trying again...')
self.reset_all_rollouts()
return self.generate_rollouts()
obs.append(o.copy())
rewards.append(cur_reward.copy())
dones.append(cur_done.copy())
valids.append(cur_valid.copy())
zs.append(z.copy())
achieved_goals.append(ag.copy())
successes.append(success.copy())
acts.append(u.copy())
goals.append(self.g.copy())
o[...] = o_new
ag[...] = ag_new
for i in range(len(cur_valid)):
if cur_done[i]:
cur_valid[i] = 0
# success: success at the last step
# once_success: once success
obs.append(o.copy())
achieved_goals.append(ag.copy())
self.initial_o[:] = o
successful = np.array(successes)[-1, :].copy()
episode = dict(
o=obs,
z=zs,
u=acts,
g=goals,
ag=achieved_goals,
myr=rewards,
myd=dones,
myv=valids,
)
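        # Per episode: o and ag hold T+1 entries (final observation appended above),
        # while z, u, g, myr (rewards), myd (dones) and myv (valid masks) hold T entries.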
for key, value in zip(self.info_keys, info_values):
episode['info_{}'.format(key)] = value
# stats
assert successful.shape == (self.rollout_batch_size,)
success_rate = np.mean(successful)
self.success_history.append(success_rate)
self.once_success_history.append(np.mean(once_successes))
self.return_history.append(np.mean(returns))
self.episode_length_history.append(np.mean(lengths))
if self.compute_Q:
self.Q_history.append(np.mean(Qs))
self.n_episodes += self.rollout_batch_size
if self.render == 'rgb_array' or self.render == 'human':
return imgs, convert_episode_to_batch_major(episode)
return convert_episode_to_batch_major(episode)
def clear_history(self):
"""Clears all histories that are used for statistics
"""
self.success_history.clear()
self.Q_history.clear()
self.episode_length_history.clear()
self.once_success_history.clear()
self.return_history.clear()
def current_success_rate(self):
return np.mean(self.success_history)
def current_mean_Q(self):
return np.mean(self.Q_history)
def save_policy(self, path):
"""Pickles the current policy for later inspection.
"""
with open(path, 'wb') as f:
pickle.dump(self.policy, f)
def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('num_trajs', len(self.success_history) * self.rollout_batch_size)]
logs += [('success_rate', np.mean(self.success_history))]
logs += [('once_success_rate', np.mean(self.once_success_history))]
logs += [('return', np.mean(self.return_history))]
logs += [('episode_length', np.mean(self.episode_length_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
        if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
def seed(self, seed):
"""Seeds each environment with a distinct seed derived from the passed in global seed.
"""
for idx, env in enumerate(self.envs):
env.seed(seed + 1000 * idx)
| 11,198 | 41.744275 | 131 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/util.py | import os
import subprocess
import sys
import importlib
import inspect
import functools
import tensorflow as tf
import numpy as np
from baselines.common import tf_util as U
import platform
import json
import math
def store_args(method):
"""Stores provided method args as instance attributes.
"""
argspec = inspect.getfullargspec(method)
defaults = {}
if argspec.defaults is not None:
defaults = dict(
zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
if argspec.kwonlydefaults is not None:
defaults.update(argspec.kwonlydefaults)
arg_names = argspec.args[1:]
@functools.wraps(method)
def wrapper(*positional_args, **keyword_args):
self = positional_args[0]
# Get default arg values
args = defaults.copy()
# Add provided arg values
for name, value in zip(arg_names, positional_args[1:]):
args[name] = value
args.update(keyword_args)
self.__dict__.update(args)
return method(*positional_args, **keyword_args)
return wrapper
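# Example sketch for store_args (hypothetical class): decorating __init__ stores
# every argument as an attribute without explicit assignments, e.g.
#   class Agent:
#       @store_args
#       def __init__(self, hidden=64, layers=3):
#           pass
#   Agent(hidden=128).hidden  # -> 128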
def import_function(spec):
"""Import a function identified by a string like "pkg.module:fn_name".
"""
mod_name, fn_name = spec.split(':')
module = importlib.import_module(mod_name)
fn = getattr(module, fn_name)
return fn
def flatten_grads(var_list, grads):
    """Flattens variables and their gradients.
    """
return tf.concat([tf.reshape(grad, [U.numel(v)])
for (v, grad) in zip(var_list, grads)], 0)
def l2_norm(v, eps=1e-12):
return v / (tf.reduce_sum(input_tensor=v ** 2) ** 0.5 + eps)
def spectral_norm(w, iteration=1, name='', is_training=None):
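    # Power-iteration estimate of the largest singular value (sigma) of w; the
    # persistent vector `u` is refreshed only when is_training is True, and w is
    # rescaled by the estimated sigma (spectral normalization).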
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.compat.v1.get_variable(f"{name}_u", [1, w_shape[-1]], initializer=tf.compat.v1.truncated_normal_initializer(), trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
v_ = tf.matmul(u_hat, tf.transpose(a=w))
v_hat = l2_norm(v_)
u_ = tf.matmul(v_hat, w)
u_hat = l2_norm(u_)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(a=u_hat))
w_norm = w / sigma
# w_norm = tf.Print(w_norm, [u, u_hat, is_training], 'u_before')
def assign_f():
with tf.control_dependencies([u.assign(u_hat)]):
return tf.reshape(w_norm, w_shape)
def noassign_f():
return tf.reshape(w_norm, w_shape)
w_norm = tf.cond(pred=is_training, true_fn=assign_f, false_fn=noassign_f)
# w_norm = tf.Print(w_norm, [u, u_hat, is_training], 'u_after')
return w_norm
def fully_conneted(x, units, use_bias=True, sn=False, name='fully_0', is_training=None):
x = tf.compat.v1.layers.flatten(x)
shape = x.get_shape().as_list()
channels = shape[-1]
if sn:
w = tf.compat.v1.get_variable(f"{name}_kernel", [channels, units], tf.float32, initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"))
if use_bias:
bias = tf.compat.v1.get_variable(f"{name}_bias", [units], initializer=tf.compat.v1.constant_initializer(0.0))
x = tf.matmul(x, spectral_norm(w, name=name, is_training=is_training)) + bias
else:
x = tf.matmul(x, spectral_norm(w, name=name, is_training=is_training))
else:
x = tf.compat.v1.layers.dense(x, units=units, kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"), use_bias=use_bias, name=name)
return x
def nn(input, layers_sizes, reuse=None, flatten=False, name=""):
"""Creates a simple neural network
"""
for i, size in enumerate(layers_sizes):
activation = tf.nn.relu if i < len(layers_sizes)-1 else None
input = tf.compat.v1.layers.dense(inputs=input,
units=size,
kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
reuse=reuse,
name=name+'_'+str(i))
if activation:
input = activation(input)
if flatten:
assert layers_sizes[-1] == 1
input = tf.reshape(input, [-1])
return input
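# Example sketch for nn(): nn(obs_ph, [256, 256, 1], name='critic') builds a
# two-hidden-layer ReLU MLP with a linear scalar output (obs_ph is assumed to be
# a placeholder that already exists in the surrounding graph).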
def snn(input, layers_sizes, flatten=False, name="", sn=False, is_training=None):
for i, size in enumerate(layers_sizes):
activation = tf.nn.relu if i < len(layers_sizes)-1 else None
input = fully_conneted(input, units=size, name=f'{name}_fully_{i}', sn=sn, is_training=is_training)
if activation:
input = activation(input)
if flatten:
assert layers_sizes[-1] == 1
input = tf.reshape(input, [-1])
return input
def install_mpi_excepthook():
import sys
from mpi4py import MPI
old_hook = sys.excepthook
def new_hook(a, b, c):
old_hook(a, b, c)
sys.stdout.flush()
sys.stderr.flush()
MPI.COMM_WORLD.Abort()
sys.excepthook = new_hook
def mpi_fork(n, binding="core"):
"""Re-launches the current script with workers
Returns "parent" for original parent, "child" for MPI children
"""
if n <= 1:
return "child"
if os.getenv("IN_MPI") is None:
env = os.environ.copy()
env.update(
MKL_NUM_THREADS="1",
OMP_NUM_THREADS="1",
IN_MPI="1"
)
# "-bind-to core" is crucial for good performance
if platform.system() == 'Darwin':
args = [
"mpirun",
"-np",
str(n),
# "-allow-run-as-root",
sys.executable
]
else:
args = [
"mpirun",
"--oversubscribe",
"-np",
str(n),
"-bind-to",
binding, # core or none
"-allow-run-as-root",
sys.executable
]
args += sys.argv
subprocess.check_call(args, env=env)
return "parent"
else:
install_mpi_excepthook()
return "child"
def convert_episode_to_batch_major(episode):
"""Converts an episode to have the batch dimension in the major (first)
dimension.
"""
episode_batch = {}
for key in episode.keys():
val = np.array(episode[key]).copy()
# make inputs batch-major instead of time-major
episode_batch[key] = val.swapaxes(0, 1)
return episode_batch
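# e.g. an array stored time-major with shape (T, rollout_batch_size, dim) comes
# back batch-major with shape (rollout_batch_size, T, dim).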
def transitions_in_episode_batch(episode_batch):
"""Number of transitions in a given episode batch.
"""
shape = episode_batch['u'].shape
return shape[0] * shape[1]
def reshape_for_broadcasting(source, target):
"""Reshapes a tensor (source) to have the correct shape and dtype of the target
before broadcasting it with MPI.
"""
dim = len(target.get_shape())
shape = ([1] * (dim-1)) + [-1]
return tf.reshape(tf.cast(source, target.dtype), shape)
def make_dir(filename):
folder = os.path.dirname(filename)
if not os.path.exists(folder):
os.makedirs(folder)
def save_video(ims, filename, lib='cv2'):
make_dir(filename)
fps = 30.0
(height, width, _) = ims[0].shape
if lib == 'cv2':
import cv2
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'MJPG') # MJPG, XVID
writer = cv2.VideoWriter(filename, fourcc, fps, (width, height))
elif lib == 'imageio':
import imageio
writer = imageio.get_writer(filename, fps=fps)
for i in range(ims.shape[0]):
if lib == 'cv2':
# Fix color error by converting RGB to BGR
writer.write(cv2.cvtColor(np.uint8(ims[i]), cv2.COLOR_RGB2BGR))
elif lib == 'imageio':
writer.append_data(ims[i])
if lib == 'cv2':
writer.release()
elif lib == 'imageio':
writer.close()
def dumpJson(dirname, episodes, epoch, rank):
    obs_list = []  # renamed from `os` to avoid shadowing the os module
    for episode in episodes:
        episode['o'] = episode['o'].tolist()
        obs_list.append(episode['o'])
    with open(dirname+'/rollout_{0}_{1}.txt'.format(epoch, rank), 'w') as file:
        file.write(json.dumps(obs_list))
def loadJson(dirname, epoch, rank):
filename = '/rollout_{0}_{1}.txt'.format(epoch, rank)
with open(dirname+filename, 'r') as file:
        obs_list = json.loads(file.read())  # renamed from `os` to avoid shadowing the os module
    return obs_list
def save_weight(sess, collection=tf.compat.v1.GraphKeys.GLOBAL_VARIABLES):
    return {v.name: sess.run(v) for v in tf.compat.v1.get_collection(collection, scope='ddpg' + '/' + '')}
def load_weight(sess, data, include=[]):
# include: ['stats','main','target','state_mi','skill_ds']
for scope in include:
for v in tf.compat.v1.global_variables():
if (v.name in data.keys()) and (scope in v.name):
if v.shape == data[v.name].shape:
sess.run(v.assign(data[v.name]))
print('load weight: ', v.name)
| 9,069 | 31.743682 | 202 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/experiment/__init__.py | 0 | 0 | 0 | py |
|
CSD-manipulation | CSD-manipulation-master/baselines/her/experiment/config.py | from copy import deepcopy
import numpy as np
import json
import os
import gym
from baselines import logger
from baselines.her.ddpg import DDPG
from baselines.her.her import make_sample_her_transitions
DEFAULT_ENV_PARAMS = {
'FetchReach-v0': {
'n_cycles': 10,
},
}
DEFAULT_PARAMS = {
# env
'max_u': 1., # max absolute value of actions on different coordinates
# ddpg
'layers': 3, # number of layers in the critic/actor networks
'hidden': 256, # number of neurons in each hidden layers
'network_class_actor_critic': 'baselines.her.actor_critic:ActorCritic',
'network_class_discriminator': 'baselines.her.discriminator:Discriminator',
'Q_lr': 0.001, # critic learning rate
'pi_lr': 0.001, # actor learning rate
'sk_lr': 0.001, # skill discriminator learning rate
'buffer_size': int(1E6),
'polyak': 0.95, # polyak averaging coefficient
'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)
'clip_obs': 200.,
'scope': 'ddpg', # can be tweaked for testing
'relative_goals': False,
# training
'n_cycles': 50, # per epoch
'rollout_batch_size': 2, # per mpi thread
'n_batches': 40, # training batches per cycle
'batch_size': 256, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.
'n_test_rollouts': 10, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
'test_with_polyak': False, # run test episodes with the target network
# exploration
'random_eps': 0.3, # percentage of time a random action is taken
'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u
# normalization
'norm_eps': 0.01, # epsilon used for observation normalization
    'norm_clip': 5, # normalized observations are cropped to this value
# HER
'replay_strategy': 'future',
'collect_data': False, # True: collect rollouts data
'collect_video': False, # True: collect rollouts video
'num_objective': 4, # number of rl objectives
###############################################################################
'note': 'DIAYN',
'sac': True, # Soft-Actor-Critic
'replay_k': 0,
'r_scale': 0,
'sk_r_scale': 0,
'et_r_scale': 0.02,
'goal_generation': 'Zero', # 'Env', 'Zero'
'num_skills': 1, # number of skills to learn
'use_skill_n': None, # starts from 1 to num_skills or None
'et_w_schedule': [(0,0.2),(24,0.2),(200,0.2)], # Entropy regularization coefficient (SAC)
'finetune_pi': False, # pi: policy
'load_weight': None
}
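# Sketch of how these defaults are typically overridden (the JSON path convention
# follows experiment/play.py; the file name and env id below are illustrative):
#   params = DEFAULT_PARAMS.copy()
#   with open('params/FetchPush-v1/CSD.json') as f:
#       params.update(json.loads(f.read()))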
CACHED_ENVS = {}
def cached_make_env(make_env):
"""
    Only creates a new environment from the provided function if one has not already been
    created. This is useful here because we need to infer certain properties of the env, e.g.
    its observation and action spaces, without any intent of actually using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env]
def prepare_params(kwargs):
# DDPG params
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env():
# Only for getting max_episode_steps
if env_name == 'Maze':
from envs.maze_env import MazeEnv
env = MazeEnv(n=10)
elif env_name == 'Kitchen':
from d4rl_alt.kitchen.kitchen_envs import KitchenMicrowaveKettleLightTopLeftBurnerV0Custom
from gym.wrappers.time_limit import TimeLimit
env = KitchenMicrowaveKettleLightTopLeftBurnerV0Custom(control_mode='end_effector')
env = TimeLimit(env, max_episode_steps=kwargs['max_path_length'])
else:
env = gym.make(env_name)
if 'max_path_length' in kwargs:
env = env.env
from gym.wrappers.time_limit import TimeLimit
env = TimeLimit(env, max_episode_steps=kwargs['max_path_length'])
return env
kwargs['make_env'] = make_env
tmp_env = cached_make_env(kwargs['make_env'])
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
tmp_env.reset()
kwargs['max_u'] = np.array(kwargs['max_u']) if type(kwargs['max_u']) == list else kwargs['max_u']
kwargs['gamma'] = 1. - 1. / kwargs['T']
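    # Discount chosen so the effective horizon 1 / (1 - gamma) equals the episode length T.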
if 'lr' in kwargs:
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
kwargs['sk_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers',
'network_class_actor_critic', 'network_class_discriminator',
'polyak',
'batch_size', 'Q_lr', 'pi_lr', 'sk_lr',
'norm_eps', 'norm_clip', 'max_u',
'action_l2', 'clip_obs', 'scope', 'relative_goals']:
ddpg_params[name] = kwargs[name]
kwargs['_' + name] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
return kwargs
def log_params(params, logger=logger):
for key in sorted(params.keys()):
logger.info('{}: {}'.format(key, params[key]))
def configure_her(params):
env = cached_make_env(params['make_env'])
env.reset()
def reward_fun(ag_2, g, info): # vectorized
return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
her_params = {
'reward_fun': reward_fun,
}
for name in ['replay_strategy', 'replay_k', 'et_w_schedule']:
her_params[name] = params[name]
params['_' + name] = her_params[name]
del params[name]
sample_her_transitions = make_sample_her_transitions(**her_params)
return sample_her_transitions
def simple_goal_subtract(a, b):
assert a.shape == b.shape
return a - b
def configure_ddpg(dims, params, pretrain_weights, reuse=False, use_mpi=True, clip_return=True):
sample_her_transitions = configure_her(params)
# Extract relevant parameters.
gamma = params['gamma']
rollout_batch_size = params['rollout_batch_size']
ddpg_params = params['ddpg_params']
env_name = params['env_name']
max_timesteps = params['max_timesteps']
num_objective = params['num_objective']
input_dims = dims.copy()
# DDPG agent
env = cached_make_env(params['make_env'])
env.reset()
ddpg_params.update({'input_dims': input_dims, # agent takes an input observations
'T': params['T'],
'clip_pos_returns': True, # clip positive returns
'clip_return': (1. / (1. - gamma))*num_objective if clip_return else np.inf, # max abs of return
'rollout_batch_size': rollout_batch_size,
'subtract_goals': simple_goal_subtract,
'sample_transitions': sample_her_transitions,
'gamma': gamma,
'env_name': env_name,
'max_timesteps': max_timesteps,
'r_scale': params['r_scale'],
'sk_r_scale': params['sk_r_scale'],
'et_r_scale': params['et_r_scale'],
'pretrain_weights': pretrain_weights,
'finetune_pi': params['finetune_pi'],
'sac': params['sac'],
'skill_type': params['skill_type'],
'sk_clip': params['sk_clip'],
'et_clip': params['et_clip'],
'done_ground': params['done_ground'],
'spectral_normalization': params['spectral_normalization'],
'dual_reg': params['dual_reg'],
'dual_init_lambda': params['dual_init_lambda'],
'dual_lam_opt': params['dual_lam_opt'],
'dual_slack': params['dual_slack'],
'dual_dist': params['dual_dist'],
'inner': params['inner'],
'algo': params['algo'],
'random_eps': params['random_eps'],
'noise_eps': params['noise_eps'],
'sk_lam_lr': params['sk_lam_lr'],
'algo_name': params['algo_name'],
'train_start_epoch': params['train_start_epoch'],
})
ddpg_params['info'] = {
'env_name': params['env_name'],
}
policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)
return policy
def configure_dims(params):
env = cached_make_env(params['make_env'])
env.reset()
obs, _, _, info = env.step(env.action_space.sample())
if isinstance(obs, dict):
dims = {
'o': obs['observation'].shape[0],
'z': params['num_skills'],
'u': env.action_space.shape[0],
'g': obs['desired_goal'].shape[0],
}
for key, value in info.items():
if 'TimeLimit' in key:
continue
value = np.array(value)
if value.ndim == 0:
value = value.reshape(1)
dims['info_{}'.format(key)] = value.shape[0]
else:
dims = {
'o': obs.shape[0],
'z': params['num_skills'],
'u': env.action_space.shape[0],
'g': 3,
}
return dims
| 9,452 | 36.511905 | 121 | py |
CSD-manipulation | CSD-manipulation-master/baselines/her/experiment/play.py | import click
import numpy as np
import pickle
from baselines import logger
from baselines.common import set_global_seeds
import baselines.her.experiment.config as config
from baselines.her.rollout import RolloutWorker
from baselines.her.util import save_video
import os
import json
@click.command()
@click.argument('policy_file', type=str)
@click.option('--seed', type=int, default=0)
@click.option('--n_test_rollouts', type=int, default=20)
@click.option('--render', type=click.Choice(['human', 'rgb_array']), default='rgb_array')
@click.option('--exploit', type=bool, default=True)
@click.option('--compute_q', type=bool, default=True)
@click.option('--collect_data', type=bool, default=True)
@click.option('--goal_generation', type=str, default='Zero')
@click.option('--note', type=str, default=None, help='unique notes')
def main(policy_file, seed, n_test_rollouts, render, exploit, compute_q, collect_data, goal_generation, note):
set_global_seeds(seed)
# Load policy.
with open(policy_file, 'rb') as f:
policy = pickle.load(f)
env_name = policy.info['env_name']
# Prepare params.
params = config.DEFAULT_PARAMS
params['note'] = note or params['note']
if note:
with open('params/'+env_name+'/'+note+'.json', 'r') as file:
override_params = json.loads(file.read())
params.update(**override_params)
if env_name in config.DEFAULT_ENV_PARAMS:
params.update(config.DEFAULT_ENV_PARAMS[env_name]) # merge env-specific parameters in
params['env_name'] = env_name
goal_generation = params['goal_generation']
params = config.prepare_params(params)
config.log_params(params, logger=logger)
dims = config.configure_dims(params)
eval_params = {
'exploit': exploit, # eval: True, train: False
'use_target_net': params['test_with_polyak'], # eval/train: False
'compute_Q': compute_q, # eval: True, train: False
'rollout_batch_size': 1,
'render': render,
}
for name in ['T', 'gamma', 'noise_eps', 'random_eps']:
eval_params[name] = params[name]
evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)
evaluator.seed(seed)
# Run evaluation.
evaluator.clear_history()
num_skills = params['num_skills']
if goal_generation == 'Zero':
generated_goal = np.zeros(evaluator.g.shape)
else:
generated_goal = False
for z in range(num_skills):
assert(evaluator.rollout_batch_size==1)
z_s_onehot = np.zeros([evaluator.rollout_batch_size, num_skills])
z_s_onehot[0, z] = 1
base = os.path.splitext(policy_file)[0]
for i_test_rollouts in range(n_test_rollouts):
if render == 'rgb_array' or render == 'human':
imgs, episode = evaluator.generate_rollouts(generated_goal=generated_goal, z_s_onehot=z_s_onehot)
end = '_test_{:02d}_exploit_{}_compute_q_{}_skill_{}.avi'.format(i_test_rollouts, exploit, compute_q, z)
test_filename = base + end
save_video(imgs[0], test_filename, lib='cv2')
else:
episode = evaluator.generate_rollouts(generated_goal=generated_goal, z_s_onehot=z_s_onehot)
if collect_data:
end = '_test_{:02d}_exploit_{}_compute_q_{}_skill_{}.txt'.format(i_test_rollouts, exploit, compute_q, z)
test_filename = base + end
with open(test_filename, 'w') as file:
file.write(json.dumps(episode['o'].tolist()))
# record logs
for key, val in evaluator.logs('test'):
logger.record_tabular(key, np.mean(val))
logger.dump_tabular()
if __name__ == '__main__':
main()
| 3,754 | 35.456311 | 120 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/__init__.py | import collections
import os
import sys
import numpy as np
import d4rl_alt.locomotion
#import d4rl_alt.hand_manipulation_suite
import d4rl_alt.pointmaze
import d4rl_alt.gym_minigrid
import d4rl_alt.gym_mujoco
from d4rl_alt.offline_env import get_keys, set_dataset_path
SUPPRESS_MESSAGES = bool(os.environ.get("D4RL_SUPPRESS_IMPORT_ERROR", 0))
_ERROR_MESSAGE = "Warning: %s failed to import. Set the environment variable D4RL_SUPPRESS_IMPORT_ERROR=1 to suppress this message."
try:
import d4rl_alt.flow
except ImportError as e:
if not SUPPRESS_MESSAGES:
print(_ERROR_MESSAGE % "Flow", file=sys.stderr)
print(e, file=sys.stderr)
try:
import d4rl_alt.kitchen
except ImportError as e:
if not SUPPRESS_MESSAGES:
print(_ERROR_MESSAGE % "FrankaKitchen", file=sys.stderr)
print(e, file=sys.stderr)
try:
import d4rl_alt.carla
except ImportError as e:
if not SUPPRESS_MESSAGES:
print(_ERROR_MESSAGE % "CARLA", file=sys.stderr)
print(e, file=sys.stderr)
def qlearning_dataset(env, dataset=None, terminate_on_end=False, **kwargs):
"""
Returns datasets formatted for use by standard Q-learning algorithms,
with observations, actions, next_observations, rewards, and a terminal
flag.
Args:
env: An OfflineEnv object.
dataset: An optional dataset to pass in for processing. If None,
the dataset will default to env.get_dataset()
terminate_on_end (bool): Set done=True on the last timestep
in a trajectory. Default is False, and will discard the
last timestep in each trajectory.
**kwargs: Arguments to pass to env.get_dataset().
Returns:
A dictionary containing keys:
observations: An N x dim_obs array of observations.
actions: An N x dim_action array of actions.
next_observations: An N x dim_obs array of next observations.
rewards: An N-dim float array of rewards.
terminals: An N-dim boolean array of "done" or episode termination flags.
"""
if dataset is None:
dataset = env.get_dataset(**kwargs)
N = dataset["rewards"].shape[0]
obs_ = []
next_obs_ = []
action_ = []
reward_ = []
done_ = []
# The newer version of the dataset adds an explicit
    # timeouts field. Keep old method for backwards compatibility.
use_timeouts = False
if "timeouts" in dataset:
use_timeouts = True
episode_step = 0
for i in range(N - 1):
obs = dataset["observations"][i]
new_obs = dataset["observations"][i + 1]
action = dataset["actions"][i]
reward = dataset["rewards"][i]
done_bool = bool(dataset["terminals"][i])
if use_timeouts:
final_timestep = dataset["timeouts"][i]
else:
final_timestep = episode_step == env._max_episode_steps - 1
if (not terminate_on_end) and final_timestep:
# Skip this transition and don't apply terminals on the last step of an episode
episode_step = 0
continue
if done_bool or final_timestep:
episode_step = 0
obs_.append(obs)
next_obs_.append(new_obs)
action_.append(action)
reward_.append(reward)
done_.append(done_bool)
episode_step += 1
return {
"observations": np.array(obs_),
"actions": np.array(action_),
"next_observations": np.array(next_obs_),
"rewards": np.array(reward_),
"terminals": np.array(done_),
}
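# Usage sketch (assumes a registered offline env id; shapes follow the docstring above):
#   env = gym.make('maze2d-umaze-v1')
#   data = qlearning_dataset(env)
#   data['observations'].shape  # -> (N, dim_obs)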
def sequence_dataset(env, dataset=None, **kwargs):
"""
Returns an iterator through trajectories.
Args:
env: An OfflineEnv object.
dataset: An optional dataset to pass in for processing. If None,
the dataset will default to env.get_dataset()
**kwargs: Arguments to pass to env.get_dataset().
Returns:
An iterator through dictionaries with keys:
observations
actions
rewards
terminals
"""
if dataset is None:
dataset = env.get_dataset(**kwargs)
N = dataset["rewards"].shape[0]
data_ = collections.defaultdict(list)
# The newer version of the dataset adds an explicit
    # timeouts field. Keep old method for backwards compatibility.
use_timeouts = False
if "timeouts" in dataset:
use_timeouts = True
episode_step = 0
for i in range(N):
done_bool = bool(dataset["terminals"][i])
if use_timeouts:
final_timestep = dataset["timeouts"][i]
else:
final_timestep = episode_step == env._max_episode_steps - 1
if done_bool or final_timestep:
episode_step = 0
episode_data = {}
for k in data_:
episode_data[k] = np.array(data_[k])
yield episode_data
data_ = collections.defaultdict(list)
for k in dataset:
data_[k].append(dataset[k][i])
episode_step += 1
| 5,046 | 30.154321 | 132 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/infos.py | """
This file holds all URLs and reference scores.
"""
# TODO(Justin): This is duplicated. Make all __init__ file URLs and scores point to this file.
DATASET_URLS = {
"maze2d-open-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-open-sparse.hdf5",
"maze2d-umaze-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-umaze-sparse-v1.hdf5",
"maze2d-medium-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-medium-sparse-v1.hdf5",
"maze2d-large-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-large-sparse-v1.hdf5",
"maze2d-eval-umaze-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-umaze-sparse-v1.hdf5",
"maze2d-eval-medium-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-medium-sparse-v1.hdf5",
"maze2d-eval-large-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-large-sparse-v1.hdf5",
"maze2d-open-dense-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-open-dense.hdf5",
"maze2d-umaze-dense-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-umaze-dense-v1.hdf5",
"maze2d-medium-dense-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-medium-dense-v1.hdf5",
"maze2d-large-dense-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-large-dense-v1.hdf5",
"maze2d-eval-umaze-dense-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-umaze-dense-v1.hdf5",
"maze2d-eval-medium-dense-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-medium-dense-v1.hdf5",
"maze2d-eval-large-dense-v1": "http://rail.eecs.berkeley.edu/datasets/offline_rl/maze2d/maze2d-eval-large-dense-v1.hdf5",
"minigrid-fourrooms-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/minigrid/minigrid4rooms.hdf5",
"minigrid-fourrooms-random-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/minigrid/minigrid4rooms_random.hdf5",
"pen-human-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/pen-v0_demos_clipped.hdf5",
"pen-cloned-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/pen-demos-v0-bc-combined.hdf5",
"pen-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/pen-v0_expert_clipped.hdf5",
"hammer-human-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/hammer-v0_demos_clipped.hdf5",
"hammer-cloned-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/hammer-demos-v0-bc-combined.hdf5",
"hammer-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/hammer-v0_expert_clipped.hdf5",
"relocate-human-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/relocate-v0_demos_clipped.hdf5",
"relocate-cloned-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/relocate-demos-v0-bc-combined.hdf5",
"relocate-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/relocate-v0_expert_clipped.hdf5",
"door-human-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/door-v0_demos_clipped.hdf5",
"door-cloned-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/door-demos-v0-bc-combined.hdf5",
"door-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/door-v0_expert_clipped.hdf5",
"halfcheetah-random-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_random.hdf5",
"halfcheetah-medium-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_medium.hdf5",
"halfcheetah-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_expert.hdf5",
"halfcheetah-medium-replay-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_mixed.hdf5",
"halfcheetah-medium-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_medium_expert.hdf5",
"walker2d-random-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_random.hdf5",
"walker2d-medium-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_medium.hdf5",
"walker2d-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_expert.hdf5",
"walker2d-medium-replay-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker_mixed.hdf5",
"walker2d-medium-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_medium_expert.hdf5",
"hopper-random-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_random.hdf5",
"hopper-medium-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_medium.hdf5",
"hopper-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_expert.hdf5",
"hopper-medium-replay-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_mixed.hdf5",
"hopper-medium-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_medium_expert.hdf5",
"ant-random-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_random.hdf5",
"ant-medium-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_medium.hdf5",
"ant-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_expert.hdf5",
"ant-medium-replay-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_mixed.hdf5",
"ant-medium-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_medium_expert.hdf5",
"ant-random-expert-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_random_expert.hdf5",
"antmaze-umaze-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_False_multigoal_False_sparse.hdf5",
"antmaze-umaze-diverse-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_u-maze_noisy_multistart_True_multigoal_True_sparse.hdf5",
"antmaze-medium-play-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_big-maze_noisy_multistart_True_multigoal_False_sparse.hdf5",
"antmaze-medium-diverse-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_big-maze_noisy_multistart_True_multigoal_True_sparse.hdf5",
"antmaze-large-play-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_hardest-maze_noisy_multistart_True_multigoal_False_sparse.hdf5",
"antmaze-large-diverse-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/ant_maze_new/Ant_maze_hardest-maze_noisy_multistart_True_multigoal_True_sparse.hdf5",
"flow-ring-random-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/flow/flow-ring-v0-random.hdf5",
"flow-ring-controller-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/flow/flow-ring-v0-idm.hdf5",
"flow-merge-random-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/flow/flow-merge-v0-random.hdf5",
"flow-merge-controller-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/flow/flow-merge-v0-idm.hdf5",
"kitchen-complete-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/kitchen/mini_kitchen_microwave_kettle_light_slider-v0.hdf5",
"kitchen-partial-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/kitchen/kitchen_microwave_kettle_light_slider-v0.hdf5",
"kitchen-mixed-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/kitchen/kitchen_microwave_kettle_bottomburner_light-v0.hdf5",
"carla-lane-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/carla/carla_lane_follow_flat-v0.hdf5",
"carla-town-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/carla/carla_town_subsamp_flat-v0.hdf5",
"carla-town-full-v0": "http://rail.eecs.berkeley.edu/datasets/offline_rl/carla/carla_town_flat-v0.hdf5",
}
REF_MIN_SCORE = {
"maze2d-open-v0": 0.01,
"maze2d-umaze-v1": 23.85,
"maze2d-medium-v1": 13.13,
"maze2d-large-v1": 6.7,
"maze2d-open-dense-v0": 11.17817,
"maze2d-umaze-dense-v1": 68.537689,
"maze2d-medium-dense-v1": 44.264742,
"maze2d-large-dense-v1": 30.569041,
"minigrid-fourrooms-v0": 0.01442,
"minigrid-fourrooms-random-v0": 0.01442,
"pen-human-v0": 96.262799,
"pen-cloned-v0": 96.262799,
"pen-expert-v0": 96.262799,
"hammer-human-v0": -274.856578,
"hammer-cloned-v0": -274.856578,
"hammer-expert-v0": -274.856578,
"relocate-human-v0": -6.425911,
"relocate-cloned-v0": -6.425911,
"relocate-expert-v0": -6.425911,
"door-human-v0": -56.512833,
"door-cloned-v0": -56.512833,
"door-expert-v0": -56.512833,
"halfcheetah-random-v0": -280.178953,
"halfcheetah-medium-v0": -280.178953,
"halfcheetah-expert-v0": -280.178953,
"halfcheetah-medium-replay-v0": -280.178953,
"halfcheetah-medium-expert-v0": -280.178953,
"walker2d-random-v0": 1.629008,
"walker2d-medium-v0": 1.629008,
"walker2d-expert-v0": 1.629008,
"walker2d-medium-replay-v0": 1.629008,
"walker2d-medium-expert-v0": 1.629008,
"hopper-random-v0": -20.272305,
"hopper-medium-v0": -20.272305,
"hopper-expert-v0": -20.272305,
"hopper-medium-replay-v0": -20.272305,
"hopper-medium-expert-v0": -20.272305,
"antmaze-umaze-v0": 0.0,
"antmaze-umaze-diverse-v0": 0.0,
"antmaze-medium-play-v0": 0.0,
"antmaze-medium-diverse-v0": 0.0,
"antmaze-large-play-v0": 0.0,
"antmaze-large-diverse-v0": 0.0,
"kitchen-complete-v0": 0.0,
"kitchen-partial-v0": 0.0,
"kitchen-mixed-v0": 0.0,
"flow-ring-random-v0": -165.22,
"flow-ring-controller-v0": -165.22,
"flow-merge-random-v0": 118.67993,
"flow-merge-controller-v0": 118.67993,
"carla-lane-v0": -0.8503839912088142,
"carla-town-v0": -122.228455,
}
REF_MAX_SCORE = {
"maze2d-open-v0": 20.66,
"maze2d-umaze-v1": 161.86,
"maze2d-medium-v1": 277.39,
"maze2d-large-v1": 273.99,
"maze2d-open-dense-v0": 27.166538620695782,
"maze2d-umaze-dense-v1": 193.66285642381482,
"maze2d-medium-dense-v1": 297.4552547777125,
"maze2d-large-dense-v1": 303.4857382709002,
"minigrid-fourrooms-v0": 2.89685,
"minigrid-fourrooms-random-v0": 2.89685,
"pen-human-v0": 3076.8331017826877,
"pen-cloned-v0": 3076.8331017826877,
"pen-expert-v0": 3076.8331017826877,
"hammer-human-v0": 12794.134825156867,
"hammer-cloned-v0": 12794.134825156867,
"hammer-expert-v0": 12794.134825156867,
"relocate-human-v0": 4233.877797728884,
"relocate-cloned-v0": 4233.877797728884,
"relocate-expert-v0": 4233.877797728884,
"door-human-v0": 2880.5693087298737,
"door-cloned-v0": 2880.5693087298737,
"door-expert-v0": 2880.5693087298737,
"halfcheetah-random-v0": 12135.0,
"halfcheetah-medium-v0": 12135.0,
"halfcheetah-expert-v0": 12135.0,
"halfcheetah-medium-replay-v0": 12135.0,
"halfcheetah-medium-expert-v0": 12135.0,
"walker2d-random-v0": 4592.3,
"walker2d-medium-v0": 4592.3,
"walker2d-expert-v0": 4592.3,
"walker2d-medium-replay-v0": 4592.3,
"walker2d-medium-expert-v0": 4592.3,
"hopper-random-v0": 3234.3,
"hopper-medium-v0": 3234.3,
"hopper-expert-v0": 3234.3,
"hopper-medium-replay-v0": 3234.3,
"hopper-medium-expert-v0": 3234.3,
"antmaze-umaze-v0": 1.0,
"antmaze-umaze-diverse-v0": 1.0,
"antmaze-medium-play-v0": 1.0,
"antmaze-medium-diverse-v0": 1.0,
"antmaze-large-play-v0": 1.0,
"antmaze-large-diverse-v0": 1.0,
"kitchen-complete-v0": 4.0,
"kitchen-partial-v0": 4.0,
"kitchen-mixed-v0": 4.0,
"flow-ring-random-v0": 24.42,
"flow-ring-controller-v0": 24.42,
"flow-merge-random-v0": 330.03179,
"flow-merge-controller-v0": 330.03179,
"carla-lane-v0": 1023.5784385429523,
"carla-town-v0": -64.62967840318221,
}
| 11,967 | 64.043478 | 168 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/offline_env.py | import os
import urllib.request
import gym
import h5py
def set_dataset_path(path):
global DATASET_PATH
DATASET_PATH = path
os.makedirs(path, exist_ok=True)
set_dataset_path(
os.environ.get("D4RL_DATASET_DIR", os.path.expanduser("~/.d4rl_alt/datasets"))
)
def get_keys(h5file):
keys = []
def visitor(name, item):
if isinstance(item, h5py.Dataset):
keys.append(name)
h5file.visititems(visitor)
return keys
def filepath_from_url(dataset_url):
_, dataset_name = os.path.split(dataset_url)
dataset_filepath = os.path.join(DATASET_PATH, dataset_name)
return dataset_filepath
def download_dataset_from_url(dataset_url):
dataset_filepath = filepath_from_url(dataset_url)
if not os.path.exists(dataset_filepath):
print("Downloading dataset:", dataset_url, "to", dataset_filepath)
urllib.request.urlretrieve(dataset_url, dataset_filepath)
if not os.path.exists(dataset_filepath):
raise IOError("Failed to download dataset from %s" % dataset_url)
return dataset_filepath
class OfflineEnv(gym.Env):
"""
Base class for offline RL envs.
Args:
dataset_url: URL pointing to the dataset.
ref_max_score: Maximum score (for score normalization)
ref_min_score: Minimum score (for score normalization)
"""
def __init__(
self, dataset_url=None, ref_max_score=None, ref_min_score=None, **kwargs
):
super(OfflineEnv, self).__init__(**kwargs)
self.dataset_url = self._dataset_url = dataset_url
self.ref_max_score = ref_max_score
self.ref_min_score = ref_min_score
def get_normalized_score(self, score):
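        # Maps raw returns onto the D4RL normalized scale: ref_min_score -> 0.0 and
        # ref_max_score -> 1.0 (scores above the expert reference can exceed 1.0).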
if (self.ref_max_score is None) or (self.ref_min_score is None):
raise ValueError("Reference score not provided for env")
return (score - self.ref_min_score) / (self.ref_max_score - self.ref_min_score)
@property
def dataset_filepath(self):
return filepath_from_url(self.dataset_url)
def get_dataset(self, h5path=None):
if h5path is None:
if self._dataset_url is None:
raise ValueError("Offline env not configured with a dataset URL.")
h5path = download_dataset_from_url(self.dataset_url)
dataset_file = h5py.File(h5path, "r")
data_dict = {k: dataset_file[k][:] for k in get_keys(dataset_file)}
dataset_file.close()
# Run a few quick sanity checks
for key in ["observations", "actions", "rewards", "terminals"]:
assert key in data_dict, "Dataset is missing key %s" % key
N_samples = data_dict["observations"].shape[0]
if self.observation_space.shape is not None:
assert (
data_dict["observations"].shape[1:] == self.observation_space.shape
), "Observation shape does not match env: %s vs %s" % (
str(data_dict["observations"].shape[1:]),
str(self.observation_space.shape),
)
assert (
data_dict["actions"].shape[1:] == self.action_space.shape
), "Action shape does not match env: %s vs %s" % (
str(data_dict["actions"].shape[1:]),
str(self.action_space.shape),
)
if data_dict["rewards"].shape == (N_samples, 1):
data_dict["rewards"] = data_dict["rewards"][:, 0]
assert data_dict["rewards"].shape == (
N_samples,
), "Reward has wrong shape: %s" % (str(data_dict["rewards"].shape))
if data_dict["terminals"].shape == (N_samples, 1):
data_dict["terminals"] = data_dict["terminals"][:, 0]
assert data_dict["terminals"].shape == (
N_samples,
), "Terminals has wrong shape: %s" % (str(data_dict["rewards"].shape))
return data_dict
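    # Minimal usage sketch (assumes the dataset URL is configured and reachable):
    #   data = env.get_dataset()
    #   data['observations']  # shape (N,) + observation_space.shape
    #   data['actions']       # shape (N,) + action_space.shape
    #   data['rewards'], data['terminals']  # each of shape (N,)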
def get_dataset_chunk(self, chunk_id, h5path=None):
"""
Returns a slice of the full dataset.
Args:
chunk_id (int): An integer representing which slice of the dataset to return.
Returns:
A dictionary containing observtions, actions, rewards, and terminals.
"""
if h5path is None:
if self._dataset_url is None:
raise ValueError("Offline env not configured with a dataset URL.")
h5path = download_dataset_from_url(self.dataset_url)
dataset_file = h5py.File(h5path, "r")
if "virtual" not in dataset_file.keys():
raise ValueError("Dataset is not a chunked dataset")
available_chunks = [
int(_chunk) for _chunk in list(dataset_file["virtual"].keys())
]
if chunk_id not in available_chunks:
raise ValueError(
"Chunk id not found: %d. Available chunks: %s"
% (chunk_id, str(available_chunks))
)
load_keys = ["observations", "actions", "rewards", "terminals"]
data_dict = {
k: dataset_file["virtual/%d/%s" % (chunk_id, k)][:] for k in load_keys
}
dataset_file.close()
return data_dict
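# Sketch: get_dataset_chunk() only applies to chunked datasets whose hdf5 file contains a
# top-level 'virtual' group, e.g. `chunk = env.get_dataset_chunk(0)`; otherwise it raises ValueError.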
class OfflineEnvWrapper(gym.Wrapper, OfflineEnv):
"""
Wrapper class for offline RL envs.
"""
def __init__(self, env, **kwargs):
gym.Wrapper.__init__(self, env)
OfflineEnv.__init__(self, **kwargs)
def reset(self):
return self.env.reset()
| 5,386 | 32.880503 | 89 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/ope.py | """
Metrics for off-policy evaluation.
"""
import numpy as np
from d4rl_alt import infos
UNDISCOUNTED_POLICY_RETURNS = {
"halfcheetah-medium": 3985.8150261686337,
"halfcheetah-random": -199.26067391425954,
"halfcheetah-expert": 12330.945945279545,
"hopper-medium": 2260.1983114487352,
"hopper-random": 1257.9757846810203,
"hopper-expert": 3624.4696022560997,
"walker2d-medium": 2760.3310101980005,
"walker2d-random": 896.4751989935487,
"walker2d-expert": 4005.89370727539,
}
DISCOUNTED_POLICY_RETURNS = {
"halfcheetah-medium": 324.83583782709877,
"halfcheetah-random": -16.836944753939207,
"halfcheetah-expert": 827.7278887047698,
"hopper-medium": 235.7441494727478,
"hopper-random": 215.04955086664955,
"hopper-expert": 271.6925087260701,
"walker2d-medium": 202.23983424823822,
"walker2d-random": 78.46052021427765,
"walker2d-expert": 396.8752247768766,
}
def get_returns(policy_id, discounted=False):
if discounted:
return DISCOUNTED_POLICY_RETURNS[policy_id]
return UNDISCOUNTED_POLICY_RETURNS[policy_id]
def normalize(policy_id, score):
key = policy_id + "-v0"
min_score = infos.REF_MIN_SCORE[key]
max_score = infos.REF_MAX_SCORE[key]
return (score - min_score) / (max_score - min_score)
def ranking_correlation_metric(policies, discounted=False):
"""
Computes Spearman's rank correlation coefficient.
A score of 1.0 means the policies are ranked correctly according to their values.
A score of -1.0 means the policies are ranked inversely.
Args:
policies: A list of policy string identifiers.
Valid identifiers must be contained in POLICY_RETURNS.
Returns:
A correlation value between [-1, 1]
"""
return_values = np.array(
[get_returns(policy_key, discounted=discounted) for policy_key in policies]
)
ranks = np.argsort(-return_values)
N = len(policies)
diff = ranks - np.arange(N)
return 1.0 - (6 * np.sum(diff ** 2)) / (N * (N ** 2 - 1))
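# Worked example (using the returns defined above): for
#   policies = ['hopper-expert', 'hopper-medium', 'hopper-random']
# the true returns are already in descending order, so every diff is 0 and the score is 1.0;
# reversing the list gives diffs [2, 0, -2], hence 1 - 6*8 / (3*(3**2 - 1)) = -1.0.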
def precision_at_k_metric(policies, k=1, n_rel=None, discounted=False):
"""
Computes precision@k.
Args:
policies: A list of policy string identifiers.
k (int): Number of top items.
n_rel (int): Number of relevant items. Default is k.
Returns:
Fraction of top k policies in the top n_rel of the true rankings.
"""
assert len(policies) >= k
if n_rel is None:
n_rel = k
top_k = sorted(
policies, reverse=True, key=lambda x: get_returns(x, discounted=discounted)
)[:n_rel]
policy_k = policies[:k]
score = sum([policy in top_k for policy in policy_k])
return float(score) / k
def recall_at_k_metric(policies, k=1, n_rel=None, discounted=False):
"""
Computes recall@k.
Args:
policies: A list of policy string identifiers.
k (int): Number of top items.
n_rel (int): Number of relevant items. Default is k.
Returns:
Fraction of top n_rel true policy rankings in the top k of the given policies
"""
assert len(policies) >= k
if n_rel is None:
n_rel = k
top_k = sorted(
policies, reverse=True, key=lambda x: get_returns(x, discounted=discounted)
)[:n_rel]
policy_k = policies[:k]
score = sum([policy in policy_k for policy in top_k])
return float(score) / k
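# Example with the returns defined above: for the proposed ranking
#   policies = ['hopper-random', 'hopper-expert', 'hopper-medium']
# the single best policy is 'hopper-expert', so precision_at_k(policies, k=1) == 0.0,
# while precision_at_k(policies, k=2) == 0.5 and recall_at_k(policies, k=2) == 0.5.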
def value_error_metric(policy, value, discounted=False):
"""
Returns the absolute error in estimated value.
Args:
policy (str): A policy string identifier.
value (float): Estimated value
"""
return abs(
normalize(policy, value) - normalize(policy, get_returns(policy, discounted))
)
def policy_regret_metric(policy, expert_policies, discounted=False):
"""
Returns the regret of the given policy against a set of expert policies.
Args:
policy (str): A policy string identifier.
expert_policies (list[str]): A list of expert policies
Returns:
The regret, which is value of the best expert minus the value of the policy.
"""
best_returns = max(
[
get_returns(policy_key, discounted=discounted)
for policy_key in expert_policies
]
)
return normalize(policy, best_returns) - normalize(
policy, get_returns(policy, discounted=discounted)
)
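# Sketch (the estimated value 2000.0 is made up; normalization uses infos.REF_MIN_SCORE / REF_MAX_SCORE):
#   value_error_metric('hopper-medium', 2000.0)   # |normalize(2000.0) - normalize(true return)|
#   policy_regret_metric('hopper-medium', ['hopper-random', 'hopper-expert'])
#   # regret of the medium policy relative to the best policy in the list ('hopper-expert')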
| 4,393 | 28.891156 | 85 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/carla/__init__.py | from gym.envs.registration import register
from .carla_env import CarlaObsDictEnv, CarlaObsEnv
register(
id="carla-lane-v0",
entry_point="d4rl_alt.carla:CarlaObsEnv",
max_episode_steps=250,
kwargs={
"ref_min_score": -0.8503839912088142,
"ref_max_score": 1023.5784385429523,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/carla/carla_lane_follow_flat-v0.hdf5",
"reward_type": "lane_follow",
"carla_args": dict(
vision_size=48,
vision_fov=48,
weather=False,
frame_skip=1,
steps=250,
multiagent=True,
lane=0,
lights=False,
record_dir="None",
),
},
)
register(
id="carla-lane-render-v0",
entry_point="d4rl_alt.carla:CarlaDictEnv",
max_episode_steps=250,
kwargs={
"ref_min_score": -0.8503839912088142,
"ref_max_score": 1023.5784385429523,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/carla/carla_lane_follow-v0.hdf5",
"reward_type": "lane_follow",
"render_images": True,
"carla_args": dict(
vision_size=48,
vision_fov=48,
weather=False,
frame_skip=1,
steps=250,
multiagent=True,
lane=0,
lights=False,
record_dir="None",
),
},
)
TOWN_STEPS = 1000
register(
id="carla-town-v0",
entry_point="d4rl_alt.carla:CarlaObsEnv",
max_episode_steps=TOWN_STEPS,
kwargs={
"ref_min_score": -114.81579500772153, # Average random returns
"ref_max_score": 2440.1772022247314, # Average dataset returns
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/carla/carla_town_subsamp_flat-v0.hdf5",
"reward_type": "goal_reaching",
"carla_args": dict(
vision_size=48,
vision_fov=48,
weather=False,
frame_skip=1,
steps=TOWN_STEPS,
multiagent=True,
lane=0,
lights=False,
record_dir="None",
),
},
)
register(
id="carla-town-full-v0",
entry_point="d4rl_alt.carla:CarlaObsEnv",
max_episode_steps=TOWN_STEPS,
kwargs={
"ref_min_score": -114.81579500772153, # Average random returns
"ref_max_score": 2440.1772022247314, # Average dataset returns
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/carla/carla_town_flat-v0.hdf5",
"reward_type": "goal_reaching",
"carla_args": dict(
vision_size=48,
vision_fov=48,
weather=False,
frame_skip=1,
steps=TOWN_STEPS,
multiagent=True,
lane=0,
lights=False,
record_dir="None",
),
},
)
register(
id="carla-town-render-v0",
entry_point="d4rl_alt.carla:CarlaObsEnv",
max_episode_steps=TOWN_STEPS,
kwargs={
"ref_min_score": None,
"ref_max_score": None,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/carla/carla_town_flat-v0.hdf5",
"render_images": True,
"reward_type": "goal_reaching",
"carla_args": dict(
vision_size=48,
vision_fov=48,
weather=False,
frame_skip=1,
steps=TOWN_STEPS,
multiagent=True,
lane=0,
lights=False,
record_dir="None",
),
},
)
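# Minimal usage sketch (assumes a CARLA server is running on the default port and that this
# module has been imported so the register() calls above have executed):
#   import gym
#   env = gym.make('carla-lane-v0')
#   dataset = env.get_dataset()  # downloads the hdf5 file listed in dataset_url on first use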
| 3,546 | 27.376 | 113 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/carla/carla_env.py | import argparse
import datetime
import glob
import os
import random
import sys
import time
import gym
import gym.spaces as spaces
from gym import Env
from PIL import Image
from PIL.PngImagePlugin import PngInfo
# from . import proxy_env
from d4rl_alt.offline_env import OfflineEnv
try:
sys.path.append(
glob.glob(
"../carla/dist/carla-*%d.%d-%s.egg"
% (
sys.version_info.major,
sys.version_info.minor,
"win-amd64" if os.name == "nt" else "linux-x86_64",
)
)[0]
)
except IndexError:
pass
import math
import carla
from dotmap import DotMap
try:
import pygame
except ImportError:
raise RuntimeError("cannot import pygame, make sure pygame package is installed")
try:
import numpy as np
except ImportError:
raise RuntimeError("cannot import numpy, make sure numpy package is installed")
try:
import queue
except ImportError:
import Queue as queue
# This is CARLA agent
from agents.navigation.agent import Agent, AgentState
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.navigation.local_planner import LocalPlanner
from agents.tools.misc import compute_magnitude_angle, is_within_distance_ahead
def is_within_distance(
target_location,
current_location,
orientation,
max_distance,
d_angle_th_up,
d_angle_th_low=0,
):
"""
Check if a target object is within a certain distance from a reference object.
A vehicle in front would be something around 0 deg, while one behind around 180 deg.
:param target_location: location of the target object
:param current_location: location of the reference object
:param orientation: orientation of the reference object
:param max_distance: maximum allowed distance
    :param d_angle_th_up: upper threshold for angle
    :param d_angle_th_low: lower threshold for angle (optional, default is 0)
:return: True if target object is within max_distance ahead of the reference object
"""
target_vector = np.array(
[target_location.x - current_location.x, target_location.y - current_location.y]
)
norm_target = np.linalg.norm(target_vector)
# If the vector is too short, we can simply stop here
if norm_target < 0.001:
return True
if norm_target > max_distance:
return False
forward_vector = np.array(
[math.cos(math.radians(orientation)), math.sin(math.radians(orientation))]
)
d_angle = math.degrees(
math.acos(
np.clip(np.dot(forward_vector, target_vector) / norm_target, -1.0, 1.0)
)
)
return d_angle_th_low < d_angle < d_angle_th_up
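# Illustrative check (coordinates made up): with the reference object at the origin facing +x
# (orientation=0) and max_distance=10, a target at (3, 3) is ~4.2 m away at a 45 deg bearing,
# so thresholds (d_angle_th_low=0, d_angle_th_up=90) return True; a target directly behind
# at (-5, 0) gives a 180 deg bearing and returns False.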
def compute_distance(location_1, location_2):
"""
    Euclidean distance between 3D points
:param location_1, location_2: 3D points
"""
x = location_2.x - location_1.x
y = location_2.y - location_1.y
z = location_2.z - location_1.z
norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
return norm
class CustomGlobalRoutePlanner(GlobalRoutePlanner):
def __init__(self, dao):
super(CustomGlobalRoutePlanner, self).__init__(dao=dao)
def compute_direction_velocities(self, origin, velocity, destination):
node_list = super(CustomGlobalRoutePlanner, self)._path_search(
origin=origin, destination=destination
)
origin_xy = np.array([origin.x, origin.y])
velocity_xy = np.array([velocity.x, velocity.y])
first_node_xy = self._graph.nodes[node_list[0]]["vertex"]
first_node_xy = np.array([first_node_xy[0], first_node_xy[1]])
target_direction_vector = first_node_xy - origin_xy
target_unit_vector = np.array(target_direction_vector) / np.linalg.norm(
target_direction_vector
)
vel_s = np.dot(velocity_xy, target_unit_vector)
unit_velocity = velocity_xy / (np.linalg.norm(velocity_xy) + 1e-8)
angle = np.arccos(np.clip(np.dot(unit_velocity, target_unit_vector), -1.0, 1.0))
vel_perp = np.linalg.norm(velocity_xy) * np.sin(angle)
return vel_s, vel_perp
def compute_distance(self, origin, destination):
node_list = super(CustomGlobalRoutePlanner, self)._path_search(
origin=origin, destination=destination
)
# print('Node list:', node_list)
first_node_xy = self._graph.nodes[node_list[1]]["vertex"]
# print('Diff:', origin, first_node_xy)
# distance = 0.0
distances = []
distances.append(
np.linalg.norm(
np.array([origin.x, origin.y, 0.0]) - np.array(first_node_xy)
)
)
for idx in range(len(node_list) - 1):
distances.append(
super(CustomGlobalRoutePlanner, self)._distance_heuristic(
node_list[idx], node_list[idx + 1]
)
)
# print('Distances:', distances)
# import pdb; pdb.set_trace()
return np.sum(distances)
class CarlaSyncMode(object):
"""
Context manager to synchronize output from different sensors. Synchronous
mode is enabled as long as we are inside this context
with CarlaSyncMode(world, sensors) as sync_mode:
while True:
data = sync_mode.tick(timeout=1.0)
"""
def __init__(self, world, *sensors, **kwargs):
self.world = world
self.sensors = sensors
self.frame = None
self.delta_seconds = 1.0 / kwargs.get("fps", 20)
self._queues = []
self._settings = None
self.start()
def start(self):
self._settings = self.world.get_settings()
self.frame = self.world.apply_settings(
carla.WorldSettings(
no_rendering_mode=False,
synchronous_mode=True,
fixed_delta_seconds=self.delta_seconds,
)
)
def make_queue(register_event):
q = queue.Queue()
register_event(q.put)
self._queues.append(q)
make_queue(self.world.on_tick)
for sensor in self.sensors:
make_queue(sensor.listen)
def tick(self, timeout):
self.frame = self.world.tick()
data = [self._retrieve_data(q, timeout) for q in self._queues]
assert all(x.frame == self.frame for x in data)
return data
def __exit__(self, *args, **kwargs):
self.world.apply_settings(self._settings)
def _retrieve_data(self, sensor_queue, timeout):
while True:
data = sensor_queue.get(timeout=timeout)
if data.frame == self.frame:
return data
class Sun(object):
def __init__(self, azimuth, altitude):
self.azimuth = azimuth
self.altitude = altitude
self._t = 0.0
def tick(self, delta_seconds):
self._t += 0.008 * delta_seconds
self._t %= 2.0 * math.pi
self.azimuth += 0.25 * delta_seconds
self.azimuth %= 360.0
min_alt, max_alt = [20, 90]
self.altitude = 0.5 * (max_alt + min_alt) + 0.5 * (
max_alt - min_alt
) * math.cos(self._t)
def __str__(self):
return "Sun(alt: %.2f, azm: %.2f)" % (self.altitude, self.azimuth)
class Storm(object):
def __init__(self, precipitation):
self._t = precipitation if precipitation > 0.0 else -50.0
self._increasing = True
self.clouds = 0.0
self.rain = 0.0
self.wetness = 0.0
self.puddles = 0.0
self.wind = 0.0
self.fog = 0.0
def tick(self, delta_seconds):
delta = (1.3 if self._increasing else -1.3) * delta_seconds
self._t = clamp(delta + self._t, -250.0, 100.0)
self.clouds = clamp(self._t + 40.0, 0.0, 90.0)
self.clouds = clamp(self._t + 40.0, 0.0, 60.0)
self.rain = clamp(self._t, 0.0, 80.0)
delay = -10.0 if self._increasing else 90.0
self.puddles = clamp(self._t + delay, 0.0, 85.0)
self.wetness = clamp(self._t * 5, 0.0, 100.0)
self.wind = 5.0 if self.clouds <= 20 else 90 if self.clouds >= 70 else 40
self.fog = clamp(self._t - 10, 0.0, 30.0)
if self._t == -250.0:
self._increasing = True
if self._t == 100.0:
self._increasing = False
def __str__(self):
return "Storm(clouds=%d%%, rain=%d%%, wind=%d%%)" % (
self.clouds,
self.rain,
self.wind,
)
class Weather(object):
def __init__(self, world, changing_weather_speed):
self.world = world
self.reset()
self.weather = world.get_weather()
self.changing_weather_speed = changing_weather_speed
self._sun = Sun(self.weather.sun_azimuth_angle, self.weather.sun_altitude_angle)
self._storm = Storm(self.weather.precipitation)
def reset(self):
weather_params = carla.WeatherParameters(sun_altitude_angle=90.0)
self.world.set_weather(weather_params)
def tick(self):
self._sun.tick(self.changing_weather_speed)
self._storm.tick(self.changing_weather_speed)
self.weather.cloudiness = self._storm.clouds
self.weather.precipitation = self._storm.rain
self.weather.precipitation_deposits = self._storm.puddles
self.weather.wind_intensity = self._storm.wind
self.weather.fog_density = self._storm.fog
self.weather.wetness = self._storm.wetness
self.weather.sun_azimuth_angle = self._sun.azimuth
self.weather.sun_altitude_angle = self._sun.altitude
self.world.set_weather(self.weather)
def __str__(self):
return "%s %s" % (self._sun, self._storm)
def clamp(value, minimum=0.0, maximum=100.0):
return max(minimum, min(value, maximum))
## Now the actual env
class CarlaEnv(object):
"""
CARLA agent, we will wrap this in a proxy env to get a gym env
"""
def __init__(
self,
render=False,
carla_port=2000,
record=False,
record_dir=None,
args=None,
record_vision=False,
reward_type="lane_follow",
**kwargs
):
self.render_display = render
self.record_display = record
print("[CarlaEnv] record_vision:", record_vision)
self.record_vision = record_vision
self.record_dir = record_dir
self.reward_type = reward_type
self.vision_size = args["vision_size"]
self.vision_fov = args["vision_fov"]
self.changing_weather_speed = float(args["weather"])
self.frame_skip = args["frame_skip"]
self.max_episode_steps = args["steps"] # DMC uses this
self.multiagent = args["multiagent"]
self.start_lane = args["lane"]
self.follow_traffic_lights = args["lights"]
if self.record_display:
assert self.render_display
self.actor_list = []
if self.render_display:
pygame.init()
self.render_display = pygame.display.set_mode(
(800, 600), pygame.HWSURFACE | pygame.DOUBLEBUF
)
self.font = get_font()
self.clock = pygame.time.Clock()
self.client = carla.Client("localhost", carla_port)
self.client.set_timeout(2.0)
self.world = self.client.get_world()
self.map = self.world.get_map()
# tests specific to map 4:
if self.start_lane and self.map.name != "Town04":
raise NotImplementedError
# remove old vehicles and sensors (in case they survived)
self.world.tick()
actor_list = self.world.get_actors()
for vehicle in actor_list.filter("*vehicle*"):
print("Warning: removing old vehicle")
vehicle.destroy()
for sensor in actor_list.filter("*sensor*"):
print("Warning: removing old sensor")
sensor.destroy()
self.vehicle = None
self.vehicles_list = [] # their ids
self.reset_vehicle() # creates self.vehicle
self.actor_list.append(self.vehicle)
blueprint_library = self.world.get_blueprint_library()
if self.render_display:
self.camera_display = self.world.spawn_actor(
blueprint_library.find("sensor.camera.rgb"),
carla.Transform(
carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)
),
attach_to=self.vehicle,
)
self.actor_list.append(self.camera_display)
bp = blueprint_library.find("sensor.camera.rgb")
bp.set_attribute("image_size_x", str(self.vision_size))
bp.set_attribute("image_size_y", str(self.vision_size))
bp.set_attribute("fov", str(self.vision_fov))
location = carla.Location(x=1.6, z=1.7)
self.camera_vision = self.world.spawn_actor(
bp,
carla.Transform(location, carla.Rotation(yaw=0.0)),
attach_to=self.vehicle,
)
self.actor_list.append(self.camera_vision)
if self.record_display or self.record_vision:
if self.record_dir is None:
self.record_dir = "carla-{}-{}x{}-fov{}".format(
self.map.name.lower(),
self.vision_size,
self.vision_size,
self.vision_fov,
)
if self.frame_skip > 1:
self.record_dir += "-{}".format(self.frame_skip)
if self.changing_weather_speed > 0.0:
self.record_dir += "-weather"
if self.multiagent:
self.record_dir += "-mutiagent"
if self.follow_traffic_lights:
self.record_dir += "-lights"
self.record_dir += "-{}k".format(self.max_episode_steps // 1000)
now = datetime.datetime.now()
self.record_dir += now.strftime("-%Y-%m-%d-%H-%M-%S")
os.mkdir(self.record_dir)
if self.render_display:
self.sync_mode = CarlaSyncMode(
self.world, self.camera_display, self.camera_vision, fps=20
)
else:
self.sync_mode = CarlaSyncMode(self.world, self.camera_vision, fps=20)
# weather
self.weather = Weather(self.world, self.changing_weather_speed)
# dummy variables, to match deep mind control's APIs
low = -1.0
high = 1.0
self.action_space = spaces.Box(
low=np.array((low, low)), high=np.array((high, high))
)
self.observation_space = DotMap()
self.observation_space.shape = (3, self.vision_size, self.vision_size)
self.observation_space.dtype = np.dtype(np.uint8)
self.reward_range = None
self.metadata = None
# self.action_space.sample = lambda: np.random.uniform(low=low, high=high, size=self.action_space.shape[0]).astype(np.float32)
self.horizon = self.max_episode_steps
self.image_shape = (3, self.vision_size, self.vision_size)
# roaming carla agent
self.count = 0
self.world.tick()
self.reset_init()
self._proximity_threshold = 10.0
self._traffic_light_threshold = 5.0
self.actor_list = self.world.get_actors()
# for idx in range(len(self.actor_list)):
# print (idx, self.actor_list[idx])
# import ipdb; ipdb.set_trace()
self.vehicle_list = self.actor_list.filter("*vehicle*")
self.lights_list = self.actor_list.filter("*traffic_light*")
self.object_list = self.actor_list.filter("*traffic.*")
# town nav
self.route_planner_dao = GlobalRoutePlannerDAO(
self.map, sampling_resolution=0.1
)
self.route_planner = CustomGlobalRoutePlanner(self.route_planner_dao)
self.route_planner.setup()
self.target_location = carla.Location(x=-13.473097, y=134.311234, z=-0.010433)
# roaming carla agent
# self.agent = None
# self.count = 0
# self.world.tick()
self.reset() # creates self.agent
def reset_init(self):
self.reset_vehicle()
self.world.tick()
self.reset_other_vehicles()
self.world.tick()
#
self.count = 0
def reset(self):
# self.reset_vehicle()
# self.world.tick()
# self.reset_other_vehicles()
# self.world.tick()
# self.count = 0
# get obs:
# for _ in range(5):
# self.world.tick()
# obs, _, _, _ = self.step()
obs, _, done, _ = self.step()
# keep resetting until vehicle is not collided
total_resets = 0
while done:
self.reset_vehicle()
self.world.tick()
obs, _, done, _ = self.step()
total_resets += 1
if total_resets > 10:
break
return obs
def reset_vehicle(self):
if self.map.name == "Town04":
self.start_lane = (
-1
) # np.random.choice([-1, -2, -3, -4]) # their positive values, not negative
start_x = 5.0
vehicle_init_transform = carla.Transform(
carla.Location(x=start_x, y=0, z=0.1), carla.Rotation(yaw=-90)
)
else:
init_transforms = self.world.get_map().get_spawn_points()
vehicle_init_transform = random.choice(init_transforms)
# print('MyInitTransform', vehicle_init_transform)
if self.vehicle is None: # then create the ego vehicle
blueprint_library = self.world.get_blueprint_library()
vehicle_blueprint = blueprint_library.find("vehicle.audi.a2")
self.vehicle = self.world.spawn_actor(
vehicle_blueprint, vehicle_init_transform
)
self.vehicle.set_transform(vehicle_init_transform)
self.vehicle.set_velocity(carla.Vector3D())
self.vehicle.set_angular_velocity(carla.Vector3D())
def reset_other_vehicles(self):
if not self.multiagent:
return
# clear out old vehicles
self.client.apply_batch(
[carla.command.DestroyActor(x) for x in self.vehicles_list]
)
self.world.tick()
self.vehicles_list = []
traffic_manager = self.client.get_trafficmanager()
traffic_manager.set_global_distance_to_leading_vehicle(2.0)
traffic_manager.set_synchronous_mode(True)
blueprints = self.world.get_blueprint_library().filter("vehicle.*")
blueprints = [
x for x in blueprints if int(x.get_attribute("number_of_wheels")) == 4
]
num_vehicles = 20
if self.map.name == "Town04":
road_id = 47
road_length = 117.0
init_transforms = []
for _ in range(num_vehicles):
lane_id = random.choice([-1, -2, -3, -4])
vehicle_s = np.random.uniform(road_length) # length of road 47
init_transforms.append(
self.map.get_waypoint_xodr(road_id, lane_id, vehicle_s).transform
)
else:
init_transforms = self.world.get_map().get_spawn_points()
init_transforms = np.random.choice(init_transforms, num_vehicles)
# print('OtherInitTransforms:')
# for transf in init_transforms:
# print(transf)
# --------------
# Spawn vehicles
# --------------
batch = []
for transform in init_transforms:
transform.location.z += (
0.1 # otherwise can collide with the road it starts on
)
blueprint = random.choice(blueprints)
if blueprint.has_attribute("color"):
color = random.choice(
blueprint.get_attribute("color").recommended_values
)
blueprint.set_attribute("color", color)
if blueprint.has_attribute("driver_id"):
driver_id = random.choice(
blueprint.get_attribute("driver_id").recommended_values
)
blueprint.set_attribute("driver_id", driver_id)
blueprint.set_attribute("role_name", "autopilot")
batch.append(
carla.command.SpawnActor(blueprint, transform).then(
carla.command.SetAutopilot(carla.command.FutureActor, True)
)
)
        for response in self.client.apply_batch_sync(batch):
            if response.error:
                pass
            else:
                self.vehicles_list.append(response.actor_id)
traffic_manager.global_percentage_speed_difference(30.0)
def step(self, action=None, traffic_light_color=""):
"""
rewards = []
for _ in range(self.frame_skip): # default 1
next_obs, reward, done, info = self._simulator_step(action, traffic_light_color)
rewards.append(reward)
if done:
break
return next_obs, np.mean(rewards), done, info
"""
return self._simulator_step(action, traffic_light_color)
def _is_vehicle_hazard(self, vehicle, vehicle_list):
"""
:param vehicle_list: list of potential obstacle to check
:return: a tuple given by (bool_flag, vehicle), where
- bool_flag is True if there is a vehicle ahead blocking us
and False otherwise
- vehicle is the blocker object itself
"""
ego_vehicle_location = vehicle.get_location()
ego_vehicle_waypoint = self.map.get_waypoint(ego_vehicle_location)
for target_vehicle in vehicle_list:
# do not account for the ego vehicle
if target_vehicle.id == vehicle.id:
continue
# if the object is not in our lane it's not an obstacle
target_vehicle_waypoint = self.map.get_waypoint(
target_vehicle.get_location()
)
if (
target_vehicle_waypoint.road_id != ego_vehicle_waypoint.road_id
or target_vehicle_waypoint.lane_id != ego_vehicle_waypoint.lane_id
):
continue
if is_within_distance_ahead(
target_vehicle.get_transform(),
vehicle.get_transform(),
self._proximity_threshold / 10.0,
):
return (True, -1.0, target_vehicle)
return (False, 0.0, None)
def _is_object_hazard(self, vehicle, object_list):
"""
:param vehicle_list: list of potential obstacle to check
:return: a tuple given by (bool_flag, vehicle), where
- bool_flag is True if there is a vehicle ahead blocking us
and False otherwise
- vehicle is the blocker object itself
"""
ego_vehicle_location = vehicle.get_location()
ego_vehicle_waypoint = self.map.get_waypoint(ego_vehicle_location)
for target_vehicle in object_list:
# do not account for the ego vehicle
if target_vehicle.id == vehicle.id:
continue
# if the object is not in our lane it's not an obstacle
target_vehicle_waypoint = self.map.get_waypoint(
target_vehicle.get_location()
)
if (
target_vehicle_waypoint.road_id != ego_vehicle_waypoint.road_id
or target_vehicle_waypoint.lane_id != ego_vehicle_waypoint.lane_id
):
continue
if is_within_distance_ahead(
target_vehicle.get_transform(),
vehicle.get_transform(),
self._proximity_threshold / 40.0,
):
return (True, -1.0, target_vehicle)
return (False, 0.0, None)
def _is_light_red(self, vehicle):
"""
Method to check if there is a red light affecting us. This version of
the method is compatible with both European and US style traffic lights.
:param lights_list: list containing TrafficLight objects
:return: a tuple given by (bool_flag, traffic_light), where
- bool_flag is True if there is a traffic light in RED
affecting us and False otherwise
- traffic_light is the object itself or None if there is no
red traffic light affecting us
"""
ego_vehicle_location = vehicle.get_location()
ego_vehicle_waypoint = self.map.get_waypoint(ego_vehicle_location)
for traffic_light in self.lights_list:
object_location = self._get_trafficlight_trigger_location(traffic_light)
object_waypoint = self.map.get_waypoint(object_location)
if object_waypoint.road_id != ego_vehicle_waypoint.road_id:
continue
ve_dir = ego_vehicle_waypoint.transform.get_forward_vector()
wp_dir = object_waypoint.transform.get_forward_vector()
dot_ve_wp = ve_dir.x * wp_dir.x + ve_dir.y * wp_dir.y + ve_dir.z * wp_dir.z
if dot_ve_wp < 0:
continue
if is_within_distance_ahead(
object_waypoint.transform,
vehicle.get_transform(),
self._traffic_light_threshold,
):
if traffic_light.state == carla.TrafficLightState.Red:
return (True, -0.1, traffic_light)
return (False, 0.0, None)
def _get_trafficlight_trigger_location(
self, traffic_light
): # pylint: disable=no-self-use
"""
Calculates the yaw of the waypoint that represents the trigger volume of the traffic light
"""
def rotate_point(point, radians):
"""
rotate a given point by a given angle
"""
rotated_x = math.cos(radians) * point.x - math.sin(radians) * point.y
rotated_y = math.sin(radians) * point.x - math.cos(radians) * point.y
return carla.Vector3D(rotated_x, rotated_y, point.z)
base_transform = traffic_light.get_transform()
base_rot = base_transform.rotation.yaw
area_loc = base_transform.transform(traffic_light.trigger_volume.location)
area_ext = traffic_light.trigger_volume.extent
point = rotate_point(carla.Vector3D(0, 0, area_ext.z), math.radians(base_rot))
point_location = area_loc + carla.Location(x=point.x, y=point.y)
return carla.Location(point_location.x, point_location.y, point_location.z)
def _get_collision_reward(self, vehicle):
vehicle_hazard, reward, vehicle_id = self._is_vehicle_hazard(
vehicle, self.vehicle_list
)
# Check the lane ids
loc = vehicle.get_location()
if loc is not None:
w = self.map.get_waypoint(loc)
if w is not None:
current_lane_id = w.lane_id
if current_lane_id not in [-1, 1]:
# print ('Lane: ', current_lane_id, self.start_lane)
vehicle_hazard = True
reward = -1.0
else:
vehicle_hazard = True
reward = -1.0
else:
vehicle_hazard = True
reward = -1.0
# print ('vehicle: ', loc, current_lane_id, self.start_lane)
return vehicle_hazard, reward
def _get_traffic_light_reward(self, vehicle):
traffic_light_hazard, reward, traffic_light_id = self._is_light_red(vehicle)
return traffic_light_hazard, 0.0
def _get_object_collided_reward(self, vehicle):
object_hazard, reward, object_id = self._is_object_hazard(
vehicle, self.object_list
)
return object_hazard, reward
def goal_reaching_reward(self, vehicle):
# Now we will write goal_reaching_rewards
vehicle_location = vehicle.get_location()
vehicle_velocity = vehicle.get_velocity()
target_location = self.target_location
# This is the distance computation
try:
dist = self.route_planner.compute_distance(
vehicle_location, target_location
)
vel_forward, vel_perp = self.route_planner.compute_direction_velocities(
vehicle_location, vehicle_velocity, target_location
)
except TypeError:
# Weird bug where the graph disappears
vel_forward = 0
vel_perp = 0
# print('[GoalReachReward] VehLoc: %s Target: %s Dist: %s VelF:%s' % (str(vehicle_location), str(target_location), str(dist), str(vel_forward)))
# base_reward = -1.0 * (dist / 100.0) + 5.0
base_reward = vel_forward
collided_done, collision_reward = self._get_collision_reward(vehicle)
traffic_light_done, traffic_light_reward = self._get_traffic_light_reward(
vehicle
)
object_collided_done, object_collided_reward = self._get_object_collided_reward(
vehicle
)
total_reward = (
base_reward + 100 * collision_reward
) # + 100 * traffic_light_reward + 100.0 * object_collided_reward
reward_dict = dict()
reward_dict["collision"] = collision_reward
reward_dict["traffic_light"] = traffic_light_reward
reward_dict["object_collision"] = object_collided_reward
reward_dict["base_reward"] = base_reward
done_dict = dict()
done_dict["collided_done"] = collided_done
done_dict["traffic_light_done"] = traffic_light_done
done_dict["object_collided_done"] = object_collided_done
return total_reward, reward_dict, done_dict
def lane_follow_reward(self, vehicle):
# assume on highway
vehicle_location = vehicle.get_location()
vehicle_waypoint = self.map.get_waypoint(vehicle_location)
vehicle_xy = np.array([vehicle_location.x, vehicle_location.y])
vehicle_s = vehicle_waypoint.s
vehicle_velocity = vehicle.get_velocity() # Vector3D
vehicle_velocity_xy = np.array([vehicle_velocity.x, vehicle_velocity.y])
# print ('Velocity: ', vehicle_velocity_xy)
speed = np.linalg.norm(vehicle_velocity_xy)
vehicle_waypoint_closest_to_road = self.map.get_waypoint(
vehicle_location, project_to_road=True, lane_type=carla.LaneType.Driving
)
road_id = vehicle_waypoint_closest_to_road.road_id
assert road_id is not None
goal_abs_lane_id = 1 # just for goal-following
lane_id_sign = int(np.sign(vehicle_waypoint_closest_to_road.lane_id))
assert lane_id_sign in [-1, 1]
goal_lane_id = goal_abs_lane_id * lane_id_sign
current_waypoint = self.map.get_waypoint(
vehicle_location, project_to_road=False
)
goal_waypoint = self.map.get_waypoint_xodr(road_id, goal_lane_id, vehicle_s)
# Check for valid goal waypoint
if goal_waypoint is None:
print("goal waypoint is None...")
# try to fix, bit of a hack, with CARLA waypoint discretizations
carla_waypoint_discretization = 0.02 # meters
goal_waypoint = self.map.get_waypoint_xodr(
road_id, goal_lane_id, vehicle_s - carla_waypoint_discretization
)
if goal_waypoint is None:
goal_waypoint = self.map.get_waypoint_xodr(
road_id, goal_lane_id, vehicle_s + carla_waypoint_discretization
)
# set distance to 100 if the waypoint is off the road
if goal_waypoint is None:
print(
"Episode fail: goal waypoint is off the road! (frame %d)" % self.count
)
done, dist, vel_s = True, 100.0, 0.0
else:
goal_location = goal_waypoint.transform.location
goal_xy = np.array([goal_location.x, goal_location.y])
# dist = np.linalg.norm(vehicle_xy - goal_xy)
dists = []
for abs_lane_id in [1, 2, 3, 4]:
lane_id_ = abs_lane_id * lane_id_sign
wp = self.map.get_waypoint_xodr(road_id, lane_id_, vehicle_s)
if (
wp is not None
): # lane 4 might not exist where the highway has a turnoff
loc = wp.transform.location
xy = np.array([loc.x, loc.y])
dists.append(np.linalg.norm(vehicle_xy - xy))
if dists:
dist = min(dists) # just try to get to the center of one of the lanes
else:
dist = 0.0
next_goal_waypoint = goal_waypoint.next(
0.1
            )  # waypoints are every 0.02 meters
if len(next_goal_waypoint) != 1:
print("warning: {} waypoints (not 1)".format(len(next_goal_waypoint)))
if len(next_goal_waypoint) == 0:
print("Episode done: no more waypoints left. (frame %d)" % self.count)
done, vel_s, vel_perp = True, 0.0, 0.0
else:
location_ahead = next_goal_waypoint[0].transform.location
highway_vector = (
np.array([location_ahead.x, location_ahead.y]) - goal_xy
)
highway_unit_vector = np.array(highway_vector) / np.linalg.norm(
highway_vector
)
vel_s = np.dot(vehicle_velocity_xy, highway_unit_vector)
unit_velocity = vehicle_velocity_xy / (
np.linalg.norm(vehicle_velocity_xy) + 1e-8
)
angle = np.arccos(
np.clip(np.dot(unit_velocity, highway_unit_vector), -1.0, 1.0)
)
# vel_forward = np.linalg.norm(vehicle_velocity_xy) * np.cos(angle)
vel_perp = np.linalg.norm(vehicle_velocity_xy) * np.sin(angle)
# print('R:', np.clip(vel_s-5*vel_perp, -5.0, 5.0), 'vel_s:', vel_s, 'vel_perp:', vel_perp)
# import pdb; pdb.set_trace()
done = False
        # not algorithm's fault, but the simulator sometimes throws the car in the air weirdly
# usually in initial few frames, which can be ignored
"""
if vehicle_velocity.z > 1. and self.count < 20:
print("Episode done: vertical velocity too high ({}), usually a simulator glitch (frame {})".format(vehicle_velocity.z, self.count))
done = True
if vehicle_location.z > 0.5 and self.count < 20:
print("Episode done: vertical velocity too high ({}), usually a simulator glitch (frame {})".format(vehicle_location.z, self.count))
done = True
"""
## Add rewards for collision and optionally traffic lights
vehicle_location = vehicle.get_location()
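        # Reward shaping implemented below: reward forward speed along the lane (vel_s), penalize
        # lateral speed (vel_perp) five times as strongly, clip the result to [-5, 5], then add the
        # (large, negative) collision / traffic-light / object-collision penalties.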
base_reward = np.clip(vel_s - 5 * vel_perp, -5.0, 5.0)
collided_done, collision_reward = self._get_collision_reward(vehicle)
traffic_light_done, traffic_light_reward = self._get_traffic_light_reward(
vehicle
)
object_collided_done, object_collided_reward = self._get_object_collided_reward(
vehicle
)
total_reward = (
base_reward
+ 100 * collision_reward
+ 100 * traffic_light_reward
+ 100.0 * object_collided_reward
)
reward_dict = dict()
reward_dict["collision"] = collision_reward
reward_dict["traffic_light"] = traffic_light_reward
reward_dict["object_collision"] = object_collided_reward
reward_dict["base_reward"] = base_reward
reward_dict["base_reward_vel_s"] = vel_s
reward_dict["base_reward_vel_perp"] = vel_perp
done_dict = dict()
done_dict["collided_done"] = collided_done
done_dict["traffic_light_done"] = traffic_light_done
done_dict["object_collided_done"] = object_collided_done
done_dict["base_done"] = done
return total_reward, reward_dict, done_dict
def _simulator_step(self, action, traffic_light_color):
if action is None:
throttle, steer, brake = 0.0, 0.0, 0.0
else:
steer = float(action[1])
throttle_brake = float(action[0])
if throttle_brake >= 0.0:
throttle = throttle_brake
brake = 0.0
else:
throttle = 0.0
brake = -throttle_brake
vehicle_control = carla.VehicleControl(
throttle=float(throttle),
steer=float(steer),
brake=float(brake),
hand_brake=False,
reverse=False,
manual_gear_shift=False,
)
self.vehicle.apply_control(vehicle_control)
# Advance the simulation and wait for the data.
if self.render_display:
snapshot, display_image, vision_image = self.sync_mode.tick(timeout=2.0)
else:
snapshot, vision_image = self.sync_mode.tick(timeout=2.0)
# Weather evolves
self.weather.tick()
# Draw the display.
if self.render_display:
self.render_display.blit(
self.font.render("Frame %d" % self.count, True, (255, 255, 255)),
(8, 10),
)
self.render_display.blit(
self.font.render(
"Control: %5.2f thottle, %5.2f steer, %5.2f brake"
% (throttle, steer, brake),
True,
(255, 255, 255),
),
(8, 28),
)
self.render_display.blit(
self.font.render(
"Traffic light: " + traffic_light_color, True, (255, 255, 255)
),
(8, 46),
)
self.render_display.blit(
self.font.render(str(self.weather), True, (255, 255, 255)), (8, 64)
)
pygame.display.flip()
# Format rl image
bgra = np.array(vision_image.raw_data).reshape(
self.vision_size, self.vision_size, 4
) # BGRA format
bgr = bgra[:, :, :3] # BGR format (84 x 84 x 3)
rgb = np.flip(bgr, axis=2) # RGB format (84 x 84 x 3)
if self.render_display and self.record_display:
image_name = os.path.join(self.record_dir, "display%08d.jpg" % self.count)
pygame.image.save(self.render_display, image_name)
# # Can animate with:
# ffmpeg -r 20 -pattern_type glob -i 'display*.jpg' carla.mp4
        # Compute the reward first so it can also be written into the recorded frame metadata below.
        if self.reward_type == "lane_follow":
            reward, reward_dict, done_dict = self.lane_follow_reward(self.vehicle)
        elif self.reward_type == "goal_reaching":
            reward, reward_dict, done_dict = self.goal_reaching_reward(self.vehicle)
        else:
            raise ValueError("unknown reward type:", self.reward_type)
        if self.record_vision:
image_name = os.path.join(self.record_dir, "vision%08d.png" % self.count)
print("savedimg:", image_name)
im = Image.fromarray(rgb)
# add any meta data you like into the image before we save it:
metadata = PngInfo()
metadata.add_text("throttle", str(throttle))
metadata.add_text("steer", str(steer))
metadata.add_text("brake", str(brake))
metadata.add_text("lights", traffic_light_color)
# acceleration
acceleration = self.vehicle.get_acceleration()
metadata.add_text("acceleration_x", str(acceleration.x))
metadata.add_text("acceleration_y", str(acceleration.y))
metadata.add_text("acceleration_z", str(acceleration.z))
# angular velocity
angular_velocity = self.vehicle.get_angular_velocity()
metadata.add_text("angular_velocity_x", str(angular_velocity.x))
metadata.add_text("angular_velocity_y", str(angular_velocity.y))
metadata.add_text("angular_velocity_z", str(angular_velocity.z))
# location
location = self.vehicle.get_location()
metadata.add_text("location_x", str(location.x))
metadata.add_text("location_y", str(location.y))
metadata.add_text("location_z", str(location.z))
# rotation
rotation = self.vehicle.get_transform().rotation
metadata.add_text("rotation_pitch", str(rotation.pitch))
metadata.add_text("rotation_yaw", str(rotation.yaw))
metadata.add_text("rotation_roll", str(rotation.roll))
forward_vector = rotation.get_forward_vector()
metadata.add_text("forward_vector_x", str(forward_vector.x))
metadata.add_text("forward_vector_y", str(forward_vector.y))
metadata.add_text("forward_vector_z", str(forward_vector.z))
# velocity
velocity = self.vehicle.get_velocity()
metadata.add_text("velocity_x", str(velocity.x))
metadata.add_text("velocity_y", str(velocity.y))
metadata.add_text("velocity_z", str(velocity.z))
# weather
metadata.add_text(
"weather_cloudiness ", str(self.weather.weather.cloudiness)
)
metadata.add_text(
"weather_precipitation", str(self.weather.weather.precipitation)
)
metadata.add_text(
"weather_precipitation_deposits",
str(self.weather.weather.precipitation_deposits),
)
metadata.add_text(
"weather_wind_intensity", str(self.weather.weather.wind_intensity)
)
metadata.add_text(
"weather_fog_density", str(self.weather.weather.fog_density)
)
metadata.add_text("weather_wetness", str(self.weather.weather.wetness))
metadata.add_text(
"weather_sun_azimuth_angle", str(self.weather.weather.sun_azimuth_angle)
)
# settings
metadata.add_text("settings_map", self.map.name)
metadata.add_text("settings_vision_size", str(self.vision_size))
metadata.add_text("settings_vision_fov", str(self.vision_fov))
metadata.add_text(
"settings_changing_weather_speed", str(self.changing_weather_speed)
)
metadata.add_text("settings_multiagent", str(self.multiagent))
# traffic lights
metadata.add_text("traffic_lights_color", "UNLABELED")
metadata.add_text("reward", str(reward))
## Add in reward dict
for key in reward_dict:
metadata.add_text("reward_" + str(key), str(reward_dict[key]))
for key in done_dict:
metadata.add_text("done_" + str(key), str(done_dict[key]))
## Save the target location as well
metadata.add_text("target_location_x", str(self.target_location.x))
metadata.add_text("target_location_y", str(self.target_location.y))
metadata.add_text("target_location_z", str(self.target_location.z))
im.save(image_name, "PNG", pnginfo=metadata)
self.count += 1
next_obs = rgb
done = False
if done:
print(
"Episode success: I've reached the episode horizon ({}).".format(
self.max_episode_steps
)
)
if self.reward_type == "lane_follow":
reward, reward_dict, done_dict = self.lane_follow_reward(self.vehicle)
elif self.reward_type == "goal_reaching":
reward, reward_dict, done_dict = self.goal_reaching_reward(self.vehicle)
else:
raise ValueError("unknown reward type:", self.reward_type)
info = reward_dict
info.update(done_dict)
done = False
for key in done_dict:
done = done or done_dict[key]
# if done:
# print('done_dict:', done_dict, 'r:', reward)
return next_obs, reward, done, info
def finish(self):
print("destroying actors.")
for actor in self.actor_list:
actor.destroy()
print("\ndestroying %d vehicles" % len(self.vehicles_list))
self.client.apply_batch(
[carla.command.DestroyActor(x) for x in self.vehicles_list]
)
time.sleep(0.5)
pygame.quit()
print("done.")
class CarlaObsDictEnv(OfflineEnv):
def __init__(
self,
carla_args=None,
carla_port=2000,
reward_type="lane_follow",
render_images=False,
**kwargs
):
self._wrapped_env = CarlaEnv(
carla_port=carla_port,
args=carla_args,
reward_type=reward_type,
record_vision=render_images,
)
print("[CarlaObsDictEnv] render_images:", render_images)
self._wrapped_env = CarlaEnv(
carla_port=carla_port, args=carla_args, record_vision=render_images
)
self.action_space = self._wrapped_env.action_space
self.observation_space = self._wrapped_env.observation_space
self.observation_size = int(np.prod(self._wrapped_env.observation_space.shape))
self.observation_space = spaces.Dict(
{
"image": spaces.Box(
low=np.array([0.0] * self.observation_size),
high=np.array(
[
256.0,
]
* self.observation_size
),
)
}
)
print(self.observation_space)
super(CarlaObsDictEnv, self).__init__(**kwargs)
@property
def wrapped_env(self):
return self._wrapped_env
def reset(self, **kwargs):
self._wrapped_env.reset_init()
obs = self._wrapped_env.reset(**kwargs)
obs_dict = dict()
# Also normalize obs
obs_dict["image"] = (obs.astype(np.float32) / 255.0).flatten()
return obs_dict
def step(self, action):
# print ('Action: ', action)
next_obs, reward, done, info = self._wrapped_env.step(action)
next_obs_dict = dict()
next_obs_dict["image"] = (next_obs.astype(np.float32) / 255.0).flatten()
# print ('Reward: ', reward)
# print ('Done dict: ', info)
return next_obs_dict, reward, done, info
def render(self, *args, **kwargs):
return self._wrapped_env.render(*args, **kwargs)
@property
def horizon(self):
return self._wrapped_env.horizon
def terminate(self):
if hasattr(self.wrapped_env, "terminate"):
self._wrapped_env.terminate()
def __getattr__(self, attr):
if attr == "_wrapped_env":
raise AttributeError()
return getattr(self._wrapped_env, attr)
def __getstate__(self):
"""
This is useful to override in case the wrapped env has some funky
__getstate__ that doesn't play well with overriding __getattr__.
The main problematic case is/was gym's EzPickle serialization scheme.
:return:
"""
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def __str__(self):
return "{}({})".format(type(self).__name__, self.wrapped_env)
class CarlaObsEnv(OfflineEnv):
def __init__(
self,
carla_args=None,
carla_port=2000,
reward_type="lane_follow",
render_images=False,
**kwargs
):
self._wrapped_env = CarlaEnv(
carla_port=carla_port,
args=carla_args,
reward_type=reward_type,
record_vision=render_images,
)
self.action_space = self._wrapped_env.action_space
self.observation_space = self._wrapped_env.observation_space
self.observation_size = int(np.prod(self._wrapped_env.observation_space.shape))
self.observation_space = spaces.Box(
low=np.array([0.0] * self.observation_size),
high=np.array(
[
256.0,
]
* self.observation_size
),
)
# self.observation_space = spaces.Dict({
# 'image':spaces.Box(low=np.array([0.0] * self.observation_size), high=np.array([256.0,] * self.observation_size))
# })
super(CarlaObsEnv, self).__init__(**kwargs)
@property
def wrapped_env(self):
return self._wrapped_env
def reset(self, **kwargs):
self._wrapped_env.reset_init()
obs = self._wrapped_env.reset(**kwargs)
obs_dict = dict()
# Also normalize obs
obs_dict = (obs.astype(np.float32) / 255.0).flatten()
return obs_dict
def step(self, action):
# print ('Action: ', action)
next_obs, reward, done, info = self._wrapped_env.step(action)
# next_obs_dict = dict()
# next_obs_dict['image'] = (next_obs.astype(np.float32) / 255.0).flatten()
next_obs_dict = (next_obs.astype(np.float32) / 255.0).flatten()
# print ('Reward: ', reward)
# print ('Done dict: ', info)
return next_obs_dict, reward, done, info
def render(self, *args, **kwargs):
return self._wrapped_env.render(*args, **kwargs)
@property
def horizon(self):
return self._wrapped_env.horizon
def terminate(self):
if hasattr(self.wrapped_env, "terminate"):
self._wrapped_env.terminate()
def __getattr__(self, attr):
if attr == "_wrapped_env":
raise AttributeError()
return getattr(self._wrapped_env, attr)
def __getstate__(self):
"""
This is useful to override in case the wrapped env has some funky
__getstate__ that doesn't play well with overriding __getattr__.
The main problematic case is/was gym's EzPickle serialization scheme.
:return:
"""
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def __str__(self):
return "{}({})".format(type(self).__name__, self.wrapped_env)
if __name__ == "__main__":
variant = dict()
variant["vision_size"] = 48
variant["vision_fov"] = 48
variant["weather"] = False
variant["frame_skip"] = 1
variant["steps"] = 100000
variant["multiagent"] = False
variant["lane"] = 0
variant["lights"] = False
variant["record_dir"] = None
env = CarlaEnv(args=variant)
    # carla_gym_env = proxy_env.ProxyEnv(env)  # proxy_env is not imported (its import is commented out above)
| 50,962 | 36.064 | 152 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/carla/data_collection_agent_lane.py | # !/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# Modified by Rowan McAllister on 20 April 2020
import argparse
import datetime
import glob
import os
import random
import sys
import time
from PIL import Image
from PIL.PngImagePlugin import PngInfo
try:
sys.path.append(
glob.glob(
"../carla/dist/carla-*%d.%d-%s.egg"
% (
sys.version_info.major,
sys.version_info.minor,
"win-amd64" if os.name == "nt" else "linux-x86_64",
)
)[0]
)
except IndexError:
pass
import math
import carla
from dotmap import DotMap
try:
import pygame
except ImportError:
raise RuntimeError("cannot import pygame, make sure pygame package is installed")
try:
import numpy as np
except ImportError:
raise RuntimeError("cannot import numpy, make sure numpy package is installed")
try:
import queue
except ImportError:
import Queue as queue
from agents.navigation.agent import Agent, AgentState
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.navigation.local_planner import LocalPlanner
from agents.tools.misc import compute_magnitude_angle, is_within_distance_ahead
def is_within_distance(
target_location,
current_location,
orientation,
max_distance,
d_angle_th_up,
d_angle_th_low=0,
):
"""
Check if a target object is within a certain distance from a reference object.
A vehicle in front would be something around 0 deg, while one behind around 180 deg.
:param target_location: location of the target object
:param current_location: location of the reference object
:param orientation: orientation of the reference object
:param max_distance: maximum allowed distance
    :param d_angle_th_up: upper threshold for angle
    :param d_angle_th_low: lower threshold for angle (optional, default is 0)
:return: True if target object is within max_distance ahead of the reference object
"""
target_vector = np.array(
[target_location.x - current_location.x, target_location.y - current_location.y]
)
norm_target = np.linalg.norm(target_vector)
# If the vector is too short, we can simply stop here
if norm_target < 0.001:
return True
if norm_target > max_distance:
return False
forward_vector = np.array(
[math.cos(math.radians(orientation)), math.sin(math.radians(orientation))]
)
d_angle = math.degrees(
math.acos(
np.clip(np.dot(forward_vector, target_vector) / norm_target, -1.0, 1.0)
)
)
return d_angle_th_low < d_angle < d_angle_th_up
def compute_distance(location_1, location_2):
"""
Euclidean distance between 3D points
:param location_1, location_2: 3D points
"""
x = location_2.x - location_1.x
y = location_2.y - location_1.y
z = location_2.z - location_1.z
norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
return norm
class CarlaSyncMode(object):
"""
Context manager to synchronize output from different sensors. Synchronous
mode is enabled as long as we are inside this context
with CarlaSyncMode(world, sensors) as sync_mode:
while True:
data = sync_mode.tick(timeout=1.0)
"""
def __init__(self, world, *sensors, **kwargs):
self.world = world
self.sensors = sensors
self.frame = None
self.delta_seconds = 1.0 / kwargs.get("fps", 20)
self._queues = []
self._settings = None
self.start()
def start(self):
self._settings = self.world.get_settings()
self.frame = self.world.apply_settings(
carla.WorldSettings(
no_rendering_mode=False,
synchronous_mode=True,
fixed_delta_seconds=self.delta_seconds,
)
)
def make_queue(register_event):
q = queue.Queue()
register_event(q.put)
self._queues.append(q)
make_queue(self.world.on_tick)
for sensor in self.sensors:
make_queue(sensor.listen)
def tick(self, timeout):
self.frame = self.world.tick()
data = [self._retrieve_data(q, timeout) for q in self._queues]
assert all(x.frame == self.frame for x in data)
return data
def __exit__(self, *args, **kwargs):
self.world.apply_settings(self._settings)
def _retrieve_data(self, sensor_queue, timeout):
while True:
data = sensor_queue.get(timeout=timeout)
if data.frame == self.frame:
return data
def draw_image(surface, image, blend=False):
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if blend:
image_surface.set_alpha(100)
surface.blit(image_surface, (0, 0))
def get_font():
fonts = [x for x in pygame.font.get_fonts()]
default_font = "ubuntumono"
font = default_font if default_font in fonts else fonts[0]
font = pygame.font.match_font(font)
return pygame.font.Font(font, 14)
def should_quit():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
return True
return False
def clamp(value, minimum=0.0, maximum=100.0):
return max(minimum, min(value, maximum))
class Sun(object):
def __init__(self, azimuth, altitude):
self.azimuth = azimuth
self.altitude = altitude
self._t = 0.0
def tick(self, delta_seconds):
self._t += 0.008 * delta_seconds
self._t %= 2.0 * math.pi
self.azimuth += 0.25 * delta_seconds
self.azimuth %= 360.0
min_alt, max_alt = [20, 90]
self.altitude = 0.5 * (max_alt + min_alt) + 0.5 * (
max_alt - min_alt
) * math.cos(self._t)
def __str__(self):
return "Sun(alt: %.2f, azm: %.2f)" % (self.altitude, self.azimuth)
class Storm(object):
def __init__(self, precipitation):
self._t = precipitation if precipitation > 0.0 else -50.0
self._increasing = True
self.clouds = 0.0
self.rain = 0.0
self.wetness = 0.0
self.puddles = 0.0
self.wind = 0.0
self.fog = 0.0
def tick(self, delta_seconds):
delta = (1.3 if self._increasing else -1.3) * delta_seconds
self._t = clamp(delta + self._t, -250.0, 100.0)
self.clouds = clamp(self._t + 40.0, 0.0, 90.0)
self.clouds = clamp(self._t + 40.0, 0.0, 60.0)
self.rain = clamp(self._t, 0.0, 80.0)
delay = -10.0 if self._increasing else 90.0
self.puddles = clamp(self._t + delay, 0.0, 85.0)
self.wetness = clamp(self._t * 5, 0.0, 100.0)
self.wind = 5.0 if self.clouds <= 20 else 90 if self.clouds >= 70 else 40
self.fog = clamp(self._t - 10, 0.0, 30.0)
if self._t == -250.0:
self._increasing = True
if self._t == 100.0:
self._increasing = False
def __str__(self):
return "Storm(clouds=%d%%, rain=%d%%, wind=%d%%)" % (
self.clouds,
self.rain,
self.wind,
)
class Weather(object):
def __init__(self, world, changing_weather_speed):
self.world = world
self.reset()
self.weather = world.get_weather()
self.changing_weather_speed = changing_weather_speed
self._sun = Sun(self.weather.sun_azimuth_angle, self.weather.sun_altitude_angle)
self._storm = Storm(self.weather.precipitation)
def reset(self):
weather_params = carla.WeatherParameters(sun_altitude_angle=90.0)
self.world.set_weather(weather_params)
def tick(self):
self._sun.tick(self.changing_weather_speed)
self._storm.tick(self.changing_weather_speed)
self.weather.cloudiness = self._storm.clouds
self.weather.precipitation = self._storm.rain
self.weather.precipitation_deposits = self._storm.puddles
self.weather.wind_intensity = self._storm.wind
self.weather.fog_density = self._storm.fog
self.weather.wetness = self._storm.wetness
self.weather.sun_azimuth_angle = self._sun.azimuth
self.weather.sun_altitude_angle = self._sun.altitude
self.world.set_weather(self.weather)
def __str__(self):
return "%s %s" % (self._sun, self._storm)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--vision_size", type=int, default=84)
parser.add_argument("--vision_fov", type=int, default=90)
parser.add_argument("--weather", default=False, action="store_true")
parser.add_argument("--frame_skip", type=int, default=1),
parser.add_argument("--steps", type=int, default=100000)
parser.add_argument("--multiagent", default=False, action="store_true"),
parser.add_argument("--lane", type=int, default=0)
parser.add_argument("--lights", default=False, action="store_true")
args = parser.parse_args()
return args
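# Example invocation (hypothetical; assumes a CARLA server is already running and a __main__
# entry point that calls parse_args()):
#   python data_collection_agent_lane.py --vision_size 48 --vision_fov 48 --steps 100000 --multiagent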
class LocalPlannerModified(LocalPlanner):
def __del__(self):
pass # otherwise it deletes our vehicle object
def run_step(self):
return super().run_step(
debug=False
) # otherwise by default shows waypoints, that interfere with our camera
class RoamingAgent(Agent):
"""
RoamingAgent implements a basic agent that navigates scenes making random
choices when facing an intersection.
This agent respects traffic lights and other vehicles.
NOTE: need to re-create after each env reset
"""
def __init__(self, env):
"""
:param vehicle: actor to apply to local planner logic onto
"""
vehicle = env.vehicle
follow_traffic_lights = env.follow_traffic_lights
super(RoamingAgent, self).__init__(vehicle)
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
self._local_planner = LocalPlannerModified(self._vehicle)
self._follow_traffic_lights = follow_traffic_lights
def compute_action(self):
action, traffic_light = self.run_step()
throttle = action.throttle
brake = action.brake
steer = action.steer
# print('tbsl:', throttle, brake, steer, traffic_light)
if brake == 0.0:
return np.array([throttle, steer])
else:
return np.array([-brake, steer])
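    # Action convention used by compute_action (sketch): a 2-D action [a0, steer] where
    # a0 >= 0 means throttle=a0 with no brake, and a0 < 0 means brake=-a0 with no throttle,
    # e.g. np.array([0.5, 0.1]) is half throttle with a slight steer and np.array([-1.0, 0.0])
    # is a full brake.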
def run_step(self):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
actor_list = self._world.get_actors()
vehicle_list = actor_list.filter("*vehicle*")
lights_list = actor_list.filter("*traffic_light*")
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
traffic_light_color = self._is_light_red(lights_list)
if traffic_light_color == "RED" and self._follow_traffic_lights:
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
if hazard_detected:
control = self.emergency_stop()
else:
self._state = AgentState.NAVIGATING
# standard local planner behavior
control = self._local_planner.run_step()
# print ('Action chosen: ', control)
return control, traffic_light_color
# override case class
def _is_light_red_europe_style(self, lights_list):
"""
This method is specialized to check European style traffic lights.
Only suitable for Towns 03 -- 07.
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
traffic_light_color = "NONE" # default, if no traffic lights are seen
for traffic_light in lights_list:
object_waypoint = self._map.get_waypoint(traffic_light.get_location())
if (
object_waypoint.road_id != ego_vehicle_waypoint.road_id
or object_waypoint.lane_id != ego_vehicle_waypoint.lane_id
):
continue
if is_within_distance_ahead(
traffic_light.get_transform(),
self._vehicle.get_transform(),
self._proximity_threshold,
):
if traffic_light.state == carla.TrafficLightState.Red:
return "RED"
elif traffic_light.state == carla.TrafficLightState.Yellow:
traffic_light_color = "YELLOW"
elif traffic_light.state == carla.TrafficLightState.Green:
                    if traffic_light_color != "YELLOW": # (more severe)
traffic_light_color = "GREEN"
else:
import pdb
pdb.set_trace()
# investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
return traffic_light_color
# override case class
def _is_light_red_us_style(self, lights_list, debug=False):
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
traffic_light_color = "NONE" # default, if no traffic lights are seen
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
return "JUNCTION"
if self._local_planner.target_waypoint is not None:
if self._local_planner.target_waypoint.is_junction:
min_angle = 180.0
sel_magnitude = 0.0
sel_traffic_light = None
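                # pick the closest light that is within 60 m and whose bearing is
                # less than 25 degrees off the vehicle's heading; US-style lights
                # hang on the far side of the junction, hence the angle-based search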
for traffic_light in lights_list:
loc = traffic_light.get_location()
magnitude, angle = compute_magnitude_angle(
loc,
ego_vehicle_location,
self._vehicle.get_transform().rotation.yaw,
)
if magnitude < 60.0 and angle < min(25.0, min_angle):
sel_magnitude = magnitude
sel_traffic_light = traffic_light
min_angle = angle
if sel_traffic_light is not None:
if debug:
print(
"=== Magnitude = {} | Angle = {} | ID = {}".format(
sel_magnitude, min_angle, sel_traffic_light.id
)
)
if self._last_traffic_light is None:
self._last_traffic_light = sel_traffic_light
if self._last_traffic_light.state == carla.TrafficLightState.Red:
return "RED"
elif (
self._last_traffic_light.state == carla.TrafficLightState.Yellow
):
traffic_light_color = "YELLOW"
elif (
self._last_traffic_light.state == carla.TrafficLightState.Green
):
                        if traffic_light_color != "YELLOW": # (more severe)
traffic_light_color = "GREEN"
else:
import pdb
pdb.set_trace()
# investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
else:
self._last_traffic_light = None
return traffic_light_color
if __name__ == "__main__":
# example call:
# ./PythonAPI/util/config.py --map Town01 --delta-seconds 0.05
# python PythonAPI/carla/agents/navigation/data_collection_agent.py --vision_size 256 --vision_fov 90 --steps 10000 --weather --lights
args = parse_args()
env = CarlaEnv(args)
try:
done = False
while not done:
action, traffic_light_color = env.compute_action()
next_obs, reward, done, info = env.step(action, traffic_light_color)
print(
"Reward: ",
reward,
"Done: ",
done,
"Location: ",
env.vehicle.get_location(),
)
if done:
# env.reset_init()
# env.reset()
done = False
finally:
env.finish()
| 17,418 | 32.757752 | 138 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/carla/data_collection_town.py | #!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# Modified by Rowan McAllister on 20 April 2020
import argparse
import datetime
import glob
import os
import random
import sys
import time
from PIL import Image
from PIL.PngImagePlugin import PngInfo
try:
sys.path.append(
glob.glob(
"../carla/dist/carla-*%d.%d-%s.egg"
% (
sys.version_info.major,
sys.version_info.minor,
"win-amd64" if os.name == "nt" else "linux-x86_64",
)
)[0]
)
except IndexError:
pass
import math
import carla
from dotmap import DotMap
try:
import pygame
except ImportError:
raise RuntimeError("cannot import pygame, make sure pygame package is installed")
try:
import numpy as np
except ImportError:
raise RuntimeError("cannot import numpy, make sure numpy package is installed")
try:
import queue
except ImportError:
import Queue as queue
from agents.navigation.agent import Agent, AgentState
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.navigation.local_planner import LocalPlanner
from agents.tools.misc import ( # , is_within_distance, compute_distance
compute_magnitude_angle,
is_within_distance_ahead,
)
def is_within_distance(
target_location,
current_location,
orientation,
max_distance,
d_angle_th_up,
d_angle_th_low=0,
):
"""
Check if a target object is within a certain distance from a reference object.
A vehicle in front would be something around 0 deg, while one behind around 180 deg.
:param target_location: location of the target object
:param current_location: location of the reference object
:param orientation: orientation of the reference object
:param max_distance: maximum allowed distance
    :param d_angle_th_up: upper threshold for angle
    :param d_angle_th_low: lower threshold for angle (optional, default is 0)
    :return: True if the target object is within max_distance and the angle to it
        lies between the two thresholds
"""
target_vector = np.array(
[target_location.x - current_location.x, target_location.y - current_location.y]
)
norm_target = np.linalg.norm(target_vector)
# If the vector is too short, we can simply stop here
if norm_target < 0.001:
return True
if norm_target > max_distance:
return False
forward_vector = np.array(
[math.cos(math.radians(orientation)), math.sin(math.radians(orientation))]
)
d_angle = math.degrees(
math.acos(
np.clip(np.dot(forward_vector, target_vector) / norm_target, -1.0, 1.0)
)
)
return d_angle_th_low < d_angle < d_angle_th_up
def compute_distance(location_1, location_2):
"""
Euclidean distance between 3D points
:param location_1, location_2: 3D points
"""
x = location_2.x - location_1.x
y = location_2.y - location_1.y
z = location_2.z - location_1.z
norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
return norm
class CustomGlobalRoutePlanner(GlobalRoutePlanner):
def __init__(self, dao):
super(CustomGlobalRoutePlanner, self).__init__(dao=dao)
"""
def compute_distance(self, origin, destination):
node_list = super(CustomGlobalRoutePlanner, self)._path_search(origin=origin, destination=destination)
distance = 0.0
for idx in range(len(node_list) - 1):
distance += (super(CustomGlobalRoutePlanner, self)._distance_heuristic(node_list[idx], node_list[idx+1]))
# print ('Distance: ', distance)
return distance
"""
def compute_direction_velocities(self, origin, velocity, destination):
node_list = super(CustomGlobalRoutePlanner, self)._path_search(
origin=origin, destination=destination
)
origin_xy = np.array([origin.x, origin.y])
velocity_xy = np.array([velocity.x, velocity.y])
first_node_xy = self._graph.nodes[node_list[1]]["vertex"]
first_node_xy = np.array([first_node_xy[0], first_node_xy[1]])
target_direction_vector = first_node_xy - origin_xy
target_unit_vector = np.array(target_direction_vector) / np.linalg.norm(
target_direction_vector
)
vel_s = np.dot(velocity_xy, target_unit_vector)
unit_velocity = velocity_xy / (np.linalg.norm(velocity_xy) + 1e-8)
angle = np.arccos(np.clip(np.dot(unit_velocity, target_unit_vector), -1.0, 1.0))
vel_perp = np.linalg.norm(velocity_xy) * np.sin(angle)
return vel_s, vel_perp
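    # compute_distance approximates the remaining route length as the straight-line
    # distance from the vehicle to the first route node plus the sum of the graph's
    # pairwise distance heuristic over consecutive nodes of the planned path.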
def compute_distance(self, origin, destination):
node_list = super(CustomGlobalRoutePlanner, self)._path_search(
origin=origin, destination=destination
)
# print('Node list:', node_list)
first_node_xy = self._graph.nodes[node_list[0]]["vertex"]
# print('Diff:', origin, first_node_xy)
# distance = 0.0
distances = []
distances.append(
np.linalg.norm(
np.array([origin.x, origin.y, 0.0]) - np.array(first_node_xy)
)
)
for idx in range(len(node_list) - 1):
distances.append(
super(CustomGlobalRoutePlanner, self)._distance_heuristic(
node_list[idx], node_list[idx + 1]
)
)
# print('Distances:', distances)
# import pdb; pdb.set_trace()
return np.sum(distances)
class CarlaSyncMode(object):
"""
Context manager to synchronize output from different sensors. Synchronous
mode is enabled as long as we are inside this context
with CarlaSyncMode(world, sensors) as sync_mode:
while True:
data = sync_mode.tick(timeout=1.0)
"""
def __init__(self, world, *sensors, **kwargs):
self.world = world
self.sensors = sensors
self.frame = None
self.delta_seconds = 1.0 / kwargs.get("fps", 20)
self._queues = []
self._settings = None
self.start()
def start(self):
self._settings = self.world.get_settings()
self.frame = self.world.apply_settings(
carla.WorldSettings(
no_rendering_mode=False,
synchronous_mode=True,
fixed_delta_seconds=self.delta_seconds,
)
)
def make_queue(register_event):
q = queue.Queue()
register_event(q.put)
self._queues.append(q)
make_queue(self.world.on_tick)
for sensor in self.sensors:
make_queue(sensor.listen)
def tick(self, timeout):
self.frame = self.world.tick()
data = [self._retrieve_data(q, timeout) for q in self._queues]
assert all(x.frame == self.frame for x in data)
return data
    def __enter__(self):  # enables the documented "with CarlaSyncMode(...)" usage
        return self
    def __exit__(self, *args, **kwargs):
        self.world.apply_settings(self._settings)
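    # _retrieve_data drains a sensor queue until it finds the sample whose frame
    # id matches the snapshot produced by the last world.tick(), so every element
    # returned by tick() belongs to the same simulation frame.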
def _retrieve_data(self, sensor_queue, timeout):
while True:
data = sensor_queue.get(timeout=timeout)
if data.frame == self.frame:
return data
def draw_image(surface, image, blend=False):
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if blend:
image_surface.set_alpha(100)
surface.blit(image_surface, (0, 0))
def get_font():
fonts = [x for x in pygame.font.get_fonts()]
default_font = "ubuntumono"
font = default_font if default_font in fonts else fonts[0]
font = pygame.font.match_font(font)
return pygame.font.Font(font, 14)
def should_quit():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
return True
return False
def clamp(value, minimum=0.0, maximum=100.0):
return max(minimum, min(value, maximum))
class Sun(object):
def __init__(self, azimuth, altitude):
self.azimuth = azimuth
self.altitude = altitude
self._t = 0.0
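    # tick advances a simple day/night model: the azimuth sweeps around at
    # 0.25 deg per delta_seconds, while the altitude follows a cosine between
    # roughly 20 and 90 degrees driven by the internal phase self._t.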
def tick(self, delta_seconds):
self._t += 0.008 * delta_seconds
self._t %= 2.0 * math.pi
self.azimuth += 0.25 * delta_seconds
self.azimuth %= 360.0
min_alt, max_alt = [20, 90]
self.altitude = 0.5 * (max_alt + min_alt) + 0.5 * (
max_alt - min_alt
) * math.cos(self._t)
def __str__(self):
return "Sun(alt: %.2f, azm: %.2f)" % (self.altitude, self.azimuth)
class Storm(object):
def __init__(self, precipitation):
self._t = precipitation if precipitation > 0.0 else -50.0
self._increasing = True
self.clouds = 0.0
self.rain = 0.0
self.wetness = 0.0
self.puddles = 0.0
self.wind = 0.0
self.fog = 0.0
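    # tick drives a single phase variable self._t back and forth between -250 and
    # 100; cloudiness, rain, puddles, wetness, wind and fog are all derived from it
    # by clamping shifted/scaled copies, so the storm builds up and then clears.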
def tick(self, delta_seconds):
delta = (1.3 if self._increasing else -1.3) * delta_seconds
self._t = clamp(delta + self._t, -250.0, 100.0)
        self.clouds = clamp(self._t + 40.0, 0.0, 60.0)
self.rain = clamp(self._t, 0.0, 80.0)
delay = -10.0 if self._increasing else 90.0
self.puddles = clamp(self._t + delay, 0.0, 85.0)
self.wetness = clamp(self._t * 5, 0.0, 100.0)
self.wind = 5.0 if self.clouds <= 20 else 90 if self.clouds >= 70 else 40
self.fog = clamp(self._t - 10, 0.0, 30.0)
if self._t == -250.0:
self._increasing = True
if self._t == 100.0:
self._increasing = False
def __str__(self):
return "Storm(clouds=%d%%, rain=%d%%, wind=%d%%)" % (
self.clouds,
self.rain,
self.wind,
)
class Weather(object):
def __init__(self, world, changing_weather_speed):
self.world = world
self.reset()
self.weather = world.get_weather()
self.changing_weather_speed = changing_weather_speed
self._sun = Sun(self.weather.sun_azimuth_angle, self.weather.sun_altitude_angle)
self._storm = Storm(self.weather.precipitation)
def reset(self):
weather_params = carla.WeatherParameters(sun_altitude_angle=90.0)
self.world.set_weather(weather_params)
def tick(self):
self._sun.tick(self.changing_weather_speed)
self._storm.tick(self.changing_weather_speed)
self.weather.cloudiness = self._storm.clouds
self.weather.precipitation = self._storm.rain
self.weather.precipitation_deposits = self._storm.puddles
self.weather.wind_intensity = self._storm.wind
self.weather.fog_density = self._storm.fog
self.weather.wetness = self._storm.wetness
self.weather.sun_azimuth_angle = self._sun.azimuth
self.weather.sun_altitude_angle = self._sun.altitude
self.world.set_weather(self.weather)
def __str__(self):
return "%s %s" % (self._sun, self._storm)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--vision_size", type=int, default=84)
parser.add_argument("--vision_fov", type=int, default=90)
parser.add_argument("--weather", default=False, action="store_true")
parser.add_argument("--frame_skip", type=int, default=1),
parser.add_argument("--steps", type=int, default=100000)
parser.add_argument("--multiagent", default=False, action="store_true"),
parser.add_argument("--lane", type=int, default=0)
parser.add_argument("--lights", default=False, action="store_true")
args = parser.parse_args()
return args
class CarlaEnv(object):
def __init__(self, args):
self.render_display = False
self.record_display = False
self.record_vision = True
self.record_dir = None #'/nfs/kun1/users/aviralkumar/carla_data/'
self.vision_size = args.vision_size
self.vision_fov = args.vision_fov
self.changing_weather_speed = float(args.weather)
self.frame_skip = args.frame_skip
self.max_episode_steps = args.steps
self.multiagent = args.multiagent
self.start_lane = args.lane
self.follow_traffic_lights = args.lights
if self.record_display:
assert self.render_display
self.actor_list = []
if self.render_display:
pygame.init()
self.render_display = pygame.display.set_mode(
(800, 600), pygame.HWSURFACE | pygame.DOUBLEBUF
)
self.font = get_font()
self.clock = pygame.time.Clock()
self.client = carla.Client("localhost", 2000)
self.client.set_timeout(2.0)
self.world = self.client.get_world()
self.map = self.world.get_map()
## Define the route planner
self.route_planner_dao = GlobalRoutePlannerDAO(
self.map, sampling_resolution=0.1
)
self.route_planner = CustomGlobalRoutePlanner(self.route_planner_dao)
# tests specific to map 4:
if self.start_lane and self.map.name != "Town04":
raise NotImplementedError
# remove old vehicles and sensors (in case they survived)
self.world.tick()
actor_list = self.world.get_actors()
for vehicle in actor_list.filter("*vehicle*"):
print("Warning: removing old vehicle")
vehicle.destroy()
for sensor in actor_list.filter("*sensor*"):
print("Warning: removing old sensor")
sensor.destroy()
self.vehicle = None
self.vehicles_list = [] # their ids
self.reset_vehicle() # creates self.vehicle
self.actor_list.append(self.vehicle)
blueprint_library = self.world.get_blueprint_library()
if self.render_display:
self.camera_display = self.world.spawn_actor(
blueprint_library.find("sensor.camera.rgb"),
carla.Transform(
carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)
),
attach_to=self.vehicle,
)
self.actor_list.append(self.camera_display)
bp = blueprint_library.find("sensor.camera.rgb")
bp.set_attribute("image_size_x", str(self.vision_size))
bp.set_attribute("image_size_y", str(self.vision_size))
bp.set_attribute("fov", str(self.vision_fov))
location = carla.Location(x=1.6, z=1.7)
self.camera_vision = self.world.spawn_actor(
bp,
carla.Transform(location, carla.Rotation(yaw=0.0)),
attach_to=self.vehicle,
)
self.actor_list.append(self.camera_vision)
if self.record_display or self.record_vision:
if self.record_dir is None:
self.record_dir = "carla-{}-{}x{}-fov{}".format(
self.map.name.lower(),
self.vision_size,
self.vision_size,
self.vision_fov,
)
if self.frame_skip > 1:
self.record_dir += "-{}".format(self.frame_skip)
if self.changing_weather_speed > 0.0:
self.record_dir += "-weather"
if self.multiagent:
self.record_dir += "-mutiagent"
if self.follow_traffic_lights:
self.record_dir += "-lights"
self.record_dir += "-{}k".format(self.max_episode_steps // 1000)
now = datetime.datetime.now()
self.record_dir += now.strftime("-%Y-%m-%d-%H-%M-%S")
if not os.path.exists(self.record_dir):
os.mkdir(self.record_dir)
if self.render_display:
self.sync_mode = CarlaSyncMode(
self.world, self.camera_display, self.camera_vision, fps=20
)
else:
self.sync_mode = CarlaSyncMode(self.world, self.camera_vision, fps=20)
# weather
self.weather = Weather(self.world, self.changing_weather_speed)
# dummy variables, to match deep mind control's APIs
low = -1.0
high = 1.0
self.action_space = DotMap()
self.action_space.low.min = lambda: low
self.action_space.high.max = lambda: high
self.action_space.shape = [2]
self.observation_space = DotMap()
self.observation_space.shape = (3, self.vision_size, self.vision_size)
self.observation_space.dtype = np.dtype(np.uint8)
self.reward_range = None
self.metadata = None
self.action_space.sample = lambda: np.random.uniform(
low=low, high=high, size=self.action_space.shape[0]
).astype(np.float32)
# roaming carla agent
self.agent = None
self.world.tick()
self.reset_init() # creates self.agent
## Initialize the route planner
self.route_planner.setup()
## Collision detection
self._proximity_threshold = 10.0
self._traffic_light_threshold = 5.0
self.actor_list = self.world.get_actors()
for idx in range(len(self.actor_list)):
print(idx, self.actor_list[idx])
# import ipdb; ipdb.set_trace()
self.vehicle_list = self.actor_list.filter("*vehicle*")
self.lights_list = self.actor_list.filter("*traffic_light*")
self.object_list = self.actor_list.filter("*traffic.*")
## Initialize the route planner
self.route_planner.setup()
## The map is deterministic so for reward relabelling, we can
## instantiate the environment object and then query the distance function
## in the env, which directly uses this map_graph, and we need not save it.
self._map_graph = self.route_planner._graph
## This is a dummy for the target location, we can make this an input
## to the env in RL code.
self.target_location = carla.Location(x=-13.473097, y=134.311234, z=-0.010433)
## Now reset the env once
self.reset()
def reset_init(self):
self.reset_vehicle()
self.world.tick()
self.reset_other_vehicles()
self.world.tick()
self.agent = RoamingAgent(
self.vehicle, follow_traffic_lights=self.follow_traffic_lights
)
self.count = 0
self.ts = int(time.time())
def reset(self):
# get obs:
obs, _, _, _ = self.step()
return obs
def reset_vehicle(self):
if self.map.name == "Town04":
start_lane = -1
start_x = 5.0
vehicle_init_transform = carla.Transform(
carla.Location(x=start_x, y=0, z=0.1), carla.Rotation(yaw=-90)
)
else:
init_transforms = self.world.get_map().get_spawn_points()
vehicle_init_transform = random.choice(init_transforms)
# TODO(aviral): start lane not defined for town, also for the town, we may not want to have
# the lane following reward, so it should be okay.
if self.vehicle is None: # then create the ego vehicle
blueprint_library = self.world.get_blueprint_library()
vehicle_blueprint = blueprint_library.find("vehicle.audi.a2")
self.vehicle = self.world.spawn_actor(
vehicle_blueprint, vehicle_init_transform
)
self.vehicle.set_transform(vehicle_init_transform)
self.vehicle.set_velocity(carla.Vector3D())
self.vehicle.set_angular_velocity(carla.Vector3D())
def reset_other_vehicles(self):
if not self.multiagent:
return
# clear out old vehicles
self.client.apply_batch(
[carla.command.DestroyActor(x) for x in self.vehicles_list]
)
self.world.tick()
self.vehicles_list = []
traffic_manager = self.client.get_trafficmanager()
traffic_manager.set_global_distance_to_leading_vehicle(2.0)
traffic_manager.set_synchronous_mode(True)
blueprints = self.world.get_blueprint_library().filter("vehicle.*")
blueprints = [
x for x in blueprints if int(x.get_attribute("number_of_wheels")) == 4
]
num_vehicles = 20
if self.map.name == "Town04":
road_id = 47
road_length = 117.0
init_transforms = []
for _ in range(num_vehicles):
lane_id = random.choice([-1, -2, -3, -4])
vehicle_s = np.random.uniform(road_length) # length of road 47
init_transforms.append(
self.map.get_waypoint_xodr(road_id, lane_id, vehicle_s).transform
)
else:
init_transforms = self.world.get_map().get_spawn_points()
init_transforms = np.random.choice(init_transforms, num_vehicles)
# --------------
# Spawn vehicles
# --------------
batch = []
for transform in init_transforms:
transform.location.z += (
0.1 # otherwise can collide with the road it starts on
)
blueprint = random.choice(blueprints)
if blueprint.has_attribute("color"):
color = random.choice(
blueprint.get_attribute("color").recommended_values
)
blueprint.set_attribute("color", color)
if blueprint.has_attribute("driver_id"):
driver_id = random.choice(
blueprint.get_attribute("driver_id").recommended_values
)
blueprint.set_attribute("driver_id", driver_id)
blueprint.set_attribute("role_name", "autopilot")
batch.append(
carla.command.SpawnActor(blueprint, transform).then(
carla.command.SetAutopilot(carla.command.FutureActor, True)
)
)
        # apply the spawn batch once, keeping only successfully spawned actor ids
        for response in self.client.apply_batch_sync(batch, False):
            if response.error:
                pass
            else:
                self.vehicles_list.append(response.actor_id)
traffic_manager.global_percentage_speed_difference(30.0)
def compute_action(self):
return self.agent.run_step()
def step(self, action=None, traffic_light_color=""):
rewards = []
for _ in range(self.frame_skip): # default 1
next_obs, reward, done, info = self._simulator_step(
action, traffic_light_color
)
rewards.append(reward)
if done:
break
return next_obs, np.mean(rewards), done, info
def _is_vehicle_hazard(self, vehicle, vehicle_list):
"""
:param vehicle_list: list of potential obstacle to check
:return: a tuple given by (bool_flag, vehicle), where
- bool_flag is True if there is a vehicle ahead blocking us
and False otherwise
- vehicle is the blocker object itself
"""
ego_vehicle_location = vehicle.get_location()
ego_vehicle_waypoint = self.map.get_waypoint(ego_vehicle_location)
for target_vehicle in vehicle_list:
# do not account for the ego vehicle
if target_vehicle.id == vehicle.id:
continue
# if the object is not in our lane it's not an obstacle
target_vehicle_waypoint = self.map.get_waypoint(
target_vehicle.get_location()
)
if (
target_vehicle_waypoint.road_id != ego_vehicle_waypoint.road_id
or target_vehicle_waypoint.lane_id != ego_vehicle_waypoint.lane_id
):
continue
if is_within_distance_ahead(
target_vehicle.get_transform(),
vehicle.get_transform(),
self._proximity_threshold / 10.0,
):
return (True, -1.0, target_vehicle)
return (False, 0.0, None)
def _is_object_hazard(self, vehicle, object_list):
"""
:param vehicle_list: list of potential obstacle to check
:return: a tuple given by (bool_flag, vehicle), where
- bool_flag is True if there is a vehicle ahead blocking us
and False otherwise
- vehicle is the blocker object itself
"""
ego_vehicle_location = vehicle.get_location()
ego_vehicle_waypoint = self.map.get_waypoint(ego_vehicle_location)
for target_vehicle in object_list:
# do not account for the ego vehicle
if target_vehicle.id == vehicle.id:
continue
# if the object is not in our lane it's not an obstacle
target_vehicle_waypoint = self.map.get_waypoint(
target_vehicle.get_location()
)
if (
target_vehicle_waypoint.road_id != ego_vehicle_waypoint.road_id
or target_vehicle_waypoint.lane_id != ego_vehicle_waypoint.lane_id
):
continue
if is_within_distance_ahead(
target_vehicle.get_transform(),
vehicle.get_transform(),
self._proximity_threshold / 40.0,
):
return (True, -1.0, target_vehicle)
return (False, 0.0, None)
def _is_light_red(self, vehicle):
"""
Method to check if there is a red light affecting us. This version of
the method is compatible with both European and US style traffic lights.
:param lights_list: list containing TrafficLight objects
:return: a tuple given by (bool_flag, traffic_light), where
- bool_flag is True if there is a traffic light in RED
affecting us and False otherwise
- traffic_light is the object itself or None if there is no
red traffic light affecting us
"""
ego_vehicle_location = vehicle.get_location()
ego_vehicle_waypoint = self.map.get_waypoint(ego_vehicle_location)
for traffic_light in self.lights_list:
object_location = self._get_trafficlight_trigger_location(traffic_light)
object_waypoint = self.map.get_waypoint(object_location)
if object_waypoint.road_id != ego_vehicle_waypoint.road_id:
continue
ve_dir = ego_vehicle_waypoint.transform.get_forward_vector()
wp_dir = object_waypoint.transform.get_forward_vector()
dot_ve_wp = ve_dir.x * wp_dir.x + ve_dir.y * wp_dir.y + ve_dir.z * wp_dir.z
if dot_ve_wp < 0:
continue
if is_within_distance_ahead(
object_waypoint.transform,
vehicle.get_transform(),
self._traffic_light_threshold,
):
if traffic_light.state == carla.TrafficLightState.Red:
return (True, -0.1, traffic_light)
return (False, 0.0, None)
def _get_trafficlight_trigger_location(
self, traffic_light
): # pylint: disable=no-self-use
"""
Calculates the yaw of the waypoint that represents the trigger volume of the traffic light
"""
def rotate_point(point, radians):
"""
rotate a given point by a given angle
"""
rotated_x = math.cos(radians) * point.x - math.sin(radians) * point.y
            rotated_y = math.sin(radians) * point.x + math.cos(radians) * point.y
return carla.Vector3D(rotated_x, rotated_y, point.z)
base_transform = traffic_light.get_transform()
base_rot = base_transform.rotation.yaw
area_loc = base_transform.transform(traffic_light.trigger_volume.location)
area_ext = traffic_light.trigger_volume.extent
point = rotate_point(carla.Vector3D(0, 0, area_ext.z), math.radians(base_rot))
point_location = area_loc + carla.Location(x=point.x, y=point.y)
return carla.Location(point_location.x, point_location.y, point_location.z)
def _get_collision_reward(self, vehicle):
vehicle_hazard, reward, vehicle_id = self._is_vehicle_hazard(
vehicle, self.vehicle_list
)
return vehicle_hazard, reward
def _get_traffic_light_reward(self, vehicle):
traffic_light_hazard, reward, traffic_light_id = self._is_light_red(vehicle)
return traffic_light_hazard, 0.0
def _get_object_collided_reward(self, vehicle):
object_hazard, reward, object_id = self._is_object_hazard(
vehicle, self.object_list
)
return object_hazard, reward
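    # goal_reaching_reward combines the velocity component towards the target
    # (vel_forward, from the route planner) with a large penalty when a vehicle
    # collision hazard is detected; traffic-light and object terms are computed
    # and logged in reward_dict but currently excluded from total_reward.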
def goal_reaching_reward(self, vehicle):
# Now we will write goal_reaching_rewards
vehicle_location = vehicle.get_location()
target_location = self.target_location
# This is the distance computation
"""
dist = self.route_planner.compute_distance(vehicle_location, target_location)
base_reward = -1.0 * dist
collided_done, collision_reward = self._get_collision_reward(vehicle)
traffic_light_done, traffic_light_reward = self._get_traffic_light_reward(vehicle)
object_collided_done, object_collided_reward = self._get_object_collided_reward(vehicle)
total_reward = base_reward + 100 * collision_reward + 100 * traffic_light_reward + 100.0 * object_collided_reward
"""
vehicle_velocity = vehicle.get_velocity()
dist = self.route_planner.compute_distance(vehicle_location, target_location)
vel_forward, vel_perp = self.route_planner.compute_direction_velocities(
vehicle_location, vehicle_velocity, target_location
)
# print('[GoalReachReward] VehLoc: %s Target: %s Dist: %s VelF:%s' % (str(vehicle_location), str(target_location), str(dist), str(vel_forward)))
# base_reward = -1.0 * (dist / 100.0) + 5.0
base_reward = vel_forward
collided_done, collision_reward = self._get_collision_reward(vehicle)
traffic_light_done, traffic_light_reward = self._get_traffic_light_reward(
vehicle
)
object_collided_done, object_collided_reward = self._get_object_collided_reward(
vehicle
)
total_reward = (
base_reward + 100 * collision_reward
) # + 100 * traffic_light_reward + 100.0 * object_collided_reward
reward_dict = dict()
reward_dict["collision"] = collision_reward
reward_dict["traffic_light"] = traffic_light_reward
reward_dict["object_collision"] = object_collided_reward
reward_dict["base_reward"] = base_reward
reward_dict["vel_forward"] = vel_forward
reward_dict["vel_perp"] = vel_perp
done_dict = dict()
done_dict["collided_done"] = collided_done
done_dict["traffic_light_done"] = traffic_light_done
done_dict["object_collided_done"] = object_collided_done
return total_reward, reward_dict, done_dict
def _simulator_step(self, action, traffic_light_color):
if self.render_display:
if should_quit():
return
self.clock.tick()
if action is None:
throttle, steer, brake = 0.0, 0.0, 0.0
else:
throttle, steer, brake = action.throttle, action.steer, action.brake
# throttle = clamp(throttle, minimum=0.005, maximum=0.995) + np.random.uniform(low=-0.003, high=0.003)
# steer = clamp(steer, minimum=-0.995, maximum=0.995) + np.random.uniform(low=-0.003, high=0.003)
# brake = clamp(brake, minimum=0.005, maximum=0.995) + np.random.uniform(low=-0.003, high=0.003)
vehicle_control = carla.VehicleControl(
throttle=throttle, # [0,1]
steer=steer, # [-1,1]
brake=brake, # [0,1]
hand_brake=False,
reverse=False,
manual_gear_shift=False,
)
self.vehicle.apply_control(vehicle_control)
# Advance the simulation and wait for the data.
if self.render_display:
snapshot, display_image, vision_image = self.sync_mode.tick(timeout=2.0)
else:
snapshot, vision_image = self.sync_mode.tick(timeout=2.0)
# Weather evolves
self.weather.tick()
# Draw the display.
if self.render_display:
draw_image(self.render_display, display_image)
self.render_display.blit(
self.font.render("Frame %d" % self.count, True, (255, 255, 255)),
(8, 10),
)
self.render_display.blit(
self.font.render(
"Control: %5.2f thottle, %5.2f steer, %5.2f brake"
% (throttle, steer, brake),
True,
(255, 255, 255),
),
(8, 28),
)
self.render_display.blit(
self.font.render(
"Traffic light: " + traffic_light_color, True, (255, 255, 255)
),
(8, 46),
)
self.render_display.blit(
self.font.render(str(self.weather), True, (255, 255, 255)), (8, 64)
)
pygame.display.flip()
# Format rl image
bgra = np.array(vision_image.raw_data).reshape(
self.vision_size, self.vision_size, 4
) # BGRA format
bgr = bgra[:, :, :3] # BGR format (84 x 84 x 3)
rgb = np.flip(bgr, axis=2) # RGB format (84 x 84 x 3)
reward, reward_dict, done_dict = self.goal_reaching_reward(self.vehicle)
if self.render_display and self.record_display:
image_name = os.path.join(self.record_dir, "display%08d.jpg" % self.count)
pygame.image.save(self.render_display, image_name)
# # Can animate with:
# ffmpeg -r 20 -pattern_type glob -i 'display*.jpg' carla.mp4
if self.record_vision:
image_name = os.path.join(
self.record_dir, "vision_%d_%08d.png" % (self.ts, self.count)
)
im = Image.fromarray(rgb)
            # add any metadata you like into the image before we save it:
metadata = PngInfo()
# control
metadata.add_text("control_throttle", str(throttle))
metadata.add_text("control_steer", str(steer))
metadata.add_text("control_brake", str(brake))
metadata.add_text("control_repeat", str(self.frame_skip))
# acceleration
acceleration = self.vehicle.get_acceleration()
metadata.add_text("acceleration_x", str(acceleration.x))
metadata.add_text("acceleration_y", str(acceleration.y))
metadata.add_text("acceleration_z", str(acceleration.z))
# angular velocity
angular_velocity = self.vehicle.get_angular_velocity()
metadata.add_text("angular_velocity_x", str(angular_velocity.x))
metadata.add_text("angular_velocity_y", str(angular_velocity.y))
metadata.add_text("angular_velocity_z", str(angular_velocity.z))
# location
location = self.vehicle.get_location()
print("Location:", location)
metadata.add_text("location_x", str(location.x))
metadata.add_text("location_y", str(location.y))
metadata.add_text("location_z", str(location.z))
# rotation
rotation = self.vehicle.get_transform().rotation
metadata.add_text("rotation_pitch", str(rotation.pitch))
metadata.add_text("rotation_yaw", str(rotation.yaw))
metadata.add_text("rotation_roll", str(rotation.roll))
forward_vector = rotation.get_forward_vector()
metadata.add_text("forward_vector_x", str(forward_vector.x))
metadata.add_text("forward_vector_y", str(forward_vector.y))
metadata.add_text("forward_vector_z", str(forward_vector.z))
# velocity
velocity = self.vehicle.get_velocity()
metadata.add_text("velocity_x", str(velocity.x))
metadata.add_text("velocity_y", str(velocity.y))
metadata.add_text("velocity_z", str(velocity.z))
# weather
metadata.add_text(
"weather_cloudiness ", str(self.weather.weather.cloudiness)
)
metadata.add_text(
"weather_precipitation", str(self.weather.weather.precipitation)
)
metadata.add_text(
"weather_precipitation_deposits",
str(self.weather.weather.precipitation_deposits),
)
metadata.add_text(
"weather_wind_intensity", str(self.weather.weather.wind_intensity)
)
metadata.add_text(
"weather_fog_density", str(self.weather.weather.fog_density)
)
metadata.add_text("weather_wetness", str(self.weather.weather.wetness))
metadata.add_text(
"weather_sun_azimuth_angle", str(self.weather.weather.sun_azimuth_angle)
)
# settings
metadata.add_text("settings_map", self.map.name)
metadata.add_text("settings_vision_size", str(self.vision_size))
metadata.add_text("settings_vision_fov", str(self.vision_fov))
metadata.add_text(
"settings_changing_weather_speed", str(self.changing_weather_speed)
)
metadata.add_text("settings_multiagent", str(self.multiagent))
# traffic lights
metadata.add_text("traffic_lights_color", "UNLABELED")
metadata.add_text("reward", str(reward))
## Add in reward dict
for key in reward_dict:
metadata.add_text("reward_" + str(key), str(reward_dict[key]))
for key in done_dict:
metadata.add_text("done_" + str(key), str(done_dict[key]))
## Save the target location as well
metadata.add_text("target_location_x", str(self.target_location.x))
metadata.add_text("target_location_y", str(self.target_location.y))
metadata.add_text("target_location_z", str(self.target_location.z))
im.save(image_name, "PNG", pnginfo=metadata)
# # To read these images later, you can run something like this:
# from PIL.PngImagePlugin import PngImageFile
# im = PngImageFile("vision00001234.png")
            # throttle = float(im.text['control_throttle'])  # range [0, 1]
            # steer = float(im.text['control_steer'])  # range [-1, 1]
            # brake = float(im.text['control_brake'])  # range [0, 1]
            # lights = im.text['traffic_lights_color']  # [NONE, JUNCTION, RED, YELLOW, GREEN]
self.count += 1
next_obs = rgb # 84 x 84 x 3
# # To inspect images, run:
# import pdb; pdb.set_trace()
# import matplotlib.pyplot as plt
# plt.imshow(next_obs)
# plt.show()
done = False # self.count >= self.max_episode_steps
if done:
print(
"Episode success: I've reached the episode horizon ({}).".format(
self.max_episode_steps
)
)
# print ('reward: ', reward)
info = reward_dict
info.update(done_dict)
done = False
for key in done_dict:
done = done or done_dict[key]
return next_obs, reward, done, info
def finish(self):
print("destroying actors.")
for actor in self.actor_list:
actor.destroy()
print("\ndestroying %d vehicles" % len(self.vehicles_list))
self.client.apply_batch(
[carla.command.DestroyActor(x) for x in self.vehicles_list]
)
time.sleep(0.5)
pygame.quit()
print("done.")
class LocalPlannerModified(LocalPlanner):
def __del__(self):
pass # otherwise it deletes our vehicle object
def run_step(self):
return super().run_step(
debug=False
        ) # otherwise by default shows waypoints that interfere with our camera
class RoamingAgent(Agent):
"""
RoamingAgent implements a basic agent that navigates scenes making random
choices when facing an intersection.
This agent respects traffic lights and other vehicles.
"""
def __init__(self, vehicle, follow_traffic_lights=True):
"""
        :param vehicle: actor to apply the local planner logic onto
"""
super(RoamingAgent, self).__init__(vehicle)
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
self._local_planner = LocalPlannerModified(self._vehicle)
self._follow_traffic_lights = follow_traffic_lights
def run_step(self):
"""
Execute one step of navigation.
        :return: (carla.VehicleControl, traffic light color string)
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
actor_list = self._world.get_actors()
vehicle_list = actor_list.filter("*vehicle*")
lights_list = actor_list.filter("*traffic_light*")
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
traffic_light_color = self._is_light_red(lights_list)
if traffic_light_color == "RED" and self._follow_traffic_lights:
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
if hazard_detected:
control = self.emergency_stop()
else:
self._state = AgentState.NAVIGATING
# standard local planner behavior
control = self._local_planner.run_step()
return control, traffic_light_color
# override case class
def _is_light_red_europe_style(self, lights_list):
"""
This method is specialized to check European style traffic lights.
Only suitable for Towns 03 -- 07.
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
traffic_light_color = "NONE" # default, if no traffic lights are seen
for traffic_light in lights_list:
object_waypoint = self._map.get_waypoint(traffic_light.get_location())
if (
object_waypoint.road_id != ego_vehicle_waypoint.road_id
or object_waypoint.lane_id != ego_vehicle_waypoint.lane_id
):
continue
if is_within_distance_ahead(
traffic_light.get_transform(),
self._vehicle.get_transform(),
self._proximity_threshold,
):
if traffic_light.state == carla.TrafficLightState.Red:
return "RED"
elif traffic_light.state == carla.TrafficLightState.Yellow:
traffic_light_color = "YELLOW"
elif traffic_light.state == carla.TrafficLightState.Green:
                    if traffic_light_color != "YELLOW": # (more severe)
traffic_light_color = "GREEN"
else:
import pdb
pdb.set_trace()
# investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
return traffic_light_color
# override case class
def _is_light_red_us_style(self, lights_list, debug=False):
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
traffic_light_color = "NONE" # default, if no traffic lights are seen
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
return "JUNCTION"
if self._local_planner.target_waypoint is not None:
if self._local_planner.target_waypoint.is_junction:
min_angle = 180.0
sel_magnitude = 0.0
sel_traffic_light = None
for traffic_light in lights_list:
loc = traffic_light.get_location()
magnitude, angle = compute_magnitude_angle(
loc,
ego_vehicle_location,
self._vehicle.get_transform().rotation.yaw,
)
if magnitude < 60.0 and angle < min(25.0, min_angle):
sel_magnitude = magnitude
sel_traffic_light = traffic_light
min_angle = angle
if sel_traffic_light is not None:
if debug:
print(
"=== Magnitude = {} | Angle = {} | ID = {}".format(
sel_magnitude, min_angle, sel_traffic_light.id
)
)
if self._last_traffic_light is None:
self._last_traffic_light = sel_traffic_light
if self._last_traffic_light.state == carla.TrafficLightState.Red:
return "RED"
elif (
self._last_traffic_light.state == carla.TrafficLightState.Yellow
):
traffic_light_color = "YELLOW"
elif (
self._last_traffic_light.state == carla.TrafficLightState.Green
):
                        if traffic_light_color != "YELLOW": # (more severe)
traffic_light_color = "GREEN"
else:
import pdb
pdb.set_trace()
# investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
else:
self._last_traffic_light = None
return traffic_light_color
if __name__ == "__main__":
# example call:
# ./PythonAPI/util/config.py --map Town01 --delta-seconds 0.05
# python PythonAPI/carla/agents/navigation/data_collection_agent.py --vision_size 256 --vision_fov 90 --steps 10000 --weather --lights
args = parse_args()
env = CarlaEnv(args)
curr_steps = 0
try:
done = False
while not done:
curr_steps += 1
action, traffic_light_color = env.compute_action()
next_obs, reward, done, info = env.step(action, traffic_light_color)
print(
"Reward: ",
reward,
"Done: ",
done,
"Location: ",
env.vehicle.get_location(),
)
if done:
# env.reset_init()
# env.reset()
done = False
if curr_steps % 5000 == 4999:
env.reset_init()
env.reset()
finally:
env.finish()
| 48,118 | 36.859166 | 152 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/carla/town_agent.py | # A baseline town agent.
import numpy as np
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
class RoamingAgent(Agent):
"""
RoamingAgent implements a basic agent that navigates scenes making random
choices when facing an intersection.
This agent respects traffic lights and other vehicles.
NOTE: need to re-create after each env reset
"""
def __init__(self, env):
"""
        :param env: environment whose vehicle the local planner logic is applied to
"""
vehicle = env.vehicle
follow_traffic_lights = env.follow_traffic_lights
super(RoamingAgent, self).__init__(vehicle)
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
self._local_planner = LocalPlannerModified(self._vehicle)
self._follow_traffic_lights = follow_traffic_lights
    def compute_action(self):
        # run_step below already converts the planner output into the
        # [throttle, steer] / [-brake, steer] action array, so just delegate
        return self.run_step()
def run_step(self):
"""
Execute one step of navigation.
        :return: np.ndarray [throttle, steer]; a negative first entry encodes braking
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
actor_list = self._world.get_actors()
vehicle_list = actor_list.filter("*vehicle*")
lights_list = actor_list.filter("*traffic_light*")
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
        # note: this simplified agent does not check traffic lights
if hazard_detected:
control = self.emergency_stop()
else:
self._state = AgentState.NAVIGATING
# standard local planner behavior
control = self._local_planner.run_step()
throttle = control.throttle
brake = control.brake
steer = control.steer
# print('tbsl:', throttle, brake, steer, traffic_light)
if brake == 0.0:
return np.array([throttle, steer])
else:
return np.array([-brake, steer])
class LocalPlannerModified(LocalPlanner):
def __del__(self):
pass # otherwise it deletes our vehicle object
def run_step(self):
return super().run_step(
debug=False
        ) # otherwise by default shows waypoints that interfere with our camera
class DummyTownAgent(Agent):
"""
A simple agent for the town driving task.
    If the car is currently facing along a path towards the goal, drive forward.
    If the car would start driving away, apply maximum brakes.
"""
def __init__(self, env):
"""
        :param env: environment whose vehicle the local planner logic is applied to
"""
self.env = env
super(DummyTownAgent, self).__init__(self.env.vehicle)
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
self._local_planner = LocalPlannerModified(self._vehicle)
def compute_action(self):
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
actor_list = self._world.get_actors()
vehicle_list = actor_list.filter("*vehicle*")
lights_list = actor_list.filter("*traffic_light*")
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
rotation = self.env.vehicle.get_transform().rotation
forward_vector = rotation.get_forward_vector()
origin = self.env.vehicle.get_location()
destination = self.env.target_location
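        # Project the vehicle's forward vector onto the direction of the first
        # node of the planned route to the goal; a negative projection (vel_s < 0)
        # means the car is pointed away from the goal, which is treated as a
        # hazard below and triggers an emergency stop.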
node_list = self.env.route_planner._path_search(
origin=origin, destination=destination
)
origin_xy = np.array([origin.x, origin.y])
forward_xy = np.array([forward_vector.x, forward_vector.y])
first_node_xy = self.env.route_planner._graph.nodes[node_list[0]]["vertex"]
first_node_xy = np.array([first_node_xy[0], first_node_xy[1]])
target_direction_vector = first_node_xy - origin_xy
target_unit_vector = np.array(target_direction_vector) / np.linalg.norm(
target_direction_vector
)
vel_s = np.dot(forward_xy, target_unit_vector)
if vel_s < 0:
hazard_detected = True
if hazard_detected:
control = self.emergency_stop()
else:
self._state = AgentState.NAVIGATING
# standard local planner behavior
control = self._local_planner.run_step()
throttle = control.throttle
brake = control.brake
steer = control.steer
# print('tbsl:', throttle, brake, steer, traffic_light)
if brake == 0.0:
return np.array([throttle, steer])
else:
return np.array([-brake, steer])
| 5,410 | 34.136364 | 97 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/flow/__init__.py | import os
from copy import deepcopy
import flow
import flow.envs
import gym
from flow.controllers import (
RLController,
SimCarFollowingController,
SimLaneChangeController,
)
from flow.controllers.car_following_models import IDMController
from flow.controllers.routing_controllers import ContinuousRouter
from flow.core.params import (
EnvParams,
InFlows,
InitialConfig,
NetParams,
SumoCarFollowingParams,
SumoLaneChangeParams,
SumoParams,
TrafficLightParams,
VehicleParams,
)
from flow.envs import BayBridgeEnv, TrafficLightGridPOEnv, WaveAttenuationPOEnv
from flow.envs.ring.accel import AccelEnv
from flow.networks.ring import ADDITIONAL_NET_PARAMS, RingNetwork
from flow.utils.registry import make_create_env
from gym.envs.registration import register
from d4rl_alt import offline_env
from d4rl_alt.flow import bottleneck, merge, traffic_light_grid
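# flow_register builds a Flow network + environment from a flow_params dict (as
# produced by ring_env / merge.gen_env / etc. below) and wraps it in an
# OfflineEnvWrapper so it exposes the d4rl-style offline dataset interface.
# Minimal sketch of direct use (hypothetical; requires flow and SUMO installed):
#   env = flow_register(ring_env(render=False))
#   obs = env.reset()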
def flow_register(flow_params, render=None, **kwargs):
exp_tag = flow_params["exp_tag"]
env_params = flow_params["env"]
net_params = flow_params["net"]
env_class = flow_params["env_name"]
initial_config = flow_params.get("initial", InitialConfig())
traffic_lights = flow_params.get("tls", TrafficLightParams())
sim_params = deepcopy(flow_params["sim"])
vehicles = deepcopy(flow_params["veh"])
sim_params.render = render or sim_params.render
if isinstance(flow_params["network"], str):
print(
"""Passing of strings for network will be deprecated.
Please pass the Network instance instead."""
)
module = __import__("flow.networks", fromlist=[flow_params["network"]])
network_class = getattr(module, flow_params["network"])
else:
network_class = flow_params["network"]
network = network_class(
name=exp_tag,
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
traffic_lights=traffic_lights,
)
flow_env = env_class(
env_params=env_params,
sim_params=sim_params,
network=network,
simulator=flow_params["simulator"],
)
env = offline_env.OfflineEnvWrapper(flow_env, **kwargs)
return env
def ring_env(render="drgb"):
name = "ring"
network_name = RingNetwork
env_name = WaveAttenuationPOEnv
net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS)
initial_config = InitialConfig(spacing="uniform", shuffle=False)
vehicles = VehicleParams()
vehicles.add(
"human",
acceleration_controller=(IDMController, {}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=21,
)
vehicles.add(
veh_id="rl",
acceleration_controller=(RLController, {}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=1,
)
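    # 21 human-driven IDM vehicles plus a single RL-controlled vehicle on a closed
    # ring: the classic wave-attenuation setup (WaveAttenuationPOEnv) where the RL
    # car learns to dampen stop-and-go waves.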
sim_params = SumoParams(sim_step=0.5, render=render, save_render=True)
HORIZON = 100
env_params = EnvParams(
# length of one rollout
horizon=HORIZON,
additional_params={
# maximum acceleration of autonomous vehicles
"max_accel": 1,
# maximum deceleration of autonomous vehicles
"max_decel": 1,
# bounds on the ranges of ring road lengths the autonomous vehicle
# is trained on
"ring_length": [220, 270],
},
)
flow_params = dict(
exp_tag=name,
env_name=env_name,
network=network_name,
simulator="traci",
sim=sim_params,
env=env_params,
net=net_params,
veh=vehicles,
initial=initial_config,
)
return flow_params
RING_RANDOM_SCORE = -165.22
RING_EXPERT_SCORE = 24.42
register(
id="flow-ring-v0",
entry_point="d4rl_alt.flow:flow_register",
max_episode_steps=500,
kwargs={
"flow_params": ring_env(render=False),
"dataset_url": None,
"ref_min_score": RING_RANDOM_SCORE,
"ref_max_score": RING_EXPERT_SCORE,
},
)
register(
id="flow-ring-render-v0",
entry_point="d4rl_alt.flow:flow_register",
max_episode_steps=500,
kwargs={
"flow_params": ring_env(render="drgb"),
"dataset_url": None,
"ref_min_score": RING_RANDOM_SCORE,
"ref_max_score": RING_EXPERT_SCORE,
},
)
register(
id="flow-ring-random-v0",
entry_point="d4rl_alt.flow:flow_register",
max_episode_steps=500,
kwargs={
"flow_params": ring_env(render=False),
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/flow/flow-ring-v0-random.hdf5",
"ref_min_score": RING_RANDOM_SCORE,
"ref_max_score": RING_EXPERT_SCORE,
},
)
register(
id="flow-ring-controller-v0",
entry_point="d4rl_alt.flow:flow_register",
max_episode_steps=500,
kwargs={
"flow_params": ring_env(render=False),
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/flow/flow-ring-v0-idm.hdf5",
"ref_min_score": RING_RANDOM_SCORE,
"ref_max_score": RING_EXPERT_SCORE,
},
)
MERGE_RANDOM_SCORE = 118.67993
MERGE_EXPERT_SCORE = 330.03179
register(
id="flow-merge-v0",
entry_point="d4rl_alt.flow:flow_register",
max_episode_steps=750,
kwargs={
"flow_params": merge.gen_env(render=False),
"dataset_url": None,
"ref_min_score": MERGE_RANDOM_SCORE,
"ref_max_score": MERGE_EXPERT_SCORE,
},
)
register(
id="flow-merge-render-v0",
entry_point="d4rl_alt.flow:flow_register",
max_episode_steps=750,
kwargs={
"flow_params": merge.gen_env(render="drgb"),
"dataset_url": None,
"ref_min_score": MERGE_RANDOM_SCORE,
"ref_max_score": MERGE_EXPERT_SCORE,
},
)
register(
id="flow-merge-random-v0",
entry_point="d4rl_alt.flow:flow_register",
max_episode_steps=750,
kwargs={
"flow_params": merge.gen_env(render=False),
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/flow/flow-merge-v0-random.hdf5",
"ref_min_score": MERGE_RANDOM_SCORE,
"ref_max_score": MERGE_EXPERT_SCORE,
},
)
register(
id="flow-merge-controller-v0",
entry_point="d4rl_alt.flow:flow_register",
max_episode_steps=750,
kwargs={
"flow_params": merge.gen_env(render=False),
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/flow/flow-merge-v0-idm.hdf5",
"ref_min_score": MERGE_RANDOM_SCORE,
"ref_max_score": MERGE_EXPERT_SCORE,
},
)
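# Minimal usage sketch for the registered environments (assumes flow, SUMO and
# gym are installed and, for the *-random/*-controller variants, that the dataset
# URLs above are reachable):
#   import gym
#   import d4rl_alt.flow  # noqa: F401 (runs the register() calls above)
#   env = gym.make("flow-ring-v0")
#   obs = env.reset()
#   obs, rew, done, info = env.step(env.action_space.sample())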
| 6,530 | 27.030043 | 106 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/flow/bottleneck.py | import flow
import flow.envs
from flow.controllers import (
RLController,
SimCarFollowingController,
SimLaneChangeController,
)
from flow.controllers.routing_controllers import ContinuousRouter
from flow.core.params import (
EnvParams,
InFlows,
InitialConfig,
NetParams,
SumoCarFollowingParams,
SumoLaneChangeParams,
SumoParams,
TrafficLightParams,
VehicleParams,
)
from flow.envs import BottleneckDesiredVelocityEnv
from flow.networks import BottleneckNetwork
from flow.networks.ring import ADDITIONAL_NET_PARAMS
def bottleneck(render="drgb"):
# time horizon of a single rollout
HORIZON = 1500
SCALING = 1
NUM_LANES = 4 * SCALING # number of lanes in the widest highway
DISABLE_TB = True
DISABLE_RAMP_METER = True
AV_FRAC = 0.10
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
routing_controller=(ContinuousRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode=9,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode=0,
),
num_vehicles=1 * SCALING,
)
vehicles.add(
veh_id="rl",
acceleration_controller=(RLController, {}),
routing_controller=(ContinuousRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode=9,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode=0,
),
num_vehicles=1 * SCALING,
)
controlled_segments = [
("1", 1, False),
("2", 2, True),
("3", 2, True),
("4", 2, True),
("5", 1, False),
]
num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)]
additional_env_params = {
"target_velocity": 40,
"disable_tb": True,
"disable_ramp_metering": True,
"controlled_segments": controlled_segments,
"symmetric": False,
"observed_segments": num_observed_segments,
"reset_inflow": False,
"lane_change_duration": 5,
"max_accel": 3,
"max_decel": 3,
"inflow_range": [1200, 2500],
}
# flow rate
flow_rate = 2500 * SCALING
    # split the total inflow between human-driven and autonomous (RL) vehicles
inflow = InFlows()
inflow.add(
veh_type="human",
edge="1",
vehs_per_hour=flow_rate * (1 - AV_FRAC),
depart_lane="random",
depart_speed=10,
)
inflow.add(
veh_type="rl",
edge="1",
vehs_per_hour=flow_rate * AV_FRAC,
depart_lane="random",
depart_speed=10,
)
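    # With flow_rate = 2500 * SCALING and AV_FRAC = 0.10, the upstream edge "1"
    # receives roughly 2250 human and 250 autonomous vehicles per hour (for
    # SCALING = 1), both spawned on random lanes at 10 m/s.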
traffic_lights = TrafficLightParams()
if not DISABLE_TB:
traffic_lights.add(node_id="2")
if not DISABLE_RAMP_METER:
traffic_lights.add(node_id="3")
additional_net_params = {"scaling": SCALING, "speed_limit": 23}
net_params = NetParams(inflows=inflow, additional_params=additional_net_params)
flow_params = dict(
# name of the experiment
exp_tag="bottleneck_0",
# name of the flow environment the experiment is running on
env_name=BottleneckDesiredVelocityEnv,
# name of the network class the experiment is running on
network=BottleneckNetwork,
# simulator that is used by the experiment
simulator="traci",
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
sim_step=0.5,
render=render,
save_render=True,
print_warnings=False,
restart_instance=True,
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
warmup_steps=40,
sims_per_step=1,
horizon=HORIZON,
additional_params=additional_env_params,
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
inflows=inflow,
additional_params=additional_net_params,
),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=InitialConfig(
spacing="uniform",
min_gap=5,
lanes_distribution=float("inf"),
edges_distribution=["2", "3", "4", "5"],
),
# traffic lights to be introduced to specific nodes (see
# flow.core.params.TrafficLightParams)
tls=traffic_lights,
)
return flow_params
| 4,745 | 29.037975 | 83 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/flow/merge.py | """Open merge example.
Trains a small percentage of RL vehicles to dissipate shockwaves caused by
on-ramp merge to a single lane open highway network.
"""
from copy import deepcopy
from flow.controllers import RLController, SimCarFollowingController
from flow.core.params import (
EnvParams,
InFlows,
InitialConfig,
NetParams,
SumoCarFollowingParams,
SumoParams,
VehicleParams,
)
from flow.envs import MergePOEnv
from flow.networks import MergeNetwork
from flow.networks.merge import ADDITIONAL_NET_PARAMS
def gen_env(render="drgb"):
# time horizon of a single rollout
HORIZON = 750
# inflow rate at the highway
FLOW_RATE = 2000
# percent of autonomous vehicles
RL_PENETRATION = 0.1
# num_rl term (see ADDITIONAL_ENV_PARAMs)
NUM_RL = 5
# We consider a highway network with an upstream merging lane producing
# shockwaves
additional_net_params = deepcopy(ADDITIONAL_NET_PARAMS)
additional_net_params["merge_lanes"] = 1
additional_net_params["highway_lanes"] = 1
additional_net_params["pre_merge_length"] = 500
# RL vehicles constitute 5% of the total number of vehicles
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(SimCarFollowingController, {}),
car_following_params=SumoCarFollowingParams(
speed_mode=9,
),
num_vehicles=5,
)
vehicles.add(
veh_id="rl",
acceleration_controller=(RLController, {}),
car_following_params=SumoCarFollowingParams(
speed_mode=9,
),
num_vehicles=0,
)
# Vehicles are introduced from both sides of merge, with RL vehicles entering
# from the highway portion as well
inflow = InFlows()
inflow.add(
veh_type="human",
edge="inflow_highway",
vehs_per_hour=(1 - RL_PENETRATION) * FLOW_RATE,
depart_lane="free",
depart_speed=10,
)
inflow.add(
veh_type="rl",
edge="inflow_highway",
vehs_per_hour=RL_PENETRATION * FLOW_RATE,
depart_lane="free",
depart_speed=10,
)
inflow.add(
veh_type="human",
edge="inflow_merge",
vehs_per_hour=100,
depart_lane="free",
depart_speed=7.5,
)
flow_params = dict(
# name of the experiment
exp_tag="merge_0",
# name of the flow environment the experiment is running on
env_name=MergePOEnv,
# name of the network class the experiment is running on
network=MergeNetwork,
# simulator that is used by the experiment
simulator="traci",
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
restart_instance=True, sim_step=0.5, render=render, save_render=True
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=HORIZON,
sims_per_step=2,
warmup_steps=0,
additional_params={
"max_accel": 1.5,
"max_decel": 1.5,
"target_velocity": 20,
"num_rl": NUM_RL,
},
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
inflows=inflow,
additional_params=additional_net_params,
),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=InitialConfig(),
)
return flow_params
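# A minimal consumption sketch (illustrative only, not exercised by this
# module): the dict returned by gen_env() follows flow's standard
# "flow_params" format, so it can be turned into a gym-style environment via
# flow's registry helper. This assumes flow.utils.registry.make_create_env is
# importable and that a SUMO installation is available.
if __name__ == "__main__":
    from flow.utils.registry import make_create_env
    params = gen_env(render=False)
    create_env, env_name = make_create_env(params=params, version=0)
    env = create_env()
    obs = env.reset()
    print(env_name, obs.shape)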
| 3,824 | 30.352459 | 81 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/flow/traffic_light_grid.py | """Traffic Light Grid example."""
from flow.controllers import GridRouter, SimCarFollowingController
from flow.core.params import (
EnvParams,
InFlows,
InitialConfig,
NetParams,
SumoCarFollowingParams,
SumoParams,
VehicleParams,
)
from flow.envs import TrafficLightGridBenchmarkEnv
from flow.networks import TrafficLightGridNetwork
def gen_env(render="drgb"):
# time horizon of a single rollout
HORIZON = 400
# inflow rate of vehicles at every edge
EDGE_INFLOW = 300
# enter speed for departing vehicles
V_ENTER = 30
# number of row of bidirectional lanes
N_ROWS = 3
# number of columns of bidirectional lanes
N_COLUMNS = 3
# length of inner edges in the grid network
INNER_LENGTH = 300
# length of final edge in route
LONG_LENGTH = 100
# length of edges that vehicles start on
SHORT_LENGTH = 300
# number of vehicles originating in the left, right, top, and bottom edges
N_LEFT, N_RIGHT, N_TOP, N_BOTTOM = 1, 1, 1, 1
    # we place a sufficient number of vehicles to ensure they conform with the
# total number specified above. We also use a "right_of_way" speed mode to
# support traffic light compliance
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(SimCarFollowingController, {}),
car_following_params=SumoCarFollowingParams(
min_gap=2.5,
max_speed=V_ENTER,
decel=7.5, # avoid collisions at emergency stops
speed_mode="right_of_way",
),
routing_controller=(GridRouter, {}),
num_vehicles=(N_LEFT + N_RIGHT) * N_COLUMNS + (N_BOTTOM + N_TOP) * N_ROWS,
)
    # inflows of vehicles are placed on all outer edges (listed here)
outer_edges = []
outer_edges += ["left{}_{}".format(N_ROWS, i) for i in range(N_COLUMNS)]
outer_edges += ["right0_{}".format(i) for i in range(N_ROWS)]
outer_edges += ["bot{}_0".format(i) for i in range(N_ROWS)]
outer_edges += ["top{}_{}".format(i, N_COLUMNS) for i in range(N_ROWS)]
    # equal inflows for each edge (as dictated by the EDGE_INFLOW constant)
inflow = InFlows()
for edge in outer_edges:
inflow.add(
veh_type="human",
edge=edge,
vehs_per_hour=EDGE_INFLOW,
depart_lane="free",
depart_speed=V_ENTER,
)
flow_params = dict(
# name of the experiment
exp_tag="grid_0",
# name of the flow environment the experiment is running on
env_name=TrafficLightGridBenchmarkEnv,
# name of the network class the experiment is running on
network=TrafficLightGridNetwork,
# simulator that is used by the experiment
simulator="traci",
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
restart_instance=True,
sim_step=1,
render=render,
save_render=True,
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=HORIZON,
additional_params={
"target_velocity": 50,
"switch_time": 3,
"num_observed": 2,
"discrete": False,
"tl_type": "actuated",
},
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
inflows=inflow,
additional_params={
"speed_limit": V_ENTER + 5,
"grid_array": {
"short_length": SHORT_LENGTH,
"inner_length": INNER_LENGTH,
"long_length": LONG_LENGTH,
"row_num": N_ROWS,
"col_num": N_COLUMNS,
"cars_left": N_LEFT,
"cars_right": N_RIGHT,
"cars_top": N_TOP,
"cars_bot": N_BOTTOM,
},
"horizontal_lanes": 1,
"vertical_lanes": 1,
},
),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=InitialConfig(
spacing="custom",
shuffle=True,
),
)
return flow_params
| 4,593 | 34.338462 | 82 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/__init__.py | from gym.envs.registration import register
register(
id="minigrid-fourrooms-v0",
entry_point="d4rl_alt.gym_minigrid.envs.fourrooms:FourRoomsEnv",
max_episode_steps=50,
kwargs={
"ref_min_score": 0.01442,
"ref_max_score": 2.89685,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/minigrid/minigrid4rooms.hdf5",
},
)
register(
id="minigrid-fourrooms-random-v0",
entry_point="d4rl_alt.gym_minigrid.envs.fourrooms:FourRoomsEnv",
max_episode_steps=50,
kwargs={
"ref_min_score": 0.01442,
"ref_max_score": 2.89685,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/minigrid/minigrid4rooms_random.hdf5",
},
)
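# A minimal consumption sketch (hypothetical script, not part of this module):
#
#   import gym
#   import d4rl_alt.gym_minigrid  # noqa: F401 -- runs the register() calls above
#
#   env = gym.make("minigrid-fourrooms-v0")
#   dataset = env.get_dataset()  # d4rl OfflineEnv API; fetches the hdf5 above
#   print(dataset["observations"].shape)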
| 723 | 29.166667 | 111 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/fourroom_controller.py | import random
import numpy as np
from d4rl_alt.pointmaze import q_iteration
from d4rl_alt.pointmaze.gridcraft import grid_env, grid_spec
MAZE = (
"###################\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOOOOOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "####O#########O####\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "#OOOOOOOOOOOOOOOOO#\\"
+ "#OOOOOOOO#OOOOOOOO#\\"
+ "###################\\"
)
# NLUDR -> RDLU
TRANSLATE_DIRECTION = {
0: None,
1: 3, # 3,
2: 1, # 1,
3: 2, # 2,
4: 0, # 0,
}
RIGHT = 1
LEFT = 0
FORWARD = 2
class FourRoomController(object):
def __init__(self):
self.env = grid_env.GridEnv(grid_spec.spec_from_string(MAZE))
self.reset_locations = list(zip(*np.where(self.env.gs.spec == grid_spec.EMPTY)))
def sample_target(self):
return random.choice(self.reset_locations)
def set_target(self, target):
self.target = target
self.env.gs[target] = grid_spec.REWARD
self.q_values = q_iteration.q_iteration(
env=self.env, num_itrs=32, discount=0.99
)
self.env.gs[target] = grid_spec.EMPTY
def get_action(self, pos, orientation):
if tuple(pos) == tuple(self.target):
done = True
else:
done = False
env_pos_idx = self.env.gs.xy_to_idx(pos)
qvalues = self.q_values[env_pos_idx]
direction = TRANSLATE_DIRECTION[np.argmax(qvalues)]
# tgt_pos, _ = self.env.step_stateless(env_pos_idx, np.argmax(qvalues))
# tgt_pos = self.env.gs.idx_to_xy(tgt_pos)
# print('\tcmd_dir:', direction, np.argmax(qvalues), qvalues, tgt_pos)
# infos = {}
# infos['tgt_pos'] = tgt_pos
if orientation == direction or direction == None:
return FORWARD, done
else:
return get_turn(orientation, direction), done
# RDLU
TURN_DIRS = [
[None, RIGHT, RIGHT, LEFT], # R
[LEFT, None, RIGHT, RIGHT], # D
[RIGHT, LEFT, None, RIGHT], # L
[RIGHT, RIGHT, LEFT, None], # U
]
def get_turn(ori, tgt_ori):
return TURN_DIRS[ori][tgt_ori]
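# A minimal usage sketch (illustrative only): plan toward a sampled target
# cell and query the first action for an arbitrary start pose. A real caller
# would execute the returned action in the environment and re-query with the
# updated position/orientation until `done` is True.
if __name__ == "__main__":
    controller = FourRoomController()
    controller.set_target(controller.sample_target())
    start_pos = controller.reset_locations[0]  # any empty cell works
    orientation = 0  # 0 == facing right (see the RDLU ordering above)
    action, done = controller.get_action(start_pos, orientation)
    print("start:", start_pos, "action:", action, "done:", done)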
| 2,417 | 25.571429 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/minigrid.py | import math
from enum import IntEnum
import gym
import numpy as np
from gym import error, spaces, utils
from gym.utils import seeding
from d4rl_alt import offline_env
from d4rl_alt.gym_minigrid.rendering import *
# Size in pixels of a tile in the full-scale human view
TILE_PIXELS = 32
# Map of color names to RGB values
COLORS = {
"red": np.array([255, 0, 0]),
"green": np.array([0, 255, 0]),
"blue": np.array([0, 0, 255]),
"purple": np.array([112, 39, 195]),
"yellow": np.array([255, 255, 0]),
"grey": np.array([100, 100, 100]),
}
COLOR_NAMES = sorted(list(COLORS.keys()))
# Used to map colors to integers
COLOR_TO_IDX = {"red": 0, "green": 1, "blue": 2, "purple": 3, "yellow": 4, "grey": 5}
IDX_TO_COLOR = dict(zip(COLOR_TO_IDX.values(), COLOR_TO_IDX.keys()))
# Map of object type to integers
OBJECT_TO_IDX = {
"unseen": 0,
"empty": 1,
"wall": 2,
"floor": 3,
"door": 4,
"key": 5,
"ball": 6,
"box": 7,
"goal": 8,
"lava": 9,
"agent": 10,
}
IDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))
# Map of state names to integers
STATE_TO_IDX = {
"open": 0,
"closed": 1,
"locked": 2,
}
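# Taken together, these maps define the compact cell encoding used by
# WorldObj.encode and Grid.encode below: each cell becomes a
# (type, color, state) triple of integers. For example, a locked blue door is
# (OBJECT_TO_IDX["door"], COLOR_TO_IDX["blue"], STATE_TO_IDX["locked"]) ==
# (4, 2, 2), and an empty cell is encoded as (1, 0, 0).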
# Map of agent direction indices to vectors
DIR_TO_VEC = [
# Pointing right (positive X)
np.array((1, 0)),
# Down (positive Y)
np.array((0, 1)),
# Pointing left (negative X)
np.array((-1, 0)),
# Up (negative Y)
np.array((0, -1)),
]
class WorldObj:
"""
Base class for grid world objects
"""
def __init__(self, type, color):
assert type in OBJECT_TO_IDX, type
assert color in COLOR_TO_IDX, color
self.type = type
self.color = color
self.contains = None
# Initial position of the object
self.init_pos = None
# Current position of the object
self.cur_pos = None
def can_overlap(self):
"""Can the agent overlap with this?"""
return False
def can_pickup(self):
"""Can the agent pick this up?"""
return False
def can_contain(self):
"""Can this contain another object?"""
return False
def see_behind(self):
"""Can the agent see behind this object?"""
return True
def toggle(self, env, pos):
"""Method to trigger/toggle an action this object performs"""
return False
def encode(self):
"""Encode the a description of this object as a 3-tuple of integers"""
return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], 0)
@staticmethod
def decode(type_idx, color_idx, state):
"""Create an object from a 3-tuple state description"""
obj_type = IDX_TO_OBJECT[type_idx]
color = IDX_TO_COLOR[color_idx]
if obj_type == "empty" or obj_type == "unseen":
return None
# State, 0: open, 1: closed, 2: locked
is_open = state == 0
is_locked = state == 2
if obj_type == "wall":
v = Wall(color)
elif obj_type == "floor":
v = Floor(color)
elif obj_type == "ball":
v = Ball(color)
elif obj_type == "key":
v = Key(color)
elif obj_type == "box":
v = Box(color)
elif obj_type == "door":
v = Door(color, is_open, is_locked)
elif obj_type == "goal":
v = Goal()
elif obj_type == "lava":
v = Lava()
else:
assert False, "unknown object type in decode '%s'" % objType
return v
def render(self, r):
"""Draw this object with the given renderer"""
raise NotImplementedError
class Goal(WorldObj):
def __init__(self):
super().__init__("goal", "green")
def can_overlap(self):
return True
def render(self, img):
fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])
class Floor(WorldObj):
"""
Colored floor tile the agent can walk over
"""
def __init__(self, color="blue"):
super().__init__("floor", color)
def can_overlap(self):
return True
def render(self, r):
# Give the floor a pale color
c = COLORS[self.color]
r.setLineColor(100, 100, 100, 0)
r.setColor(*c / 2)
r.drawPolygon(
[(1, TILE_PIXELS), (TILE_PIXELS, TILE_PIXELS), (TILE_PIXELS, 1), (1, 1)]
)
class Lava(WorldObj):
def __init__(self):
super().__init__("lava", "red")
def can_overlap(self):
return True
def render(self, img):
c = (255, 128, 0)
# Background color
fill_coords(img, point_in_rect(0, 1, 0, 1), c)
# Little waves
for i in range(3):
ylo = 0.3 + 0.2 * i
yhi = 0.4 + 0.2 * i
fill_coords(img, point_in_line(0.1, ylo, 0.3, yhi, r=0.03), (0, 0, 0))
fill_coords(img, point_in_line(0.3, yhi, 0.5, ylo, r=0.03), (0, 0, 0))
fill_coords(img, point_in_line(0.5, ylo, 0.7, yhi, r=0.03), (0, 0, 0))
fill_coords(img, point_in_line(0.7, yhi, 0.9, ylo, r=0.03), (0, 0, 0))
class Wall(WorldObj):
def __init__(self, color="grey"):
super().__init__("wall", color)
def see_behind(self):
return False
def render(self, img):
fill_coords(img, point_in_rect(0, 1, 0, 1), COLORS[self.color])
class Door(WorldObj):
def __init__(self, color, is_open=False, is_locked=False):
super().__init__("door", color)
self.is_open = is_open
self.is_locked = is_locked
def can_overlap(self):
"""The agent can only walk over this cell when the door is open"""
return self.is_open
def see_behind(self):
return self.is_open
def toggle(self, env, pos):
# If the player has the right key to open the door
if self.is_locked:
if isinstance(env.carrying, Key) and env.carrying.color == self.color:
self.is_locked = False
self.is_open = True
return True
return False
self.is_open = not self.is_open
return True
def encode(self):
"""Encode the a description of this object as a 3-tuple of integers"""
# State, 0: open, 1: closed, 2: locked
if self.is_open:
state = 0
elif self.is_locked:
state = 2
elif not self.is_open:
state = 1
return (OBJECT_TO_IDX[self.type], COLOR_TO_IDX[self.color], state)
def render(self, img):
c = COLORS[self.color]
if self.is_open:
fill_coords(img, point_in_rect(0.88, 1.00, 0.00, 1.00), c)
fill_coords(img, point_in_rect(0.92, 0.96, 0.04, 0.96), (0, 0, 0))
return
# Door frame and door
if self.is_locked:
fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)
fill_coords(img, point_in_rect(0.06, 0.94, 0.06, 0.94), 0.45 * np.array(c))
# Draw key slot
fill_coords(img, point_in_rect(0.52, 0.75, 0.50, 0.56), c)
else:
fill_coords(img, point_in_rect(0.00, 1.00, 0.00, 1.00), c)
fill_coords(img, point_in_rect(0.04, 0.96, 0.04, 0.96), (0, 0, 0))
fill_coords(img, point_in_rect(0.08, 0.92, 0.08, 0.92), c)
fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), (0, 0, 0))
# Draw door handle
fill_coords(img, point_in_circle(cx=0.75, cy=0.50, r=0.08), c)
class Key(WorldObj):
def __init__(self, color="blue"):
super(Key, self).__init__("key", color)
def can_pickup(self):
return True
def render(self, img):
c = COLORS[self.color]
# Vertical quad
fill_coords(img, point_in_rect(0.50, 0.63, 0.31, 0.88), c)
# Teeth
fill_coords(img, point_in_rect(0.38, 0.50, 0.59, 0.66), c)
fill_coords(img, point_in_rect(0.38, 0.50, 0.81, 0.88), c)
# Ring
fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.190), c)
fill_coords(img, point_in_circle(cx=0.56, cy=0.28, r=0.064), (0, 0, 0))
class Ball(WorldObj):
def __init__(self, color="blue"):
super(Ball, self).__init__("ball", color)
def can_pickup(self):
return True
def render(self, img):
fill_coords(img, point_in_circle(0.5, 0.5, 0.31), COLORS[self.color])
class Box(WorldObj):
def __init__(self, color, contains=None):
super(Box, self).__init__("box", color)
self.contains = contains
def can_pickup(self):
return True
def render(self, img):
c = COLORS[self.color]
# Outline
fill_coords(img, point_in_rect(0.12, 0.88, 0.12, 0.88), c)
fill_coords(img, point_in_rect(0.18, 0.82, 0.18, 0.82), (0, 0, 0))
# Horizontal slit
fill_coords(img, point_in_rect(0.16, 0.84, 0.47, 0.53), c)
def toggle(self, env, pos):
# Replace the box by its contents
env.grid.set(*pos, self.contains)
return True
class Grid:
"""
Represent a grid and operations on it
"""
# Static cache of pre-renderer tiles
tile_cache = {}
def __init__(self, width, height):
assert width >= 3
assert height >= 3
self.width = width
self.height = height
self.grid = [None] * width * height
def __contains__(self, key):
if isinstance(key, WorldObj):
for e in self.grid:
if e is key:
return True
elif isinstance(key, tuple):
for e in self.grid:
if e is None:
continue
if (e.color, e.type) == key:
return True
if key[0] is None and key[1] == e.type:
return True
return False
def __eq__(self, other):
grid1 = self.encode()
grid2 = other.encode()
return np.array_equal(grid2, grid1)
def __ne__(self, other):
return not self == other
def copy(self):
from copy import deepcopy
return deepcopy(self)
def set(self, i, j, v):
assert i >= 0 and i < self.width
assert j >= 0 and j < self.height
self.grid[j * self.width + i] = v
def get(self, i, j):
assert i >= 0 and i < self.width
assert j >= 0 and j < self.height
return self.grid[j * self.width + i]
def horz_wall(self, x, y, length=None, obj_type=Wall):
if length is None:
length = self.width - x
for i in range(0, length):
self.set(x + i, y, obj_type())
def vert_wall(self, x, y, length=None, obj_type=Wall):
if length is None:
length = self.height - y
for j in range(0, length):
self.set(x, y + j, obj_type())
def wall_rect(self, x, y, w, h):
self.horz_wall(x, y, w)
self.horz_wall(x, y + h - 1, w)
self.vert_wall(x, y, h)
self.vert_wall(x + w - 1, y, h)
def rotate_left(self):
"""
Rotate the grid to the left (counter-clockwise)
"""
grid = Grid(self.height, self.width)
for i in range(self.width):
for j in range(self.height):
v = self.get(i, j)
grid.set(j, grid.height - 1 - i, v)
return grid
def slice(self, topX, topY, width, height):
"""
Get a subset of the grid
"""
grid = Grid(width, height)
for j in range(0, height):
for i in range(0, width):
x = topX + i
y = topY + j
if x >= 0 and x < self.width and y >= 0 and y < self.height:
v = self.get(x, y)
else:
v = Wall()
grid.set(i, j, v)
return grid
@classmethod
def render_tile(
cls, obj, agent_dir=None, highlight=False, tile_size=TILE_PIXELS, subdivs=3
):
"""
Render a tile and cache the result
"""
# Hash map lookup key for the cache
key = (agent_dir, highlight, tile_size)
key = obj.encode() + key if obj else key
if key in cls.tile_cache:
return cls.tile_cache[key]
img = np.zeros(
shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8
)
# Draw the grid lines (top and left edges)
fill_coords(img, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))
fill_coords(img, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))
if obj != None:
obj.render(img)
# Overlay the agent on top
if agent_dir is not None:
tri_fn = point_in_triangle(
(0.12, 0.19),
(0.87, 0.50),
(0.12, 0.81),
)
# Rotate the agent based on its direction
tri_fn = rotate_fn(tri_fn, cx=0.5, cy=0.5, theta=0.5 * math.pi * agent_dir)
fill_coords(img, tri_fn, (255, 0, 0))
# Highlight the cell if needed
if highlight:
highlight_img(img)
# Downsample the image to perform supersampling/anti-aliasing
img = downsample(img, subdivs)
# Cache the rendered tile
cls.tile_cache[key] = img
return img
def render(self, tile_size, agent_pos=None, agent_dir=None, highlight_mask=None):
"""
Render this grid at a given scale
:param r: target renderer object
:param tile_size: tile size in pixels
"""
if highlight_mask is None:
highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)
# Compute the total grid size
width_px = self.width * tile_size
height_px = self.height * tile_size
img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)
# Render the grid
for j in range(0, self.height):
for i in range(0, self.width):
cell = self.get(i, j)
agent_here = np.array_equal(agent_pos, (i, j))
tile_img = Grid.render_tile(
cell,
agent_dir=agent_dir if agent_here else None,
highlight=highlight_mask[i, j],
tile_size=tile_size,
)
ymin = j * tile_size
ymax = (j + 1) * tile_size
xmin = i * tile_size
xmax = (i + 1) * tile_size
img[ymin:ymax, xmin:xmax, :] = tile_img
return img
def encode(self, vis_mask=None):
"""
Produce a compact numpy encoding of the grid
"""
if vis_mask is None:
vis_mask = np.ones((self.width, self.height), dtype=bool)
array = np.zeros((self.width, self.height, 3), dtype="uint8")
for i in range(self.width):
for j in range(self.height):
if vis_mask[i, j]:
v = self.get(i, j)
if v is None:
array[i, j, 0] = OBJECT_TO_IDX["empty"]
array[i, j, 1] = 0
array[i, j, 2] = 0
else:
array[i, j, :] = v.encode()
return array
@staticmethod
def decode(array):
"""
Decode an array grid encoding back into a grid
"""
width, height, channels = array.shape
assert channels == 3
vis_mask = np.ones(shape=(width, height), dtype=np.bool)
grid = Grid(width, height)
for i in range(width):
for j in range(height):
type_idx, color_idx, state = array[i, j]
v = WorldObj.decode(type_idx, color_idx, state)
grid.set(i, j, v)
vis_mask[i, j] = type_idx != OBJECT_TO_IDX["unseen"]
return grid, vis_mask
def process_vis(grid, agent_pos):
mask = np.zeros(shape=(grid.width, grid.height), dtype=np.bool)
mask[agent_pos[0], agent_pos[1]] = True
for j in reversed(range(0, grid.height)):
for i in range(0, grid.width - 1):
if not mask[i, j]:
continue
cell = grid.get(i, j)
if cell and not cell.see_behind():
continue
mask[i + 1, j] = True
if j > 0:
mask[i + 1, j - 1] = True
mask[i, j - 1] = True
for i in reversed(range(1, grid.width)):
if not mask[i, j]:
continue
cell = grid.get(i, j)
if cell and not cell.see_behind():
continue
mask[i - 1, j] = True
if j > 0:
mask[i - 1, j - 1] = True
mask[i, j - 1] = True
for j in range(0, grid.height):
for i in range(0, grid.width):
if not mask[i, j]:
grid.set(i, j, None)
return mask
class MiniGridEnv(offline_env.OfflineEnv):
"""
2D grid world game environment
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 10}
# Enumeration of possible actions
class Actions(IntEnum):
# Turn left, turn right, move forward
left = 0
right = 1
forward = 2
# Pick up an object
pickup = 3
# Drop an object
drop = 4
# Toggle/activate an object
toggle = 5
# Done completing task
done = 6
def __init__(
self,
grid_size=None,
width=None,
height=None,
max_steps=100,
see_through_walls=False,
seed=1337,
agent_view_size=7,
**kwargs
):
offline_env.OfflineEnv.__init__(self, **kwargs)
# Can't set both grid_size and width/height
if grid_size:
assert width == None and height == None
width = grid_size
height = grid_size
# Action enumeration for this environment
self.actions = MiniGridEnv.Actions
# Actions are discrete integer values
self.action_space = spaces.Discrete(len(self.actions))
# Number of cells (width and height) in the agent view
self.agent_view_size = agent_view_size
# Observations are dictionaries containing an
# encoding of the grid and a textual 'mission' string
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(self.agent_view_size, self.agent_view_size, 3),
dtype="uint8",
)
self.observation_space = spaces.Dict({"image": self.observation_space})
# Range of possible rewards
self.reward_range = (0, 1)
# Window to use for human rendering mode
self.window = None
# Environment configuration
self.width = width
self.height = height
self.max_steps = max_steps
self.see_through_walls = see_through_walls
# Current position and direction of the agent
self.agent_pos = None
self.agent_dir = None
# Initialize the RNG
self.seed(seed=seed)
# Initialize the state
self.reset()
def reset(self):
# Current position and direction of the agent
self.agent_pos = None
self.agent_dir = None
# Generate a new random grid at the start of each episode
# To keep the same grid for each episode, call env.seed() with
# the same seed before calling env.reset()
self._gen_grid(self.width, self.height)
# These fields should be defined by _gen_grid
assert self.agent_pos is not None
assert self.agent_dir is not None
# Check that the agent doesn't overlap with an object
start_cell = self.grid.get(*self.agent_pos)
assert start_cell is None or start_cell.can_overlap()
# Item picked up, being carried, initially nothing
self.carrying = None
# Step count since episode start
self.step_count = 0
# Return first observation
obs = self.gen_obs()
return obs
def seed(self, seed=1337):
# Seed the random number generator
self.np_random, _ = seeding.np_random(seed)
return [seed]
@property
def steps_remaining(self):
return self.max_steps - self.step_count
def __str__(self):
"""
Produce a pretty string of the environment's grid along with the agent.
A grid cell is represented by 2-character string, the first one for
the object and the second one for the color.
"""
# Map of object types to short string
OBJECT_TO_STR = {
"wall": "W",
"floor": "F",
"door": "D",
"key": "K",
"ball": "A",
"box": "B",
"goal": "G",
"lava": "V",
}
# Short string for opened door
        OPENED_DOOR_IDS = "_"
# Map agent's direction to short string
AGENT_DIR_TO_STR = {0: ">", 1: "V", 2: "<", 3: "^"}
str = ""
for j in range(self.grid.height):
for i in range(self.grid.width):
if i == self.agent_pos[0] and j == self.agent_pos[1]:
str += 2 * AGENT_DIR_TO_STR[self.agent_dir]
continue
c = self.grid.get(i, j)
if c == None:
str += " "
continue
if c.type == "door":
if c.is_open:
str += "__"
elif c.is_locked:
str += "L" + c.color[0].upper()
else:
str += "D" + c.color[0].upper()
continue
str += OBJECT_TO_STR[c.type] + c.color[0].upper()
if j < self.grid.height - 1:
str += "\n"
return str
def _gen_grid(self, width, height):
assert False, "_gen_grid needs to be implemented by each environment"
def _reward(self):
"""
Compute the reward to be given upon success
"""
return 1 - 0.9 * (self.step_count / self.max_steps)
def _rand_int(self, low, high):
"""
Generate random integer in [low,high[
"""
return self.np_random.randint(low, high)
def _rand_float(self, low, high):
"""
Generate random float in [low,high[
"""
return self.np_random.uniform(low, high)
def _rand_bool(self):
"""
Generate random boolean value
"""
return self.np_random.randint(0, 2) == 0
def _rand_elem(self, iterable):
"""
Pick a random element in a list
"""
lst = list(iterable)
idx = self._rand_int(0, len(lst))
return lst[idx]
def _rand_subset(self, iterable, num_elems):
"""
Sample a random subset of distinct elements of a list
"""
lst = list(iterable)
assert num_elems <= len(lst)
out = []
while len(out) < num_elems:
elem = self._rand_elem(lst)
lst.remove(elem)
out.append(elem)
return out
def _rand_color(self):
"""
Generate a random color name (string)
"""
return self._rand_elem(COLOR_NAMES)
def _rand_pos(self, xLow, xHigh, yLow, yHigh):
"""
Generate a random (x,y) position tuple
"""
return (
self.np_random.randint(xLow, xHigh),
self.np_random.randint(yLow, yHigh),
)
def place_obj(self, obj, top=None, size=None, reject_fn=None, max_tries=math.inf):
"""
Place an object at an empty position in the grid
:param top: top-left position of the rectangle where to place
:param size: size of the rectangle where to place
:param reject_fn: function to filter out potential positions
"""
if top is None:
top = (0, 0)
else:
top = (max(top[0], 0), max(top[1], 0))
if size is None:
size = (self.grid.width, self.grid.height)
num_tries = 0
while True:
            # This is to handle rare cases where rejection sampling
# gets stuck in an infinite loop
if num_tries > max_tries:
raise RecursionError("rejection sampling failed in place_obj")
num_tries += 1
pos = np.array(
(
self._rand_int(top[0], min(top[0] + size[0], self.grid.width)),
self._rand_int(top[1], min(top[1] + size[1], self.grid.height)),
)
)
# Don't place the object on top of another object
if self.grid.get(*pos) != None:
continue
# Don't place the object where the agent is
if np.array_equal(pos, self.agent_pos):
continue
# Check if there is a filtering criterion
if reject_fn and reject_fn(self, pos):
continue
break
self.grid.set(*pos, obj)
if obj is not None:
obj.init_pos = pos
obj.cur_pos = pos
return pos
def put_obj(self, obj, i, j):
"""
Put an object at a specific position in the grid
"""
self.grid.set(i, j, obj)
obj.init_pos = (i, j)
obj.cur_pos = (i, j)
def place_agent(self, top=None, size=None, rand_dir=True, max_tries=math.inf):
"""
Set the agent's starting point at an empty position in the grid
"""
self.agent_pos = None
pos = self.place_obj(None, top, size, max_tries=max_tries)
self.agent_pos = pos
if rand_dir:
self.agent_dir = self._rand_int(0, 4)
return pos
@property
def dir_vec(self):
"""
Get the direction vector for the agent, pointing in the direction
of forward movement.
"""
assert self.agent_dir >= 0 and self.agent_dir < 4
return DIR_TO_VEC[self.agent_dir]
@property
def right_vec(self):
"""
Get the vector pointing to the right of the agent.
"""
dx, dy = self.dir_vec
return np.array((-dy, dx))
@property
def front_pos(self):
"""
Get the position of the cell that is right in front of the agent
"""
return self.agent_pos + self.dir_vec
def get_view_coords(self, i, j):
"""
Translate and rotate absolute grid coordinates (i, j) into the
agent's partially observable view (sub-grid). Note that the resulting
coordinates may be negative or outside of the agent's view size.
"""
ax, ay = self.agent_pos
dx, dy = self.dir_vec
rx, ry = self.right_vec
# Compute the absolute coordinates of the top-left view corner
sz = self.agent_view_size
hs = self.agent_view_size // 2
tx = ax + (dx * (sz - 1)) - (rx * hs)
ty = ay + (dy * (sz - 1)) - (ry * hs)
lx = i - tx
ly = j - ty
# Project the coordinates of the object relative to the top-left
# corner onto the agent's own coordinate system
vx = rx * lx + ry * ly
vy = -(dx * lx + dy * ly)
return vx, vy
def get_view_exts(self):
"""
Get the extents of the square set of tiles visible to the agent
Note: the bottom extent indices are not included in the set
"""
# Facing right
if self.agent_dir == 0:
topX = self.agent_pos[0]
topY = self.agent_pos[1] - self.agent_view_size // 2
# Facing down
elif self.agent_dir == 1:
topX = self.agent_pos[0] - self.agent_view_size // 2
topY = self.agent_pos[1]
# Facing left
elif self.agent_dir == 2:
topX = self.agent_pos[0] - self.agent_view_size + 1
topY = self.agent_pos[1] - self.agent_view_size // 2
# Facing up
elif self.agent_dir == 3:
topX = self.agent_pos[0] - self.agent_view_size // 2
topY = self.agent_pos[1] - self.agent_view_size + 1
else:
assert False, "invalid agent direction"
botX = topX + self.agent_view_size
botY = topY + self.agent_view_size
return (topX, topY, botX, botY)
def relative_coords(self, x, y):
"""
Check if a grid position belongs to the agent's field of view, and returns the corresponding coordinates
"""
vx, vy = self.get_view_coords(x, y)
if vx < 0 or vy < 0 or vx >= self.agent_view_size or vy >= self.agent_view_size:
return None
return vx, vy
def in_view(self, x, y):
"""
check if a grid position is visible to the agent
"""
return self.relative_coords(x, y) is not None
def agent_sees(self, x, y):
"""
Check if a non-empty grid position is visible to the agent
"""
coordinates = self.relative_coords(x, y)
if coordinates is None:
return False
vx, vy = coordinates
obs = self.gen_obs()
obs_grid, _ = Grid.decode(obs["image"])
obs_cell = obs_grid.get(vx, vy)
world_cell = self.grid.get(x, y)
return obs_cell is not None and obs_cell.type == world_cell.type
def step(self, action):
self.step_count += 1
reward = 0
done = False
# Get the position in front of the agent
fwd_pos = self.front_pos
# Get the contents of the cell in front of the agent
fwd_cell = self.grid.get(*fwd_pos)
# Rotate left
if action == self.actions.left:
self.agent_dir -= 1
if self.agent_dir < 0:
self.agent_dir += 4
# Rotate right
elif action == self.actions.right:
self.agent_dir = (self.agent_dir + 1) % 4
# Move forward
elif action == self.actions.forward:
if fwd_cell == None or fwd_cell.can_overlap():
self.agent_pos = fwd_pos
if fwd_cell != None and fwd_cell.type == "goal":
done = True
reward = self._reward()
if fwd_cell != None and fwd_cell.type == "lava":
done = True
# Pick up an object
elif action == self.actions.pickup:
if fwd_cell and fwd_cell.can_pickup():
if self.carrying is None:
self.carrying = fwd_cell
self.carrying.cur_pos = np.array([-1, -1])
self.grid.set(*fwd_pos, None)
# Drop an object
elif action == self.actions.drop:
if not fwd_cell and self.carrying:
self.grid.set(*fwd_pos, self.carrying)
self.carrying.cur_pos = fwd_pos
self.carrying = None
# Toggle/activate an object
elif action == self.actions.toggle:
if fwd_cell:
fwd_cell.toggle(self, fwd_pos)
# Done action (not used by default)
elif action == self.actions.done:
pass
else:
assert False, "unknown action"
if self.step_count >= self.max_steps:
done = True
obs = self.gen_obs()
return obs, reward, done, {}
def gen_obs_grid(self):
"""
Generate the sub-grid observed by the agent.
This method also outputs a visibility mask telling us which grid
cells the agent can actually see.
"""
topX, topY, botX, botY = self.get_view_exts()
grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)
for i in range(self.agent_dir + 1):
grid = grid.rotate_left()
# Process occluders and visibility
# Note that this incurs some performance cost
if not self.see_through_walls:
vis_mask = grid.process_vis(
agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1)
)
else:
vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool)
# Make it so the agent sees what it's carrying
# We do this by placing the carried object at the agent's position
# in the agent's partially observable view
agent_pos = grid.width // 2, grid.height - 1
if self.carrying:
grid.set(*agent_pos, self.carrying)
else:
grid.set(*agent_pos, None)
return grid, vis_mask
def gen_obs(self):
"""
Generate the agent's view (partially observable, low-resolution encoding)
"""
grid, vis_mask = self.gen_obs_grid()
# Encode the partially observable view into a numpy array
image = grid.encode(vis_mask)
assert hasattr(
self, "mission"
), "environments must define a textual mission string"
# Observations are dictionaries containing:
# - an image (partially observable view of the environment)
# - the agent's direction/orientation (acting as a compass)
# - a textual mission string (instructions for the agent)
obs = {"image": image, "direction": self.agent_dir, "mission": self.mission}
return obs
def get_obs_render(self, obs, tile_size=TILE_PIXELS // 2):
"""
Render an agent observation for visualization
"""
grid, vis_mask = Grid.decode(obs)
# Render the whole grid
img = grid.render(
tile_size,
agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1),
agent_dir=3,
highlight_mask=vis_mask,
)
return img
def render(self, mode="human", close=False, highlight=True, tile_size=TILE_PIXELS):
"""
Render the whole-grid human view
"""
if close:
if self.window:
self.window.close()
return
if mode == "human" and not self.window:
import d4rl_alt.gym_minigrid.window
self.window = d4rl_alt.gym_minigrid.window.Window("gym_minigrid")
self.window.show(block=False)
# Compute which cells are visible to the agent
_, vis_mask = self.gen_obs_grid()
# Compute the world coordinates of the bottom-left corner
# of the agent's view area
f_vec = self.dir_vec
r_vec = self.right_vec
top_left = (
self.agent_pos
+ f_vec * (self.agent_view_size - 1)
- r_vec * (self.agent_view_size // 2)
)
# Mask of which cells to highlight
highlight_mask = np.zeros(shape=(self.width, self.height), dtype=np.bool)
# For each cell in the visibility mask
for vis_j in range(0, self.agent_view_size):
for vis_i in range(0, self.agent_view_size):
# If this cell is not visible, don't highlight it
if not vis_mask[vis_i, vis_j]:
continue
# Compute the world coordinates of this cell
abs_i, abs_j = top_left - (f_vec * vis_j) + (r_vec * vis_i)
if abs_i < 0 or abs_i >= self.width:
continue
if abs_j < 0 or abs_j >= self.height:
continue
# Mark this cell to be highlighted
highlight_mask[abs_i, abs_j] = True
# Render the whole grid
img = self.grid.render(
tile_size,
self.agent_pos,
self.agent_dir,
highlight_mask=highlight_mask if highlight else None,
)
if mode == "human":
self.window.show_img(img)
self.window.set_caption(self.mission)
return img
| 36,217 | 27.540583 | 112 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/register.py | from gym.envs.registration import register as gym_register
env_list = []
def register(id, entry_point, reward_threshold=0.95):
assert id.startswith("MiniGrid-")
assert id not in env_list
# Register the environment with OpenAI gym
gym_register(id=id, entry_point=entry_point, reward_threshold=reward_threshold)
# Add the environment to the set
env_list.append(id)
| 392 | 25.2 | 83 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/rendering.py | import math
import numpy as np
def downsample(img, factor):
"""
Downsample an image along both dimensions by some factor
"""
assert img.shape[0] % factor == 0
assert img.shape[1] % factor == 0
img = img.reshape(
[img.shape[0] // factor, factor, img.shape[1] // factor, factor, 3]
)
img = img.mean(axis=3)
img = img.mean(axis=1)
return img
def fill_coords(img, fn, color):
"""
Fill pixels of an image with coordinates matching a filter function
"""
for y in range(img.shape[0]):
for x in range(img.shape[1]):
yf = (y + 0.5) / img.shape[0]
xf = (x + 0.5) / img.shape[1]
if fn(xf, yf):
img[y, x] = color
return img
def rotate_fn(fin, cx, cy, theta):
def fout(x, y):
x = x - cx
y = y - cy
x2 = cx + x * math.cos(-theta) - y * math.sin(-theta)
y2 = cy + y * math.cos(-theta) + x * math.sin(-theta)
return fin(x2, y2)
return fout
def point_in_line(x0, y0, x1, y1, r):
p0 = np.array([x0, y0])
p1 = np.array([x1, y1])
dir = p1 - p0
dist = np.linalg.norm(dir)
dir = dir / dist
xmin = min(x0, x1) - r
xmax = max(x0, x1) + r
ymin = min(y0, y1) - r
ymax = max(y0, y1) + r
def fn(x, y):
# Fast, early escape test
if x < xmin or x > xmax or y < ymin or y > ymax:
return False
q = np.array([x, y])
pq = q - p0
# Closest point on line
a = np.dot(pq, dir)
a = np.clip(a, 0, dist)
p = p0 + a * dir
dist_to_line = np.linalg.norm(q - p)
return dist_to_line <= r
return fn
def point_in_circle(cx, cy, r):
def fn(x, y):
return (x - cx) * (x - cx) + (y - cy) * (y - cy) <= r * r
return fn
def point_in_rect(xmin, xmax, ymin, ymax):
def fn(x, y):
return x >= xmin and x <= xmax and y >= ymin and y <= ymax
return fn
def point_in_triangle(a, b, c):
a = np.array(a)
b = np.array(b)
c = np.array(c)
def fn(x, y):
v0 = c - a
v1 = b - a
v2 = np.array((x, y)) - a
# Compute dot products
dot00 = np.dot(v0, v0)
dot01 = np.dot(v0, v1)
dot02 = np.dot(v0, v2)
dot11 = np.dot(v1, v1)
dot12 = np.dot(v1, v2)
# Compute barycentric coordinates
inv_denom = 1 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * inv_denom
v = (dot00 * dot12 - dot01 * dot02) * inv_denom
# Check if point is in triangle
return (u >= 0) and (v >= 0) and (u + v) < 1
return fn
def highlight_img(img, color=(255, 255, 255), alpha=0.30):
"""
Add highlighting to an image
"""
blend_img = img + alpha * (np.array(color, dtype=np.uint8) - img)
blend_img = blend_img.clip(0, 255).astype(np.uint8)
img[:, :, :] = blend_img
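# A small self-contained sketch (illustrative values only) of how these
# helpers are combined by minigrid.Grid.render_tile: rasterise shapes into an
# oversampled tile, then downsample it for anti-aliasing.
if __name__ == "__main__":
    tile = np.zeros((96, 96, 3), dtype=np.uint8)
    # grid lines along the top and left edges
    fill_coords(tile, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))
    fill_coords(tile, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))
    # a red "ball" in the middle of the cell
    fill_coords(tile, point_in_circle(cx=0.5, cy=0.5, r=0.31), (255, 0, 0))
    tile = downsample(tile, 3)
    print(tile.shape)  # (32, 32, 3)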
| 2,923 | 21.151515 | 75 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/roomgrid.py | from d4rl_alt.gym_minigrid.minigrid import *
def reject_next_to(env, pos):
"""
Function to filter out object positions that are right next to
the agent's starting point
"""
sx, sy = env.agent_pos
x, y = pos
d = abs(sx - x) + abs(sy - y)
return d < 2
class Room:
def __init__(self, top, size):
# Top-left corner and size (tuples)
self.top = top
self.size = size
# List of door objects and door positions
# Order of the doors is right, down, left, up
self.doors = [None] * 4
self.door_pos = [None] * 4
# List of rooms adjacent to this one
# Order of the neighbors is right, down, left, up
self.neighbors = [None] * 4
# Indicates if this room is behind a locked door
self.locked = False
# List of objects contained
self.objs = []
def rand_pos(self, env):
topX, topY = self.top
sizeX, sizeY = self.size
        return env._rand_pos(topX + 1, topX + sizeX - 1, topY + 1, topY + sizeY - 1)
def pos_inside(self, x, y):
"""
Check if a position is within the bounds of this room
"""
topX, topY = self.top
sizeX, sizeY = self.size
if x < topX or y < topY:
return False
if x >= topX + sizeX or y >= topY + sizeY:
return False
return True
class RoomGrid(MiniGridEnv):
"""
Environment with multiple rooms and random objects.
This is meant to serve as a base class for other environments.
"""
def __init__(self, room_size=7, num_rows=3, num_cols=3, max_steps=100, seed=0):
assert room_size > 0
assert room_size >= 3
assert num_rows > 0
assert num_cols > 0
self.room_size = room_size
self.num_rows = num_rows
self.num_cols = num_cols
height = (room_size - 1) * num_rows + 1
width = (room_size - 1) * num_cols + 1
# By default, this environment has no mission
self.mission = ""
super().__init__(
width=width,
height=height,
max_steps=max_steps,
see_through_walls=False,
seed=seed,
)
def room_from_pos(self, x, y):
"""Get the room a given position maps to"""
assert x >= 0
assert y >= 0
i = x // (self.room_size - 1)
j = y // (self.room_size - 1)
assert i < self.num_cols
assert j < self.num_rows
return self.room_grid[j][i]
def get_room(self, i, j):
assert i < self.num_cols
assert j < self.num_rows
return self.room_grid[j][i]
def _gen_grid(self, width, height):
# Create the grid
self.grid = Grid(width, height)
self.room_grid = []
# For each row of rooms
for j in range(0, self.num_rows):
row = []
# For each column of rooms
for i in range(0, self.num_cols):
room = Room(
(i * (self.room_size - 1), j * (self.room_size - 1)),
(self.room_size, self.room_size),
)
row.append(room)
# Generate the walls for this room
self.grid.wall_rect(*room.top, *room.size)
self.room_grid.append(row)
# For each row of rooms
for j in range(0, self.num_rows):
# For each column of rooms
for i in range(0, self.num_cols):
room = self.room_grid[j][i]
x_l, y_l = (room.top[0] + 1, room.top[1] + 1)
x_m, y_m = (
room.top[0] + room.size[0] - 1,
room.top[1] + room.size[1] - 1,
)
# Door positions, order is right, down, left, up
if i < self.num_cols - 1:
room.neighbors[0] = self.room_grid[j][i + 1]
room.door_pos[0] = (x_m, self._rand_int(y_l, y_m))
if j < self.num_rows - 1:
room.neighbors[1] = self.room_grid[j + 1][i]
room.door_pos[1] = (self._rand_int(x_l, x_m), y_m)
if i > 0:
room.neighbors[2] = self.room_grid[j][i - 1]
room.door_pos[2] = room.neighbors[2].door_pos[0]
if j > 0:
room.neighbors[3] = self.room_grid[j - 1][i]
room.door_pos[3] = room.neighbors[3].door_pos[1]
# The agent starts in the middle, facing right
self.agent_pos = (
(self.num_cols // 2) * (self.room_size - 1) + (self.room_size // 2),
(self.num_rows // 2) * (self.room_size - 1) + (self.room_size // 2),
)
self.agent_dir = 0
def place_in_room(self, i, j, obj):
"""
Add an existing object to room (i, j)
"""
room = self.get_room(i, j)
pos = self.place_obj(
obj, room.top, room.size, reject_fn=reject_next_to, max_tries=1000
)
room.objs.append(obj)
return obj, pos
def add_object(self, i, j, kind=None, color=None):
"""
Add a new object to room (i, j)
"""
if kind == None:
kind = self._rand_elem(["key", "ball", "box"])
if color == None:
color = self._rand_color()
# TODO: we probably want to add an Object.make helper function
assert kind in ["key", "ball", "box"]
if kind == "key":
obj = Key(color)
elif kind == "ball":
obj = Ball(color)
elif kind == "box":
obj = Box(color)
return self.place_in_room(i, j, obj)
def add_door(self, i, j, door_idx=None, color=None, locked=None):
"""
Add a door to a room, connecting it to a neighbor
"""
room = self.get_room(i, j)
if door_idx == None:
# Need to make sure that there is a neighbor along this wall
# and that there is not already a door
while True:
door_idx = self._rand_int(0, 4)
if room.neighbors[door_idx] and room.doors[door_idx] is None:
break
if color == None:
color = self._rand_color()
if locked is None:
locked = self._rand_bool()
assert room.doors[door_idx] is None, "door already exists"
room.locked = locked
door = Door(color, is_locked=locked)
pos = room.door_pos[door_idx]
self.grid.set(*pos, door)
door.cur_pos = pos
neighbor = room.neighbors[door_idx]
room.doors[door_idx] = door
neighbor.doors[(door_idx + 2) % 4] = door
return door, pos
def remove_wall(self, i, j, wall_idx):
"""
Remove a wall between two rooms
"""
room = self.get_room(i, j)
assert wall_idx >= 0 and wall_idx < 4
assert room.doors[wall_idx] is None, "door exists on this wall"
assert room.neighbors[wall_idx], "invalid wall"
neighbor = room.neighbors[wall_idx]
tx, ty = room.top
w, h = room.size
# Ordering of walls is right, down, left, up
if wall_idx == 0:
for i in range(1, h - 1):
self.grid.set(tx + w - 1, ty + i, None)
elif wall_idx == 1:
for i in range(1, w - 1):
self.grid.set(tx + i, ty + h - 1, None)
elif wall_idx == 2:
for i in range(1, h - 1):
self.grid.set(tx, ty + i, None)
elif wall_idx == 3:
for i in range(1, w - 1):
self.grid.set(tx + i, ty, None)
else:
assert False, "invalid wall index"
# Mark the rooms as connected
room.doors[wall_idx] = True
neighbor.doors[(wall_idx + 2) % 4] = True
def place_agent(self, i=None, j=None, rand_dir=True):
"""
Place the agent in a room
"""
if i == None:
i = self._rand_int(0, self.num_cols)
if j == None:
j = self._rand_int(0, self.num_rows)
room = self.room_grid[j][i]
# Find a position that is not right in front of an object
while True:
super().place_agent(room.top, room.size, rand_dir, max_tries=1000)
front_cell = self.grid.get(*self.front_pos)
            if front_cell is None or front_cell.type == "wall":
break
return self.agent_pos
def connect_all(self, door_colors=COLOR_NAMES, max_itrs=5000):
"""
Make sure that all rooms are reachable by the agent from its
starting position
"""
start_room = self.room_from_pos(*self.agent_pos)
added_doors = []
def find_reach():
reach = set()
stack = [start_room]
while len(stack) > 0:
room = stack.pop()
if room in reach:
continue
reach.add(room)
for i in range(0, 4):
if room.doors[i]:
stack.append(room.neighbors[i])
return reach
num_itrs = 0
while True:
# This is to handle rare situations where random sampling produces
            # a level that cannot be connected, resulting in an infinite loop
if num_itrs > max_itrs:
raise RecursionError("connect_all failed")
num_itrs += 1
# If all rooms are reachable, stop
reach = find_reach()
if len(reach) == self.num_rows * self.num_cols:
break
# Pick a random room and door position
i = self._rand_int(0, self.num_cols)
j = self._rand_int(0, self.num_rows)
k = self._rand_int(0, 4)
room = self.get_room(i, j)
# If there is already a door there, skip
if not room.door_pos[k] or room.doors[k]:
continue
if room.locked or room.neighbors[k].locked:
continue
color = self._rand_elem(door_colors)
door, _ = self.add_door(i, j, k, color, False)
added_doors.append(door)
return added_doors
def add_distractors(self, i=None, j=None, num_distractors=10, all_unique=True):
"""
Add random objects that can potentially distract/confuse the agent.
"""
# Collect a list of existing objects
objs = []
for row in self.room_grid:
for room in row:
for obj in room.objs:
objs.append((obj.type, obj.color))
# List of distractors added
dists = []
while len(dists) < num_distractors:
color = self._rand_elem(COLOR_NAMES)
type = self._rand_elem(["key", "ball", "box"])
obj = (type, color)
if all_unique and obj in objs:
continue
# Add the object to a random room if no room specified
room_i = i
room_j = j
if room_i == None:
room_i = self._rand_int(0, self.num_cols)
if room_j == None:
room_j = self._rand_int(0, self.num_rows)
dist, pos = self.add_object(room_i, room_j, *obj)
objs.append(obj)
dists.append(dist)
return dists
| 11,471 | 28.720207 | 83 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/window.py | import sys
import numpy as np
# Only ask users to install matplotlib if they actually need it
try:
import matplotlib.pyplot as plt
except:
print("To display the environment in a window, please install matplotlib, eg:")
print("pip3 install --user matplotlib")
sys.exit(-1)
class Window:
"""
Window to draw a gridworld instance using Matplotlib
"""
def __init__(self, title):
self.fig = None
self.imshow_obj = None
# Create the figure and axes
self.fig, self.ax = plt.subplots()
# Show the env name in the window title
self.fig.canvas.set_window_title(title)
# Turn off x/y axis numbering/ticks
self.ax.set_xticks([], [])
self.ax.set_yticks([], [])
# Flag indicating the window was closed
self.closed = False
def close_handler(evt):
self.closed = True
self.fig.canvas.mpl_connect("close_event", close_handler)
def show_img(self, img):
"""
Show an image or update the image being shown
"""
# Show the first image of the environment
if self.imshow_obj is None:
self.imshow_obj = self.ax.imshow(img, interpolation="bilinear")
self.imshow_obj.set_data(img)
self.fig.canvas.draw()
# Let matplotlib process UI events
# This is needed for interactive mode to work properly
plt.pause(0.001)
def set_caption(self, text):
"""
Set/update the caption text below the image
"""
plt.xlabel(text)
def reg_key_handler(self, key_handler):
"""
Register a keyboard event handler
"""
# Keyboard handler
self.fig.canvas.mpl_connect("key_press_event", key_handler)
def show(self, block=True):
"""
Show the window, and start an event loop
"""
# If not blocking, trigger interactive mode
if not block:
plt.ion()
# Show the plot
        # In non-interactive mode, this enters the matplotlib event loop
# In interactive mode, this call does not block
plt.show()
def close(self):
"""
Close the window
"""
plt.close()
| 2,251 | 23.215054 | 83 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/wrappers.py | import math
import operator
from functools import reduce
import gym
import numpy as np
from gym import error, spaces, utils
from d4rl_alt.gym_minigrid.minigrid import COLOR_TO_IDX, OBJECT_TO_IDX, STATE_TO_IDX
class ReseedWrapper(gym.core.Wrapper):
"""
Wrapper to always regenerate an environment with the same set of seeds.
This can be used to force an environment to always keep the same
configuration when reset.
"""
def __init__(self, env, seeds=[0], seed_idx=0):
self.seeds = list(seeds)
self.seed_idx = seed_idx
super().__init__(env)
def reset(self, **kwargs):
seed = self.seeds[self.seed_idx]
self.seed_idx = (self.seed_idx + 1) % len(self.seeds)
self.env.seed(seed)
return self.env.reset(**kwargs)
def step(self, action):
obs, reward, done, info = self.env.step(action)
return obs, reward, done, info
class ActionBonus(gym.core.Wrapper):
"""
Wrapper which adds an exploration bonus.
This is a reward to encourage exploration of less
visited (state,action) pairs.
"""
def __init__(self, env):
super().__init__(env)
self.counts = {}
def step(self, action):
obs, reward, done, info = self.env.step(action)
env = self.unwrapped
tup = (tuple(env.agent_pos), env.agent_dir, action)
# Get the count for this (s,a) pair
pre_count = 0
if tup in self.counts:
pre_count = self.counts[tup]
# Update the count for this (s,a) pair
new_count = pre_count + 1
self.counts[tup] = new_count
bonus = 1 / math.sqrt(new_count)
reward += bonus
return obs, reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class StateBonus(gym.core.Wrapper):
"""
Adds an exploration bonus based on which positions
are visited on the grid.
"""
def __init__(self, env):
super().__init__(env)
self.counts = {}
def step(self, action):
obs, reward, done, info = self.env.step(action)
# Tuple based on which we index the counts
# We use the position after an update
env = self.unwrapped
tup = tuple(env.agent_pos)
# Get the count for this key
pre_count = 0
if tup in self.counts:
pre_count = self.counts[tup]
# Update the count for this key
new_count = pre_count + 1
self.counts[tup] = new_count
bonus = 1 / math.sqrt(new_count)
reward += bonus
return obs, reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ImgObsWrapper(gym.core.ObservationWrapper):
"""
Use the image as the only observation output, no language/mission.
"""
def __init__(self, env):
super().__init__(env)
self.observation_space = env.observation_space.spaces["image"]
def observation(self, obs):
return obs["image"]
class OneHotPartialObsWrapper(gym.core.ObservationWrapper):
"""
Wrapper to get a one-hot encoding of a partially observable
agent view as observation.
"""
def __init__(self, env, tile_size=8):
super().__init__(env)
self.tile_size = tile_size
obs_shape = env.observation_space["image"].shape
# Number of bits per cell
num_bits = len(OBJECT_TO_IDX) + len(COLOR_TO_IDX) + len(STATE_TO_IDX)
self.observation_space.spaces["image"] = spaces.Box(
low=0, high=255, shape=(obs_shape[0], obs_shape[1], num_bits), dtype="uint8"
)
def observation(self, obs):
img = obs["image"]
        out = np.zeros(self.observation_space.spaces["image"].shape, dtype="uint8")
for i in range(img.shape[0]):
for j in range(img.shape[1]):
type = img[i, j, 0]
color = img[i, j, 1]
state = img[i, j, 2]
out[i, j, type] = 1
out[i, j, len(OBJECT_TO_IDX) + color] = 1
out[i, j, len(OBJECT_TO_IDX) + len(COLOR_TO_IDX) + state] = 1
return {"mission": obs["mission"], "image": out}
class RGBImgObsWrapper(gym.core.ObservationWrapper):
"""
Wrapper to use fully observable RGB image as the only observation output,
    no language/mission. This can be used to have the agent solve the
gridworld in pixel space.
"""
def __init__(self, env, tile_size=8):
super().__init__(env)
self.tile_size = tile_size
self.observation_space.spaces["image"] = spaces.Box(
low=0,
high=255,
shape=(self.env.width * tile_size, self.env.height * tile_size, 3),
dtype="uint8",
)
def observation(self, obs):
env = self.unwrapped
rgb_img = env.render(
mode="rgb_array", highlight=False, tile_size=self.tile_size
)
return {"mission": obs["mission"], "image": rgb_img}
class RGBImgPartialObsWrapper(gym.core.ObservationWrapper):
"""
    Wrapper to use partially observable RGB image as the only observation output.
    This can be used to have the agent solve the gridworld in pixel space.
"""
def __init__(self, env, tile_size=8):
super().__init__(env)
self.tile_size = tile_size
obs_shape = env.observation_space["image"].shape
self.observation_space.spaces["image"] = spaces.Box(
low=0,
high=255,
shape=(obs_shape[0] * tile_size, obs_shape[1] * tile_size, 3),
dtype="uint8",
)
def observation(self, obs):
env = self.unwrapped
rgb_img_partial = env.get_obs_render(obs["image"], tile_size=self.tile_size)
return {"mission": obs["mission"], "image": rgb_img_partial}
class FullyObsWrapper(gym.core.ObservationWrapper):
"""
Fully observable gridworld using a compact grid encoding
"""
def __init__(self, env):
super().__init__(env)
self.observation_space.spaces["image"] = spaces.Box(
low=0,
high=255,
shape=(self.env.width, self.env.height, 3), # number of cells
dtype="uint8",
)
def observation(self, obs):
env = self.unwrapped
full_grid = env.grid.encode()
full_grid[env.agent_pos[0]][env.agent_pos[1]] = np.array(
[OBJECT_TO_IDX["agent"], COLOR_TO_IDX["red"], env.agent_dir]
)
return {"mission": obs["mission"], "image": full_grid}
class FlatObsWrapper(gym.core.ObservationWrapper):
"""
Encode mission strings using a one-hot scheme,
and combine these with observed images into one flat array
"""
def __init__(self, env, maxStrLen=96):
super().__init__(env)
self.maxStrLen = maxStrLen
self.numCharCodes = 27
imgSpace = env.observation_space.spaces["image"]
imgSize = reduce(operator.mul, imgSpace.shape, 1)
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(1, imgSize + self.numCharCodes * self.maxStrLen),
dtype="uint8",
)
self.cachedStr = None
self.cachedArray = None
def observation(self, obs):
image = obs["image"]
mission = obs["mission"]
# Cache the last-encoded mission string
if mission != self.cachedStr:
assert (
len(mission) <= self.maxStrLen
), "mission string too long ({} chars)".format(len(mission))
mission = mission.lower()
strArray = np.zeros(
shape=(self.maxStrLen, self.numCharCodes), dtype="float32"
)
for idx, ch in enumerate(mission):
if ch >= "a" and ch <= "z":
chNo = ord(ch) - ord("a")
elif ch == " ":
chNo = ord("z") - ord("a") + 1
assert chNo < self.numCharCodes, "%s : %d" % (ch, chNo)
strArray[idx, chNo] = 1
self.cachedStr = mission
self.cachedArray = strArray
obs = np.concatenate((image.flatten(), self.cachedArray.flatten()))
return obs
class ViewSizeWrapper(gym.core.Wrapper):
"""
Wrapper to customize the agent field of view size.
This cannot be used with fully observable wrappers.
"""
def __init__(self, env, agent_view_size=7):
super().__init__(env)
# Override default view size
env.unwrapped.agent_view_size = agent_view_size
# Compute observation space with specified view size
observation_space = gym.spaces.Box(
low=0, high=255, shape=(agent_view_size, agent_view_size, 3), dtype="uint8"
)
# Override the environment's observation space
self.observation_space = spaces.Dict({"image": observation_space})
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
return self.env.step(action)
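# A minimal composition sketch (illustrative only; assumes the bundled
# FourRoomsEnv imports cleanly): stacking RGBImgPartialObsWrapper with
# ImgObsWrapper turns observations into plain uint8 image arrays.
if __name__ == "__main__":
    from d4rl_alt.gym_minigrid.envs import FourRoomsEnv
    env = ImgObsWrapper(RGBImgPartialObsWrapper(FourRoomsEnv(), tile_size=8))
    obs = env.reset()  # (7 * 8, 7 * 8, 3) uint8 view of the agent's field of view
    obs, reward, done, info = env.step(env.action_space.sample())
    print(obs.shape, reward, done)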
| 9,067 | 27.515723 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/envs/__init__.py | from d4rl_alt.gym_minigrid.envs.empty import *
from d4rl_alt.gym_minigrid.envs.fourrooms import *
| 98 | 32 | 50 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/envs/empty.py | from d4rl_alt.gym_minigrid.minigrid import *
from d4rl_alt.gym_minigrid.register import register
class EmptyEnv(MiniGridEnv):
"""
Empty grid environment, no obstacles, sparse reward
"""
def __init__(
self,
size=8,
agent_start_pos=(1, 1),
agent_start_dir=0,
):
self.agent_start_pos = agent_start_pos
self.agent_start_dir = agent_start_dir
super().__init__(
grid_size=size,
max_steps=4 * size * size,
# Set this to True for maximum speed
see_through_walls=True,
)
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal square in the bottom-right corner
self.put_obj(Goal(), width - 2, height - 2)
# Place the agent
if self.agent_start_pos is not None:
self.agent_pos = self.agent_start_pos
self.agent_dir = self.agent_start_dir
else:
self.place_agent()
self.mission = "get to the green goal square"
class EmptyEnv5x5(EmptyEnv):
def __init__(self):
super().__init__(size=5)
class EmptyRandomEnv5x5(EmptyEnv):
def __init__(self):
super().__init__(size=5, agent_start_pos=None)
class EmptyEnv6x6(EmptyEnv):
def __init__(self):
super().__init__(size=6)
class EmptyRandomEnv6x6(EmptyEnv):
def __init__(self):
super().__init__(size=6, agent_start_pos=None)
class EmptyEnv16x16(EmptyEnv):
def __init__(self):
super().__init__(size=16)
register(id="MiniGrid-Empty-5x5-v0", entry_point="gym_minigrid.envs:EmptyEnv5x5")
register(
id="MiniGrid-Empty-Random-5x5-v0", entry_point="gym_minigrid.envs:EmptyRandomEnv5x5"
)
register(id="MiniGrid-Empty-6x6-v0", entry_point="gym_minigrid.envs:EmptyEnv6x6")
register(
id="MiniGrid-Empty-Random-6x6-v0", entry_point="gym_minigrid.envs:EmptyRandomEnv6x6"
)
register(id="MiniGrid-Empty-8x8-v0", entry_point="gym_minigrid.envs:EmptyEnv")
register(id="MiniGrid-Empty-16x16-v0", entry_point="gym_minigrid.envs:EmptyEnv16x16")
| 2,220 | 24.825581 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_minigrid/envs/fourrooms.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from d4rl_alt.gym_minigrid.minigrid import *
from d4rl_alt.gym_minigrid.register import register
class FourRoomsEnv(MiniGridEnv):
"""
Classic 4 rooms gridworld environment.
    Can specify agent and goal position; if not given, the agent is placed at random
    and the goal defaults to (12, 12).
"""
def __init__(self, agent_pos=None, goal_pos=None, **kwargs):
self._agent_default_pos = agent_pos
if goal_pos is None:
goal_pos = (12, 12)
self._goal_default_pos = goal_pos
super().__init__(grid_size=19, max_steps=100, **kwargs)
def get_target(self):
return self._goal_default_pos
def _gen_grid(self, width, height):
# Create the grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.horz_wall(0, 0)
self.grid.horz_wall(0, height - 1)
self.grid.vert_wall(0, 0)
self.grid.vert_wall(width - 1, 0)
room_w = width // 2
room_h = height // 2
# For each row of rooms
for j in range(0, 2):
# For each column
for i in range(0, 2):
xL = i * room_w
yT = j * room_h
xR = xL + room_w
yB = yT + room_h
                # Right wall and door
if i + 1 < 2:
self.grid.vert_wall(xR, yT, room_h)
pos = (xR, self._rand_int(yT + 1, yB))
self.grid.set(*pos, None)
# Bottom wall and door
if j + 1 < 2:
self.grid.horz_wall(xL, yB, room_w)
pos = (self._rand_int(xL + 1, xR), yB)
self.grid.set(*pos, None)
# Randomize the player start position and orientation
if self._agent_default_pos is not None:
self.agent_pos = self._agent_default_pos
self.grid.set(*self._agent_default_pos, None)
self.agent_dir = self._rand_int(0, 4) # assuming random start direction
else:
self.place_agent()
if self._goal_default_pos is not None:
goal = Goal()
self.put_obj(goal, *self._goal_default_pos)
goal.init_pos, goal.cur_pos = self._goal_default_pos
else:
self.place_obj(Goal())
self.mission = "Reach the goal"
def step(self, action):
obs, reward, done, info = MiniGridEnv.step(self, action)
return obs, reward, done, info
register(id="MiniGrid-FourRooms-v0", entry_point="gym_minigrid.envs:FourRoomsEnv")
| 2,581 | 30.487805 | 84 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_mujoco/__init__.py | from gym.envs.registration import register
from d4rl_alt.gym_mujoco import gym_envs
HOPPER_RANDOM_SCORE = -20.272305
HALFCHEETAH_RANDOM_SCORE = -280.178953
WALKER_RANDOM_SCORE = 1.629008
ANT_RANDOM_SCORE = -325.6
HOPPER_EXPERT_SCORE = 3234.3
HALFCHEETAH_EXPERT_SCORE = 12135.0
WALKER_EXPERT_SCORE = 4592.3
ANT_EXPERT_SCORE = 3879.7
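# Editor's note (illustrative): the reference scores above are what D4RL-style
# normalization uses, normalized = (score - random) / (expert - random), commonly
# reported as a percentage. For example, a Hopper return of 1600 normalizes to ~0.498.
def _normalized_score_example(score=1600.0):
    return (score - HOPPER_RANDOM_SCORE) / (HOPPER_EXPERT_SCORE - HOPPER_RANDOM_SCORE)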
# Single Policy datasets
register(
id="hopper-medium-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_hopper_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HOPPER_RANDOM_SCORE,
"ref_max_score": HOPPER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_medium.hdf5",
},
)
register(
id="halfcheetah-medium-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_cheetah_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HALFCHEETAH_RANDOM_SCORE,
"ref_max_score": HALFCHEETAH_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_medium.hdf5",
},
)
register(
id="walker2d-medium-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_walker_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": WALKER_RANDOM_SCORE,
"ref_max_score": WALKER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_medium.hdf5",
},
)
register(
id="hopper-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_hopper_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HOPPER_RANDOM_SCORE,
"ref_max_score": HOPPER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_expert.hdf5",
},
)
register(
id="halfcheetah-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_cheetah_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HALFCHEETAH_RANDOM_SCORE,
"ref_max_score": HALFCHEETAH_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_expert.hdf5",
},
)
register(
id="walker2d-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_walker_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": WALKER_RANDOM_SCORE,
"ref_max_score": WALKER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_expert.hdf5",
},
)
register(
id="hopper-random-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_hopper_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HOPPER_RANDOM_SCORE,
"ref_max_score": HOPPER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_random.hdf5",
},
)
register(
id="halfcheetah-random-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_cheetah_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HALFCHEETAH_RANDOM_SCORE,
"ref_max_score": HALFCHEETAH_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_random.hdf5",
},
)
register(
id="walker2d-random-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_walker_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": WALKER_RANDOM_SCORE,
"ref_max_score": WALKER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_random.hdf5",
},
)
# Mixed datasets
register(
id="hopper-medium-replay-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_hopper_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HOPPER_RANDOM_SCORE,
"ref_max_score": HOPPER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_mixed.hdf5",
},
)
register(
id="walker2d-medium-replay-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_walker_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": WALKER_RANDOM_SCORE,
"ref_max_score": WALKER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker_mixed.hdf5",
},
)
register(
id="halfcheetah-medium-replay-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_cheetah_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HALFCHEETAH_RANDOM_SCORE,
"ref_max_score": HALFCHEETAH_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_mixed.hdf5",
},
)
# Mixtures of random/medium and experts
register(
id="walker2d-medium-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_walker_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": WALKER_RANDOM_SCORE,
"ref_max_score": WALKER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/walker2d_medium_expert.hdf5",
},
)
register(
id="halfcheetah-medium-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_cheetah_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HALFCHEETAH_RANDOM_SCORE,
"ref_max_score": HALFCHEETAH_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/halfcheetah_medium_expert.hdf5",
},
)
register(
id="hopper-medium-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_hopper_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": HOPPER_RANDOM_SCORE,
"ref_max_score": HOPPER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/hopper_medium_expert.hdf5",
},
)
register(
id="ant-medium-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_ant_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": ANT_RANDOM_SCORE,
"ref_max_score": ANT_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_medium_expert.hdf5",
},
)
register(
id="ant-medium-replay-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_ant_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": ANT_RANDOM_SCORE,
"ref_max_score": ANT_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_mixed.hdf5",
},
)
register(
id="ant-medium-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_ant_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": ANT_RANDOM_SCORE,
"ref_max_score": ANT_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_medium.hdf5",
},
)
register(
id="ant-random-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_ant_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": ANT_RANDOM_SCORE,
"ref_max_score": ANT_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_random.hdf5",
},
)
register(
id="ant-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_ant_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": ANT_RANDOM_SCORE,
"ref_max_score": ANT_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_expert.hdf5",
},
)
register(
id="ant-random-expert-v0",
entry_point="d4rl_alt.gym_mujoco.gym_envs:get_ant_env",
max_episode_steps=1000,
kwargs={
"ref_min_score": ANT_RANDOM_SCORE,
"ref_max_score": ANT_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/gym_mujoco/ant_random_expert.hdf5",
},
)
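# Illustrative usage sketch (editor's addition, not part of the original file):
# importing this module registers the ids above; assuming MuJoCo is installed and the
# standard OfflineEnv interface is available, a dataset can then be fetched like this.
def _load_dataset_example(env_id="hopper-medium-v0"):
    import gym
    dataset = gym.make(env_id).get_dataset()  # observations, actions, rewards, terminals, ...
    return {k: getattr(v, "shape", None) for k, v in dataset.items()}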
| 7,873 | 30.75 | 117 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_mujoco/gym_envs.py | from gym.envs.mujoco import AntEnv, HalfCheetahEnv, HopperEnv, Walker2dEnv
from .. import offline_env
from .wrappers import NormalizedBoxEnv
class OfflineAntEnv(AntEnv, offline_env.OfflineEnv):
def __init__(self, **kwargs):
AntEnv.__init__(
self,
)
offline_env.OfflineEnv.__init__(self, **kwargs)
class OfflineHopperEnv(HopperEnv, offline_env.OfflineEnv):
def __init__(self, **kwargs):
HopperEnv.__init__(
self,
)
offline_env.OfflineEnv.__init__(self, **kwargs)
class OfflineHalfCheetahEnv(HalfCheetahEnv, offline_env.OfflineEnv):
def __init__(self, **kwargs):
HalfCheetahEnv.__init__(
self,
)
offline_env.OfflineEnv.__init__(self, **kwargs)
class OfflineWalker2dEnv(Walker2dEnv, offline_env.OfflineEnv):
def __init__(self, **kwargs):
Walker2dEnv.__init__(
self,
)
offline_env.OfflineEnv.__init__(self, **kwargs)
def get_ant_env(**kwargs):
return NormalizedBoxEnv(OfflineAntEnv(**kwargs))
def get_cheetah_env(**kwargs):
return NormalizedBoxEnv(OfflineHalfCheetahEnv(**kwargs))
def get_hopper_env(**kwargs):
return NormalizedBoxEnv(OfflineHopperEnv(**kwargs))
def get_walker_env(**kwargs):
return NormalizedBoxEnv(OfflineWalker2dEnv(**kwargs))
if __name__ == "__main__":
"""Example usage of these envs"""
pass
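    # Editor's sketch of the promised example usage (requires a working MuJoCo
    # install; no offline dataset is needed just to step the environment).
    env = get_hopper_env()
    obs = env.reset()
    next_obs, reward, done, info = env.step(env.action_space.sample())
    print(obs.shape, reward, done)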
| 1,412 | 23.362069 | 74 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/gym_mujoco/wrappers.py | import itertools
from collections import deque
import numpy as np
from gym import Env
from gym.spaces import Box, Discrete
class ProxyEnv(Env):
def __init__(self, wrapped_env):
self._wrapped_env = wrapped_env
self.action_space = self._wrapped_env.action_space
self.observation_space = self._wrapped_env.observation_space
@property
def wrapped_env(self):
return self._wrapped_env
def reset(self, **kwargs):
return self._wrapped_env.reset(**kwargs)
def step(self, action):
return self._wrapped_env.step(action)
def render(self, *args, **kwargs):
return self._wrapped_env.render(*args, **kwargs)
def seed(self, seed=0):
return self._wrapped_env.seed(seed=seed)
@property
def horizon(self):
return self._wrapped_env.horizon
def terminate(self):
if hasattr(self.wrapped_env, "terminate"):
self.wrapped_env.terminate()
def __getattr__(self, attr):
if attr == "_wrapped_env":
raise AttributeError()
return getattr(self._wrapped_env, attr)
def __getstate__(self):
"""
This is useful to override in case the wrapped env has some funky
__getstate__ that doesn't play well with overriding __getattr__.
The main problematic case is/was gym's EzPickle serialization scheme.
:return:
"""
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def __str__(self):
return "{}({})".format(type(self).__name__, self.wrapped_env)
class HistoryEnv(ProxyEnv, Env):
def __init__(self, wrapped_env, history_len):
super().__init__(wrapped_env)
self.history_len = history_len
high = np.inf * np.ones(self.history_len * self.observation_space.low.size)
low = -high
self.observation_space = Box(
low=low,
high=high,
)
self.history = deque(maxlen=self.history_len)
def step(self, action):
state, reward, done, info = super().step(action)
self.history.append(state)
flattened_history = self._get_history().flatten()
return flattened_history, reward, done, info
def reset(self, **kwargs):
state = super().reset()
self.history = deque(maxlen=self.history_len)
self.history.append(state)
flattened_history = self._get_history().flatten()
return flattened_history
def _get_history(self):
observations = list(self.history)
obs_count = len(observations)
for _ in range(self.history_len - obs_count):
dummy = np.zeros(self._wrapped_env.observation_space.low.size)
observations.append(dummy)
return np.c_[observations]
class DiscretizeEnv(ProxyEnv, Env):
def __init__(self, wrapped_env, num_bins):
super().__init__(wrapped_env)
low = self.wrapped_env.action_space.low
high = self.wrapped_env.action_space.high
action_ranges = [
np.linspace(low[i], high[i], num_bins) for i in range(len(low))
]
self.idx_to_continuous_action = [
np.array(x) for x in itertools.product(*action_ranges)
]
self.action_space = Discrete(len(self.idx_to_continuous_action))
def step(self, action):
continuous_action = self.idx_to_continuous_action[action]
return super().step(continuous_action)
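# Editor's note (illustrative): DiscretizeEnv enumerates the Cartesian product of
# per-dimension bins, so a 3-dimensional Box action space with num_bins=5 becomes a
# Discrete(5 ** 3) = Discrete(125) space; the env id below is only an example.
def _discretize_example():
    import gym
    base = gym.make("Hopper-v2")
    env = DiscretizeEnv(base, num_bins=5)
    return env.action_space.n  # 125 for Hopper's 3-dimensional action space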
class NormalizedBoxEnv(ProxyEnv):
"""
    Normalize actions to lie in [-1, 1].
Optionally normalize observations and scale reward.
"""
def __init__(
self,
env,
reward_scale=1.0,
obs_mean=None,
obs_std=None,
):
ProxyEnv.__init__(self, env)
self._should_normalize = not (obs_mean is None and obs_std is None)
if self._should_normalize:
if obs_mean is None:
obs_mean = np.zeros_like(env.observation_space.low)
else:
obs_mean = np.array(obs_mean)
if obs_std is None:
obs_std = np.ones_like(env.observation_space.low)
else:
obs_std = np.array(obs_std)
self._reward_scale = reward_scale
self._obs_mean = obs_mean
self._obs_std = obs_std
ub = np.ones(self._wrapped_env.action_space.shape)
self.action_space = Box(-1 * ub, ub)
def estimate_obs_stats(self, obs_batch, override_values=False):
if self._obs_mean is not None and not override_values:
raise Exception(
"Observation mean and std already set. To "
"override, set override_values to True."
)
self._obs_mean = np.mean(obs_batch, axis=0)
self._obs_std = np.std(obs_batch, axis=0)
def _apply_normalize_obs(self, obs):
return (obs - self._obs_mean) / (self._obs_std + 1e-8)
def step(self, action):
lb = self._wrapped_env.action_space.low
ub = self._wrapped_env.action_space.high
scaled_action = lb + (action + 1.0) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
wrapped_step = self._wrapped_env.step(scaled_action)
next_obs, reward, done, info = wrapped_step
if self._should_normalize:
next_obs = self._apply_normalize_obs(next_obs)
return next_obs, reward * self._reward_scale, done, info
def __str__(self):
return "Normalized: %s" % self._wrapped_env
| 5,512 | 31.052326 | 83 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/hand_manipulation_suite/__init__.py | from gym.envs.registration import register
from mjrl.envs.mujoco_env import MujocoEnv
from d4rl_alt.hand_manipulation_suite.door_v0 import DoorEnvV0
from d4rl_alt.hand_manipulation_suite.hammer_v0 import HammerEnvV0
from d4rl_alt.hand_manipulation_suite.pen_v0 import PenEnvV0
from d4rl_alt.hand_manipulation_suite.relocate_v0 import RelocateEnvV0
DOOR_RANDOM_SCORE = -56.512833
DOOR_EXPERT_SCORE = 2880.5693087298737
HAMMER_RANDOM_SCORE = -274.856578
HAMMER_EXPERT_SCORE = 12794.134825156867
PEN_RANDOM_SCORE = 96.262799
PEN_EXPERT_SCORE = 3076.8331017826877
RELOCATE_RANDOM_SCORE = -6.425911
RELOCATE_EXPERT_SCORE = 4233.877797728884
# Swing the door open
register(
id="door-v0",
entry_point="d4rl_alt.hand_manipulation_suite:DoorEnvV0",
max_episode_steps=200,
)
register(
id="door-human-v0",
entry_point="d4rl_alt.hand_manipulation_suite:DoorEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": DOOR_RANDOM_SCORE,
"ref_max_score": DOOR_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/door-v0_demos_clipped.hdf5",
},
)
register(
id="door-cloned-v0",
entry_point="d4rl_alt.hand_manipulation_suite:DoorEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": DOOR_RANDOM_SCORE,
"ref_max_score": DOOR_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/door-demos-v0-bc-combined.hdf5",
},
)
register(
id="door-expert-v0",
entry_point="d4rl_alt.hand_manipulation_suite:DoorEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": DOOR_RANDOM_SCORE,
"ref_max_score": DOOR_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/door-v0_expert_clipped.hdf5",
},
)
# Hammer a nail into the board
register(
id="hammer-v0",
entry_point="d4rl_alt.hand_manipulation_suite:HammerEnvV0",
max_episode_steps=200,
)
register(
id="hammer-human-v0",
entry_point="d4rl_alt.hand_manipulation_suite:HammerEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": HAMMER_RANDOM_SCORE,
"ref_max_score": HAMMER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/hammer-v0_demos_clipped.hdf5",
},
)
register(
id="hammer-cloned-v0",
entry_point="d4rl_alt.hand_manipulation_suite:HammerEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": HAMMER_RANDOM_SCORE,
"ref_max_score": HAMMER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/hammer-demos-v0-bc-combined.hdf5",
},
)
register(
id="hammer-expert-v0",
entry_point="d4rl_alt.hand_manipulation_suite:HammerEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": HAMMER_RANDOM_SCORE,
"ref_max_score": HAMMER_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/hammer-v0_expert_clipped.hdf5",
},
)
# Reposition a pen in hand
register(
id="pen-v0",
entry_point="d4rl_alt.hand_manipulation_suite:PenEnvV0",
max_episode_steps=100,
)
register(
id="pen-human-v0",
entry_point="d4rl_alt.hand_manipulation_suite:PenEnvV0",
max_episode_steps=100,
kwargs={
"ref_min_score": PEN_RANDOM_SCORE,
"ref_max_score": PEN_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/pen-v0_demos_clipped.hdf5",
},
)
register(
id="pen-cloned-v0",
entry_point="d4rl_alt.hand_manipulation_suite:PenEnvV0",
max_episode_steps=100,
kwargs={
"ref_min_score": PEN_RANDOM_SCORE,
"ref_max_score": PEN_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/pen-demos-v0-bc-combined.hdf5",
},
)
register(
id="pen-expert-v0",
entry_point="d4rl_alt.hand_manipulation_suite:PenEnvV0",
max_episode_steps=100,
kwargs={
"ref_min_score": PEN_RANDOM_SCORE,
"ref_max_score": PEN_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/pen-v0_expert_clipped.hdf5",
},
)
# Relocate an object to the target
register(
id="relocate-v0",
entry_point="d4rl_alt.hand_manipulation_suite:RelocateEnvV0",
max_episode_steps=200,
)
register(
id="relocate-human-v0",
entry_point="d4rl_alt.hand_manipulation_suite:RelocateEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": RELOCATE_RANDOM_SCORE,
"ref_max_score": RELOCATE_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/relocate-v0_demos_clipped.hdf5",
},
)
register(
id="relocate-cloned-v0",
entry_point="d4rl_alt.hand_manipulation_suite:RelocateEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": RELOCATE_RANDOM_SCORE,
"ref_max_score": RELOCATE_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/relocate-demos-v0-bc-combined.hdf5",
},
)
register(
id="relocate-expert-v0",
entry_point="d4rl_alt.hand_manipulation_suite:RelocateEnvV0",
max_episode_steps=200,
kwargs={
"ref_min_score": RELOCATE_RANDOM_SCORE,
"ref_max_score": RELOCATE_EXPERT_SCORE,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/hand_dapg/relocate-v0_expert_clipped.hdf5",
},
)
| 5,515 | 29.307692 | 120 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/hand_manipulation_suite/door_v0.py | import os
import numpy as np
from gym import spaces, utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from d4rl_alt import offline_env
ADD_BONUS_REWARDS = True
class DoorEnvV0(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):
def __init__(self, **kwargs):
offline_env.OfflineEnv.__init__(self, **kwargs)
self.door_hinge_did = 0
self.door_bid = 0
self.grasp_sid = 0
self.handle_sid = 0
curr_dir = os.path.dirname(os.path.abspath(__file__))
mujoco_env.MujocoEnv.__init__(self, curr_dir + "/assets/DAPG_door.xml", 5)
# Override action_space to -1, 1
self.action_space = spaces.Box(
low=-1.0, high=1.0, dtype=np.float32, shape=self.action_space.shape
)
# change actuator sensitivity
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([10, 0, 0])
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([1, 0, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([0, -10, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([0, -1, 0])
utils.EzPickle.__init__(self)
ob = self.reset_model()
self.act_mid = np.mean(self.model.actuator_ctrlrange, axis=1)
self.act_rng = 0.5 * (
self.model.actuator_ctrlrange[:, 1] - self.model.actuator_ctrlrange[:, 0]
)
self.door_hinge_did = self.model.jnt_dofadr[
self.model.joint_name2id("door_hinge")
]
self.grasp_sid = self.model.site_name2id("S_grasp")
self.handle_sid = self.model.site_name2id("S_handle")
self.door_bid = self.model.body_name2id("frame")
def step(self, a):
a = np.clip(a, -1.0, 1.0)
try:
a = self.act_mid + a * self.act_rng # mean center and scale
except:
a = a # only for the initialization phase
self.do_simulation(a, self.frame_skip)
ob = self.get_obs()
handle_pos = self.data.site_xpos[self.handle_sid].ravel()
palm_pos = self.data.site_xpos[self.grasp_sid].ravel()
door_pos = self.data.qpos[self.door_hinge_did]
# get to handle
reward = -0.1 * np.linalg.norm(palm_pos - handle_pos)
# open door
reward += -0.1 * (door_pos - 1.57) * (door_pos - 1.57)
# velocity cost
reward += -1e-5 * np.sum(self.data.qvel ** 2)
if ADD_BONUS_REWARDS:
# Bonus
if door_pos > 0.2:
reward += 2
if door_pos > 1.0:
reward += 8
if door_pos > 1.35:
reward += 10
goal_achieved = True if door_pos >= 1.35 else False
return ob, reward, False, dict(goal_achieved=goal_achieved)
def get_obs(self):
# qpos for hand
# xpos for obj
# xpos for target
qp = self.data.qpos.ravel()
handle_pos = self.data.site_xpos[self.handle_sid].ravel()
palm_pos = self.data.site_xpos[self.grasp_sid].ravel()
door_pos = np.array([self.data.qpos[self.door_hinge_did]])
if door_pos > 1.0:
door_open = 1.0
else:
door_open = -1.0
latch_pos = qp[-1]
return np.concatenate(
[
qp[1:-2],
[latch_pos],
door_pos,
palm_pos,
handle_pos,
palm_pos - handle_pos,
[door_open],
]
)
def reset_model(self):
qp = self.init_qpos.copy()
qv = self.init_qvel.copy()
self.set_state(qp, qv)
self.model.body_pos[self.door_bid, 0] = self.np_random.uniform(
low=-0.3, high=-0.2
)
self.model.body_pos[self.door_bid, 1] = self.np_random.uniform(
low=0.25, high=0.35
)
self.model.body_pos[self.door_bid, 2] = self.np_random.uniform(
low=0.252, high=0.35
)
self.sim.forward()
return self.get_obs()
def get_env_state(self):
"""
Get state of hand as well as objects and targets in the scene
"""
qp = self.data.qpos.ravel().copy()
qv = self.data.qvel.ravel().copy()
door_body_pos = self.model.body_pos[self.door_bid].ravel().copy()
return dict(qpos=qp, qvel=qv, door_body_pos=door_body_pos)
def set_env_state(self, state_dict):
"""
Set the state which includes hand as well as objects and targets in the scene
"""
qp = state_dict["qpos"]
qv = state_dict["qvel"]
self.set_state(qp, qv)
self.model.body_pos[self.door_bid] = state_dict["door_body_pos"]
self.sim.forward()
def mj_viewer_setup(self):
self.viewer = MjViewer(self.sim)
self.viewer.cam.azimuth = 90
self.sim.forward()
self.viewer.cam.distance = 1.5
def evaluate_success(self, paths):
num_success = 0
num_paths = len(paths)
# success if door open for 25 steps
for path in paths:
if np.sum(path["env_infos"]["goal_achieved"]) > 25:
num_success += 1
success_percentage = num_success * 100.0 / num_paths
return success_percentage
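# Editor's sketch (added for illustration): evaluate_success expects rollouts in the
# mjrl "paths" format, where each path carries env_infos["goal_achieved"] as a
# per-step array; the dummy path below would count as a success (door open > 25 steps).
def _evaluate_success_example(env):
    fake_path = {"env_infos": {"goal_achieved": np.array([True] * 30)}}
    return env.evaluate_success([fake_path])  # 100.0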
| 5,895 | 31.938547 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/hand_manipulation_suite/hammer_v0.py | import os
import numpy as np
from gym import spaces, utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from d4rl_alt import offline_env
from d4rl_alt.utils.quatmath import quat2euler
ADD_BONUS_REWARDS = True
class HammerEnvV0(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):
def __init__(self, **kwargs):
offline_env.OfflineEnv.__init__(self, **kwargs)
self.target_obj_sid = -1
self.S_grasp_sid = -1
self.obj_bid = -1
self.tool_sid = -1
self.goal_sid = -1
curr_dir = os.path.dirname(os.path.abspath(__file__))
mujoco_env.MujocoEnv.__init__(self, curr_dir + "/assets/DAPG_hammer.xml", 5)
# Override action_space to -1, 1
self.action_space = spaces.Box(
low=-1.0, high=1.0, dtype=np.float32, shape=self.action_space.shape
)
utils.EzPickle.__init__(self)
# change actuator sensitivity
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([10, 0, 0])
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([1, 0, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([0, -10, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([0, -1, 0])
self.target_obj_sid = self.sim.model.site_name2id("S_target")
self.S_grasp_sid = self.sim.model.site_name2id("S_grasp")
self.obj_bid = self.sim.model.body_name2id("Object")
self.tool_sid = self.sim.model.site_name2id("tool")
self.goal_sid = self.sim.model.site_name2id("nail_goal")
self.act_mid = np.mean(self.model.actuator_ctrlrange, axis=1)
self.act_rng = 0.5 * (
self.model.actuator_ctrlrange[:, 1] - self.model.actuator_ctrlrange[:, 0]
)
def step(self, a):
a = np.clip(a, -1.0, 1.0)
try:
a = self.act_mid + a * self.act_rng # mean center and scale
except:
a = a # only for the initialization phase
self.do_simulation(a, self.frame_skip)
ob = self.get_obs()
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
tool_pos = self.data.site_xpos[self.tool_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
goal_pos = self.data.site_xpos[self.goal_sid].ravel()
# get to hammer
reward = -0.1 * np.linalg.norm(palm_pos - obj_pos)
# take hammer head to nail
reward -= np.linalg.norm((tool_pos - target_pos))
# make nail go inside
reward -= 10 * np.linalg.norm(target_pos - goal_pos)
# velocity penalty
reward -= 1e-2 * np.linalg.norm(self.data.qvel.ravel())
if ADD_BONUS_REWARDS:
# bonus for lifting up the hammer
if obj_pos[2] > 0.04 and tool_pos[2] > 0.04:
reward += 2
# bonus for hammering the nail
if np.linalg.norm(target_pos - goal_pos) < 0.020:
reward += 25
if np.linalg.norm(target_pos - goal_pos) < 0.010:
reward += 75
goal_achieved = True if np.linalg.norm(target_pos - goal_pos) < 0.010 else False
return ob, reward, False, dict(goal_achieved=goal_achieved)
def get_obs(self):
# qpos for hand
# xpos for obj
# xpos for target
qp = self.data.qpos.ravel()
qv = np.clip(self.data.qvel.ravel(), -1.0, 1.0)
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
obj_rot = quat2euler(self.data.body_xquat[self.obj_bid].ravel()).ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
nail_impact = np.clip(
self.sim.data.sensordata[self.sim.model.sensor_name2id("S_nail")], -1.0, 1.0
)
return np.concatenate(
[
qp[:-6],
qv[-6:],
palm_pos,
obj_pos,
obj_rot,
target_pos,
np.array([nail_impact]),
]
)
def reset_model(self):
self.sim.reset()
target_bid = self.model.body_name2id("nail_board")
self.model.body_pos[target_bid, 2] = self.np_random.uniform(low=0.1, high=0.25)
self.sim.forward()
return self.get_obs()
def get_env_state(self):
"""
Get state of hand as well as objects and targets in the scene
"""
qpos = self.data.qpos.ravel().copy()
qvel = self.data.qvel.ravel().copy()
board_pos = self.model.body_pos[self.model.body_name2id("nail_board")].copy()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel().copy()
return dict(qpos=qpos, qvel=qvel, board_pos=board_pos, target_pos=target_pos)
def set_env_state(self, state_dict):
"""
Set the state which includes hand as well as objects and targets in the scene
"""
qp = state_dict["qpos"]
qv = state_dict["qvel"]
board_pos = state_dict["board_pos"]
self.set_state(qp, qv)
self.model.body_pos[self.model.body_name2id("nail_board")] = board_pos
self.sim.forward()
def mj_viewer_setup(self):
self.viewer = MjViewer(self.sim)
self.viewer.cam.azimuth = 45
self.viewer.cam.distance = 2.0
self.sim.forward()
def evaluate_success(self, paths):
num_success = 0
num_paths = len(paths)
        # success if nail inside board for 25 steps
for path in paths:
if np.sum(path["env_infos"]["goal_achieved"]) > 25:
num_success += 1
success_percentage = num_success * 100.0 / num_paths
return success_percentage
| 6,437 | 35.168539 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/hand_manipulation_suite/pen_v0.py | import os
import numpy as np
from gym import spaces, utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from d4rl_alt import offline_env
from d4rl_alt.utils.quatmath import euler2quat, quat2euler
ADD_BONUS_REWARDS = True
class PenEnvV0(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):
def __init__(self, **kwargs):
offline_env.OfflineEnv.__init__(self, **kwargs)
self.target_obj_bid = 0
self.S_grasp_sid = 0
self.eps_ball_sid = 0
self.obj_bid = 0
self.obj_t_sid = 0
self.obj_b_sid = 0
self.tar_t_sid = 0
self.tar_b_sid = 0
self.pen_length = 1.0
self.tar_length = 1.0
curr_dir = os.path.dirname(os.path.abspath(__file__))
mujoco_env.MujocoEnv.__init__(self, curr_dir + "/assets/DAPG_pen.xml", 5)
# Override action_space to -1, 1
self.action_space = spaces.Box(
low=-1.0, high=1.0, dtype=np.float32, shape=self.action_space.shape
)
# change actuator sensitivity
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([10, 0, 0])
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([1, 0, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([0, -10, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([0, -1, 0])
utils.EzPickle.__init__(self)
self.target_obj_bid = self.sim.model.body_name2id("target")
self.S_grasp_sid = self.sim.model.site_name2id("S_grasp")
self.obj_bid = self.sim.model.body_name2id("Object")
self.eps_ball_sid = self.sim.model.site_name2id("eps_ball")
self.obj_t_sid = self.sim.model.site_name2id("object_top")
self.obj_b_sid = self.sim.model.site_name2id("object_bottom")
self.tar_t_sid = self.sim.model.site_name2id("target_top")
self.tar_b_sid = self.sim.model.site_name2id("target_bottom")
self.pen_length = np.linalg.norm(
self.data.site_xpos[self.obj_t_sid] - self.data.site_xpos[self.obj_b_sid]
)
self.tar_length = np.linalg.norm(
self.data.site_xpos[self.tar_t_sid] - self.data.site_xpos[self.tar_b_sid]
)
self.act_mid = np.mean(self.model.actuator_ctrlrange, axis=1)
self.act_rng = 0.5 * (
self.model.actuator_ctrlrange[:, 1] - self.model.actuator_ctrlrange[:, 0]
)
def step(self, a):
a = np.clip(a, -1.0, 1.0)
try:
starting_up = False
a = self.act_mid + a * self.act_rng # mean center and scale
except:
starting_up = True
a = a # only for the initialization phase
self.do_simulation(a, self.frame_skip)
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
desired_loc = self.data.site_xpos[self.eps_ball_sid].ravel()
obj_orien = (
self.data.site_xpos[self.obj_t_sid] - self.data.site_xpos[self.obj_b_sid]
) / self.pen_length
desired_orien = (
self.data.site_xpos[self.tar_t_sid] - self.data.site_xpos[self.tar_b_sid]
) / self.tar_length
# pos cost
dist = np.linalg.norm(obj_pos - desired_loc)
reward = -dist
# orien cost
orien_similarity = np.dot(obj_orien, desired_orien)
reward += orien_similarity
if ADD_BONUS_REWARDS:
# bonus for being close to desired orientation
if dist < 0.075 and orien_similarity > 0.9:
reward += 10
if dist < 0.075 and orien_similarity > 0.95:
reward += 50
# penalty for dropping the pen
done = False
if obj_pos[2] < 0.075:
reward -= 5
done = True if not starting_up else False
goal_achieved = True if (dist < 0.075 and orien_similarity > 0.95) else False
return self.get_obs(), reward, done, dict(goal_achieved=goal_achieved)
def get_obs(self):
qp = self.data.qpos.ravel()
obj_vel = self.data.qvel[-6:].ravel()
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
desired_pos = self.data.site_xpos[self.eps_ball_sid].ravel()
obj_orien = (
self.data.site_xpos[self.obj_t_sid] - self.data.site_xpos[self.obj_b_sid]
) / self.pen_length
desired_orien = (
self.data.site_xpos[self.tar_t_sid] - self.data.site_xpos[self.tar_b_sid]
) / self.tar_length
return np.concatenate(
[
qp[:-6],
obj_pos,
obj_vel,
obj_orien,
desired_orien,
obj_pos - desired_pos,
obj_orien - desired_orien,
]
)
def reset_model(self):
qp = self.init_qpos.copy()
qv = self.init_qvel.copy()
self.set_state(qp, qv)
desired_orien = np.zeros(3)
desired_orien[0] = self.np_random.uniform(low=-1, high=1)
desired_orien[1] = self.np_random.uniform(low=-1, high=1)
self.model.body_quat[self.target_obj_bid] = euler2quat(desired_orien)
self.sim.forward()
return self.get_obs()
def get_env_state(self):
"""
Get state of hand as well as objects and targets in the scene
"""
qp = self.data.qpos.ravel().copy()
qv = self.data.qvel.ravel().copy()
desired_orien = self.model.body_quat[self.target_obj_bid].ravel().copy()
return dict(qpos=qp, qvel=qv, desired_orien=desired_orien)
def set_env_state(self, state_dict):
"""
Set the state which includes hand as well as objects and targets in the scene
"""
qp = state_dict["qpos"]
qv = state_dict["qvel"]
desired_orien = state_dict["desired_orien"]
self.set_state(qp, qv)
self.model.body_quat[self.target_obj_bid] = desired_orien
self.sim.forward()
def mj_viewer_setup(self):
self.viewer = MjViewer(self.sim)
self.viewer.cam.azimuth = -45
self.sim.forward()
self.viewer.cam.distance = 1.0
def evaluate_success(self, paths):
num_success = 0
num_paths = len(paths)
# success if pen within 15 degrees of target for 20 steps
for path in paths:
if np.sum(path["env_infos"]["goal_achieved"]) > 20:
num_success += 1
success_percentage = num_success * 100.0 / num_paths
return success_percentage
| 7,119 | 34.6 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/hand_manipulation_suite/relocate_v0.py | import os
import numpy as np
from gym import spaces, utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from d4rl_alt import offline_env
ADD_BONUS_REWARDS = True
class RelocateEnvV0(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):
def __init__(self, **kwargs):
offline_env.OfflineEnv.__init__(self, **kwargs)
self.target_obj_sid = 0
self.S_grasp_sid = 0
self.obj_bid = 0
curr_dir = os.path.dirname(os.path.abspath(__file__))
mujoco_env.MujocoEnv.__init__(self, curr_dir + "/assets/DAPG_relocate.xml", 5)
# Override action_space to -1, 1
self.action_space = spaces.Box(
low=-1.0, high=1.0, dtype=np.float32, shape=self.action_space.shape
)
# change actuator sensitivity
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([10, 0, 0])
self.sim.model.actuator_gainprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([1, 0, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_WRJ1") : self.sim.model.actuator_name2id(
"A_WRJ0"
)
+ 1,
:3,
] = np.array([0, -10, 0])
self.sim.model.actuator_biasprm[
self.sim.model.actuator_name2id("A_FFJ3") : self.sim.model.actuator_name2id(
"A_THJ0"
)
+ 1,
:3,
] = np.array([0, -1, 0])
self.target_obj_sid = self.sim.model.site_name2id("target")
self.S_grasp_sid = self.sim.model.site_name2id("S_grasp")
self.obj_bid = self.sim.model.body_name2id("Object")
utils.EzPickle.__init__(self)
self.act_mid = np.mean(self.model.actuator_ctrlrange, axis=1)
self.act_rng = 0.5 * (
self.model.actuator_ctrlrange[:, 1] - self.model.actuator_ctrlrange[:, 0]
)
def step(self, a):
a = np.clip(a, -1.0, 1.0)
try:
a = self.act_mid + a * self.act_rng # mean center and scale
except:
a = a # only for the initialization phase
self.do_simulation(a, self.frame_skip)
ob = self.get_obs()
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
reward = -0.1 * np.linalg.norm(palm_pos - obj_pos) # take hand to object
if obj_pos[2] > 0.04: # if object off the table
reward += 1.0 # bonus for lifting the object
reward += -0.5 * np.linalg.norm(
palm_pos - target_pos
) # make hand go to target
reward += -0.5 * np.linalg.norm(
obj_pos - target_pos
) # make object go to target
if ADD_BONUS_REWARDS:
if np.linalg.norm(obj_pos - target_pos) < 0.1:
reward += 10.0 # bonus for object close to target
if np.linalg.norm(obj_pos - target_pos) < 0.05:
reward += 20.0 # bonus for object "very" close to target
goal_achieved = True if np.linalg.norm(obj_pos - target_pos) < 0.1 else False
return ob, reward, False, dict(goal_achieved=goal_achieved)
def get_obs(self):
# qpos for hand
# xpos for obj
# xpos for target
qp = self.data.qpos.ravel()
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
return np.concatenate(
[qp[:-6], palm_pos - obj_pos, palm_pos - target_pos, obj_pos - target_pos]
)
def reset_model(self):
qp = self.init_qpos.copy()
qv = self.init_qvel.copy()
self.set_state(qp, qv)
self.model.body_pos[self.obj_bid, 0] = self.np_random.uniform(
low=-0.15, high=0.15
)
self.model.body_pos[self.obj_bid, 1] = self.np_random.uniform(
low=-0.15, high=0.3
)
self.model.site_pos[self.target_obj_sid, 0] = self.np_random.uniform(
low=-0.2, high=0.2
)
self.model.site_pos[self.target_obj_sid, 1] = self.np_random.uniform(
low=-0.2, high=0.2
)
self.model.site_pos[self.target_obj_sid, 2] = self.np_random.uniform(
low=0.15, high=0.35
)
self.sim.forward()
return self.get_obs()
def get_env_state(self):
"""
Get state of hand as well as objects and targets in the scene
"""
qp = self.data.qpos.ravel().copy()
qv = self.data.qvel.ravel().copy()
hand_qpos = qp[:30]
obj_pos = self.data.body_xpos[self.obj_bid].ravel()
palm_pos = self.data.site_xpos[self.S_grasp_sid].ravel()
target_pos = self.data.site_xpos[self.target_obj_sid].ravel()
return dict(
hand_qpos=hand_qpos,
obj_pos=obj_pos,
target_pos=target_pos,
palm_pos=palm_pos,
qpos=qp,
qvel=qv,
)
def set_env_state(self, state_dict):
"""
Set the state which includes hand as well as objects and targets in the scene
"""
qp = state_dict["qpos"]
qv = state_dict["qvel"]
obj_pos = state_dict["obj_pos"]
target_pos = state_dict["target_pos"]
self.set_state(qp, qv)
self.model.body_pos[self.obj_bid] = obj_pos
self.model.site_pos[self.target_obj_sid] = target_pos
self.sim.forward()
def mj_viewer_setup(self):
self.viewer = MjViewer(self.sim)
self.viewer.cam.azimuth = 90
self.sim.forward()
self.viewer.cam.distance = 1.5
def evaluate_success(self, paths):
num_success = 0
num_paths = len(paths)
# success if object close to target for 25 steps
for path in paths:
if np.sum(path["env_infos"]["goal_achieved"]) > 25:
num_success += 1
success_percentage = num_success * 100.0 / num_paths
return success_percentage
| 6,435 | 34.955307 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/hand_manipulation_suite/Adroit/README.md | # Adroit Manipulation Platform
The Adroit manipulation platform is a reconfigurable, tendon-driven, pneumatically-actuated platform designed and developed by [Vikash Kumar](https://vikashplus.github.io/) during his Ph.D. ([Thesis: Manipulators and Manipulation in high dimensional spaces](https://digital.lib.washington.edu/researchworks/handle/1773/38104)) to study dynamic dexterous manipulation. Adroit comprises the [Shadow Hand](https://www.shadowrobot.com/products/dexterous-hand/) skeleton (developed by the [Shadow Robot company](https://www.shadowrobot.com/)) and a custom arm, and is powered by a custom actuation system. This actuation system allows Adroit to move the ShadowHand skeleton faster than a human hand (70 msec limit-to-limit movement, 30 msec overall reflex latency), generate sufficient forces (40 N at each finger tendon, 125 N at each wrist tendon), and achieve high compliance at the mechanism level (6 grams of external force at the fingertip displaces the finger when the system is powered). This combination of speed, force, and compliance is a prerequisite for dexterous manipulation, yet it had never before been achieved with a tendon-driven system, let alone a system with 24 degrees of freedom and 40 tendons.
## Mujoco Model
Adroit is a 28-degree-of-freedom system consisting of a 24-degree-of-freedom **ShadowHand** and a 4-degree-of-freedom arm. This repository contains the MuJoCo models of the system, developed with great care and attention to detail.
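A minimal sketch of loading one of the included models with `mujoco_py` (the XML path below is a placeholder; substitute the model file you want from this repository):
```
from mujoco_py import MjSim, MjViewer, load_model_from_path

model = load_model_from_path("Adroit/Adroit_hand.xml")  # placeholder path
sim = MjSim(model)
viewer = MjViewer(sim)
for _ in range(2000):
    sim.step()
    viewer.render()
```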
## In Projects
Adroit has been used in a wide variety of projects. A small list is appended below. Details of these projects can be found [here](https://vikashplus.github.io/).
[](https://vikashplus.github.io/)
## In News and Media
Adroit has received considerable attention in the media. Details can be found [here](https://vikashplus.github.io/news.html)
[](https://vikashplus.github.io/news.html)
## Citation
If the contents of this repo helped you, please consider citing
```
@phdthesis{Kumar2016thesis,
title = {Manipulators and Manipulation in high dimensional spaces},
school = {University of Washington, Seattle},
author = {Kumar, Vikash},
year = {2016},
url = {https://digital.lib.washington.edu/researchworks/handle/1773/38104}
}
```
| 2,461 | 81.066667 | 1,216 | md |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/__init__.py | from gym.envs.registration import register
from .kitchen_envs import (
KitchenHingeSlideBottomLeftBurnerLightV0,
KitchenMicrowaveKettleLightTopLeftBurnerV0,
)
# Smaller dataset with only positive demonstrations.
register(
id="kitchen-complete-v0",
entry_point="d4rl_alt.kitchen:KitchenMicrowaveKettleLightSliderV0",
max_episode_steps=280,
kwargs={
"ref_min_score": 0.0,
"ref_max_score": 4.0,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/kitchen/mini_kitchen_microwave_kettle_light_slider-v0.hdf5",
},
)
# Whole dataset with undirected demonstrations. A subset of the demonstrations
# solves the task.
register(
id="kitchen-partial-v0",
entry_point="d4rl_alt.kitchen:KitchenMicrowaveKettleLightSliderV0",
max_episode_steps=280,
kwargs={
"ref_min_score": 0.0,
"ref_max_score": 4.0,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/kitchen/kitchen_microwave_kettle_light_slider-v0.hdf5",
},
)
# Whole dataset with undirected demonstrations. No demonstration completely
# solves the task, but each demonstration partially solves different
# components of the task.
register(
id="kitchen-mixed-v0",
entry_point="d4rl_alt.kitchen:KitchenMicrowaveKettleBottomLeftBurnerLightV0",
max_episode_steps=280,
kwargs={
"ref_min_score": 0.0,
"ref_max_score": 4.0,
"dataset_url": "http://rail.eecs.berkeley.edu/datasets/offline_rl/kitchen/kitchen_microwave_kettle_bottomburner_light-v0.hdf5",
},
)
| 1,562 | 32.978261 | 135 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/env_dict.py | from d4rl_alt.kitchen.kitchen_envs import (
KitchenHingeCabinetV0,
KitchenHingeSlideBottomLeftBurnerLightV0,
KitchenKettleV0,
KitchenLightSwitchV0,
KitchenMicrowaveKettleLightTopLeftBurnerV0,
KitchenMicrowaveV0,
KitchenSlideCabinetV0,
KitchenTopLeftBurnerV0,
)
ALL_KITCHEN_ENVIRONMENTS = {
    'microwave': KitchenMicrowaveV0,
    'kettle': KitchenKettleV0,
    'slide_cabinet': KitchenSlideCabinetV0,
    'hinge_cabinet': KitchenHingeCabinetV0,
    'top_left_burner': KitchenTopLeftBurnerV0,
    'light_switch': KitchenLightSwitchV0,
    'microwave_kettle_light_top_left_burner': KitchenMicrowaveKettleLightTopLeftBurnerV0,
    'hinge_slide_bottom_left_burner_light': KitchenHingeSlideBottomLeftBurnerLightV0,
}
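# Editor's sketch (added): the dict maps a task name to its environment class, so a
# task selected by string can be instantiated directly (constructor kwargs depend on
# the chosen class).
def _make_kitchen_env(task_name='microwave', **kwargs):
    return ALL_KITCHEN_ENVIRONMENTS[task_name](**kwargs)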
| 774 | 34.227273 | 88 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/kitchen_envs.py | """Environments using kitchen and Franka robot."""
import copy
import gym
import numpy as np
from gym import spaces
from gym.spaces.box import Box
from d4rl_alt.kitchen.adept_envs.franka.kitchen_multitask_v0 import KitchenTaskRelaxV1
OBS_ELEMENT_INDICES = {
"bottom left burner": np.array([11, 12]),
"top left burner": np.array([15, 16]),
"light switch": np.array([17, 18]),
"slide cabinet": np.array([19]),
"hinge cabinet": np.array([20, 21]),
"microwave": np.array([22]),
"kettle": np.array([23, 24, 25, 26, 27, 28, 29]),
}
OBS_ELEMENT_GOALS = {
"bottom left burner": np.array([-0.88, -0.01]),
"top left burner": np.array([-0.92, -0.01]),
"light switch": np.array([-0.69, -0.05]),
"slide cabinet": np.array([0.37]),
"hinge cabinet": np.array([0.0, 1.45]),
"microwave": np.array([-0.75]),
"kettle": np.array([-0.23, 0.75, 1.62, 0.99, 0.0, 0.0, -0.06]),
}
BONUS_THRESH = 0.3
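# Editor's note (illustrative sketch, not in the original file): _get_reward_n_score
# in KitchenBase below marks an element as complete once the distance between its
# slice of obj_qp and OBS_ELEMENT_GOALS[element] drops under BONUS_THRESH.
def _element_success_example():
    achieved = np.array([-0.70])           # hypothetical microwave joint reading
    goal = OBS_ELEMENT_GOALS["microwave"]  # array([-0.75])
    return np.linalg.norm(achieved - goal) < BONUS_THRESH  # True, since 0.05 < 0.3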
class KitchenBase(KitchenTaskRelaxV1):
    # A list of element names. The robot's task is then to modify each of
# these elements appropriately.
TASK_ELEMENTS = []
REMOVE_TASKS_WHEN_COMPLETE = False
TERMINATE_ON_TASK_COMPLETE = False
def __init__(self, dense=True, use_combined_action_space=False, **kwargs):
self.tasks_to_complete = set(self.TASK_ELEMENTS)
self.dense = dense
super(KitchenBase, self).__init__(**kwargs)
combined_action_space_low = -1.4 * np.ones(self.max_arg_len)
combined_action_space_high = 1.4 * np.ones(self.max_arg_len)
self.combined_action_space = Box(
combined_action_space_low, combined_action_space_high, dtype=np.float32
)
self.use_combined_action_space = use_combined_action_space
if self.use_combined_action_space and self.control_mode == "primitives":
self.action_space = self.combined_action_space
if not self.fixed_schema:
act_lower_primitive = np.zeros(self.num_primitives)
act_upper_primitive = np.ones(self.num_primitives)
act_lower = np.concatenate((act_lower_primitive, self.action_space.low))
act_upper = np.concatenate(
(
act_upper_primitive,
self.action_space.high,
)
)
self.action_space = Box(act_lower, act_upper, dtype=np.float32)
def _get_obs(self):
t, qp, qv, obj_qp, obj_qv = self.robot.get_obs(
self, robot_noise_ratio=self.robot_noise_ratio
)
self.obs_dict = {}
self.obs_dict["t"] = t
self.obs_dict["qp"] = qp
self.obs_dict["qv"] = qv
self.obs_dict["obj_qp"] = obj_qp
self.obs_dict["obj_qv"] = obj_qv
self.obs_dict["goal"] = self.goal
if self.image_obs:
img = self.render(mode="rgb_array")
img = img.transpose(2, 0, 1).flatten()
if self.proprioception:
if not self.initializing:
proprioceptive_obs = np.concatenate(
(
qp,
self.get_ee_pose(),
self.get_ee_quat(),
)
)
if self.normalize_proprioception_obs:
proprioceptive_obs /= np.linalg.norm(proprioceptive_obs)
return np.concatenate((img, proprioceptive_obs))
else:
return img
return img
else:
return np.concatenate(
[self.obs_dict["qp"], self.obs_dict["obj_qp"], self.obs_dict["goal"]]
)
def _get_task_goal(self):
new_goal = np.zeros_like(self.goal)
for element in self.TASK_ELEMENTS:
element_idx = OBS_ELEMENT_INDICES[element]
element_goal = OBS_ELEMENT_GOALS[element]
new_goal[element_idx] = element_goal
return new_goal
def reset_model(self):
self.tasks_to_complete = set(self.TASK_ELEMENTS)
self.episodic_cumulative_reward = 0
return super(KitchenBase, self).reset_model()
def _get_reward_n_score(self, obs_dict):
reward_dict, score = super(KitchenBase, self)._get_reward_n_score(obs_dict)
next_q_obs = obs_dict["qp"]
next_obj_obs = obs_dict["obj_qp"]
next_goal = obs_dict["goal"]
idx_offset = len(next_q_obs)
completions = []
dense = 0
for element in self.tasks_to_complete:
element_idx = OBS_ELEMENT_INDICES[element]
distance = np.linalg.norm(
next_obj_obs[..., element_idx - idx_offset] - OBS_ELEMENT_GOALS[element]
)
dense += -1 * distance # reward must be negative distance for RL
complete = distance < BONUS_THRESH
if complete:
completions.append(element)
if self.REMOVE_TASKS_WHEN_COMPLETE:
[self.tasks_to_complete.remove(element) for element in completions]
bonus = float(len(completions))
reward_dict["bonus"] = bonus
reward_dict["r_total"] = bonus
if self.dense:
reward_dict["r_total"] = dense
score = bonus
return reward_dict, score
def step(
self,
a,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
obs, reward, done, env_info = super(KitchenBase, self).step(
a,
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
self.episodic_cumulative_reward += reward
if self.TERMINATE_ON_TASK_COMPLETE:
done = not self.tasks_to_complete
self.update_info(env_info)
return obs, reward, done, env_info
def update_info(self, info):
next_q_obs = self.obs_dict["qp"]
next_obj_obs = self.obs_dict["obj_qp"]
idx_offset = len(next_q_obs)
if self.initializing:
self.per_task_cumulative_reward = {
k: 0.0 for k in OBS_ELEMENT_INDICES.keys()
}
for element in OBS_ELEMENT_INDICES.keys():
element_idx = OBS_ELEMENT_INDICES[element]
distance = np.linalg.norm(
next_obj_obs[..., element_idx - idx_offset] - OBS_ELEMENT_GOALS[element]
)
info[element + " distance to goal"] = distance
info[element + " success"] = float(distance < BONUS_THRESH)
success = float(distance < BONUS_THRESH)
self.per_task_cumulative_reward[element] += success
info[element + " cumulative reward"] = self.per_task_cumulative_reward[
element
]
info[element + " success"] = success
info["coverage"] = self.coverage_grid.sum() / (
np.prod(self.coverage_grid.shape)
)
info["episodic cumulative reward"] = self.episodic_cumulative_reward
return info
class KitchenMicrowaveKettleLightTopLeftBurnerV0(KitchenBase):
TASK_ELEMENTS = ["microwave", "kettle", "light switch", "top left burner"]
REMOVE_TASKS_WHEN_COMPLETE = True
class KitchenMicrowaveKettleLightTopLeftBurnerV0Custom(KitchenBase):
TASK_ELEMENTS = ["microwave", "kettle", "light switch", "top left burner"]
REMOVE_TASKS_WHEN_COMPLETE = True
def __init__(self, *args, **kwargs):
self.obs_dim = 30
self.goal_dim = 30 # Always zero
self.task_infos = {
"BottomRightBurner": ([9, 10], [-0.88, -0.01], 0.5),
"BottomLeftBurner": ([11, 12], [-0.88, -0.01], 0.5),
"TopRightBurner": ([13, 14], [-0.88, -0.01], 0.5),
"TopLeftBurner": ([15, 16], [-0.88, -0.01], 0.5),
"LightSwitch": ([17, 18], [-0.69, -0.05], 0.44),
"SlideCabinet": ([19], [0.37], 0.1),
"HingeCabinet": ([20, 21], [0.0, 0.5], 0.2),
"Microwave": ([22], [-0.5], 0.2),
"KettleTopLeft": ([23, 24, 25], [-0.23, 0.75, 1.62], 0.2),
"KettleTopRight": ([23, 24, 25], [0.20, 0.75, 1.62], 0.2),
"KettleBottomRight": ([23, 24, 25], [0.20, 0.35, 1.62], 0.2),
"KettleLift": ([25], [2.1], 0.3),
"KettleFall": ([25], [-1.0], 1.3),
}
self.tasks = list(self.task_infos.keys())
super().__init__(*args, **kwargs)
self.ori_observation_space = self.observation_space
goal_space = Box(-np.inf, np.inf, (30,))
self.observation_space = gym.spaces.Dict({
'observation': self.ori_observation_space,
'achieved_goal': goal_space,
'desired_goal': goal_space,
})
def render(self, mode='human', width=None, height=None):
from dm_control.mujoco import engine
if width is None or height is None:
return []
camera = engine.MovableCamera(self.sim, width, height)
camera.set_pose(distance=2.2, lookat=[-0.2, .5, 2.], azimuth=70, elevation=-35)
img = camera.render()
return img
def _get_obs(self):
t, qp, qv, obj_qp, obj_qv = self.robot.get_obs(
self, robot_noise_ratio=self.robot_noise_ratio)
self.obs_dict = {}
self.obs_dict['t'] = t
self.obs_dict['qp'] = qp
self.obs_dict['qv'] = qv
self.obs_dict['obj_qp'] = obj_qp
self.obs_dict['obj_qv'] = obj_qv
self.obs_dict['goal'] = self.goal
return np.concatenate([self.obs_dict['qp'], self.obs_dict['obj_qp']])
def update_goal_info(self, obs, info):
for task in self.tasks:
cur_relevant_obs = obs[self.task_infos[task][0]]
cur_target_obs = self.task_infos[task][1]
if np.linalg.norm(cur_target_obs - cur_relevant_obs) <= self.task_infos[task][2]:
is_success = 1
else:
is_success = 0
info[f'Task{task}Success'] = is_success
def reset(self):
ret = super().reset()
return dict(
observation=ret,
achieved_goal=np.zeros(30),
desired_goal=np.zeros(30),
)
def step(self, *args, **kwargs):
obs, reward, done, info = super().step(*args, **kwargs)
self.update_goal_info(obs, info)
obs = dict(
observation=obs,
achieved_goal=np.zeros(30),
desired_goal=np.zeros(30),
)
return obs, reward, done, info
class KitchenHingeSlideBottomLeftBurnerLightV0(KitchenBase):
TASK_ELEMENTS = [
"hinge cabinet",
"slide cabinet",
"bottom left burner",
"light switch",
]
REMOVE_TASKS_WHEN_COMPLETE = True
class KitchenMicrowaveV0(KitchenBase):
TASK_ELEMENTS = ["microwave"]
def __init__(self, delta=0, **kwargs):
super(KitchenMicrowaveV0, self).__init__(**kwargs)
self.step_to_primitive_name = {
0: "drop",
1: "angled_x_y_grasp",
2: "move_backward",
3: "no_op",
4: "no_op",
}
if not self.use_combined_action_space and self.control_mode == "primitives":
action_low = np.array(
[
-np.pi / 6,
-0.3,
0.95,
0,
0,
0,
0,
0,
0.55,
0,
0,
0,
0.6,
0.0,
]
)
action_high = np.array(
[
-np.pi / 6,
-0.3,
0.95,
0,
0,
0,
0,
0,
0.55,
0,
0,
0,
0.6,
0.0,
]
)
action_low -= delta
action_high += delta
if not self.fixed_schema:
act_lower_primitive = np.zeros(self.num_primitives)
act_upper_primitive = np.ones(self.num_primitives)
action_low = np.concatenate((act_lower_primitive, action_low))
action_high = np.concatenate((act_upper_primitive, action_high))
self.action_space = Box(action_low, action_high, dtype=np.float32)
class KitchenKettleV0(KitchenBase):
TASK_ELEMENTS = ["kettle"]
def __init__(self, delta=0, **kwargs):
super(KitchenKettleV0, self).__init__(**kwargs)
self.step_to_primitive_name = {
0: "drop",
1: "angled_x_y_grasp",
2: "move_delta_ee_pose",
3: "drop",
4: "open_gripper",
}
if not self.use_combined_action_space and self.control_mode == "primitives":
action_low = np.array(
[
0,
0.15,
0.7,
0.25,
1.1,
0.25,
0,
0,
0.25,
0,
0,
0,
0.0,
0.0,
]
)
action_high = np.array(
[
0,
0.15,
0.7,
0.25,
1.1,
0.25,
0,
0,
0.5,
0,
0,
0,
0.0,
0.0,
]
)
action_low -= delta
action_high += delta
if not self.fixed_schema:
act_lower_primitive = np.zeros(self.num_primitives)
act_upper_primitive = np.ones(self.num_primitives)
action_low = np.concatenate((act_lower_primitive, action_low))
action_high = np.concatenate((act_upper_primitive, action_high))
self.action_space = Box(action_low, action_high, dtype=np.float32)
class KitchenBottomLeftBurnerV0(KitchenBase):
TASK_ELEMENTS = ["bottom left burner"]
def __init__(self, delta=0.0, **kwargs):
super(KitchenBottomLeftBurnerV0, self).__init__(**kwargs)
self.step_to_primitive_name = {
0: "lift",
1: "angled_x_y_grasp",
2: "rotate_about_y_axis",
3: "no_op",
4: "no_op",
}
if not self.use_combined_action_space and self.control_mode == "primitives":
action_low = np.array(
[
0,
0.55,
1.1,
0.0,
0.0,
0.0,
-np.pi / 4,
0.3,
0.0,
0,
0,
0,
0.0,
0.0,
]
)
action_high = np.array(
[
0,
0.55,
1.1,
0.0,
0.0,
0.0,
-np.pi / 4,
0.3,
0.0,
0,
0,
0,
0.0,
0.0,
]
)
action_low -= delta
action_high += delta
if not self.fixed_schema:
act_lower_primitive = np.zeros(self.num_primitives)
act_upper_primitive = np.ones(self.num_primitives)
action_low = np.concatenate((act_lower_primitive, action_low))
action_high = np.concatenate((act_upper_primitive, action_high))
self.action_space = Box(action_low, action_high, dtype=np.float32)
class KitchenTopLeftBurnerV0(KitchenBase):
TASK_ELEMENTS = ["top left burner"]
def __init__(self, delta=0, **kwargs):
super(KitchenTopLeftBurnerV0, self).__init__(**kwargs)
self.step_to_primitive_name = {
0: "lift",
1: "angled_x_y_grasp",
2: "rotate_about_y_axis",
3: "no_op",
4: "no_op",
}
if not self.use_combined_action_space and self.control_mode == "primitives":
action_low = np.array(
[
0,
0.5,
1,
0.0,
0.0,
0.0,
-np.pi / 4,
0.6,
0.0,
0,
0,
0,
0.0,
0.0,
]
)
action_high = np.array(
[
0,
0.5,
1,
0.0,
0.0,
0.0,
-np.pi / 4,
0.6,
0.0,
0,
0,
0,
0.0,
0.0,
]
)
action_low -= delta
action_high += delta
if not self.fixed_schema:
act_lower_primitive = np.zeros(self.num_primitives)
act_upper_primitive = np.ones(self.num_primitives)
action_low = np.concatenate((act_lower_primitive, action_low))
action_high = np.concatenate((act_upper_primitive, action_high))
self.action_space = Box(action_low, action_high, dtype=np.float32)
class KitchenSlideCabinetV0(KitchenBase):
TASK_ELEMENTS = ["slide cabinet"]
def __init__(self, delta=0, **kwargs):
super(KitchenSlideCabinetV0, self).__init__(**kwargs)
self.step_to_primitive_name = {
0: "lift",
1: "angled_x_y_grasp",
2: "move_right",
3: "no_op",
4: "no_op",
}
if not self.use_combined_action_space and self.control_mode == "primitives":
action_low = np.array(
[
0.0,
0.7,
1.0,
0.0,
0.0,
0.0,
0.0,
1,
0.0,
0.0,
0.6,
0.0,
0.0,
0.0,
]
)
action_high = np.array(
[
0.0,
0.7,
1.0,
0.0,
0.0,
0.0,
0.0,
1,
0.0,
0.0,
0.6,
0.0,
0.0,
0.0,
]
)
action_low -= delta
action_high += delta
if not self.fixed_schema:
act_lower_primitive = np.zeros(self.num_primitives)
act_upper_primitive = np.ones(self.num_primitives)
action_low = np.concatenate((act_lower_primitive, action_low))
action_high = np.concatenate((act_upper_primitive, action_high))
self.action_space = Box(action_low, action_high, dtype=np.float32)
class KitchenHingeCabinetV0(KitchenBase):
TASK_ELEMENTS = ["hinge cabinet"]
def __init__(self, delta=0, **kwargs):
super(KitchenHingeCabinetV0, self).__init__(**kwargs)
self.step_to_primitive_name = {
0: "lift",
1: "angled_x_y_grasp",
2: "move_delta_ee_pose",
3: "rotate_about_x_axis",
4: "no_op",
}
if not self.use_combined_action_space and self.control_mode == "primitives":
action_low = np.array(
[
-np.pi / 6,
-0.3,
1.4,
0.5,
-1,
0.0,
0.0,
1,
0.0,
0,
1,
0,
0.3,
1.0,
]
)
action_high = np.array(
[
-np.pi / 6,
-0.3,
1.4,
0.5,
-1,
0.0,
0.0,
1,
0.0,
0,
1,
0,
0.3,
1.0,
]
)
action_low -= delta
action_high += delta
if not self.fixed_schema:
act_lower_primitive = np.zeros(self.num_primitives)
act_upper_primitive = np.ones(self.num_primitives)
action_low = np.concatenate((act_lower_primitive, action_low))
action_high = np.concatenate((act_upper_primitive, action_high))
self.action_space = Box(action_low, action_high, dtype=np.float32)
class KitchenLightSwitchV0(KitchenBase):
TASK_ELEMENTS = ["light switch"]
def __init__(self, delta=0, **kwargs):
super(KitchenLightSwitchV0, self).__init__(**kwargs)
self.step_to_primitive_name = {
0: "close_gripper",
1: "lift",
2: "move_right",
3: "move_forward",
4: "move_left",
}
if not self.use_combined_action_space and self.control_mode == "primitives":
action_low = np.array(
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.6,
0.0,
0.45,
0.45,
1.25,
0.0,
0.0,
]
)
action_high = np.array(
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.6,
0.0,
0.45,
0.45,
1.25,
0.0,
0.0,
]
)
action_low -= delta
action_high += delta
if not self.fixed_schema:
act_lower_primitive = np.zeros(self.num_primitives)
act_upper_primitive = np.ones(self.num_primitives)
action_low = np.concatenate((act_lower_primitive, action_low))
action_high = np.concatenate((act_upper_primitive, action_high))
self.action_space = Box(action_low, action_high, dtype=np.float32)
| 23,478 | 32.162429 | 93 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/run_kitchen.py | import gym
import d4rl_alt
import cv2
from envs.d4rl_envs import KitchenEnv
#from d4rl_alt.kitchen.kitchen_envs import KitchenMicrowaveV0
import imageio
env = KitchenEnv()
env.reset()
done = False
imgs = []
for step in range(150):
    obs, reward, done, info = env.step(env.action_space.sample())
    print("microwave", info["microwave distance to goal"])
    im = env.render(mode="rgb_array")
    imgs.append(im)
    #cv2.imshow("env", im)
    #cv2.waitKey(1)
imageio.mimsave('out.gif', imgs)
| 522 | 22.772727 | 61 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/__init__.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import d4rl_alt.kitchen.adept_envs.franka
from d4rl_alt.kitchen.adept_envs.utils.configurable import global_config
| 711 | 36.473684 | 74 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/base_robot.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import numpy as np
class BaseRobot(object):
"""Base class for all robot classes."""
def __init__(
self,
n_jnt,
n_obj,
pos_bounds=None,
vel_bounds=None,
calibration_path=None,
is_hardware=False,
device_name=None,
overlay=False,
calibration_mode=False,
observation_cache_maxsize=5,
):
"""Create a new robot.
Args:
n_jnt: The number of dofs in the robot.
n_obj: The number of dofs in the object.
pos_bounds: (n_jnt, 2)-shape matrix denoting the min and max joint
position for each joint.
vel_bounds: (n_jnt, 2)-shape matrix denoting the min and max joint
velocity for each joint.
calibration_path: File path to the calibration configuration file to
use.
is_hardware: Whether to run on hardware or not.
device_name: The device path for the robot hardware. Only required
in legacy mode.
overlay: Whether to show a simulation overlay of the hardware.
calibration_mode: Start with motors disengaged.
"""
assert n_jnt > 0
assert n_obj >= 0
self._n_jnt = n_jnt
self._n_obj = n_obj
self._n_dofs = n_jnt + n_obj
self._pos_bounds = None
if pos_bounds is not None:
pos_bounds = np.array(pos_bounds, dtype=np.float32)
assert pos_bounds.shape == (self._n_dofs, 2)
for low, high in pos_bounds:
assert low < high
self._pos_bounds = pos_bounds
self._vel_bounds = None
if vel_bounds is not None:
vel_bounds = np.array(vel_bounds, dtype=np.float32)
assert vel_bounds.shape == (self._n_dofs, 2)
for low, high in vel_bounds:
assert low < high
self._vel_bounds = vel_bounds
self._is_hardware = is_hardware
self._device_name = device_name
self._calibration_path = calibration_path
self._overlay = overlay
self._calibration_mode = calibration_mode
self._observation_cache_maxsize = observation_cache_maxsize
# Gets updated
self._observation_cache = deque([], maxlen=self._observation_cache_maxsize)
@property
def n_jnt(self):
return self._n_jnt
@property
def n_obj(self):
return self._n_obj
@property
def n_dofs(self):
return self._n_dofs
@property
def pos_bounds(self):
return self._pos_bounds
@property
def vel_bounds(self):
return self._vel_bounds
@property
def is_hardware(self):
return self._is_hardware
@property
def device_name(self):
return self._device_name
@property
def calibration_path(self):
return self._calibration_path
@property
def overlay(self):
return self._overlay
@property
def has_obj(self):
return self._n_obj > 0
@property
def calibration_mode(self):
return self._calibration_mode
@property
def observation_cache_maxsize(self):
return self._observation_cache_maxsize
@property
def observation_cache(self):
return self._observation_cache
def clip_positions(self, positions):
"""Clips the given joint positions to the position bounds.
Args:
positions: The joint positions.
Returns:
The bounded joint positions.
"""
if self.pos_bounds is None:
return positions
assert len(positions) == self.n_jnt or len(positions) == self.n_dofs
pos_bounds = self.pos_bounds[: len(positions)]
return np.clip(positions, pos_bounds[:, 0], pos_bounds[:, 1])
| 4,458 | 28.143791 | 83 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/mujoco_env.py | """Base environment for MuJoCo-based environments."""
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import os
import time
from typing import Dict, Optional
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
from d4rl_alt.kitchen.adept_envs.simulation.sim_robot import MujocoSimRobot, RenderMode
DEFAULT_RENDER_SIZE = 480
USE_DM_CONTROL = True
class MujocoEnv(gym.Env):
"""Superclass for all MuJoCo environments."""
def __init__(
self,
model_path: str,
frame_skip: int,
camera_settings: Optional[Dict] = None,
use_dm_backend: Optional[bool] = None,
):
"""Initializes a new MuJoCo environment.
Args:
model_path: The path to the MuJoCo XML file.
frame_skip: The number of simulation steps per environment step. On
hardware this influences the duration of each environment step.
camera_settings: Settings to initialize the simulation camera. This
can contain the keys `distance`, `azimuth`, and `elevation`.
use_dm_backend: A boolean to switch between mujoco-py and dm_control.
"""
self._seed()
if not os.path.isfile(model_path):
raise IOError(
"[MujocoEnv]: Model path does not exist: {}".format(model_path)
)
self.frame_skip = frame_skip
self.sim_robot = MujocoSimRobot(
model_path,
use_dm_backend=use_dm_backend or USE_DM_CONTROL,
camera_settings=camera_settings,
)
self.sim = self.sim_robot.sim
self.model = self.sim_robot.model
self.data = self.sim_robot.data
self.metadata = {
"render.modes": ["human", "rgb_array", "depth_array"],
"video.frames_per_second": int(np.round(1.0 / self.dt)),
}
self.mujoco_render_frames = False
self.init_qpos = self.data.qpos.ravel().copy()
self.init_qvel = self.data.qvel.ravel().copy()
observation, _reward, done, _info = self.step(np.zeros(self.model.nu))
assert not done
bounds = self.model.actuator_ctrlrange.copy()
act_upper = bounds[:, 1]
act_lower = bounds[:, 0]
# Define the action and observation spaces.
# HACK: MJRL is still using gym 0.9.x so we can't provide a dtype.
try:
self.action_space = spaces.Box(act_lower, act_upper, dtype=np.float32)
            if isinstance(observation, collections.abc.Mapping):
self.observation_space = spaces.Dict(
{
k: spaces.Box(-np.inf, np.inf, shape=v.shape, dtype=np.float32)
for k, v in observation.items()
}
)
else:
self.obs_dim = (
np.sum([o.size for o in observation])
if type(observation) is tuple
else observation.size
)
self.observation_space = spaces.Box(
-np.inf, np.inf, observation.shape, dtype=np.float32
)
except TypeError:
# Fallback case for gym 0.9.x
self.action_space = spaces.Box(act_lower, act_upper)
assert not isinstance(
                observation, collections.abc.Mapping
), "gym 0.9.x does not support dictionary observation."
self.obs_dim = (
np.sum([o.size for o in observation])
if type(observation) is tuple
else observation.size
)
self.observation_space = spaces.Box(-np.inf, np.inf, observation.shape)
def seed(self, seed=None): # Compatibility with new gym
return self._seed(seed)
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# methods to override:
# ----------------------------
def reset_model(self):
"""Reset the robot degrees of freedom (qpos and qvel).
Implement this in each subclass.
"""
raise NotImplementedError
# -----------------------------
def reset(self): # compatibility with new gym
return self._reset()
def _reset(self):
self.sim.reset()
self.sim.forward()
ob = self.reset_model()
return ob
def set_state(self, qpos, qvel):
assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
state = np.concatenate([qpos, qvel])
# state = self.sim.get_state()
# for i in range(self.model.nq):
# state.qpos[i] = qpos[i]
# for i in range(self.model.nv):
# state.qvel[i] = qvel[i]
self.sim.set_state(state)
self.sim.forward()
@property
def dt(self):
return self.model.opt.timestep * self.frame_skip
def do_simulation(self, ctrl, n_frames):
for i in range(self.model.nu):
self.sim.data.ctrl[i] = ctrl[i]
for _ in range(n_frames):
self.sim.step()
# TODO(michaelahn): Remove this; render should be called separately.
if self.mujoco_render_frames is True:
self.mj_render()
def render(
self,
mode="human",
width=DEFAULT_RENDER_SIZE,
height=DEFAULT_RENDER_SIZE,
camera_id=-1,
):
"""Renders the environment.
Args:
mode: The type of rendering to use.
- 'human': Renders to a graphical window.
- 'rgb_array': Returns the RGB image as an np.ndarray.
- 'depth_array': Returns the depth image as an np.ndarray.
width: The width of the rendered image. This only affects offscreen
rendering.
height: The height of the rendered image. This only affects
offscreen rendering.
camera_id: The ID of the camera to use. By default, this is the free
camera. If specified, only affects offscreen rendering.
"""
if mode == "human":
self.sim_robot.renderer.render_to_window()
elif mode == "rgb_array":
assert width and height
return self.sim_robot.renderer.render_offscreen(
width, height, mode=RenderMode.RGB, camera_id=camera_id
)
elif mode == "depth_array":
assert width and height
return self.sim_robot.renderer.render_offscreen(
width, height, mode=RenderMode.DEPTH, camera_id=camera_id
)
else:
raise NotImplementedError(mode)
def close(self):
self.sim_robot.close()
def mj_render(self):
"""Backwards compatibility with MJRL."""
self.render(mode="human")
def state_vector(self):
state = self.sim.get_state()
return np.concatenate([state.qpos.flat, state.qvel.flat])
| 7,542 | 32.977477 | 87 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/robot_env.py | """Base class for robotics environments."""
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
from typing import Dict, Optional
import numpy as np
from d4rl_alt.kitchen.adept_envs import mujoco_env
from d4rl_alt.kitchen.adept_envs.base_robot import BaseRobot
from d4rl_alt.kitchen.adept_envs.utils.configurable import import_class_from_path
from d4rl_alt.kitchen.adept_envs.utils.constants import MODELS_PATH
class RobotEnv(mujoco_env.MujocoEnv):
"""Base environment for all adept robots."""
# Mapping of robot name to fully qualified class path.
# e.g. 'robot': 'adept_envs.dclaw.robot.Robot'
# Subclasses should override this to specify the Robot classes they support.
ROBOTS = {}
# Mapping of device path to the calibration file to use. If the device path
# is not found, the 'default' key is used.
# This can be overriden by subclasses.
CALIBRATION_PATHS = {}
def __init__(
self,
model_path: str,
robot: BaseRobot,
frame_skip: int,
camera_settings: Optional[Dict] = None,
):
"""Initializes a robotics environment.
Args:
model_path: The path to the model to run. Relative paths will be
interpreted as relative to the 'adept_models' folder.
robot: The Robot object to use.
frame_skip: The number of simulation steps per environment step. On
hardware this influences the duration of each environment step.
camera_settings: Settings to initialize the simulation camera. This
can contain the keys `distance`, `azimuth`, and `elevation`.
"""
self._robot = robot
# Initial pose for first step.
self.desired_pose = np.zeros(self.n_jnt)
if not model_path.startswith("/"):
model_path = os.path.abspath(os.path.join(MODELS_PATH, model_path))
self.remote_viz = None
try:
from adept_envs.utils.remote_viz import RemoteViz
self.remote_viz = RemoteViz(model_path)
except ImportError:
pass
self._initializing = True
super(RobotEnv, self).__init__(
model_path, frame_skip, camera_settings=camera_settings
)
self._initializing = False
@property
def robot(self):
return self._robot
@property
def n_jnt(self):
return self._robot.n_jnt
@property
def n_obj(self):
return self._robot.n_obj
@property
def skip(self):
"""Alias for frame_skip. Needed for MJRL."""
return self.frame_skip
@property
def initializing(self):
return self._initializing
def close_env(self):
if self._robot is not None:
self._robot.close()
def make_robot(
self,
n_jnt,
n_obj=0,
is_hardware=False,
device_name=None,
legacy=False,
**kwargs
):
"""Creates a new robot for the environment.
Args:
n_jnt: The number of joints in the robot.
n_obj: The number of object joints in the robot environment.
is_hardware: Whether to run on hardware or not.
device_name: The device path for the robot hardware.
legacy: If true, runs using direct dynamixel communication rather
than DDS.
kwargs: See BaseRobot for other parameters.
Returns:
A Robot object.
"""
if not self.ROBOTS:
raise NotImplementedError("Subclasses must override ROBOTS.")
if is_hardware and not device_name:
raise ValueError("Must provide device name if running on hardware.")
robot_name = "dds_robot" if not legacy and is_hardware else "robot"
if robot_name not in self.ROBOTS:
raise KeyError(
"Unsupported robot '{}', available: {}".format(
robot_name, list(self.ROBOTS.keys())
)
)
cls = import_class_from_path(self.ROBOTS[robot_name])
calibration_path = None
if self.CALIBRATION_PATHS:
if not device_name:
calibration_name = "default"
elif device_name not in self.CALIBRATION_PATHS:
print(
'Device "{}" not in CALIBRATION_PATHS; using default.'.format(
device_name
)
)
calibration_name = "default"
else:
calibration_name = device_name
calibration_path = self.CALIBRATION_PATHS[calibration_name]
if not os.path.isfile(calibration_path):
raise OSError(
"Could not find calibration file at: {}".format(calibration_path)
)
return cls(
n_jnt,
n_obj,
is_hardware=is_hardware,
device_name=device_name,
calibration_path=calibration_path,
**kwargs
)
| 5,622 | 30.589888 | 85 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/franka/__init__.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gym.envs.registration import register
# Relax the robot
register(
id="kitchen_relax-v1",
entry_point="adept_envs.franka.kitchen_multitask_v0:KitchenTaskRelaxV1",
max_episode_steps=280,
)
| 801 | 31.08 | 76 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/franka/kitchen_multitask_v0.py | """ Kitchen environment for long horizon manipulation """
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
# JointImpedanceController is only needed when control_mode == "vices"; the import is left
# commented out to avoid a hard dependency on robosuite_vices.
#from robosuite_vices.controllers.arm_controller import JointImpedanceController
# import cv2
import mujoco_py
import numpy as np
import quaternion
from dm_control.mujoco import engine
from gym import spaces
from d4rl_alt.kitchen.adept_envs import robot_env
class KitchenV0(robot_env.RobotEnv):
CALIBRATION_PATHS = {
"default": os.path.join(os.path.dirname(__file__), "robot/franka_config.xml")
}
# Converted to velocity actuation
ROBOTS = {"robot": "d4rl_alt.kitchen.adept_envs.franka.robot.franka_robot:Robot_VelAct"}
EE_CTRL_MODEL = os.path.join(
os.path.dirname(__file__), "../franka/assets/franka_kitchen_ee_ctrl.xml"
)
JOINT_POSITION_CTRL_MODEL = os.path.join(
os.path.dirname(__file__),
"../franka/assets/franka_kitchen_joint_position_ctrl.xml",
)
TORQUE_CTRL_MODEL = os.path.join(
os.path.dirname(__file__),
"../franka/assets/franka_kitchen_torque_ctrl.xml",
)
CTLR_MODES_DICT = dict(
primitives=dict(
model=EE_CTRL_MODEL,
robot={
"robot": "d4rl_alt.kitchen.adept_envs.franka.robot.franka_robot:Robot_Unconstrained"
},
),
end_effector=dict(
model=EE_CTRL_MODEL,
robot={
"robot": "d4rl_alt.kitchen.adept_envs.franka.robot.franka_robot:Robot_Unconstrained"
},
),
torque=dict(
model=TORQUE_CTRL_MODEL,
robot={
"robot": "d4rl_alt.kitchen.adept_envs.franka.robot.franka_robot:Robot_Unconstrained"
},
),
joint_position=dict(
model=JOINT_POSITION_CTRL_MODEL,
robot={
"robot": "d4rl_alt.kitchen.adept_envs.franka.robot.franka_robot:Robot_PosAct"
},
),
joint_velocity=dict(
model=JOINT_POSITION_CTRL_MODEL,
robot={
"robot": "d4rl_alt.kitchen.adept_envs.franka.robot.franka_robot:Robot_VelAct"
},
),
vices=dict(
model=JOINT_POSITION_CTRL_MODEL,
robot={
"robot": "d4rl_alt.kitchen.adept_envs.franka.robot.franka_robot:Robot_Unconstrained"
},
)
)
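    # CTLR_MODES_DICT selects both the MJCF model and the robot wrapper for a control mode.
    # A hedged sketch (assuming default constructor arguments):
    #   KitchenV0(control_mode="joint_velocity")  # joint-position model + Robot_VelAct
    #   KitchenV0(control_mode="end_effector")    # mocap/EE model + Robot_Unconstrained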
N_DOF_ROBOT = 9
N_DOF_OBJECT = 21
def __init__(
self,
robot_params={},
frame_skip=40,
image_obs=False,
imwidth=64,
imheight=64,
fixed_schema=True,
action_scale=1,
view=1,
use_wrist_cam=False,
wrist_cam_concat_with_fixed_view=False,
proprioception=False,
start_image_concat_with_image_obs=False,
normalize_proprioception_obs=False,
use_workspace_limits=True,
control_mode="end_effector",
):
self.control_mode = control_mode
self.MODEL = self.CTLR_MODES_DICT[self.control_mode]["model"]
self.ROBOTS = self.CTLR_MODES_DICT[self.control_mode]["robot"]
self.episodic_cumulative_reward = 0
self.obs_dict = {}
        # self.robot_noise_ratio = 0.1  # 10% as per robot_config specs
        self.robot_noise_ratio = 0.0  # observation noise disabled (robot_config default is 10%)
self.goal = np.zeros((30,))
self.step_count = 0
self.view = view
self.use_wrist_cam = use_wrist_cam
self.wrist_cam_concat_with_fixed_view = wrist_cam_concat_with_fixed_view
self.start_image_concat_with_image_obs = start_image_concat_with_image_obs
self.primitive_idx_to_name = {
0: "angled_x_y_grasp",
1: "move_delta_ee_pose",
2: "rotate_about_y_axis",
3: "lift",
4: "drop",
5: "move_left",
6: "move_right",
7: "move_forward",
8: "move_backward",
9: "open_gripper",
10: "close_gripper",
11: "rotate_about_x_axis",
}
self.primitive_name_to_func = dict(
angled_x_y_grasp=self.angled_x_y_grasp,
move_delta_ee_pose=self.move_delta_ee_pose,
rotate_about_y_axis=self.rotate_about_y_axis,
lift=self.lift,
drop=self.drop,
move_left=self.move_left,
move_right=self.move_right,
move_forward=self.move_forward,
move_backward=self.move_backward,
open_gripper=self.open_gripper,
close_gripper=self.close_gripper,
rotate_about_x_axis=self.rotate_about_x_axis,
)
self.primitive_name_to_action_idx = dict(
angled_x_y_grasp=[0, 1, 2],
move_delta_ee_pose=[3, 4, 5],
rotate_about_y_axis=6,
lift=7,
drop=8,
move_left=9,
move_right=10,
move_forward=11,
move_backward=12,
rotate_about_x_axis=13,
open_gripper=[], # doesn't matter
close_gripper=[], # doesn't matter
)
self.max_arg_len = 14
self.num_primitives = len(self.primitive_name_to_func)
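        # Primitive actions are flat vectors: with fixed_schema=False the first num_primitives
        # (=12) entries one-hot select the primitive via argmax, and the remaining max_arg_len
        # (=14) entries hold every primitive's arguments at the slots given by
        # primitive_name_to_action_idx. A hedged sketch (control_mode="primitives"):
        #   a = np.zeros(12 + 14)
        #   a[1] = 1.0                           # select "move_delta_ee_pose"
        #   a[12 + 3:12 + 6] = [0.1, 0.0, 0.05]  # its (dx, dy, dz) arguments
        #   env.step(a)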
self.image_obs = image_obs
self.imwidth = imwidth
self.imheight = imheight
self.fixed_schema = fixed_schema
self.action_scale = action_scale
self.min_ee_pos = np.array([-0.9, 0, 1.5])
self.max_ee_pos = np.array([0.7, 1.5, 3.25])
self.num_decimals_for_coverage_grid = 2
max_delta = self.max_ee_pos - self.min_ee_pos
grid_size = (max_delta * 10 ** self.num_decimals_for_coverage_grid).astype(int)
self.coverage_grid = np.zeros(
(grid_size[0], grid_size[1], grid_size[2]), dtype=np.uint8
)
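        # The coverage grid discretises the reachable end-effector workspace at 1e-2 m
        # resolution: grid_size = (max_ee_pos - min_ee_pos) * 10**2, e.g. the x extent
        # 0.7 - (-0.9) = 1.6 m maps to 160 cells; _set_action marks the visited cell.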
self.proprioception = proprioception
self.normalize_proprioception_obs = normalize_proprioception_obs
self.use_workspace_limits = use_workspace_limits
super().__init__(
self.MODEL,
robot=self.make_robot(
n_jnt=self.N_DOF_ROBOT, # root+robot_jnts
n_obj=self.N_DOF_OBJECT,
**robot_params
),
frame_skip=frame_skip,
camera_settings=dict(
distance=2.2, lookat=[-0.2, 0.5, 2.0], azimuth=70, elevation=-35
),
)
if self.control_mode in ["primitives", "end_effector"]:
self.reset_mocap_welds(self.sim)
self.sim.forward()
gripper_target = (
np.array([-0.498, 0.005, -0.431 + 0.01]) + self.get_ee_pose()
)
gripper_rotation = np.array([1.0, 0.0, 1.0, 0.0])
self.set_mocap_pos("mocap", gripper_target)
self.set_mocap_quat("mocap", gripper_rotation)
for _ in range(10):
self.sim.step()
self.init_qpos = self.sim.model.key_qpos[0].copy()
# For the microwave kettle slide hinge
self.init_qpos = np.array(
[
1.48388023e-01,
-1.76848573e00,
1.84390296e00,
-2.47685760e00,
2.60252026e-01,
7.12533105e-01,
1.59515394e00,
4.79267505e-02,
3.71350919e-02,
-2.66279850e-04,
-5.18043486e-05,
3.12877220e-05,
-4.51199853e-05,
-3.90842156e-06,
-4.22629655e-05,
6.28065475e-05,
4.04984708e-05,
4.62730939e-04,
-2.26906415e-04,
-4.65501369e-04,
-6.44129196e-03,
-1.77048263e-03,
1.08009684e-03,
-2.69397440e-01,
3.50383255e-01,
1.61944683e00,
1.00618764e00,
4.06395120e-03,
-6.62095997e-03,
-2.68278933e-04,
]
)
self.init_qvel = self.sim.model.key_qvel[0].copy()
obs_upper = 8.0 * np.ones(self.obs_dim)
obs_lower = -obs_upper
self.observation_space = spaces.Box(obs_lower, obs_upper, dtype=np.float32)
if self.image_obs:
self.imlength = imwidth * imheight
self.imlength *= 3
if (
self.wrist_cam_concat_with_fixed_view
or self.start_image_concat_with_image_obs
):
self.imlength *= 2
self.image_shape = (6, imheight, imwidth)
else:
self.image_shape = (3, imheight, imwidth)
self.observation_space = spaces.Box(
0, 255, (self.imlength,), dtype=np.uint8
)
if self.proprioception:
obs_upper = 8.0 * np.ones(9 + 7)
obs_lower = -obs_upper
self.proprioception_obs_space = spaces.Box(obs_lower, obs_upper)
low = np.concatenate(
(self.observation_space.low, self.proprioception_obs_space.low)
)
high = np.concatenate(
(self.observation_space.high, self.proprioception_obs_space.high)
)
self.observation_space = spaces.Box(
low,
high,
dtype=np.float32,
)
if self.control_mode in ["joint_position", "joint_velocity", "torque"]:
self.act_mid = np.zeros(self.N_DOF_ROBOT)
self.act_amp = 2.0 * np.ones(self.N_DOF_ROBOT)
act_lower = -1 * np.ones((self.N_DOF_ROBOT,))
act_upper = 1 * np.ones((self.N_DOF_ROBOT,))
self.action_space = spaces.Box(act_lower, act_upper)
if self.control_mode == "end_effector":
# 3 for xyz pose, 3 for xyz angle, 1 for gripper
act_lower = -1 * np.ones((7,))
act_upper = 1 * np.ones((7,))
self.action_space = spaces.Box(act_lower, act_upper)
if self.control_mode == 'vices':
control_range = np.ones(9)
ctrl_ratio = 1.0
control_freq = 0.5 * ctrl_ratio
damping_max = 2
damping_min = 0.1
kp_max = 100
kp_min = 0.05
self.sim.model.opt.timestep = .01
self.controller = JointImpedanceController(
control_range, control_freq, kp_max, kp_min, damping_max, damping_min
)
self.joint_index_vel = np.arange(9)
self.joint_index_pos = np.arange(9)
self.controller.update_mass_matrix(self.sim, self.joint_index_vel)
self.controller.update_model(
self.sim, self.joint_index_pos, self.joint_index_vel
)
high = np.ones(27)
low = -high
self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)
def get_idx_from_primitive_name(self, primitive_name):
for idx, pn in self.primitive_idx_to_name.items():
if pn == primitive_name:
return idx
def get_site_xpos(self, name):
id = self.sim.model.site_name2id(name)
return self.sim.data.site_xpos[id]
def get_mocap_pos(self, name):
body_id = self.sim.model.body_name2id(name)
mocap_id = self.sim.model.body_mocapid[body_id]
return self.sim.data.mocap_pos[mocap_id]
def set_mocap_pos(self, name, value):
body_id = self.sim.model.body_name2id(name)
mocap_id = self.sim.model.body_mocapid[body_id]
self.sim.data.mocap_pos[mocap_id] = value
def get_mocap_quat(self, name):
body_id = self.sim.model.body_name2id(name)
mocap_id = self.sim.model.body_mocapid[body_id]
return self.sim.data.mocap_quat[mocap_id]
def set_mocap_quat(self, name, value):
body_id = self.sim.model.body_name2id(name)
mocap_id = self.sim.model.body_mocapid[body_id]
self.sim.data.mocap_quat[mocap_id] = value
def _get_reward_n_score(self, obs_dict):
return 0
def ctrl_set_action(self, sim, action):
self.data.ctrl[7] = action[-2]
self.data.ctrl[8] = action[-1]
def mocap_set_action(self, sim, action):
if sim.model.nmocap > 0:
action, _ = np.split(action, (sim.model.nmocap * 7,))
action = action.reshape(sim.model.nmocap, 7)
pos_delta = action[:, :3]
quat_delta = action[:, 3:]
self.reset_mocap2body_xpos(sim)
sim.data.mocap_pos[:] = sim.data.mocap_pos + pos_delta
sim.data.mocap_quat[:] = sim.data.mocap_quat + quat_delta
def reset_mocap_welds(self, sim):
if sim.model.nmocap > 0 and sim.model.eq_data is not None:
for i in range(sim.model.eq_data.shape[0]):
if sim.model.eq_type[i] == mujoco_py.const.EQ_WELD:
sim.model.eq_data[i, :] = np.array(
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
)
sim.forward()
def reset_mocap2body_xpos(self, sim):
if (
sim.model.eq_type is None
or sim.model.eq_obj1id is None
or sim.model.eq_obj2id is None
):
return
for eq_type, obj1_id, obj2_id in zip(
sim.model.eq_type, sim.model.eq_obj1id, sim.model.eq_obj2id
):
if eq_type != mujoco_py.const.EQ_WELD:
continue
mocap_id = sim.model.body_mocapid[obj1_id]
if mocap_id != -1:
body_idx = obj2_id
else:
mocap_id = sim.model.body_mocapid[obj2_id]
body_idx = obj1_id
assert mocap_id != -1
sim.data.mocap_pos[mocap_id][:] = sim.data.body_xpos[body_idx]
sim.data.mocap_quat[mocap_id][:] = sim.data.body_xquat[body_idx]
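    # The mocap body is welded to the gripper: reset_mocap_welds zeroes the weld's relative
    # pose and reset_mocap2body_xpos snaps the mocap back onto the hand before each delta is
    # applied, so commanding mocap_pos/mocap_quat drags the arm through the weld constraint.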
def _set_action(self, action):
assert action.shape == (9,)
action = action.copy()
pos_ctrl, rot_ctrl, gripper_ctrl = action[:3], action[3:7], action[7:9]
pos_ctrl *= 0.05
rot_ctrl *= 0.05
assert gripper_ctrl.shape == (2,)
action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])
# Apply action to simulation.
self.ctrl_set_action(self.sim, action)
self.mocap_set_action(self.sim, action)
# update coverage grid
xpos = self.get_ee_pose()
xpos_rounded = np.around(xpos, self.num_decimals_for_coverage_grid)
delta = xpos_rounded - self.min_ee_pos
indices = (delta * 10 ** (self.num_decimals_for_coverage_grid)).astype(int)
indices = np.clip(
indices, 0, self.coverage_grid.shape[0] - 1
) # make sure all valid indices, clip any to min/max of range
self.coverage_grid[indices[0]][indices[1]][indices[2]] = 1
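    # _set_action interprets the 9-dim command as (xyz delta, quaternion delta, two gripper
    # joint targets): the pose deltas are scaled by 0.05 and applied to the mocap body, the
    # last two entries drive the gripper actuators (ctrl[7:9]), and the visited end-effector
    # cell is recorded in coverage_grid.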
def get_ee_pose(self):
return self.get_site_xpos("end_effector")
def get_ee_quat(self):
return self.sim.data.body_xquat[10]
def rpy_to_quat(self, rpy):
q = quaternion.from_euler_angles(rpy)
return np.array([q.x, q.y, q.z, q.w])
def quat_to_rpy(self, q):
q = quaternion.quaternion(q[0], q[1], q[2], q[3])
return quaternion.as_euler_angles(q)
def convert_xyzw_to_wxyz(self, q):
return np.array([q[3], q[0], q[1], q[2]])
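    # Quaternion conventions: rpy_to_quat returns (x, y, z, w) while MuJoCo's body_xquat uses
    # (w, x, y, z); convert_xyzw_to_wxyz bridges the two. E.g. the identity rotation gives
    # [0, 0, 0, 1], which becomes [1, 0, 0, 0]. Note the `quaternion` package's Euler-angle
    # convention is not strict roll-pitch-yaw despite the rpy naming here.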
def close_gripper(
self,
        unused=None,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
for _ in range(200):
self._set_action(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
self.sim.step()
if render_every_step:
if render_mode == "rgb_array":
self.img_array.append(
self.render(
render_mode,
render_im_shape[0],
render_im_shape[1],
original=True,
)
)
else:
self.render(
render_mode,
render_im_shape[0],
render_im_shape[1],
original=True,
)
def open_gripper(
self,
        unused=None,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
for _ in range(200):
self._set_action(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.04, 0.04]))
self.sim.step()
if render_every_step:
if render_mode == "rgb_array":
self.img_array.append(
self.render(
render_mode,
render_im_shape[0],
render_im_shape[1],
original=True,
)
)
else:
self.render(
render_mode,
render_im_shape[0],
render_im_shape[1],
original=True,
)
def rotate_ee(
self,
rpy,
repeats=200,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
gripper = self.sim.data.qpos[7:9]
for _ in range(repeats):
quat = self.rpy_to_quat(rpy)
quat_delta = self.convert_xyzw_to_wxyz(quat) - self.sim.data.body_xquat[10]
self._set_action(
np.array(
[
0.0,
0.0,
0.0,
quat_delta[0],
quat_delta[1],
quat_delta[2],
quat_delta[3],
gripper[0],
gripper[1],
]
)
)
self.sim.step()
def goto_pose(
self,
pose,
repeats=300,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
# clamp the pose within workspace limits:
gripper = self.sim.data.qpos[7:9]
for _ in range(repeats):
if self.use_workspace_limits:
pose = np.clip(pose, self.min_ee_pos, self.max_ee_pos)
self.reset_mocap2body_xpos(self.sim)
delta = pose - self.get_ee_pose()
self._set_action(
np.array(
[
delta[0],
delta[1],
delta[2],
0.0,
0.0,
0.0,
0.0,
gripper[0],
gripper[1],
]
)
)
self.sim.step()
def rotate_about_x_axis(
self,
angle,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
rotation = self.quat_to_rpy(self.sim.data.body_xquat[10]) - np.array(
[angle, 0, 0]
)
self.rotate_ee(
rotation,
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def rotate_about_y_axis(
self,
angle,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
angle = np.clip(angle, -np.pi, np.pi)
rotation = self.quat_to_rpy(self.sim.data.body_xquat[10]) - np.array(
[0, 0, angle],
)
self.rotate_ee(
rotation,
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def angled_x_y_grasp(
self,
angle_and_xy,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
angle, x_dist, y_dist = angle_and_xy
angle = np.clip(angle, -np.pi, np.pi)
rotation = self.quat_to_rpy(self.sim.data.body_xquat[10]) - np.array(
[angle, 0, 0]
)
self.rotate_ee(
rotation,
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
self.goto_pose(
self.get_ee_pose() + np.array([x_dist, 0.0, 0]),
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
self.goto_pose(
self.get_ee_pose() + np.array([0.0, y_dist, 0]),
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
self.close_gripper(
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def move_delta_ee_pose(
self,
pose,
repeats=300,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
self.goto_pose(
self.get_ee_pose() + pose,
repeats=repeats,
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def lift(
self,
z_dist,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
z_dist = np.maximum(z_dist, 0.0)
self.goto_pose(
self.get_ee_pose() + np.array([0.0, 0.0, z_dist]),
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def drop(
self,
z_dist,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
z_dist = np.maximum(z_dist, 0.0)
self.goto_pose(
self.get_ee_pose() + np.array([0.0, 0.0, -z_dist]),
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def move_left(
self,
x_dist,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
x_dist = np.maximum(x_dist, 0.0)
self.goto_pose(
self.get_ee_pose() + np.array([-x_dist, 0.0, 0.0]),
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def move_right(
self,
x_dist,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
x_dist = np.maximum(x_dist, 0.0)
self.goto_pose(
self.get_ee_pose() + np.array([x_dist, 0.0, 0.0]),
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def move_forward(
self,
y_dist,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
y_dist = np.maximum(y_dist, 0.0)
self.goto_pose(
self.get_ee_pose() + np.array([0.0, y_dist, 0.0]),
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def move_backward(
self,
y_dist,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
y_dist = np.maximum(y_dist, 0.0)
self.goto_pose(
self.get_ee_pose() + np.array([0.0, -y_dist, 0.0]),
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
def break_apart_action(self, a):
broken_a = {}
for k, v in self.primitive_name_to_action_idx.items():
broken_a[k] = a[v]
return broken_a
def act(
self,
a,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
if not np.any(a):
# all zeros should be a no-op!!!
return
if not self.initializing:
a = a * self.action_scale
a = np.clip(a, self.action_space.low, self.action_space.high)
if self.fixed_schema:
primitive_args = a
primitive_name = self.step_to_primitive_name[self.step_count]
else:
primitive_idx, primitive_args = (
np.argmax(a[: self.num_primitives]),
a[self.num_primitives :],
)
primitive_name = self.primitive_idx_to_name[primitive_idx]
if primitive_name != "no_op":
primitive_name_to_action_dict = self.break_apart_action(primitive_args)
primitive_action = primitive_name_to_action_dict[primitive_name]
primitive = self.primitive_name_to_func[primitive_name]
primitive(
primitive_action,
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
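    # act() executes one primitive per env.step: with fixed_schema the primitive name comes
    # from step_to_primitive_name[step_count] and the whole action is treated as arguments;
    # otherwise argmax over the first num_primitives entries picks the primitive and
    # break_apart_action slices out its arguments before the low-level controller runs.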
def update(self):
self.controller.update_mass_matrix(self.sim, self.joint_index_vel)
self.controller.update_model(self.sim, self.joint_index_pos, self.joint_index_vel)
def step(
self,
a,
render_every_step=False,
render_mode="rgb_array",
render_im_shape=(1000, 1000),
):
if self.control_mode in [
"joint_position",
"joint_velocity",
"torque",
"end_effector",
]:
a = np.clip(a, -1.0, 1.0)
if self.control_mode == "end_effector":
if not self.initializing:
rotation = self.quat_to_rpy(self.sim.data.body_xquat[10]) - np.array(a[3:6])
for _ in range(self.frame_skip):
quat = self.rpy_to_quat(rotation)
quat_delta = self.convert_xyzw_to_wxyz(quat) - self.sim.data.body_xquat[10]
self._set_action(np.concatenate([ a[:3], quat_delta, [a[-1], -a[-1]] ]))
self.sim.step()
elif self.control_mode == "vices":
if not self.initializing:
for i in range(int(self.controller.interpolation_steps)):
self.update()
action = self.controller.action_to_torques(a, i == 0)
self.sim.data.ctrl[:] = action
self.sim.step()
else:
if not self.initializing and self.control_mode == "joint_velocity":
a = self.act_mid + a * self.act_amp # mean center and scale
self.robot.step(
self, a, step_duration=self.skip * self.model.opt.timestep
)
else:
if not self.initializing:
if render_every_step and render_mode == "rgb_array":
self.img_array = []
self.act(
a,
render_every_step=render_every_step,
render_mode=render_mode,
render_im_shape=render_im_shape,
)
obs = self._get_obs()
# rewards
reward_dict, score = self._get_reward_n_score(self.obs_dict)
# termination
done = False
# finalize step
env_info = {
"time": self.obs_dict["t"],
"score": score,
}
return obs, reward_dict["r_total"], done, env_info
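    # step() routes the action by control_mode: joint_position / joint_velocity / torque go
    # through robot.step (joint_velocity is additionally rescaled by act_mid/act_amp),
    # end_effector turns (xyz delta, rpy delta, gripper) into mocap updates over frame_skip
    # substeps, vices runs the impedance controller, and primitives dispatches to act() above.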
def _get_obs(self):
t, qp, qv, obj_qp, obj_qv = self.robot.get_obs(
self, robot_noise_ratio=self.robot_noise_ratio
)
self.obs_dict = {}
self.obs_dict["t"] = t
self.obs_dict["qp"] = qp
self.obs_dict["qv"] = qv
self.obs_dict["obj_qp"] = obj_qp
self.obs_dict["obj_qv"] = obj_qv
self.obs_dict["goal"] = self.goal
if self.image_obs:
img = self.render(mode="rgb_array")
img = img.transpose(2, 0, 1).flatten()
return img
else:
return np.concatenate(
[self.obs_dict["qp"], self.obs_dict["obj_qp"], self.obs_dict["goal"]]
)
def reset_model(self):
reset_pos = self.init_qpos[:].copy()
reset_vel = self.init_qvel[:].copy()
self.robot.reset(self, reset_pos, reset_vel)
self.sim.forward()
if self.control_mode in ["primitives", "end_effector"]:
self.reset_mocap2body_xpos(self.sim)
self.goal = self._get_task_goal() # sample a new goal on reset
self.step_count = 0
# if self.sim_robot._use_dm_backend:
# imwidth = self.imwidth
# imheight = self.imheight
# camera = engine.MovableCamera(self.sim, imwidth, imheight)
# camera.set_pose(
# distance=2.2, lookat=[-0.2, 0.5, 2.0], azimuth=70, elevation=-35
# )
# self.start_img = camera.render()
# else:
# self.start_img = self.sim_robot.renderer.render_offscreen(
# self.imwidth,
# self.imheight,
# )
return self._get_obs()
def evaluate_success(self, paths):
# score
mean_score_per_rollout = np.zeros(shape=len(paths))
for idx, path in enumerate(paths):
mean_score_per_rollout[idx] = np.mean(path["env_infos"]["score"])
mean_score = np.mean(mean_score_per_rollout)
# success percentage
num_success = 0
num_paths = len(paths)
for path in paths:
num_success += bool(path["env_infos"]["rewards"]["bonus"][-1])
success_percentage = num_success * 100.0 / num_paths
# fuse results
return np.sign(mean_score) * (
1e6 * round(success_percentage, 2) + abs(mean_score)
)
def close(self):
self.robot.close()
def set_goal(self, goal):
self.goal = goal
def _get_task_goal(self):
return self.goal
# Only include goal
@property
def goal_space(self):
len_obs = self.observation_space.low.shape[0]
env_lim = np.abs(self.observation_space.low[0])
return spaces.Box(
low=-env_lim, high=env_lim, shape=(len_obs // 2,), dtype=np.float32
)
def convert_to_active_observation(self, observation):
return observation
def get_env_state(self):
joint_state = self.sim.get_state()
mocap_state = self.data.mocap_pos, self.data.mocap_quat
state = self.step_count, joint_state, mocap_state
return copy.deepcopy(state)
def set_env_state(self, state):
step_count, joint_state, mocap_state = state
self.sim.set_state(joint_state)
mocap_pos, mocap_quat = mocap_state
self.set_mocap_pos("mocap", mocap_pos)
self.set_mocap_quat("mocap", mocap_quat)
self.sim.forward()
self.step_count = step_count
class KitchenTaskRelaxV1(KitchenV0):
"""Kitchen environment with proper camera and goal setup"""
def __init__(self, **kwargs):
super(KitchenTaskRelaxV1, self).__init__(**kwargs)
def _get_reward_n_score(self, obs_dict):
reward_dict = {}
reward_dict["true_reward"] = 0.0
reward_dict["bonus"] = 0.0
reward_dict["r_total"] = 0.0
score = 0.0
return reward_dict, score
| 32,746 | 33.325996 | 100 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/franka/robot/__init__.py | 0 | 0 | 0 | py |
|
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/franka/robot/franka_robot.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import getpass
import os
import time
# observations structure
from collections import namedtuple
import click
import numpy as np
from termcolor import cprint
from d4rl_alt.kitchen.adept_envs import base_robot
from d4rl_alt.kitchen.adept_envs.utils.config import (
get_config_root_node,
read_config_from_node,
)
observation = namedtuple(
"observation", ["time", "qpos_robot", "qvel_robot", "qpos_object", "qvel_object"]
)
franka_interface = ""
class Robot(base_robot.BaseRobot):
"""
Abstracts away the differences between the robot_simulation and robot_hardware
"""
def __init__(self, *args, **kwargs):
super(Robot, self).__init__(*args, **kwargs)
global franka_interface
# Read robot configurations
self._read_specs_from_config(robot_configs=self.calibration_path)
# Robot: Handware
if self.is_hardware:
            if franka_interface == "":
raise NotImplementedError()
from handware.franka import franka
# initialize franka
self.franka_interface = franka()
franka_interface = self.franka_interface
cprint(
"Initializing %s Hardware (Status:%d)"
% (self.robot_name, self.franka.okay(self.robot_hardware_dof)),
"white",
"on_grey",
)
else:
self.franka_interface = franka_interface
cprint("Reusing previours Franka session", "white", "on_grey")
# Robot: Simulation
else:
self.robot_name = "Franka"
# cprint("Initializing %s sim" % self.robot_name, "white", "on_grey")
# Robot's time
self.time_start = time.time()
self.time = time.time() - self.time_start
self.time_render = -1 # time of rendering
# read specs from the calibration file
def _read_specs_from_config(self, robot_configs):
root, root_name = get_config_root_node(config_file_name=robot_configs)
self.robot_name = root_name[0]
self.robot_mode = np.zeros(self.n_dofs, dtype=int)
self.robot_mj_dof = np.zeros(self.n_dofs, dtype=int)
self.robot_hardware_dof = np.zeros(self.n_dofs, dtype=int)
self.robot_scale = np.zeros(self.n_dofs, dtype=float)
self.robot_offset = np.zeros(self.n_dofs, dtype=float)
self.robot_pos_bound = np.zeros([self.n_dofs, 2], dtype=float)
self.robot_vel_bound = np.zeros([self.n_dofs, 2], dtype=float)
self.robot_pos_noise_amp = np.zeros(self.n_dofs, dtype=float)
self.robot_vel_noise_amp = np.zeros(self.n_dofs, dtype=float)
# print("Reading configurations for %s" % self.robot_name)
for i in range(self.n_dofs):
self.robot_mode[i] = read_config_from_node(
root, "qpos" + str(i), "mode", int
)
self.robot_mj_dof[i] = read_config_from_node(
root, "qpos" + str(i), "mj_dof", int
)
self.robot_hardware_dof[i] = read_config_from_node(
root, "qpos" + str(i), "hardware_dof", int
)
self.robot_scale[i] = read_config_from_node(
root, "qpos" + str(i), "scale", float
)
self.robot_offset[i] = read_config_from_node(
root, "qpos" + str(i), "offset", float
)
self.robot_pos_bound[i] = read_config_from_node(
root, "qpos" + str(i), "pos_bound", float
)
self.robot_vel_bound[i] = read_config_from_node(
root, "qpos" + str(i), "vel_bound", float
)
self.robot_pos_noise_amp[i] = read_config_from_node(
root, "qpos" + str(i), "pos_noise_amp", float
)
self.robot_vel_noise_amp[i] = read_config_from_node(
root, "qpos" + str(i), "vel_noise_amp", float
)
# convert to hardware space
def _de_calib(self, qp_mj, qv_mj=None):
qp_ad = (qp_mj - self.robot_offset) / self.robot_scale
if qv_mj is not None:
qv_ad = qv_mj / self.robot_scale
return qp_ad, qv_ad
else:
return qp_ad
# convert to mujoco space
def _calib(self, qp_ad, qv_ad):
qp_mj = qp_ad * self.robot_scale + self.robot_offset
qv_mj = qv_ad * self.robot_scale
return qp_mj, qv_mj
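    # Calibration maps hardware joint readings into MuJoCo coordinates and back:
    #   qp_mj = qp_ad * robot_scale + robot_offset    (_calib)
    #   qp_ad = (qp_mj - robot_offset) / robot_scale  (_de_calib)
    # so _de_calib(_calib(qp, qv)) recovers the original hardware-space values.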
# refresh the observation cache
def _observation_cache_refresh(self, env):
for _ in range(self.observation_cache_maxsize):
self.get_obs(env, sim_mimic_hardware=False)
# get past observation
def get_obs_from_cache(self, env, index=-1):
assert (index >= 0 and index < self.observation_cache_maxsize) or (
index < 0 and index >= -self.observation_cache_maxsize
), (
"cache index out of bound. (cache size is %2d)"
% self.observation_cache_maxsize
)
obs = self.observation_cache[index]
if self.has_obj:
return (
obs.time,
obs.qpos_robot,
obs.qvel_robot,
obs.qpos_object,
obs.qvel_object,
)
else:
return obs.time, obs.qpos_robot, obs.qvel_robot
# get observation
def get_obs(
self, env, robot_noise_ratio=1, object_noise_ratio=1, sim_mimic_hardware=True
):
if self.is_hardware:
raise NotImplementedError()
else:
# Gather simulated observation
qp = env.sim.data.qpos[: self.n_jnt].copy()
qv = env.sim.data.qvel[: self.n_jnt].copy()
if self.has_obj:
qp_obj = env.sim.data.qpos[-self.n_obj :].copy()
qv_obj = env.sim.data.qvel[-self.n_obj :].copy()
else:
qp_obj = None
qv_obj = None
self.time = env.sim.data.time
# Simulate observation noise
if not env.initializing:
qp += (
robot_noise_ratio
* self.robot_pos_noise_amp[: self.n_jnt]
* env.np_random.uniform(low=-1.0, high=1.0, size=self.n_jnt)
)
qv += (
robot_noise_ratio
* self.robot_vel_noise_amp[: self.n_jnt]
* env.np_random.uniform(low=-1.0, high=1.0, size=self.n_jnt)
)
if self.has_obj:
qp_obj += (
robot_noise_ratio
* self.robot_pos_noise_amp[-self.n_obj :]
* env.np_random.uniform(low=-1.0, high=1.0, size=self.n_obj)
)
qv_obj += (
robot_noise_ratio
* self.robot_vel_noise_amp[-self.n_obj :]
* env.np_random.uniform(low=-1.0, high=1.0, size=self.n_obj)
)
# cache observations
obs = observation(
time=self.time,
qpos_robot=qp,
qvel_robot=qv,
qpos_object=qp_obj,
qvel_object=qv_obj,
)
self.observation_cache.append(obs)
if self.has_obj:
return (
obs.time,
obs.qpos_robot,
obs.qvel_robot,
obs.qpos_object,
obs.qvel_object,
)
else:
return obs.time, obs.qpos_robot, obs.qvel_robot
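    # Simulated sensor noise is additive and uniform: each joint position/velocity receives
    # robot_noise_ratio * *_noise_amp[i] * U(-1, 1) before being cached, so setting
    # env.robot_noise_ratio = 0 (as the kitchen env above does) yields noiseless observations.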
# enforce position specs.
def ctrl_position_limits(self, ctrl_position):
ctrl_feasible_position = np.clip(
ctrl_position,
self.robot_pos_bound[: self.n_jnt, 0],
self.robot_pos_bound[: self.n_jnt, 1],
)
return ctrl_feasible_position
# step the robot env
def step(self, env, ctrl_desired, step_duration, sim_override=False):
# Populate observation cache during startup
if env.initializing:
self._observation_cache_refresh(env)
# enforce velocity limits
ctrl_feasible = self.ctrl_velocity_limits(ctrl_desired, step_duration)
# enforce position limits
ctrl_feasible = self.ctrl_position_limits(ctrl_feasible)
# Send controls to the robot
if self.is_hardware and (not sim_override):
raise NotImplementedError()
else:
env.do_simulation(
ctrl_feasible, int(step_duration / env.sim.model.opt.timestep)
) # render is folded in here
# Update current robot state on the overlay
if self.overlay:
env.sim.data.qpos[self.n_jnt : 2 * self.n_jnt] = env.desired_pose.copy()
env.sim.forward()
# synchronize time
if self.is_hardware:
time_now = time.time() - self.time_start
time_left_in_step = step_duration - (time_now - self.time)
if time_left_in_step > 0.0001:
time.sleep(time_left_in_step)
return 1
def reset(
self,
env,
reset_pose,
reset_vel,
overlay_mimic_reset_pose=True,
sim_override=False,
):
reset_pose = self.clip_positions(reset_pose)
if self.is_hardware:
raise NotImplementedError()
else:
env.sim.reset()
env.sim.data.qpos[: self.n_jnt] = reset_pose[: self.n_jnt].copy()
env.sim.data.qvel[: self.n_jnt] = reset_vel[: self.n_jnt].copy()
if self.has_obj:
env.sim.data.qpos[-self.n_obj :] = reset_pose[-self.n_obj :].copy()
env.sim.data.qvel[-self.n_obj :] = reset_vel[-self.n_obj :].copy()
env.sim.forward()
if self.overlay:
env.sim.data.qpos[self.n_jnt : 2 * self.n_jnt] = env.desired_pose[
: self.n_jnt
].copy()
env.sim.forward()
# refresh observation cache before exit
self._observation_cache_refresh(env)
def close(self):
if self.is_hardware:
cprint(
"Closing Franka hardware... ", "white", "on_grey", end="", flush=True
)
status = 0
raise NotImplementedError()
cprint("Closed (Status: {})".format(status), "white", "on_grey", flush=True)
else:
cprint("Closing Franka sim", "white", "on_grey", flush=True)
class Robot_PosAct(Robot):
    # enforce velocity specs.
    # ALERT: This depends on the previous observation, which breaks MDP assumptions. Be careful.
def ctrl_velocity_limits(self, ctrl_position, step_duration):
last_obs = self.observation_cache[-1]
ctrl_desired_vel = (
ctrl_position - last_obs.qpos_robot[: self.n_jnt]
) / step_duration
ctrl_feasible_vel = np.clip(
ctrl_desired_vel,
self.robot_vel_bound[: self.n_jnt, 0],
self.robot_vel_bound[: self.n_jnt, 1],
)
ctrl_feasible_position = (
last_obs.qpos_robot[: self.n_jnt] + ctrl_feasible_vel * step_duration
)
return ctrl_feasible_position
class Robot_VelAct(Robot):
    # enforce velocity specs.
    # ALERT: This depends on the previous observation, which breaks MDP assumptions. Be careful.
def ctrl_velocity_limits(self, ctrl_velocity, step_duration):
last_obs = self.observation_cache[-1]
ctrl_feasible_vel = np.clip(
ctrl_velocity,
self.robot_vel_bound[: self.n_jnt, 0],
self.robot_vel_bound[: self.n_jnt, 1],
)
ctrl_feasible_position = (
last_obs.qpos_robot[: self.n_jnt] + ctrl_feasible_vel * step_duration
)
return ctrl_feasible_position
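# Robot_PosAct and Robot_VelAct above both integrate a velocity-limited command into a feasible
# position target, e.g. q_target = q_last + clip(v_cmd, v_min, v_max) * step_duration;
# Robot_Unconstrained below bypasses both the velocity and position clamps (used for the
# end-effector, torque and vices control modes).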
class Robot_Unconstrained(Robot):
def ctrl_velocity_limits(self, ctrl_velocity, step_duration):
return ctrl_velocity
def ctrl_position_limits(self, ctrl_position):
return ctrl_position
| 12,704 | 33.808219 | 109 | py |
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/simulation/__init__.py | 0 | 0 | 0 | py |
|
CSD-manipulation | CSD-manipulation-master/d4rl_alt/kitchen/adept_envs/simulation/module.py | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for caching Python modules related to simulation."""
import sys
_MUJOCO_PY_MODULE = None
_DM_MUJOCO_MODULE = None
_DM_VIEWER_MODULE = None
_DM_RENDER_MODULE = None
_GLFW_MODULE = None
def get_mujoco_py():
"""Returns the mujoco_py module."""
global _MUJOCO_PY_MODULE
if _MUJOCO_PY_MODULE:
return _MUJOCO_PY_MODULE
try:
import mujoco_py
# Override the warning function.
from mujoco_py.builder import cymj
cymj.set_warning_callback(_mj_warning_fn)
except ImportError:
print(
"Failed to import mujoco_py. Ensure that mujoco_py (using MuJoCo "
"v1.50) is installed.",
file=sys.stderr,
)
sys.exit(1)
_MUJOCO_PY_MODULE = mujoco_py
return mujoco_py
def get_mujoco_py_mjlib():
"""Returns the mujoco_py mjlib module."""
class MjlibDelegate:
"""Wrapper that forwards mjlib calls."""
def __init__(self, lib):
self._lib = lib
def __getattr__(self, name: str):
if name.startswith("mj"):
return getattr(self._lib, "_" + name)
raise AttributeError(name)
return MjlibDelegate(get_mujoco_py().cymj)
def get_dm_mujoco():
"""Returns the DM Control mujoco module."""
global _DM_MUJOCO_MODULE
if _DM_MUJOCO_MODULE:
return _DM_MUJOCO_MODULE
try:
from dm_control import mujoco
except ImportError:
print(
"Failed to import dm_control.mujoco. Ensure that dm_control (using "
"MuJoCo v2.00) is installed.",
file=sys.stderr,
)
sys.exit(1)
_DM_MUJOCO_MODULE = mujoco
return mujoco
def get_dm_viewer():
"""Returns the DM Control viewer module."""
global _DM_VIEWER_MODULE
if _DM_VIEWER_MODULE:
return _DM_VIEWER_MODULE
try:
from dm_control import viewer
except ImportError:
print(
"Failed to import dm_control.viewer. Ensure that dm_control (using "
"MuJoCo v2.00) is installed.",
file=sys.stderr,
)
sys.exit(1)
_DM_VIEWER_MODULE = viewer
return viewer
def get_dm_render():
"""Returns the DM Control render module."""
global _DM_RENDER_MODULE
if _DM_RENDER_MODULE:
return _DM_RENDER_MODULE
try:
try:
from dm_control import _render
render = _render
except ImportError:
print("Warning: DM Control is out of date.")
from dm_control import render
except ImportError:
print(
"Failed to import dm_control.render. Ensure that dm_control (using "
"MuJoCo v2.00) is installed.",
file=sys.stderr,
)
sys.exit(1)
_DM_RENDER_MODULE = render
return render
def _mj_warning_fn(warn_data: bytes):
"""Warning function override for mujoco_py."""
print(
"WARNING: Mujoco simulation is unstable (has NaNs): {}".format(
warn_data.decode()
)
)
| 3,644 | 25.605839 | 80 | py |