repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
mmsegmentation | mmsegmentation-master/tools/convert_datasets/drive.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import tempfile
import zipfile
import cv2
import mmcv
def parse_args():
parser = argparse.ArgumentParser(
description='Convert DRIVE dataset to mmsegmentation format')
parser.add_argument(
'training_path', help='the training part of DRIVE dataset')
parser.add_argument(
'testing_path', help='the testing part of DRIVE dataset')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
training_path = args.training_path
testing_path = args.testing_path
if args.out_dir is None:
out_dir = osp.join('data', 'DRIVE')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(out_dir)
mmcv.mkdir_or_exist(osp.join(out_dir, 'images'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation'))
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
print('Extracting training.zip...')
zip_file = zipfile.ZipFile(training_path)
zip_file.extractall(tmp_dir)
print('Generating training dataset...')
now_dir = osp.join(tmp_dir, 'training', 'images')
for img_name in os.listdir(now_dir):
img = mmcv.imread(osp.join(now_dir, img_name))
mmcv.imwrite(
img,
osp.join(
out_dir, 'images', 'training',
osp.splitext(img_name)[0].replace('_training', '') +
'.png'))
now_dir = osp.join(tmp_dir, 'training', '1st_manual')
for img_name in os.listdir(now_dir):
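            # The DRIVE manual annotations are GIF files, which mmcv.imread
            # cannot load, so read the first frame with cv2.VideoCapture.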
cap = cv2.VideoCapture(osp.join(now_dir, img_name))
ret, img = cap.read()
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'training',
osp.splitext(img_name)[0] + '.png'))
print('Extracting test.zip...')
zip_file = zipfile.ZipFile(testing_path)
zip_file.extractall(tmp_dir)
print('Generating validation dataset...')
now_dir = osp.join(tmp_dir, 'test', 'images')
for img_name in os.listdir(now_dir):
img = mmcv.imread(osp.join(now_dir, img_name))
mmcv.imwrite(
img,
osp.join(
out_dir, 'images', 'validation',
osp.splitext(img_name)[0].replace('_test', '') + '.png'))
now_dir = osp.join(tmp_dir, 'test', '1st_manual')
if osp.exists(now_dir):
for img_name in os.listdir(now_dir):
cap = cv2.VideoCapture(osp.join(now_dir, img_name))
ret, img = cap.read()
                # Divide the annotation values by 128 because some annotation
                # images are not strictly binary. Integer division by 128 acts
                # as a threshold: '1 if value >= 128 else 0'.
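                # e.g., values [0, 127, 128, 255] become [0, 0, 1, 1]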
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'validation',
osp.splitext(img_name)[0] + '.png'))
now_dir = osp.join(tmp_dir, 'test', '2nd_manual')
if osp.exists(now_dir):
for img_name in os.listdir(now_dir):
cap = cv2.VideoCapture(osp.join(now_dir, img_name))
ret, img = cap.read()
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'validation',
osp.splitext(img_name)[0] + '.png'))
print('Removing the temporary files...')
print('Done!')
if __name__ == '__main__':
main()
| 4,238 | 36.184211 | 78 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/hrf.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import tempfile
import zipfile
import mmcv
HRF_LEN = 15
TRAINING_LEN = 5
def parse_args():
parser = argparse.ArgumentParser(
description='Convert HRF dataset to mmsegmentation format')
parser.add_argument('healthy_path', help='the path of healthy.zip')
parser.add_argument(
'healthy_manualsegm_path', help='the path of healthy_manualsegm.zip')
parser.add_argument('glaucoma_path', help='the path of glaucoma.zip')
parser.add_argument(
'glaucoma_manualsegm_path', help='the path of glaucoma_manualsegm.zip')
parser.add_argument(
'diabetic_retinopathy_path',
help='the path of diabetic_retinopathy.zip')
parser.add_argument(
'diabetic_retinopathy_manualsegm_path',
help='the path of diabetic_retinopathy_manualsegm.zip')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
images_path = [
args.healthy_path, args.glaucoma_path, args.diabetic_retinopathy_path
]
annotations_path = [
args.healthy_manualsegm_path, args.glaucoma_manualsegm_path,
args.diabetic_retinopathy_manualsegm_path
]
if args.out_dir is None:
out_dir = osp.join('data', 'HRF')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(out_dir)
mmcv.mkdir_or_exist(osp.join(out_dir, 'images'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation'))
print('Generating images...')
for now_path in images_path:
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
zip_file = zipfile.ZipFile(now_path)
zip_file.extractall(tmp_dir)
assert len(os.listdir(tmp_dir)) == HRF_LEN, \
'len(os.listdir(tmp_dir)) != {}'.format(HRF_LEN)
for filename in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]:
img = mmcv.imread(osp.join(tmp_dir, filename))
mmcv.imwrite(
img,
osp.join(out_dir, 'images', 'training',
osp.splitext(filename)[0] + '.png'))
for filename in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]:
img = mmcv.imread(osp.join(tmp_dir, filename))
mmcv.imwrite(
img,
osp.join(out_dir, 'images', 'validation',
osp.splitext(filename)[0] + '.png'))
print('Generating annotations...')
for now_path in annotations_path:
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
zip_file = zipfile.ZipFile(now_path)
zip_file.extractall(tmp_dir)
assert len(os.listdir(tmp_dir)) == HRF_LEN, \
'len(os.listdir(tmp_dir)) != {}'.format(HRF_LEN)
for filename in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]:
img = mmcv.imread(osp.join(tmp_dir, filename))
                # Divide the annotation values by 128 because some annotation
                # images are not strictly binary. Integer division by 128 acts
                # as a threshold: '1 if value >= 128 else 0'.
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'training',
osp.splitext(filename)[0] + '.png'))
for filename in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]:
img = mmcv.imread(osp.join(tmp_dir, filename))
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'validation',
osp.splitext(filename)[0] + '.png'))
print('Done!')
if __name__ == '__main__':
main()
| 4,364 | 37.973214 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/isaid.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os
import os.path as osp
import shutil
import tempfile
import zipfile
import mmcv
import numpy as np
from PIL import Image
iSAID_palette = \
{
0: (0, 0, 0),
1: (0, 0, 63),
2: (0, 63, 63),
3: (0, 63, 0),
4: (0, 63, 127),
5: (0, 63, 191),
6: (0, 63, 255),
7: (0, 127, 63),
8: (0, 127, 127),
9: (0, 0, 127),
10: (0, 0, 191),
11: (0, 0, 255),
12: (0, 191, 127),
13: (0, 127, 191),
14: (0, 127, 255),
15: (0, 100, 155)
}
iSAID_invert_palette = {v: k for k, v in iSAID_palette.items()}
def iSAID_convert_from_color(arr_3d, palette=iSAID_invert_palette):
"""RGB-color encoding to grayscale labels."""
arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)
for c, i in palette.items():
m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)
arr_2d[m] = i
return arr_2d
def slide_crop_image(src_path, out_dir, mode, patch_H, patch_W, overlap):
img = np.asarray(Image.open(src_path).convert('RGB'))
img_H, img_W, _ = img.shape
if img_H < patch_H and img_W > patch_W:
img = mmcv.impad(img, shape=(patch_H, img_W), pad_val=0)
img_H, img_W, _ = img.shape
elif img_H > patch_H and img_W < patch_W:
img = mmcv.impad(img, shape=(img_H, patch_W), pad_val=0)
img_H, img_W, _ = img.shape
elif img_H < patch_H and img_W < patch_W:
img = mmcv.impad(img, shape=(patch_H, patch_W), pad_val=0)
img_H, img_W, _ = img.shape
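    # Slide a (patch_H x patch_W) window over the image with the given
    # overlap; windows that would cross the right/bottom border are shifted
    # back so every patch lies fully inside the (possibly padded) image.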
for x in range(0, img_W, patch_W - overlap):
for y in range(0, img_H, patch_H - overlap):
x_str = x
x_end = x + patch_W
if x_end > img_W:
diff_x = x_end - img_W
x_str -= diff_x
x_end = img_W
y_str = y
y_end = y + patch_H
if y_end > img_H:
diff_y = y_end - img_H
y_str -= diff_y
y_end = img_H
img_patch = img[y_str:y_end, x_str:x_end, :]
img_patch = Image.fromarray(img_patch.astype(np.uint8))
image = osp.basename(src_path).split('.')[0] + '_' + str(
y_str) + '_' + str(y_end) + '_' + str(x_str) + '_' + str(
x_end) + '.png'
save_path_image = osp.join(out_dir, 'img_dir', mode, str(image))
img_patch.save(save_path_image)
def slide_crop_label(src_path, out_dir, mode, patch_H, patch_W, overlap):
label = mmcv.imread(src_path, channel_order='rgb')
label = iSAID_convert_from_color(label)
img_H, img_W = label.shape
if img_H < patch_H and img_W > patch_W:
label = mmcv.impad(label, shape=(patch_H, img_W), pad_val=255)
img_H = patch_H
elif img_H > patch_H and img_W < patch_W:
label = mmcv.impad(label, shape=(img_H, patch_W), pad_val=255)
img_W = patch_W
elif img_H < patch_H and img_W < patch_W:
label = mmcv.impad(label, shape=(patch_H, patch_W), pad_val=255)
img_H = patch_H
img_W = patch_W
for x in range(0, img_W, patch_W - overlap):
for y in range(0, img_H, patch_H - overlap):
x_str = x
x_end = x + patch_W
if x_end > img_W:
diff_x = x_end - img_W
x_str -= diff_x
x_end = img_W
y_str = y
y_end = y + patch_H
if y_end > img_H:
diff_y = y_end - img_H
y_str -= diff_y
y_end = img_H
lab_patch = label[y_str:y_end, x_str:x_end]
lab_patch = Image.fromarray(lab_patch.astype(np.uint8), mode='P')
image = osp.basename(src_path).split('.')[0].split(
'_')[0] + '_' + str(y_str) + '_' + str(y_end) + '_' + str(
x_str) + '_' + str(x_end) + '_instance_color_RGB' + '.png'
lab_patch.save(osp.join(out_dir, 'ann_dir', mode, str(image)))
def parse_args():
parser = argparse.ArgumentParser(
description='Convert iSAID dataset to mmsegmentation format')
parser.add_argument('dataset_path', help='iSAID folder path')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument(
'--patch_width',
default=896,
type=int,
help='Width of the cropped image patch')
parser.add_argument(
'--patch_height',
default=896,
type=int,
help='Height of the cropped image patch')
parser.add_argument(
'--overlap_area', default=384, type=int, help='Overlap area')
args = parser.parse_args()
return args
def main():
args = parse_args()
dataset_path = args.dataset_path
# image patch width and height
patch_H, patch_W = args.patch_width, args.patch_height
overlap = args.overlap_area # overlap area
if args.out_dir is None:
out_dir = osp.join('data', 'iSAID')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'test'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'test'))
assert os.path.exists(os.path.join(dataset_path, 'train')), \
'train is not in {}'.format(dataset_path)
assert os.path.exists(os.path.join(dataset_path, 'val')), \
'val is not in {}'.format(dataset_path)
assert os.path.exists(os.path.join(dataset_path, 'test')), \
'test is not in {}'.format(dataset_path)
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
for dataset_mode in ['train', 'val', 'test']:
            print('Extracting {} zip files...'.format(dataset_mode))
img_zipp_list = glob.glob(
os.path.join(dataset_path, dataset_mode, 'images', '*.zip'))
            print('Found the data', img_zipp_list)
for img_zipp in img_zipp_list:
zip_file = zipfile.ZipFile(img_zipp)
zip_file.extractall(os.path.join(tmp_dir, dataset_mode, 'img'))
src_path_list = glob.glob(
os.path.join(tmp_dir, dataset_mode, 'img', 'images', '*.png'))
src_prog_bar = mmcv.ProgressBar(len(src_path_list))
for i, img_path in enumerate(src_path_list):
if dataset_mode != 'test':
slide_crop_image(img_path, out_dir, dataset_mode, patch_H,
patch_W, overlap)
else:
shutil.move(img_path,
os.path.join(out_dir, 'img_dir', dataset_mode))
src_prog_bar.update()
if dataset_mode != 'test':
label_zipp_list = glob.glob(
os.path.join(dataset_path, dataset_mode, 'Semantic_masks',
'*.zip'))
for label_zipp in label_zipp_list:
zip_file = zipfile.ZipFile(label_zipp)
zip_file.extractall(
os.path.join(tmp_dir, dataset_mode, 'lab'))
lab_path_list = glob.glob(
os.path.join(tmp_dir, dataset_mode, 'lab', 'images',
'*.png'))
lab_prog_bar = mmcv.ProgressBar(len(lab_path_list))
for i, lab_path in enumerate(lab_path_list):
slide_crop_label(lab_path, out_dir, dataset_mode, patch_H,
patch_W, overlap)
lab_prog_bar.update()
print('Removing the temporary files...')
print('Done!')
if __name__ == '__main__':
main()
| 8,164 | 32.191057 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/loveda.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import shutil
import tempfile
import zipfile
import mmcv
def parse_args():
parser = argparse.ArgumentParser(
description='Convert LoveDA dataset to mmsegmentation format')
parser.add_argument('dataset_path', help='LoveDA folder path')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
dataset_path = args.dataset_path
if args.out_dir is None:
out_dir = osp.join('data', 'loveDA')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(out_dir)
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'test'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val'))
assert 'Train.zip' in os.listdir(dataset_path), \
'Train.zip is not in {}'.format(dataset_path)
assert 'Val.zip' in os.listdir(dataset_path), \
'Val.zip is not in {}'.format(dataset_path)
assert 'Test.zip' in os.listdir(dataset_path), \
'Test.zip is not in {}'.format(dataset_path)
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
for dataset in ['Train', 'Val', 'Test']:
zip_file = zipfile.ZipFile(
os.path.join(dataset_path, dataset + '.zip'))
zip_file.extractall(tmp_dir)
data_type = dataset.lower()
for location in ['Rural', 'Urban']:
for image_type in ['images_png', 'masks_png']:
if image_type == 'images_png':
dst = osp.join(out_dir, 'img_dir', data_type)
else:
dst = osp.join(out_dir, 'ann_dir', data_type)
if dataset == 'Test' and image_type == 'masks_png':
continue
else:
src_dir = osp.join(tmp_dir, dataset, location,
image_type)
src_lst = os.listdir(src_dir)
for file in src_lst:
shutil.move(osp.join(src_dir, file), dst)
print('Removing the temporary files...')
print('Done!')
if __name__ == '__main__':
main()
| 2,696 | 35.445946 | 76 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/pascal_context.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from functools import partial
import mmcv
import numpy as np
from detail import Detail
from PIL import Image
_mapping = np.sort(
np.array([
0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22, 23, 397, 25, 284,
158, 159, 416, 33, 162, 420, 454, 295, 296, 427, 44, 45, 46, 308, 59,
440, 445, 31, 232, 65, 354, 424, 68, 326, 72, 458, 34, 207, 80, 355,
85, 347, 220, 349, 360, 98, 187, 104, 105, 366, 189, 368, 113, 115
]))
_key = np.array(range(len(_mapping))).astype('uint8')
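# _mapping holds the raw Pascal-Context category ids of the 60 classes kept
# in the benchmark (background 0 included); np.digitize below maps each raw
# id to its index in this sorted array, i.e. to a train id in [0, 59].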
def generate_labels(img_id, detail, out_dir):
def _class_to_index(mask, _mapping, _key):
        # assert that every value in the mask is a known category id
values = np.unique(mask)
for i in range(len(values)):
assert (values[i] in _mapping)
index = np.digitize(mask.ravel(), _mapping, right=True)
return _key[index].reshape(mask.shape)
mask = Image.fromarray(
_class_to_index(detail.getMask(img_id), _mapping=_mapping, _key=_key))
filename = img_id['file_name']
mask.save(osp.join(out_dir, filename.replace('jpg', 'png')))
return osp.splitext(osp.basename(filename))[0]
def parse_args():
parser = argparse.ArgumentParser(
        description='Convert PASCAL Context annotations to mmsegmentation '
        'format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
    parser.add_argument('json_path', help='annotation json filepath')
parser.add_argument('-o', '--out_dir', help='output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
devkit_path = args.devkit_path
if args.out_dir is None:
out_dir = osp.join(devkit_path, 'VOC2010', 'SegmentationClassContext')
else:
out_dir = args.out_dir
json_path = args.json_path
mmcv.mkdir_or_exist(out_dir)
img_dir = osp.join(devkit_path, 'VOC2010', 'JPEGImages')
train_detail = Detail(json_path, img_dir, 'train')
train_ids = train_detail.getImgs()
val_detail = Detail(json_path, img_dir, 'val')
val_ids = val_detail.getImgs()
mmcv.mkdir_or_exist(
osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext'))
train_list = mmcv.track_progress(
partial(generate_labels, detail=train_detail, out_dir=out_dir),
train_ids)
with open(
osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext',
'train.txt'), 'w') as f:
f.writelines(line + '\n' for line in sorted(train_list))
val_list = mmcv.track_progress(
partial(generate_labels, detail=val_detail, out_dir=out_dir), val_ids)
with open(
osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext',
'val.txt'), 'w') as f:
f.writelines(line + '\n' for line in sorted(val_list))
print('Done!')
if __name__ == '__main__':
main()
| 2,925 | 32.25 | 78 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/potsdam.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import math
import os
import os.path as osp
import tempfile
import zipfile
import mmcv
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(
description='Convert potsdam dataset to mmsegmentation format')
parser.add_argument('dataset_path', help='potsdam folder path')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument(
'--clip_size',
type=int,
help='clipped size of image after preparation',
default=512)
parser.add_argument(
'--stride_size',
type=int,
help='stride of clipping original images',
default=256)
args = parser.parse_args()
return args
def clip_big_image(image_path, clip_save_dir, args, to_label=False):
    # The original images of the Potsdam dataset are very large, so they are
    # clipped into smaller patches before training. Given a fixed clip size
    # and stride size, patch positions along the width and height are
    # determined. For example, a 5120 x 5120 image with clip size 512 and
    # stride size 256 yields 19 x 19 = 361 patches, each of size 512 x 512.
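    # Worked example for h = w = 5120, clip_size = 512, stride_size = 256:
    #   math.ceil((5120 - 512) / 256) = 18 and 18 * 256 + 512 = 5120 >= 5120,
    #   so num_rows = num_cols = 18 and the meshgrid below generates
    #   (18 + 1) * (18 + 1) = 361 candidate boxes.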
image = mmcv.imread(image_path)
h, w, c = image.shape
clip_size = args.clip_size
stride_size = args.stride_size
num_rows = math.ceil((h - clip_size) / stride_size) if math.ceil(
(h - clip_size) /
stride_size) * stride_size + clip_size >= h else math.ceil(
(h - clip_size) / stride_size) + 1
num_cols = math.ceil((w - clip_size) / stride_size) if math.ceil(
(w - clip_size) /
stride_size) * stride_size + clip_size >= w else math.ceil(
(w - clip_size) / stride_size) + 1
x, y = np.meshgrid(np.arange(num_cols + 1), np.arange(num_rows + 1))
xmin = x * clip_size
ymin = y * clip_size
xmin = xmin.ravel()
ymin = ymin.ravel()
xmin_offset = np.where(xmin + clip_size > w, w - xmin - clip_size,
np.zeros_like(xmin))
ymin_offset = np.where(ymin + clip_size > h, h - ymin - clip_size,
np.zeros_like(ymin))
boxes = np.stack([
xmin + xmin_offset, ymin + ymin_offset,
np.minimum(xmin + clip_size, w),
np.minimum(ymin + clip_size, h)
],
axis=1)
if to_label:
color_map = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0],
[255, 255, 0], [0, 255, 0], [0, 255, 255],
[0, 0, 255]])
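        # Collapse each color triple to a single scalar via a dot product
        # with [2, 3, 4]; every palette color above hashes to a distinct
        # value, so classes can be matched on the flattened array.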
flatten_v = np.matmul(
image.reshape(-1, c),
np.array([2, 3, 4]).reshape(3, 1))
out = np.zeros_like(flatten_v)
for idx, class_color in enumerate(color_map):
value_idx = np.matmul(class_color,
np.array([2, 3, 4]).reshape(3, 1))
out[flatten_v == value_idx] = idx
image = out.reshape(h, w)
for box in boxes:
start_x, start_y, end_x, end_y = box
clipped_image = image[start_y:end_y,
start_x:end_x] if to_label else image[
start_y:end_y, start_x:end_x, :]
idx_i, idx_j = osp.basename(image_path).split('_')[2:4]
mmcv.imwrite(
clipped_image.astype(np.uint8),
osp.join(
clip_save_dir,
f'{idx_i}_{idx_j}_{start_x}_{start_y}_{end_x}_{end_y}.png'))
def main():
args = parse_args()
splits = {
'train': [
'2_10', '2_11', '2_12', '3_10', '3_11', '3_12', '4_10', '4_11',
'4_12', '5_10', '5_11', '5_12', '6_10', '6_11', '6_12', '6_7',
'6_8', '6_9', '7_10', '7_11', '7_12', '7_7', '7_8', '7_9'
],
'val': [
'5_15', '6_15', '6_13', '3_13', '4_14', '6_14', '5_14', '2_13',
'4_15', '2_14', '5_13', '4_13', '3_14', '7_13'
]
}
dataset_path = args.dataset_path
if args.out_dir is None:
out_dir = osp.join('data', 'potsdam')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val'))
zipp_list = glob.glob(os.path.join(dataset_path, '*.zip'))
    print('Found the data', zipp_list)
for zipp in zipp_list:
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
zip_file = zipfile.ZipFile(zipp)
zip_file.extractall(tmp_dir)
src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif'))
if not len(src_path_list):
sub_tmp_dir = os.path.join(tmp_dir, os.listdir(tmp_dir)[0])
src_path_list = glob.glob(os.path.join(sub_tmp_dir, '*.tif'))
prog_bar = mmcv.ProgressBar(len(src_path_list))
for i, src_path in enumerate(src_path_list):
idx_i, idx_j = osp.basename(src_path).split('_')[2:4]
data_type = 'train' if f'{idx_i}_{idx_j}' in splits[
'train'] else 'val'
if 'label' in src_path:
dst_dir = osp.join(out_dir, 'ann_dir', data_type)
clip_big_image(src_path, dst_dir, args, to_label=True)
else:
dst_dir = osp.join(out_dir, 'img_dir', data_type)
clip_big_image(src_path, dst_dir, args, to_label=False)
prog_bar.update()
print('Removing the temporary files...')
print('Done!')
if __name__ == '__main__':
main()
| 5,847 | 36.012658 | 77 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/stare.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import gzip
import os
import os.path as osp
import tarfile
import tempfile
import mmcv
STARE_LEN = 20
TRAINING_LEN = 10
def un_gz(src, dst):
g_file = gzip.GzipFile(src)
with open(dst, 'wb+') as f:
f.write(g_file.read())
g_file.close()
def parse_args():
parser = argparse.ArgumentParser(
description='Convert STARE dataset to mmsegmentation format')
parser.add_argument('image_path', help='the path of stare-images.tar')
parser.add_argument('labels_ah', help='the path of labels-ah.tar')
parser.add_argument('labels_vk', help='the path of labels-vk.tar')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
image_path = args.image_path
labels_ah = args.labels_ah
labels_vk = args.labels_vk
if args.out_dir is None:
out_dir = osp.join('data', 'STARE')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(out_dir)
mmcv.mkdir_or_exist(osp.join(out_dir, 'images'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation'))
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
mmcv.mkdir_or_exist(osp.join(tmp_dir, 'gz'))
mmcv.mkdir_or_exist(osp.join(tmp_dir, 'files'))
print('Extracting stare-images.tar...')
with tarfile.open(image_path) as f:
f.extractall(osp.join(tmp_dir, 'gz'))
for filename in os.listdir(osp.join(tmp_dir, 'gz')):
un_gz(
osp.join(tmp_dir, 'gz', filename),
osp.join(tmp_dir, 'files',
osp.splitext(filename)[0]))
now_dir = osp.join(tmp_dir, 'files')
assert len(os.listdir(now_dir)) == STARE_LEN, \
'len(os.listdir(now_dir)) != {}'.format(STARE_LEN)
for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]:
img = mmcv.imread(osp.join(now_dir, filename))
mmcv.imwrite(
img,
osp.join(out_dir, 'images', 'training',
osp.splitext(filename)[0] + '.png'))
for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]:
img = mmcv.imread(osp.join(now_dir, filename))
mmcv.imwrite(
img,
osp.join(out_dir, 'images', 'validation',
osp.splitext(filename)[0] + '.png'))
print('Removing the temporary files...')
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
mmcv.mkdir_or_exist(osp.join(tmp_dir, 'gz'))
mmcv.mkdir_or_exist(osp.join(tmp_dir, 'files'))
print('Extracting labels-ah.tar...')
with tarfile.open(labels_ah) as f:
f.extractall(osp.join(tmp_dir, 'gz'))
for filename in os.listdir(osp.join(tmp_dir, 'gz')):
un_gz(
osp.join(tmp_dir, 'gz', filename),
osp.join(tmp_dir, 'files',
osp.splitext(filename)[0]))
now_dir = osp.join(tmp_dir, 'files')
assert len(os.listdir(now_dir)) == STARE_LEN, \
'len(os.listdir(now_dir)) != {}'.format(STARE_LEN)
for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]:
img = mmcv.imread(osp.join(now_dir, filename))
            # Divide the annotation values by 128 because some annotation
            # images are not strictly binary. Integer division by 128 acts
            # as a threshold: '1 if value >= 128 else 0'.
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'training',
osp.splitext(filename)[0] + '.png'))
for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]:
img = mmcv.imread(osp.join(now_dir, filename))
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'validation',
osp.splitext(filename)[0] + '.png'))
print('Removing the temporary files...')
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
mmcv.mkdir_or_exist(osp.join(tmp_dir, 'gz'))
mmcv.mkdir_or_exist(osp.join(tmp_dir, 'files'))
print('Extracting labels-vk.tar...')
with tarfile.open(labels_vk) as f:
f.extractall(osp.join(tmp_dir, 'gz'))
for filename in os.listdir(osp.join(tmp_dir, 'gz')):
un_gz(
osp.join(tmp_dir, 'gz', filename),
osp.join(tmp_dir, 'files',
osp.splitext(filename)[0]))
now_dir = osp.join(tmp_dir, 'files')
assert len(os.listdir(now_dir)) == STARE_LEN, \
'len(os.listdir(now_dir)) != {}'.format(STARE_LEN)
for filename in sorted(os.listdir(now_dir))[:TRAINING_LEN]:
img = mmcv.imread(osp.join(now_dir, filename))
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'training',
osp.splitext(filename)[0] + '.png'))
for filename in sorted(os.listdir(now_dir))[TRAINING_LEN:]:
img = mmcv.imread(osp.join(now_dir, filename))
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'validation',
osp.splitext(filename)[0] + '.png'))
print('Removing the temporary files...')
print('Done!')
if __name__ == '__main__':
main()
| 6,039 | 35.167665 | 78 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/vaihingen.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import math
import os
import os.path as osp
import tempfile
import zipfile
import mmcv
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(
description='Convert vaihingen dataset to mmsegmentation format')
parser.add_argument('dataset_path', help='vaihingen folder path')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument(
'--clip_size',
type=int,
help='clipped size of image after preparation',
default=512)
parser.add_argument(
'--stride_size',
type=int,
help='stride of clipping original images',
default=256)
args = parser.parse_args()
return args
def clip_big_image(image_path, clip_save_dir, to_label=False):
    # The original images of the Vaihingen dataset are very large, so they
    # are clipped into smaller patches before training. Given a fixed clip
    # size and stride size, patch positions along the width and height are
    # determined. For example, a 5120 x 5120 image with clip size 512 and
    # stride size 256 yields 19 x 19 = 361 patches, each of size 512 x 512.
image = mmcv.imread(image_path)
h, w, c = image.shape
cs = args.clip_size
ss = args.stride_size
num_rows = math.ceil((h - cs) / ss) if math.ceil(
(h - cs) / ss) * ss + cs >= h else math.ceil((h - cs) / ss) + 1
num_cols = math.ceil((w - cs) / ss) if math.ceil(
(w - cs) / ss) * ss + cs >= w else math.ceil((w - cs) / ss) + 1
x, y = np.meshgrid(np.arange(num_cols + 1), np.arange(num_rows + 1))
xmin = x * cs
ymin = y * cs
xmin = xmin.ravel()
ymin = ymin.ravel()
xmin_offset = np.where(xmin + cs > w, w - xmin - cs, np.zeros_like(xmin))
ymin_offset = np.where(ymin + cs > h, h - ymin - cs, np.zeros_like(ymin))
boxes = np.stack([
xmin + xmin_offset, ymin + ymin_offset,
np.minimum(xmin + cs, w),
np.minimum(ymin + cs, h)
],
axis=1)
if to_label:
color_map = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0],
[255, 255, 0], [0, 255, 0], [0, 255, 255],
[0, 0, 255]])
flatten_v = np.matmul(
image.reshape(-1, c),
np.array([2, 3, 4]).reshape(3, 1))
out = np.zeros_like(flatten_v)
for idx, class_color in enumerate(color_map):
value_idx = np.matmul(class_color,
np.array([2, 3, 4]).reshape(3, 1))
out[flatten_v == value_idx] = idx
image = out.reshape(h, w)
for box in boxes:
start_x, start_y, end_x, end_y = box
clipped_image = image[start_y:end_y,
start_x:end_x] if to_label else image[
start_y:end_y, start_x:end_x, :]
area_idx = osp.basename(image_path).split('_')[3].strip('.tif')
mmcv.imwrite(
clipped_image.astype(np.uint8),
osp.join(clip_save_dir,
f'{area_idx}_{start_x}_{start_y}_{end_x}_{end_y}.png'))
def main():
splits = {
'train': [
'area1', 'area11', 'area13', 'area15', 'area17', 'area21',
'area23', 'area26', 'area28', 'area3', 'area30', 'area32',
'area34', 'area37', 'area5', 'area7'
],
'val': [
'area6', 'area24', 'area35', 'area16', 'area14', 'area22',
'area10', 'area4', 'area2', 'area20', 'area8', 'area31', 'area33',
'area27', 'area38', 'area12', 'area29'
],
}
dataset_path = args.dataset_path
if args.out_dir is None:
out_dir = osp.join('data', 'vaihingen')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'train'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'img_dir', 'val'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'train'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'ann_dir', 'val'))
zipp_list = glob.glob(os.path.join(dataset_path, '*.zip'))
    print('Found the data', zipp_list)
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
for zipp in zipp_list:
zip_file = zipfile.ZipFile(zipp)
zip_file.extractall(tmp_dir)
src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif'))
if 'ISPRS_semantic_labeling_Vaihingen' in zipp:
src_path_list = glob.glob(
os.path.join(os.path.join(tmp_dir, 'top'), '*.tif'))
if 'ISPRS_semantic_labeling_Vaihingen_ground_truth_eroded_COMPLETE' in zipp: # noqa
src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif'))
                # Delete the unused area9 ground truth; filter rather than
                # removing items while iterating over the list.
                src_path_list = [
                    area_ann for area_ann in src_path_list
                    if 'area9' not in area_ann
                ]
prog_bar = mmcv.ProgressBar(len(src_path_list))
for i, src_path in enumerate(src_path_list):
area_idx = osp.basename(src_path).split('_')[3].strip('.tif')
data_type = 'train' if area_idx in splits['train'] else 'val'
if 'noBoundary' in src_path:
dst_dir = osp.join(out_dir, 'ann_dir', data_type)
clip_big_image(src_path, dst_dir, to_label=True)
else:
dst_dir = osp.join(out_dir, 'img_dir', data_type)
clip_big_image(src_path, dst_dir, to_label=False)
prog_bar.update()
print('Removing the temporary files...')
print('Done!')
if __name__ == '__main__':
args = parse_args()
main()
| 5,905 | 36.858974 | 96 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/voc_aug.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from functools import partial
import mmcv
import numpy as np
from PIL import Image
from scipy.io import loadmat
AUG_LEN = 10582
def convert_mat(mat_file, in_dir, out_dir):
data = loadmat(osp.join(in_dir, mat_file))
mask = data['GTcls'][0]['Segmentation'][0].astype(np.uint8)
seg_filename = osp.join(out_dir, mat_file.replace('.mat', '.png'))
Image.fromarray(mask).save(seg_filename, 'PNG')
def generate_aug_list(merged_list, excluded_list):
return list(set(merged_list) - set(excluded_list))
def parse_args():
parser = argparse.ArgumentParser(
description='Convert PASCAL VOC annotations to mmsegmentation format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('aug_path', help='pascal voc aug path')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument(
'--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
devkit_path = args.devkit_path
aug_path = args.aug_path
nproc = args.nproc
if args.out_dir is None:
out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug')
else:
out_dir = args.out_dir
mmcv.mkdir_or_exist(out_dir)
in_dir = osp.join(aug_path, 'dataset', 'cls')
mmcv.track_parallel_progress(
partial(convert_mat, in_dir=in_dir, out_dir=out_dir),
list(mmcv.scandir(in_dir, suffix='.mat')),
nproc=nproc)
full_aug_list = []
with open(osp.join(aug_path, 'dataset', 'train.txt')) as f:
full_aug_list += [line.strip() for line in f]
with open(osp.join(aug_path, 'dataset', 'val.txt')) as f:
full_aug_list += [line.strip() for line in f]
with open(
osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',
'train.txt')) as f:
ori_train_list = [line.strip() for line in f]
with open(
osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',
'val.txt')) as f:
val_list = [line.strip() for line in f]
aug_train_list = generate_aug_list(ori_train_list + full_aug_list,
val_list)
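    # trainaug is the union of the original VOC train list and the SBD
    # train/val lists, minus the VOC val images (10582 ids in total).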
assert len(aug_train_list) == AUG_LEN, 'len(aug_train_list) != {}'.format(
AUG_LEN)
with open(
osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',
'trainaug.txt'), 'w') as f:
f.writelines(line + '\n' for line in aug_train_list)
aug_list = generate_aug_list(full_aug_list, ori_train_list + val_list)
assert len(aug_list) == AUG_LEN - len(
ori_train_list), 'len(aug_list) != {}'.format(AUG_LEN -
len(ori_train_list))
with open(
osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'),
'w') as f:
f.writelines(line + '\n' for line in aug_list)
print('Done!')
if __name__ == '__main__':
main()
| 3,107 | 32.419355 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/model_converters/beit2mmseg.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmcv
import torch
from mmcv.runner import CheckpointLoader
def convert_beit(ckpt):
new_ckpt = OrderedDict()
for k, v in ckpt.items():
if k.startswith('blocks'):
new_key = k.replace('blocks', 'layers')
if 'norm' in new_key:
new_key = new_key.replace('norm', 'ln')
elif 'mlp.fc1' in new_key:
new_key = new_key.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in new_key:
new_key = new_key.replace('mlp.fc2', 'ffn.layers.1')
new_ckpt[new_key] = v
elif k.startswith('patch_embed'):
new_key = k.replace('patch_embed.proj', 'patch_embed.projection')
new_ckpt[new_key] = v
else:
new_key = k
new_ckpt[new_key] = v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys in official pretrained beit models to'
'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert_beit(state_dict)
mmcv.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
| 1,747 | 29.666667 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/model_converters/mit2mmseg.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmcv
import torch
from mmcv.runner import CheckpointLoader
def convert_mit(ckpt):
new_ckpt = OrderedDict()
    # MiT stores q and kv as separate linear layers, while MMSegmentation
    # packs them into a single in_proj weight, so concatenate q with kv below.
for k, v in ckpt.items():
if k.startswith('head'):
continue
# patch embedding conversion
elif k.startswith('patch_embed'):
stage_i = int(k.split('.')[0].replace('patch_embed', ''))
new_k = k.replace(f'patch_embed{stage_i}', f'layers.{stage_i-1}.0')
new_v = v
if 'proj.' in new_k:
new_k = new_k.replace('proj.', 'projection.')
# transformer encoder layer conversion
elif k.startswith('block'):
stage_i = int(k.split('.')[0].replace('block', ''))
new_k = k.replace(f'block{stage_i}', f'layers.{stage_i-1}.1')
new_v = v
if 'attn.q.' in new_k:
sub_item_k = k.replace('q.', 'kv.')
new_k = new_k.replace('q.', 'attn.in_proj_')
new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)
elif 'attn.kv.' in new_k:
continue
elif 'attn.proj.' in new_k:
new_k = new_k.replace('proj.', 'attn.out_proj.')
            # 'attn.sr.' keys already match and need no renaming.
            elif 'mlp.' in new_k:
                new_k = new_k.replace('mlp.', 'ffn.layers.')
                if 'fc1.weight' in new_k or 'fc2.weight' in new_k:
                    new_v = v.reshape((*v.shape, 1, 1))
                new_k = new_k.replace('fc1.', '0.')
                new_k = new_k.replace('dwconv.dwconv.', '1.')
                new_k = new_k.replace('fc2.', '4.')
# norm layer conversion
elif k.startswith('norm'):
stage_i = int(k.split('.')[0].replace('norm', ''))
new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i-1}.2')
new_v = v
else:
new_k = k
new_v = v
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys in official pretrained segformer to '
'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert_mit(state_dict)
mmcv.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
| 3,069 | 35.987952 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/model_converters/stdc2mmseg.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import torch
from mmcv.runner import CheckpointLoader
def convert_stdc(ckpt, stdc_type):
new_state_dict = {}
if stdc_type == 'STDC1':
stage_lst = ['0', '1', '2.0', '2.1', '3.0', '3.1', '4.0', '4.1']
else:
stage_lst = [
'0', '1', '2.0', '2.1', '2.2', '2.3', '3.0', '3.1', '3.2', '3.3',
'3.4', '4.0', '4.1', '4.2'
]
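    # stage_lst maps each index of the official backbone's sequential
    # `features` module to the corresponding stage path in MMSegmentation's
    # STDC backbone (e.g., 'features.2.*' -> 'stages.2.0.*' for STDC1).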
for k, v in ckpt.items():
ori_k = k
flag = False
if 'cp.' in k:
k = k.replace('cp.', '')
if 'features.' in k:
num_layer = int(k.split('.')[1])
feature_key_lst = 'features.' + str(num_layer) + '.'
stages_key_lst = 'stages.' + stage_lst[num_layer] + '.'
k = k.replace(feature_key_lst, stages_key_lst)
flag = True
if 'conv_list' in k:
k = k.replace('conv_list', 'layers')
flag = True
if 'avd_layer.' in k:
if 'avd_layer.0' in k:
k = k.replace('avd_layer.0', 'downsample.conv')
elif 'avd_layer.1' in k:
k = k.replace('avd_layer.1', 'downsample.bn')
flag = True
if flag:
new_state_dict[k] = ckpt[ori_k]
return new_state_dict
def main():
parser = argparse.ArgumentParser(
description='Convert keys in official pretrained STDC1/2 to '
'MMSegmentation style.')
parser.add_argument('src', help='src model path')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
parser.add_argument('type', help='model type: STDC1 or STDC2')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
    assert args.type in ['STDC1', 'STDC2'], \
        'STDC type should be STDC1 or STDC2!'
weight = convert_stdc(state_dict, args.type)
mmcv.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
| 2,307 | 31.055556 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/model_converters/swin2mmseg.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmcv
import torch
from mmcv.runner import CheckpointLoader
def convert_swin(ckpt):
new_ckpt = OrderedDict()
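    # The official Swin PatchMerging concatenates the four 2x2-strided maps
    # as [x0, x1, x2, x3], whereas mmcv's implementation gathers them with
    # nn.Unfold, which yields the order [x0, x2, x1, x3]; the two helpers
    # below permute the pretrained reduction/norm weights accordingly.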
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1,
2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
for k, v in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('layers'):
new_v = v
if 'attn.' in k:
new_k = k.replace('attn.', 'attn.w_msa.')
elif 'mlp.' in k:
if 'mlp.fc1.' in k:
new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
elif 'mlp.fc2.' in k:
new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
else:
new_k = k.replace('mlp.', 'ffn.')
elif 'downsample' in k:
new_k = k
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
else:
new_k = k
new_k = new_k.replace('layers', 'stages', 1)
elif k.startswith('patch_embed'):
new_v = v
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
else:
new_v = v
new_k = k
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys in official pretrained swin models to'
'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert_swin(state_dict)
mmcv.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
| 2,728 | 30.011364 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/model_converters/twins2mmseg.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmcv
import torch
from mmcv.runner import CheckpointLoader
def convert_twins(args, ckpt):
new_ckpt = OrderedDict()
for k, v in list(ckpt.items()):
new_v = v
if k.startswith('head'):
continue
elif k.startswith('patch_embeds'):
if 'proj.' in k:
new_k = k.replace('proj.', 'projection.')
else:
new_k = k
elif k.startswith('blocks'):
            # Keys shared by both PCPVT and SVT
if 'attn.q.' in k:
new_k = k.replace('q.', 'attn.in_proj_')
new_v = torch.cat([v, ckpt[k.replace('attn.q.', 'attn.kv.')]],
dim=0)
elif 'mlp.fc1' in k:
new_k = k.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in k:
new_k = k.replace('mlp.fc2', 'ffn.layers.1')
# Only pcpvt
elif args.model == 'pcpvt':
if 'attn.proj.' in k:
new_k = k.replace('proj.', 'attn.out_proj.')
else:
new_k = k
# Only svt
else:
if 'attn.proj.' in k:
k_lst = k.split('.')
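                    # In SVT, odd-indexed blocks use global sub-sampled
                    # attention, whose projection maps to attn.out_proj.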
if int(k_lst[2]) % 2 == 1:
new_k = k.replace('proj.', 'attn.out_proj.')
else:
new_k = k
else:
new_k = k
new_k = new_k.replace('blocks.', 'layers.')
elif k.startswith('pos_block'):
new_k = k.replace('pos_block', 'position_encodings')
if 'proj.0.' in new_k:
new_k = new_k.replace('proj.0.', 'proj.')
else:
new_k = k
if 'attn.kv.' not in k:
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys in timm pretrained vit models to '
'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
parser.add_argument('model', help='model: pcpvt or svt')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
# timm checkpoint
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
weight = convert_twins(args, state_dict)
mmcv.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
| 2,752 | 30.284091 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/model_converters/vit2mmseg.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmcv
import torch
from mmcv.runner import CheckpointLoader
def convert_vit(ckpt):
new_ckpt = OrderedDict()
for k, v in ckpt.items():
if k.startswith('head'):
continue
if k.startswith('norm'):
new_k = k.replace('norm.', 'ln1.')
elif k.startswith('patch_embed'):
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
elif k.startswith('blocks'):
if 'norm' in k:
new_k = k.replace('norm', 'ln')
elif 'mlp.fc1' in k:
new_k = k.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in k:
new_k = k.replace('mlp.fc2', 'ffn.layers.1')
elif 'attn.qkv' in k:
new_k = k.replace('attn.qkv.', 'attn.attn.in_proj_')
elif 'attn.proj' in k:
new_k = k.replace('attn.proj', 'attn.attn.out_proj')
else:
new_k = k
new_k = new_k.replace('blocks.', 'layers.')
else:
new_k = k
new_ckpt[new_k] = v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys in timm pretrained vit models to '
'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
# timm checkpoint
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
# deit checkpoint
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert_vit(state_dict)
mmcv.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
| 2,117 | 28.830986 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/model_converters/vitjax2mmseg.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
import torch
def vit_jax_to_torch(jax_weights, num_layer=12):
torch_weights = dict()
# patch embedding
conv_filters = jax_weights['embedding/kernel']
conv_filters = conv_filters.permute(3, 2, 0, 1)
torch_weights['patch_embed.projection.weight'] = conv_filters
torch_weights['patch_embed.projection.bias'] = jax_weights[
'embedding/bias']
# pos embedding
torch_weights['pos_embed'] = jax_weights[
'Transformer/posembed_input/pos_embedding']
# cls token
torch_weights['cls_token'] = jax_weights['cls']
# head
torch_weights['ln1.weight'] = jax_weights['Transformer/encoder_norm/scale']
torch_weights['ln1.bias'] = jax_weights['Transformer/encoder_norm/bias']
# transformer blocks
for i in range(num_layer):
jax_block = f'Transformer/encoderblock_{i}'
torch_block = f'layers.{i}'
# attention norm
torch_weights[f'{torch_block}.ln1.weight'] = jax_weights[
f'{jax_block}/LayerNorm_0/scale']
torch_weights[f'{torch_block}.ln1.bias'] = jax_weights[
f'{jax_block}/LayerNorm_0/bias']
# attention
query_weight = jax_weights[
f'{jax_block}/MultiHeadDotProductAttention_1/query/kernel']
query_bias = jax_weights[
f'{jax_block}/MultiHeadDotProductAttention_1/query/bias']
key_weight = jax_weights[
f'{jax_block}/MultiHeadDotProductAttention_1/key/kernel']
key_bias = jax_weights[
f'{jax_block}/MultiHeadDotProductAttention_1/key/bias']
value_weight = jax_weights[
f'{jax_block}/MultiHeadDotProductAttention_1/value/kernel']
value_bias = jax_weights[
f'{jax_block}/MultiHeadDotProductAttention_1/value/bias']
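        # Stack the q/k/v kernels along a new axis and flatten them so the
        # result matches torch.nn.MultiheadAttention's packed in_proj layout.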
qkv_weight = torch.from_numpy(
np.stack((query_weight, key_weight, value_weight), 1))
qkv_weight = torch.flatten(qkv_weight, start_dim=1)
qkv_bias = torch.from_numpy(
np.stack((query_bias, key_bias, value_bias), 0))
qkv_bias = torch.flatten(qkv_bias, start_dim=0)
torch_weights[f'{torch_block}.attn.attn.in_proj_weight'] = qkv_weight
torch_weights[f'{torch_block}.attn.attn.in_proj_bias'] = qkv_bias
to_out_weight = jax_weights[
f'{jax_block}/MultiHeadDotProductAttention_1/out/kernel']
to_out_weight = torch.flatten(to_out_weight, start_dim=0, end_dim=1)
torch_weights[
f'{torch_block}.attn.attn.out_proj.weight'] = to_out_weight
torch_weights[f'{torch_block}.attn.attn.out_proj.bias'] = jax_weights[
f'{jax_block}/MultiHeadDotProductAttention_1/out/bias']
# mlp norm
torch_weights[f'{torch_block}.ln2.weight'] = jax_weights[
f'{jax_block}/LayerNorm_2/scale']
torch_weights[f'{torch_block}.ln2.bias'] = jax_weights[
f'{jax_block}/LayerNorm_2/bias']
# mlp
torch_weights[f'{torch_block}.ffn.layers.0.0.weight'] = jax_weights[
f'{jax_block}/MlpBlock_3/Dense_0/kernel']
torch_weights[f'{torch_block}.ffn.layers.0.0.bias'] = jax_weights[
f'{jax_block}/MlpBlock_3/Dense_0/bias']
torch_weights[f'{torch_block}.ffn.layers.1.weight'] = jax_weights[
f'{jax_block}/MlpBlock_3/Dense_1/kernel']
torch_weights[f'{torch_block}.ffn.layers.1.bias'] = jax_weights[
f'{jax_block}/MlpBlock_3/Dense_1/bias']
# transpose weights
for k, v in torch_weights.items():
if 'weight' in k and 'patch_embed' not in k and 'ln' not in k:
v = v.permute(1, 0)
torch_weights[k] = v
return torch_weights
def main():
# stole refactoring code from Robin Strudel, thanks
parser = argparse.ArgumentParser(
description='Convert keys from jax official pretrained vit models to '
'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
args = parser.parse_args()
jax_weights = np.load(args.src)
jax_weights_tensor = {}
for key in jax_weights.files:
value = torch.from_numpy(jax_weights[key])
jax_weights_tensor[key] = value
if 'L_16-i21k' in args.src:
num_layer = 24
else:
num_layer = 12
torch_weights = vit_jax_to_torch(jax_weights_tensor, num_layer)
mmcv.mkdir_or_exist(osp.dirname(args.dst))
torch.save(torch_weights, args.dst)
if __name__ == '__main__':
main()
| 4,675 | 36.709677 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/torchserve/mmseg2torchserve.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmseg2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts mmsegmentation model (config + checkpoint) to TorchServe
`.mar`.
Args:
config_file:
In MMSegmentation config format.
The contents vary for each task repository.
checkpoint_file:
In MMSegmentation checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mmcv.mkdir_or_exist(output_folder)
config = mmcv.Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmseg_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
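# Example invocation (placeholder paths, for illustration only):
#   python tools/torchserve/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT} \
#       --output-folder ${MODEL_STORE} --model-name ${MODEL_NAME}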
def parse_args():
parser = ArgumentParser(
description='Convert mmseg models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
help='If not None, used for naming the `{model_name}.mar`'
'file that will be created under `output_folder`.'
'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
raise ImportError('`torch-model-archiver` is required.'
'Try: pip install torch-model-archiver')
mmseg2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
| 3,700 | 32.044643 | 76 | py |
mmsegmentation | mmsegmentation-master/tools/torchserve/mmseg_handler.py | # Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import cv2
import mmcv
import torch
from mmcv.cnn.utils.sync_bn import revert_sync_batchnorm
from ts.torch_handler.base_handler import BaseHandler
from mmseg.apis import inference_segmentor, init_segmentor
class MMsegHandler(BaseHandler):
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = torch.device(self.map_location + ':' +
str(properties.get('gpu_id')) if torch.cuda.
is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_segmentor(self.config_file, checkpoint, self.device)
self.model = revert_sync_batchnorm(self.model)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = [inference_segmentor(self.model, img) for img in data]
return results
def postprocess(self, data):
output = []
for image_result in data:
_, buffer = cv2.imencode('.png', image_result[0].astype('uint8'))
content = buffer.tobytes()
output.append(content)
return output
| 1,867 | 31.77193 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/torchserve/test_torchserve.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser
from io import BytesIO
import matplotlib.pyplot as plt
import mmcv
import requests
from mmseg.apis import inference_segmentor, init_segmentor
def parse_args():
parser = ArgumentParser(
description='Compare result of torchserve and pytorch,'
'and visualize them.')
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--result-image',
type=str,
default=None,
help='save server output in result-image')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
args = parser.parse_args()
return args
def main(args):
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
tmp_res = requests.post(url, image)
content = tmp_res.content
if args.result_image:
with open(args.result_image, 'wb') as out_image:
out_image.write(content)
plt.imshow(mmcv.imread(args.result_image, 'grayscale'))
plt.show()
else:
plt.imshow(plt.imread(BytesIO(content)))
plt.show()
model = init_segmentor(args.config, args.checkpoint, args.device)
image = mmcv.imread(args.img)
result = inference_segmentor(model, image)
plt.imshow(result[0])
plt.show()
if __name__ == '__main__':
args = parse_args()
main(args)
| 1,795 | 29.440678 | 77 | py |
CSD-locomotion | CSD-locomotion-master/README.md | # Controllability-Aware Unsupervised Skill Discovery
## Overview
This is the official implementation of [**Controllability-aware Skill Discovery** (**CSD**)](https://arxiv.org/abs/2302.05103) on locomotion environments (MuJoCo Ant, HalfCheetah, and Humanoid).
The codebase is based on the implementation of [LSD](https://github.com/seohongpark/LSD).
We refer to http://github.com/seohongpark/CSD-manipulation for the implementation of CSD on manipulation environments.
Please visit [our project page](https://seohong.me/projects/csd/) for videos.
## Installation
```
conda create --name csd-locomotion python=3.8
conda activate csd-locomotion
pip install -r requirements.txt
pip install -e .
pip install -e garaged --no-deps
```
## Examples
CSD Ant (16 discrete skills)
```
python tests/main.py --run_group EXP --env ant --max_path_length 200 --dim_option 16 --num_random_trajectories 200 --seed 0 --normalizer_type ant_preset --use_gpu 1 --traj_batch_size 10 --n_parallel 1 --n_epochs_per_eval 1000 --n_thread 1 --record_metric_difference 0 --n_epochs_per_tb 100 --n_epochs_per_save 10000 --n_epochs_per_pt_save 5000 --n_epochs_per_pkl_update 1000 --eval_record_video 1 --n_epochs 2000001 --n_epochs_per_log 100 --discrete 1 --sac_discount 0.99 --sac_update_target_per_gradient 1 --max_optimization_epochs 1 --trans_minibatch_size 1024 --trans_optimization_epochs 64 --te_max_optimization_epochs 1 --te_trans_optimization_epochs 32 --sac_scale_reward 10 --spectral_normalization 0 --alpha 0.03 --sac_lr_a -1 --common_lr 0.0001 --dual_reg 1 --dual_dist s2_from_s --dual_lam 3000 --dual_slack 1e-06 --eval_plot_axis -50 50 -50 50 --model_master_dim 512
```
CSD HalfCheetah (16 discrete skills)
```
python tests/main.py --run_group EXP --env half_cheetah --max_path_length 200 --dim_option 16 --num_random_trajectories 200 --seed 0 --normalizer_type half_cheetah_preset --use_gpu 1 --traj_batch_size 10 --n_parallel 1 --n_epochs_per_eval 1000 --n_thread 1 --record_metric_difference 0 --n_epochs_per_tb 100 --n_epochs_per_save 10000 --n_epochs_per_pt_save 5000 --n_epochs_per_pkl_update 1000 --eval_record_video 1 --n_epochs 2000001 --n_epochs_per_log 100 --discrete 1 --sac_discount 0.99 --sac_update_target_per_gradient 1 --max_optimization_epochs 1 --trans_minibatch_size 1024 --trans_optimization_epochs 64 --te_max_optimization_epochs 1 --te_trans_optimization_epochs 32 --sac_scale_reward 10 --spectral_normalization 0 --alpha 0.1 --sac_lr_a -1 --common_lr 0.0001 --dual_reg 1 --dual_dist s2_from_s --dual_lam 3000 --dual_slack 1e-06 --model_master_dim 512
```
CSD Humanoid (16 discrete skills)
```
python tests/main.py --run_group EXP --env humanoid --max_path_length 1000 --dim_option 16 --num_random_trajectories 200 --seed 0 --normalizer_type humanoid_preset --use_gpu 1 --traj_batch_size 5 --n_parallel 1 --n_epochs_per_eval 1000 --n_thread 1 --record_metric_difference 0 --n_epochs_per_tb 100 --n_epochs_per_save 10000 --n_epochs_per_pt_save 5000 --n_epochs_per_pkl_update 1000 --eval_record_video 1 --n_epochs 2000001 --n_epochs_per_log 100 --discrete 1 --sac_discount 0.99 --sac_update_target_per_gradient 1 --max_optimization_epochs 1 --trans_minibatch_size 1024 --trans_optimization_epochs 64 --te_max_optimization_epochs 1 --te_trans_optimization_epochs 32 --sac_scale_reward 10 --spectral_normalization 0 --alpha 0.3 --sac_lr_a -1 --common_lr 0.0003 --dual_reg 1 --dual_dist s2_from_s --dual_lam 3000 --dual_slack 1e-06 --video_skip_frames 3 --model_master_dim 1024 --sac_replay_buffer 1
```
LSD Ant (16 discrete skills)
```
python tests/main.py --run_group EXP --env ant --max_path_length 200 --dim_option 16 --num_random_trajectories 200 --seed 0 --normalizer_type ant_preset --use_gpu 1 --traj_batch_size 10 --n_parallel 1 --n_epochs_per_eval 1000 --n_thread 1 --record_metric_difference 0 --n_epochs_per_tb 100 --n_epochs_per_save 10000 --n_epochs_per_pt_save 5000 --n_epochs_per_pkl_update 1000 --eval_record_video 1 --n_epochs 2000001 --n_epochs_per_log 100 --discrete 1 --sac_discount 0.99 --sac_update_target_per_gradient 1 --max_optimization_epochs 1 --trans_minibatch_size 1024 --trans_optimization_epochs 64 --te_max_optimization_epochs 1 --te_trans_optimization_epochs 32 --sac_scale_reward 1 --spectral_normalization 1 --alpha 0.003 --sac_lr_a -1 --common_lr 0.0001 --eval_plot_axis -50 50 -50 50 --model_master_dim 512
```
LSD HalfCheetah (16 discrete skills)
```
python tests/main.py --run_group EXP --env half_cheetah --max_path_length 200 --dim_option 16 --num_random_trajectories 200 --seed 0 --normalizer_type half_cheetah_preset --use_gpu 1 --traj_batch_size 10 --n_parallel 1 --n_epochs_per_eval 1000 --n_thread 1 --record_metric_difference 0 --n_epochs_per_tb 100 --n_epochs_per_save 10000 --n_epochs_per_pt_save 5000 --n_epochs_per_pkl_update 1000 --eval_record_video 1 --n_epochs 2000001 --n_epochs_per_log 100 --discrete 1 --sac_discount 0.99 --sac_update_target_per_gradient 1 --max_optimization_epochs 1 --trans_minibatch_size 1024 --trans_optimization_epochs 64 --te_max_optimization_epochs 1 --te_trans_optimization_epochs 32 --sac_scale_reward 1 --spectral_normalization 1 --alpha 0.003 --sac_lr_a -1 --common_lr 0.0001 --model_master_dim 512
```
LSD Humanoid (16 discrete skills)
```
python tests/main.py --run_group EXP --env humanoid --max_path_length 1000 --dim_option 16 --num_random_trajectories 200 --seed 0 --normalizer_type humanoid_preset --use_gpu 1 --traj_batch_size 5 --n_parallel 1 --n_epochs_per_eval 1000 --n_thread 1 --record_metric_difference 0 --n_epochs_per_tb 100 --n_epochs_per_save 10000 --n_epochs_per_pt_save 5000 --n_epochs_per_pkl_update 1000 --eval_record_video 1 --n_epochs 2000001 --n_epochs_per_log 100 --discrete 1 --sac_discount 0.99 --sac_update_target_per_gradient 1 --max_optimization_epochs 1 --trans_minibatch_size 1024 --trans_optimization_epochs 64 --te_max_optimization_epochs 1 --te_trans_optimization_epochs 32 --sac_scale_reward 1 --spectral_normalization 1 --alpha 0.03 --sac_lr_a -1 --common_lr 0.0001 --video_skip_frames 3 --model_master_dim 1024 --sac_replay_buffer 1
```
## Comments on the Implementations of CSD and LSD
The CSD and LSD implementations in this repository, which we use to produce the results in the CSD paper,
are based on a sample-efficient version of LSD.
The main difference between [the original LSD](https://github.com/seohongpark/LSD) and this sample-efficient version is that
the latter updates the target network every gradient step, not every epoch.
This (in combination with additional hyperparameter adjustments) greatly improves the sample efficiency of LSD by ~10 times
(e.g., the original LSD uses 400M steps for Ant while this version uses 40M steps),
but it may also slightly degrade the performance.
For reproducing the results in the LSD paper,
we recommend using [the original LSD implementation](https://github.com/seohongpark/LSD).
## License
MIT
| 6,890 | 106.671875 | 900 | md |
CSD-locomotion | CSD-locomotion-master/dowel_wrapper.py | import sys
assert 'dowel' not in sys.modules, 'dowel must be imported after dowel_wrapper.'
# https://stackoverflow.com/a/6985648/2182622
import dowel
dowel_eval = dowel
del sys.modules['dowel']
import dowel
dowel_plot = dowel
del sys.modules['dowel']
import dowel
all_dowels = [dowel, dowel_eval, dowel_plot]
assert len(set(id(d) for d in all_dowels)) == len(all_dowels)
import global_context
def get_dowel(phase=None):
if (phase or global_context.get_context().get('phase')).lower() == 'plot':
return dowel_plot
if (phase or global_context.get_context().get('phase')).lower() == 'eval':
return dowel_eval
return dowel
def get_logger(phase=None):
return get_dowel(phase).logger
def get_tabular(phase=None):
return get_dowel(phase).tabular
| 781 | 25.965517 | 80 | py |
CSD-locomotion | CSD-locomotion-master/global_context.py | import copy
_g_session = None
_g_context = {}
class GlobalContext:
def __init__(self, context):
self.context = context
def __enter__(self):
global _g_context
self.prev_g_context = _g_context
_g_context = self.context
def __exit__(self, exc_type, exc_val, exc_tb):
global _g_context
_g_context = self.prev_g_context
def get_metric_prefix():
global _g_context
prefix = ''
if 'phase' in _g_context:
prefix += _g_context['phase'].capitalize()
if 'policy' in _g_context:
prefix += {'sampling': 'Sp', 'option': 'Op'}.get(
_g_context['policy'].lower(), _g_context['policy'].lower()).capitalize()
    if len(prefix) == 0:
        return ''
    return prefix + '/'
def get_context():
global _g_context
return copy.copy(_g_context)
| 849 | 20.794872 | 84 | py |
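A minimal sketch of how `dowel_wrapper` and `global_context` work together: the phase stored in the global context steers both which dowel instance is returned and the metric prefix. Note that `dowel_wrapper` must be imported before anything else imports `dowel` (see the assert in that file); the context values below are only illustrative.

```
import dowel_wrapper  # must come before any direct `import dowel`
from global_context import GlobalContext, get_metric_prefix

with GlobalContext({'phase': 'eval', 'policy': 'sampling'}):
    logger = dowel_wrapper.get_logger()  # resolves to the 'eval' dowel
    assert get_metric_prefix() == 'EvalSp/'
assert get_metric_prefix() == ''         # context restored on exit
```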
CSD-locomotion | CSD-locomotion-master/setup.py | import os
from setuptools import find_packages
from setuptools import setup
setup(
name='iod',
packages=find_packages(where='.'),
package_dir={'': '.'},
python_requires='>=3.5',
)
| 198 | 15.583333 | 38 | py |
CSD-locomotion | CSD-locomotion-master/dowel/__init__.py | """Logger module.
This module instantiates a global logger singleton.
"""
from dowel.histogram import Histogram
from dowel.logger import Logger, LoggerWarning, LogOutput
from dowel.simple_outputs import StdOutput, TextOutput
from dowel.tabular_input import TabularInput
from dowel.csv_output import CsvOutput # noqa: I100
from dowel.tensor_board_output import TensorBoardOutput
logger = Logger()
tabular = TabularInput()
__all__ = [
'Histogram',
'Logger',
'CsvOutput',
'StdOutput',
'TextOutput',
'LogOutput',
'LoggerWarning',
'TabularInput',
'TensorBoardOutput',
'logger',
'tabular',
]
| 634 | 21.678571 | 57 | py |
CSD-locomotion | CSD-locomotion-master/dowel/csv_output.py | """A `dowel.logger.LogOutput` for CSV files."""
import csv
import warnings
from dowel import TabularInput
from dowel.simple_outputs import FileOutput
from dowel.utils import colorize
class CsvOutput(FileOutput):
"""CSV file output for logger.
:param file_name: The file this output should log to.
"""
def __init__(self, file_name):
super().__init__(file_name)
self._writer = None
self._fieldnames = None
self._warned_once = set()
self._disable_warnings = False
@property
def types_accepted(self):
"""Accept TabularInput objects only."""
return (TabularInput, )
def record(self, data, prefix=''):
"""Log tabular data to CSV."""
if isinstance(data, TabularInput):
to_csv = data.as_primitive_dict
if not to_csv.keys() and not self._writer:
return
if not self._writer:
self._fieldnames = set(to_csv.keys())
self._writer = csv.DictWriter(
self._log_file,
fieldnames=sorted(list(self._fieldnames)),
extrasaction='ignore')
self._writer.writeheader()
if to_csv.keys() != self._fieldnames:
self._warn('Inconsistent TabularInput keys detected. '
'CsvOutput keys: {}. '
'TabularInput keys: {}. '
'Did you change key sets after your first '
'logger.log(TabularInput)?'.format(
set(self._fieldnames), set(to_csv.keys())))
self._writer.writerow(to_csv)
for k in to_csv.keys():
data.mark(k)
else:
raise ValueError('Unacceptable type.')
def _warn(self, msg):
"""Warns the user using warnings.warn.
The stacklevel parameter needs to be 3 to ensure the call to logger.log
is the one printed.
"""
if not self._disable_warnings and msg not in self._warned_once:
warnings.warn(
colorize(msg, 'yellow'), CsvOutputWarning, stacklevel=3)
self._warned_once.add(msg)
return msg
def disable_warnings(self):
"""Disable logger warnings for testing."""
self._disable_warnings = True
class CsvOutputWarning(UserWarning):
"""Warning class for CsvOutput."""
pass
| 2,451 | 29.65 | 79 | py |
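A minimal usage sketch for `CsvOutput`: the key set of the first logged `TabularInput` fixes the CSV header, which is why the inconsistent-keys warning above exists. The file path is hypothetical.

```
from dowel import CsvOutput, TabularInput

output = CsvOutput('/tmp/progress.csv')  # hypothetical path
table = TabularInput()
table.record('Epoch', 0)
table.record('Loss', 1.25)
output.record(table)
output.dump()   # flush the underlying file
output.close()
```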
CSD-locomotion | CSD-locomotion-master/dowel/histogram.py | """Histogram logger input."""
import numpy as np
class Histogram(np.ndarray):
"""A `dowel.logger` input representing a histogram of raw data.
This is implemented as a typed view of a numpy array. It will accept
input that `numpy.asarray` will.
See https://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
details on implementation.
"""
def __new__(cls, *args, **kwargs):
"""Reimplement `numpy.ndarray.__new__`.
Creates objects of this class using `numpy.asarray`, then view-casts
them back into the class `Histogram`.
"""
return np.asarray(*args, **kwargs).view(cls)
| 650 | 28.590909 | 76 | py |
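Because `Histogram` is only a typed view, construction is as cheap as `np.asarray`. A minimal sketch:

```
import numpy as np
from dowel import Histogram

samples = Histogram(np.random.randn(1000))
assert isinstance(samples, np.ndarray)  # still an ndarray underneath
```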
CSD-locomotion | CSD-locomotion-master/dowel/logger.py | """Logging facility.
It takes in many different types of input and directs them to the correct
output.
The logger has 4 major steps:
1. Inputs, such as a simple string or something more complicated like
TabularInput, are passed to the log() method of an instantiated Logger.
2. The Logger class checks for any outputs that have been added to it, and
calls the record() method of any outputs that accept the type of input.
3. The output (a subclass of LogOutput) receives the input via its record()
method and handles it in whatever way is expected.
4. (only in some cases) The dump method is used to dump the output to file.
It is necessary for some LogOutput subclasses, like TensorBoardOutput.
# Here's a demonstration of dowel:
from dowel import logger
+------+
|logger|
+------+
# Let's add an output to the logger. We want to log to the console, so we'll
# add a StdOutput.
from dowel import StdOutput
logger.add_output(StdOutput())
+------+ +---------+
|logger+------>StdOutput|
+------+ +---------+
# Great! Now we can start logging text.
logger.log('Hello dowel')
# This will go straight to the console as 'Hello dowel'
+------+ +---------+
|logger+---'Hello dowel'--->StdOutput|
+------+ +---------+
# Let's try adding another output.
from dowel import TextOutput
logger.add_output(TextOutput('log_folder/log.txt'))
+---------+
+------>StdOutput|
+------+ +---------+
|logger|
+------+ +----------+
+------>TextOutput|
+----------+
# And another output.
from dowel import CsvOutput
logger.add_output(CsvOutput('log_folder/table.csv'))
+---------+
+------>StdOutput|
| +---------+
|
+------+ +----------+
|logger+------>TextOutput|
+------+ +----------+
|
| +---------+
+------>CsvOutput|
+---------+
# The logger will record anything passed to logger.log to all outputs that
# accept its type.
logger.log('test')
+---------+
+---'test'--->StdOutput|
| +---------+
|
+------+ +----------+
|logger+---'test'--->TextOutput|
+------+ +----------+
|
| +---------+
+-----!!----->CsvOutput|
+---------+
# !! Note that the logger knows not to send CsvOutput the string 'test'
# Similarly, more complex objects like tf.tensor won't be sent to (for
# example) TextOutput.
# This behavior is defined in each output's types_accepted property
# Here's a more complex example.
# TabularInput, instantiated for you as the tabular, can log key/value pairs.
from dowel import tabular
tabular.record('key', 72)
tabular.record('foo', 'bar')
logger.log(tabular)
+---------+
+---tabular--->StdOutput|
| +---------+
|
+------+ +----------+
|logger+---tabular--->TextOutput|
+------+ +----------+
|
| +---------+
+---tabular--->CsvOutput|
+---------+
Note that LogOutputs which consume TabularInputs must call TabularInput.mark()
on each key they log. This helps the logger detect when tabular data is not
logged.
# Console Output:
--- ---
key 72
foo bar
--- ---
# Feel free to add your own inputs and outputs to the logger!
"""
import abc
import contextlib
import warnings
from dowel.utils import colorize
class LogOutput(abc.ABC):
"""Abstract class for Logger Outputs."""
@property
def types_accepted(self):
"""Pass these types to this logger output.
The types in this tuple will be accepted by this output.
:return: A tuple containing all valid input types.
"""
return ()
@abc.abstractmethod
def record(self, data, prefix=''):
"""Pass logger data to this output.
:param data: The data to be logged by the output.
:param prefix: A prefix placed before a log entry in text outputs.
"""
pass
def dump(self, step=None):
"""Dump the contents of this output.
:param step: The current run step.
"""
pass
def close(self):
"""Close any files used by the output."""
pass
def __del__(self):
"""Clean up object upon deletion."""
self.close()
class Logger:
"""This is the class that handles logging."""
def __init__(self):
self._outputs = []
self._prefixes = []
self._prefix_str = ''
self._warned_once = set()
self._disable_warnings = False
def log(self, data):
"""Magic method that takes in all different types of input.
This method is the main API for the logger. Any data to be logged goes
through this method.
Any data sent to this method is sent to all outputs that accept its
type (defined in the types_accepted property).
:param data: Data to be logged. This can be any type specified in the
types_accepted property of any of the logger outputs.
"""
if not self._outputs:
self._warn('No outputs have been added to the logger.')
at_least_one_logged = False
for output in self._outputs:
if isinstance(data, output.types_accepted):
output.record(data, prefix=self._prefix_str)
at_least_one_logged = True
if not at_least_one_logged:
warning = (
'Log data of type {} was not accepted by any output'.format(
type(data).__name__))
self._warn(warning)
def add_output(self, output):
"""Add a new output to the logger.
All data that is compatible with this output will be sent there.
:param output: An instantiation of a LogOutput subclass to be added.
"""
if isinstance(output, type):
msg = 'Output object must be instantiated - don\'t pass a type.'
raise ValueError(msg)
elif not isinstance(output, LogOutput):
raise ValueError('Output object must be a subclass of LogOutput')
self._outputs.append(output)
def remove_all(self):
"""Remove all outputs that have been added to this logger."""
self._outputs.clear()
def remove_output_type(self, output_type):
"""Remove all outputs of a given type.
:param output_type: A LogOutput subclass type to be removed.
"""
self._outputs = [
output for output in self._outputs
if not isinstance(output, output_type)
]
def reset_output(self, output):
"""Removes, then re-adds a given output to the logger.
:param output: An instantiation of a LogOutput subclass to be added.
"""
self.remove_output_type(type(output))
self.add_output(output)
def has_output_type(self, output_type):
"""Check to see if a given logger output is attached to the logger.
:param output_type: A LogOutput subclass type to be checked for.
"""
for output in self._outputs:
if isinstance(output, output_type):
return True
return False
def dump_output_type(self, output_type, step=None):
"""Dump all outputs of the given type.
:param output_type: A LogOutput subclass type to be dumped.
:param step: The current run step.
"""
for output in self._outputs:
if isinstance(output, output_type):
output.dump(step=step)
def dump_all(self, step=None):
"""Dump all outputs connected to the logger.
:param step: The current run step.
"""
for output in self._outputs:
output.dump(step=step)
@contextlib.contextmanager
def prefix(self, prefix):
"""Add a prefix to the logger.
This allows text output to be prepended with a given stack of prefixes.
Example:
with logger.prefix('prefix: '):
logger.log('test_string') # this will have the prefix
logger.log('test_string2') # this will not have the prefix
:param prefix: The prefix string to be logged.
"""
self.push_prefix(prefix)
try:
yield
finally:
self.pop_prefix()
def push_prefix(self, prefix):
"""Add prefix to prefix stack.
:param prefix: The prefix string to be logged.
"""
self._prefixes.append(prefix)
self._prefix_str = ''.join(self._prefixes)
def pop_prefix(self):
"""Pop prefix from prefix stack."""
del self._prefixes[-1]
self._prefix_str = ''.join(self._prefixes)
def _warn(self, msg):
"""Warns the user using warnings.warn.
The stacklevel parameter needs to be 3 to ensure the call to logger.log
is the one printed.
"""
if not self._disable_warnings and msg not in self._warned_once:
warnings.warn(colorize(msg, 'yellow'), LoggerWarning, stacklevel=3)
self._warned_once.add(msg)
return msg
def disable_warnings(self):
"""Disable logger warnings for testing."""
self._disable_warnings = True
class LoggerWarning(UserWarning):
"""Warning class for the Logger."""
| 9,416 | 27.536364 | 79 | py |
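The module docstring above walks through the output-routing design; the same flow as a single runnable sketch (the `log_folder/` paths are placeholders):

```
from dowel import CsvOutput, StdOutput, TextOutput, logger, tabular

logger.add_output(StdOutput())
logger.add_output(TextOutput('log_folder/log.txt'))
logger.add_output(CsvOutput('log_folder/table.csv'))

logger.log('Hello dowel')  # str: routed to StdOutput and TextOutput only
tabular.record('key', 72)
tabular.record('foo', 'bar')
logger.log(tabular)        # TabularInput: accepted by all three outputs
logger.dump_all()
logger.remove_all()
```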
CSD-locomotion | CSD-locomotion-master/dowel/setup.py | import os
from setuptools import find_packages
from setuptools import setup
setup(
name='dowel',
packages=find_packages(where='.'),
package_dir={'': '.'},
python_requires='>=3.5',
)
| 200 | 15.75 | 38 | py |
CSD-locomotion | CSD-locomotion-master/dowel/simple_outputs.py | """Contains the output classes for the logger.
Each class is sent logger data and handles it itself.
"""
import abc
import datetime
import os
import sys
import dateutil.tz
from dowel import LogOutput
from dowel.tabular_input import TabularInput
from dowel.utils import mkdir_p
class StdOutput(LogOutput):
"""Standard console output for the logger.
:param with_timestamp: Whether to log a timestamp before non-tabular data.
"""
def __init__(self, with_timestamp=True):
self._with_timestamp = with_timestamp
@property
def types_accepted(self):
"""Accept str and TabularInput objects."""
return (str, TabularInput)
def record(self, data, prefix=''):
"""Log data to console."""
if isinstance(data, str):
out = prefix + data
if self._with_timestamp:
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
out = '%s | %s' % (timestamp, out)
elif isinstance(data, TabularInput):
out = str(data)
data.mark_str()
else:
raise ValueError("Unacceptable type")
print(out)
def dump(self, step=None):
"""Flush data to standard output stream."""
sys.stdout.flush()
class FileOutput(LogOutput, metaclass=abc.ABCMeta):
"""File output abstract class for logger.
:param file_name: The file this output should log to.
:param mode: File open mode ('a', 'w', etc).
"""
def __init__(self, file_name, mode='w'):
mkdir_p(os.path.dirname(file_name))
# Open the log file in child class
self._log_file = open(file_name, mode)
def close(self):
"""Close any files used by the output."""
if self._log_file and not self._log_file.closed:
self._log_file.close()
def dump(self, step=None):
"""Flush data to log file."""
self._log_file.flush()
class TextOutput(FileOutput):
"""Text file output for logger.
:param file_name: The file this output should log to.
:param with_timestamp: Whether to log a timestamp before the data.
"""
def __init__(self, file_name, with_timestamp=True):
super().__init__(file_name, 'a')
self._with_timestamp = with_timestamp
self._delimiter = ' | '
@property
def types_accepted(self):
"""Accept str objects only."""
return (str, TabularInput)
def record(self, data, prefix=''):
"""Log data to text file."""
if isinstance(data, str):
out = prefix + data
if self._with_timestamp:
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
out = '%s | %s' % (timestamp, out)
elif isinstance(data, TabularInput):
out = str(data)
data.mark_str()
else:
raise ValueError("Unacceptable type.")
self._log_file.write(out + '\n')
| 3,052 | 27.801887 | 78 | py |
CSD-locomotion | CSD-locomotion-master/dowel/tabular_input.py | """A `dowel.logger` input for tabular (key-value) data."""
import contextlib
import warnings
import numpy as np
import tabulate
from dowel.utils import colorize
class TabularInput:
"""This class allows the user to create tables for easy display.
TabularInput may be passed to the logger via its log() method.
"""
def __init__(self):
self._dict = {}
self._recorded = set()
self._prefixes = []
self._prefix_str = ''
self._warned_once = set()
self._disable_warnings = False
def __str__(self):
"""Return a string representation of the table for the logger."""
return tabulate.tabulate(
sorted(self.as_primitive_dict.items(), key=lambda x: x[0]))
def record(self, key, val):
"""Save key/value entries for the table.
:param key: String key corresponding to the value.
:param val: Value that is to be stored in the table.
"""
self._dict[self._prefix_str + str(key)] = val
def mark(self, key):
"""Mark key as recorded."""
self._recorded.add(key)
def mark_str(self):
"""Mark keys in the primitive dict."""
self._recorded |= self.as_primitive_dict.keys()
def mark_all(self):
"""Mark all keys."""
self._recorded |= self._dict.keys()
def record_misc_stat(self, key, values, placement='back'):
"""Record statistics of an array.
:param key: String key corresponding to the values.
:param values: Array of values to be analyzed.
:param placement: Whether to put the prefix in front or in the back.
"""
if placement == 'front':
front = ""
back = key
else:
front = key
back = ""
if values:
self.record(front + 'Average' + back, np.average(values))
self.record(front + 'Std' + back, np.std(values))
self.record(front + 'Median' + back, np.median(values))
self.record(front + 'Min' + back, np.min(values))
self.record(front + 'Max' + back, np.max(values))
else:
self.record(front + 'Average' + back, np.nan)
self.record(front + 'Std' + back, np.nan)
self.record(front + 'Median' + back, np.nan)
self.record(front + 'Min' + back, np.nan)
self.record(front + 'Max' + back, np.nan)
@contextlib.contextmanager
def prefix(self, prefix):
"""Handle pushing and popping of a tabular prefix.
Can be used in the following way:
with tabular.prefix('your_prefix_'):
# your code
tabular.record(key, val)
:param prefix: The string prefix to be prepended to logs.
"""
self.push_prefix(prefix)
try:
yield
finally:
self.pop_prefix()
def clear(self):
"""Clear the tabular."""
# Warn if something wasn't logged
for k, v in self._dict.items():
if k not in self._recorded:
warning = (
'TabularInput {{{}: type({})}} was not accepted by any '
'output'.format(k,
type(v).__name__))
self._warn(warning)
self._dict.clear()
self._recorded.clear()
def push_prefix(self, prefix):
"""Push prefix to be appended before printed table.
:param prefix: The string prefix to be prepended to logs.
"""
self._prefixes.append(prefix)
self._prefix_str = ''.join(self._prefixes)
def pop_prefix(self):
"""Pop prefix that was appended to the printed table."""
del self._prefixes[-1]
self._prefix_str = ''.join(self._prefixes)
@property
def as_primitive_dict(self):
"""Return the dictionary, excluding all nonprimitive types."""
return {
key: val
for key, val in self._dict.items() if np.isscalar(val)
}
@property
def as_dict(self):
"""Return a dictionary of the tabular items."""
return self._dict
def _warn(self, msg):
"""Warns the user using warnings.warn.
The stacklevel parameter needs to be 3 to ensure the call to logger.log
is the one printed.
"""
if not self._disable_warnings and msg not in self._warned_once:
warnings.warn(
colorize(msg, 'yellow'), TabularInputWarning, stacklevel=3)
self._warned_once.add(msg)
return msg
def disable_warnings(self):
"""Disable logger warnings for testing."""
self._disable_warnings = True
class TabularInputWarning(UserWarning):
"""Warning class for the TabularInput."""
pass
| 4,786 | 29.883871 | 79 | py |
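A minimal sketch of `prefix` and `record_misc_stat`: with the default `placement='back'`, the statistic name is appended after the key.

```
from dowel import TabularInput

table = TabularInput()
with table.prefix('Train/'):
    table.record('Loss', 0.5)
    table.record_misc_stat('Reward', [1.0, 2.0, 3.0])
# Keys: 'Train/Loss', 'Train/RewardAverage', 'Train/RewardStd', ...
print(table.as_primitive_dict)
```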
CSD-locomotion | CSD-locomotion-master/dowel/tensor_board_output.py | """A `dowel.logger.LogOutput` for tensorboard.
It receives the input data stream from `dowel.logger`, then add them to
tensorboard summary operations through tensorboardX.
Note:
Neither TensorboardX nor TensorBoard supports log parametric
distributions. We add this feature by sampling data from a
`tfp.distributions.Distribution` object.
"""
import functools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import tensorboardX as tbX
try:
import tensorflow as tf
except ImportError:
tf = None
from dowel import Histogram
from dowel import LoggerWarning
from dowel import LogOutput
from dowel import TabularInput
from dowel.utils import colorize
class TensorBoardOutput(LogOutput):
"""TensorBoard output for logger.
Args:
log_dir(str): The save location of the tensorboard event files.
x_axis(str): The name of data used as x-axis for scalar tabular.
If None, x-axis will be the number of dump() is called.
additional_x_axes(list[str]): Names of data to used be as additional
x-axes.
flush_secs(int): How often, in seconds, to flush the added summaries
and events to disk.
histogram_samples(int): Number of samples to generate when logging
random distribution.
"""
def __init__(self,
log_dir,
x_axis=None,
additional_x_axes=None,
flush_secs=120,
histogram_samples=1e3):
if x_axis is None:
assert not additional_x_axes, (
'You have to specify an x_axis if you want additional axes.')
additional_x_axes = additional_x_axes or []
self._writer = tbX.SummaryWriter(log_dir, flush_secs=flush_secs)
self._x_axis = x_axis
self._additional_x_axes = additional_x_axes
self._default_step = 0
self._histogram_samples = int(histogram_samples)
self._added_graph = False
self._waiting_for_dump = []
# Used in tests to emulate Tensorflow not being installed.
self._tf = tf
self._warned_once = set()
self._disable_warnings = False
@property
def types_accepted(self):
"""Return the types that the logger may pass to this output."""
if self._tf is None:
return (TabularInput, )
else:
return (TabularInput, self._tf.Graph)
def record(self, data, prefix=''):
"""Add data to tensorboard summary.
Args:
data: The data to be logged by the output.
prefix(str): A prefix placed before a log entry in text outputs.
"""
if isinstance(data, TabularInput):
self._waiting_for_dump.append(
functools.partial(self._record_tabular, data))
elif self._tf is not None and isinstance(data, self._tf.Graph):
self._record_graph(data)
else:
raise ValueError('Unacceptable type.')
def _record_tabular(self, data, step):
if self._x_axis:
nonexist_axes = []
for axis in [self._x_axis] + self._additional_x_axes:
if axis not in data.as_dict:
nonexist_axes.append(axis)
if nonexist_axes:
self._warn('{} {} exist in the tabular data.'.format(
', '.join(nonexist_axes),
'do not' if len(nonexist_axes) > 1 else 'does not'))
for key, value in data.as_dict.items():
if isinstance(value,
np.ScalarType) and self._x_axis in data.as_dict:
                if self._x_axis != key:
x = data.as_dict[self._x_axis]
self._record_kv(key, value, x)
for axis in self._additional_x_axes:
                    if key != axis and key in data.as_dict:
x = data.as_dict[axis]
self._record_kv('{}/{}'.format(key, axis), value, x)
else:
self._record_kv(key, value, step)
data.mark(key)
def _record_kv(self, key, value, step):
if isinstance(value, str):
self._writer.add_text(key, value, step)
elif isinstance(value, np.ScalarType):
self._writer.add_scalar(key, value, step)
elif isinstance(value, plt.Figure):
self._writer.add_figure(key, value, step)
elif isinstance(value, np.ndarray) and value.ndim == 5:
self._writer.add_video(key, value, step, fps=15)
elif isinstance(value, scipy.stats._distn_infrastructure.rv_frozen):
shape = (self._histogram_samples, ) + value.mean().shape
self._writer.add_histogram(key, value.rvs(shape), step)
elif isinstance(value, scipy.stats._multivariate.multi_rv_frozen):
self._writer.add_histogram(key, value.rvs(self._histogram_samples),
step)
elif isinstance(value, Histogram):
self._writer.add_histogram(key, np.asarray(value), step)
def _record_graph(self, graph):
graph_def = graph.as_graph_def(add_shapes=True)
event = tbX.proto.event_pb2.Event(
graph_def=graph_def.SerializeToString())
self._writer.file_writer.add_event(event)
def dump(self, step=None):
"""Flush summary writer to disk."""
# Log the tabular inputs, now that we have a step
for p in self._waiting_for_dump:
p(step or self._default_step)
self._waiting_for_dump.clear()
# Flush output files
for w in self._writer.all_writers.values():
w.flush()
self._default_step += 1
def close(self):
"""Flush all the events to disk and close the file."""
self._writer.close()
def _warn(self, msg):
"""Warns the user using warnings.warn.
The stacklevel parameter needs to be 3 to ensure the call to logger.log
is the one printed.
"""
if not self._disable_warnings and msg not in self._warned_once:
warnings.warn(
colorize(msg, 'yellow'), NonexistentAxesWarning, stacklevel=3)
self._warned_once.add(msg)
return msg
class NonexistentAxesWarning(LoggerWarning):
"""Raise when the specified x axes do not exist in the tabular."""
| 6,395 | 35.340909 | 79 | py |
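A minimal sketch of the `x_axis` remapping: scalar values are plotted against the value recorded under `x_axis` rather than the dump counter. The log directory and key names are placeholders.

```
from dowel import TabularInput, TensorBoardOutput

output = TensorBoardOutput('log_folder/tb', x_axis='TotalEnvSteps')
table = TabularInput()
table.record('TotalEnvSteps', 1000)
table.record('Loss', 0.25)
output.record(table)
output.dump()   # tabular data is only written to disk on dump()
output.close()
```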
CSD-locomotion | CSD-locomotion-master/dowel/utils.py | """Utilities for console outputs."""
import errno
import os
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38)
def colorize(string, color, bold=False, highlight=False):
"""Colorize the string for console output."""
attr = []
num = color2num[color]
if highlight:
num += 10
attr.append(str(num))
if bold:
attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
def mkdir_p(path):
"""Create a directory with path."""
if not path:
return
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| 796 | 18.439024 | 61 | py |
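Both helpers are tiny; a usage sketch with a hypothetical path:

```
from dowel.utils import colorize, mkdir_p

print(colorize('watch out', 'yellow', bold=True))  # ANSI-escaped string
mkdir_p('/tmp/nested/log/dir')  # no error if the directory already exists
```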
CSD-locomotion | CSD-locomotion-master/envs/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/envs/mujoco/__init__.py | 0 | 0 | 0 | py |
|
CSD-locomotion | CSD-locomotion-master/envs/mujoco/ant_env.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import math
import os
from gym import utils
import numpy as np
from gym.envs.mujoco import mujoco_env
from envs.mujoco.mujoco_utils import MujocoTrait
def q_inv(a):
return [a[0], -a[1], -a[2], -a[3]]
def q_mult(a, b): # multiply two quaternion
w = a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3]
i = a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2]
j = a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1]
k = a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0]
return [w, i, j, k]
# pylint: disable=missing-docstring
class AntEnv(MujocoTrait, mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
task="motion",
goal=None,
expose_obs_idxs=None,
expose_all_qpos=True,
expose_body_coms=None,
expose_body_comvels=None,
expose_foot_sensors=False,
use_alt_path=False,
model_path=None,
fixed_initial_state=False,
done_allowing_step_unit=None,
original_env=False,
render_hw=100,
):
utils.EzPickle.__init__(**locals())
if model_path is None:
model_path = 'ant.xml'
self._task = task
self._goal = goal
self._expose_obs_idxs = expose_obs_idxs
self._expose_all_qpos = expose_all_qpos
self._expose_body_coms = expose_body_coms
self._expose_body_comvels = expose_body_comvels
self._expose_foot_sensors = expose_foot_sensors
self._body_com_indices = {}
self._body_comvel_indices = {}
self.fixed_initial_state = fixed_initial_state
self._done_allowing_step_unit = done_allowing_step_unit
self._original_env = original_env
self.render_hw = render_hw
# Settings from
# https://github.com/openai/gym/blob/master/gym/envs/__init__.py
xml_path = "envs/mujoco/assets/"
model_path = os.path.abspath(os.path.join(xml_path, model_path))
mujoco_env.MujocoEnv.__init__(self, model_path, 5)
def compute_reward(self, **kwargs):
return None
def _get_done(self):
return False
def step(self, a, render=False):
if hasattr(self, '_step_count'):
self._step_count += 1
obsbefore = self._get_obs()
xposbefore = self.sim.data.qpos.flat[0]
yposbefore = self.sim.data.qpos.flat[1]
self.do_simulation(a, self.frame_skip)
obsafter = self._get_obs()
xposafter = self.sim.data.qpos.flat[0]
yposafter = self.sim.data.qpos.flat[1]
reward = self.compute_reward(xposbefore=xposbefore, yposbefore=yposbefore, xposafter=xposafter, yposafter=yposafter)
if reward is None:
forward_reward = (xposafter - xposbefore) / self.dt
sideward_reward = (yposafter - yposbefore) / self.dt
ctrl_cost = .5 * np.square(a).sum()
survive_reward = 1.0
if self._task == "forward":
reward = forward_reward - ctrl_cost + survive_reward
elif self._task == "backward":
reward = -forward_reward - ctrl_cost + survive_reward
elif self._task == "left":
reward = sideward_reward - ctrl_cost + survive_reward
elif self._task == "right":
reward = -sideward_reward - ctrl_cost + survive_reward
elif self._task == "goal":
reward = -np.linalg.norm(np.array([xposafter, yposafter]) - self._goal)
elif self._task == "motion":
reward = np.max(np.abs(np.array([forward_reward, sideward_reward
]))) - ctrl_cost + survive_reward
def _get_gym_ant_reward():
forward_reward = (xposafter - xposbefore)/self.dt
ctrl_cost = .5 * np.square(a).sum()
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
return reward
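            # Note: the task-specific rewards computed above are intentionally
            # discarded in favor of the standard gym Ant reward below.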
reward = _get_gym_ant_reward()
done = self._get_done()
ob = self._get_obs()
info = dict(
# reward_forward=forward_reward,
# reward_sideward=sideward_reward,
# reward_ctrl=-ctrl_cost,
# reward_survive=survive_reward,
coordinates=np.array([xposbefore, yposbefore]),
next_coordinates=np.array([xposafter, yposafter]),
ori_obs=obsbefore,
next_ori_obs=obsafter,
)
if render:
info['render'] = self.render(mode='rgb_array').transpose(2, 0, 1)
return ob, reward, done, info
def _get_obs(self):
if self._original_env:
return np.concatenate([
self.sim.data.qpos.flat[2:],
self.sim.data.qvel.flat,
np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
])
# No crfc observation
if self._expose_all_qpos:
obs = np.concatenate([
self.sim.data.qpos.flat[:15],
self.sim.data.qvel.flat[:14],
])
else:
obs = np.concatenate([
self.sim.data.qpos.flat[2:15],
self.sim.data.qvel.flat[:14],
])
if self._expose_body_coms is not None:
for name in self._expose_body_coms:
com = self.get_body_com(name)
if name not in self._body_com_indices:
indices = range(len(obs), len(obs) + len(com))
self._body_com_indices[name] = indices
obs = np.concatenate([obs, com])
if self._expose_body_comvels is not None:
for name in self._expose_body_comvels:
comvel = self.get_body_comvel(name)
if name not in self._body_comvel_indices:
indices = range(len(obs), len(obs) + len(comvel))
self._body_comvel_indices[name] = indices
obs = np.concatenate([obs, comvel])
if self._expose_foot_sensors:
obs = np.concatenate([obs, self.sim.data.sensordata])
if self._expose_obs_idxs is not None:
obs = obs[self._expose_obs_idxs]
return obs
def reset_model(self):
self._step_count = 0
self._done_internally = False
if self.fixed_initial_state:
qpos = self.init_qpos
qvel = self.init_qvel
else:
qpos = self.init_qpos + np.random.uniform(
size=self.sim.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + np.random.randn(self.sim.model.nv) * .1
if not self._original_env:
qpos[15:] = self.init_qpos[15:]
qvel[14:] = 0.
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
# self.viewer.cam.distance = self.model.stat.extent * 2.5
pass
@property
def body_com_indices(self):
return self._body_com_indices
@property
def body_comvel_indices(self):
return self._body_comvel_indices
| 8,108 | 33.952586 | 124 | py |
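A minimal rollout sketch for the environment above; it assumes a working MuJoCo / `mujoco-py` setup and a run from the repository root (the XML is resolved relative to `envs/mujoco/assets/`).

```
from envs.mujoco.ant_env import AntEnv

env = AntEnv(fixed_initial_state=True, render_hw=64)
obs = env.reset()
for _ in range(10):
    obs, reward, done, info = env.step(env.action_space.sample())
print(info['coordinates'], info['next_coordinates'])
```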
CSD-locomotion | CSD-locomotion-master/envs/mujoco/half_cheetah_env.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import os
from gym import utils
import numpy as np
from gym.envs.mujoco import mujoco_env
from envs.mujoco.mujoco_utils import MujocoTrait
class HalfCheetahEnv(MujocoTrait, mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
expose_obs_idxs=None,
expose_all_qpos=True,
task='default',
target_velocity=None,
model_path=None,
fixed_initial_state=False,
render_hw=100):
utils.EzPickle.__init__(**locals())
if model_path is None:
model_path = 'half_cheetah.xml'
# Settings from
# https://github.com/openai/gym/blob/master/gym/envs/__init__.py
self._expose_obs_idxs = expose_obs_idxs
self._expose_all_qpos = expose_all_qpos
self._task = task
self._target_velocity = target_velocity
self.fixed_initial_state = fixed_initial_state
self.render_hw = render_hw
xml_path = "envs/mujoco/assets/"
model_path = os.path.abspath(os.path.join(xml_path, model_path))
mujoco_env.MujocoEnv.__init__(
self,
model_path,
5)
def compute_reward(self, **kwargs):
return None
def _get_done(self):
return False
def step(self, action, render=False):
obsbefore = self._get_obs()
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
obsafter = self._get_obs()
xposafter = self.sim.data.qpos[0]
xvelafter = self.sim.data.qvel[0]
reward_ctrl = -0.1 * np.square(action).sum()
reward = self.compute_reward(xposbefore=xposbefore, xposafter=xposafter)
if reward is None:
if self._task == 'default':
reward_vel = 0.
reward_run = (xposafter - xposbefore) / self.dt
reward = reward_ctrl + reward_run
elif self._task == 'target_velocity':
reward_vel = -(self._target_velocity - xvelafter) ** 2
reward = reward_ctrl + reward_vel
elif self._task == 'run_back':
reward_vel = 0.
reward_run = (xposbefore - xposafter) / self.dt
reward = reward_ctrl + reward_run
done = self._get_done()
ob = self._get_obs()
info = dict(
# reward_run=reward_run,
# reward_ctrl=reward_ctrl,
# reward_vel=reward_vel,
coordinates=np.array([xposbefore, 0.]),
next_coordinates=np.array([xposafter, 0.]),
ori_obs=obsbefore,
next_ori_obs=obsafter,
)
if render:
info['render'] = self.render(mode='rgb_array').transpose(2, 0, 1)
return ob, reward, done, info
def _get_obs(self):
if self._expose_all_qpos:
obs = np.concatenate(
[self.sim.data.qpos.flat, self.sim.data.qvel.flat])
else:
obs = np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
])
if self._expose_obs_idxs is not None:
obs = obs[self._expose_obs_idxs]
return obs
def reset_model(self):
if self.fixed_initial_state:
qpos = self.init_qpos
qvel = self.init_qvel
else:
qpos = self.init_qpos + np.random.uniform(
low=-.1, high=.1, size=self.sim.model.nq)
qvel = self.init_qvel + np.random.randn(self.sim.model.nv) * .1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
def plot_trajectory(self, trajectory, color, ax):
# https://stackoverflow.com/a/20474765/2182622
from matplotlib.collections import LineCollection
#linewidths = np.linspace(0.5, 1.5, len(trajectory))
#linewidths = np.linspace(0.1, 0.8, len(trajectory))
linewidths = np.linspace(0.2, 1.2, len(trajectory))
points = np.reshape(trajectory, (-1, 1, 2))
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, linewidths=linewidths, color=color)
ax.add_collection(lc)
def _get_coordinates_trajectories(self, trajectories):
coordinates_trajectories = super()._get_coordinates_trajectories(
trajectories)
for i, traj in enumerate(coordinates_trajectories):
            # Designed to roughly fit in [-5, 5] x [-5, 5]; now multiplied by 20.
traj[:, 1] = (i - len(coordinates_trajectories) / 2) / 1.25
return coordinates_trajectories
| 5,456 | 34.435065 | 87 | py |
CSD-locomotion | CSD-locomotion-master/envs/mujoco/humanoid_env.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from collections import defaultdict
from gym import utils
import numpy as np
from gym.envs.mujoco import mujoco_env
from envs.mujoco.mujoco_utils import MujocoTrait
def mass_center(sim):
mass = np.expand_dims(sim.model.body_mass, 1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, 0) / np.sum(mass))[0]
# pylint: disable=missing-docstring
class HumanoidEnv(MujocoTrait, mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self,
expose_obs_idxs=None,
expose_all_qpos=True,
model_path=None,
task='forward',
goal=None,
fixed_initial_state=False,
num_action_repeats=None,
done_allowing_step_unit=None,
fixed_mpl=None,
original_env=False,
render_hw=100,
):
utils.EzPickle.__init__(**locals())
if model_path is None:
model_path = 'humanoid.xml'
self._task = task
self._goal = goal
if self._task == "follow_goals":
self._goal_list = [
np.array([3.0, -0.5]),
np.array([6.0, 8.0]),
np.array([12.0, 12.0]),
]
self._goal = self._goal_list[0]
print("Following a trajectory of goals:", self._goal_list)
self._expose_obs_idxs = expose_obs_idxs
self._expose_all_qpos = expose_all_qpos
self.fixed_initial_state = fixed_initial_state
self._num_action_repeats = num_action_repeats
self._done_allowing_step_unit = done_allowing_step_unit
self._fixed_mpl = fixed_mpl
self._original_env = original_env
self.render_hw = render_hw
xml_path = "envs/mujoco/assets/"
model_path = os.path.abspath(os.path.join(xml_path, model_path))
mujoco_env.MujocoEnv.__init__(self, model_path, 5)
def _get_obs(self):
data = self.sim.data
if self._original_env:
return np.concatenate([data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat])
data = self.sim.data
if self._expose_all_qpos:
obs = np.concatenate([
data.qpos.flat, data.qvel.flat,
# data.cinert.flat, data.cvel.flat,
# data.qfrc_actuator.flat, data.cfrc_ext.flat
])
else:
obs = np.concatenate([
data.qpos.flat[2:], data.qvel.flat, data.cinert.flat, data.cvel.flat,
data.qfrc_actuator.flat, data.cfrc_ext.flat
])
if self._expose_obs_idxs is not None:
obs = obs[self._expose_obs_idxs]
return obs
# def compute_reward(self, ob, next_ob, action=None):
# xposbefore = ob[:, 0]
# yposbefore = ob[:, 1]
# xposafter = next_ob[:, 0]
# yposafter = next_ob[:, 1]
#
# forward_reward = (xposafter - xposbefore) / self.dt
# sideward_reward = (yposafter - yposbefore) / self.dt
#
# if action is not None:
# ctrl_cost = .5 * np.square(action).sum(axis=1)
# survive_reward = 1.0
# if self._task == "forward":
# reward = forward_reward - ctrl_cost + survive_reward
# elif self._task == "backward":
# reward = -forward_reward - ctrl_cost + survive_reward
# elif self._task == "left":
# reward = sideward_reward - ctrl_cost + survive_reward
# elif self._task == "right":
# reward = -sideward_reward - ctrl_cost + survive_reward
# elif self._task in ["goal", "follow_goals"]:
# reward = -np.linalg.norm(
# np.array([xposafter, yposafter]).T - self._goal, axis=1)
# elif self._task in ["sparse_goal"]:
# reward = (-np.linalg.norm(
# np.array([xposafter, yposafter]).T - self._goal, axis=1) >
# -0.3).astype(np.float32)
# return reward
def compute_reward(self, **kwargs):
return None
def step(self, a, render=False):
if hasattr(self, '_step_count'):
self._step_count += 1
obsbefore = self._get_obs()
pos_before = mass_center(self.sim)
xposbefore = self.sim.data.qpos.flat[0]
yposbefore = self.sim.data.qpos.flat[1]
if self._num_action_repeats is None:
self.do_simulation(a, self.frame_skip)
else:
for i in range(self._num_action_repeats):
self.do_simulation(a, self.frame_skip)
obsafter = self._get_obs()
pos_after = mass_center(self.sim)
xposafter = self.sim.data.qpos.flat[0]
yposafter = self.sim.data.qpos.flat[1]
def _get_dads_humanoid_reward():
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 0.25 * (
pos_after - pos_before) / self.sim.model.opt.timestep
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
return reward
def _get_gym_humanoid_reward():
# gym/envs/mujoco/humanoid.py
alive_bonus = 5.0
data = self.sim.data
lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
return reward
qpos = self.sim.data.qpos
if hasattr(self, '_done_internally') and self._done_allowing_step_unit is not None:
self._done_internally = (self._done_internally or bool((qpos[2] < 1.0) or (qpos[2] > 2.0)))
done = (self._done_internally and self._step_count % self._done_allowing_step_unit == 0)
else:
done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0))
reward = self.compute_reward(xposbefore=xposbefore, yposbefore=yposbefore, xposafter=xposafter, yposafter=yposafter, cur_done=done)
if reward is None:
reward = _get_gym_humanoid_reward()
if self._task == "follow_goals":
xposafter = self.sim.data.qpos.flat[0]
yposafter = self.sim.data.qpos.flat[1]
reward = -np.linalg.norm(np.array([xposafter, yposafter]).T - self._goal)
# update goal
if np.abs(reward) < 0.5:
self._goal = self._goal_list[0]
self._goal_list = self._goal_list[1:]
print("Goal Updated:", self._goal)
elif self._task == "goal":
xposafter = self.sim.data.qpos.flat[0]
yposafter = self.sim.data.qpos.flat[1]
reward = -np.linalg.norm(np.array([xposafter, yposafter]).T - self._goal)
ob = self._get_obs()
info = dict(
#reward_linvel=lin_vel_cost,
#reward_quadctrl=-quad_ctrl_cost,
#reward_alive=alive_bonus,
#reward_impact=-quad_impact_cost,
coordinates=np.array([xposbefore, yposbefore]),
next_coordinates=np.array([xposafter, yposafter]),
ori_obs=obsbefore,
next_ori_obs=obsafter,
)
if render:
info['render'] = self.render(mode='rgb_array').transpose(2, 0, 1)
return ob, reward, done, info
def reset_model(self):
self._step_count = 0
self._done_internally = False
c = 0.01
if self.fixed_initial_state:
self.set_state(
self.init_qpos,
self.init_qvel)
else:
self.set_state(
self.init_qpos + np.random.uniform(
low=-c, high=c, size=self.sim.model.nq),
self.init_qvel + np.random.uniform(
low=-c,
high=c,
size=self.sim.model.nv,
))
if self._task == "follow_goals":
self._goal = self._goal_list[0]
self._goal_list = self._goal_list[1:]
print("Current goal:", self._goal)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 2.0
| 9,473 | 36.595238 | 139 | py |
CSD-locomotion | CSD-locomotion-master/envs/mujoco/mujoco_utils.py | from collections import OrderedDict
import akro
import numpy as np
from gym import spaces
def convert_observation_to_space(observation):
if isinstance(observation, dict):
space = spaces.Dict(OrderedDict([
(key, convert_observation_to_space(value))
for key, value in observation.items()
]))
elif isinstance(observation, np.ndarray):
low = np.full(observation.shape, -float('inf'), dtype=np.float32)
high = np.full(observation.shape, float('inf'), dtype=np.float32)
space = akro.Box(low=low, high=high, dtype=observation.dtype)
else:
raise NotImplementedError(type(observation), observation)
return space
class MujocoTrait:
def _set_action_space(self):
bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)
low, high = bounds.T
self.action_space = akro.Box(low=low, high=high, dtype=np.float32)
return self.action_space
def _set_observation_space(self, observation):
self.observation_space = convert_observation_to_space(observation)
return self.observation_space
def render(self,
mode='human',
width=100,
height=100,
camera_id=None,
camera_name=None):
if hasattr(self, 'render_hw') and self.render_hw is not None:
width = self.render_hw
height = self.render_hw
return super().render(mode, width, height, camera_id, camera_name)
def plot_trajectory(self, trajectory, color, ax):
ax.plot(trajectory[:, 0], trajectory[:, 1], color=color, linewidth=0.7)
def plot_trajectories(self, trajectories, colors, plot_axis, ax):
"""Plot trajectories onto given ax."""
square_axis_limit = 0.0
for trajectory, color in zip(trajectories, colors):
trajectory = np.array(trajectory)
self.plot_trajectory(trajectory, color, ax)
square_axis_limit = max(square_axis_limit, np.max(np.abs(trajectory[:, :2])))
square_axis_limit = square_axis_limit * 1.2
if plot_axis == 'free':
return
        if plot_axis is None:
            plot_axis = [-square_axis_limit, square_axis_limit,
                         -square_axis_limit, square_axis_limit]
        ax.axis(plot_axis)
        ax.set_aspect('equal')
def render_trajectories(self, trajectories, colors, plot_axis, ax):
coordinates_trajectories = self._get_coordinates_trajectories(trajectories)
self.plot_trajectories(coordinates_trajectories, colors, plot_axis, ax)
def _get_coordinates_trajectories(self, trajectories):
coordinates_trajectories = []
for trajectory in trajectories:
            if trajectory['env_infos']['coordinates'].dtype == object:
coordinates_trajectories.append(np.concatenate([
np.concatenate(trajectory['env_infos']['coordinates'], axis=0),
[trajectory['env_infos']['next_coordinates'][-1][-1]],
]))
elif trajectory['env_infos']['coordinates'].ndim == 2:
coordinates_trajectories.append(np.concatenate([
trajectory['env_infos']['coordinates'],
[trajectory['env_infos']['next_coordinates'][-1]]
]))
elif trajectory['env_infos']['coordinates'].ndim > 2:
coordinates_trajectories.append(np.concatenate([
trajectory['env_infos']['coordinates'].reshape(-1, 2),
trajectory['env_infos']['next_coordinates'].reshape(-1, 2)[-1:]
]))
else:
assert False
return coordinates_trajectories
| 3,802 | 38.614583 | 102 | py |
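The trajectory-plotting helpers operate on plain 2-D arrays, so they can be sanity-checked without rolling out a policy. A sketch with synthetic random walks (constructing the environment still requires MuJoCo; the output path is hypothetical):

```
import matplotlib.pyplot as plt
import numpy as np
from envs.mujoco.ant_env import AntEnv

env = AntEnv()
fig, ax = plt.subplots()
paths = [np.cumsum(np.random.randn(50, 2) * 0.1, axis=0) for _ in range(4)]
env.plot_trajectories(paths, colors=['C0', 'C1', 'C2', 'C3'],
                      plot_axis=None, ax=ax)
fig.savefig('/tmp/trajectories.png')  # hypothetical output path
```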
CSD-locomotion | CSD-locomotion-master/garaged/.codecov.yml | coverage:
range: 60..100
status:
patch:
default:
target: 90%
codecov:
ci:
- "travis-ci.com"
notify:
wait_for_ci: yes
after_n_builds: 4
| 174 | 11.5 | 21 | yml |
CSD-locomotion | CSD-locomotion-master/garaged/.mergify.yml | pull_request_rules:
- name: Automatic merge queue for master
conditions:
- base=master
- -conflict
- "#approved-reviews-by>=2"
- "approved-reviews-by=@maintainers"
- "#changes-requested-reviews-by=0"
- status-success=Travis CI - Pull Request
- status-success=codecov/patch
- label=ready-to-merge
actions:
merge:
strict: smart
strict_method: rebase
method: squash
delete_head_branch: {}
- name: Automatic merge queue for master (maintainers)
conditions:
- base=master
- -conflict
- author=@maintainers
- "#approved-reviews-by>=1"
- "#changes-requested-reviews-by=0"
- status-success=Travis CI - Pull Request
- status-success=codecov/patch
- label=ready-to-merge
actions:
merge:
strict: smart
strict_method: rebase
method: squash
delete_head_branch: {}
| 939 | 26.647059 | 56 | yml |
CSD-locomotion | CSD-locomotion-master/garaged/.pre-commit-config.yaml | fail_fast: false # set to true to have pre-commit stop running hooks after the first failure.
default_stages: [commit, push]
repos:
  - repo: local
    hooks:
      - id: check-commit-message
        stages: [commit-msg]
        name: Check commit message
        language: script
        entry: scripts/check_commit_message
      - id: check-pylint-normal
        name: Check pylint (except unit tests)
        types: [file, python]
        exclude: (^tests/garage/.*$|^benchmarks/)  # exclude unit tests and benchmarks
        require_serial: true  # pylint does its own parallelism
        language: system
        entry: pylint
      - id: check-pylint-unit-tests
        name: Check pylint for unit tests
        types: [file, python]
        files: ^tests/garage/.*$  # check only unit tests
        require_serial: true  # pylint does its own parallelism
        language: system
        entry: pylint
        args: [--rcfile=tests/garage/.pylintrc]

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.2.3
    hooks:
      - id: check-added-large-files  # Prevent giant files from being committed.
      - id: check-ast  # Simply check whether files parse as valid python.
      - id: check-byte-order-marker  # Forbid files which have a UTF-8 byte-order marker
      - id: check-docstring-first  # Checks for a common error of placing code before the docstring.
      - id: check-executables-have-shebangs  # Checks that non-binary executables have a proper shebang.
      - id: check-json  # Attempts to load all json files to verify syntax.
      - id: check-merge-conflict  # Check for files that contain merge conflict strings.
      - id: check-symlinks  # Checks for symlinks which do not point to anything.
      - id: check-vcs-permalinks  # Ensures that links to vcs websites are permalinks.
      - id: check-xml  # Attempts to load all xml files to verify syntax.
      - id: check-yaml  # Attempts to load all yaml files to verify syntax.
      - id: debug-statements  # Check for debugger imports and py37+ breakpoint() calls in python source.
      - id: detect-private-key  # Checks for the existence of private keys.
      - id: double-quote-string-fixer  # This hook replaces double quoted strings with single quoted strings.
      - id: end-of-file-fixer  # Makes sure files end in a newline and only a newline.
      - id: flake8  # Run flake8 on your Python files
        additional_dependencies:
          - 'flake8-docstrings>=1.5.0'
          - 'flake8-import-order'
          - 'pycodestyle>=2.5.0'
          - 'pydocstyle>=4.0.0'
      - id: forbid-new-submodules  # Prevent addition of new git submodules.
      - id: mixed-line-ending  # Replaces or checks mixed line ending.
      - id: pretty-format-json  # Checks that all your JSON files have keys that are sorted and indented.
      - id: trailing-whitespace  # Trims trailing whitespace.

  # First pass: check format
  - repo: https://github.com/pre-commit/mirrors-yapf
    rev: v0.28.0
    hooks:
      - id: yapf
        name: Check format with yapf
        args: ['-vv', '-dpr']

  # Second pass: format in-place
  - repo: https://github.com/pre-commit/mirrors-yapf
    rev: v0.28.0
    hooks:
      - id: yapf
        name: Format in-place with yapf
        args: ['-vv', '-ipr']
| 3,572 | 48.625 | 120 | yaml |
CSD-locomotion | CSD-locomotion-master/garaged/.travis.yml | language: python
python: "3.5"
services:
  - docker

addons:
  apt:
    packages:
      - docker-ce

jobs:
  include:
    - stage: "Build"
      if: NOT fork
      name: "Build and cache docker container"
      before_script: skip
      install: skip
      script:
        - tag="rlworkgroup/garage-ci:${TRAVIS_BUILD_NUMBER}"
        - make build-ci TAG="${tag}" BUILD_ARGS="--no-cache"
        - make ci-deploy-docker TAG="${tag}"

    - stage: test
      # pre-commit checks only run for pull requests
      if: type = pull_request
      name: "Pre-commit checks"
      env:
        - JOB_RUN_CMD="make ci-job-precommit"

    - name: "Normal tests"
      env:
        - JOB_RUN_CMD="make ci-job-normal"
        - DEPLOY_FROM_THIS_JOB="true"

    - name: "Large tests"
      env: JOB_RUN_CMD="make ci-job-large"

    - if: type != pull_request OR head_repo = "rlworkgroup/garage"
      name: "MuJoCo-based tests"
      env:
        - JOB_RUN_CMD="make ci-job-mujoco"

    - if: type != pull_request OR head_repo = "rlworkgroup/garage"
      name: "MuJoCo-based long running tests"
      env:
        - JOB_RUN_CMD="make ci-job-mujoco-long"

    - name: "Verify conda and pipenv installations"
      env: JOB_RUN_CMD="make ci-job-verify-envs"

    - if: type = cron
      name: "Nightly tests"
      env: JOB_RUN_CMD="make ci-job-nightly"

    # special deploy stage for tag builds ONLY
    - stage: deploy
      if: tag IS present
      name: "Deploy to PyPI"
      before_install: skip
      install: skip
      script:
        - echo "${TRAVIS_TAG}" > VERSION
      after_script: skip
      deploy:
        provider: pypi
        user: "__token__"
        password: "${PYPI_TOKEN}"
        skip_cleanup: true
        on:
          tags: true

before_install:
  # Reconfigure docker to be more efficient
  - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
  - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
  - sudo apt-get update
  - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
  - |
    echo '{
      "experimental": true,
      "storage-driver": "overlay2",
      "max-concurrent-downloads": 50,
      "max-concurrent-uploads": 50
    }' | sudo tee /etc/docker/daemon.json
  - sudo service docker restart
  - docker --version

install:
  # Pull cached docker image
  - tag="rlworkgroup/garage-ci:${TRAVIS_BUILD_NUMBER}"
  - docker pull ${tag}
  - make build-ci TAG="${tag}"

before_script:
  - ci_env="$(bash <(curl -s https://codecov.io/env))"

script:
  - make run-ci RUN_CMD="${JOB_RUN_CMD}" RUN_ARGS="${ci_env}" TAG="${tag}"

deploy:
  provider: script
  script: make ci-deploy-docker TAG="${tag}"
  on:
    branch: master
    condition: $DEPLOY_FROM_THIS_JOB = true

git:
  depth: false

branches:
  only:
    - master
    - /^release-.*/
    - /^v([1-9][0-9]*!)?(0|[1-9][0-9]*)(\.([0-9]*))*((a|b|rc)(0|[1-9][0-9]*))?(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?$/ # regex for release tags

notifications:
  email: false
| 3,039 | 26.142857 | 151 | yml |
CSD-locomotion | CSD-locomotion-master/garaged/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 2020.06.3
- Fixed
* PyTorch 1.7 support ([#1934](https://github.com/rlworkgroup/garage/pull/1934))
* `LocalRunner` ignores `worker_cls` attribute of algorithms ([#1984](https://github.com/rlworkgroup/garage/pull/1984))
* `mujoco_py` versions greater than v2.0.2.8 are incompatible with some GCC versions in conda ([#2000](https://github.com/rlworkgroup/garage/pull/2000))
* MTSAC not learning because it corrupts the termination signal by wrapping with `GarageEnv` twice ([#2029](https://github.com/rlworkgroup/garage/pull/2029))
* MTSAC does not respect `max_episode_length_eval` hyperparameter ([#2029](https://github.com/rlworkgroup/garage/pull/2029))
* MTSAC MetaWorld examples do not use the correct number of tasks ([#2029](https://github.com/rlworkgroup/garage/pull/2029))
* MTSAC now supports a separate `max_episode_length` for evaluation via the `max_episode_length_eval` hyperparameter ([#2029](https://github.com/rlworkgroup/garage/pull/2029))
* MTSAC MetaWorld MT50 example used an incorrect `max_episode_length` ([#2029](https://github.com/rlworkgroup/garage/pull/2029))
## 2020.06.2
- Fixed
* Better parameters for example `her_ddpg_fetchreach` ([#1763](https://github.com/rlworkgroup/garage/pull/1763))
* Ensure determinism in TensorFlow by using `tfp.SeedStream` ([#1821](https://github.com/rlworkgroup/garage/pull/1821))
* Broken rendering of MuJoCo environments to pixels in the NVIDIA Docker container ([#1838](https://github.com/rlworkgroup/garage/pull/1838))
* Enable cudnn in the NVIDIA Docker container ([#1840](https://github.com/rlworkgroup/garage/pull/1840))
* Bug in `DiscreteQfDerivedPolicy` in which parameters were not returned ([#1847](https://github.com/rlworkgroup/garage/pull/1847))
* Populate `TimeLimit.truncated` at every step when using `gym.Env` ([#1852](https://github.com/rlworkgroup/garage/pull/1852))
* Bug in which parameters where not copied when TensorFlow primitives are `clone()`ed ([#1855](https://github.com/rlworkgroup/garage/pull/1855))
* Typo in the `Makefile` target `run-nvidia` ([#1914](https://github.com/rlworkgroup/garage/pull/1914))
## 2020.06.1
- Fixed
* Pipenv fails to resolve a stable dependency set because of excessively-narrow dependencies in tensorflow-probability ([#1721](https://github.com/rlworkgroup/garage/pull/1721))
* Bug which prevented `rollout` from running policies deterministically ([#1714](https://github.com/rlworkgroup/garage/pull/1714))
## 2020.06.0
### Added
- Algorithms
* PPO in PyTorch (
[#905](https://github.com/rlworkgroup/garage/pull/905),
[#1188](https://github.com/rlworkgroup/garage/pull/1188))
* TRPO in PyTorch (
[#1018](https://github.com/rlworkgroup/garage/pull/1018),
[#1053](https://github.com/rlworkgroup/garage/pull/1053),
[#1186](https://github.com/rlworkgroup/garage/pull/1186))
* MAML in PyTorch (
[#1128](https://github.com/rlworkgroup/garage/pull/1128),
[#1136](https://github.com/rlworkgroup/garage/pull/1136),
[#1214](https://github.com/rlworkgroup/garage/pull/1214),
[#1234](https://github.com/rlworkgroup/garage/pull/1234),
[#1283](https://github.com/rlworkgroup/garage/pull/1283))
* RL2 in TensorFlow (
[#1127](https://github.com/rlworkgroup/garage/pull/1127),
[#1175](https://github.com/rlworkgroup/garage/pull/1175),
[#1189](https://github.com/rlworkgroup/garage/pull/1189),
[#1190](https://github.com/rlworkgroup/garage/pull/1190),
[#1195](https://github.com/rlworkgroup/garage/pull/1195),
[#1231](https://github.com/rlworkgroup/garage/pull/1231))
* PEARL in PyTorch (
[#1059](https://github.com/rlworkgroup/garage/pull/1059),
[#1124](https://github.com/rlworkgroup/garage/pull/1124),
[#1218](https://github.com/rlworkgroup/garage/pull/1218),
[#1316](https://github.com/rlworkgroup/garage/pull/1316),
[#1374](https://github.com/rlworkgroup/garage/pull/1374))
* SAC in PyTorch ([#1235](https://github.com/rlworkgroup/garage/pull/1235))
* MTSAC in PyTorch ([#1332](https://github.com/rlworkgroup/garage/pull/1332))
* Task Embeddings in TensorFlow (
[#1168](https://github.com/rlworkgroup/garage/pull/1168),
[#1169](https://github.com/rlworkgroup/garage/pull/1169),
[#1167](https://github.com/rlworkgroup/garage/pull/1167))
- Samplers
* New Sampler API, with efficient multi-env and multi-policy support (
[#881](https://github.com/rlworkgroup/garage/pull/881),
[#1153](https://github.com/rlworkgroup/garage/pull/1153),
[#1319](https://github.com/rlworkgroup/garage/pull/1319))
* `garage.sampler.LocalSampler`, which uses the main process to sample (
[#1133](https://github.com/rlworkgroup/garage/pull/1133),
[#1156](https://github.com/rlworkgroup/garage/pull/1156))
* Reworked `garage.sampler.RaySampler` to use new API (
[#1133](https://github.com/rlworkgroup/garage/pull/1133),
[#1134](https://github.com/rlworkgroup/garage/pull/1134))
* `garage.sampler.MultiprocessingSampler` ([#1298](https://github.com/rlworkgroup/garage/pull/1298))
* `garage.sampler.VecWorker`, a replacement for VecEnvExecutor ([#1311](https://github.com/rlworkgroup/garage/pull/1311))
- APIs
* `garage.TrajectoryBatch` data type (
[#1058](https://github.com/rlworkgroup/garage/pull/1058),
[#1065](https://github.com/rlworkgroup/garage/pull/1065),
[#1132](https://github.com/rlworkgroup/garage/pull/1132),
[#1154](https://github.com/rlworkgroup/garage/pull/1154))
* `garage.TimeStep` data type (
[#1114](https://github.com/rlworkgroup/garage/pull/1114),
[#1221](https://github.com/rlworkgroup/garage/pull/1221))
* `garage.TimeStepBatch` data type ([#1529](https://github.com/rlworkgroup/garage/pull/1529))
* `garage.log_performance` (
[#1116](https://github.com/rlworkgroup/garage/pull/1116),
[#1142](https://github.com/rlworkgroup/garage/pull/1142),
[#1159](https://github.com/rlworkgroup/garage/pull/1159))
* `garage.np.algos.MetaRLAlgorithm` ([#1142](https://github.com/rlworkgroup/garage/pull/1142))
* `garage.experiment.MetaEvaluator` (
[#1142](https://github.com/rlworkgroup/garage/pull/1142),
[#1152](https://github.com/rlworkgroup/garage/pull/1152),
[#1227](https://github.com/rlworkgroup/garage/pull/1227))
* `garage.log_multitask_performance` ([#1192](https://github.com/rlworkgroup/garage/pull/1192))
* `garage.torch.distributions.TanhNormal` ([#1140](https://github.com/rlworkgroup/garage/pull/1140))
* `garage.torch.policies.TanhGaussianMLPPolicy` ([#1176](https://github.com/rlworkgroup/garage/pull/1176))
* `garage.experiment.wrap_experiment` to replace `run_experiment` with several new features (
[#1100](https://github.com/rlworkgroup/garage/pull/1100),
[#1155](https://github.com/rlworkgroup/garage/pull/1155),
[#1160](https://github.com/rlworkgroup/garage/pull/1160),
[#1164](https://github.com/rlworkgroup/garage/pull/1164),
[#1249](https://github.com/rlworkgroup/garage/pull/1249),
[#1258](https://github.com/rlworkgroup/garage/pull/1258),
[#1281](https://github.com/rlworkgroup/garage/pull/1281),
[#1396](https://github.com/rlworkgroup/garage/pull/1396),
[#1482](https://github.com/rlworkgroup/garage/pull/1482))
* `garage.torch.q_functions.ContinuousCNNQFunction` ([#1326](https://github.com/rlworkgroup/garage/pull/1326))
* PyTorch support for non-linearities with parameters ([#928](https://github.com/rlworkgroup/garage/pull/928))
* `garage.torch.value_function.GaussianMLPValueFunction` (
[#1317](https://github.com/rlworkgroup/garage/pull/1317))
* Simpler PyTorch policy API ([#1528](https://github.com/rlworkgroup/garage/pull/1528))
* `garage.envs.TaskOnehotWrapper` ([#1157](https://github.com/rlworkgroup/garage/pull/1157))
- HalfCheetah meta environments (
[#1108](https://github.com/rlworkgroup/garage/pull/1108),
[#1131](https://github.com/rlworkgroup/garage/pull/1131),
[#1216](https://github.com/rlworkgroup/garage/pull/1216),
[#1385](https://github.com/rlworkgroup/garage/pull/1385))
- PyTorch GPU support ([#1182](https://github.com/rlworkgroup/garage/pull/1182))
- PyTorch deterministic support ([#1063](https://github.com/rlworkgroup/garage/pull/1063))
- Support for running Meta-RL algorithms on MetaWorld benchmarks (
[#1306](https://github.com/rlworkgroup/garage/pull/1306))
- Examples for running MetaWorld benchmarks (
[#1010](https://github.com/rlworkgroup/garage/pull/1010),
[#1263](https://github.com/rlworkgroup/garage/pull/1263),
[#1265](https://github.com/rlworkgroup/garage/pull/1265),
[#1265](https://github.com/rlworkgroup/garage/pull/1265),
[#1241](https://github.com/rlworkgroup/garage/pull/1241),
[#1232](https://github.com/rlworkgroup/garage/pull/1232),
[#1327](https://github.com/rlworkgroup/garage/pull/1327),
[#1351](https://github.com/rlworkgroup/garage/pull/1351),
[#1393](https://github.com/rlworkgroup/garage/pull/1393))
- Improved off-policy evaluation (
[#1139](https://github.com/rlworkgroup/garage/pull/1139),
[#1279](https://github.com/rlworkgroup/garage/pull/1279),
[#1331](https://github.com/rlworkgroup/garage/pull/1331),
[#1384](https://github.com/rlworkgroup/garage/pull/1384))
### Changed
- Allow TensorFlow 2 (or TF >=1.14) (
[#1309](https://github.com/rlworkgroup/garage/pull/1309),
[#1563](https://github.com/rlworkgroup/garage/pull/1563))
- Require Torch 1.4.0 (
[#1335](https://github.com/rlworkgroup/garage/pull/1335),
[#1361](https://github.com/rlworkgroup/garage/pull/1361))
- Ensure TF and torch are optional ([#1510](https://github.com/rlworkgroup/garage/pull/1510))
- Update gym to 0.15.4 (
[#1098](https://github.com/rlworkgroup/garage/pull/1098),
[#1158](https://github.com/rlworkgroup/garage/pull/1158))
- Rename `baseline` to `value_function` ([#1275](https://github.com/rlworkgroup/garage/pull/1275))
- Make `runner._sampler` optional ([#1394](https://github.com/rlworkgroup/garage/pull/1394))
- Make ExplorationStrategies a type of Policy ([#1397](https://github.com/rlworkgroup/garage/pull/1397))
- Use `garage.replay_buffer.PathBuffer` in off-policy algos (
[#1173](https://github.com/rlworkgroup/garage/pull/1173),
[#1433](https://github.com/rlworkgroup/garage/pull/1433))
- Deprecated `run_experiment` (
[#1370](https://github.com/rlworkgroup/garage/pull/1370),
[#1412](https://github.com/rlworkgroup/garage/pull/1412))
- Deprecated old-style samplers ([#1369](https://github.com/rlworkgroup/garage/pull/1369))
- Refactor TensorFlow to use tfp.distribution (
[#1073](https://github.com/rlworkgroup/garage/pull/1073),
[#1356](https://github.com/rlworkgroup/garage/pull/1356),
[#1357](https://github.com/rlworkgroup/garage/pull/1357),
[#1410](https://github.com/rlworkgroup/garage/pull/1410),
[#1456](https://github.com/rlworkgroup/garage/pull/1456),
[#1444](https://github.com/rlworkgroup/garage/pull/1444),
[#1554](https://github.com/rlworkgroup/garage/pull/1554),
[#1569](https://github.com/rlworkgroup/garage/pull/1569))
- Set TotalEnvSteps as the default Tensorboard x-axis (
[#1017](https://github.com/rlworkgroup/garage/pull/1017),
[#1069](https://github.com/rlworkgroup/garage/pull/1069))
- Update dependencies for docs ([#1383](https://github.com/rlworkgroup/garage/pull/1383))
- New optimizer_args TensorFlow interface ([#1496](https://github.com/rlworkgroup/garage/pull/1496))
- Move LocalTFRunner to garage.experiment ([#1513](https://github.com/rlworkgroup/garage/pull/1513))
- Implement HER using PathBuffer (
[#1282](https://github.com/rlworkgroup/garage/pull/1282)
[#1505](https://github.com/rlworkgroup/garage/pull/1505))
- Change CNN API to use tuples for defining kernels ([#1515](https://github.com/rlworkgroup/garage/pull/1515))
- Many documentation improvements (
[#1056](https://github.com/rlworkgroup/garage/pull/1056),
[#1065](https://github.com/rlworkgroup/garage/pull/1065),
[#1120](https://github.com/rlworkgroup/garage/pull/1120),
[#1266](https://github.com/rlworkgroup/garage/pull/1266),
[#1327](https://github.com/rlworkgroup/garage/pull/1327),
[#1413](https://github.com/rlworkgroup/garage/pull/1413),
[#1429](https://github.com/rlworkgroup/garage/pull/1429),
[#1451](https://github.com/rlworkgroup/garage/pull/1451),
[#1481](https://github.com/rlworkgroup/garage/pull/1481),
[#1484](https://github.com/rlworkgroup/garage/pull/1484))
- Eliminate use of "base" module name ([#1403](https://github.com/rlworkgroup/garage/pull/1403))
- Significant improvements to benchmarking (
[#1271](https://github.com/rlworkgroup/garage/pull/1271)
[#1291](https://github.com/rlworkgroup/garage/pull/1291),
[#1306](https://github.com/rlworkgroup/garage/pull/1306),
[#1307](https://github.com/rlworkgroup/garage/pull/1307),
[#1310](https://github.com/rlworkgroup/garage/pull/1310),
[#1320](https://github.com/rlworkgroup/garage/pull/1320),
[#1368](https://github.com/rlworkgroup/garage/pull/1368),
[#1380](https://github.com/rlworkgroup/garage/pull/1380),
[#1409](https://github.com/rlworkgroup/garage/pull/1409))
- Refactor benchmarks into a separate module (
[#1395](https://github.com/rlworkgroup/garage/pull/1395),
[#1402](https://github.com/rlworkgroup/garage/pull/1402),
[#1400](https://github.com/rlworkgroup/garage/pull/1400),
[#1411](https://github.com/rlworkgroup/garage/pull/1411),
[#1408](https://github.com/rlworkgroup/garage/pull/1408),
[#1416](https://github.com/rlworkgroup/garage/pull/1416),
[#1414](https://github.com/rlworkgroup/garage/pull/1414),
[#1432](https://github.com/rlworkgroup/garage/pull/1432))
### Removed
- Dependencies:
* matplotlib (moved to dev) ([#1083](https://github.com/rlworkgroup/garage/pull/1083))
* atari-py ([#1194](https://github.com/rlworkgroup/garage/pull/1194))
* gtimer, pandas, rlkit, seaborn (moved to benchmarks) ([#1325](https://github.com/rlworkgroup/garage/pull/1325))
* pyprind ([#1495](https://github.com/rlworkgroup/garage/pull/1495))
- `RLAlgorithm.get_itr_snapshot` ([#1054](https://github.com/rlworkgroup/garage/pull/1054))
- `garage.misc.nb_utils` ([#1288](https://github.com/rlworkgroup/garage/pull/1288))
- `garage.np.regressors` ([#1493](https://github.com/rlworkgroup/garage/pull/1493))
- `garage.np.BatchPolopt` (
[#1486](https://github.com/rlworkgroup/garage/pull/1486),
[#1492](https://github.com/rlworkgroup/garage/pull/1492))
- `garage.misc.prog_bar_counter` ([#1495](https://github.com/rlworkgroup/garage/pull/1495))
- `garage.tf.envs.TfEnv` ([#1443](https://github.com/rlworkgroup/garage/pull/1443))
- `garage.tf.BatchPolopt` ([#1504](https://github.com/rlworkgroup/garage/pull/1504))
- `garage.np.OffPolicyRLAlgorithm` ([#1552](https://github.com/rlworkgroup/garage/pull/1552))
### Fixed
- Bug where `GarageEnv` did not pickle ([#1029](https://github.com/rlworkgroup/garage/pull/1029))
- Bug where `VecEnvExecutor` conflated terminal state and time limit signal (
[#1178](https://github.com/rlworkgroup/garage/pull/1178),
[#1570](https://github.com/rlworkgroup/garage/pull/1570))
- Bug where plotter window was opened multiple times ([#1253](https://github.com/rlworkgroup/garage/pull/1253))
- Bug where TF plotter used main policy on separate thread ([#1270](https://github.com/rlworkgroup/garage/pull/1270))
- Workaround gym timelimit and terminal state conflation ([#1118](https://github.com/rlworkgroup/garage/pull/1118))
- Bug where pixels weren't normalized correctly when using CNNs (
[#1236](https://github.com/rlworkgroup/garage/pull/1236),
[#1419](https://github.com/rlworkgroup/garage/pull/1419))
- Bug where `garage.envs.PointEnv` did not step correctly ([#1165](https://github.com/rlworkgroup/garage/pull/1165))
- Bug where sampler workers crashed in non-Deterministic mode ([#1567](https://github.com/rlworkgroup/garage/pull/1567))
- Use cloudpickle in old-style samplers to handle lambdas ([#1371](https://github.com/rlworkgroup/garage/pull/1371))
- Bug where workers were not shut down after running a resumed algorithm ([#1293](https://github.com/rlworkgroup/garage/pull/1293))
- Non-PyPI dependencies, which blocked using pipenv and poetry ([#1247](https://github.com/rlworkgroup/garage/pull/1247))
- Bug where TensorFlow parameter setting didn't work across differently named policies ([#1355](https://github.com/rlworkgroup/garage/pull/1355))
- Bug where advantages were computed incorrectly in PyTorch ([#1197](https://github.com/rlworkgroup/garage/pull/1197))
- Bug where TF plotter was used in LocalRunner ([#1267](https://github.com/rlworkgroup/garage/pull/1267))
- Worker processes are no longer started unnecessarily ([#1006](https://github.com/rlworkgroup/garage/pull/1006))
- All examples were fixed and are now tested ([#1009](https://github.com/rlworkgroup/garage/pull/1009))
## 2019.10.2
### Fixed
- Use a GitHub Token in the CI to retrieve packages to avoid hitting GitHub API rate limit ([#1250](https://github.com/rlworkgroup/garage/pull/1250))
- Avoid installing dev extra dependencies during the conda check ([#1296](https://github.com/rlworkgroup/garage/pull/1296))
- Install `dm_control` from PyPI ([#1406](https://github.com/rlworkgroup/garage/pull/1406))
- Pin tfp to 0.8.x to avoid breaking pipenv ([#1480](https://github.com/rlworkgroup/garage/pull/1480))
- Force python 3.5 in CI ([#1522](https://github.com/rlworkgroup/garage/pull/1522))
- Separate terminal and completion signal in vectorized sampler ([#1581](https://github.com/rlworkgroup/garage/pull/1581))
- Disable certicate check for roboti.us ([#1595](https://github.com/rlworkgroup/garage/pull/1595))
- Fix `advantages` shape in `compute_advantage()` in torch tree ([#1209](https://github.com/rlworkgroup/garage/pull/1209))
- Fix plotting using tf.plotter ([#1292](https://github.com/rlworkgroup/garage/pull/1292))
- Fix duplicate window rendering when using garage.Plotter ([#1299](https://github.com/rlworkgroup/garage/pull/1299))
- Fix setting garage.model parameters ([#1363](https://github.com/rlworkgroup/garage/pull/1363))
- Fix two example jupyter notebook ([#1584](https://github.com/rlworkgroup/garage/pull/1584))
- Fix collecting samples in `RaySampler` ([#1583](https://github.com/rlworkgroup/garage/pull/1583))
## 2019.10.1
### Added
- Integration tests which cover all example scripts (
[#1078](https://github.com/rlworkgroup/garage/pull/1078),
[#1090](https://github.com/rlworkgroup/garage/pull/1090))
- Deterministic mode support for PyTorch ([#1068](https://github.com/rlworkgroup/garage/pull/1068))
- Install script support for macOS 10.15.1 ([#1051](https://github.com/rlworkgroup/garage/pull/1051))
- PyTorch modules now support either functions or modules for specifying their non-linearities ([#1038](https://github.com/rlworkgroup/garage/pull/1038))
### Fixed
- Errors in the documentation on implementing new algorithms ([#1074](https://github.com/rlworkgroup/garage/pull/1074))
- Broken example for DDPG+HER in TensorFlow ([#1070](https://github.com/rlworkgroup/garage/pull/1070))
- Error in the documentation for using garage with conda ([#1066](https://github.com/rlworkgroup/garage/pull/1066))
- Broken pickling of environment wrappers ([#1061](https://github.com/rlworkgroup/garage/pull/1061))
- `garage.torch` was not included in the PyPI distribution ([#1037](https://github.com/rlworkgroup/garage/pull/1037))
- A few broken examples for `garage.tf` ([#1032](https://github.com/rlworkgroup/garage/pull/1032))
## 2019.10.0
### Added
- Algorithms
* (D)DQN in TensorFlow ([#582](https://github.com/rlworkgroup/garage/pull/582))
* Maximum-entropy and entropy regularization for policy gradient algorithms in
TensorFlow ([#632](https://github.com/rlworkgroup/garage/pull/632))
* DDPG in PyTorch ([#815](https://github.com/rlworkgroup/garage/pull/815))
* VPG (i.e. policy gradients) in PyTorch ([#883](https://github.com/rlworkgroup/garage/pull/883))
* TD3 in TensorFlow ([#458](https://github.com/rlworkgroup/garage/pull/458))
- APIs
* Runner API for executing experiments and `LocalRunner` implementation for
executing them on the local machine (
[#541](https://github.com/rlworkgroup/garage/pull/541),
[#593](https://github.com/rlworkgroup/garage/pull/593),
[#602](https://github.com/rlworkgroup/garage/pull/602),
[#816](https://github.com/rlworkgroup/garage/pull/816),
)
* New Logger API, provided by a sister project [dowel](https://github.com/rlworkgroup/dowel) ([#464](https://github.com/rlworkgroup/garage/pull/464), [#660](https://github.com/rlworkgroup/garage/pull/660))
- Environment wrappers for pixel-based algorithms, especially DQN ([#556](https://github.com/rlworkgroup/garage/pull/556))
- Example for how to use garage with Google Colab ([#476](https://github.com/rlworkgroup/garage/pull/476))
- Advantage normalization for recurrent policies in TF ([#626](https://github.com/rlworkgroup/garage/pull/626))
- PyTorch support ([#725](https://github.com/rlworkgroup/garage/pull/725), [#764](https://github.com/rlworkgroup/garage/pull/764))
- Autogenerated API docs on [garage.readthedocs.io](https://garage.readthedocs.io/en/latest/py-modindex.html) ([#802](https://github.com/rlworkgroup/garage/pull/802))
- GPU version of the pip package ([#834](https://github.com/rlworkgroup/garage/pull/834))
- PathBuffer, a trajectory-oriented replay buffer ([#838](https://github.com/rlworkgroup/garage/pull/838))
- RaySampler, a remote and/or multiprocess sampler based on ray ([#793](https://github.com/rlworkgroup/garage/pull/793))
- Garage is now distributed on PyPI ([#870](https://github.com/rlworkgroup/garage/pull/870))
- `rollout` option to only sample policies deterministically ([#896](https://github.com/rlworkgroup/garage/pull/896))
- MultiEnvWrapper, which wraps multiple `gym.Env` environments into a discrete
multi-task environment ([#946](https://github.com/rlworkgroup/garage/pull/946))
### Changed
- Optimized Dockerfiles for fast rebuilds ([#557](https://github.com/rlworkgroup/garage/pull/557))
- Random seed APIs moved to `garage.experiment.deterministic` ([#578](https://github.com/rlworkgroup/garage/pull/578))
- Experiment wrapper script is now an ordinary module ([#586](https://github.com/rlworkgroup/garage/pull/586))
- numpy-based modules and algorithms moved to `garage.np` ([#604](https://github.com/rlworkgroup/garage/pull/604))
- Algorithm constructors now use `EnvSpec` rather than `gym.Env` ([#575](https://github.com/rlworkgroup/garage/pull/575))
- Snapshotter API moved from `garage.logger` to `garage.experiment` ([#658](https://github.com/rlworkgroup/garage/pull/658))
- Moved `process_samples` API from the Sampler to algorithms ([#652](https://github.com/rlworkgroup/garage/pull/652))
- Updated Snapshotter API ([#699](https://github.com/rlworkgroup/garage/pull/699))
- Updated Resume API ([#777](https://github.com/rlworkgroup/garage/pull/777))
- All algorithms now have a default sampler ([#832](https://github.com/rlworkgroup/garage/pull/832))
- Experiment launchers now require an explicit `snapshot_config` to their
`run_task` function ([#860](https://github.com/rlworkgroup/garage/pull/860))
- Various samplers moved from `garage.tf.sampler` to `garage.sampler` ([#836](https://github.com/rlworkgroup/garage/pull/836),
[#840](https://github.com/rlworkgroup/garage/pull/840))
- Dockerfiles are now based on Ubuntu 18.04 LTS by default ([#763](https://github.com/rlworkgroup/garage/pull/763))
- `dm_control` is now an optional dependency, installed using the extra
`garage[dm_control]` ([#828](https://github.com/rlworkgroup/garage/pull/828))
- MuJoCo is now an optional dependency, installed using the extra
`garage[mujoco]` ([#848](https://github.com/rlworkgroup/garage/pull/828))
- Samplers no longer flatten observations and actions ([#930](https://github.com/rlworkgroup/garage/pull/930),
[#938](https://github.com/rlworkgroup/garage/pull/938),
[#967](https://github.com/rlworkgroup/garage/pull/967))
- Implementations, tests, and benchmarks for all TensorFlow primitives, which
are now based on `garage.tf.Model` ([#574](https://github.com/rlworkgroup/garage/pull/574),
[#606](https://github.com/rlworkgroup/garage/pull/606),
[#615](https://github.com/rlworkgroup/garage/pull/615),
[#616](https://github.com/rlworkgroup/garage/pull/616),
[#618](https://github.com/rlworkgroup/garage/pull/618),
[#641](https://github.com/rlworkgroup/garage/pull/641),
[#642](https://github.com/rlworkgroup/garage/pull/642),
[#656](https://github.com/rlworkgroup/garage/pull/656),
[#662](https://github.com/rlworkgroup/garage/pull/662),
[#668](https://github.com/rlworkgroup/garage/pull/668),
[#672](https://github.com/rlworkgroup/garage/pull/672),
[#677](https://github.com/rlworkgroup/garage/pull/677),
[#730](https://github.com/rlworkgroup/garage/pull/730),
[#722](https://github.com/rlworkgroup/garage/pull/722),
[#765](https://github.com/rlworkgroup/garage/pull/765),
[#855](https://github.com/rlworkgroup/garage/pull/855),
[#878](https://github.com/rlworkgroup/garage/pull/878),
[#888](https://github.com/rlworkgroup/garage/pull/888),
[#898](https://github.com/rlworkgroup/garage/pull/898),
[#892](https://github.com/rlworkgroup/garage/pull/892),
[#897](https://github.com/rlworkgroup/garage/pull/897),
[#893](https://github.com/rlworkgroup/garage/pull/893),
[#890](https://github.com/rlworkgroup/garage/pull/890),
[#903](https://github.com/rlworkgroup/garage/pull/903),
[#916](https://github.com/rlworkgroup/garage/pull/916),
[#891](https://github.com/rlworkgroup/garage/pull/891),
[#922](https://github.com/rlworkgroup/garage/pull/922),
[#931](https://github.com/rlworkgroup/garage/pull/931),
[#933](https://github.com/rlworkgroup/garage/pull/933),
[#906](https://github.com/rlworkgroup/garage/pull/906),
[#945](https://github.com/rlworkgroup/garage/pull/945),
[#944](https://github.com/rlworkgroup/garage/pull/944),
[#943](https://github.com/rlworkgroup/garage/pull/943),
[#972](https://github.com/rlworkgroup/garage/pull/972))
- Dependency upgrades:
* mujoco-py to 2.0 ([#661](https://github.com/rlworkgroup/garage/pull/661))
* gym to 0.12.4 ([#661](https://github.com/rlworkgroup/garage/pull/661))
* dm_control to 7a36377879c57777e5d5b4da5aae2cd2a29b607a ([#661](https://github.com/rlworkgroup/garage/pull/661))
* akro to 0.0.6 ([#796](https://github.com/rlworkgroup/garage/pull/796))
* pycma to 2.7.0 ([#861](https://github.com/rlworkgroup/garage/pull/861))
* tensorflow to 1.15 ([#953](https://github.com/rlworkgroup/garage/pull/953))
* pytorch to 1.3.0 ([#952](https://github.com/rlworkgroup/garage/pull/952))
### Removed
- `garage.misc.autoargs`, a tool for decorating classes with autogenerated
command-line arguments ([#573](https://github.com/rlworkgroup/garage/pull/573))
- `garage.misc.ext`, a module with several unrelated utilities ([#578](https://github.com/rlworkgroup/garage/pull/578))
- `config_personal.py` module, replaced by environment variables where relevant ([#578](https://github.com/rlworkgroup/garage/pull/578), [#747](https://github.com/rlworkgroup/garage/pull/747))
- `contrib.rllab_hyperopt`, an experimental module for using `hyperopt` to tune
hyperparameters ([#684](https://github.com/rlworkgroup/garage/pull/684))
- `contrib.bichenchao`, a module of example launchers ([#683](https://github.com/rlworkgroup/garage/pull/683))
- `contrib.alexbeloi`, a module with an importance-sampling sampler and examples
(these were merged into garage) ([#717](https://github.com/rlworkgroup/garage/pull/717))
- EC2 cluster documentation and examples ([#835](https://github.com/rlworkgroup/garage/pull/835))
- `DeterministicMLPPolicy`, because it duplicated `ContinuousMLPPolicy` ([#929](https://github.com/rlworkgroup/garage/pull/929))
- `garage.tf.layers`, a custom high-level neural network definition API, was replaced by `garage.tf.models` ([#939](https://github.com/rlworkgroup/garage/pull/939))
- `Parameterized`, which was replaced by `garage.tf.Model` ([#942](https://github.com/rlworkgroup/garage/pull/942))
- `garage.misc.overrides`, whose features are no longer needed due to proper ABC
support in Python 3 and sphinx-autodoc ([#974](https://github.com/rlworkgroup/garage/pull/942))
- `Serializable`, which became a maintainability burden and has now been
replaced by regular pickle protocol (`__getstate__`/`__setstate__`)
implementations, where necessary ([#982](https://github.com/rlworkgroup/garage/pull/982))
- `garage.misc.special`, a library of mostly-unused math subroutines ([#986](https://github.com/rlworkgroup/garage/pull/986))
- `garage.envs.util`, superseded by features in [akro](https://github.com/rlworkgroup/akro) ([#986](https://github.com/rlworkgroup/garage/pull/986))
- `garage.misc.console`, a library of mostly-unused helper functions for writing
shell scripts ([#988](https://github.com/rlworkgroup/garage/pull/988))
### Fixed
- Bug in `ReplayBuffer` [#554](https://github.com/rlworkgroup/garage/pull/554)
- Bug in `setup_linux.sh` [#560](https://github.com/rlworkgroup/garage/pull/560)
- Bug in `examples/sim_policy.py` ([#691](https://github.com/rlworkgroup/garage/pull/691))
- Bug in `FiniteDifferenceHvp` ([#745](https://github.com/rlworkgroup/garage/pull/745))
- Determinism bug for some samplers ([#880](https://github.com/rlworkgroup/garage/pull/880))
- `use_gpu` in the experiment runner ([#918](https://github.com/rlworkgroup/garage/pull/918))
## [2019.02.2](https://github.com/rlworkgroup/garage/releases/tag/v2019.02.2)
### Fixed
- Bug in entropy regularization in TensorFlow PPO/TRPO ([#579](https://github.com/rlworkgroup/garage/pull/579))
- Bug in which advantage normalization was broken for recurrent policies ([#626](https://github.com/rlworkgroup/garage/pull/626))
- Bug in `examples/sim_policy.py` ([#691](https://github.com/rlworkgroup/garage/pull/691))
- Bug in `FiniteDifferenceHvp` ([#745](https://github.com/rlworkgroup/garage/pull/745))
## [2019.02.1](https://github.com/rlworkgroup/garage/releases/tag/v2019.02.1)
### Fixed
- Fix overhead in GaussianMLPRegressor by optionally creating assign operations ([#622](https://github.com/rlworkgroup/garage/pull/622))
## [2019.02.0](https://github.com/rlworkgroup/garage/releases/tag/v2019.02.0)
### Added
- Epsilon-greedy exploration strategy, DiscreteMLPModel, and
QFunctionDerivedPolicy (all needed by DQN)
- Base Model class for TensorFlow-based primitives
- Dump plots generated with matplotlib to TensorBoard
- Relative Entropy Policy Search (REPS) algorithm
- GaussianConvBaseline and GaussianConvRegressor primitives
- New Dockerfiles, docker-compose files, and Makefiles for running garage using
Docker
- Vanilla policy gradient loss to NPO
- Truncated Natural Policy Gradient (TNPG) algorithm for TensorFlow
- Episodic Reward Weighted Regression (ERWR) algorithm for TensorFlow
- gym.Env wrappers used for pixel environments
- Convolutional Neural Network primitive
### Changed
- Move dependencies from environment.yml to setup.py
- Update dependencies:
- tensorflow-probability to 0.5.x
- dm_control to commit 92f9913
- TensorFlow to 1.12
- MuJoCo to 2.0
- gym to 0.10.11
- Move dm_control tests into the unit test tree
- Use GitHub standard .gitignore
- Improve the implementation of RandomizedEnv (Dynamics Randomization)
- Decouple TensorBoard from the logger
- Move files from garage/misc/instrument to garage/experiment
- setup.py to be canonical in format and use automatic versioning
### Removed
- Move some garage subpackages into their own repositories:
- garage.viskit to [rlworkgroup/viskit](https://github.com/rlworkgroup/viskit)
- garage.spaces to [rlworkgroup/akro](https://github.com/rlworkgroup/akro)
- Remove Theano backend, algorithms, and dependencies
- Custom environments which duplicated [openai/gym](https://github.com/openai/gym)
- Some dead files from garage/misc (meta.py and viewer2d.py)
- Remove all code coverage tracking providers except CodeCov
### Fixed
- Clean up warnings in the test suite
- Pickling bug in GaussianMLPPolicyWithModel
- Namescope in LbfgsOptimizer
- Correctly sample paths in OffPolicyVectorizedSampler
- Implementation bugs in tf/VPG
- Bug when importing Box
- Bug in test_benchmark_her
## [2018.10.1](https://github.com/rlworkgroup/garage/releases/tag/v2018.10.1)
### Fixed
- Avoid importing Theano when using the TensorFlow branch
- Avoid importing MuJoCo when not required
- Implementation bugs in tf/VPG
- Bug when importing Box
- Bug in test_benchmark_her
- Bug in the CI scripts which produced false positives
## [2018.10.0](https://github.com/rlworkgroup/garage/releases/tag/v2018.10.1)
### Added
- PPO and DDPG for the TensorFlow branch
- HER for DDPG
- Recurrent Neural Network policy support for NPO, PPO and TRPO
- Base class for ReplayBuffer, and two implementations: SimpleReplayBuffer
and HerReplayBuffer
- Sampler classes OffPolicyVectorizedSampler and OnPolicyVectorizedSampler
- Base class for offline policies OffPolicyRLAlgorithm
- Benchmark tests for TRPO, PPO and DDPG to compare their performance with
those produced by OpenAI Baselines
- Dynamics randomization for MuJoCo environments
- Support for dm_control environments
- DictSpace support for garage environments
- PEP8 checks enforced in the codebase
- Support for Python imports: maintain correct ordering and remove unused
imports or import errors
- Test on TravisCI using Docker images for managing dependencies
- Testing code reorganized
- Code Coverage measurement with codecov
- Pre-commit hooks to enforce PEP8 and to verify imports and commit messages,
which are also applied in the Travis CI verification
- Docstring verification for added files that are not in the test branch or
moved files
- TensorBoard support for all key-value/log_tabular calls, plus support for
logging distributions
- Variable and name scope for symbolic operations in TensorFlow
- Top-level base Space class for garage
- Asynchronous plotting for Theano and Tensorflow
- GPU support for Theano
### Changed
- Rename rllab to garage, including all the rllab references in the packages
and modules inside the project
- Rename run_experiment_lite to run_experiment
- The file cma_es_lib.py was replaced by the pycma library available on PyPI
- Move the contrib package to garage.contrib
- Move Theano-dependent code to garage.theano
- Move all code from sandbox.rocky.tf to garage.tf
- Update several dependencies, mainly:
- Python to 3.6.6
- TensorFlow to 1.9
- Theano to 1.0.2
- mujoco-py to 1.50.1
- gym to 0.10.8
- Transfer various dependencies from conda to pip
- Separate example script files in the Theano and TensorFlow branch
- Update LICENSE, CONTRIBUTING.md and .gitignore
- Use convenience imports, that is, import classes and functions that share the
same or similar name to its module in the corresponding `__init__.py` file of
their package
- Replace ProxyEnv with gym.Wrapper
- Update installation scripts for Linux and macOS
### Removed
- All unused imports in the Python files
- Unused packages from environment.yml
- The files under rllab.mujoco_py were removed to use the pip release instead
- Empty `__init__.py` files
- The environment class defined by rllab.envs.Env was not imported to garage
and the environment defined by gym.Env is used now
### Fixed
- Sleeping processes produced by the parallel sampler. NOTE: although the
frequency of this issue has been reduced, our tests in TravisCI occasionally
detect the issue and currently it seems to be an issue with re-entrant locks
and multiprocessing in Python.
| 35,801 | 62.142857 | 207 | md |
CSD-locomotion | CSD-locomotion-master/garaged/CONTRIBUTING.md | # Contributing to garage
We welcome all contributions to garage.
Use this guide to prepare your contribution.
## Pull requests
All contributions to the garage codebase are submitted via a GitHub pull request.
### Review process
To be submitted, a pull request must satisfy the following criteria:
1. Rebases cleanly on the `master` branch
1. Passes all continuous integration tests
1. Conforms to the git commit message [format](#commit-message-format)
1. Receives approval from another contributor
1. Receives approval from a maintainer (distinct from the contributor review)
These criteria may be satisfied in any order, but in practice your PR is unlikely to get attention from contributors until 1-3 are satisfied. Maintainer attention is a scarce resource, so generally maintainers wait for a review from a non-maintainer contributor before reviewing your PR.
## Preparing your repo to make contributions
After following the standard garage setup steps, make sure to install the pre-commit hooks into your repository (the commands below show how). pre-commit helps streamline the pull request process by catching basic problems locally before they are checked by the CI.
To setup pre-commit in your repo:
```sh
# make sure your Python environment is activated, e.g.
# conda activate garage
# pipenv shell
# poetry shell
# source venv/bin/activate
pre-commit install -t pre-commit
pre-commit install -t pre-push
pre-commit install -t commit-msg
```
Once you've installed pre-commit, it will automatically run every time you type `git commit`.
## Code style
The Python code in garage conforms to the [PEP8](https://www.python.org/dev/peps/pep-0008/) standard. Please read and understand it in detail.
### garage-specific Python style
These are garage-specific rules which are not part of the aforementioned style guides.
* Python package imports should be sorted alphabetically within their PEP8 groupings.
The sorting is alphabetical from left to right, ignoring case and Python keywords (i.e. `import`, `from`, `as`). Notable exceptions apply in `__init__.py` files, where sometimes this rule will trigger a circular import.
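For example (hypothetical imports, shown only to illustrate the ordering):
```python
import collections
import os.path
import sys

import numpy as np
import tensorflow as tf

from garage.experiment import deterministic
```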
* Prefer single-quoted strings (`'foo'`) over double-quoted strings (`"foo"`).
Double-quoted strings can be used if there is a compelling escape or formatting reason to avoid single quotes (e.g. a single quote appears inside the string).
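For example:
```python
name = 'garage'  # preferred
sentence = "Don't escape when double quotes read better."
```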
* Add convenience imports in `__init__.py` of a package for shallow first-level repetitive imports, but not for subpackages, even if that subpackage is defined in a single `.py` file.
For instance, if an import line reads `from garage.foo.bar import Bar` then you should add `from garage.foo.bar import Bar` to `garage/foo/__init__.py` so that users may instead write `from garage.foo import Bar`. However, if an import line reads `from garage.foo.bar.stuff import Baz`, *do not* add `from garage.foo.bar.stuff import Baz` to `garage/foo/__init__.py`, because that obscures the `stuff` subpackage.
*Do*
`garage/foo/__init__.py`:
```python
"""Foo package."""
from garage.foo.bar import Bar
```
`garage/barp/bux.py`:
```python
"""Bux tools for barps."""
from garage.foo import Bar
from garage.foo.stuff import Baz
```
*Don't*
`garage/foo/__init__.py`:
```python
"""Foo package."""
from garage.foo.bar import Bar
from garage.foo.bar.stuff import Baz
```
`garage/barp/bux.py`:
```python
"""Bux tools for barps."""
from garage.foo import Bar
from garage.foo import Baz
```
* Imports within the same package should be absolute, to avoid creating circular dependencies due to convenience imports in `__init__.py`
*Do*
`garage/foo/bar.py`
```python
from garage.foo.baz import Baz
b = Baz()
```
*Don't*
`garage/foo/bar.py`
```python
from garage.foo import Baz # this could lead to a circular import, if Baz is imported in garage/foo/__init__.py
b = Baz()
```
* Base and interface classes (i.e. classes which are not intended to ever be instantiated) should use the `abc` package to declare themselves as abstract.
i.e. your class should inherit from `abc.ABC` or use the metaclass `abc.ABCMeta`, it should declare its methods abstract (e.g. using `@abc.abstractmethod`) as-appropriate. Abstract methods should all use `pass` as their implementation, not `raise NotImplementedError`
*Do*
```python
import abc


class Robot(abc.ABC):
    """Interface for robots."""

    @abc.abstractmethod
    def beep(self):
        pass
```
*Don't*
```python
class Robot(object):
    """Base class for robots."""

    def beep(self):
        raise NotImplementedError
```
* When using external dependencies, use the `import` statement only to import whole modules, not individual classes or functions.
This applies to both packages from the standard library and 3rd-party dependencies. If a package has a long or cumbersome full path, or is used very frequently (e.g. `numpy`, `tensorflow`), you may use the keyword `as` to create a file-specific name which makes sense. Additionally, you should always follow the community consensus short names for common dependencies (see below).
*Do*
```python
import collections
import gym.spaces
from garage.tf.models import MLPModel
q = collections.deque(maxlen=10)
d = gym.spaces.Discrete(5)
m = MLPModel(output_dim=2)
```
*Don't*
```python
from collections import deque
from gym.spaces import Discrete
import tensorflow as tf
from garage.tf.models import MLPModel
q = deque(maxlen=10)
d = Discrete(5)
m = MLPModel(output_dim=2)
```
*Known community-consensus imports*
```python
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import dowel.logger as logger
import dowel.tabular as tabular
```
### Other languages
Non-Python files (including XML, HTML, CSS, JS, and Shell Scripts) should follow the [Google Style Guide](https://github.com/google/styleguide) for that language
YAML files should use 2 spaces for indentation.
### Whitespace (all languages)
* Use Unix-style line endings
* Trim trailing whitespace from all lines
* All files should end in a single newline
## Documentation
Python files should provide docstrings for all public methods which follow [PEP257](https://www.python.org/dev/peps/pep-0257/) docstring conventions and [Google](http://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) docstring formatting. A good docstring example can be found [here](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html).
### Docstrings
* Docstrings for `__init__` should be included in the class docstring as suggested in the [Google example](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html).
* Docstrings should provide full type information for all arguments, return values, exceptions, etc. according to the Google format
* When documenting fields which are numpy arrays or other tensor types (and collections thereof), please carefully document the expected input shape of the field. See below for shape conventions.
* For shapes and equations, use the Sphinx `:math:` directive to render them properly with mathematical symbols.
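For instance, the first two rules applied to a class look like this (a schematic, made-up class, not a real garage API):
```python
class GridWorld:
    """A toy grid world environment.

    Note that the ``__init__`` arguments are documented here, in the
    class docstring, rather than under ``__init__`` itself.

    Args:
        size (int): Width and height of the grid, :math:`S`.
        seed (int): Seed used to generate the wall layout.

    """

    def __init__(self, size, seed):
        self._size = size
        self._seed = seed
```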
#### Conventions for documenting tensor shapes
Data which include a meaningful time-series dimension (e.g. trajectories) should always document that dimension explicitly, even if that dimension has been flattened out. Data containing only non time-series samples should omit the time dimension.
Always use the Sphinx `:math:` directive to render your shapes properly.
| Symbol | Description |
|-----------|---------------------------------------------------------------------------------------------------------------|
| `(...)` | Tensor shapes are enclosed in parentheses, e.g a batch of `(N, S^*)` samples |
| `N` | Batch dimension (e.g. trajectories or samples) |
| `T` | Time dimension |
| `.^*` | Variadic parts of a tensor shape, which will be broadcast or ignored are denoted with a `*`, e.g. `S^*` |
| `[.]` | Variable-length dimensions are enclosed in square brackets, e.g. `[K]` if `K` is the dimension variable |
| `\bullet` | Flattening operator, e.g. `N \bullet T` has length `N * T`. `N \bullet [T]` has length `\sum_{i \in N} [T]_i` |
**Example**
```python
def concatenate_time(paths):
    """Concatenate a list of variable-length tensors along the time dimension.

    Concatenates a list `paths` of `N` variable-length time-series tensors
    along their time dimension, producing a single time-series tensor with the
    component tensors arranged along a single batch dimension.

    Args:
        paths (list[numpy.ndarray]): A list of :math:`N` tensors to combine
            into a single batch of tensors, with elements of shape
            :math:`([T], S^*)`

    Returns:
        numpy.ndarray: Time-flattened version of `paths`, with shape
            :math:`(N \bullet [T], S^*)`

    """
```
### Application guide
**Newly created** Python files should follow all of the above standards for docstrings.
**Non-trivially modified** Python files should be submitted with updated docstrings according to the above standard.
**New or heavily-redesigned** modules with non-trivial APIs and functionality should provide full text documentation, in addition to docstrings, which covers:
* Explanation of the purpose of the module or API
* Brief overview of its design
* Usage examples for the most common use cases
* Explicitly calls out common gotchas, misunderstandings, etc.
* A quick summary of how to go about advanced usage, configuration, or extension
## Testing
garage maintains a test suite to ensure that future changes do not break existing functionality. We use TravisCI to run a unit test suite on every pull request before merging.
* New functionality should always include unit tests and, where appropriate, integration tests.
* PRs fixing bugs which were not caught by an existing test should always include a test replicating the bug
### Creating Tests
Add a test for your functionality under the `garage/tests/` directory. Make sure your test filename is prefixed with `test_` (i.e. `test_<filename>.py`) to ensure the test will be run in the CI.
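For example, a minimal test module (all names here are hypothetical) could look like:
```python
"""Tests for the hypothetical garage.foo.Bar."""
from garage.foo import Bar


class TestBar:
    """Tests for Bar."""

    def test_beep(self):
        bar = Bar()
        assert bar.beep() is not None
```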
## Git
### Workflow
__garage uses a linear commit history and rebase-only merging.__
This means that no merge commits appear in the project history. All pull requests, regardless of number of commits, are squashed to a single atomic commit at merge time.
Do's and Don'ts for avoiding accidental merge commits and other headaches:
* *Don't* use GitHub's "Update branch" button on pull requests, no matter how tempting it seems
* *Don't* use `git merge`
* *Don't* use `git pull` (unless git tells you that your branch can be fast-forwarded)
* *Don't* make commits in the `master` branch---always use a feature branch
* *Do* fetch upstream (`rlworkgroup/garage`) frequently and keep your `master` branch up-to-date with upstream
* *Do* rebase your feature branch on `master` frequently
* *Do* keep only one or a few commits in your feature branch, and use `git commit --amend` to update your changes. This helps prevent long chains of identical merges during a rebase.
Please see [this guide](https://gist.github.com/markreid/12e7c2203916b93d23c27a263f6091a0) for a tutorial on the workflow. Note: unlike the guide, we don't use separate `develop`/`master` branches, so all PRs should be based on `master` rather than `develop`
### Commit message format
garage follows the git commit message guidelines documented [here](https://gist.github.com/robertpainsi/b632364184e70900af4ab688decf6f53) and [here](https://chris.beams.io/posts/git-commit/). You can also find an in-depth guide to writing great commit messages [here](https://github.com/RomuloOliveira/commit-messages-guide/blob/master/README.md)
In short:
* All commit messages have an informative subject line of 50 characters or fewer
* A newline between the subject and the body
* If relevant, an informative body which is wrapped to 72 characters
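For example, a well-formed message (illustrative only, not from the project history) looks like:
```
Fix advantage normalization for recurrent policies

Advantage normalization produced incorrect values when paths were
shorter than the maximum path length. Mask the padded timesteps
before computing the mean and standard deviation, and add a
regression test covering the short-path case.
```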
### Git recipes
These recipes assume you are working out of a private GitHub fork.
If you are working directly as a contributor to `rlworkgroup`, you can replace references to `rlworkgroup` with `origin`. You also, of course, do not need to add `rlworkgroup` as a remote, since it will be `origin` in your repository.
#### Clone your GitHub fork and setup the rlworkgroup remote
```sh
git clone [email protected]:<your_github_username>/garage.git
cd garage
git remote add rlworkgroup [email protected]:rlworkgroup/garage.git
git fetch rlworkgroup
```
#### Update your GitHub fork with the latest from upstream
```sh
git fetch rlworkgroup
git checkout master
git reset --hard rlworkgroup/master
git push -f origin master
```
#### Make a new feature branch and push it to your fork
```sh
git checkout master
git checkout -b myfeaturebranch
# make some changes
git add file1 file2 file3
git commit # Write a commit message conforming to the guidelines
git push origin myfeaturebranch
```
#### Rebase a feature branch so it's up-to-date with upstream and push it to your fork
```sh
git checkout master
git fetch rlworkgroup
git reset --hard rlworkgroup/master
git checkout myfeaturebranch
git rebase master
# you may need to manually reconcile merge conflicts here. Follow git's instructions.
git push -f origin myfeaturebranch # -f is frequently necessary because rebases rewrite history
```
## Release
### Modify CHANGELOG.md
For each release in garage, modify [CHANGELOG.md](https://github.com/rlworkgroup/garage/blob/master/CHANGELOG.md) with the most relevant changes from the latest release. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), which adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
| 14,354 | 44.283912 | 417 | md |
CSD-locomotion | CSD-locomotion-master/garaged/README.md | [](http://garage.readthedocs.org/en/latest/)
[](https://travis-ci.com/rlworkgroup/garage)
[](https://github.com/rlworkgroup/garage/blob/master/LICENSE)
[](https://codecov.io/gh/rlworkgroup/garage)
[](https://badge.fury.io/py/garage)
# garage
garage is a toolkit for developing and evaluating reinforcement learning algorithms, and an accompanying library of state-of-the-art implementations built using that toolkit.
The toolkit provides a wide range of modular tools for implementing RL algorithms, including:
* Composable neural network models
* Replay buffers
* High-performance samplers
* An expressive experiment definition interface
* Tools for reproducibility (e.g. set a global random seed which all components respect)
* Logging to many outputs, including TensorBoard
* Reliable experiment checkpointing and resuming
* Environment interfaces for many popular benchmark suites
* Support for running garage in diverse environments, including always up-to-date Docker containers
See the [latest documentation](https://garage.readthedocs.org/en/latest/) for getting started instructions and detailed APIs.
## Installation
```
pip install --user garage
```
## Algorithms
The table below summarizes the algorithms available in garage.
| Algorithm | Framework(s) |
| ---------------------- | ------------------- |
| CEM | numpy |
| CMA-ES | numpy |
| REINFORCE (a.k.a. VPG) | PyTorch, TensorFlow |
| DDPG | PyTorch, TensorFlow |
| DQN | TensorFlow |
| DDQN | TensorFlow |
| ERWR | TensorFlow |
| NPO | TensorFlow |
| PPO | PyTorch, TensorFlow |
| REPS | TensorFlow |
| TD3 | TensorFlow |
| TNPG | TensorFlow |
| TRPO | PyTorch, TensorFlow |
| MAML | PyTorch |
| RL2 | TensorFlow |
| PEARL | PyTorch |
| SAC | PyTorch |
| MTSAC | PyTorch |
| MTPPO | PyTorch, TensorFlow |
| MTTRPO | PyTorch, TensorFlow |
| Task Embedding | TensorFlow |
## Supported Tools and Frameworks
garage supports Python 3.5+
The package is tested on Ubuntu 18.04. It is also known to run on recent versions of macOS, using Homebrew to install some dependencies. Windows users can install garage via WSL, or by making use of the Docker containers.
We currently support [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/) for implementing the neural network portions of RL algorithms, and additions of new framework support are always welcome. PyTorch modules can be found in the package [`garage.torch`](https://github.com/rlworkgroup/garage/tree/master/src/garage/torch) and TensorFlow modules can be found in the package [`garage.tf`](https://github.com/rlworkgroup/garage/tree/master/src/garage/tf). Algorithms which do not require neural networks are found in the package [`garage.np`](https://github.com/rlworkgroup/garage/tree/master/src/garage/np).
The package is available for download on PyPI, and we ensure that it installs successfully into environments defined using [conda](https://docs.conda.io/en/latest/), [Pipenv](https://pipenv.readthedocs.io/en/latest/), and [virtualenv](https://virtualenv.pypa.io/en/latest/).
All components use the popular [`gym.Env`](https://github.com/openai/gym) interface for RL environments.
## Testing
The most important feature of garage is its comprehensive automated unit test and benchmarking suite, which helps ensure that the algorithms and modules in garage maintain state-of-the-art performance as the software changes.
Our testing strategy has three pillars:
* **Automation:**
We use continuous integration to test all modules and algorithms in garage before adding any change. The full installation and test suite is also run nightly, to detect regressions.
* **Acceptance Testing:**
  Any commit which might change the performance of an algorithm is subjected to comprehensive benchmarks on the relevant algorithms before it is merged.
* **Benchmarks and Monitoring:**
We benchmark the full suite of algorithms against their relevant benchmarks and widely-used implementations regularly, to detect regressions and improvements we may have missed.
## Supported Releases
| Release | Build Status | Last date of support |
| ------- | ------------ | -------------------- |
| [v2020.06](https://github.com/rlworkgroup/garage/releases/tag/v2020.06.0) | [](https://travis-ci.com/rlworkgroup/garage) | February 28th, 2021 |
| [v2019.10](https://github.com/rlworkgroup/garage/releases/tag/v2019.10.0) | [](https://travis-ci.com/rlworkgroup/garage) | October 31st, 2020 |
Garage releases a new stable version approximately every 4 months, in February, June, and October. Maintenance releases have a stable API and dependency tree, and receive bug fixes and critical improvements but not new features. We currently support each release for a window of 8 months.
## Citing garage
If you use garage for academic research, please cite the repository using the following BibTeX entry. You should update the `commit` field with the commit or release tag your publication uses.
```latex
@misc{garage,
author = {The garage contributors},
title = {Garage: A toolkit for reproducible reinforcement learning research},
year = {2019},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/rlworkgroup/garage}},
commit = {be070842071f736eb24f28e4b902a9f144f5c97b}
}
```
## Credits
The original code for garage was adapted from a predecessor project called [rllab](https://github.com/rll/rllab). The garage project is grateful for the contributions of the original rllab authors, and hopes to continue advancing the state of reproducibility in RL research in the same spirit.
rllab was developed by Rocky Duan (UC Berkeley/OpenAI), Peter Chen (UC Berkeley), Rein Houthooft (UC Berkeley/OpenAI), John Schulman (UC Berkeley/OpenAI), and Pieter Abbeel (UC Berkeley/OpenAI).
CSD-locomotion | CSD-locomotion-master/garaged/readthedocs.yml
version: 2
sphinx:
configuration: docs/conf.py
python:
version: 3.7
install:
- requirements: docs/requirements.txt
system_packages: true
CSD-locomotion | CSD-locomotion-master/garaged/setup.py
"""setuptools based setup module."""
import os
from setuptools import find_packages
from setuptools import setup
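# Token used to authenticate GitHub tarball downloads in EXTRAS['dev'];
# falls back to the anonymous 'git' user when the variable is unset.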
GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'
GYM_VERSION = '==0.17.2'
# Required dependencies
REQUIRED = [
# Please keep alphabetized
'akro',
'click>=2.0',
'cloudpickle==1.3',
'cma==2.7.0',
#'dowel==0.0.3',
'gym[atari,box2d,classic_control]' + GYM_VERSION,
'joblib<0.13,>=0.12',
'numpy>=1.14.5',
'psutil',
'python-dateutil',
'ray<=0.8.6',
'scikit-image',
'scipy',
'setproctitle>=1.0',
'tensorflow>=1.14',
'tensorflow-probability>=0.11.0',
#'torch>=1.0.0,!=1.5.0',
# For torch.clip
'torch>=1.7.0',
'torchvision>=0.2.1',
]
# Dependencies for optional features
EXTRAS = {}
EXTRAS['mujoco'] = [
'mujoco-py<=2.0.2.8,>=2.0',
'gym[all]' + GYM_VERSION,
]
EXTRAS['dm_control'] = [
# dm_control throws an error during install about not being able to
# find a build dependency (absl-py). Later pip executes the `install`
# command again and the install succeeds because absl-py has been
# installed. This is stupid, but harmless.
'dm_control==0.0.300771433',
]
EXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))
# Development dependencies (*not* included in 'all')
EXTRAS['dev'] = [
# Please keep alphabetized
'flake8',
'flake8-docstrings>=1.5.0',
'flake8-import-order',
'metaworld @ https://{}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77'.format(GARAGE_GH_TOKEN), # noqa: E501
'pep8-naming==0.7.0',
'pre-commit',
'pycodestyle>=2.5.0',
'pydocstyle>=4.0.0',
'pylint>=2.4.3',
'pytest>=4.5.0', # Required for strict-markers
'pytest-cov',
'pytest-timeout',
'pytest-xdist',
'recommonmark',
'sphinx',
'sphinx_rtd_theme',
'yapf==0.28.0',
] # yapf: disable
with open('README.md') as f:
README = f.read()
# Get the package version dynamically
with open('VERSION') as v:
VERSION = v.read().strip()
setup(
name='garage',
version=VERSION,
author='Reinforcement Learning Working Group',
description='A toolkit for reproducible reinforcement learning research',
url='https://github.com/rlworkgroup/garage',
packages=find_packages(where='src'),
package_dir={'': 'src'},
scripts=['scripts/garage'],
python_requires='>=3.5',
install_requires=REQUIRED,
extras_require=EXTRAS,
license='MIT',
long_description=README,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries',
],
)
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/README.md
# garage benchmarking
This guide explains how to write garage benchmark scripts.
### How to write a benchmark script?
**0. Install `garage_benchmarks`**
`cd benchmarks && pip install -e .`
**1. Define an experiment function**
Use `@wrap_experiment` to define your experiment. The first parameter, `ctxt`, is the experiment context that `@wrap_experiment` passes in; it carries the configuration (such as the snapshot directory) that the runner uses.
The second and third parameters of the function should be `env_id` and `seed`.
Give your function a descriptive name, because it is used as the label name when plotting.
See more [examples](https://github.com/rlworkgroup/garage/tree/master/benchmarks/src/garage_benchmarks/experiments).
```py
@wrap_experiment
def trpo_garage_pytorch(ctxt, env_id, seed):
...
```
**2. Define an execution function using `@benchmark` and `iterate_experiments()`**
```py
@benchmark
def your_benchmarks():
iterate_experiments(trpo_garage_pytorch, MuJoCo1M_ENV_SET)
iterate_experiments(trpo_garage_tf, MuJoCo1M_ENV_SET)
```
For the above example, the results will be saved under `/data/local/benchmarks/your_benchmarks`.
Plotting is enabled by default and figures will be stored in `/plot`.
You can explicitly disable it by defining `@benchmark(plot=False)`.
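`iterate_experiments()` also accepts optional `seeds` and plot-column arguments, so a benchmark can pin its seeds and choose which csv columns to plot (the values below are illustrative):
```py
@benchmark(plot=False)
def my_benchmarks():
    iterate_experiments(trpo_garage_pytorch,
                        MuJoCo1M_ENV_SET,
                        seeds=[1, 2, 3],
                        ycolumn='Evaluation/AverageReturn',
                        ylabel='Average Return')
```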
**3. Trigger your benchmark**
Under `garage/benchmarks` do:
`garage_benchmark run your_benchmarks`
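To see all available benchmark names, run `garage_benchmark list`.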
### Environment sets
| Algorithm | Observation | Action | Environment Set |
| --- | --- | --- | --- |
| On-policy | Pixel | Discrete | *PIXEL_ENV_SET |
| Off-policy | Pixel | Discrete | Atari1M |
| Meta-RL | Non-Pixel | Discrete | *ML_ENV_SET |
| MultiTask-RL | Non-Pixel | Discrete | *MT_ENV_SET |
| ALL | Non-Pixel | Discrete | *STATE_ENV_SET |
| ALL | Non-Pixel | Continuous | MuJoCo1M |
```
PIXEL_ENV_SET: [
'MemorizeDigits-v0', 'CubeCrash-v0', ^'Acrobot-v1', ^'MountainCar-v0', ^'CartPole-v1', ^'LunarLander-v2']
```
^ Using the wrappers [PixelObservationWrapper](https://github.com/openai/gym/blob/master/gym/wrappers/pixel_observation.py) and [FrameStack](https://github.com/openai/gym/blob/master/gym/wrappers/frame_stack.py) (n=4)
```
ML_ENV_SET: [
'ML1-push-v1', 'ML1-reach-v1', 'ML1-pick-place-v1', 'ML10', 'ML45']
```
```
MT_ENV_SET: [
'ML1-push-v1', 'ML1-reach-v1', 'ML1-pick-place-v1', 'MT10', 'MT50']
```
See [Meta-World](https://github.com/rlworkgroup/metaworld) for ML1/ML10/ML45/MT10/MT50.
```
STATE_ENV_SET: [
'LunarLander-v2', 'CartPole-v1', 'Assault-ramDeterministic-v4', 'Breakout-ramDeterministic-v4', 'ChopperCommand-ramDeterministic-v4', 'Tutankham-ramDeterministic-v4']
```
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/setup.py
"""Setup script for garage benchmarking scripts.
This package is generally not needed by users of garage.
"""
import os
from setuptools import find_packages, setup
GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'
REQUIRED = [
# Please keep alphabetized
'baselines @ https://{}@api.github.com/repos/openai/baselines/tarball/ea25b9e8b234e6ee1bca43083f8f3cf974143998'.format(GARAGE_GH_TOKEN), # noqa: E501
'google-cloud-storage',
'gym==0.17.2',
'matplotlib'
] # yapf: disable
setup(name='garage_benchmarks',
packages=find_packages(where='src'),
package_dir={'': 'src'},
install_requires=REQUIRED,
include_package_data=True,
entry_points='''
[console_scripts]
garage_benchmark=garage_benchmarks.run_benchmarks:cli
''')
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/__init__.py
"""garage benchmarks."""
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/benchmark_algos.py
"""Benchmarking for algorithms."""
from garage_benchmarks.experiments.algos import ddpg_garage_tf
from garage_benchmarks.experiments.algos import her_garage_tf
from garage_benchmarks.experiments.algos import ppo_garage_pytorch
from garage_benchmarks.experiments.algos import ppo_garage_tf
from garage_benchmarks.experiments.algos import td3_garage_tf
from garage_benchmarks.experiments.algos import trpo_garage_pytorch
from garage_benchmarks.experiments.algos import trpo_garage_tf
from garage_benchmarks.experiments.algos import vpg_garage_pytorch
from garage_benchmarks.experiments.algos import vpg_garage_tf
from garage_benchmarks.helper import benchmark, iterate_experiments
from garage_benchmarks.parameters import Fetch1M_ENV_SET, MuJoCo1M_ENV_SET
@benchmark
def ddpg_benchmarks():
"""Run experiments for DDPG benchmarking."""
iterate_experiments(ddpg_garage_tf, MuJoCo1M_ENV_SET)
@benchmark
def her_benchmarks():
"""Run experiments for HER benchmarking."""
iterate_experiments(her_garage_tf, Fetch1M_ENV_SET)
@benchmark
def ppo_benchmarks():
"""Run experiments for PPO benchmarking."""
iterate_experiments(ppo_garage_pytorch, MuJoCo1M_ENV_SET)
iterate_experiments(ppo_garage_tf, MuJoCo1M_ENV_SET)
@benchmark
def td3_benchmarks():
"""Run experiments for TD3 benchmarking."""
td3_env_ids = [
env_id for env_id in MuJoCo1M_ENV_SET if env_id != 'Reacher-v2'
]
iterate_experiments(td3_garage_tf, td3_env_ids)
@benchmark
def trpo_benchmarks():
"""Run experiments for TRPO benchmarking."""
iterate_experiments(trpo_garage_pytorch, MuJoCo1M_ENV_SET)
iterate_experiments(trpo_garage_tf, MuJoCo1M_ENV_SET)
@benchmark
def vpg_benchmarks():
"""Run experiments for VPG benchmarking."""
iterate_experiments(vpg_garage_pytorch, MuJoCo1M_ENV_SET)
iterate_experiments(vpg_garage_tf, MuJoCo1M_ENV_SET)
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/benchmark_auto.py
"""Automatic benchmarking."""
from garage_benchmarks.experiments.algos import ddpg_garage_tf
from garage_benchmarks.experiments.algos import ppo_garage_pytorch
from garage_benchmarks.experiments.algos import ppo_garage_tf
from garage_benchmarks.experiments.algos import td3_garage_tf
from garage_benchmarks.experiments.algos import trpo_garage_pytorch
from garage_benchmarks.experiments.algos import trpo_garage_tf
from garage_benchmarks.experiments.algos import vpg_garage_pytorch
from garage_benchmarks.experiments.algos import vpg_garage_tf
from garage_benchmarks.helper import benchmark, iterate_experiments
from garage_benchmarks.parameters import MuJoCo1M_ENV_SET
@benchmark(plot=False, auto=True)
def auto_ddpg_benchmarks():
"""Run experiments for DDPG benchmarking."""
iterate_experiments(ddpg_garage_tf, MuJoCo1M_ENV_SET)
@benchmark(plot=False, auto=True)
def auto_ppo_benchmarks():
"""Run experiments for PPO benchmarking."""
iterate_experiments(ppo_garage_pytorch, MuJoCo1M_ENV_SET)
iterate_experiments(ppo_garage_tf, MuJoCo1M_ENV_SET)
@benchmark(plot=False, auto=True)
def auto_td3_benchmarks():
"""Run experiments for TD3 benchmarking."""
td3_env_ids = [
env_id for env_id in MuJoCo1M_ENV_SET if env_id != 'Reacher-v2'
]
iterate_experiments(td3_garage_tf, td3_env_ids)
@benchmark(plot=False, auto=True)
def auto_trpo_benchmarks():
"""Run experiments for TRPO benchmarking."""
iterate_experiments(trpo_garage_pytorch, MuJoCo1M_ENV_SET)
iterate_experiments(trpo_garage_tf, MuJoCo1M_ENV_SET)
@benchmark(plot=False, auto=True)
def auto_vpg_benchmarks():
"""Run experiments for VPG benchmarking."""
iterate_experiments(vpg_garage_pytorch, MuJoCo1M_ENV_SET)
iterate_experiments(vpg_garage_tf, MuJoCo1M_ENV_SET)
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/benchmark_baselines.py
"""Benchmarking for baselines."""
import random
from garage_benchmarks.experiments.baselines import continuous_mlp_baseline
from garage_benchmarks.experiments.baselines import gaussian_cnn_baseline
from garage_benchmarks.experiments.baselines import gaussian_mlp_baseline
from garage_benchmarks.helper import benchmark, iterate_experiments
from garage_benchmarks.parameters import MuJoCo1M_ENV_SET, PIXEL_ENV_SET
_seeds = random.sample(range(100), 3)
@benchmark
def continuous_mlp_baseline_tf_ppo_benchmarks():
"""Run benchmarking experiments for Continuous MLP Baseline on TF-PPO."""
iterate_experiments(continuous_mlp_baseline,
MuJoCo1M_ENV_SET,
seeds=_seeds)
@benchmark
def gaussian_cnn_baseline_tf_ppo_benchmarks():
"""Run benchmarking experiments for Gaussian CNN Baseline on TF-PPO."""
iterate_experiments(gaussian_cnn_baseline, PIXEL_ENV_SET, seeds=_seeds)
@benchmark
def gaussian_mlp_baseline_tf_ppo_benchmarks():
"""Run benchmarking experiments for Gaussian MLP Baseline on TF-PPO."""
iterate_experiments(gaussian_mlp_baseline, MuJoCo1M_ENV_SET, seeds=_seeds)
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/benchmark_policies.py
"""Benchmarking for policies."""
import random
from garage_benchmarks.experiments.policies import categorical_cnn_policy
from garage_benchmarks.experiments.policies import categorical_gru_policy
from garage_benchmarks.experiments.policies import categorical_lstm_policy
from garage_benchmarks.experiments.policies import categorical_mlp_policy
from garage_benchmarks.experiments.policies import continuous_mlp_policy
from garage_benchmarks.experiments.policies import gaussian_gru_policy
from garage_benchmarks.experiments.policies import gaussian_lstm_policy
from garage_benchmarks.experiments.policies import gaussian_mlp_policy
from garage_benchmarks.helper import benchmark, iterate_experiments
from garage_benchmarks.parameters import (MuJoCo1M_ENV_SET, PIXEL_ENV_SET,
STATE_ENV_SET)
_seeds = random.sample(range(100), 3)
@benchmark
def categorical_cnn_policy_tf_ppo_benchmarks():
"""Run benchmarking experiments for Categorical CNN Policy on TF-PPO."""
iterate_experiments(categorical_cnn_policy, PIXEL_ENV_SET, seeds=_seeds)
@benchmark
def categorical_gru_policy_tf_ppo_benchmarks():
"""Run benchmarking experiments for Categorical GRU Policy on TF-PPO."""
iterate_experiments(categorical_gru_policy, STATE_ENV_SET, seeds=_seeds)
@benchmark
def categorical_lstm_policy_tf_ppo_benchmarks():
"""Run benchmarking experiments for Categorical LSTM Policy on TF-PPO."""
iterate_experiments(categorical_lstm_policy, STATE_ENV_SET, seeds=_seeds)
@benchmark
def categorical_mlp_policy_tf_ppo_benchmarks():
"""Run benchmarking experiments for Categorical MLP Policy on TF-PPO."""
iterate_experiments(categorical_mlp_policy, STATE_ENV_SET, seeds=_seeds)
@benchmark
def continuous_mlp_policy_tf_ddpg_benchmarks():
"""Run benchmarking experiments for Continuous MLP Policy on TF-DDPG."""
seeds = random.sample(range(100), 5)
iterate_experiments(continuous_mlp_policy, MuJoCo1M_ENV_SET, seeds=seeds)
@benchmark
def gaussian_gru_policy_tf_ppo_benchmarks():
"""Run benchmarking experiments for Gaussian GRU Policy on TF-PPO."""
iterate_experiments(gaussian_gru_policy, MuJoCo1M_ENV_SET, seeds=_seeds)
@benchmark
def gaussian_lstm_policy_tf_ppo_benchmarks():
"""Run benchmarking experiments for Gaussian LSTM Policy on TF-PPO."""
iterate_experiments(gaussian_lstm_policy, MuJoCo1M_ENV_SET, seeds=_seeds)
@benchmark
def gaussian_mlp_policy_tf_ppo_benchmarks():
"""Run benchmarking experiments for Gaussian MLP Policy on TF-PPO."""
iterate_experiments(gaussian_mlp_policy, MuJoCo1M_ENV_SET, seeds=_seeds)
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/benchmark_q_functions.py
"""Benchmarking for q-functions."""
import random
from garage_benchmarks.experiments.q_functions import continuous_mlp_q_function
from garage_benchmarks.helper import benchmark, iterate_experiments
from garage_benchmarks.parameters import MuJoCo1M_ENV_SET
_seeds = random.sample(range(100), 5)
@benchmark
def continuous_mlp_q_function_tf_ddpg_benchmarks():
"""Run benchmarking experiments for Continuous MLP QFunction on TF-DDPG."""
iterate_experiments(continuous_mlp_q_function,
MuJoCo1M_ENV_SET,
seeds=_seeds)
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/helper.py
"""Helper functions for benchmarks.
A decorator and a function are provided for benchmarks. They essentially
take care of log-directory configuration and result plotting.
The decorator can be used with or without parameters to specify options for
plotting and JSON exporting used for automatic benchmarking.
If plot is enabled, iterate_experiments() can be called with different
experiment functions, each of which can have its specified env_ids. In
the end, figures will be generated per env_id.
Example:
@benchmark
def ppo_benchmarks():
iterate_experiments(ppo_garage_tf)
iterate_experiments(ppo_garage_pytorch)
@benchmark(plot=False)
def my_benchmarks():
iterate_experiments(trpo_garage_pytorch, env_ids=env_ids)
iterate_experiments(vpg_garage_pytorch, seeds=seeds)
Attributes:
    _plot (dict): A dictionary used for plotting figures,
        grouped by tasks. If no need to plot, _plot will be None.
_log_dir (str): Log directory for running the benchmarking.
_auto (bool): Global variable used to control whether to export
JSON files for automatic benchmarking.
"""
import csv
import functools
import json
import os
import pathlib
import random
from google.cloud import storage
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
_plot = None
_log_dir = None
_auto = False
_bucket = storage.Client().bucket('resl-garage-benchmarks')
def benchmark(exec_func=None, *, plot=True, auto=False):
"""Decorator for benchmark function.
Args:
exec_func (func): The experiment function.
        plot (bool): Whether the result of this run needs to be plotted.
            PNG files will be generated in the subfolder /plot.
        auto (bool): Whether this is automatic benchmarking. JSON files
            will be generated in the subfolder /auto.
Returns:
func: The wrapper function.
"""
if exec_func is None:
return functools.partial(benchmark, plot=plot, auto=auto)
@functools.wraps(exec_func)
def wrapper_func():
"""The wrapper function."""
# pylint: disable=global-statement
global _plot, _log_dir, _auto
_plot = {} if plot else None
plt.close('all')
_log_dir = _get_log_dir(exec_func.__name__)
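        # Suffix the log dir with a counter so re-running the same benchmark
        # does not overwrite results from an earlier run.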
if os.path.exists(_log_dir):
count = 1
while os.path.exists(_log_dir + '_' + str(count)):
count += 1
_log_dir = _log_dir + '_' + str(count)
if auto:
_auto = auto
auto_dir = os.path.join(_log_dir, 'auto')
os.makedirs(auto_dir)
exec_func()
if plot:
plot_dir = os.path.join(_log_dir, 'plot')
os.makedirs(plot_dir)
for env_id in _plot:
plt.figure(env_id)
plt.legend()
plt.xlabel(_plot[env_id]['xlabel'])
plt.ylabel(_plot[env_id]['ylabel'])
plt.title(env_id)
plt.savefig(plot_dir + '/' + env_id)
if auto:
_upload_to_gcp_storage(_log_dir)
return wrapper_func
def iterate_experiments(func,
env_ids,
seeds=None,
xcolumn='TotalEnvSteps',
xlabel='Total Environment Steps',
ycolumn='Evaluation/AverageReturn',
ylabel='Average Return'):
"""Iterate experiments for benchmarking over env_ids and seeds.
Args:
        func (func): The experiment function.
        env_ids (list[str]): List of environment ids.
        seeds (list[int]): List of seeds.
        xcolumn (str): Which csv column to plot on the x axis.
        xlabel (str): Label name for x axis.
        ycolumn (str): Which csv column to plot on the y axis.
        ylabel (str): Label name for y axis.
"""
func_name = func.__name__.replace('_', '-')
if seeds is None:
seeds = random.sample(range(100), 4)
for env_id in env_ids:
task_ys = []
if _plot is not None and env_id not in _plot:
_plot[env_id] = {'xlabel': xlabel, 'ylabel': ylabel}
for seed in seeds:
exp_name = func_name + '_' + env_id + '_' + str(seed)
sub_log_dir = os.path.join(_log_dir, exp_name)
tf.compat.v1.reset_default_graph()
func(dict(log_dir=sub_log_dir), env_id=env_id, seed=seed)
if _plot is not None or _auto:
xs, ys = _read_csv(sub_log_dir, xcolumn, ycolumn)
task_ys.append(ys)
if _plot is not None or _auto:
ys_mean = np.array(task_ys).mean(axis=0)
ys_std = np.array(task_ys).std(axis=0)
if _plot is not None:
plt.figure(env_id)
plt.plot(xs, ys_mean, label=func_name)
plt.fill_between(xs, (ys_mean - ys_std), (ys_mean + ys_std),
alpha=.1)
if _auto:
_export_to_json(env_id + '_' + func_name, xs, xlabel, ys_mean,
ylabel, ys_std)
def _get_log_dir(exec_func_name):
"""Get the log directory given the experiment name.
Args:
exec_func_name (str): The function name which runs benchmarks.
Returns:
str: Log directory.
"""
cwd = pathlib.Path.cwd()
return str(cwd.joinpath('data', 'local', 'benchmarks', exec_func_name))
def _read_csv(log_dir, xcolumn, ycolumn):
"""Read csv files and return xs and ys.
Args:
log_dir (str): Log directory for csv file.
        xcolumn (str): Which csv column to read for the x axis.
        ycolumn (str): Which csv column to read for the y axis.
Returns:
list: List of x axis points.
list: List of y axis points.
"""
xs, ys = [], []
with open(os.path.join(log_dir, 'progress.csv'), 'r') as csv_file:
for row in csv.DictReader(csv_file):
xs.append(float(row[xcolumn]))
ys.append(float(row[ycolumn]))
return xs, ys
def _export_to_json(json_name, xs, xlabel, ys, ylabel, ys_std):
"""Save selected csv column to JSON preparing for automatic benchmarking.
Args:
json_name (str): The JSON file name.
        xs (list): List of x axis points.
        xlabel (str): Label name for x axis.
        ys (np.array): List of y axis points.
        ylabel (str): Label name for y axis.
        ys_std (np.array): Standard deviation of the y axis, used to
            calculate the upper and lower bounds of the confidence interval.
"""
json_path = os.path.join(_log_dir, 'auto', json_name + '.json')
with open(json_path, 'w') as json_file:
json.dump(
dict(x=xs,
y=ys.tolist(),
y_min=(ys - ys_std).tolist(),
y_max=(ys + ys_std).tolist(),
xlabel=xlabel,
ylabel=ylabel), json_file)
def _upload_to_gcp_storage(exec_dir):
"""Upload all files to GCP storage under exec_dir folder.
Args:
exec_dir (str): The execution directory.
"""
exec_name = os.path.basename(exec_dir)
for folder_name in os.listdir(exec_dir):
folder_path = os.path.join(exec_dir, folder_name)
if not os.path.isfile(folder_path):
remote_folder = os.path.join(exec_name, folder_name)
for file_name in os.listdir(folder_path):
file_path = os.path.join(folder_path, file_name)
if os.path.isfile(file_path):
blob = _bucket.blob(os.path.join(remote_folder, file_name))
blob.upload_from_filename(file_path)
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/parameters.py
"""Global parameters for benchmarking."""
from baselines.bench import benchmarks
Fetch1M_ENV_SET = [
task['env_id'] for task in benchmarks.get_benchmark('Fetch1M')['tasks']
]
MuJoCo1M_ENV_SET = [
task['env_id'] for task in benchmarks.get_benchmark('Mujoco1M')['tasks']
]
PIXEL_ENV_SET = ['CubeCrash-v0', 'MemorizeDigits-v0']
STATE_ENV_SET = [
'LunarLander-v2',
'Assault-ramDeterministic-v4',
'Breakout-ramDeterministic-v4',
'ChopperCommand-ramDeterministic-v4',
'Tutankham-ramDeterministic-v4',
'CartPole-v1',
]
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/run_benchmarks.py
#!/usr/bin/env python
"""Script for running benchmarking.
Examples:
    # List all benchmark options
    garage_benchmark list
    # Run selected benchmarks
    garage_benchmark run b_1 b_2 ...
"""
import inspect
import click
from garage_benchmarks import benchmark_algos
from garage_benchmarks import benchmark_auto
from garage_benchmarks import benchmark_baselines
from garage_benchmarks import benchmark_policies
from garage_benchmarks import benchmark_q_functions
@click.group()
def cli():
"""The main command group."""
@click.command()
def list(): # pylint: disable=redefined-builtin
"""List all benchmarks."""
_echo_run_names('Algorithms', _get_runs_dict(benchmark_algos))
_echo_run_names('Policies', _get_runs_dict(benchmark_policies))
_echo_run_names('Baselines', _get_runs_dict(benchmark_baselines))
_echo_run_names('Q Functions', _get_runs_dict(benchmark_q_functions))
_echo_run_names('Automatic benchmarking', _get_runs_dict(benchmark_auto))
@click.command()
@click.argument('names', nargs=-1)
def run(names):
"""Run selected benchmarks.
Args:
names (tuple): Benchmark names.
Raises:
BadParameter: if any run name is invalid or duplicated.
"""
if not names:
raise click.BadParameter('Empty names!')
if len(names) != len(set(names)):
raise click.BadParameter('Duplicate names!')
options = _get_all_options()
for name in names:
if name not in options:
raise click.BadParameter(
'Invalid run name! Make sure every name can be found in '
'`garage_benchmark list`!')
for name in names:
options[name]()
cli.add_command(list)
cli.add_command(run)
def _get_all_options():
"""Return a dict containing all benchmark options.
Dict of (str: obj) representing benchmark name and its function object.
Returns:
dict: Benchmark options.
"""
d = {}
d.update(_get_runs_dict(benchmark_algos))
d.update(_get_runs_dict(benchmark_policies))
d.update(_get_runs_dict(benchmark_baselines))
d.update(_get_runs_dict(benchmark_q_functions))
d.update(_get_runs_dict(benchmark_auto))
return d
def _get_runs_dict(module):
"""Return a dict containing benchmark options of the module.
Dict of (str: obj) representing benchmark name and its function object.
Args:
module (object): Module object.
Returns:
dict: Benchmark options of the module.
"""
d = {}
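    # Any module-level function whose name ends with 'benchmarks' is treated
    # as a runnable benchmark entry point.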
for name, obj in inspect.getmembers(module):
if inspect.isfunction(obj) and name.endswith('benchmarks'):
d[name] = obj
return d
def _echo_run_names(header, d):
"""Echo run names to the command line.
Args:
header (str): The header name.
d (dict): The dict containing benchmark options.
"""
click.echo('-----' + header + '-----')
for name in d:
click.echo(name)
click.echo()
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/__init__.py
"""Benchmarking experiments."""
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/__init__.py
"""Benchmarking experiments for algorithms."""
from garage_benchmarks.experiments.algos.ddpg_garage_tf import ddpg_garage_tf
from garage_benchmarks.experiments.algos.her_garage_tf import her_garage_tf
from garage_benchmarks.experiments.algos.ppo_garage_pytorch import (
ppo_garage_pytorch)
from garage_benchmarks.experiments.algos.ppo_garage_tf import ppo_garage_tf
from garage_benchmarks.experiments.algos.td3_garage_tf import td3_garage_tf
from garage_benchmarks.experiments.algos.trpo_garage_pytorch import (
trpo_garage_pytorch)
from garage_benchmarks.experiments.algos.trpo_garage_tf import trpo_garage_tf
from garage_benchmarks.experiments.algos.vpg_garage_pytorch import (
vpg_garage_pytorch)
from garage_benchmarks.experiments.algos.vpg_garage_tf import vpg_garage_tf
__all__ = [
'ddpg_garage_tf', 'her_garage_tf', 'ppo_garage_pytorch', 'ppo_garage_tf',
'td3_garage_tf', 'trpo_garage_pytorch', 'trpo_garage_tf',
'vpg_garage_pytorch', 'vpg_garage_tf'
]
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/ddpg_garage_tf.py
"""A regression test for automatic benchmarking garage-TensorFlow-DDPG."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import PathBuffer
from garage.tf.algos import DDPG
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
hyper_parameters = {
'policy_lr': 1e-4,
'qf_lr': 1e-3,
'policy_hidden_sizes': [64, 64],
'qf_hidden_sizes': [64, 64],
'n_epochs': 500,
'steps_per_epoch': 20,
'n_rollout_steps': 100,
'n_train_steps': 50,
'discount': 0.9,
'tau': 1e-2,
'replay_buffer_size': int(1e6),
'sigma': 0.2
}
@wrap_experiment
def ddpg_garage_tf(ctxt, env_id, seed):
"""Create garage TensorFlow DDPG model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = ContinuousMLPPolicy(
env_spec=env.spec,
hidden_sizes=hyper_parameters['policy_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(
env.spec, policy, sigma=hyper_parameters['sigma'])
qf = ContinuousMLPQFunction(
env_spec=env.spec,
hidden_sizes=hyper_parameters['qf_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu)
replay_buffer = PathBuffer(
capacity_in_transitions=hyper_parameters['replay_buffer_size'])
algo = DDPG(env_spec=env.spec,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
steps_per_epoch=hyper_parameters['steps_per_epoch'],
policy_lr=hyper_parameters['policy_lr'],
qf_lr=hyper_parameters['qf_lr'],
target_update_tau=hyper_parameters['tau'],
n_train_steps=hyper_parameters['n_train_steps'],
discount=hyper_parameters['discount'],
min_buffer_size=int(1e4),
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer)
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['n_rollout_steps'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/her_garage_tf.py
"""A regression test for automatic benchmarking garage-TensorFlow-HER."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import HERReplayBuffer
from garage.tf.algos import DDPG
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
hyper_parameters = {
'policy_lr': 1e-3,
'qf_lr': 1e-3,
'policy_hidden_sizes': [256, 256, 256],
'qf_hidden_sizes': [256, 256, 256],
'n_epochs': 50,
'steps_per_epoch': 20,
'n_rollout_steps': 100,
'n_train_steps': 40,
'discount': 0.9,
'tau': 0.05,
'replay_buffer_size': int(1e6),
'sigma': 0.2,
}
@wrap_experiment
def her_garage_tf(ctxt, env_id, seed):
"""Create garage TensorFlow HER model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = ContinuousMLPPolicy(
env_spec=env.spec,
hidden_sizes=hyper_parameters['policy_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh,
)
exploration_policy = AddOrnsteinUhlenbeckNoise(
env_spec=env.spec, policy=policy, sigma=hyper_parameters['sigma'])
qf = ContinuousMLPQFunction(
env_spec=env.spec,
hidden_sizes=hyper_parameters['qf_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu,
)
replay_buffer = HERReplayBuffer(
env_spec=env.spec,
capacity_in_transitions=hyper_parameters['replay_buffer_size'],
replay_k=4,
reward_fn=env.compute_reward,
)
algo = DDPG(
env_spec=env.spec,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
steps_per_epoch=hyper_parameters['steps_per_epoch'],
policy_lr=hyper_parameters['policy_lr'],
qf_lr=hyper_parameters['qf_lr'],
target_update_tau=hyper_parameters['tau'],
n_train_steps=hyper_parameters['n_train_steps'],
discount=hyper_parameters['discount'],
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer,
buffer_batch_size=256,
)
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['n_rollout_steps'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/ppo_garage_pytorch.py
"""A regression test for automatic benchmarking garage-PyTorch-PPO."""
import gym
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import PPO as PyTorch_PPO
from garage.torch.optimizers import OptimizerWrapper
from garage.torch.policies import GaussianMLPPolicy as PyTorch_GMP
from garage.torch.value_functions import GaussianMLPValueFunction
hyper_parameters = {
'n_epochs': 500,
'max_path_length': 100,
'batch_size': 1024,
}
@wrap_experiment
def ppo_garage_pytorch(ctxt, env_id, seed):
"""Create garage PyTorch PPO model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
runner = LocalRunner(ctxt)
env = GarageEnv(normalize(gym.make(env_id)))
policy = PyTorch_GMP(env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
policy_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=2.5e-4)),
policy,
max_optimization_epochs=10,
minibatch_size=64)
vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=2.5e-4)),
value_function,
max_optimization_epochs=10,
minibatch_size=64)
algo = PyTorch_PPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
max_path_length=hyper_parameters['max_path_length'],
discount=0.99,
gae_lambda=0.95,
center_adv=True,
lr_clip_range=0.2)
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/ppo_garage_tf.py
"""A regression test for automatic benchmarking garage-TensorFlow-PPO."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.tf.algos import PPO as TF_PPO
from garage.tf.baselines import GaussianMLPBaseline as TF_GMB
from garage.tf.optimizers import FirstOrderOptimizer
from garage.tf.policies import GaussianMLPPolicy as TF_GMP
hyper_parameters = {
'n_epochs': 500,
'max_path_length': 100,
'batch_size': 1024,
}
@wrap_experiment
def ppo_garage_tf(ctxt, env_id, seed):
"""Create garage TensorFlow PPO model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = TF_GMP(
env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = TF_GMB(
env_spec=env.spec,
regressor_args=dict(
hidden_sizes=(32, 32),
use_trust_region=False,
optimizer=FirstOrderOptimizer,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=3e-4,
),
),
)
algo = TF_PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=hyper_parameters['max_path_length'],
discount=0.99,
gae_lambda=0.95,
center_adv=True,
lr_clip_range=0.2,
optimizer_args=dict(batch_size=32,
max_epochs=10,
learning_rate=3e-4,
verbose=True))
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/td3_garage_tf.py
"""A regression test for automatic benchmarking garage-TensorFlow-TD3."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import AddGaussianNoise
from garage.replay_buffer import PathBuffer
from garage.tf.algos import TD3
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
hyper_parameters = {
'policy_lr': 1e-3,
'qf_lr': 1e-3,
'policy_hidden_sizes': [400, 300],
'qf_hidden_sizes': [400, 300],
'n_epochs': 8,
'steps_per_epoch': 20,
'n_rollout_steps': 250,
'n_train_steps': 1,
'discount': 0.99,
'tau': 0.005,
'replay_buffer_size': int(1e6),
'sigma': 0.1,
'smooth_return': False,
'buffer_batch_size': 100,
'min_buffer_size': int(1e4)
}
@wrap_experiment
def td3_garage_tf(ctxt, env_id, seed):
"""Create garage TensorFlow TD3 model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = ContinuousMLPPolicy(
env_spec=env.spec,
hidden_sizes=hyper_parameters['policy_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddGaussianNoise(
env.spec,
policy,
max_sigma=hyper_parameters['sigma'],
min_sigma=hyper_parameters['sigma'])
qf = ContinuousMLPQFunction(
name='ContinuousMLPQFunction',
env_spec=env.spec,
hidden_sizes=hyper_parameters['qf_hidden_sizes'],
action_merge_layer=0,
hidden_nonlinearity=tf.nn.relu)
qf2 = ContinuousMLPQFunction(
name='ContinuousMLPQFunction2',
env_spec=env.spec,
hidden_sizes=hyper_parameters['qf_hidden_sizes'],
action_merge_layer=0,
hidden_nonlinearity=tf.nn.relu)
replay_buffer = PathBuffer(
capacity_in_transitions=hyper_parameters['replay_buffer_size'])
td3 = TD3(env.spec,
policy=policy,
qf=qf,
qf2=qf2,
replay_buffer=replay_buffer,
steps_per_epoch=hyper_parameters['steps_per_epoch'],
policy_lr=hyper_parameters['policy_lr'],
qf_lr=hyper_parameters['qf_lr'],
target_update_tau=hyper_parameters['tau'],
n_train_steps=hyper_parameters['n_train_steps'],
discount=hyper_parameters['discount'],
smooth_return=hyper_parameters['smooth_return'],
min_buffer_size=hyper_parameters['min_buffer_size'],
buffer_batch_size=hyper_parameters['buffer_batch_size'],
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer)
runner.setup(td3, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['n_rollout_steps'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/trpo_garage_pytorch.py
"""A regression test for automatic benchmarking garage-PyTorch-TRPO."""
import gym
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import TRPO as PyTorch_TRPO
from garage.torch.policies import GaussianMLPPolicy as PyTorch_GMP
from garage.torch.value_functions import GaussianMLPValueFunction
hyper_parameters = {
'hidden_sizes': [32, 32],
'max_kl': 0.01,
'gae_lambda': 0.97,
'discount': 0.99,
'max_path_length': 100,
'n_epochs': 999,
'batch_size': 1024,
}
@wrap_experiment
def trpo_garage_pytorch(ctxt, env_id, seed):
"""Create garage PyTorch TRPO model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
runner = LocalRunner(ctxt)
env = GarageEnv(normalize(gym.make(env_id)))
policy = PyTorch_GMP(env.spec,
hidden_sizes=hyper_parameters['hidden_sizes'],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = PyTorch_TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=hyper_parameters['max_path_length'],
discount=hyper_parameters['discount'],
gae_lambda=hyper_parameters['gae_lambda'])
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/trpo_garage_tf.py
"""A regression test for automatic benchmarking garage-TensorFlow-TRPO."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import GaussianMLPPolicy
hyper_parameters = {
'hidden_sizes': [32, 32],
'max_kl': 0.01,
'gae_lambda': 0.97,
'discount': 0.99,
'max_path_length': 100,
'n_epochs': 999,
'batch_size': 1024,
}
@wrap_experiment
def trpo_garage_tf(ctxt, env_id, seed):
"""Create garage Tensorflow TROI model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=hyper_parameters['hidden_sizes'],
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=hyper_parameters['max_path_length'],
discount=hyper_parameters['discount'],
gae_lambda=hyper_parameters['gae_lambda'],
max_kl_step=hyper_parameters['max_kl'])
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/vpg_garage_pytorch.py
"""A regression test for automatic benchmarking garage-PyTorch-VPG."""
import gym
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import VPG as PyTorch_VPG
from garage.torch.optimizers import OptimizerWrapper
from garage.torch.policies import GaussianMLPPolicy as PyTorch_GMP
from garage.torch.value_functions import GaussianMLPValueFunction
hyper_parameters = {
'hidden_sizes': [64, 64],
'center_adv': True,
'learning_rate': 1e-2,
'discount': 0.99,
'n_epochs': 250,
'max_path_length': 100,
'batch_size': 2048,
}
@wrap_experiment
def vpg_garage_pytorch(ctxt, env_id, seed):
"""Create garage PyTorch VPG model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
runner = LocalRunner(ctxt)
env = GarageEnv(normalize(gym.make(env_id)))
policy = PyTorch_GMP(env.spec,
hidden_sizes=hyper_parameters['hidden_sizes'],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
policy_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=2.5e-4)),
policy,
max_optimization_epochs=10,
minibatch_size=64)
vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=2.5e-4)),
value_function,
max_optimization_epochs=10,
minibatch_size=64)
algo = PyTorch_VPG(env_spec=env.spec,
policy=policy,
value_function=value_function,
policy_optimizer=policy_optimizer,
vf_optimizer=vf_optimizer,
max_path_length=hyper_parameters['max_path_length'],
discount=hyper_parameters['discount'],
center_adv=hyper_parameters['center_adv'])
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/algos/vpg_garage_tf.py
"""A regression test for automatic benchmarking garage-TensorFlow-VPG."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import VPG as TF_VPG
from garage.tf.policies import GaussianMLPPolicy as TF_GMP
hyper_parameters = {
'hidden_sizes': [64, 64],
'center_adv': True,
'learning_rate': 1e-2,
'discount': 0.99,
'n_epochs': 250,
'max_path_length': 100,
'batch_size': 2048,
}
@wrap_experiment
def vpg_garage_tf(ctxt, env_id, seed):
"""Create garage TensorFlow VPG model and training.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = TF_GMP(
env_spec=env.spec,
hidden_sizes=hyper_parameters['hidden_sizes'],
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TF_VPG(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=hyper_parameters['max_path_length'],
discount=hyper_parameters['discount'],
center_adv=hyper_parameters['center_adv'],
optimizer_args=dict(
learning_rate=hyper_parameters['learning_rate'], ))
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/baselines/__init__.py
"""Benchmarking experiments for baselines."""
from garage_benchmarks.experiments.baselines.continuous_mlp_baseline import (
continuous_mlp_baseline)
from garage_benchmarks.experiments.baselines.gaussian_cnn_baseline import (
gaussian_cnn_baseline)
from garage_benchmarks.experiments.baselines.gaussian_mlp_baseline import (
gaussian_mlp_baseline)
__all__ = [
'continuous_mlp_baseline', 'gaussian_cnn_baseline', 'gaussian_mlp_baseline'
]
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/baselines/continuous_mlp_baseline.py
"""Benchmarking experiment of the ContinuousMLPBaseline."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.tf.algos import PPO
from garage.tf.baselines import ContinuousMLPBaseline
from garage.tf.policies import GaussianLSTMPolicy
hyper_params = {
'policy_hidden_sizes': 32,
'hidden_nonlinearity': tf.nn.tanh,
'n_envs': 8,
'n_epochs': 20,
'n_rollout_steps': 2048,
'discount': 0.99,
'max_path_length': 100,
'gae_lambda': 0.95,
'lr_clip_range': 0.2,
'policy_ent_coeff': 0.02,
'entropy_method': 'max',
'center_adv': False,
'num_proc': 8
}
@wrap_experiment
def continuous_mlp_baseline(ctxt, env_id, seed):
"""Create Continuous MLP Baseline on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt, max_cpus=hyper_params['num_proc']) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = GaussianLSTMPolicy(
env_spec=env.spec,
hidden_dim=hyper_params['policy_hidden_sizes'],
hidden_nonlinearity=hyper_params['hidden_nonlinearity'],
)
baseline = ContinuousMLPBaseline(
env_spec=env.spec,
regressor_args=dict(hidden_sizes=(64, 64)),
)
algo = PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=hyper_params['max_path_length'],
discount=hyper_params['discount'],
gae_lambda=hyper_params['gae_lambda'],
lr_clip_range=hyper_params['lr_clip_range'],
entropy_method=hyper_params['entropy_method'],
policy_ent_coeff=hyper_params['policy_ent_coeff'],
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
center_adv=hyper_params['center_adv'],
stop_entropy_gradient=True)
runner.setup(algo,
env,
sampler_args=dict(n_envs=hyper_params['n_envs']))
runner.train(n_epochs=hyper_params['n_epochs'],
batch_size=hyper_params['n_rollout_steps'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/baselines/gaussian_cnn_baseline.py
"""Benchmarking experiment of the GaussianCNNBaseline."""
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianCNNBaseline
from garage.tf.policies import CategoricalCNNPolicy
params = {
'conv_filters': (
(32, (5, 5)),
(64, (3, 3)),
(64, (2, 2)),
),
'conv_strides': (4, 2, 1),
'conv_pad': 'VALID',
'hidden_sizes': (256, ),
'n_epochs': 1000,
'batch_size': 2048,
'use_trust_region': True
} # yapf: disable
@wrap_experiment
def gaussian_cnn_baseline(ctxt, env_id, seed):
"""Create Gaussian CNN Baseline on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt, max_cpus=12) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = CategoricalCNNPolicy(env_spec=env.spec,
conv_filters=params['conv_filters'],
conv_strides=params['conv_strides'],
conv_pad=params['conv_pad'],
hidden_sizes=params['hidden_sizes'])
baseline = GaussianCNNBaseline(
env_spec=env.spec,
regressor_args=dict(filters=params['conv_filters'],
strides=params['conv_strides'],
padding=params['conv_pad'],
hidden_sizes=params['hidden_sizes'],
use_trust_region=params['use_trust_region']))
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
flatten_input=False,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
)
runner.setup(algo, env)
runner.train(n_epochs=params['n_epochs'],
batch_size=params['batch_size'])
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/baselines/gaussian_mlp_baseline.py
"""Benchmarking experiment of the GaussianMLPBaseline."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.optimizers import FirstOrderOptimizer
from garage.tf.policies import GaussianMLPPolicy
@wrap_experiment
def gaussian_mlp_baseline(ctxt, env_id, seed):
"""Create Gaussian MLP Baseline on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
regressor_args=dict(
hidden_sizes=(64, 64),
use_trust_region=False,
optimizer=FirstOrderOptimizer,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
),
)
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
)
runner.setup(algo, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=5, batch_size=2048)
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/__init__.py
"""Benchmarking experiments for policies."""
from garage_benchmarks.experiments.policies.categorical_cnn_policy import (
categorical_cnn_policy)
from garage_benchmarks.experiments.policies.categorical_gru_policy import (
categorical_gru_policy)
from garage_benchmarks.experiments.policies.categorical_lstm_policy import (
categorical_lstm_policy)
from garage_benchmarks.experiments.policies.categorical_mlp_policy import (
categorical_mlp_policy)
from garage_benchmarks.experiments.policies.continuous_mlp_policy import (
continuous_mlp_policy)
from garage_benchmarks.experiments.policies.gaussian_gru_policy import (
gaussian_gru_policy)
from garage_benchmarks.experiments.policies.gaussian_lstm_policy import (
gaussian_lstm_policy)
from garage_benchmarks.experiments.policies.gaussian_mlp_policy import (
gaussian_mlp_policy)
__all__ = [
'categorical_cnn_policy', 'categorical_gru_policy',
'categorical_lstm_policy', 'categorical_mlp_policy',
'continuous_mlp_policy', 'gaussian_gru_policy', 'gaussian_lstm_policy',
'gaussian_mlp_policy'
]
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/categorical_cnn_policy.py
"""Benchmarking experiment of the CategoricalCNNPolicy."""
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianCNNBaseline
from garage.tf.policies import CategoricalCNNPolicy
hyper_params = {
'conv_filters': (
(32, (5, 5)),
(64, (3, 3)),
(64, (2, 2)),
),
'conv_strides': (4, 2, 1),
'conv_pad': 'VALID',
'hidden_sizes': (256, ),
'n_epochs': 3,
'batch_size': 2048,
'use_trust_region': True
} # yapf: disable
@wrap_experiment
def categorical_cnn_policy(ctxt, env_id, seed):
"""Create Categorical CNN Policy on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt, max_cpus=12) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = CategoricalCNNPolicy(
env_spec=env.spec,
conv_filters=hyper_params['conv_filters'],
conv_strides=hyper_params['conv_strides'],
conv_pad=hyper_params['conv_pad'],
hidden_sizes=hyper_params['hidden_sizes'])
baseline = GaussianCNNBaseline(
env_spec=env.spec,
regressor_args=dict(
filters=hyper_params['conv_filters'],
strides=hyper_params['conv_strides'],
padding=hyper_params['conv_pad'],
hidden_sizes=hyper_params['hidden_sizes'],
use_trust_region=hyper_params['use_trust_region']))
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
flatten_input=False,
)
runner.setup(algo, env)
runner.train(n_epochs=hyper_params['n_epochs'],
batch_size=hyper_params['batch_size'])
| 2,539 | 30.75 | 67 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/categorical_gru_policy.py | """Benchmarking experiment of the CategoricalGRUPolicy."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import PPO
from garage.tf.policies import CategoricalGRUPolicy
@wrap_experiment
def categorical_gru_policy(ctxt, env_id, seed):
"""Create Categorical CNN Policy on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt, max_cpus=12) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = CategoricalGRUPolicy(
env_spec=env.spec,
hidden_dim=32,
hidden_nonlinearity=tf.nn.tanh,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
)
runner.setup(algo, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=488, batch_size=2048)
| 1,685 | 28.578947 | 66 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/categorical_lstm_policy.py | """Benchmarking experiment of the CategoricalLSTMPolicy."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import PPO
from garage.tf.policies import CategoricalLSTMPolicy
@wrap_experiment
def categorical_lstm_policy(ctxt, env_id, seed):
"""Create Categorical LSTM Policy on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt, max_cpus=12) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = CategoricalLSTMPolicy(
env_spec=env.spec,
hidden_dim=32,
hidden_nonlinearity=tf.nn.tanh,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
)
runner.setup(algo, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=488, batch_size=2048)
| 1,690 | 28.666667 | 66 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/categorical_mlp_policy.py | """Benchmarking experiment of the CategoricalMLPPolicy."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import PPO
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def categorical_mlp_policy(ctxt, env_id, seed):
"""Create Categorical MLP Policy on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt, max_cpus=12) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_nonlinearity=tf.nn.tanh,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
name='CategoricalMLPPolicyBenchmark')
runner.setup(algo, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=5, batch_size=2048)
| 1,774 | 31.272727 | 66 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/continuous_mlp_policy.py | """Benchmarking experiment of the ContinuousMLPPolicy."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import PathBuffer
from garage.tf.algos import DDPG
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
hyper_params = {
'policy_lr': 1e-4,
'qf_lr': 1e-3,
'policy_hidden_sizes': [64, 64],
'qf_hidden_sizes': [64, 64],
'n_epochs': 300,
'steps_per_epoch': 20,
'n_rollout_steps': 100,
'n_train_steps': 50,
'discount': 0.9,
'tau': 1e-2,
'replay_buffer_size': int(1e6),
'sigma': 0.2,
}
@wrap_experiment
def continuous_mlp_policy(ctxt, env_id, seed):
"""Create Continuous MLP Policy on TF-DDPG.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt, max_cpus=12) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = ContinuousMLPPolicy(
env_spec=env.spec,
name='ContinuousMLPPolicy',
hidden_sizes=hyper_params['policy_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
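        # DDPG's policy is deterministic, so exploration comes from adding
        # temporally correlated Ornstein-Uhlenbeck noise to the actions;
        # sigma sets the noise scale.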
exploration_policy = AddOrnsteinUhlenbeckNoise(
env.spec, policy, sigma=hyper_params['sigma'])
qf = ContinuousMLPQFunction(
env_spec=env.spec,
hidden_sizes=hyper_params['qf_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu,
name='ContinuousMLPQFunction')
replay_buffer = PathBuffer(
capacity_in_transitions=hyper_params['replay_buffer_size'])
ddpg = DDPG(env_spec=env.spec,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
steps_per_epoch=hyper_params['steps_per_epoch'],
policy_lr=hyper_params['policy_lr'],
qf_lr=hyper_params['qf_lr'],
target_update_tau=hyper_params['tau'],
n_train_steps=hyper_params['n_train_steps'],
discount=hyper_params['discount'],
min_buffer_size=int(1e4),
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer)
runner.setup(ddpg, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=hyper_params['n_epochs'],
batch_size=hyper_params['n_rollout_steps'])
| 3,007 | 34.388235 | 71 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/gaussian_gru_policy.py | """Benchmarking experiment of the GaussianGRUPolicy."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.optimizers import FirstOrderOptimizer
from garage.tf.policies import GaussianGRUPolicy
@wrap_experiment
def gaussian_gru_policy(ctxt, env_id, seed):
"""Create Gaussian GRU Policy on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = GaussianGRUPolicy(
env_spec=env.spec,
hidden_dim=32,
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
regressor_args=dict(
hidden_sizes=(64, 64),
use_trust_region=False,
optimizer=FirstOrderOptimizer,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
),
)
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
)
runner.setup(algo, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=5, batch_size=2048)
| 2,105 | 28.661972 | 66 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/gaussian_lstm_policy.py | """Benchmarking experiment of the GaussianLSTMPolicy."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.optimizers import FirstOrderOptimizer
from garage.tf.policies import GaussianLSTMPolicy
@wrap_experiment
def gaussian_lstm_policy(ctxt, env_id, seed):
"""Create Gaussian LSTM Policy on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = GaussianLSTMPolicy(
env_spec=env.spec,
hidden_dim=32,
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
regressor_args=dict(
hidden_sizes=(64, 64),
use_trust_region=False,
optimizer=FirstOrderOptimizer,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
),
)
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
)
runner.setup(algo, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=5, batch_size=2048)
| 2,110 | 28.732394 | 66 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/policies/gaussian_mlp_policy.py | """Benchmarking experiment of the GaussianMLPPolicy."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianMLPBaseline
from garage.tf.optimizers import FirstOrderOptimizer
from garage.tf.policies import GaussianMLPPolicy
@wrap_experiment
def gaussian_mlp_policy(ctxt, env_id, seed):
"""Create Gaussian MLP Policy on TF-PPO.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
regressor_args=dict(
hidden_sizes=(64, 64),
use_trust_region=False,
optimizer=FirstOrderOptimizer,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
),
)
algo = PPO(
env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
)
runner.setup(algo, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=5, batch_size=2048)
| 2,113 | 28.774648 | 66 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/q_functions/__init__.py | """Benchmarking experiments for Q-functions."""
from garage_benchmarks.experiments.q_functions.continuous_mlp_q_function import ( # noqa: E501
continuous_mlp_q_function)
__all__ = ['continuous_mlp_q_function']
| 216 | 35.166667 | 95 | py |
CSD-locomotion | CSD-locomotion-master/garaged/benchmarks/src/garage_benchmarks/experiments/q_functions/continuous_mlp_q_function.py | """Benchmarking experiment of the ContinuousMLPQFunction."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import deterministic
from garage.experiment import LocalTFRunner
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import PathBuffer
from garage.tf.algos import DDPG
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
hyper_params = {
'policy_lr': 1e-4,
'qf_lr': 1e-3,
'policy_hidden_sizes': [64, 64],
'qf_hidden_sizes': [64, 64],
'n_epochs': 300,
'steps_per_epoch': 20,
'n_rollout_steps': 100,
'n_train_steps': 50,
'discount': 0.9,
'tau': 1e-2,
'replay_buffer_size': int(1e6),
'sigma': 0.2,
}
@wrap_experiment
def continuous_mlp_q_function(ctxt, env_id, seed):
"""Create Continuous MLP QFunction on TF-DDPG.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the
snapshotter.
env_id (str): Environment id of the task.
seed (int): Random positive integer for the trial.
"""
deterministic.set_seed(seed)
with LocalTFRunner(ctxt, max_cpus=12) as runner:
env = GarageEnv(normalize(gym.make(env_id)))
policy = ContinuousMLPPolicy(
env_spec=env.spec,
name='ContinuousMLPPolicy',
hidden_sizes=hyper_params['policy_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(
env.spec, policy, sigma=hyper_params['sigma'])
qf = ContinuousMLPQFunction(
env_spec=env.spec,
hidden_sizes=hyper_params['qf_hidden_sizes'],
hidden_nonlinearity=tf.nn.relu,
name='ContinuousMLPQFunction')
replay_buffer = PathBuffer(
capacity_in_transitions=hyper_params['replay_buffer_size'])
ddpg = DDPG(env_spec=env.spec,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
steps_per_epoch=hyper_params['steps_per_epoch'],
policy_lr=hyper_params['policy_lr'],
qf_lr=hyper_params['qf_lr'],
target_update_tau=hyper_params['tau'],
n_train_steps=hyper_params['n_train_steps'],
discount=hyper_params['discount'],
min_buffer_size=int(1e4),
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer)
runner.setup(ddpg, env, sampler_args=dict(n_envs=12))
runner.train(n_epochs=hyper_params['n_epochs'],
batch_size=hyper_params['n_rollout_steps'])
| 3,017 | 34.505882 | 71 | py |
CSD-locomotion | CSD-locomotion-master/garaged/docker/README.md | # Using Docker to run garage
Currently there are two types of garage images:
- headless: garage without environment visualization.
- nvidia: garage with environment visualization using an NVIDIA graphics
card.
## Headless image
If you already have a copy of garage, proceed to the subsection
"Build and run the image"; otherwise, keep reading.
To run an example launcher in the container, execute:
```
docker run -it --rm rlworkgroup/garage-headless python examples/tf/trpo_cartpole.py
```
To run environments that use MuJoCo, pass the contents of your MuJoCo key
through an environment variable named MJKEY in the same docker-run command,
e.g. using `cat`. For example, if your key is at `~/.mujoco/mjkey.txt`, execute:
```
docker run \
-it \
--rm \
-e MJKEY="$(cat ~/.mujoco/mjkey.txt)" \
rlworkgroup/garage-headless python examples/tf/trpo_swimmer.py
```
To save the experiment data generated in the container, specify a path on
your host computer where the files will be saved, using the `-v` argument
of the docker-run command. For example, if the path you want to use is
`/home/tmp/data`, execute:
```
docker run \
-it \
--rm \
-v /home/tmp/data:/root/code/garage/data \
rlworkgroup/garage-headless python examples/tf/trpo_cartpole.py
```
This binds a volume between your host path and the path in garage at
`/root/code/garage/data`.
### Build and run the image
To build the headless image, first clone this repository, move to the root
folder of your local repository and then execute:
```
make build-headless
```
To build and run the container, execute:
```
make run-headless RUN_CMD="python examples/tf/trpo_cartpole.py"
```
Here, RUN_CMD specifies the command to run in the container.
The previous command adds a volume from the data folder inside your cloned
garage repository to the data folder in the garage container, so the results
of any experiment run in the container are saved in the data folder of your
cloned repository. The data is saved in a folder named after the container
that generated it, which by default is the name of the image type the
container is based on plus the date and time the container was launched.
If you want to specify another name for the container, do so with the variable
`CONTAINER_NAME`:
```
make run-headless RUN_CMD="..." CONTAINER_NAME="my_container"
```
If you need to use MuJoCo, place your key at `~/.mujoco/mjkey.txt` or pass
the corresponding path through the MJKEY_PATH variable:
```
make run-headless RUN_CMD="..." MJKEY_PATH="/home/user/mjkey.txt"
```
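The variables above can be combined in a single invocation; the command,
container name and key path below are only illustrative:
```
make run-headless \
    RUN_CMD="python examples/tf/trpo_swimmer.py" \
    CONTAINER_NAME="my_container" \
    MJKEY_PATH="/home/user/mjkey.txt"
```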
If you need to pass additional arguments to the make commands, you can use
the variable ADD_ARGS, for example:
```
make build-headless ADD_ARGS="--build-arg MY_VAR=123"
make run-headless ADD_ARGS="-e MY_VAR=123"
```
#### Prerequisites
Be aware of the following prerequisites to build the image.
- Install [Docker CE](https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-docker-ce). Tested
on version 18.09.0.
- Install [Docker Compose](https://docs.docker.com/compose/install/#install-compose). Tested
on version 1.23.2.
Tested on Ubuntu 16.04. It's recommended to use the versions indicated above
for docker-ce and docker-compose.
## NVIDIA image
The same commands for the headless image mentioned above apply to the NVIDIA
image, except that the image name is defined by `rlworkgroup/garage-nvidia`.
For example, to execute a launcher file:
```
docker run -it --rm rlworkgroup/garage-nvidia python examples/tf/trpo_cartpole.py
```
### Build and run the image
The same rules for the headless image apply here, except that the target names
are the following:
```
make build-nvidia
make run-nvidia
```
#### Prerequisites
In addition to the prerequisites for the headless image, make sure to:
- Install the latest NVIDIA driver, tested
on [nvidia-390](https://tecadmin.net/install-latest-nvidia-drivers-ubuntu/)
- [Install nvidia-docker2](https://github.com/NVIDIA/nvidia-docker#ubuntu-140416041804-debian-jessiestretch)
Tested on Ubuntu 16.04.
| 4,049 | 32.471074 | 108 | md |
CSD-locomotion | CSD-locomotion-master/garaged/docs/conf.py | # -*- coding: utf-8 -*-
# flake8: noqa
# pylint: skip-file
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
from recommonmark.parser import CommonMarkParser
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# For sphinx-apidoc
sys.path.insert(0, os.path.abspath('../src/'))
# Get version info dynamically
with open('../VERSION') as v:
version_ = v.read().strip()
add_module_names = False
# Auto-generate API documentation for readthedocs.org
# See https://github.com/rtfd/readthedocs.org/issues/1139#issuecomment-398083449 # noqa: E501
def run_apidoc(_):
ignore_paths = []
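    # sphinx-apidoc flags: -f forces overwriting of existing files, -e puts
    # each module on its own page, -M lists modules before submodules, and
    # -o sets the output directory.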
argv = [
'-f',
# '-T',
'-e',
'-M',
'-o', './_apidoc',
'../src/'
] + ignore_paths # yapf: disable
try:
# Sphinx 1.7+
from sphinx.ext import apidoc
apidoc.main(argv)
except ImportError:
# Sphinx 1.6 (and earlier)
from sphinx import apidoc
argv.insert(0, apidoc.__file__)
apidoc.main(argv)
def setup(app):
app.connect('builder-inited', run_apidoc)
# -- Project information -----------------------------------------------------
project = 'garage'
copyright = '2019, garage contributors'
author = 'garage contributors'
# The short X.Y version.
version = version_
# The full version, including alpha/beta/rc tags.
release = version_
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Markdown parsing
source_parsers = {
'.md': CommonMarkParser,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# See https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
html_css_files = [
'theme_overrides.css', # override wide tables in RTD theme
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'garagedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'garage.tex', 'garage Documentation', 'garage contributors',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'garage', 'garage Documentation', [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'garage', 'garage Documentation', author, 'garage',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autodoc_mock_imports = [
'dm_control', 'glfw', 'mujoco_py', 'ray', 'torch', 'torchvision'
]
| 11,105 | 30.372881 | 94 | py |
CSD-locomotion | CSD-locomotion-master/garaged/docs/_static/theme_overrides.css | /* override table width restrictions*/
@media screen and (min-width: 767px) {
.wy-table-responsive table td {
/* !important prevents the common CSS stylesheets from overriding
this as on RTD they are loaded after this stylesheet */
white-space: normal !important;
}
.wy-table-responsive {
overflow: visible !important;
}
}
| 364 | 25.071429 | 71 | css |
CSD-locomotion | CSD-locomotion-master/garaged/examples/sim_policy.py | #!/usr/bin/env python3
"""Simulates pre-learned policy."""
import argparse
import sys
import joblib
import tensorflow as tf
from garage.sampler.utils import rollout
def query_yes_no(question, default='yes'):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='path to the snapshot file')
parser.add_argument('--max_path_length',
type=int,
default=1000,
help='Max length of rollout')
parser.add_argument('--speedup', type=float, default=1, help='Speedup')
args = parser.parse_args()
    # If the snapshot file uses TensorFlow, do:
    # import tensorflow as tf
    # with tf.compat.v1.Session():
    #     [rest of the code]
with tf.compat.v1.Session() as sess:
data = joblib.load(args.file)
policy = data['algo'].policy
env = data['env']
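        # Repeatedly roll out the loaded policy with rendering enabled
        # until the user declines to continue.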
while True:
path = rollout(env,
policy,
max_path_length=args.max_path_length,
animated=True,
speedup=args.speedup)
if not query_yes_no('Continue simulation?'):
break
| 2,300 | 31.408451 | 75 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/step_dm_control_env.py | #!/usr/bin/env python3
"""Example of how to load, step, and visualize an environment.
This example requires that garage[dm_control] be installed.
"""
import argparse
from garage.envs.dm_control import DmControlEnv
parser = argparse.ArgumentParser()
parser.add_argument('--n_steps',
type=int,
default=1000,
help='Number of steps to run')
args = parser.parse_args()
# Construct the environment
env = DmControlEnv.from_suite('walker', 'run')
# Reset the environment and launch the viewer
env.reset()
env.render()
# Step randomly until interrupted
steps = 0
while True:
if steps == args.n_steps:
break
env.step(env.action_space.sample())
env.render()
steps += 1
| 748 | 22.40625 | 62 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/step_env.py | #!/usr/bin/env python3
"""Example of how to load, step, and visualize an environment."""
import argparse
import gym
parser = argparse.ArgumentParser()
parser.add_argument('--n_steps',
type=int,
default=1000,
help='Number of steps to run')
args = parser.parse_args()
# Construct the environment
env = gym.make('MountainCar-v0')
# Reset the environment and launch the viewer
env.reset()
env.render()
steps = 0
while True:
if steps == args.n_steps:
env.close()
break
env.step(env.action_space.sample())
env.render()
steps += 1
| 621 | 20.448276 | 65 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/np/cem_cartpole.py | #!/usr/bin/env python3
"""This is an example to train a task with Cross Entropy Method.
Here it runs the CartPole-v1 environment for 100 epochs.
Results:
AverageReturn: 100
RiseTime: epoch 8
"""
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.algos import CEM
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import OnPolicyVectorizedSampler
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def cem_cartpole(ctxt=None, seed=1):
"""Train CEM with Cartpole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
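        # CEM evaluates n_samples sampled policy parameter vectors per
        # epoch and refits its sampling distribution to the elite fraction
        # of samples given by best_frac.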
n_samples = 20
algo = CEM(env_spec=env.spec,
policy=policy,
baseline=baseline,
best_frac=0.05,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
runner.train(n_epochs=100, batch_size=1000)
cem_cartpole(seed=1)
| 1,672 | 29.981481 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/np/cma_es_cartpole.py | #!/usr/bin/env python3
"""This is an example to train a task with CMA-ES.
Here it runs the CartPole-v1 environment for 100 epochs.
Results:
AverageReturn: 100
RiseTime: epoch 38 (itr 760),
but regression is observed in the course of training.
"""
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.algos import CMAES
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import OnPolicyVectorizedSampler
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def cma_es_cartpole(ctxt=None, seed=1):
"""Train CMA_ES with Cartpole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
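        # Like CEM, CMA-ES evaluates n_samples parameter vectors per epoch,
        # but adapts a full covariance matrix for its search distribution
        # rather than refitting an elite mean and variance.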
n_samples = 20
algo = CMAES(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
runner.train(n_epochs=100, batch_size=1000)
cma_es_cartpole()
| 1,702 | 30.537037 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/ddpg_pendulum.py | #!/usr/bin/env python3
"""This is an example to train a task with DDPG algorithm.
Here it creates a gym environment InvertedDoublePendulum and uses DDPG with
1M steps.
Results:
AverageReturn: 250
RiseTime: epoch 499
"""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import PathBuffer
from garage.tf.algos import DDPG
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
@wrap_experiment
def ddpg_pendulum(ctxt=None, seed=1):
"""Train DDPG with InvertedDoublePendulum-v2 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(gym.make('InvertedDoublePendulum-v2'))
policy = ContinuousMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
policy,
sigma=0.2)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu)
replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))
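        # target_update_tau sets the rate of the soft (Polyak-averaged)
        # updates applied to the target policy and Q-function networks.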
ddpg = DDPG(env_spec=env.spec,
policy=policy,
policy_lr=1e-4,
qf_lr=1e-3,
qf=qf,
replay_buffer=replay_buffer,
steps_per_epoch=20,
target_update_tau=1e-2,
n_train_steps=50,
discount=0.9,
min_buffer_size=int(1e4),
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer)
runner.setup(algo=ddpg, env=env)
runner.train(n_epochs=500, batch_size=100)
ddpg_pendulum(seed=1)
| 2,642 | 33.776316 | 78 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/dqn_cartpole.py | #!/usr/bin/env python3
"""An example to train a task with DQN algorithm.
Here it creates a gym environment CartPole, and trains a DQN with 50k steps.
"""
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.exploration_policies import EpsilonGreedyPolicy
from garage.replay_buffer import PathBuffer
from garage.tf.algos import DQN
from garage.tf.policies import DiscreteQfDerivedPolicy
from garage.tf.q_functions import DiscreteMLPQFunction
@wrap_experiment
def dqn_cartpole(ctxt=None, seed=1):
"""Train TRPO with CubeCrash-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
n_epochs = 10
steps_per_epoch = 10
sampler_batch_size = 500
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
env = GarageEnv(gym.make('CartPole-v0'))
replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
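        # Epsilon-greedy exploration: epsilon decays linearly from
        # max_epsilon to min_epsilon over the first decay_ratio fraction
        # of the total timesteps, then stays at min_epsilon.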
exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,
policy=policy,
total_timesteps=num_timesteps,
max_epsilon=1.0,
min_epsilon=0.02,
decay_ratio=0.1)
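        # double_q=True enables Double DQN: the online network selects the
        # greedy action while the target network evaluates its value.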
algo = DQN(env_spec=env.spec,
policy=policy,
qf=qf,
exploration_policy=exploration_policy,
replay_buffer=replay_buffer,
steps_per_epoch=steps_per_epoch,
qf_lr=1e-4,
discount=1.0,
min_buffer_size=int(1e3),
double_q=True,
n_train_steps=500,
target_network_update_freq=1,
buffer_batch_size=32)
runner.setup(algo, env)
runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
dqn_cartpole()
| 2,479 | 37.153846 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/dqn_pong.py | #!/usr/bin/env python3
"""This is an example to train a task with DQN algorithm in pixel environment.
Here it creates a gym environment Pong, and trains a DQN with 1M steps.
"""
import click
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.envs.wrappers.clip_reward import ClipReward
from garage.envs.wrappers.episodic_life import EpisodicLife
from garage.envs.wrappers.fire_reset import FireReset
from garage.envs.wrappers.grayscale import Grayscale
from garage.envs.wrappers.max_and_skip import MaxAndSkip
from garage.envs.wrappers.noop import Noop
from garage.envs.wrappers.resize import Resize
from garage.envs.wrappers.stack_frames import StackFrames
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.exploration_policies import EpsilonGreedyPolicy
from garage.replay_buffer import PathBuffer
from garage.tf.algos import DQN
from garage.tf.policies import DiscreteQfDerivedPolicy
from garage.tf.q_functions import DiscreteCNNQFunction
@click.command()
@click.option('--buffer_size', type=int, default=int(5e4))
@click.option('--max_path_length', type=int, default=None)
@wrap_experiment
def dqn_pong(ctxt=None, seed=1, buffer_size=int(5e4), max_path_length=None):
"""Train DQN on PongNoFrameskip-v4 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
buffer_size (int): Number of timesteps to store in replay buffer.
max_path_length (int): Maximum length of a path after which a path is
considered complete. This is used during testing to minimize the
memory required to store a single path.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
n_epochs = 100
steps_per_epoch = 20
sampler_batch_size = 500
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
env = gym.make('PongNoFrameskip-v4')
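        # Standard Atari preprocessing: random no-ops at reset, frame
        # skipping with max-pooling over skipped frames, episode ends on
        # life loss, pressing FIRE at reset where the game requires it,
        # grayscale conversion, 84x84 resizing, reward clipping, and
        # stacking the last 4 frames into a single observation.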
env = Noop(env, noop_max=30)
env = MaxAndSkip(env, skip=4)
env = EpisodicLife(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireReset(env)
env = Grayscale(env)
env = Resize(env, 84, 84)
env = ClipReward(env)
env = StackFrames(env, 4)
env = GarageEnv(env, is_image=True)
replay_buffer = PathBuffer(capacity_in_transitions=buffer_size)
qf = DiscreteCNNQFunction(env_spec=env.spec,
filters=(
(32, (8, 8)),
(64, (4, 4)),
(64, (3, 3)),
),
strides=(4, 2, 1),
dueling=False) # yapf: disable
policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,
policy=policy,
total_timesteps=num_timesteps,
max_epsilon=1.0,
min_epsilon=0.02,
decay_ratio=0.1)
algo = DQN(env_spec=env.spec,
policy=policy,
qf=qf,
exploration_policy=exploration_policy,
replay_buffer=replay_buffer,
qf_lr=1e-4,
discount=0.99,
min_buffer_size=int(1e4),
max_path_length=max_path_length,
double_q=False,
n_train_steps=500,
steps_per_epoch=steps_per_epoch,
target_network_update_freq=2,
buffer_batch_size=32)
runner.setup(algo, env)
runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
dqn_pong()
| 4,193 | 38.942857 | 79 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/erwr_cartpole.py | #!/usr/bin/env python3
"""This is an example to train a task with ERWR algorithm.
Here it runs the CartPole-v1 environment with ERWR for 100 iterations.
Results:
AverageReturn: 100
RiseTime: itr 34
"""
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import ERWR
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def erwr_cartpole(ctxt=None, seed=1):
"""Train with ERWR on CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = ERWR(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99)
runner.setup(algo=algo, env=env)
runner.train(n_epochs=100, batch_size=10000, plot=False)
erwr_cartpole(seed=1)
| 1,543 | 28.692308 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/her_ddpg_fetchreach.py | #!/usr/bin/env python3
"""This is an example to train a task with DDPG + HER algorithm.
Here it creates a gym environment FetchReach.
"""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise
from garage.replay_buffer import HERReplayBuffer
from garage.tf.algos import DDPG
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
@wrap_experiment(snapshot_mode='last')
def her_ddpg_fetchreach(ctxt=None, seed=1):
"""Train DDPG + HER on the goal-conditioned FetchReach env.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(snapshot_config=ctxt) as runner:
env = GarageEnv(gym.make('FetchReach-v1'))
policy = ContinuousMLPPolicy(
env_spec=env.spec,
name='Policy',
hidden_sizes=[256, 256, 256],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh,
)
exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,
policy,
sigma=0.2)
qf = ContinuousMLPQFunction(
env_spec=env.spec,
name='QFunction',
hidden_sizes=[256, 256, 256],
hidden_nonlinearity=tf.nn.relu,
)
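        # Hindsight Experience Replay relabels stored transitions with
        # goals that were actually achieved; replay_k is (roughly) the
        # ratio of relabeled (hindsight) goals to original goals.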
replay_buffer = HERReplayBuffer(capacity_in_transitions=int(1e6),
replay_k=4,
reward_fn=env.compute_reward,
env_spec=env.spec)
ddpg = DDPG(
env_spec=env.spec,
policy=policy,
policy_lr=1e-3,
qf_lr=1e-3,
qf=qf,
replay_buffer=replay_buffer,
target_update_tau=0.01,
steps_per_epoch=50,
max_path_length=250,
n_train_steps=40,
discount=0.95,
exploration_policy=exploration_policy,
policy_optimizer=tf.compat.v1.train.AdamOptimizer,
qf_optimizer=tf.compat.v1.train.AdamOptimizer,
buffer_batch_size=256,
)
runner.setup(algo=ddpg, env=env)
runner.train(n_epochs=50, batch_size=256)
her_ddpg_fetchreach()
| 2,700 | 31.542169 | 73 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/multi_env_ppo.py | #!/usr/bin/env python3
"""This is an example to train multiple tasks with PPO algorithm."""
import gym
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs.multi_env_wrapper import MultiEnvWrapper
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import PPO
from garage.tf.policies import CategoricalMLPPolicy
@wrap_experiment
def multi_env_ppo(ctxt=None, seed=1):
"""Train PPO on two Atari environments simultaneously.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env1 = GarageEnv(normalize(gym.make('Adventure-ram-v4')))
env2 = GarageEnv(normalize(gym.make('Alien-ram-v4')))
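        # MultiEnvWrapper exposes the two Atari tasks as a single
        # environment; by default garage picks which task to run uniformly
        # at random at each reset.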
env = MultiEnvWrapper([env1, env2])
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_nonlinearity=tf.nn.tanh,
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
))
runner.setup(algo, env)
runner.train(n_epochs=120, batch_size=2048, plot=False)
multi_env_ppo()
| 1,887 | 31 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/multi_env_trpo.py | #!/usr/bin/env python3
"""This is an example to train multiple tasks with TRPO algorithm."""
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.envs import PointEnv
from garage.envs.multi_env_wrapper import MultiEnvWrapper
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import GaussianMLPPolicy
@wrap_experiment
def multi_env_trpo(ctxt=None, seed=1):
"""Train TRPO on two different PointEnv instances.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env1 = GarageEnv(normalize(PointEnv(goal=(-1., 0.))))
env2 = GarageEnv(normalize(PointEnv(goal=(1., 0.))))
env = MultiEnvWrapper([env1, env2])
policy = GaussianMLPPolicy(env_spec=env.spec)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0)
runner.setup(algo, env)
runner.train(n_epochs=40, batch_size=2048, plot=False)
multi_env_trpo()
| 1,632 | 32.326531 | 72 | py |
CSD-locomotion | CSD-locomotion-master/garaged/examples/tf/ppo_memorize_digits.py | #!/usr/bin/env python3
"""This is an example to train a task with PPO algorithm.
Here it runs MemorizeDigits-v0 environment with 1000 iterations.
"""
import click
import gym
from garage import wrap_experiment
from garage.envs import GarageEnv, normalize
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianCNNBaseline
from garage.tf.policies import CategoricalCNNPolicy
@click.command()
@click.option('--batch_size', type=int, default=4000)
@wrap_experiment
def ppo_memorize_digits(ctxt=None, seed=1, batch_size=4000):
"""Train PPO on MemorizeDigits-v0 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = GarageEnv(normalize(gym.make('MemorizeDigits-v0')),
is_image=True)
policy = CategoricalCNNPolicy(env_spec=env.spec,
filters=(
(32, (5, 5)),
(64, (3, 3)),
(64, (2, 2)),
),
strides=(4, 2, 1),
padding='VALID',
hidden_sizes=(256, )) # yapf: disable
baseline = GaussianCNNBaseline(
env_spec=env.spec,
regressor_args=dict(filters=(
(32, (5, 5)),
(64, (3, 3)),
(64, (2, 2)),
),
strides=(4, 2, 1),
padding='VALID',
hidden_sizes=(256, ),
use_trust_region=True)) # yapf: disable
algo = PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
optimizer_args=dict(
batch_size=32,
max_epochs=10,
learning_rate=1e-3,
),
flatten_input=False)
runner.setup(algo, env)
runner.train(n_epochs=1000, batch_size=batch_size)
ppo_memorize_digits()
| 2,918 | 36.423077 | 76 | py |