# Copyright (c) Facebook, Inc. and its affiliates.
"""Variants of MSE loss."""
import torch.nn as nn
class NormedMSE(nn.MSELoss):
def forward(self, inp, tgt, *args, **kwargs):
"""
Args:
inp: (*, C)
tgt: (*, C)
        Will L2-normalize both the input and the target before computing the loss
"""
inp = nn.functional.normalize(inp, dim=-1, p=2)
tgt = nn.functional.normalize(tgt, dim=-1, p=2)
return super().forward(inp, tgt, *args, **kwargs)
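
# Editor's note: the sketch below is illustrative and not part of the original
# AVT file. NormedMSE behaves like nn.MSELoss, except that both prediction and
# target are L2-normalized along the channel dimension first, so the loss only
# depends on the angle between the two vectors.
def _demo_normed_mse():
    import torch
    pred = torch.randn(4, 128)     # (*, C)
    tgt = torch.randn(4, 128)      # (*, C)
    loss = NormedMSE()(pred, tgt)  # scalar (reduction='mean' by default)
    return loss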

AVT-main | loss_fn/mse.py

AVT-main | loss_fn/__init__.py

# Copyright (c) Facebook, Inc. and its affiliates.
"""Cross entropy loss, that works with multi-dim input."""
import torch
import torch.nn as nn
from common.cluster import KmeansAssigner
class MultiDimCrossEntropy(nn.CrossEntropyLoss):
def forward(self, inp, tgt, *args, **kwargs):
"""
Args:
inp: (*, C)
tgt: (*, )
        Will flatten the initial dimensions and then compute the loss
"""
assert inp.ndim == tgt.ndim + 1
assert inp.shape[:-1] == tgt.shape
res = super().forward(inp.reshape(-1, inp.size(-1)), tgt.reshape(
(-1, )), *args, **kwargs)
if torch.numel(res) == torch.numel(tgt):
# Reduction was not done, so reshape back to orig shape
res = res.reshape(tgt.shape)
return res
class QuantizeAndCrossEntropy(MultiDimCrossEntropy):
"""Given a set of cluster centers, project the features to that before
incurring the loss."""
def __init__(self, centroids_fpath, norm=True, *args, **kwargs):
super().__init__(*args, **kwargs)
self.assigner = KmeansAssigner(centroids_fpath)
self.norm = norm
def forward(self, inp, tgt):
"""
Args:
inp: (*, C)
tgt: (*, C)
        Will flatten the initial dimensions and then compute the loss
"""
        # L2-normalize both the target and the input, since that's how I'm
        # computing the centroids
if self.norm:
inp = nn.functional.normalize(inp, dim=-1, p=2)
tgt = nn.functional.normalize(tgt, dim=-1, p=2)
        # assign the GT and predictions to the centroids (assuming the
        # KmeansAssigner exposes the loaded centroids as `.centroids`)
        centroids = self.assigner.centroids
        inp_proj = torch.mm(inp.flatten(0, 1),
                            centroids.t()).view(inp.shape[:-1] +
                                                centroids.shape[:1])
# the weights of project layer are the centroids, so pick from there
tgt_proj_q = self.assigner(tgt)
return super().forward(inp_proj, tgt_proj_q)
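
# Editor's note: illustrative sketch, not part of the original AVT file. It
# shows that MultiDimCrossEntropy accepts logits with extra leading dims (e.g.
# dense per-timestep predictions) and, with reduction='none', returns a loss
# shaped like the targets.
def _demo_multidim_xentropy():
    criterion = MultiDimCrossEntropy(reduction='none', ignore_index=-1)
    logits = torch.randn(2, 5, 10)           # (B, T, C)
    targets = torch.randint(0, 10, (2, 5))   # (B, T)
    loss = criterion(logits, targets)
    assert loss.shape == targets.shape       # per-element loss, shape (B, T)
    return loss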

AVT-main | loss_fn/multidim_xentropy.py

# Copyright (c) Facebook, Inc. and its affiliates.
"""The SimCLR InfoNCE loss."""
import torch
import torch.nn as nn
from common import utils
LARGE_NUM = 1e9
class MILCrossEntropyLoss(nn.Module):
def __init__(self, mil_type='sum', reduction='mean'):
super().__init__()
self.mil_type = mil_type
self.reduction = reduction
def forward(self, *args, **kwargs):
if self.mil_type == 'sum':
return self.forward_sum(*args, **kwargs)
elif self.mil_type == 'max':
return self.forward_max(*args, **kwargs)
else:
raise NotImplementedError(f'Unknown type {self.mil_type}')
def forward_sum(self, pred, labels_onehot):
"""
Args:
pred: BxC is the output
            labels_onehot: BxC, 1s for positives and 0s for negatives
Based on https://github.com/antoine77340/MIL-NCE_HowTo100M/blob/master/loss.py
Or the MIL-NCE paper Eq 1 (https://arxiv.org/pdf/1912.06430.pdf)
"""
assert pred.shape == labels_onehot.shape
# In the MILNCE code there is a sum, followed by logsumexp. I think
# using the labels to select the positive samples and then doing
# logsumexp will have the same effect.
pos_pred = pred[labels_onehot.bool()].reshape((pred.size(0), -1))
numerator = torch.logsumexp(pos_pred, dim=1)
        denominator = torch.logsumexp(pred, dim=1)
        loss = denominator - numerator
if self.reduction == 'mean':
loss = torch.mean(loss)
elif self.reduction == 'none':
pass
else:
raise NotImplementedError(f'Unknown reduction {self.reduction}')
return loss
def forward_max(self, pred, labels_onehot):
"""
Args:
pred: BxC is the output
            labels_onehot: BxC, 1s for positives and 0s for negatives
Based on Appendix A (https://arxiv.org/pdf/1912.06430.pdf)
"""
assert pred.shape == labels_onehot.shape
# Do max before, and then logsumexp. Works since exp is monotonic fn
# so the max with exp or without will be the same.
pos_pred = pred[labels_onehot.bool()].reshape((pred.size(0), -1))
pos_pred = torch.max(pos_pred, dim=1, keepdim=True)[0]
neg_pred = pred[~labels_onehot.bool()].reshape((pred.size(0), -1))
numerator = torch.logsumexp(pos_pred, dim=1)
        denominator = torch.logsumexp(torch.cat([pos_pred, neg_pred], dim=1),
                                      dim=1)
        return torch.mean(denominator - numerator)
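
# Editor's note: illustrative sketch, not part of the original AVT file. With
# exactly one positive per row, the 'sum' MIL loss reduces to standard softmax
# cross-entropy; with several positives it computes
# -log(sum_pos exp(l) / sum_all exp(l)), i.e. the MIL-NCE objective (Eq 1).
def _demo_mil_cross_entropy():
    mil_loss = MILCrossEntropyLoss(mil_type='sum', reduction='mean')
    logits = torch.randn(3, 7)                 # (B, C) similarity scores
    labels = torch.zeros(3, 7)
    labels[torch.arange(3), torch.tensor([0, 2, 5])] = 1.0  # 1 positive/row
    loss = mil_loss(logits, labels)
    # Matches the standard cross entropy in the single-positive case
    ref = nn.functional.cross_entropy(logits, torch.tensor([0, 2, 5]))
    assert torch.allclose(loss, ref, atol=1e-5)
    return loss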
class DistributedSimclrInfoNCELoss(nn.Module):
def __init__(self,
temperature: float = 0.1,
target_to_output_loss=True,
mil_type='sum',
reduction='mean'):
super().__init__()
self.temperature = temperature
self.criterion = MILCrossEntropyLoss(mil_type, reduction=reduction)
# This defines whether the reverse part of the loss, from target to
# the output features, is incurred.
self.target_to_output_loss = target_to_output_loss
def forward(self, output: torch.Tensor,
target: torch.Tensor) -> torch.Tensor:
"""
Args:
output: BxC
target: BxC or BxKxC <-- In case of MIL NCE, K is the number of
positives for each batch element.
Following https://github.com/google-research/simclr/blob/master/objective.py
"""
# Normalize first, before the gather -- so that all the features I get
# are normalized
output = nn.functional.normalize(output, dim=-1, p=2)
target = nn.functional.normalize(target, dim=-1, p=2)
# To be consistent with MIL-NCE input, convert K to batch dim,
# and repeat the output to same value for each repeated target
elt_for_back_loss = 0
if target.ndim == 3:
num_matching = target.size(1)
target_flat = target.reshape((-1, target.size(-1)))
# Keep the first one for the back loss
target = target[:, elt_for_back_loss]
else:
num_matching = 1
target_flat = target
# Gather all the outputs and all the targets
output_all = self.gather_embeddings(output)
target_flat_all = self.gather_embeddings(target_flat)
batch_size = output.size(0)
replica_id = utils.get_rank()
# -> (B, B_full * num_matching)
labels_onehot = torch.zeros((batch_size, output_all.size(0)),
dtype=output.dtype,
device=output.device)
extra_zeros = torch.zeros((batch_size, output_all.size(0)),
dtype=output.dtype,
device=output.device)
ones_diag = torch.eye(batch_size,
batch_size,
dtype=output.dtype,
device=output.device)
labels_onehot[:, replica_id * batch_size:(replica_id + 1) *
batch_size] = ones_diag
labels_onehot_interleaved = labels_onehot.repeat_interleave(
num_matching, dim=1)
# (B, C) * (B_full, C) -> (B, B_full)
logits_aa = torch.mm(output, output_all.t() / self.temperature)
# (B, C) * (B_full * num_matching, C) -> (B, B_full * num_matching)
logits_ab = torch.mm(output, target_flat_all.t() / self.temperature)
logits_aa = logits_aa - labels_onehot * LARGE_NUM
loss = self.criterion(
torch.cat([logits_ab, logits_aa], 1),
torch.cat([labels_onehot_interleaved, extra_zeros], 1))
if self.target_to_output_loss:
# Keep only the first prediction, since that is what I will incur
# reverse loss with
target_all = target_flat_all[elt_for_back_loss::num_matching]
logits_bb = torch.mm(target, target_all.t() / self.temperature)
logits_bb = logits_bb - labels_onehot * LARGE_NUM
logits_ba = torch.mm(target, output_all.t() / self.temperature)
loss = loss + self.criterion(
torch.cat([logits_ba, logits_bb], 1),
torch.cat([labels_onehot, extra_zeros], 1))
return loss
def gather_embeddings(self, embedding: torch.Tensor) -> torch.Tensor:
"""
Do a gather over all embeddings, so we can compute the loss.
Final shape is like: (batch_size * num_gpus) x embedding_dim
"""
if torch.distributed.is_available(
) and torch.distributed.is_initialized():
# gather all embeddings.
embedding_gathered = utils.gather_from_all(embedding)
else:
embedding_gathered = embedding
return embedding_gathered
class MultiDimDistributedSimclrInfoNCELoss(DistributedSimclrInfoNCELoss):
"""
Fold in the initial dimensions and run simple NCE.
"""
def forward(self, output: torch.Tensor, target: torch.Tensor, *args,
**kwargs) -> torch.Tensor:
return super().forward(output.flatten(0, -2), target.flatten(0, -2),
*args, **kwargs)
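
# Editor's note: illustrative sketch, not part of the original AVT file. It runs
# the InfoNCE loss in a single (non-distributed) process, assuming that
# common.utils.get_rank() returns 0 when torch.distributed is not initialized
# (the usual torchvision-style helper), so gather_embeddings is a no-op.
def _demo_infonce_single_process():
    criterion = DistributedSimclrInfoNCELoss(temperature=0.1)
    output = torch.randn(8, 256)      # (B, C) predicted (future) features
    target = torch.randn(8, 3, 256)   # (B, K, C): K positives per element
    loss = criterion(output, target)  # scalar
    return loss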

AVT-main | loss_fn/simclr_infonce.py

# Copyright (c) Facebook, Inc. and its affiliates.
"""Utils for notebook."""
import sys
import os
import os.path as osp
import glob
from collections import OrderedDict
from collections.abc import Iterable
import json
import subprocess
import pickle as pkl
import logging
import h5py
import math
import operator
import pathlib
import pandas as pd
import moviepy.editor as mpy
from tqdm import tqdm
import proglog
import numpy as np
from scipy.special import softmax
import torch
# from omegaconf import OmegaConf
import hydra
from hydra.experimental import initialize as hydra_initialize, compose as hydra_compose
import matplotlib
from matplotlib import pylab
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
# from tqdm import tqdm
from tqdm.notebook import tqdm
sys.path.append('..')
from external.rulstm.RULSTM.utils import topk_recall
from launch import subselect_dict_keys_diff
from datasets import epic_kitchens
CODE_DIR = str(pathlib.Path(__file__).parent.resolve() / '../')
OUTPUT_DIR = f'{CODE_DIR}/OUTPUTS/'
RESULTS_SAVE_DIR_PREFIX = 'results' # This is the prefix, can have multiple, if >1 eval datasets
DATASET_EVAL_CFG_KEY = 'dataset_eval'
DATASET_EVAL_CFG_KEY_SUFFIX = ''
proglog.notebook() # so moviepy uses notebook tqdm
SQRT2 = math.sqrt(2)
sns.set_style("whitegrid")
rcParams['mathtext.fontset'] = 'custom'
rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
rcParams['mathtext.fontset'] = 'stix'
rcParams['font.family'] = 'STIXGeneral'
matplotlib.rc('axes', edgecolor='k')
matplotlib.rc('font', size=30)
def save_graph(fig, outfpath, root_dir='./', **kwargs):
# Any postprocessing of the graphs
sns.despine(top=True, right=True, left=False, bottom=False)
# Save code
    final_outfpath = os.path.join(root_dir, outfpath)
    os.makedirs(osp.dirname(final_outfpath), exist_ok=True)
    fig.savefig(final_outfpath,
bbox_inches='tight',
transparent=True,
pad_inches=0,
**kwargs)
def allkeys(obj):
    """Recursively find all leaf keys in an h5 file."""
    keys = []
for key in obj.keys():
if isinstance(obj[key], h5py.Group):
keys += [f'{key}/{el}' for el in allkeys(obj[key])]
else:
keys.append(key)
return keys
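
# Editor's note: illustrative sketch, not part of the original AVT file. allkeys
# flattens nested HDF5 groups into 'group/dataset' style paths, which is how the
# result files store keys such as 'logits/action'.
def _demo_allkeys():
    with h5py.File('demo_inmem.h5', 'w', driver='core',
                   backing_store=False) as fin:
        fin.create_dataset('epoch', data=np.array([1]))
        fin.create_dataset('logits/action', data=np.zeros((2, 10)))
        assert allkeys(fin) == ['epoch', 'logits/action']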
class EmptyResdirError(ValueError):
pass
def gen_load_resfiles(resdir):
resfiles = glob.glob(osp.join(resdir, '*.pth'))
if len(resfiles) == 0:
resfiles = glob.glob(osp.join(resdir, '*.h5'))
if len(resfiles) == 0:
            raise EmptyResdirError(f"Didn't find any resfiles in {resdir}")
for resfile in resfiles:
if resfile.endswith('.pth'):
output_dict = {
                key: val.numpy() if torch.is_tensor(val) else val
for key, val in torch.load(resfile).items()
}
else:
output_dict = {}
with h5py.File(resfile, 'r') as fin:
for key in allkeys(fin):
try:
output_dict[key] = fin[key][()]
except AttributeError as err:
# Happens for the string keys... need to figure what
# to do here
logging.warning('Unable to load %s (%s)', key, err)
yield output_dict
def read_results(conf_path, run_id=0, results_dir='results/'):
resdir = osp.join(OUTPUT_DIR, conf_path, str(run_id), results_dir)
data = next(gen_load_resfiles(resdir))
# TODO allow to read only certain keys, eg some times we only need logits
# which would be faster to read
res_per_layer = {
key: OrderedDict()
for key in data if key not in ['epoch']
}
if len(res_per_layer) == 0:
raise ValueError('No logits found in the output. Note that code was '
'changed Aug 26 2020 that renames "output" to '
'"logits" etc. So might need to rerun testing.')
logging.info('Reading from resfiles')
for data in gen_load_resfiles(resdir):
for i, idx in enumerate(data['idx']):
idx = int(idx)
for key in res_per_layer:
if idx not in res_per_layer[key]:
res_per_layer[key][idx] = []
res_per_layer[key][idx].append(data[key][i])
# Mean over all the multiple predictions per key
final_res = {}
for key in res_per_layer:
if len(res_per_layer[key]) == 0:
continue
max_idx = max(res_per_layer[key].keys())
key_output = np.zeros([
max_idx + 1,
] + list(res_per_layer[key][0][0].shape))
for idx in res_per_layer[key]:
key_output[idx] = np.mean(np.stack(res_per_layer[key][idx]),
axis=0)
final_res[key] = key_output
return final_res
def get_epoch_from_resdir(conf_path, run_id=0, results_dir='results/'):
resdir = osp.join(OUTPUT_DIR, conf_path, str(run_id), results_dir)
data = next(gen_load_resfiles(resdir))
if 'epoch' not in data:
return None
return np.min(data['epoch'])
def read_all_results(conf_path, run_id=0):
resdirs = glob.glob(
osp.join(OUTPUT_DIR, conf_path, str(run_id),
RESULTS_SAVE_DIR_PREFIX + '*'))
all_res = {}
for resdir in resdirs:
resdir_bname = osp.basename(resdir)
all_res[resdir_bname] = read_results(conf_path,
run_id,
results_dir=resdir_bname)
return all_res
def read_file_into_list(fpath):
"""Read cli from file into a string."""
# TODO: Ideally reuse this from the launch script
args_lst = []
with open(fpath, 'r') as fin:
for line in fin:
args = line.split('#')[0].strip()
if not args: # Empty
continue
args_lst.append(args)
# Importing this on the global scope does not work .. gives the
# super(cls, self).. error
# https://thomas-cokelaer.info/blog/2011/09/382/
# Probably some issue with auto package reload in notebooks for py2.7
# packages..
from hydra._internal.core_plugins.basic_sweeper import BasicSweeper
from hydra.core.override_parser.overrides_parser import OverridesParser
sweeper = BasicSweeper(max_batch_size=None)
parser = OverridesParser.create()
overrides = parser.parse_overrides(args_lst)
run_args = sweeper.split_arguments(overrides, max_batch_size=None)[0]
return run_args
def get_config(cfg_fpath, run_id=0):
# outdir = osp.join(OUTPUT_DIR, cfg_fpath, str(run_id))
overrides_all = read_file_into_list('../' + cfg_fpath)
# https://github.com/facebookresearch/hydra/issues/716 should fix the issue
# with interpolation not working in notebook etc.
# However it can't handle ":" style custom interpolation, so need to
# override those.
cfg_all = []
for overrides in overrides_all:
overrides.append('cwd="../"')
with hydra_initialize(config_path='../conf'):
cfg = hydra_compose(config_name='config.yaml',
return_hydra_config=True,
overrides=overrides)
cfg_all.append(cfg)
if run_id is None:
return cfg_all
else:
return cfg_all[run_id]
def get_dataset(cfg_fpath,
run_id=0,
dataset_cfg_key=DATASET_EVAL_CFG_KEY,
dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX):
cfg = get_config(cfg_fpath, run_id)
sys.path.append('../')
dataset = hydra.utils.instantiate(getattr(
cfg, dataset_cfg_key + dataset_key_suffix),
frames_per_clip=1,
_recursive_=False)
return dataset
def overlay_text(clip, texts):
"""
Args:
clip: Moviepy clip
texts: List of 2 strings (corr to GT and pred) to overlay onto the clip
"""
bg_color = 'white' if texts[0] == texts[1] else 'pink'
texts[0] = 'GT: ' + texts[0]
texts[1] = 'Pred: ' + texts[1]
textclip = (mpy.TextClip(str(texts), bg_color=bg_color).set_duration(
clip.duration).set_pos(("right", "top")))
return mpy.CompositeVideoClip([clip, textclip])
def compute_topk(predictions, labels, k, classes=None):
"""
Args:
predictions (N, K)
labels (N,)
classes: (C', ): Set of classes to compute over. By default, uses
all classes
"""
if classes is None:
classes = np.unique(labels)
# Subselect items that belong to the classes
    # Converting to list since classes are at times dict_values and that
    # doesn't directly convert to np.array
reqd_elts = np.isin(labels, list(classes))
predictions = predictions[reqd_elts]
labels = labels[reqd_elts]
top_predictions = np.argpartition(predictions, -k, axis=-1)[:, -k:]
ratio_solved = np.mean(
np.any(labels[:, np.newaxis] == top_predictions, axis=-1))
return ratio_solved * 100.0
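
# Editor's note: illustrative sketch, not part of the original AVT file. It
# shows the top-k accuracy (in percent), optionally restricted to a subset of
# classes as done for the many-shot / tail evaluations.
def _demo_compute_topk():
    preds = np.array([[0.1, 0.7, 0.2],   # top-1 -> class 1 (GT 1, correct)
                      [0.5, 0.1, 0.4],   # top-1 -> class 0 (GT 2, wrong)
                      [0.2, 0.2, 0.6]])  # top-1 -> class 2 (GT 2, correct)
    labels = np.array([1, 2, 2])
    assert np.isclose(compute_topk(preds, labels, k=1), 200 / 3)
    assert np.isclose(compute_topk(preds, labels, k=2), 100.0)
    # Only evaluate on elements whose GT is class 2 (rows 1 and 2)
    assert np.isclose(compute_topk(preds, labels, k=1, classes=[2]), 50.0)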
def combine_verb_noun_preds(res_verb, res_noun):
"""
Args:
res_verb (matrix with NxC1 dims)
res_noun (matrix with NxC2 dims)
Returns:
res_action (matrix with Nx(C1 * C2) dims)
"""
num_elts = res_verb.shape[0]
# normalize the predictions using softmax
res_verb = softmax(res_verb, axis=-1)
res_noun = softmax(res_noun, axis=-1)
# Cross product to get the combined score
return np.einsum('ij,ik->ijk', res_verb, res_noun).reshape((num_elts, -1))
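
# Editor's note: illustrative sketch, not part of the original AVT file. The
# combined action scores are the outer product of verb and noun softmax
# probabilities, flattened so that action_id = verb_id * num_nouns + noun_id
# (the same indexing used to build true_action in get_epic_action_accuracy).
def _demo_combine_verb_noun_preds():
    res_verb = np.random.randn(4, 3)   # (N, C1) verb logits
    res_noun = np.random.randn(4, 5)   # (N, C2) noun logits
    res_action = combine_verb_noun_preds(res_verb, res_noun)
    assert res_action.shape == (4, 3 * 5)
    # Each row is a product of two distributions, so it still sums to 1
    assert np.allclose(res_action.sum(axis=-1), 1.0)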
def compute_conf_mat(predictions, target):
def to_onehot(indices, num_classes):
onehot = torch.zeros(indices.shape[0],
num_classes,
*indices.shape[1:],
device=indices.device)
        # rgirdhar: When testing on the test set, there will be some data
        # points where we don't have the labels
return onehot.scatter_(1, indices[indices >= 0].unsqueeze(1), 1)
num_classes = predictions.shape[1]
assert predictions.shape[0] == target.shape[0]
with torch.no_grad():
target_1hot = to_onehot(target, num_classes)
target_1hot_t = target_1hot.transpose(0, 1).float()
pred_idx = torch.argmax(predictions, dim=1)
pred_1hot = to_onehot(pred_idx.reshape(-1), num_classes)
pred_1hot = pred_1hot.float()
confusion_matrix = torch.matmul(target_1hot_t, pred_1hot)
return confusion_matrix
def mean_class_accuracy(conf_mat):
# Increase floating point precision similar to forecasting HOI
conf_mat = conf_mat.type(torch.float64)
cls_cnt = conf_mat.sum(dim=1) + 1e-15
cls_hit = conf_mat.diag()
cls_acc = (cls_hit / cls_cnt).mean().item()
return cls_acc
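
# Editor's note: illustrative sketch, not part of the original AVT file. Mean
# class accuracy averages the per-class recall (confusion-matrix diagonal over
# row sums), so rare classes count as much as frequent ones, unlike plain top-1.
def _demo_mean_class_accuracy():
    preds = torch.tensor([[0.9, 0.1],   # -> class 0 (GT 0)
                          [0.8, 0.2],   # -> class 0 (GT 0)
                          [0.7, 0.3],   # -> class 0 (GT 0)
                          [0.6, 0.4]])  # -> class 0 (GT 1, the only mistake)
    target = torch.tensor([0, 0, 0, 1])
    conf_mat = compute_conf_mat(preds, target)
    # Plain top-1 would be 3/4 = 0.75, but per-class recall is (1 + 0) / 2 = 0.5
    assert np.isclose(mean_class_accuracy(conf_mat), 0.5)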
def compute_accuracy(predictions, labels, classes=None):
"""
Args:
predictions: (B, C) logits
labels: (B, )
classes: OrderedDict[name (str), cls_id (int)]
"""
# This can happen when computing tail class accuracies and it's not
# specified for the test set
if predictions.size == 0:
return [float('nan')] * 5
labels = labels.astype(np.int64)
if classes is not None:
classes_to_keep = list(classes.values())
else:
classes_to_keep = range(max(labels) + 1)
top_1 = compute_topk(predictions, labels, 1, classes=classes_to_keep)
top_5 = compute_topk(predictions, labels, 5, classes=classes_to_keep)
try:
ar_outputs = topk_recall(predictions,
labels,
k=5,
classes=classes_to_keep)
if isinstance(ar_outputs, tuple):
# This happens if RULSTM code is modified to return per-class AR
# values
ar5, ar5_per_cls = ar_outputs
ar5_per_cls = {k: v * 100.0 for k, v in ar5_per_cls.items()}
else:
ar5 = ar_outputs
ar5_per_cls = {c: float('nan') for c in classes_to_keep}
except ZeroDivisionError:
        # This happens when it can't find any true classes; the code
        # can't deal with that
ar5 = float('nan')
ar5_per_cls = {c: float('nan') for c in classes_to_keep}
# Compute a mean class accuracy (used in EGTEA) -- accuracy per class and
# then mean over the classes
conf_mat = compute_conf_mat(torch.from_numpy(predictions),
torch.from_numpy(labels))
# Make sure conf mat makes sense
top_1_confmat = 100.0 * (conf_mat.diag()[classes_to_keep].sum() /
conf_mat[classes_to_keep].sum())
if (not np.isnan(top_1) and not np.isnan(top_1_confmat)
and not np.isclose(top_1, top_1_confmat, atol=1.0)):
        # Using a large atol margin because the conf_mat computation happens
        # on GPUs and can be non-deterministic, so it might not match exactly
# Save the outputs for analysis
with open('debug_acc.pkl', 'wb') as fout:
pkl.dump(predictions, fout)
pkl.dump(labels, fout)
pkl.dump(conf_mat, fout)
        raise ValueError(f'top1 ({top_1}) doesn\'t match what I get from '
f'conf_mat ({top_1_confmat}). This could happen '
f'if the model predicts all 0s for some data points '
f'and hence argmax is not defined and behaves '
f'differently in numpy and torch '
f'(https://github.com/pytorch/pytorch/issues/14147)')
top1_meancls = 100.0 * mean_class_accuracy(conf_mat)
return top_1, top_5, ar5 * 100, top1_meancls, ar5_per_cls
def print_accuracies_epic(metrics: dict, prefix: str = ''):
print(f"[{prefix}] Accuracies verb/noun/action: "
f"{metrics['vtop1']:.1f} {metrics['vtop5']:.1f} "
f"{metrics['ntop1']:.1f} {metrics['ntop5']:.1f} "
f"{metrics['atop1']:.1f} {metrics['atop5']:.1f} ")
print(f"[{prefix}] Mean class top-1 accuracies verb/noun/action: "
f"{metrics['vtop1_meancls']:.1f} "
f"{metrics['ntop1_meancls']:.1f} "
f"{metrics['atop1_meancls']:.1f} ")
print(f"[{prefix}] Recall@5 verb/noun/action: "
f"{metrics['vrec5']:.1f} {metrics['nrec5']:.1f} "
f"{metrics['arec5']:.1f} ")
print(f"[{prefix}] Recall@5 many shot verb/noun/action: "
f"{metrics['vrec5_ms']:.1f} {metrics['nrec5_ms']:.1f} "
f"{metrics['arec5_ms']:.1f} ")
if 'vrec5_tail' in metrics:
# assuming the others for tail/unseen will be in there too, since
# they are all computed at one place for ek100
print(f"[{prefix}] Recall@5 tail verb/noun/action: "
f"{metrics['vrec5_tail']:.1f} {metrics['nrec5_tail']:.1f} "
f"{metrics['arec5_tail']:.1f} ")
print(f"[{prefix}] Recall@5 unseen verb/noun/action: "
f"{metrics['vrec5_unseen']:.1f} {metrics['nrec5_unseen']:.1f} "
f"{metrics['arec5_unseen']:.1f} ")
def get_logits_from_results(results):
if 'logits' in results:
return results['logits']
# Newer version, as of Nov 3 2020
logits_keys = [key for key in results.keys() if key.startswith('logits/')]
if len(logits_keys) == 1:
return results[logits_keys[0]]
# Else, return all of them in a dict
return {key: results[key] for key in logits_keys}
def get_epic_action_accuracy(run_info_verb, run_info_noun):
# Compute action accuracies implicitly from verb and noun
# TODO also compute with many-shot classes for EPIC 55
res_verb = get_logits_from_results(read_results(*run_info_verb))
res_noun = get_logits_from_results(read_results(*run_info_noun))
dataset_verb = get_dataset(*run_info_verb)
vtop1, vtop5, vrec5, vtop1_meancls, vrec5_per_cls = compute_accuracy(
res_verb, dataset_verb.df['verb_class'].values)
dataset_noun = get_dataset(*run_info_noun)
ntop1, ntop5, nrec5, ntop1_meancls, nrec5_per_cls = compute_accuracy(
res_noun, dataset_noun.df['noun_class'].values)
assert (len(dataset_verb.df) == len(res_verb) == len(dataset_noun.df) ==
len(res_noun))
res_action = combine_verb_noun_preds(res_verb, res_noun)
true_action = (
dataset_verb.df['verb_class'].values * len(dataset_noun.classes) +
dataset_noun.df['noun_class'].values)
atop1, atop5, arec5, atop1_meancls, arec5_per_cls = compute_accuracy(
res_action, true_action)
print_accuracies_epic({
'vtop1': vtop1,
'vtop5': vtop5,
'vrec5': vrec5,
'vrec5_ms': float('nan'), # TODO
'vtop1_meancls': vtop1_meancls,
'vrec5_per_cls': vrec5_per_cls,
'ntop1': ntop1,
'ntop5': ntop5,
'nrec5': nrec5,
'nrec5_ms': float('nan'), # TODO
'ntop1_meancls': ntop1_meancls,
'nrec5_per_cls': nrec5_per_cls,
'atop1': atop1,
'atop5': atop5,
'arec5': arec5,
'arec5_ms': float('nan'), # TODO
'atop1_meancls': atop1_meancls,
'arec5_per_cls': arec5_per_cls,
})
def epic100_unseen_tail_eval(probs, dataset):
"""
probs: contains 3 elements: predictions for verb, noun and action
"""
# based on https://github.com/fpv-iplab/rulstm/blob/d44612e4c351ff668f149e2f9bc870f1e000f113/RULSTM/main.py#L379
unseen_participants_ids = pd.read_csv(osp.join(
dataset.rulstm_annotation_dir,
'validation_unseen_participants_ids.csv'),
names=['id'],
squeeze=True)
tail_verbs_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_verbs_ids.csv'),
names=['id'],
squeeze=True)
tail_nouns_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_nouns_ids.csv'),
names=['id'],
squeeze=True)
tail_actions_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_actions_ids.csv'),
names=['id'],
squeeze=True)
# Now based on https://github.com/fpv-iplab/rulstm/blob/d44612e4c351ff668f149e2f9bc870f1e000f113/RULSTM/main.py#L495
unseen_bool_idx = dataset.df.narration_id.isin(
unseen_participants_ids).values
tail_verbs_bool_idx = dataset.df.narration_id.isin(tail_verbs_ids).values
tail_nouns_bool_idx = dataset.df.narration_id.isin(tail_nouns_ids).values
tail_actions_bool_idx = dataset.df.narration_id.isin(
tail_actions_ids).values
# For tail
_, _, vrec5_tail, _, _ = compute_accuracy(
probs[0][tail_verbs_bool_idx],
dataset.df.verb_class.values[tail_verbs_bool_idx])
_, _, nrec5_tail, _, _ = compute_accuracy(
probs[1][tail_nouns_bool_idx],
dataset.df.noun_class.values[tail_nouns_bool_idx])
_, _, arec5_tail, _, _ = compute_accuracy(
probs[2][tail_actions_bool_idx],
dataset.df.action_class.values[tail_actions_bool_idx])
# for unseen
_, _, vrec5_unseen, _, _ = compute_accuracy(
probs[0][unseen_bool_idx],
dataset.df.verb_class.values[unseen_bool_idx])
_, _, nrec5_unseen, _, _ = compute_accuracy(
probs[1][unseen_bool_idx],
dataset.df.noun_class.values[unseen_bool_idx])
_, _, arec5_unseen, _, _ = compute_accuracy(
probs[2][unseen_bool_idx],
dataset.df.action_class.values[unseen_bool_idx])
return dict(
vrec5_tail=vrec5_tail,
nrec5_tail=nrec5_tail,
arec5_tail=arec5_tail,
vrec5_unseen=vrec5_unseen,
nrec5_unseen=nrec5_unseen,
arec5_unseen=arec5_unseen,
)
def compute_accuracies_epic(probs, dataset):
manyshot_classes = dataset.classes_manyshot
vtop1, vtop5, vrec5, vtop1_meancls, vrec5_per_cls = compute_accuracy(
probs[0], dataset.df.verb_class.values)
vrec5_ms, nrec5_ms, arec5_ms = float('nan'), float('nan'), float('nan')
if 'verb' in manyshot_classes:
_, _, vrec5_ms, _, _ = compute_accuracy(
probs[0],
dataset.df.verb_class.values,
classes=manyshot_classes['verb'])
ntop1, ntop5, nrec5, ntop1_meancls, nrec5_per_cls = compute_accuracy(
probs[1], dataset.df.noun_class.values)
if 'noun' in manyshot_classes:
_, _, nrec5_ms, _, _ = compute_accuracy(
probs[1],
dataset.df.noun_class.values,
classes=manyshot_classes['noun'])
atop1, atop5, arec5, atop1_meancls, arec5_per_cls = compute_accuracy(
probs[2], dataset.df.action_class.values)
if 'action' in manyshot_classes:
_, _, arec5_ms, _, _ = compute_accuracy(
probs[2],
dataset.df.action_class.values,
classes=manyshot_classes['action'])
res = {
'vtop1': vtop1,
'vtop5': vtop5,
'vrec5': vrec5,
'vrec5_ms': vrec5_ms,
'vtop1_meancls': vtop1_meancls,
'vrec5_per_cls': vrec5_per_cls,
'ntop1': ntop1,
'ntop5': ntop5,
'nrec5': nrec5,
'nrec5_ms': nrec5_ms,
'ntop1_meancls': ntop1_meancls,
'nrec5_per_cls': nrec5_per_cls,
'atop1': atop1,
'atop5': atop5,
'arec5': arec5,
'arec5_ms': arec5_ms,
'atop1_meancls': atop1_meancls,
'arec5_per_cls': arec5_per_cls,
}
if dataset.version == epic_kitchens.EPIC100_VERSION:
res.update(epic100_unseen_tail_eval(probs, dataset))
return res
def get_epic_marginalize_verb_noun(
run_info, dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX):
res_action = get_logits_from_results(
read_results(*run_info, results_dir=f'results{dataset_key_suffix}'))
dataset = get_dataset(*run_info, dataset_key_suffix=dataset_key_suffix)
if isinstance(res_action, dict):
print(f'Found logits outputs for verb noun as well [{run_info}]')
# It has multiple heads for verb/noun as well
res_verb = res_action['logits/verb']
res_noun = res_action['logits/noun']
res_action = res_action['logits/action']
else:
res_action_probs = softmax(res_action, axis=-1)
# Marginalize the other dimension, using the mapping matrices I store
# in the dataset obj
res_verb = np.matmul(
res_action_probs,
dataset.class_mappings[('verb', 'action')].numpy())
res_noun = np.matmul(
res_action_probs,
dataset.class_mappings[('noun', 'action')].numpy())
accuracies = compute_accuracies_epic([res_verb, res_noun, res_action],
dataset)
# Returning the actual scores for actions instead of the probs. Found
# better results with this, and Sener et al. ECCV'20 does the same.
scores = [res_verb, res_noun, res_action]
return accuracies, scores, dataset
def read_scores_from_pkl(pkl_fpath):
"""
    This is to read the data as I dump it in the ActionBanks code
"""
with open(pkl_fpath, 'rb') as fin:
scores = pkl.load(fin)
return [
scores['verb_scores'], scores['noun_scores'], scores['action_scores']
]
def load_json(fpath, verb_noun_to_action, nclasses):
"""
Args:
fpath: Path to the json
verb_noun_to_action: Dict from (verb_id, noun_id) to action_id
nclasses: A list of 3 elements, with the label space for verb/noun/act
Returns: a dict with
{uid1: score1, uid2: score2 ...}
"""
assert len(nclasses) == 3, 'One for verb/noun/action'
with open(fpath, 'r') as fin:
preds = json.load(fin)
# Res for verb/noun/action
all_res = []
for j, space in enumerate(['verb', 'noun', 'action']):
# Convert to a {uid: <scores>} format
res = {}
for key, val in preds['results'].items():
# Will be using 0 for all the scores not defined. Should be fine given
# top 100 should be enough for late fusion etc, metrics are like top-5
# anyway.
scores = np.zeros((nclasses[j], ))
for i, score in val[space].items():
if space == 'action':
# Since for actions the "key" is (verb, noun) tuple,
# need to convert it to an action index by
# verb_id * noun_count + noun_id
idx = tuple(int(el) for el in i.split(','))
idx = verb_noun_to_action[idx]
else:
idx = int(i)
scores[idx] = score
res[key] = scores
all_res.append(res)
return all_res
def _concat_with_uids(scores, dataset, uid_key):
# Make a dict with the IDs from the dataset
# There will be 3 elements in scores -- verb, noun, action
return [
dict(
zip([str(el)
for el in dataset.df[uid_key].values], scores_per_space))
for scores_per_space in scores
]
def _normalize_scores(scores, p):
"""This brings the scores between 0 to 1, and normalizes by """
res = []
for scores_per_space in scores:
res.append({
uid: val / (np.linalg.norm(val, ord=p, axis=-1) + 0.000001)
for uid, val in scores_per_space.items()
})
return res
def _get_avg_norm_scores(scores, p):
"""Remove the UID keys etc, and then compute."""
scores = np.array([val for _, val in scores.items()])
return np.mean(np.linalg.norm(scores, ord=p, axis=-1), axis=0)
def get_epic_marginalize_late_fuse(
run_infos,
weights=1.0,
dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX,
uid_key='uid',
eventual_fname='seen.json',
normalize_before_combine=None):
"""
Args:
eventual_fname: This is used to read prepackaged outputs from result
files, and using the filename to know which file to look for
when a directory is passed in as run info.
normalize_before_combine: Set to non-None to normalize the features
by that p-norm, and then combine. So the weights would have to be
defined w.r.t normalized features.
"""
all_scores = []
all_datasets = []
for run_info_id, run_info in enumerate(run_infos):
if isinstance(run_info[0], dict):
            # These are likely pre-computed scores (eg from a nested
            # get_epic_marginalize.. call), so I just use the scores as is.
scores = run_info
elif os.path.isdir(run_info[0]):
assert len(all_datasets) > 0, (
'Need at least 1 datasets to be read before reading from json '
'to figure the verb/noun -> action_id and '
'to figure the total number of classes to gen feat vectors')
scores = load_json(
os.path.join(run_info[0], eventual_fname),
all_datasets[-1].verb_noun_to_action,
[list(el.values())[0].shape[-1] for el in all_scores[-1]])
elif run_info[0].endswith('.pkl'):
# This is the input used to read predictions from the action_banks
# codebase, where I dump output into pkl and read here for late
# fusion.
scores = read_scores_from_pkl(run_info[0])
assert len(
all_datasets) > 0, 'At least one run_info must be passed in'
scores = _concat_with_uids(scores, all_datasets[-1], uid_key)
else:
accuracies, scores, dataset = get_epic_marginalize_verb_noun(
run_info, dataset_key_suffix=dataset_key_suffix)
scores = _concat_with_uids(scores, dataset, uid_key)
print_accuracies_epic(accuracies, prefix=run_info)
all_datasets.append(dataset)
if normalize_before_combine is not None:
scores = _normalize_scores(scores, p=normalize_before_combine)
logging.warning(
'Adding scores from run_info %d with avg action L1 norm of %f',
run_info_id, _get_avg_norm_scores(scores[-1], p=1))
all_scores.append(scores)
# Late fuse
if isinstance(weights, float):
weights = [weights] * len(run_infos)
else:
assert len(weights) == len(run_infos)
# broadcastable_weights = np.array(weights)[:, np.newaxis, np.newaxis]
# Combined scores by combining the corresponding score for each uid.
combined = []
for space_id in range(3): # verb/noun/action
scores_for_space = [scores[space_id] for scores in all_scores]
# Take the union of all the UIDs we have score for
total_uids = set.union(*[set(el.keys()) for el in scores_for_space])
logging.warning('Combined UIDs: %d. UIDs in the runs %s',
len(total_uids),
[len(el.keys()) for el in scores_for_space])
combined_for_space = {}
for uid in total_uids:
combined_for_space[uid] = []
for run_id, scores_for_space_per_run in enumerate(
scores_for_space):
if uid in scores_for_space_per_run:
combined_for_space[uid].append(
scores_for_space_per_run[uid] * weights[run_id])
combined_for_space[uid] = np.sum(np.stack(combined_for_space[uid]),
axis=0)
combined.append(combined_for_space)
# Now to compute accuracies, need to convert back to np arrays from dict.
# Would only work for parts that are in the dataset
combined_np = []
for combined_for_space in combined:
combined_np.append(
np.array([
combined_for_space[str(uid)]
for uid in all_datasets[-1].df[uid_key].values
]))
accuracies = compute_accuracies_epic(combined_np, all_datasets[-1])
return accuracies, combined, all_datasets[-1]
def summarize_results(cfg_name, metric='arec5'):
"""
Read all runs corr to cfg_name, and show the results in a human readable
form with the config overrides (unique) that were active. It averages
over runs too.
"""
run_cfgs = read_file_into_list('../' + cfg_name)
run_cfgs_hydra = get_config(cfg_name, run_id=None)
# Convert to dicts
run_cfgs = [(i, dict([el.split('=') for el in conf]))
for i, conf in enumerate(run_cfgs)]
# Keep only the stuff that changes across them
run_cfgs = subselect_dict_keys_diff(run_cfgs)
all_res = {}
for (run_id, params), cfg_hydra in tqdm(zip(run_cfgs, run_cfgs_hydra),
total=len(run_cfgs),
desc='Loading results'):
try:
accuracies, _, _ = get_epic_marginalize_verb_noun(
(cfg_name, run_id))
epoch = get_epoch_from_resdir(cfg_name, run_id)
except (EmptyResdirError, OSError): # H5 didn't let it read
continue
if epoch != cfg_hydra.train.num_epochs:
# This training has not finished
continue
run_id = 0
if 'run_id' in params:
run_id = int(params['run_id'])
del params['run_id']
params_hash = tuple(sorted(list(params.items())))
if params_hash not in all_res:
all_res[params_hash] = {}
all_res[params_hash][run_id] = accuracies[metric]
for params_hash in all_res:
run_ids, values = zip(*all_res[params_hash].items())
print(f'{params_hash} [{run_ids}]: [{values}] '
f'mean: {np.mean(values)}, std: {np.std(values)}')
def plot_per_cls_perf(run_infos_all: list,
names: list,
metrics: list = ['vrec5_per_cls', 'nrec5_per_cls'],
cls_types: list = ['verb', 'noun'],
show_topn: int = 10,
xticks_rotation: float = 0,
show_subset: callable = None,
outfpath: str = 'figs/improved/'):
"""
Args:
run_infos_all: [[(cfg, sweep_id), (cfg, sweep_id)...],
[(cfg, sweep_id), (cfg, sweep_id)...], ...]
names: The name for each run_info group
metrics: There will be 1 graph for each
"""
assert len(run_infos_all) == len(names)
assert len(metrics) == len(cls_types)
final_accs = {cls_type: [] for cls_type in cls_types}
for i, run_infos in enumerate(tqdm(run_infos_all, desc='Reading acc')):
for run_id, run_info in enumerate(run_infos):
cfg_fpath, sweep_id = run_info
all_accuracies, _, dataset = get_epic_marginalize_verb_noun(
(cfg_fpath, sweep_id))
for metric, cls_type in zip(metrics, cls_types):
accuracies = all_accuracies[metric]
assert isinstance(accuracies,
dict), 'Supports per-class for now'
classes = operator.attrgetter(f'{cls_type}_classes')(dataset)
cls_id_to_name = {v: k for k, v in classes.items()}
for cls_id, score in accuracies.items():
final_accs[cls_type].append({
'method':
names[i],
'run_id':
run_id,
'cls_name':
cls_id_to_name[cls_id],
'accuracy':
score,
})
for cls_type in final_accs:
accs = pd.DataFrame(final_accs[cls_type])
# Print logs
for method in names:
for run_id in accs.run_id.unique():
this_acc = (accs[accs.method == method][
accs.run_id == run_id].accuracy.mean())
print(f'Check {method} {run_id}: {this_acc}')
mean_acc_by_cls = accs.groupby(['method',
'cls_name']).mean().reset_index()
first_col = mean_acc_by_cls[mean_acc_by_cls.method == names[0]]
last_col = mean_acc_by_cls[mean_acc_by_cls.method == names[-1]]
merged = first_col[['cls_name', 'accuracy'
]].merge(last_col[['cls_name', 'accuracy']],
on='cls_name',
how='outer',
suffixes=['_first', '_last'])
# get the largest gains
gains = (merged['accuracy_last'] -
merged['accuracy_first']).sort_values()
gained_labels = merged.loc[gains.index].cls_name.tolist()
if show_subset is not None:
gained_labels = [el for el in gained_labels if show_subset(el)]
gained_labels = gained_labels[-show_topn:]
accs_largegains = accs[accs.cls_name.isin(gained_labels)]
fig = plt.figure(num=None,
figsize=(2 * len(gained_labels), 4),
dpi=300)
ax = sns.barplot(x='cls_name',
y='accuracy',
hue='method',
data=accs_largegains,
order=gained_labels,
errwidth=1.0)
ax.set_xlabel('Classes')
ax.set_ylabel('Recall @ 5')
ax.set_xticklabels(ax.get_xticklabels(),
rotation=xticks_rotation,
ha='center')
plt.show()
save_graph(fig, os.path.join(outfpath, cls_type + '.pdf'))
def get_struct_outputs_per_dataset(run_infos,
weights,
dataset_key_suffix,
uid_key='uid',
eventual_fname='seen.json',
normalize_before_combine=None):
_, combined, dataset = get_epic_marginalize_late_fuse(
run_infos,
weights,
dataset_key_suffix=dataset_key_suffix,
uid_key=uid_key,
eventual_fname=eventual_fname,
normalize_before_combine=normalize_before_combine)
results = {}
# Now the following may not be true since if the run_info contains an
# actual json, it might have more rows etc.
# assert len(combined[0]) == len(dataset)
action_to_verb_noun = {
val: key
for key, val in dataset.verb_noun_to_action.items()
}
for uid in tqdm(combined[0].keys(), desc='Computing res'):
verb_res = {f'{j}': val for j, val in enumerate(combined[0][uid])}
noun_res = {f'{j}': val for j, val in enumerate(combined[1][uid])}
top_100_actions = sorted(np.argpartition(combined[2][uid],
-100)[-100:],
key=lambda x: -combined[2][uid][x])
action_res = {
','.join((str(el)
for el in action_to_verb_noun[j])): combined[2][uid][j]
for j in top_100_actions
}
results[f'{uid}'] = {
'verb': verb_res,
'noun': noun_res,
'action': action_res,
}
# Add in all the discarded dfs with uniform distribution
if dataset.discarded_df is not None:
for _, row in dataset.discarded_df.iterrows():
if str(row[uid_key]) in results:
continue
results[f'{row[uid_key]}'] = {
'verb':
{f'{j}': 0.0
for j in range(len(dataset.verb_classes))},
'noun':
{f'{j}': 0.0
for j in range(len(dataset.noun_classes))},
'action': {f'0,{j}': 0.0
for j in range(100)},
}
output_dict = {
'version': f'{dataset.version}',
'challenge': dataset.challenge_type,
'results': results
}
return output_dict
def package_results_for_submission(run_infos,
weights,
normalize_before_combine=None):
res_s1 = get_struct_outputs_per_dataset(
run_infos,
weights,
dataset_key_suffix='',
eventual_fname='seen.json',
normalize_before_combine=normalize_before_combine)
res_s2 = get_struct_outputs_per_dataset(
run_infos,
weights,
dataset_key_suffix='_s2',
eventual_fname='unseen.json',
normalize_before_combine=normalize_before_combine)
# write it out in the first run's output dir
output_dir = osp.join(OUTPUT_DIR, run_infos[0][0], str(run_infos[0][1]),
'challenge')
print(f'Saving outputs to {output_dir}')
os.makedirs(output_dir, exist_ok=True)
with open(osp.join(output_dir, 'seen.json'), 'w') as fout:
json.dump(res_s1, fout, indent=4)
with open(osp.join(output_dir, 'unseen.json'), 'w') as fout:
json.dump(res_s2, fout, indent=4)
subprocess.check_output(
f'zip -j {output_dir}/submit.zip '
f'{output_dir}/seen.json '
f'{output_dir}/unseen.json ',
shell=True)
def package_results_for_submission_ek100(run_infos, weights, sls=[1, 4, 4]):
res = get_struct_outputs_per_dataset(run_infos,
weights,
dataset_key_suffix='',
uid_key='narration_id',
eventual_fname='test.json')
res['sls_pt'] = sls[0]
res['sls_tl'] = sls[1]
res['sls_td'] = sls[2]
# write it out in the first run's output dir
output_dir = osp.join(OUTPUT_DIR, run_infos[0][0], str(run_infos[0][1]),
'challenge')
print(f'Saving outputs to {output_dir}')
os.makedirs(output_dir, exist_ok=True)
with open(osp.join(output_dir, 'test.json'), 'w') as fout:
json.dump(res, fout, indent=4)
subprocess.check_output(
f'zip -j {output_dir}/submit.zip '
f'{output_dir}/test.json ',
shell=True)

AVT-main | notebooks/utils.py

# Copyright (c) Facebook, Inc. and its affiliates.
"""
Modular implementation of the basic train ops
"""
from typing import Dict, Union, Tuple
import torch
import torch.nn as nn
import hydra
from hydra.types import TargetConf
from common import utils
from datasets.base_video_dataset import FUTURE_PREFIX
from models.base_model import PAST_LOGITS_PREFIX
from loss_fn.multidim_xentropy import MultiDimCrossEntropy
class NoLossAccuracy(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, *args, **kwargs):
return {}, {}
class BasicLossAccuracy(nn.Module):
def __init__(self, dataset, device, balance_classes=False):
super().__init__()
kwargs = {'ignore_index': -1}
if balance_classes:
assert dataset.class_balanced_sampling is False, (
'Do not re-weight the losses, and do balanced sampling')
weight = torch.zeros((len(dataset.classes, )),
device=device,
dtype=torch.float)
for cls_id, count in dataset.classes_counts.items():
weight[cls_id] = count
weight = weight / torch.sum(weight) # To get ratios for non -1 cls
weight = 1 / (weight + 0.00001)
kwargs['weight'] = weight
kwargs['reduction'] = 'none' # to get batch level output
self.cls_criterion = MultiDimCrossEntropy(**kwargs)
def forward(self, outputs, target, target_subclips):
"""
Args:
outputs['logits'] torch.Tensor (B, num_classes) or
(B, T, num_classes)
Latter in case of dense prediction
target: {type: (B) or (B, T')}; latter in case of dense prediction
target_subclips: {type: (B, #clips, T)}: The target for each input
frame
"""
losses = {}
accuracies = {}
for tgt_type, tgt_val in target.items():
logits = outputs[f'logits/{tgt_type}']
assert logits.ndim == tgt_val.ndim + 1
loss = self.cls_criterion(logits, tgt_val)
dataset_max_classes = logits.size(-1)
acc1, acc5 = utils.accuracy(logits,
tgt_val,
topk=(1, min(5, dataset_max_classes)))
# Don't use / in loss since I use the config to set weights, and
# can't use / there.
losses[f'cls_{tgt_type}'] = loss
accuracies[f'acc1/{tgt_type}'] = acc1
accuracies[f'acc5/{tgt_type}'] = acc5
# Incur past losses
past_logits_key = f'{PAST_LOGITS_PREFIX}logits/{tgt_type}'
# If this key exists, means we asked for classifier on the last
# layer, so the loss should be incurred.
if past_logits_key in outputs and target_subclips is not None:
past_logits = outputs[past_logits_key]
# Take mode over the frames to get the subclip level loss
past_target = torch.mode(target_subclips[tgt_type], -1)[0]
assert past_logits.shape[:-1] == past_target.shape, (
f'The subclips should be set such that the past logits '
f'and past targets match in shape. Currently they are '
f'{past_logits.shape} and {past_target.shape}')
losses[f'past_cls_{tgt_type}'] = self.cls_criterion(
past_logits, past_target)
# Else likely not using subclips, so no way to do this loss
return losses, accuracies
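
# Editor's note: illustrative sketch, not part of the original AVT file. It
# reproduces just the balance_classes weight computation from the class above
# for a toy counts dict: classes are weighted by (approximately) inverse
# frequency, so rare classes contribute more to the loss.
def _demo_inverse_frequency_weights():
    classes_counts = {0: 90, 1: 9, 2: 1}   # class_id -> #samples
    weight = torch.zeros(len(classes_counts))
    for cls_id, count in classes_counts.items():
        weight[cls_id] = count
    weight = weight / torch.sum(weight)    # class frequencies
    weight = 1 / (weight + 0.00001)        # roughly [1.1, 11.1, 100.0]
    return weight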
class Basic:
def __init__(self,
model,
device,
dataset,
cls_loss_acc_fn: TargetConf,
reg_criterion: TargetConf = None):
super().__init__()
self.model = model
self.device = device
self.cls_loss_acc_fn = hydra.utils.instantiate(cls_loss_acc_fn,
dataset, device)
del reg_criterion # not used here
def _basic_preproc(self, data, train_mode):
if not isinstance(data, dict):
video, target = data
# Make a dict so that later code can use it
data = {}
data['video'] = video
data['target'] = target
data['idx'] = -torch.ones_like(target)
if train_mode:
self.model.train()
else:
self.model.eval()
return data
def __call__(
self,
data: Union[Dict[str, torch.Tensor], # If dict
Tuple[torch.Tensor, torch.Tensor]], # vid, target
train_mode: bool = True):
"""
Args:
data (dict): Dictionary of all the data from the data loader
"""
data = self._basic_preproc(data, train_mode)
video = data['video'].to(self.device, non_blocking=True)
target = {}
target_subclips = {}
for key in data['target'].keys():
target[key] = data['target'][key].to(self.device,
non_blocking=True)
outputs, aux_losses = self.model(video,
target_shape=next(
iter(target.values())).shape)
if 'target_subclips' in data:
for key in data['target_subclips'].keys():
target_subclips[key] = data['target_subclips'][key].to(
self.device, non_blocking=True)
else:
target_subclips = None
losses, accuracies = self.cls_loss_acc_fn(outputs, target,
target_subclips)
losses.update(aux_losses)
return data, outputs, losses, accuracies
class PredFutureFeat(Basic):
def __init__(self,
*args,
reg_criterion: TargetConf = None,
future_target: str = 'temp_agg_projected',
incur_loss_style: str = 'separately',
combine_future_losses: TargetConf = {'_target_': 'torch.min'},
cumulative_future: bool = False,
**kwargs):
'''
Args:
incur_loss_style (str): Defines how to incur losses for multiple
futures. Could do 'separately', and then combine using
`combine_future_losses`. Or 'together', such as for MIL-NCE.
'''
super().__init__(*args, **kwargs)
self.reg_criterion = hydra.utils.instantiate(reg_criterion)
self.future_target = future_target
self.incur_loss_style = incur_loss_style
self.combine_future_losses = combine_future_losses
self.cumulative_future = cumulative_future
def __call__(
self,
data: Union[Dict[str, torch.Tensor], # If dict
Tuple[torch.Tensor, torch.Tensor]], # vid, target
train_mode: bool = True):
data = self._basic_preproc(data, train_mode)
video = data['video'].to(self.device, non_blocking=True)
target = {
key: val.to(self.device, non_blocking=True)
for key, val in data['target'].items()
}
batch_size = video.size(0)
if train_mode:
# At test time, I don't sample the extra future video, since
# that is only used during training
all_videos = [video]
nfutures = len(
[key for key in data.keys() if key.startswith(FUTURE_PREFIX)])
for i in range(nfutures):
future_vid = data[f'{FUTURE_PREFIX}_{i}_video'].to(
self.device, non_blocking=True)
all_videos.append(future_vid)
video = torch.cat(all_videos, dim=0) # Add to batch dim
outputs_full, aux_losses = self.model(video)
# Just the actual video for outputs
outputs = {key: val[:batch_size] for key, val in outputs_full.items()}
# if self.cls_loss_wt != 0:
# Doing this makes some layers not have gradients and it gives errors,
# so just leaving it here for now. The gradient should be 0 anyway
losses, accuracies = self.cls_loss_acc_fn(outputs, target)
losses.update(aux_losses)
losses['cls'] = losses['cls']
if train_mode:
# Incur the regression losses, for each of the futures
reg_losses = []
if self.incur_loss_style == 'separately':
for i in range(nfutures):
future_feats = outputs_full[self.future_target][
(i + 1) * batch_size:(i + 2) * batch_size]
if self.cumulative_future:
future_feats = torch.cumsum(future_feats, 0)
# Divide by the position to get mean of features until then
future_feats = future_feats / (torch.range(
1,
future_feats.size(0),
device=future_feats.device,
dtype=future_feats.dtype).unsqueeze(1))
loss = self.reg_criterion(outputs['future_projected'],
future_feats)
reg_losses.append(loss)
final_reg_loss = hydra.utils.call(self.combine_future_losses,
torch.stack(reg_losses))
elif self.incur_loss_style == 'together':
future_feats = outputs_full[self.future_target][batch_size:]
future_feats = future_feats.reshape(
(-1, batch_size, future_feats.size(-1))).transpose(0, 1)
final_reg_loss = self.reg_criterion(
outputs['future_projected'], future_feats)
else:
raise NotImplementedError(self.incur_loss_style)
losses['reg'] = final_reg_loss
return data, outputs, losses, accuracies
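
# Editor's note: illustrative sketch, not part of the original AVT file. The
# cumulative_future branch above uses a cumulative sum divided by the position
# to turn a sequence of feature vectors into running means ("mean of features
# until then"); torch.range used there is deprecated but equivalent to
# torch.arange(1, N + 1) here.
def _demo_cumulative_future_mean():
    feats = torch.randn(5, 16)   # (num_steps, C)
    denom = torch.arange(1, feats.size(0) + 1, dtype=feats.dtype).unsqueeze(1)
    running_mean = torch.cumsum(feats, 0) / denom
    assert torch.allclose(running_mean[2], feats[:3].mean(0), atol=1e-5)
    return running_mean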

AVT-main | func/train_eval_ops.py

from . import train

AVT-main | func/__init__.py

# Copyright (c) Facebook, Inc. and its affiliates.
"""Training code."""
from typing import Union, Sequence
import datetime
import os
import time
import sys
import logging
import itertools
import operator
import psutil
import h5py
import subprocess
from tqdm import tqdm
import numpy as np
# Need to import this here, as with pytorch 1.7.1 (or some other CLIP dep)
# it's giving a segmentation fault
# https://github.com/pytorch/pytorch/issues/30651
# Needs to be imported before torchvision, it seems
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.utils.data
from torch.utils.data.dataloader import default_collate
from torch import nn
import torchvision
import torchvision.datasets.video_utils
from torchvision.datasets.samplers import (DistributedSampler,
UniformClipSampler,
RandomClipSampler)
import torch.distributed as dist
import hydra
from omegaconf import OmegaConf
from models import base_model
from common import scheduler, utils, transforms as T
from common.log import MetricLogger, setup_tbx, get_default_loggers
from datasets.data import get_dataset
from notebooks import utils as nb_utils
__all__ = ['main', 'evaluate', 'train_one_epoch', 'initial_setup']
RESULTS_SAVE_DIR = 'results' # Don't put a "/" at the end, will add later
CKPT_FNAME = 'checkpoint.pth'
DATASET_TRAIN_CFG_KEY = 'dataset_train'
DATASET_EVAL_CFG_KEY = 'dataset_eval'
STR_UID_MAXLEN = 64 # Max length of the string UID stored in H5PY
def store_checkpoint(fpaths: Union[str, Sequence[str]], model, optimizer,
lr_scheduler, epoch):
"""
Args:
fpaths: List of paths or a single path, where to store.
model: the model to be stored
optimizer, lr_scheduler
epoch: How many epochs have elapsed when this model is being stored.
"""
model_without_ddp = model
if isinstance(model, nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
checkpoint = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
}
if not isinstance(fpaths, list):
fpaths = [fpaths]
for fpath in fpaths:
logging.info('Storing ckpt at epoch %f to %s', epoch, fpath)
utils.save_on_master(checkpoint, fpath)
def _store_video_logs(data, key, step_id, print_large_freq, metric_logger):
"""
Args:
data[key] -> video (B, #clips, 3, T, H, W)
"""
if metric_logger.writer is None:
return
if step_id % print_large_freq != 0:
return
if key not in data:
return
video = data[key]
if video.ndim != 6:
return
## Store the videos
# Swap dims to get N*#clips,T,C,H,W format used by tensorboard
video = torch.flatten(video, 0, 1)
vid_log = torch.transpose(video, 1, 2)
vid_log = vid_log - vid_log.min()
vid_log = vid_log / vid_log.max()
kwargs = {}
if 'video_info' in data:
# Can't specify different frame rate for videos, so use the min
kwargs['fps'] = max(
data['video_info']['video_fps'].min().cpu().numpy().tolist(), 4)
metric_logger.writer.add_video(key, vid_log, step_id, **kwargs)
def _store_scalar_logs(name, val, step_id, print_freq, metric_logger):
if metric_logger.writer is None:
return
if step_id % print_freq != 0:
return
metric_logger.writer.add_scalar(name, val, step_id)
def _get_memory_usage_gb():
mem = psutil.virtual_memory()
return mem.used / (1024**3)
def _compute_final_acc_from_stored(results_dir, dataset):
results = nb_utils.read_results(os.getcwd(), '', results_dir)
accs = {}
for key in results.keys():
if not key.startswith('logits/'):
continue
base_key = key[len('logits/'):]
top1, top5, ar5, top1_meancls, _ = nb_utils.compute_accuracy(
results[key], results[f'target/{base_key}'])
_, _, ar5_ms, _, _ = nb_utils.compute_accuracy(
results[key], results[f'target/{base_key}'],
dataset.classes_manyshot[base_key])
accs[f'final_acc/{base_key}/top1'] = top1
accs[f'final_acc/{base_key}/top1_meanOverClasses'] = top1_meancls
accs[f'final_acc/{base_key}/top5'] = top5
accs[f'final_acc/{base_key}/AR5'] = ar5
accs[f'final_acc/{base_key}/AR5_manyshot'] = ar5_ms
return accs
def train_one_epoch(
train_eval_op,
optimizer,
lr_scheduler,
data_loader,
epoch: int,
partial_epoch: float,
metric_logger,
logger,
last_saved_time,
# kwargs:
print_freq,
print_large_freq,
grad_clip_params,
loss_wts, # All the loss wts go here
save_freq: float, # num epochs to save at. Could be fractional.
save_freq_min: float, # Save a checkpoint every this many minutes
save_intermediates: bool,
):
"""
Args:
epoch (int) defines how many full epochs have finished
partial_epoch (float): Defines the ratio of the last epoch that was
finished before the current model was written out
"""
header = 'Epoch: [{}]'.format(epoch)
batches_per_epoch = len(data_loader)
# Run the data loader for the partial epochs
partial_iters = int(batches_per_epoch * partial_epoch)
if partial_iters > 0:
# TODO: Figure a better way to do this ... too slow
for i, _ in tqdm(enumerate(data_loader),
desc=(f'Loading and throwing data for '
f'{partial_epoch:0.8f} epochs or '
f'{partial_iters} iters'),
total=partial_iters):
if i >= partial_iters:
break
if save_freq:
save_freq_steps = int(save_freq * batches_per_epoch)
logger.info('Storing checkpoints every %0.8f epochs, or '
'%d steps', save_freq, save_freq_steps)
if save_freq_min:
logger.info('Storing checkpoints every %0.2f mins', save_freq_min)
for i, data in enumerate(
metric_logger.log_every(data_loader, print_freq, header),
partial_iters):
step_id = epoch * batches_per_epoch + i
cur_epoch = step_id / batches_per_epoch # Fractional value
time_now = datetime.datetime.now()
mins_since_last_saved = (time_now -
last_saved_time).total_seconds() / 60.0
if (save_freq and step_id % save_freq_steps == 0) or (
save_freq_min and (mins_since_last_saved >= save_freq_min)):
# Not storing in the main checkpoint, keeping that only for the
# models at full epoch boundaries. So set save_intermediates true
# to save models at these points
ckpt_names = []
if save_intermediates:
ckpt_names.append(f'checkpoint_ep{cur_epoch:.8f}.pth')
store_checkpoint(ckpt_names, train_eval_op.model, optimizer,
lr_scheduler, cur_epoch)
last_saved_time = time_now
start_time = time.time()
data, _, losses, accuracies = train_eval_op(data, train_mode=True)
# Reduce the losses, since by default I no longer reduce the losses,
# to be able to store the outputs
losses = {key: torch.mean(val) for key, val in losses.items()}
# Weight the losses
losses_wtd = []
for key, val in losses.items():
this_loss_wt = operator.attrgetter(key)(loss_wts)
# This will ensure only non 0 loss wts contribute, else otherwise
# the weight decay will still be associated with this loss.
if this_loss_wt > 0:
losses_wtd.append(this_loss_wt * val)
# Use the total loss to backprop etc
loss = torch.sum(torch.stack(losses_wtd))
if torch.isnan(loss):
raise ValueError('The loss is NaN!')
optimizer.zero_grad()
loss.backward()
# Clip the gradients if asked for
if grad_clip_params['max_norm'] is not None:
params_being_optimized = []
for param_group in optimizer.param_groups:
params_being_optimized += param_group['params']
assert len(params_being_optimized) > 0, 'Shouldnt be training else'
torch.nn.utils.clip_grad_norm_(params_being_optimized,
**grad_clip_params)
optimizer.step()
batch_size = data_loader.batch_size
metric_logger.update(loss=loss.item(),
lr=optimizer.param_groups[0]['lr'])
metric_logger.meters['clips/s'].update(batch_size /
(time.time() - start_time))
# Store logs in a sane way
for acc_key, acc_val in accuracies.items():
metric_logger.meters[acc_key].update(acc_val.item(), n=batch_size)
for loss_key, loss_val in losses.items():
_store_scalar_logs(f'train_per_iter/loss/{loss_key}', loss_val,
step_id, print_freq, metric_logger)
_store_scalar_logs('train_per_iter/loss', loss, step_id, print_freq,
metric_logger)
_store_scalar_logs('train_per_iter/lr',
optimizer.param_groups[0]['lr'], step_id,
print_freq, metric_logger)
_store_scalar_logs('train_per_iter/sys/cpu_mem_use_gb',
_get_memory_usage_gb(), step_id, print_freq,
metric_logger)
# Store video logs for all videos (future, current etc)
[
_store_video_logs(data, key, step_id, print_large_freq,
metric_logger) for key in data
if key.endswith('video')
]
if not isinstance(lr_scheduler.base_scheduler,
scheduler.ReduceLROnPlateau):
# If it is, then that is handled in the main training loop,
# since it uses the validation accuracy to step down
lr_scheduler.step()
return last_saved_time
def store_append_h5(endpoints, output_dir):
output_fpath = os.path.join(output_dir, f'{utils.get_rank()}.h5')
os.makedirs(output_dir, exist_ok=True)
with h5py.File(output_fpath, 'a') as fout:
for key, val in endpoints.items():
if key not in fout:
fout.create_dataset(key,
data=val,
compression='gzip',
compression_opts=9,
chunks=True,
maxshape=(None, ) + val.shape[1:])
else:
fout[key].resize((fout[key].shape[0] + val.shape[0], ) +
val.shape[1:])
fout[key][-val.shape[0]:, ...] = val
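
# Editor's note: illustrative sketch, not part of the original AVT file. The
# function above appends each batch to resizable (chunked, maxshape=None) HDF5
# datasets instead of collecting everything in memory. The demo assumes a
# non-distributed run where utils.get_rank() is 0 and writes to a fresh
# temporary directory.
def _demo_store_append_h5():
    import tempfile
    out_dir = tempfile.mkdtemp()
    store_append_h5({'logits/action': np.zeros((4, 10))}, out_dir)
    store_append_h5({'logits/action': np.ones((2, 10))}, out_dir)
    with h5py.File(os.path.join(out_dir, f'{utils.get_rank()}.h5'), 'r') as fin:
        assert fin['logits/action'].shape == (6, 10)   # 4 + 2 rows appended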
def _evaluate_store_logs(logger, metric_logger, acc_keys, store, this_save_dir,
data_key, data_loader, epoch, loss_names):
# gather the stats from all processes
metric_logger.synchronize_between_processes()
# gather all accuracies
final_accuracies = {}
# Using the loop variable name from earlier .. but ok for now to get
# the keys
for acc_key in acc_keys:
final_accuracies[acc_key] = metric_logger.meters[acc_key].global_avg
if store:
dist.barrier() # all the processes have written out the res
# Compute the AR@5: will have to read the stored outputs
final_accuracies.update(
_compute_final_acc_from_stored(this_save_dir, data_loader.dataset))
# store logs in a sane way
for acc_key, acc_val in final_accuracies.items():
_store_scalar_logs(f'eval_per_epoch{data_key}/{acc_key}', acc_val,
int(round(epoch)), 1, metric_logger)
for loss_name in loss_names:
_store_scalar_logs(f'eval_per_epoch{data_key}/loss_{loss_name}',
metric_logger.meters[loss_name].global_avg,
int(round(epoch)), 1, metric_logger)
    logger.info('[%s]', data_key)
    for key in metric_logger.meters:
        logger.info('%s: %f', key, metric_logger.meters[key].global_avg)
return final_accuracies
def evaluate(
train_eval_op,
data_loaders: dict,
tb_writer,
logger,
epoch: float, # Can be a partial epoch
store=True,
store_endpoint='logits',
only_run_featext=False):
"""
Args:
data_loaders: A dict from key (name) to a data loader. Allows to
multiple dataloaders for testing on.
only_run_featext (bool): Set this to true and it will return after the
features are extracted and won't compute final numbers etc. So
it will never try to sync processes etc, which leads to crashes.
"""
all_metric_loggers = {}
final_accuracies = {}
for data_key, data_loader in data_loaders.items():
logger.info('Running evaluation for {0}{1}'.format(
DATASET_EVAL_CFG_KEY, data_key))
header = f'[{data_key}] Test:'
metric_logger = MetricLogger(delimiter=' ',
writer=tb_writer,
stat_set='val' + data_key,
logger=logger)
all_metric_loggers[data_key] = metric_logger
this_save_dir = RESULTS_SAVE_DIR + data_key + '/'
if not only_run_featext:
# Delete the stored output features files, since with H5 they
# might be getting appended and will blow up. Note that if
# feature extraction was the goal and we wanted to append,
# need to set in the config to not delete the old files so it
# can append to what has already been computed
logger.info('Clearing %s/%s/*', os.getcwd(), this_save_dir)
subprocess.call(f'rm -r {this_save_dir}/*', shell=True)
for data in metric_logger.log_every(data_loader, 2, header):
with torch.no_grad():
data, outputs, losses, accuracies = train_eval_op(
data, train_mode=False)
# Reduce the losses, since by default I no longer reduce the
# losses, to be able to store the outputs
losses_reduced = {
key: torch.mean(val)
for key, val in losses.items()
}
loss = torch.sum(torch.stack(list(losses_reduced.values())))
if store:
# allow to store logits and logits_regression if that's in too
all_logits = {
key: outputs[key].detach().cpu().numpy()
for key in outputs if key.startswith(store_endpoint)
}
all_logits.update({'idx': data['idx'].detach().cpu().numpy()})
uid_data = np.array(data['uid'])
# If strings, convert format to work with HDF5
if uid_data.dtype.kind == 'U':
                    # So that it can store up to 64-char strings -- this is
                    # also the width the HDF5 file will use
assert int(uid_data.dtype.str[2:]) < STR_UID_MAXLEN, (
f'Make sure UID data is smaller than '
f'{STR_UID_MAXLEN}, or update that value of '
f'STR_UID_MAXLEN')
uid_data = uid_data.astype(f'S{STR_UID_MAXLEN}')
all_logits.update({'uid': uid_data})
# Storing the actual per batch/elt unreduced losses for
# potential analysis
all_logits.update({
'loss/' + key: val.detach().cpu()
for key, val in losses.items()
})
if not only_run_featext:
# store the targets as well
all_logits.update({
'target/' + key: val.detach().cpu().numpy()
for key, val in data['target'].items()
})
# Do the actual storage into HDF5s that can append to the
# stuff from previous batch. Doing it here rather than
# collecting (as I used to do) so that this can be used
# for feature extraction where storing into a list will
# be too expensive
all_logits.update({'epoch': np.array([epoch])})
store_append_h5(all_logits, this_save_dir)
# FIXME need to take into account that the datasets
# could have been padded in distributed setup
batch_size = data_loader.batch_size
metric_logger.update(loss=loss.item())
for acc_key, acc_val in accuracies.items():
metric_logger.meters[acc_key].update(acc_val.item(),
n=batch_size)
for loss_name, loss_val in losses_reduced.items():
metric_logger.meters[loss_name].update(loss_val.item(),
n=batch_size)
if not only_run_featext:
final_accuracies[data_key] = _evaluate_store_logs(
logger, metric_logger, accuracies.keys(), store, this_save_dir,
data_key, data_loader, epoch, losses_reduced.keys())
if only_run_featext:
# None of the rest is needed
return 0.0
# Return the accuracy on the main evaluation dataset, which must be the
# one which doesn't have any prefix (i.e. in the dataset_eval)
# Returning the accuracy metric that is most relevant to the dataset.
main_dataset_key = ''
main_metric = final_accuracies[main_dataset_key][
data_loaders[main_dataset_key].dataset.primary_metric]
return main_metric
def initial_setup(cfg, logger):
torchvision.set_video_backend(cfg.pytorch.video_backend)
if cfg.data_parallel:
dist_info = {}
dist_info['distributed'] = False
dist_info['world_size'] = torch.cuda.device_count()
# In DDP we set these params for a single process
cfg.train.batch_size *= dist_info['world_size']
cfg.eval.batch_size *= dist_info['world_size']
else:
dist_info = utils.init_distributed_mode(logger,
dist_backend=cfg.dist_backend)
logger.info("Dist info:", dist_info)
logger.info("torch version: %s", torch.__version__)
logger.info("torchvision version: %s", torchvision.__version__)
logger.info("hydra version: %s", hydra.__version__)
device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
writer = setup_tbx('logs/', SummaryWriter)
return dist_info, device, writer
def init_model(model, ckpt_path, modules_to_keep, logger):
"""Initialize model with weights from ckpt_path.
Args:
ckpt_path (str): A string with path to file
modules_to_keep (str): A comma sep string with the module name prefix
that should be loaded from the checkpoint
"""
logger.debug('Initing %s with ckpt path: %s, using modules in it %s',
model, ckpt_path, modules_to_keep)
checkpoint = torch.load(ckpt_path, map_location="cpu")
if 'model' in checkpoint.keys():
state_dict = checkpoint['model']
elif 'state_dict' in checkpoint.keys():
state_dict = checkpoint['state_dict']
elif 'classy_state_dict' in checkpoint.keys():
state_dict = checkpoint['classy_state_dict']
# This is likely coming from a VISSL codebase, so the actual trunk
# params will be as follows. Ideally support this more generally TODO
state_dict = state_dict['base_model']['model']['trunk']
else:
state_dict = checkpoint
if modules_to_keep:
# Keep only the elements of state_dict that match modules to keep.
# Also, remove that prefix from the names
filtered_state_dict = {}
for key, val in state_dict.items():
for mod_name in modules_to_keep.split(','):
if key.startswith(mod_name):
filtered_state_dict[key[len(mod_name):]] = val
                    break  # stop at the first matching prefix
state_dict = filtered_state_dict
# Ignore any parameters/buffers (bn mean/var) where shape does not match
for name, param in itertools.chain(model.named_parameters(),
model.named_buffers()):
if name in state_dict and state_dict[name].shape != param.shape:
logger.warning('Ckpt shape mismatch for %s (%s vs %s). Ignoring.',
name, state_dict[name].shape, param.shape)
del state_dict[name]
missing_keys, unexp_keys = model.load_state_dict(state_dict, strict=False)
    logger.warning('Keys missing when initializing from %s: %s', ckpt_path,
                   missing_keys)
logger.warning('Unused keys in %s: %s', ckpt_path, unexp_keys)
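# Illustrative sketch (not part of the original code): using init_model to load
# only a submodule's weights from a larger checkpoint. The module names and the
# temporary file below are made up for the example.
def _example_init_model():
    import tempfile
    import torch
    import torch.nn as nn
    class _Wrapper(nn.Module):
        def __init__(self):
            super().__init__()
            self.backbone = nn.Linear(4, 4)
            self.classifier = nn.Linear(4, 2)
    src = _Wrapper()
    with tempfile.NamedTemporaryFile(suffix='.pth') as tmp:
        torch.save({'model': src.state_dict()}, tmp.name)
        # Keep only keys starting with 'backbone.' and strip that prefix so
        # they line up with the parameter names of a bare nn.Linear
        tgt = nn.Linear(4, 4)
        init_model(tgt, tmp.name, 'backbone.', logging.getLogger(__name__))
        assert torch.allclose(tgt.weight, src.backbone.weight)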
def collate_fn_remove_audio(batch):
"""Remove audio from the batch.
Also remove any None(s) -- those were data points I wasn't able to read.
Not needed, and it doesn't batch properly since it is different length.
"""
batch = list(filter(lambda x: x is not None, batch))
if isinstance(batch[0], tuple):
batch = [(d[0], d[2]) for d in batch]
return default_collate(batch)
def _get_resize_shape(data_cfg):
scale_h = data_cfg.scale_h
scale_w = data_cfg.scale_w
if isinstance(scale_w, int) and scale_w == -1:
resize_shape = scale_h
else:
assert (not isinstance(scale_h, int) or scale_h != -1), (
'If using -1, must be used for scale_w. The smaller side will be '
'scaled by that size.')
resize_shape = (scale_h, scale_w)
return resize_shape
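# Illustrative sketch (not part of the original code): the two resize
# conventions handled by _get_resize_shape above, using made-up config values.
def _example_get_resize_shape():
    from omegaconf import OmegaConf
    # scale_w == -1 keeps the aspect ratio and scales the short side to scale_h
    assert _get_resize_shape(OmegaConf.create({'scale_h': 256,
                                               'scale_w': -1})) == 256
    # otherwise the clip is resized to exactly (scale_h, scale_w)
    assert _get_resize_shape(OmegaConf.create({'scale_h': 128,
                                               'scale_w': 171})) == (128, 171)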
def _get_pixel_mean_std(data_cfg):
return {'mean': tuple(data_cfg.mean), 'std': tuple(data_cfg.std)}
def _set_all_bn_to_not_track_running_mean(model):
"""
Set all batch norm layers to not use running mean.
"""
for module in model.modules():
# This should be able to capture any BatchNorm1d, 2d, 3d etc.
if isinstance(module, nn.modules.batchnorm._BatchNorm):
module.track_running_stats = False
return model
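# Illustrative sketch (not part of the original code): a quick check that the
# helper above switches every BatchNorm layer (1d/2d/3d) to not track running
# statistics. The toy network is made up for the example.
def _example_bn_not_tracking():
    import torch.nn as nn
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    net = _set_all_bn_to_not_track_running_mean(net)
    assert all(not m.track_running_stats for m in net.modules()
               if isinstance(m, nn.modules.batchnorm._BatchNorm))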
def main(cfg):
logger = logging.getLogger(__name__)
dist_info, device, writer = initial_setup(cfg, logger)
# Data loading code
logger.info("Loading data")
logger.info("\t Loading datasets")
st = time.time()
# separate these into get transforms
# TODO: This is gotten too complex: clean up, make interface better
transform_train = [
T.ToTensorVideo(),
T.Resize(_get_resize_shape(cfg.data_train)),
T.RandomHorizontalFlipVideo(cfg.data_train.flip_p),
T.ColorJitterVideo(brightness=cfg.data_train.color_jitter_brightness,
contrast=cfg.data_train.color_jitter_contrast,
saturation=cfg.data_train.color_jitter_saturation,
hue=cfg.data_train.color_jitter_hue),
torchvision.transforms.Lambda(
lambda x: x * cfg.data_train.scale_pix_val),
torchvision.transforms.Lambda(lambda x: x[[2, 1, 0], ...])
if cfg.data_train.reverse_channels else torchvision.transforms.Compose(
[]),
T.NormalizeVideo(**_get_pixel_mean_std(cfg.data_train)),
]
if cfg.data_train.crop_size is not None:
transform_train.append(
T.RandomCropVideo(
(cfg.data_train.crop_size, cfg.data_train.crop_size)), )
transform_train = torchvision.transforms.Compose(transform_train)
transform_eval = [
T.ToTensorVideo(),
T.Resize(_get_resize_shape(cfg.data_eval)),
torchvision.transforms.Lambda(
lambda x: x * cfg.data_eval.scale_pix_val),
torchvision.transforms.Lambda(lambda x: x[[2, 1, 0], ...]) if
cfg.data_eval.reverse_channels else torchvision.transforms.Compose([]),
T.NormalizeVideo(**_get_pixel_mean_std(cfg.data_eval)),
]
if cfg.data_eval.crop_size is not None:
transform_eval.append(
T.MultiCropVideo(
(cfg.data_eval.crop_size, cfg.data_eval.crop_size),
cfg.data_eval.eval_num_crops, cfg.data_eval.eval_flip_crops))
transform_eval = torchvision.transforms.Compose(transform_eval)
datasets_train = [
get_dataset(getattr(cfg, el), cfg.data_train, transform_train, logger)
for el in cfg.keys() if el.startswith(DATASET_TRAIN_CFG_KEY)
]
if len(datasets_train) > 1:
dataset = torch.utils.data.ConcatDataset(datasets_train)
else:
dataset = datasets_train[0]
# could be multiple test datasets
datasets_test = {
el[len(DATASET_EVAL_CFG_KEY):]:
get_dataset(getattr(cfg, el), cfg.data_eval, transform_eval, logger)
for el in cfg.keys() if el.startswith(DATASET_EVAL_CFG_KEY)
}
logger.info("Took %d", time.time() - st)
logger.info("Creating data loaders")
train_sampler = None
test_samplers = {key: None for key in datasets_test}
if hasattr(dataset, 'video_clips'):
assert cfg.train.shuffle_data, 'TODO'
train_sampler = RandomClipSampler(getattr(dataset, 'video_clips'),
cfg.data_train.train_bs_multiplier)
test_samplers = {
key: UniformClipSampler(val.video_clips,
cfg.data_eval.val_clips_per_video)
for key, val in datasets_test.items()
}
if dist_info['distributed']:
train_sampler = DistributedSampler(train_sampler)
            test_samplers = {
                key: DistributedSampler(val)
                for key, val in test_samplers.items()
            }
elif dist_info['distributed']:
# Distributed, but doesn't have video_clips
if cfg.data_train.use_dist_sampler:
train_sampler = torch.utils.data.distributed.DistributedSampler(
dataset,
num_replicas=dist_info['world_size'],
rank=dist_info['rank'],
shuffle=cfg.train.shuffle_data)
if cfg.data_eval.use_dist_sampler:
test_samplers = {
key: torch.utils.data.distributed.DistributedSampler(
val,
num_replicas=dist_info['world_size'],
rank=dist_info['rank'],
shuffle=False)
for key, val in datasets_test.items()
}
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=cfg.train.batch_size,
sampler=train_sampler,
num_workers=cfg.data_train.workers,
pin_memory=False, # usually hurts..
shuffle=(train_sampler is None and cfg.train.shuffle_data),
collate_fn=collate_fn_remove_audio,
)
data_loaders_test = {
key: torch.utils.data.DataLoader(
val,
# Since no backprop, so can have a larger batch size
batch_size=cfg.eval.batch_size or cfg.train.batch_size * 4,
sampler=test_samplers[key],
num_workers=cfg.data_eval.workers,
pin_memory=False, # Usually hurts..
shuffle=False,
collate_fn=collate_fn_remove_audio,
)
for key, val in datasets_test.items()
}
num_classes = {key: len(val) for key, val in dataset.classes.items()}
logger.info('Creating model with %s classes', num_classes)
model = base_model.BaseModel(cfg.model,
num_classes=num_classes,
class_mappings=dataset.class_mappings)
logger.debug('Model: %s', model)
if dist_info['distributed'] and cfg.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if cfg.train.init_from_model:
        # Each entry can take one of these forms:
        #   [<ckpt path>]
        #   [<module name>, <ckpt path>]
        #   [<module name>, <modules to keep>, <ckpt path>]
        for module_ckpt in cfg.train.init_from_model:
            elts = module_ckpt
if len(elts) == 1:
model_to_init = model
ckpt_modules_to_keep = None
ckpt_path = elts[0]
elif len(elts) == 2:
model_to_init = operator.attrgetter(elts[0])(model)
ckpt_modules_to_keep = None
ckpt_path = elts[1]
elif len(elts) == 3:
model_to_init = operator.attrgetter(elts[0])(model)
ckpt_modules_to_keep = elts[1]
ckpt_path = elts[2]
else:
raise ValueError(f'Incorrect formatting {module_ckpt}')
init_model(model_to_init, ckpt_path, ckpt_modules_to_keep, logger)
model.to(device)
if cfg.opt.classifier_only:
assert len(cfg.opt.lr_wd) == 1
assert cfg.opt.lr_wd[0][0] == 'classifier'
model = _set_all_bn_to_not_track_running_mean(model)
params = []
for this_module_names, this_lr, this_wd in cfg.opt.lr_wd:
if OmegaConf.get_type(this_module_names) != list:
this_module_names = [this_module_names]
this_modules = [
operator.attrgetter(el)(model) if el != '__all__' else model
for el in this_module_names
]
this_params_bias_bn = {}
this_params_rest = {}
for this_module_name, this_module in zip(this_module_names,
this_modules):
for name, param in this_module.named_parameters():
# ignore the param without grads
if not param.requires_grad:
continue
# May not always have a ".bias" if it's the last element, and no
# module name
if name.endswith('bias') or ('.bn' in name):
this_params_bias_bn[this_module_name + '.' + name] = param
else:
this_params_rest[this_module_name + '.' + name] = param
this_scaled_lr = this_lr * dist_info['world_size']
if cfg.opt.scale_lr_by_bs:
this_scaled_lr *= cfg.train.batch_size
params.append({
'params': this_params_rest.values(),
'lr': this_scaled_lr,
'weight_decay': this_wd,
})
logger.info('Using LR %f WD %f for parameters %s', params[-1]['lr'],
params[-1]['weight_decay'], this_params_rest.keys())
params.append({
'params': this_params_bias_bn.values(),
'lr': this_scaled_lr,
'weight_decay': this_wd * cfg.opt.bias_bn_wd_scale,
})
logger.info('Using LR %f WD %f for parameters %s', params[-1]['lr'],
params[-1]['weight_decay'], this_params_bias_bn.keys())
# Remove any parameters for which LR is 0; will save GPU usage
params_final = []
for param_lr in params:
if param_lr['lr'] != 0.0:
params_final.append(param_lr)
else:
for param in param_lr['params']:
param.requires_grad = False
optimizer = hydra.utils.instantiate(cfg.opt.optimizer, params_final)
# convert scheduler to be per iteration,
# not per epoch, for warmup that lasts
# between different epochs
main_scheduler = hydra.utils.instantiate(
cfg.opt.scheduler,
optimizer,
iters_per_epoch=len(data_loader),
world_size=dist_info['world_size'])
lr_scheduler = hydra.utils.instantiate(cfg.opt.warmup,
optimizer,
main_scheduler,
iters_per_epoch=len(data_loader),
world_size=dist_info['world_size'])
last_saved_ckpt = CKPT_FNAME
start_epoch = 0
if os.path.isfile(last_saved_ckpt):
checkpoint = torch.load(last_saved_ckpt, map_location='cpu')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
start_epoch = checkpoint['epoch']
logger.warning('Loaded model from %s (ep %f)', last_saved_ckpt,
start_epoch)
if dist_info['distributed'] and not cfg.eval.eval_fn.only_run_featext:
# If only feat ext, then each gpu is going to test separately anyway,
# no need for communication between the models
logger.info('Wrapping model into DDP')
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[dist_info['gpu']],
output_device=dist_info['gpu'])
elif cfg.data_parallel:
logger.info('Wrapping model into DP')
device_ids = range(dist_info['world_size'])
model = torch.nn.parallel.DataParallel(model, device_ids=device_ids)
# TODO add an option here to support val mode training
# Passing in the training dataset, since that will be used for computing
# weights for classes etc.
train_eval_op = hydra.utils.instantiate(cfg.train_eval_op,
model,
device,
dataset,
_recursive_=False)
if cfg.test_only:
logger.info("Starting test_only")
hydra.utils.call(cfg.eval.eval_fn, train_eval_op, data_loaders_test,
writer, logger, start_epoch)
return
logger.info("Start training")
start_time = time.time()
# Get training metric logger
stat_loggers = get_default_loggers(writer, start_epoch, logger)
best_acc1 = 0.0
partial_epoch = start_epoch - int(start_epoch)
start_epoch = int(start_epoch)
last_saved_time = datetime.datetime(1, 1, 1, 0, 0)
    epoch = 0  # Used to write the final checkpoint, so initialize it in case the loop below doesn't run
for epoch in range(start_epoch, cfg.train.num_epochs):
if dist_info['distributed'] and train_sampler is not None:
train_sampler.set_epoch(epoch)
last_saved_time = hydra.utils.call(cfg.train.train_one_epoch_fn,
train_eval_op, optimizer,
lr_scheduler, data_loader, epoch,
partial_epoch,
stat_loggers["train"], logger,
last_saved_time)
partial_epoch = 0 # Reset, for future epochs
store_checkpoint([CKPT_FNAME], model, optimizer, lr_scheduler,
epoch + 1)
if cfg.train.eval_freq and epoch % cfg.train.eval_freq == 0:
acc1 = hydra.utils.call(cfg.eval.eval_fn, train_eval_op,
data_loaders_test, writer, logger,
epoch + 1)
else:
acc1 = 0
if cfg.train.store_best and acc1 >= best_acc1:
store_checkpoint('checkpoint_best.pth', model, optimizer,
lr_scheduler, epoch + 1)
best_acc1 = acc1
if isinstance(lr_scheduler.base_scheduler,
scheduler.ReduceLROnPlateau):
lr_scheduler.step(acc1)
# reset all meters in the metric logger
for log in stat_loggers:
stat_loggers[log].reset_meters()
# Store the final model to checkpoint
store_checkpoint([CKPT_FNAME], model, optimizer, lr_scheduler, epoch + 1)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info('Training time %s', total_time_str)
|
AVT-main
|
func/train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Useful links:
Streamlit cheatsheet:
https://docs.streamlit.io/library/cheatsheet
Also check the components we provide for demos in metastreamlit:
https://github.com/fairinternal/metastreamlit
You can request new components by creating an issue
"""
# Designed to run from controllable_agent with streamlit run demo/main.py
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "" # avoid using CUDA
import sys
import time
import logging
import tempfile
from pathlib import Path
from collections import OrderedDict
import streamlit as st
try:
import url_benchmark
base = Path(url_benchmark.__file__).absolute().parents[1]
except ImportError:
base = Path(__file__).absolute().parents[1]
# we need to add base repo to be able to import url_benchmark
# we need to add url_benchmark to be able to reload legacy checkpoints
for fp in [base, base / "url_benchmark"]:
assert fp.exists()
if str(fp) not in sys.path:
sys.path.append(str(fp))
print("base", base)
from url_benchmark import pretrain
import numpy as np
import torch
import torch.nn.functional as F
from controllable_agent import runner
from url_benchmark import goals
from url_benchmark import utils
from url_benchmark.video import VideoRecorder
logger = logging.getLogger(__name__)
st.set_page_config(
page_title="Controllable agent - Meta AI",
menu_items={"About": "This demo is powered by the code available at https://github.com/facebookresearch/controllable_agent\nCopyright 2022 Meta Inc. Available under MIT Licence."},
)
# st.title('Controllable agent')
st.sidebar.write('# Controllable Agent Demo')
st.sidebar.write("### Optimize Any Reward Function with a Single Pretrained Agent")
st.sidebar.write("***Ahmed Touati, Jérémy Rapin, Yann Ollivier***")
st.sidebar.write("A controllable agent is a reinforcement learning agent whose _reward function can be set in real time_, without any additional learning or fine-tuning, based on a reward-free pretraining phase.")
st.sidebar.write("""The controllable agent here uses the _forward-backward representation_ from our papers:
* [Does Zero-Shot Reinforcement Learning Exist?](https://arxiv.org/abs/2209.14935)
* [Learning One Representation to Optimize All Rewards](https://arxiv.org/abs/2103.07945) (NeurIPS 2021)
""")
st.sidebar.write("The [code is open-source](https://github.com/facebookresearch/controllable_agent).")
model_path = Path("/checkpoint/jrapin/ca/models")
if not model_path.exists():
model_path = base / "models"
# having more cases will trigger a dropdown box
CASES = {
    # Update the following path to a checkpoint that exists on your system
"walker - 221020 (rnd init)": model_path / "walker_rnd_init_65697627_11_221020.pt",
}
CASES = {x: y for x, y in CASES.items() if y.exists()}
if len(CASES) > 1:
case = st.selectbox(
'Which model do you want to load?',
list(CASES)
)
else:
case = list(CASES)[0]
assert case is not None
@st.cache(max_entries=1, allow_output_mutation=True)
def load_workspace(case: str):
checkpoint = CASES[case]
hp = runner.HydraEntryPoint(base / "url_benchmark/anytrain.py")
ws = hp.workspace(task="walker_walk", replay_buffer_episodes=2000, goal_space="walker_pos_speed_z", append_goal_to_observation=True)
ws.train_env.reset()
with checkpoint.open("rb") as f:
payload = torch.load(f, map_location=ws.device)
ws.agent = payload["agent"]
ws.agent.cfg.device = ws.cfg.device
replay = payload["replay_loader"]
ws.replay_loader = replay
ws.replay_storage = replay
return ws
# load
ws = load_workspace(case)
recorder = VideoRecorder(base, camera_id=ws.video_recorder.camera_id, use_wandb=False)
recorder.enabled = True
reward = goals.WalkerEquation("x")
reward._precompute_for_demo(ws) # precompute before first run
ws.replay_loader._storage.clear() # clear memory since not used anymore
params = list(reward._extract(reward._env))
params_str = ", ".join(f"`{x}`" for x in params)
st.write("##### Try Your Own Reward Function for Walker")
st.write(f"Enter a Walker reward function to maximize, such as `-vx` or `exp(-(x-8)**2)`\n\n This can be any Python equation using {params_str} (horizontal and vertical position, horizontal and vertical speed, sine of torso angle, angular momentum)")
string = st.text_input("Reward function:", value=st.session_state.get("prefill", ""))
# st.session_state.pop("prefill", None)
col1, col2 = st.columns(2)
early_stopping = True
last_physics = np.ndarray([])
if string and string is not None:
reward = goals.WalkerEquation(string)
reward._precompute_for_demo(ws) # loads from cached workspace if already precomputed
logger.info(f"Running reward: {string}") # for the console
col1.write(f"Running reward `{string}`") # use code formating to avoid italic from **
if not reward._precomputed:
meta = pretrain._init_eval_meta(ws, custom_reward=reward)
else:
print("Inferring from precomputed data")
meta = reward._from_precomputed()
col1.write("Applying the policy for 500 time steps and generating video (this may take 10-15s)")
# play
env = ws._make_env()
time_step = env.reset()
recorder.init(env)
total_reward = 0
k = 0
durations = dict(model=0.0, env=0.0, render=0.0)
t_start = time.time()
while k < 500 and not time_step.last():
k += 1
t0 = time.time()
with torch.no_grad(), utils.eval_mode(ws.agent):
action = ws.agent.act(time_step.observation,
meta,
1000000,
eval_mode=True)
t1 = time.time()
time_step = env.step(action)
t2 = time.time()
recorder.record(env)
t3 = time.time()
durations["model"] += t1 - t0
durations["env"] += t2 - t1
durations["render"] += t3 - t2
total_reward += reward.from_env(env)
distance = np.linalg.norm(time_step.physics - last_physics) / time_step.physics.size
if early_stopping and distance < 5e-6:
print(f"Early stopping at time step {k}")
break
last_physics = time_step.physics
print(f"Total play time {time.time() - t_start:.2f}s with {durations}")
state = reward._extract(env)
state_str = " ".join(f"{x}={y:.2f}" for x, y in state.items())
col1.write(
f"Average reward is {total_reward / k}\n\n"
f'Final state is {state_str}'
)
name = "demo.mp4"
with tempfile.TemporaryDirectory() as tmp:
recorder.save_dir = Path(tmp)
t0 = time.time()
recorder.save(name)
print(f"Saved video to {recorder.save_dir / name} in {time.time() - t0:.2f}s, now serving it.")
col = st.columns([1, 3, 1])[1]
with col:
col2.video(str(recorder.save_dir / name))
st.write("---")
st.write(f"""**Note**: multiplicative rewards are a good way to combine constraints on the agent. For instance, `z**4 * exp(-abs(x-5))` makes the agent try to jump around `x=5`""")
st.write(f"""This agent is far from perfect, and it is still easy to make it fail. For instance, the variable `x` works well only in the range well-covered in the trainset (typically -15 to 15). Rewards like `x**2` or `exp(x)` produce bad results, presumably because they are largest far away from the trainset. On the other hand, `x**2 * (x<20) * (x>-20)` works better, because the reward is restricted to a well-explored zone. Also, the variable `vz` does not seem to do much. """)
with st.expander("How Does This Work?"):
st.write(r"""
The algorithms are directly taken from our papers (see side bar). At pre-training time, two representations $F(s,a,z)$ and $B(s)$ ("forward" and "backward") were learned, as well as a parametric policy $\pi_z(s)$. Here $z$ is a hidden variable in representation space.
When a new reward function $r$ is set, the app computes the hidden variable $z=\mathbb{E}[r(s)B(s)]$ using 5,000 states $s$ from the training set, using the provided function $r$. Then the policy $\pi_z$ with parameter $z$ is deployed.
The dimension of $F$, $B$ and $z$ is 50. The networks are small multilayer perceptrons. The training set was initialized by a standard exploration algorithm, Random Network Distillation. It is made of 2,000 length-1,000 trajectories. Then we learn $F$, $B$ and $\pi_z$ using the method described in our papers, and we update the training set by sampling random $z$ and applying the corresponding policy.
For $B$, we only provide a subset of variables from the full state $s$, namely, the six variables `x,z,vx,vz,up,am` mentioned above, to focus training on those. Our theory guarantees that, if the networks minimize the loss well, all reward functions depending on those variables will be optimized.
###### How do we Learn $F$, $B$ and $\pi$? Causes and Effects
Intuitively, $F(s,a,z)$ represents the "effects" of following $\pi_z$ starting at state-action $(s,a)$, while $B(s')$ represents the possible "causes" leading to state $s'$.
If it's easy to reach $s'$ while starting at $s,a$ and following $\pi_z$ for many steps, then the dot product $F(s,a,z)^TB(s')$ will be large, meaning, we align the representation vectors $F(s,a,z)$ and $B(s')$. The precise equation (below) uses the cumulated long-term transition probabilities between states.
The policy $\pi_z$ is trained to return an action $a$ that maximizes $F(s,a,z)^T z$.
The full set of equations is:""")
st.latex(r'''\begin{cases}
\pi_z(s)=\mathrm{argmax}_a \, F(s,a,z)^T z\\
F(s,a,z)^T B(s') \rho(s') = \sum_t \gamma^t \Pr(s_t=s'|s_0=s,a_0=a,\pi_z)
\end{cases}
''')
st.write("""
Here $\\rho$ is the distribution of states in the training set (we don't need to know $\\rho$, just to sample from it).
Our theory guarantees this provides all optimal policies if training is successful:
**Theorem.** *Assume the equations above hold. Then the optimal policy for any reward function $r$ can be obtained by evaluating* """)
st.latex(r''' z=\mathbb{E}[r(s)B(s)] ''')
st.write(r""" *on states sampled from the training distribution, and applying policy $\pi_z$.*
*Moreover, approximate solutions still provide approximately optimal policies.*
The equation on $F$ and $B$ seems hard to handle, but it can be rewritten as a kind of generalized Bellman equation for $F^T B$, which we use for training. There is no representation collapse ($F=B=0$ does not satisfy the equation). There is no sparse reward problem from $\Pr(s_t=s')$, thanks to our probability-measure-valued treatment of the equation.
Overall, this is somewhat similar to a world model except:
* There is no planning at test time
* We never synthesize states or imaginary trajectories
* We learn long-term transition probabilities for many policies instead of one-step, policy-independent next states
""")
st.write("##### Some Examples")
reward_texts = [
("vx", "run as fast as possible"),
("x < -4", "go to the left until x<-4"),
("1 / z", "be close to the ground"),
("-up", "be upside down"),
("-up * x * (x > 0)", "be to the right and upside down"),
("(1-up) * exp(-abs(x-10))", "be upside down around x=10"),
("exp(-abs(x - 8)) * up / z", "be around x=8, upright, and close to the ground: crouch at x=8"),
("exp(-abs(x - 10)) * up * z**4", "be around x=10, upright, and very high: jump at x=10"),
("vx/z**2", "crawl"),
("exp(-abs(vx - 2)) * up", "move slowly (speed=2) and stay upright"),
("vx * (1 - up) / z", "move as fast as possible, upside down, close to the ground"),
("vx * (1 + up * cos(x / 4))", "run upright or rolling depending on cos(x/4)"),
]
def _prefill(eq: str) -> None:
st.session_state["prefill"] = eq
for reward, text in reward_texts:
cols = st.columns(3)
cols[0].write(f"`{reward}`")
cols[1].write(text)
cols[2].button("Try", key=reward, on_click=_prefill, args=(reward,))
# col[2].write("video TODO")
|
controllable_agent-main
|
demo/main.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import uuid
import shutil
import logging
import datetime
import importlib
import traceback
import contextlib
import typing as tp
from pathlib import Path
import numpy as np
import submitit
import omegaconf
import hydra
from .executor import ( # pylint: disable=unused-import
DelayedExecutor as DelayedExecutor,
)
PathLike = tp.Union[str, Path]
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def working_directory(path: tp.Union[str, Path]) -> tp.Iterator[None]:
cwd = Path().cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
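# Illustrative sketch (not part of the original code): the context manager above
# temporarily switches the working directory and restores it afterwards, even if
# the body raises. The temporary path below is made up for the example.
def _example_working_directory(tmp_path: Path) -> None:
    before = Path.cwd()
    with working_directory(tmp_path):
        assert os.path.samefile(Path.cwd(), tmp_path)  # inside the context
    assert Path.cwd() == before  # restored on exit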
class HydraEntryPoint:
"""Creates a callable from a Hydra main
Config and python files are expected to be in the same folder
Parameter
---------
script_path: str/Path
Path to the python script containing main
"""
# callable to be typed when using an actual package
def __init__(self, script_path: PathLike) -> None:
self._script_path = Path(script_path).absolute()
assert self._script_path.suffix == ".py"
assert self._script_path.is_file(), f"{self._script_path} is not a file"
assert self._script_path.with_name("base_config.yaml").is_file()
self._folder: tp.Optional[Path] = None # defined later
@property
def folder(self) -> Path:
if self._folder is None:
raise RuntimeError(
"Folder is not defined if call method has not be called yet"
)
return self._folder
def validated(self, **kwargs: tp.Any) -> "HydraEntryPoint":
self._folder = (
None # reset folder if validated to avoid reusing a previous test folder
)
self.config(**kwargs)
return self
def _relative_path(self) -> Path:
return Path(os.path.relpath(self._script_path, Path(__file__).parent))
def config(self, **kwargs: tp.Any) -> omegaconf.DictConfig:
self._get_module() # needs to be loaded to make sure configs are available
name = self._script_path.stem
rel_path = self._relative_path().with_name("base_config.yaml")
overrides = [f"{x}={y}" for x, y in kwargs.items()]
with hydra.initialize(
config_path=str(rel_path.parent), job_name=name, version_base="1.1"
):
cfg_ = hydra.compose(config_name="base_config", overrides=overrides)
return cfg_
def _get_module(self) -> tp.Any:
benchpath = str(self._script_path.parents[1])
if benchpath not in sys.path:
sys.path.insert(0, benchpath)
# add url_benchmark, for legacy buffers
sys.path.append(str(self._script_path.parent))
already_imported = any("url_benchmark" in x for x in sys.modules)
module = importlib.import_module("url_benchmark." + self._script_path.stem)
module = importlib.reload(module) # reload to override hydra configstore
assert module is not None
if module.__file__ is None or not module.__file__.startswith(benchpath):
if already_imported:
                logger.warning(
                    f"url_benchmark had already been imported, using {module.__file__}"
                )
else:
raise RuntimeError(
f"Imported {module.__file__} while expecting to be in {benchpath}"
)
return module
def main(self, **kwargs: tp.Any) -> tp.Any:
return self._get_module().main(self.config(**kwargs))
def workspace(self, **kwargs: tp.Any) -> tp.Any:
return self._get_module().Workspace(self.config(**kwargs))
def __repr__(self) -> str:
rel_path = str(self._relative_path())
return f"{self.__class__.__name__}({rel_path!r})"
def get_hiplog(self) -> tp.Any:
if self._folder is None:
raise RuntimeError("No workspace avaible")
import hiplogs # type: ignore
loggers = list(hiplogs.HipLog.find_in_folder(self._folder))
assert len(loggers) == 1
return loggers[0]
def __call__(
self, _working_directory_: tp.Optional[PathLike] = None, **kwargs: tp.Any
) -> float:
config = self.config(**kwargs)
try:
slurm_folder: tp.Optional[Path] = submitit.JobEnvironment().paths.folder
except RuntimeError:
slurm_folder = None
if self._folder is None and _working_directory_ is not None:
self._folder = Path(_working_directory_) # override working directory
self._folder.mkdir(exist_ok=True, parents=True)
            logger.warning(
                f"Bypassing folder assignment and using the provided one: {self._folder}"
            )
if slurm_folder is not None:
# try and link to latest slurm dir anyway
symlink = self._folder / "slurm"
if symlink.exists():
symlink.unlink()
symlink.symlink_to(slurm_folder)
if self._folder is None:
if slurm_folder is not None:
self._folder = slurm_folder
else:
name = f"{datetime.date.today().isoformat()}_{config.experiment}_{uuid.uuid4().hex[:6]}"
self._folder = Path("exp_local") / name
self._folder.mkdir(exist_ok=True, parents=True)
omegaconf.OmegaConf.save(config=config, f=str(self.folder / "config.yaml"))
with working_directory(self.folder):
workspace = self._get_module().Workspace(config)
try:
workspace.train()
except Exception as e:
if not workspace.eval_rewards_history:
raise e # it did not even run :s
logger.warning(f"Something went wrong:\n{traceback.format_exc()}")
reward = -float("inf")
if workspace.eval_rewards_history:
reward = np.mean(workspace.eval_rewards_history[-12:])
return -float(reward) # minimization for nevergrad
def checkpoint(self, *args: tp.Any, **kwargs: tp.Any) -> tp.Any:
return submitit.helpers.DelayedSubmission(self, *args, **kwargs)
class CopiedBenchmark(HydraEntryPoint):
def __init__(self, folder: PathLike, name: str) -> None:
self.code = Path(folder) / "code"
self.code.parent.mkdir(parents=True, exist_ok=True)
if self.code.exists():
logger.warning(
f"Folder {folder} already exists, it will **not** be updated"
)
else:
shutil.copytree(
Path(__file__).parents[1] / "url_benchmark",
self.code / "url_benchmark",
ignore=shutil.ignore_patterns("exp_*"),
)
super().__init__(self.code / "url_benchmark" / f"{name}.py")
def on_exception_enter_postmortem(f):
"""Decorator for triggering pdb in case of exception"""
import pdb
import sys
from functools import wraps
import traceback
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
raise
return wrapper
|
controllable_agent-main
|
controllable_agent/runner.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
controllable_agent-main
|
controllable_agent/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import concurrent.futures
from pathlib import Path
import pytest
import submitit
from . import executor as _exec
def func(fail: bool = False) -> str:
if fail:
raise ValueError("This is a failure")
return "success"
def get_executor(tmp_path: Path) -> _exec.DelayedExecutor[str]:
local_exec = submitit.AutoExecutor(folder=tmp_path, cluster="debug")
return _exec.DelayedExecutor(
local_exec, default="ERROR", batch_size=2, max_delay=0.2, max_failure_rate=0.5
)
def test_delayed_exec_num(tmp_path: Path) -> None:
executor = get_executor(tmp_path)
job1 = executor.submit(func)
assert not job1.done()
assert job1.job is None, "Job should not be submitted"
job2 = executor.submit(func)
assert job2.done()
    assert job1.job is not None, "Job should have been submitted"
    assert job2.job is not None, "Job should have been submitted"
assert not executor._unsubmitted, "Unsubmitted jobs should be purged"
def test_delayed_exec_delay(tmp_path: Path) -> None:
executor = get_executor(tmp_path)
job1 = executor.submit(func)
time.sleep(0.1)
assert job1.job is None, "Job should not be submitted"
time.sleep(0.11)
job1.done() # trigger a possible submission
assert job1.job is not None, "Job should be submitted"
assert not executor._unsubmitted, "Unsubmitted jobs should be purged"
def test_delayed_exec_error(tmp_path: Path) -> None:
executor = get_executor(tmp_path)
jobs = [executor.submit(func, fail=f) for f in [True, True]]
with pytest.raises(RuntimeError):
jobs[0].result()
def test_delayed_exec_caught_error(tmp_path: Path) -> None:
executor = get_executor(tmp_path)
jobs = [executor.submit(func, fail=f) for f in [False, True]]
assert jobs[0].result() == "success"
assert jobs[1].result() == "ERROR"
def _do_nothing() -> int:
return 12
def test_wait_for_jobs() -> None:
jobs = []
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as exc:
for _ in range(2):
jobs.append(exc.submit(_do_nothing))
_exec.wait_for_jobs(jobs, sleep=0.04)
|
controllable_agent-main
|
controllable_agent/test_executor.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import itertools
import subprocess
from pathlib import Path
import controllable_agent
from . import runner
def test_quadruped_goal(tmp_path: Path) -> None:
conf_path = Path(__file__).parents[1] / "url_benchmark" / "pretrain.py"
with runner.working_directory(tmp_path):
ep = runner.HydraEntryPoint(conf_path)
ep(
_working_directory_=tmp_path / "bypass",
agent="fb_ddpg",
device="cpu",
num_train_frames=1,
num_eval_episodes=1,
replay_buffer_episodes=2,
goal_space="simplified_quadruped",
task="quadruped_walk",
use_hiplog=True,
final_tests=1,
**{"agent.feature_dim": 80, "agent.z_dim": 100},
)
reward_file = tmp_path / "bypass" / "test_rewards.json"
text = reward_file.read_text()
assert "quadruped_run" in text
def test_anytrain(tmp_path: Path) -> None:
with runner.working_directory(tmp_path):
ep = runner.CopiedBenchmark(tmp_path / "no_code", "anytrain")
ep(
agent="fb_ddpg",
device="cpu",
num_train_episodes=1,
num_eval_episodes=1,
replay_buffer_episodes=2,
use_hiplog=True,
final_tests=0,
)
def test_grid_anytrain(tmp_path: Path) -> None:
with runner.working_directory(tmp_path):
ep = runner.CopiedBenchmark(tmp_path / "no_code", "anytrain")
ep(
agent="discrete_fb",
device="cpu",
task="grid_simple",
num_train_episodes=1,
num_eval_episodes=1,
replay_buffer_episodes=2,
use_hiplog=True,
final_tests=0,
)
def test_package_init_annotations() -> None:
# automatically updates the __init__ functions with "-> None:" if missing
# it fails the first time when adding it, then it should work
# feel free to deactivate if that helps, it's not that important :p
failed = []
pattern = re.compile(r"(def __init__\(self.*\)):")
root = Path(__file__).parents[1]
assert (root / "url_benchmark").is_dir()
for fp in root.rglob("*.py"):
if "expected" in str(fp) or "test_" in fp.name:
continue
text = fp.read_text()
text2 = pattern.sub(r"\g<1> -> None:", text)
if text2 != text:
failed.append(str(fp))
fp.write_text(text2)
if failed:
string = "\n -".join(
["Missing -> None at the end of __init__ definition"] + failed
)
string += "\nUpdate, or run this test locally for automatic addition"
raise AssertionError(string)
def test_property_syntax() -> None:
# automatic linters tend to change @property to @ property for no reason
root = Path(__file__).parents[1]
assert (root / "url_benchmark").is_dir()
errors = []
for fp in root.rglob("*.py"):
if fp == Path(__file__):
continue
if "@ property" in fp.read_text():
errors.append(str(fp))
if errors:
msg = ["Additional space in @property, linter got crazy:"] + errors
raise AssertionError("\n - ".join(msg))
def test_pretrain_checkpoint(tmp_path: Path) -> None:
conf_path = Path(__file__).parents[1] / "url_benchmark" / "pretrain.py"
with runner.working_directory(tmp_path):
ep = runner.HydraEntryPoint(conf_path)
params = dict(
agent="fb_ddpg",
device="cpu",
num_train_frames=1001,
num_eval_episodes=1,
replay_buffer_episodes=2,
use_hiplog=True,
checkpoint_every=1000,
final_tests=0,
)
wsp = ep.workspace(**params)
assert not wsp.global_step
wsp.train()
assert wsp.global_step == 1001
wsp2 = ep.workspace(**params)
assert wsp2.global_step == 1001
# keep last because it may make a mess with the paths (for copied benchmark)
def test_pretrain_from_runner(tmp_path: Path) -> None:
conf_path = Path(__file__).parents[1] / "url_benchmark" / "pretrain.py"
with runner.working_directory(tmp_path):
ep = runner.HydraEntryPoint(conf_path)
reward = ep(
agent="fb_ddpg",
device="cpu",
num_train_frames=1011,
num_eval_episodes=1,
num_seed_frames=1010,
replay_buffer_episodes=2,
use_hiplog=True,
final_tests=0,
)
assert isinstance(reward, float)
assert -1000 < reward < 0
from url_benchmark import hiplogs # pylint: disable=import-outside-toplevel
hippath = ep.folder / "hip.log"
assert hippath.exists()
hiploggers = list(hiplogs.HipLog.find_in_folder(tmp_path, recursive=True))
assert len(hiploggers) == 1
hiplog = hiploggers[0]
out = hiplog.read()
assert "eval/episode_reward" in out[0]
confpath = ep.folder / "config.yaml"
assert confpath.exists()
def test_header() -> None:
lines = Path(__file__).read_text("utf8").splitlines()
header = "\n".join(itertools.takewhile(lambda l: l.startswith("#"), lines))
assert len(header.splitlines()) == 4, f"Identified header:\n{header}"
root = Path(controllable_agent.__file__).parents[1]
base = [x for x in root.iterdir() if not x.name.startswith(".")] # avoid .git
tocheck = []
for fp in base:
if fp.is_file() and fp.suffix == ".py":
tocheck.append(fp)
elif fp.is_dir():
output = subprocess.check_output(
["find", str(fp), "-name", "*.py"], shell=False
)
tocheck.extend([Path(p) for p in output.decode().splitlines()])
missing = []
AUTOADD = True
for fp in tocheck:
text = Path(fp).read_text("utf8")
if not text.startswith(header):
if AUTOADD and not any(x in text.lower() for x in ("license", "copyright")):
print(f"Automatically adding header to {fp}")
Path(fp).write_text(header + "\n\n" + text, "utf8")
missing.append(str(fp))
if missing:
missing_str = "\n - ".join(missing)
raise AssertionError(
f"Following files are/were missing standard header (see other files):\n - {missing_str}"
)
|
controllable_agent-main
|
controllable_agent/test_url_benchmark.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
import logging
import traceback
import contextlib
import nevergrad.common.typing as tp
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def batch_if_available(executor: tp.ExecutorLike) -> tp.Iterator[None]:
"""Only submitit executors have a batch context, so we need different
cases for other executor (eg: concurrent.futures)
Batch context in submitit allows for using arrays in slurm, which is
better for the cluster health.
"""
if hasattr(executor, "batch"):
with executor.batch(): # type: ignore
yield
else:
yield
X = tp.TypeVar("X")
Fn = tp.Callable[..., X]
class DelayedJob(tp.Generic[X]):
def __init__(
self, executor: "DelayedExecutor[X]", fn: Fn[X], *args: tp.Any, **kwargs: tp.Any
) -> None:
self.executor = executor
self.time = time.time()
self.job: tp.Optional[tp.JobLike[X]] = None
self._submission: tp.Optional[
tp.Tuple[Fn[X], tp.Tuple[tp.Any, ...], tp.Dict[str, tp.Any]]
] = (fn, args, kwargs)
def _is_submited(self, force: bool = False) -> bool:
if self.job is None:
self.executor._check_submit(force=force)
return self.job is not None
def done(self) -> bool:
if not self._is_submited():
return False
return self.job is not None and self.job.done()
def result(self) -> X:
self._is_submited(force=True)
if self.job is None:
raise RuntimeError("Job should have been submitted")
error = ""
try:
result = self.job.result()
except Exception: # pylint: disable=broad-except
error = traceback.format_exc()
result = self.executor._default
self.executor._add_result(error=error)
return result
class DelayedExecutor(tp.Generic[X]):
def __init__(
self,
executor: tp.ExecutorLike,
default: X,
batch_size: int = 8,
max_delay: float = 45 * 60,
max_failure_rate: float = 0.25,
) -> None:
self.executor = executor
self.batch_size = batch_size
self.max_delay = max_delay
self.max_failure_rate = max_failure_rate
self._default = default
self._unsubmitted: tp.List[DelayedJob[X]] = []
self._total_results = 0
self._total_failures = 0
assert 0 < max_failure_rate < 1
def submit(self, fn: Fn[X], *args: tp.Any, **kwargs: tp.Any) -> DelayedJob[X]:
job = DelayedJob(self, fn, *args, **kwargs)
self._unsubmitted.append(job)
return job
def _check_submit(self, force: bool = False) -> None:
        if self._unsubmitted:
            # Compute the delay only when something is pending, otherwise
            # indexing the empty list would raise an IndexError
            delay = time.time() - self._unsubmitted[0].time
            if (
                force
                or len(self._unsubmitted) >= self.batch_size
                or delay > self.max_delay
            ):
logger.info(
f"Submitting {len(self._unsubmitted)} job(s) after {int(delay / 60)}min wait"
)
with batch_if_available(self.executor):
for job in self._unsubmitted:
assert job._submission is not None
fn, args, kwargs = job._submission
job._submission = None
job.job = self.executor.submit(fn, *args, **kwargs)
self._unsubmitted = []
def _add_result(self, error: str) -> None:
self._total_results += 1
self._total_failures += bool(error)
if error:
logger.warning(
f"Caught {self._total_failures} out of {self._total_results} runs:\n{error}"
)
if self._total_failures / self._total_results > self.max_failure_rate:
raise RuntimeError(
f"Stopping since failure rate is above the threshold: {self.max_failure_rate}."
)
logger.warning("Ignoring since this is below max failure rate")
def wait_for_jobs(jobs: tp.Iterable[tp.Any], sleep: float = 2.0) -> None:
"""Very crude function for regularly printing the percent
of finished jobs in a list
"""
jobs = list(jobs)
done = 0
print(f"Submitted {len(jobs)} jobs")
while done < 100:
new_done = int(100 * sum(j.done() for j in jobs) / len(jobs))
if new_done > done:
print(f"{new_done}% done")
jdone = [j for j in jobs if j.done()]
if not done:
print(jdone[0].result())
# pylint: disable=expression-not-assigned
# [j.result() for j in jdone] # raise asap
done = new_done
else:
time.sleep(sleep)
print("Waiting is over")
|
controllable_agent-main
|
controllable_agent/executor.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import dataclasses
import typing as tp
import numpy as np
from url_benchmark.agent.ddpg import MetaDict
from url_benchmark.dmc import EnvWrapper, ExtendedTimeStep, TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from dm_env import specs, StepType
from dm_env.auto_reset_environment import AutoResetEnvironment
@dataclasses.dataclass
class D4RLConfig:
minimum_episode_length: tp.Optional[int] = None
ignore_terminals: bool = False
class EmptyPhysics():
    def __init__(self) -> None:
self.empty_physics = np.zeros((1,1))
def get_state(self) -> np.ndarray:
return self.empty_physics
@dataclasses.dataclass
class ExtendedTimeStepD4RL(ExtendedTimeStep):
reward: tp.Any
discount: tp.Any
class D4RLWrapper(AutoResetEnvironment):
def __init__(self, env) -> None:
self.physics = EmptyPhysics()
super().__init__()
self._env = env
def observation_spec(self) -> tp.Any:
return specs.BoundedArray(shape=self._env.observation_space.shape,
dtype=self._env.observation_space.dtype,
minimum=self._env.observation_space.low,
maximum=self._env.observation_space.high,
name='observation')
def action_spec(self) -> specs.Array:
return specs.BoundedArray(shape=self._env.action_space.shape,
dtype=self._env.action_space.dtype,
minimum=self._env.action_space.low,
maximum=self._env.action_space.high,
name='action')
def get_normalized_score(self, reward: float) -> float:
return self._env.get_normalized_score(reward)
def _step(self, action) -> TimeStep:
obs, reward, done, _ = self._env.step(action)
step_type = StepType.LAST if done else StepType.MID
        return ExtendedTimeStepD4RL(step_type=step_type, observation=obs,
                                    reward=reward, discount=1.0, action=action)
def _reset(self) -> TimeStep:
obs = self._env.reset()
        return ExtendedTimeStepD4RL(step_type=StepType.FIRST, observation=obs,
                                    reward=None, discount=None,
                                    action=self._env.action_space.sample())
@property
def base_env(self) -> tp.Any:
env = self._env
if isinstance(env, D4RLWrapper):
return env.base_env
return env
def get_dataset(self) -> tp.Any:
return self._env.get_dataset()
class D4RLReplayBufferBuilder:
def filter_dataset_by_episode_length(self, dataset: tp.Any, minimum_episode_length: tp.Optional[int]):
if minimum_episode_length is None or minimum_episode_length <= 1:
return dataset
end_indices = (dataset["terminals"] + dataset["timeouts"]).nonzero()[0]
episode_lengths = np.diff(np.concatenate(([-1], end_indices)))
episode_lengths_expanded = episode_lengths.repeat(episode_lengths)
diff_len = dataset['observations'].shape[0] - len(episode_lengths_expanded)
        assert diff_len >= 0  # there is no guarantee that the last step in the dataset is the last step of an episode
episode_lengths_expanded = np.concatenate((episode_lengths_expanded, np.zeros(diff_len, dtype=int))) # ignore last steps if they do not belong to an episode
filter_indices = episode_lengths_expanded >= minimum_episode_length
dataset_size = len(dataset["observations"])
for key, values in dataset.items():
if isinstance(values, np.ndarray) and len(values) == dataset_size:
dataset[key] = values[filter_indices]
return dataset
def prepare_replay_buffer_d4rl(self, env: EnvWrapper, meta: MetaDict, cfg: tp.Any) -> ReplayBuffer:
dataset = env.base_env.get_dataset()
dataset = self.filter_dataset_by_episode_length(dataset, cfg.d4rl_config.minimum_episode_length)
        # Note: we could use d4rl.qlearning_dataset instead, but its termination
        # conditions are not computed as expected (it only considers `terminals`).
        # For the last next_obs we use the first obs (qlearning_dataset simply drops
        # it, but then the last episode would never be terminated; we fake the
        # termination here instead).
observations = dataset['observations']
actions = dataset['actions']
rewards = dataset['rewards']
is_ignore_terminals = cfg.d4rl_config and (cfg.d4rl_config.ignore_terminals)
terminals = np.zeros_like(dataset['terminals']) if is_ignore_terminals else dataset['terminals']
timeouts = dataset['timeouts']
end_indices = (terminals + timeouts).nonzero()[0]
episode_lengths = np.diff(np.concatenate(([-1], end_indices)))
max_episode_length = episode_lengths.max()
if not cfg.d4rl_config or cfg.d4rl_config.minimum_episode_length is None:
assert (episode_lengths==1).sum()==0
else:
assert (episode_lengths<cfg.d4rl_config.minimum_episode_length).sum()==0
replay_storage = ReplayBuffer(max_episodes=len(end_indices), discount=cfg.discount, future=cfg.future, max_episode_length=max_episode_length)
first = True
dataset_len = dataset['rewards'].shape[0]
for idx in range(dataset_len):
if first:
time_step = ExtendedTimeStep(
step_type = StepType.FIRST, observation=observations[idx], reward=0, discount=1, action=actions[0])
first = False
else:
time_step = ExtendedTimeStep(
step_type = StepType.MID, observation=observations[idx], reward=rewards[idx-1], discount=1, action=actions[idx-1])
if terminals[idx] or timeouts[idx]:
first = True
final_discount = 1
if terminals[idx]:
final_discount = 0
time_step.step_type = StepType.LAST
time_step.discount = final_discount
replay_storage.add(time_step, meta)
assert (episode_lengths-1 == replay_storage._episodes_length).all()
if episode_lengths.min()!=episode_lengths.max():
assert not replay_storage._is_fixed_episode_length
return replay_storage
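# Illustrative sketch (not part of the original code): filtering a toy D4RL-style
# dataset so that only episodes with at least 3 steps are kept. The arrays below
# are made up for the example.
def _example_filter_short_episodes() -> None:
    n = 5
    dataset = {
        "observations": np.arange(n, dtype=np.float32).reshape(n, 1),
        "actions": np.zeros((n, 1), dtype=np.float32),
        "rewards": np.zeros(n, dtype=np.float32),
        # two episodes: steps [0, 1] (length 2) and steps [2, 3, 4] (length 3)
        "terminals": np.array([0, 1, 0, 0, 1]),
        "timeouts": np.zeros(n, dtype=int),
    }
    filtered = D4RLReplayBufferBuilder().filter_dataset_by_episode_length(
        dataset, minimum_episode_length=3)
    assert len(filtered["observations"]) == 3  # only the 3-step episode survives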
|
controllable_agent-main
|
url_benchmark/d4rl_benchmark.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import pdb # pylint: disable=unused-import
import logging
import dataclasses
import typing as tp
import warnings
from pathlib import Path
warnings.filterwarnings('ignore', category=DeprecationWarning)
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
# if the default egl does not work, you may want to try:
# export MUJOCO_GL=glfw
os.environ['MUJOCO_GL'] = os.environ.get('MUJOCO_GL', 'egl')
import hydra
from hydra.core.config_store import ConfigStore
import numpy as np
import torch
import wandb
import omegaconf as omgcf
# from dm_env import specs
from url_benchmark import dmc
from dm_env import specs
from url_benchmark import utils
from url_benchmark import goals as _goals
from url_benchmark.logger import Logger
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark.video import TrainVideoRecorder, VideoRecorder
from url_benchmark import agent as agents
from url_benchmark.d4rl_benchmark import D4RLReplayBufferBuilder, D4RLWrapper
from url_benchmark.gridworld.env import build_gridworld_task
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = True
# os.environ['WANDB_MODE']='offline'
# from url_benchmark.dmc_benchmark import PRIMAL_TASKS
# # # Config # # #
@dataclasses.dataclass
class Config:
agent: tp.Any
# misc
seed: int = 1
device: str = "cuda"
save_video: bool = False
use_tb: bool = False
use_wandb: bool = False
use_hiplog: bool = False
# experiment
experiment: str = "online"
# task settings
task: str = "walker_stand"
obs_type: str = "states" # [states, pixels]
frame_stack: int = 3 # only works if obs_type=pixels
action_repeat: int = 1 # set to 2 for pixels
discount: float = 0.99
future: float = 0.99 # discount of future sampling, future=1 means no future sampling
goal_space: tp.Optional[str] = None
append_goal_to_observation: bool = False
# eval
num_eval_episodes: int = 10
custom_reward: tp.Optional[str] = None # activates custom eval if not None
final_tests: int = 10
# checkpoint
snapshot_at: tp.Tuple[int, ...] = (100000, 200000, 500000, 800000, 1000000, 1500000,
2000000, 3000000, 4000000, 5000000, 9000000, 10000000)
checkpoint_every: int = 100000
load_model: tp.Optional[str] = None
# training
num_seed_frames: int = 4000
replay_buffer_episodes: int = 5000
update_encoder: bool = True
batch_size: int = omgcf.II("agent.batch_size")
@dataclasses.dataclass
class PretrainConfig(Config):
# mode
reward_free: bool = True
# train settings
num_train_frames: int = 2000010
# snapshot
eval_every_frames: int = 10000
load_replay_buffer: tp.Optional[str] = None
# replay buffer
# replay_buffer_num_workers: int = 4
# nstep: int = omgcf.II("agent.nstep")
# misc
save_train_video: bool = False
# loaded as base_pretrain in pretrain.yaml
# we keep the yaml since it's easier to configure plugins from it
ConfigStore.instance().store(name="workspace_config", node=PretrainConfig)
# # # Implem # # #
def make_agent(
obs_type: str, obs_spec, action_spec, num_expl_steps: int, cfg: omgcf.DictConfig
) -> tp.Union[agents.FBDDPGAgent, agents.DDPGAgent]:
cfg.obs_type = obs_type
cfg.obs_shape = obs_spec.shape
cfg.action_shape = (action_spec.num_values, ) if isinstance(action_spec, specs.DiscreteArray) \
else action_spec.shape
cfg.num_expl_steps = num_expl_steps
return hydra.utils.instantiate(cfg)
C = tp.TypeVar("C", bound=Config)
def _update_legacy_class(obj: tp.Any, classes: tp.Sequence[tp.Type[tp.Any]]) -> tp.Any:
"""Updates a legacy class (eg: agent.FBDDPGAgent) to the new
class (url_benchmark.agent.FBDDPGAgent)
Parameters
----------
obj: Any
Object to update
classes: Types
Possible classes to update the object to. If current name is one of the classes
name, the object class will be remapped to it.
"""
classes = tuple(classes)
if not isinstance(obj, classes):
clss = {x.__name__: x for x in classes}
cls = clss.get(obj.__class__.__name__, None)
if cls is not None:
logger.warning(f"Promoting legacy object {obj.__class__} to {cls}")
obj.__class__ = cls
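def _example_update_legacy_class() -> None:
    # Illustrative sketch added for documentation, using throwaway classes:
    # an object whose class merely shares its *name* with one of the provided
    # classes gets its __class__ remapped to that class.
    class _LegacyAgent:
        pass
    _NewAgent = type("_LegacyAgent", (), {})  # same name, different class object
    obj = _LegacyAgent()
    _update_legacy_class(obj, (_NewAgent,))
    assert obj.__class__ is _NewAgent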
def _init_eval_meta(workspace: "BaseWorkspace", custom_reward: tp.Optional[_goals.BaseReward] = None) -> agents.MetaDict:
if workspace.domain == "grid":
assert isinstance(workspace.agent, agents.DiscreteFBAgent)
return workspace.agent.get_goal_meta(workspace.eval_env.get_goal_obs())
special = (agents.FBDDPGAgent, agents.SFAgent, agents.SFSVDAgent, agents.APSAgent, agents.NEWAPSAgent, agents.GoalSMAgent, agents.UVFAgent)
ag = workspace.agent
_update_legacy_class(ag, special)
# we need to check against name for legacy reason when reloading old checkpoints
if not isinstance(ag, special) or not len(workspace.replay_loader):
return workspace.agent.init_meta()
if custom_reward is not None:
try: # if the custom reward implements a goal, return it
goal = custom_reward.get_goal(workspace.cfg.goal_space)
return workspace.agent.get_goal_meta(goal)
except Exception: # pylint: disable=broad-except
pass
if not isinstance(workspace.agent, agents.SFSVDAgent):
        # we cannot fully type because of the FBDDPG string check :s
num_steps = workspace.agent.cfg.num_inference_steps # type: ignore
obs_list, reward_list = [], []
batch_size = 0
while batch_size < num_steps:
batch = workspace.replay_loader.sample(workspace.cfg.batch_size, custom_reward=custom_reward)
batch = batch.to(workspace.cfg.device)
obs_list.append(batch.next_goal if workspace.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs_t, reward_t = obs[:num_steps], reward[:num_steps]
# phy = workspace.replay_loader._storage["physics"]
# phy = phy.reshape(-1, phy.shape[-1])
# back_input = "observation" if workspace.cfg.goal_space is None else "goal"
# obs = workspace.replay_loader._storage[back_input].reshape(phy.shape[0], -1) # should have been next obs
# inds = np.random.choice(phy.shape[0], size=workspace.agent.cfg.num_inference_steps, replace=False)
# phy, obs = (x[inds, :] for x in (phy, obs))
# rewards = [[custom_reward.from_physics(p)] for p in phy]
# obs_t, reward_t = (torch.Tensor(x).float().to(workspace.agent.cfg.device) for x in (obs, rewards))
return workspace.agent.infer_meta_from_obs_and_rewards(obs_t, reward_t)
else:
assert isinstance(workspace.agent, agents.SFSVDAgent)
obs_list, reward_list, action_list = [], [], []
batch_size = 0
while batch_size < workspace.agent.cfg.num_inference_steps:
batch = workspace.replay_loader.sample(workspace.cfg.batch_size, custom_reward=custom_reward)
batch = batch.to(workspace.cfg.device)
obs_list.append(batch.goal if workspace.cfg.goal_space is not None else batch.obs)
action_list.append(batch.action)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward, action = torch.cat(obs_list, 0), torch.cat(reward_list, 0), torch.cat(action_list, 0) # type: ignore
obs_t, reward_t, action_t = obs[:workspace.agent.cfg.num_inference_steps], reward[:workspace.agent.cfg.num_inference_steps],\
action[:workspace.agent.cfg.num_inference_steps]
return workspace.agent.infer_meta_from_obs_action_and_rewards(obs_t, action_t, reward_t)
if workspace.cfg.goal_space is not None:
funcs = _goals.goals.funcs.get(workspace.cfg.goal_space, {})
if workspace.cfg.task in funcs:
g = funcs[workspace.cfg.task]()
return workspace.agent.get_goal_meta(g)
return workspace.agent.infer_meta(workspace.replay_loader)
class BaseWorkspace(tp.Generic[C]):
def __init__(self, cfg: C) -> None:
self.work_dir = Path.cwd()
print(f'Workspace: {self.work_dir}')
print(f'Running code in : {Path(__file__).parent.resolve().absolute()}')
logger.info(f'Workspace: {self.work_dir}')
logger.info(f'Running code in : {Path(__file__).parent.resolve().absolute()}')
self.cfg = cfg
utils.set_seed_everywhere(cfg.seed)
if not torch.cuda.is_available():
if cfg.device != "cpu":
logger.warning(f"Falling back to cpu as {cfg.device} is not available")
cfg.device = "cpu"
cfg.agent.device = "cpu"
self.device = torch.device(cfg.device)
# goal_spec: tp.Optional[specs.Array] = None
# if cfg.goal_space is not None:
# g = _goals.goals.funcs[cfg.goal_space][cfg.task]()
# goal_spec = specs.Array((len(g),), np.float32, 'goal')
# create envs
# task = PRIMAL_TASKS[self.domain]
task = cfg.task
if task.startswith('point_mass_maze'):
self.domain = 'point_mass_maze'
else:
self.domain = task.split('_', maxsplit=1)[0]
self.train_env = self._make_env()
self.eval_env = self._make_env()
# create agent
self.agent = make_agent(cfg.obs_type,
self.train_env.observation_spec(),
self.train_env.action_spec(),
cfg.num_seed_frames // cfg.action_repeat,
cfg.agent)
# create logger
self.logger = Logger(self.work_dir,
use_tb=cfg.use_tb,
use_wandb=cfg.use_wandb,
use_hiplog=cfg.use_hiplog)
if cfg.use_wandb:
exp_name = '_'.join([
cfg.experiment, cfg.agent.name, self.domain
])
wandb.init(project="controllable_agent", group=cfg.agent.name, name=exp_name, # mode="disabled",
config=omgcf.OmegaConf.to_container(cfg, resolve=True, throw_on_missing=True)) # type: ignore
if cfg.use_hiplog:
# record config now that it is filled
parts = ("snapshot", "_type", "_shape", "num_", "save_", "frame", "device", "use_tb", "use_wandb")
skipped = [x for x in cfg if any(y in x for y in parts)] # type: ignore
self.logger.hiplog.flattened({x: y for x, y in cfg.items() if x not in skipped}) # type: ignore
self.logger.hiplog(workdir=self.work_dir.stem)
for rm in ("agent/use_tb", "agent/use_wandb", "agent/device"):
del self.logger.hiplog._content[rm]
self.logger.hiplog(observation_size=np.prod(self.train_env.observation_spec().shape))
# # create replay buffer
# self._data_specs: tp.List[tp.Any] = [self.train_env.observation_spec(),
# self.train_env.action_spec(), ]
if cfg.goal_space is not None:
if cfg.goal_space not in _goals.goal_spaces.funcs[self.domain]:
raise ValueError(f"Unregistered goal space {cfg.goal_space} for domain {self.domain}")
# g = _goals.goals.funcs[cfg.goal_space][cfg.task]()
# self._data_specs.append(specs.Array((len(g),), np.float32, 'goal'))
# self._data_specs.extend([specs.Array((1,), np.float32, 'reward'),
# specs.Array((1,), np.float32, 'discount')])
self.replay_loader = ReplayBuffer(max_episodes=cfg.replay_buffer_episodes, discount=cfg.discount, future=cfg.future)
# # create data storage
# self.replay_storage = ReplayBufferStorage(data_specs, meta_specs,
# self.work_dir / 'buffer')
#
# # create replay buffer
# self.replay_loader = make_replay_loader(self.replay_storage,
# cfg.replay_buffer_size,
# cfg.batch_size,
# cfg.replay_buffer_num_workers,
# False, True, cfg.nstep, cfg.discount)
# create video recorders
# cam_id = 2 if 'quadruped' not in self.domain else 1
# cam_id = 1 # centered on subject
cam_id = 0 if 'quadruped' not in self.domain else 2
self.video_recorder = VideoRecorder(self.work_dir if cfg.save_video else None,
camera_id=cam_id, use_wandb=self.cfg.use_wandb)
self.timer = utils.Timer()
self.global_step = 0
self.global_episode = 0
self.eval_rewards_history: tp.List[float] = []
self._checkpoint_filepath = self.work_dir / "models" / "latest.pt"
if self._checkpoint_filepath.exists():
self.load_checkpoint(self._checkpoint_filepath)
elif cfg.load_model is not None:
self.load_checkpoint(cfg.load_model, exclude=["replay_loader"])
self.reward_cls: tp.Optional[_goals.BaseReward] = None
if self.cfg.custom_reward == "maze_multi_goal":
self.reward_cls = self._make_custom_reward(seed=self.cfg.seed)
def _make_env(self) -> dmc.EnvWrapper:
cfg = self.cfg
if self.domain == "grid":
return dmc.EnvWrapper(build_gridworld_task(self.cfg.task.split('_')[1]))
if self.domain == "d4rl":
import d4rl # type: ignore # pylint: disable=unused-import
import gym
return dmc.EnvWrapper(D4RLWrapper(gym.make(self.cfg.task.split('_')[1])))
return dmc.make(cfg.task, cfg.obs_type, cfg.frame_stack, cfg.action_repeat, cfg.seed,
goal_space=cfg.goal_space, append_goal_to_observation=cfg.append_goal_to_observation)
@property
def global_frame(self) -> int:
return self.global_step * self.cfg.action_repeat
def _make_custom_reward(self, seed: int) -> tp.Optional[_goals.BaseReward]:
"""Creates a custom reward function if provided in configuration
else returns None
"""
if self.cfg.custom_reward is None:
return None
return _goals.get_reward_function(self.cfg.custom_reward, seed)
def eval_maze_goals(self) -> None:
if isinstance(self.agent, (agents.SFAgent, agents.SFSVDAgent, agents.NEWAPSAgent)) and len(self.replay_loader) > 0:
self.agent.precompute_cov(self.replay_loader)
reward_cls = _goals.MazeMultiGoal()
rewards = list()
for g in reward_cls.goals:
goal_rewards = list()
goal_distances = list()
meta = self.agent.get_goal_meta(g)
for episode in range(self.cfg.num_eval_episodes):
self.video_recorder.init(self.eval_env, enabled=(episode == 0))
time_step = self.eval_env.reset()
episode_reward = 0.0
while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
0,
eval_mode=True)
time_step = self.eval_env.step(action)
self.video_recorder.record(self.eval_env)
assert isinstance(time_step, dmc.ExtendedGoalTimeStep)
step_reward, distance = reward_cls.from_goal(time_step.goal, g)
episode_reward += step_reward
goal_rewards.append(episode_reward)
goal_distances.append(distance)
self.video_recorder.save(f'{g}.mp4')
print(f"goal: {g}, avg_reward: {round(float(np.mean(goal_rewards)), 2)}, avg_distance: {round(float(np.mean(goal_distances)), 5)}")
rewards.append(float(np.mean(goal_rewards)))
self.eval_rewards_history.append(float(np.mean(rewards)))
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
log('episode_reward', self.eval_rewards_history[-1])
log('step', self.global_step)
log('episode', self.global_episode)
def eval(self) -> None:
step, episode = 0, 0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
physics_agg = dmc.PhysicsAggregator()
rewards: tp.List[float] = []
normalized_scores: tp.List[float] = []
        meta = _init_eval_meta(self) # does not work
z_correl = 0.0
is_d4rl_task = self.cfg.task.split('_')[0] == 'd4rl'
actor_success: tp.List[float] = []
while eval_until_episode(episode):
time_step = self.eval_env.reset()
# create custom reward if need be (if field exists)
seed = 12 * self.cfg.num_eval_episodes + len(rewards)
custom_reward = self._make_custom_reward(seed=seed)
if custom_reward is not None:
meta = _init_eval_meta(self, custom_reward)
if self.domain == "grid":
meta = _init_eval_meta(self)
total_reward = 0.0
self.video_recorder.init(self.eval_env, enabled=(episode == 0))
while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
self.global_step,
eval_mode=True)
time_step = self.eval_env.step(action)
physics_agg.add(self.eval_env)
self.video_recorder.record(self.eval_env)
# for legacy reasons, we need to check the name :s
if isinstance(self.agent, agents.FBDDPGAgent):
if self.agent.cfg.additional_metric:
z_correl += self.agent.compute_z_correl(time_step, meta)
actor_success.extend(self.agent.actor_success)
if custom_reward is not None:
time_step.reward = custom_reward.from_env(self.eval_env)
total_reward += time_step.reward
step += 1
if is_d4rl_task:
normalized_scores.append(self.eval_env.get_normalized_score(total_reward))
rewards.append(total_reward)
episode += 1
self.video_recorder.save(f'{self.global_frame}.mp4')
self.eval_rewards_history.append(float(np.mean(rewards)))
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
if is_d4rl_task:
log('episode_normalized_score', float(100 * np.mean(normalized_scores)))
log('episode_reward', self.eval_rewards_history[-1])
if len(rewards) > 1:
log('episode_reward#std', float(np.std(rewards)))
log('episode_length', step * self.cfg.action_repeat / episode)
log('episode', self.global_episode)
log('z_correl', z_correl / episode)
log('step', self.global_step)
if actor_success:
                log('actor_success', float(np.mean(actor_success)))
if isinstance(self.agent, agents.FBDDPGAgent):
log('z_norm', np.linalg.norm(meta['z']).item())
for key, val in physics_agg.dump():
log(key, val)
_CHECKPOINTED_KEYS = ('agent', 'global_step', 'global_episode', "replay_loader")
def save_checkpoint(self, fp: tp.Union[Path, str], exclude: tp.Sequence[str] = ()) -> None:
logger.info(f"Saving checkpoint to {fp}")
exclude = list(exclude)
assert all(x in self._CHECKPOINTED_KEYS for x in exclude)
fp = Path(fp)
fp.parent.mkdir(exist_ok=True, parents=True)
assert isinstance(self.replay_loader, ReplayBuffer), "Is this buffer designed for checkpointing?"
# this is just a dumb security check to not forget about it
payload = {k: self.__dict__[k] for k in self._CHECKPOINTED_KEYS if k not in exclude}
with fp.open('wb') as f:
torch.save(payload, f, pickle_protocol=4)
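    @staticmethod
    def _example_checkpoint_roundtrip() -> None:
        # Illustrative sketch added for documentation (keys are arbitrary):
        # checkpoints are plain dicts serialized with torch.save, so they can
        # be round-tripped through any binary file-like object.
        import io
        buffer = io.BytesIO()
        torch.save({"global_step": 3, "global_episode": 1}, buffer, pickle_protocol=4)
        buffer.seek(0)
        payload = torch.load(buffer)
        assert payload["global_step"] == 3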
def load_checkpoint(self, fp: tp.Union[Path, str], only: tp.Optional[tp.Sequence[str]] = None, exclude: tp.Sequence[str] = ()) -> None:
"""Reloads a checkpoint or part of it
Parameters
----------
only: None or sequence of str
reloads only a specific subset (defaults to all)
exclude: sequence of str
does not reload the provided keys
"""
print(f"loading checkpoint from {fp}")
fp = Path(fp)
with fp.open('rb') as f:
payload = torch.load(f)
_update_legacy_class(payload, (ReplayBuffer,))
if isinstance(payload, ReplayBuffer): # compatibility with pure buffers pickles
payload = {"replay_loader": payload}
if only is not None:
only = list(only)
assert all(x in self._CHECKPOINTED_KEYS for x in only)
payload = {x: payload[x] for x in only}
exclude = list(exclude)
assert all(x in self._CHECKPOINTED_KEYS for x in exclude)
for x in exclude:
payload.pop(x, None)
for name, val in payload.items():
logger.info("Reloading %s from %s", name, fp)
if name == "agent":
self.agent.init_from(val)
elif name == "replay_loader":
_update_legacy_class(val, (ReplayBuffer,))
assert isinstance(val, ReplayBuffer)
# pylint: disable=protected-access
                # drop unnecessary meta which could make a mess
val._current_episode.clear() # make sure we can start over
val._future = self.cfg.future
val._discount = self.cfg.discount
val._max_episodes = len(val._storage["discount"])
self.replay_loader = val
else:
assert hasattr(self, name)
setattr(self, name, val)
if name == "global_episode":
logger.warning(f"Reloaded agent at global episode {self.global_episode}")
def finalize(self) -> None:
print("Running final test", flush=True)
repeat = self.cfg.final_tests
if not repeat:
return
if self.cfg.custom_reward == "maze_multi_goal":
eval_hist = self.eval_rewards_history
rewards = {}
self.eval_rewards_history = []
self.cfg.num_eval_episodes = repeat
self.eval_maze_goals()
rewards["rewards"] = self.eval_rewards_history
self.eval_rewards_history = eval_hist # restore
else:
domain_tasks = {
"cheetah": ['walk', 'walk_backward', 'run', 'run_backward'],
"quadruped": ['stand', 'walk', 'run', 'jump'],
"walker": ['stand', 'walk', 'run', 'flip'],
}
if self.domain not in domain_tasks:
return
eval_hist = self.eval_rewards_history
rewards = {}
for name in domain_tasks[self.domain]:
task = "_".join([self.domain, name])
self.cfg.task = task
self.cfg.custom_reward = task # for the replay buffer
self.cfg.seed += 1 # for the sake of avoiding similar seeds
self.eval_env = self._make_env()
self.eval_rewards_history = []
self.cfg.num_eval_episodes = 1
for _ in range(repeat):
self.eval()
rewards[task] = self.eval_rewards_history
self.eval_rewards_history = eval_hist # restore
with (self.work_dir / "test_rewards.json").open("w") as f:
json.dump(rewards, f)
class Workspace(BaseWorkspace[PretrainConfig]):
def __init__(self, cfg: PretrainConfig) -> None:
super().__init__(cfg)
self.train_video_recorder = TrainVideoRecorder(self.work_dir if cfg.save_train_video else None,
camera_id=self.video_recorder.camera_id, use_wandb=self.cfg.use_wandb)
        if not self._checkpoint_filepath.exists(): # don't reload if there is a checkpoint
if cfg.load_replay_buffer is not None:
if self.cfg.task.split('_')[0] == "d4rl":
d4rl_replay_buffer_builder = D4RLReplayBufferBuilder()
self.replay_storage = d4rl_replay_buffer_builder.prepare_replay_buffer_d4rl(self.train_env, self.agent.init_meta(), self.cfg)
self.replay_loader = self.replay_storage
else:
self.load_checkpoint(cfg.load_replay_buffer, only=["replay_loader"])
def _init_meta(self):
if isinstance(self.agent, agents.GoalTD3Agent) and isinstance(self.reward_cls, _goals.MazeMultiGoal):
meta = self.agent.init_meta(self.reward_cls)
elif isinstance(self.agent, agents.GoalSMAgent) and len(self.replay_loader) > 0:
meta = self.agent.init_meta(self.replay_loader)
else:
meta = self.agent.init_meta()
return meta
def train(self) -> None:
# predicates
train_until_step = utils.Until(self.cfg.num_train_frames,
self.cfg.action_repeat)
seed_until_step = utils.Until(self.cfg.num_seed_frames,
self.cfg.action_repeat)
eval_every_step = utils.Every(self.cfg.eval_every_frames,
self.cfg.action_repeat)
# if self.cfg.custom_reward is not None:
# raise NotImplementedError("Custom reward not implemented in pretrain.py train loop (see anytrain.py)")
episode_step, episode_reward, z_correl = 0, 0.0, 0.0
time_step = self.train_env.reset()
meta = self._init_meta()
self.replay_loader.add(time_step, meta)
self.train_video_recorder.init(time_step.observation)
metrics = None
physics_agg = dmc.PhysicsAggregator()
while train_until_step(self.global_step):
if time_step.last():
self.global_episode += 1
self.train_video_recorder.save(f'{self.global_frame}.mp4')
# wait until all the metrics schema is populated
if metrics is not None:
# log stats
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_loader))
log('step', self.global_step)
log('z_correl', z_correl)
for key, val in physics_agg.dump():
log(key, val)
if self.cfg.use_hiplog and self.logger.hiplog.content:
self.logger.hiplog.write()
# reset env
time_step = self.train_env.reset()
meta = self._init_meta()
self.replay_loader.add(time_step, meta)
self.train_video_recorder.init(time_step.observation)
# try to save snapshot
if self.global_frame in self.cfg.snapshot_at:
self.save_checkpoint(self._checkpoint_filepath.with_name(f'snapshot_{self.global_frame}.pt'))
episode_step = 0
episode_reward = 0.0
z_correl = 0.0
# try to evaluate
if eval_every_step(self.global_step):
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
if self.cfg.custom_reward == "maze_multi_goal":
self.eval_maze_goals()
# elif self.domain == "grid":
# self.eval_grid_goals()
else:
self.eval()
meta = self.agent.update_meta(meta, self.global_step, time_step, finetune=False, replay_loader=self.replay_loader)
# sample action
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
self.global_step,
eval_mode=False)
# try to update the agent
if not seed_until_step(self.global_step):
# TODO: reward_free should be handled in the agent update itself !
# TODO: the commented code below raises incompatible type "Generator[EpisodeBatch[ndarray[Any, Any]], None, None]"; expected "ReplayBuffer"
# replay = (x.with_no_reward() if self.cfg.reward_free else x for x in self.replay_loader)
if isinstance(self.agent, agents.GoalTD3Agent) and isinstance(self.reward_cls, _goals.MazeMultiGoal):
metrics = self.agent.update(self.replay_loader, self.global_step, self.reward_cls)
else:
metrics = self.agent.update(self.replay_loader, self.global_step)
self.logger.log_metrics(metrics, self.global_frame, ty='train')
# take env step
time_step = self.train_env.step(action)
physics_agg.add(self.train_env)
episode_reward += time_step.reward
self.replay_loader.add(time_step, meta)
self.train_video_recorder.record(time_step.observation)
if isinstance(self.agent, agents.FBDDPGAgent):
z_correl += self.agent.compute_z_correl(time_step, meta)
episode_step += 1
self.global_step += 1
# save checkpoint to reload
if not self.global_frame % self.cfg.checkpoint_every:
self.save_checkpoint(self._checkpoint_filepath)
self.save_checkpoint(self._checkpoint_filepath) # make sure we save the final checkpoint
self.finalize()
@hydra.main(config_path='.', config_name='base_config', version_base="1.1")
def main(cfg: omgcf.DictConfig) -> None:
    # we treat cfg as a PretrainConfig, though Hydra actually passes a DictConfig
workspace = Workspace(cfg) # type: ignore
workspace.train()
if __name__ == '__main__':
main()
|
controllable_agent-main
|
url_benchmark/pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import logging
import dataclasses
import typing as tp
from url_benchmark import pretrain # NEEDS TO BE FIRST NON-STANDARD IMPORT (sets up env variables)
import omegaconf as omgcf
import hydra
from hydra.core.config_store import ConfigStore
import torch
from url_benchmark import dmc
from url_benchmark import utils
from url_benchmark import agent as agents
from url_benchmark.video import TrainVideoRecorder
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = True
from pathlib import Path
import sys
base = Path(__file__).absolute().parents[1]
# we need to add base repo to be able to import url_benchmark
# we need to add url_benchmark to be able to reload legacy checkpoints
for fp in [base, base / "url_benchmark"]:
assert fp.exists()
if str(fp) not in sys.path:
sys.path.append(str(fp))
@dataclasses.dataclass
class OnlinetrainConfig(pretrain.Config):
# mode
reward_free: bool = True
# train settings
num_train_episodes: int = 2000
# snapshot
eval_every_episodes: int = 10
load_replay_buffer: tp.Optional[str] = None
# replay buffer
# replay_buffer_num_workers: int = 4
# nstep: int = omgcf.II("agent.nstep")
# misc
save_train_video: bool = False
update_replay_buffer: bool = True
num_rollout_episodes: int = 10
num_agent_updates: int = 10
ConfigStore.instance().store(name="workspace_config", node=OnlinetrainConfig)
class Workspace(pretrain.BaseWorkspace[OnlinetrainConfig]):
def __init__(self, cfg: OnlinetrainConfig) -> None:
super().__init__(cfg)
self.train_video_recorder = TrainVideoRecorder(self.work_dir if cfg.save_train_video else None,
camera_id=self.video_recorder.camera_id, use_wandb=self.cfg.use_wandb)
self._last_processed_step = 0 # for checkpointing
if not cfg.update_replay_buffer:
cfg.num_seed_frames = -1
if cfg.load_replay_buffer is None:
raise ValueError("If update_replay_buffer is False, load_replay_buffer must be provided")
        if not self._checkpoint_filepath.exists(): # don't reload if there is a checkpoint
if cfg.load_replay_buffer is not None:
self.load_checkpoint(cfg.load_replay_buffer, only=["replay_loader"])
def _play_episode(self, log_metrics: bool = True) -> None:
time_step = self.train_env.reset()
meta = self.agent.init_meta()
self.replay_loader.add(time_step, meta)
self.train_video_recorder.init(time_step.observation)
episode_step = 0
episode_reward = 0.0
z_correl = 0.0
physics_agg = dmc.PhysicsAggregator()
custom_reward = self._make_custom_reward(seed=self.global_step)
while not time_step.last():
meta = self.agent.update_meta(meta, self.global_step, time_step, replay_loader=self.replay_loader)
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
self.global_step,
eval_mode=False)
time_step = self.train_env.step(action)
if custom_reward is not None:
time_step.reward = custom_reward.from_env(self.train_env)
physics_agg.add(self.train_env)
episode_reward += time_step.reward
self.replay_loader.add(time_step, meta)
self.train_video_recorder.record(time_step.observation)
if isinstance(self.agent, agents.FBDDPGAgent):
z_correl += self.agent.compute_z_correl(time_step, meta)
episode_step += 1
self.global_step += 1
# log episode stats
if log_metrics:
self.train_video_recorder.save(f'{self.global_frame}.mp4')
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('z_correl', z_correl)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_loader))
log('step', self.global_step)
for key, val in physics_agg.dump():
log(key, val)
def _checkpoint_if_need_be(self) -> None:
# save checkpoint to reload
if self.global_step // self.cfg.checkpoint_every != self._last_processed_step // self.cfg.checkpoint_every:
self.save_checkpoint(self._checkpoint_filepath)
if any(self._last_processed_step < x <= self.global_step for x in self.cfg.snapshot_at):
self.save_checkpoint(self._checkpoint_filepath.with_name(f'snapshot_{self.global_frame}.pt'))
self._last_processed_step = self.global_step
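    @staticmethod
    def _example_checkpoint_boundary() -> None:
        # Illustrative sketch added for documentation: the integer-division
        # comparison above fires exactly when a checkpoint_every boundary was
        # crossed since the last processed step (values below are arbitrary).
        checkpoint_every = 100
        assert 150 // checkpoint_every != 90 // checkpoint_every  # crossed 100
        assert 190 // checkpoint_every == 150 // checkpoint_every  # no new boundary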
def train(self) -> None:
metrics: tp.Optional[tp.Dict[str, float]] = None
while self.global_episode < self.cfg.num_train_episodes:
logger.info(f"rollout {self.cfg.num_rollout_episodes} episodes...")
for _ in range(self.cfg.num_rollout_episodes):
self._play_episode(log_metrics=metrics is not None) # logging requires all metrics available
self.global_episode += 1
# update the agent
if self.global_frame > self.cfg.num_seed_frames:
# TODO: reward_free should be handled in the agent update itself !
# replay = (x.with_no_reward() if self.cfg.reward_free else x for x in self.replay_loader)
logger.info(f"Agent update for {self.cfg.num_agent_updates}...")
for _ in range(self.cfg.num_agent_updates):
metrics = self.agent.update(self.replay_loader, self.global_step)
if metrics is not None:
self.logger.log_metrics(metrics, self.global_step, ty='train')
# evaluate
if not self.global_episode % self.cfg.eval_every_episodes:
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
self.eval()
if self.cfg.use_hiplog and self.logger.hiplog.content:
self.logger.hiplog.write() # write to hiplog only once per episode
# checkpoint
self._checkpoint_if_need_be()
self.save_checkpoint(self._checkpoint_filepath)
self.finalize()
@hydra.main(config_path='.', config_name='base_config')
def main(cfg: omgcf.DictConfig) -> None:
    # we treat cfg as an OnlinetrainConfig, though Hydra actually passes a DictConfig
workspace = Workspace(cfg) # type: ignore
workspace.train()
if __name__ == '__main__':
main()
|
controllable_agent-main
|
url_benchmark/train_online.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import logging
import dataclasses
import typing as tp
from url_benchmark import pretrain # NEEDS TO BE FIRST NON-STANDARD IMPORT (sets up env variables)
import omegaconf as omgcf
import hydra
from hydra.core.config_store import ConfigStore
import torch
from url_benchmark import dmc
from url_benchmark import utils
from url_benchmark import agent as agents
from url_benchmark.d4rl_benchmark import D4RLConfig, D4RLReplayBufferBuilder
from url_benchmark.video import TrainVideoRecorder
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = True
from pathlib import Path
import sys
base = Path(__file__).absolute().parents[1]
# we need to add base repo to be able to import url_benchmark
# we need to add url_benchmark to be able to reload legacy checkpoints
for fp in [base, base / "url_benchmark"]:
assert fp.exists()
if str(fp) not in sys.path:
sys.path.append(str(fp))
@dataclasses.dataclass
class AnytrainConfig(pretrain.Config):
# mode
reward_free: bool = True
# train settings
num_train_episodes: int = 2000
# snapshot
eval_every_episodes: int = 10
load_replay_buffer: tp.Optional[str] = None
# replay buffer
# replay_buffer_num_workers: int = 4
# nstep: int = omgcf.II("agent.nstep")
# misc
save_train_video: bool = False
update_replay_buffer: bool = True
num_total_updates: tp.Optional[int] = None
d4rl_config: D4RLConfig = dataclasses.field(default_factory=D4RLConfig)
ConfigStore.instance().store(name="workspace_config", node=AnytrainConfig)
class Workspace(pretrain.BaseWorkspace[AnytrainConfig]):
def __init__(self, cfg: AnytrainConfig) -> None:
super().__init__(cfg)
self.train_video_recorder = TrainVideoRecorder(self.work_dir if cfg.save_train_video else None,
camera_id=self.video_recorder.camera_id, use_wandb=self.cfg.use_wandb)
self._last_processed_step = 0 # for checkpointing
if not cfg.update_replay_buffer:
cfg.num_seed_frames = -1
if cfg.load_replay_buffer is None:
raise ValueError("If update_replay_buffer is False, load_replay_buffer must be provided")
        if not self._checkpoint_filepath.exists(): # don't reload if there is a checkpoint
if cfg.load_replay_buffer is not None:
if self.cfg.task.split('_')[0] == "d4rl":
d4rl_replay_buffer_builder = D4RLReplayBufferBuilder()
self.replay_storage = d4rl_replay_buffer_builder.prepare_replay_buffer_d4rl(self.train_env, self.agent.init_meta(), self.cfg)
self.replay_loader = self.replay_storage
else:
self.load_checkpoint(cfg.load_replay_buffer, only=["replay_loader"])
def _play_episode(self, log_metrics: bool = True) -> None:
time_step = self.train_env.reset()
meta = self.agent.init_meta()
self.replay_loader.add(time_step, meta)
self.train_video_recorder.init(time_step.observation)
episode_step = 0
episode_reward = 0.0
z_correl = 0.0
physics_agg = dmc.PhysicsAggregator()
custom_reward = self._make_custom_reward(seed=self.global_step)
while not time_step.last():
meta = self.agent.update_meta(meta, self.global_step, time_step, replay_loader=self.replay_loader)
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
self.global_step,
eval_mode=False)
time_step = self.train_env.step(action)
if custom_reward is not None:
time_step.reward = custom_reward.from_env(self.train_env)
physics_agg.add(self.train_env)
episode_reward += time_step.reward
self.replay_loader.add(time_step, meta)
self.train_video_recorder.record(time_step.observation)
if isinstance(self.agent, agents.FBDDPGAgent):
z_correl += self.agent.compute_z_correl(time_step, meta)
episode_step += 1
self.global_step += 1
# log episode stats
if log_metrics:
self.train_video_recorder.save(f'{self.global_frame}.mp4')
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('z_correl', z_correl)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_loader))
log('step', self.global_step)
for key, val in physics_agg.dump():
log(key, val)
def _checkpoint_if_need_be(self) -> None:
# save checkpoint to reload
if self.global_step // self.cfg.checkpoint_every != self._last_processed_step // self.cfg.checkpoint_every:
self.save_checkpoint(self._checkpoint_filepath)
if any(self._last_processed_step < x <= self.global_step for x in self.cfg.snapshot_at):
self.save_checkpoint(self._checkpoint_filepath.with_name(f'snapshot_{self.global_frame}.pt'))
self._last_processed_step = self.global_step
def train(self) -> None:
metrics: tp.Optional[tp.Dict[str, float]] = None
last_step = 0
while self.global_episode < self.cfg.num_train_episodes:
# play 1 episode
if self.cfg.update_replay_buffer:
self._play_episode(log_metrics=metrics is not None) # logging requires all metrics available
else:
global_step_update = self.replay_loader.avg_episode_length
if self.cfg.num_total_updates is not None:
global_step_update = self.cfg.num_total_updates // self.cfg.num_train_episodes
self.global_step += global_step_update
self.global_episode += 1
# update the agent
if self.global_frame > self.cfg.num_seed_frames:
# TODO: reward_free should be handled in the agent update itself !
# replay = (x.with_no_reward() if self.cfg.reward_free else x for x in self.replay_loader)
for step in range(last_step + 1, self.global_step + 1): # make it comparable to the standard pretrain pipeline
metrics = self.agent.update(self.replay_loader, step)
self.logger.log_metrics(metrics, step, ty='train')
last_step = self.global_step
# evaluate
if not self.global_episode % self.cfg.eval_every_episodes:
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
self.eval()
if self.cfg.use_hiplog and self.logger.hiplog.content:
self.logger.hiplog.write() # write to hiplog only once per episode
# checkpoint
self._checkpoint_if_need_be()
self.save_checkpoint(self._checkpoint_filepath)
self.finalize()
@hydra.main(config_path='.', config_name='base_config', version_base="1.1")
def main(cfg: omgcf.DictConfig) -> None:
    # we treat cfg as an AnytrainConfig, though Hydra actually passes a DictConfig
workspace = Workspace(cfg) # type: ignore
workspace.train()
if __name__ == '__main__':
main()
|
controllable_agent-main
|
url_benchmark/anytrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import token
import tokenize
import functools
import typing as tp
from io import BytesIO
from collections import OrderedDict
import numpy as np
from url_benchmark import dmc
from dm_control.utils import rewards
from url_benchmark.custom_dmc_tasks.jaco import TASKS as jaco_tasks_list
from url_benchmark.custom_dmc_tasks.point_mass_maze import TASKS as point_mass_maze_tasks_list
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
jaco_tasks = dict(jaco_tasks_list)
point_mass_maze_tasks = dict(point_mass_maze_tasks_list)
F = tp.TypeVar("F", bound=tp.Callable[..., np.ndarray])
class Register(tp.Generic[F]):
def __init__(self) -> None:
self.funcs: tp.Dict[str, tp.Dict[str, F]] = {}
def __call__(self, name: str) -> tp.Callable[[F], F]:
return functools.partial(self._register, name=name)
def _register(self, func: F, name: str) -> F:
fname = func.__name__
subdict = self.funcs.setdefault(name, {})
if fname in subdict:
raise ValueError(f"Already registered a function {fname} for {name}")
subdict[fname] = func
return func
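def _example_register_usage() -> None:
    # Illustrative sketch added for documentation (the "toy_space" name is made
    # up): registering a function under a space name and looking it up again.
    demo: Register[tp.Callable[[], np.ndarray]] = Register()
    @demo("toy_space")
    def toy_goal() -> np.ndarray:
        return np.zeros(2, dtype=np.float32)
    assert demo.funcs["toy_space"]["toy_goal"] is toy_goal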
goal_spaces: Register[tp.Callable[[dmc.EnvWrapper], np.ndarray]] = Register()
goals: Register[tp.Callable[[], np.ndarray]] = Register()
# # # # #
# goal spaces, defined on one environment, specifying which features to use as goals
# # # # #
# pylint: disable=function-redefined
@goal_spaces("jaco")
def simplified_jaco(env: dmc.EnvWrapper) -> np.ndarray:
return np.array(env.physics.bind(env.task._hand.tool_center_point).xpos,
dtype=np.float32)
@goal_spaces("point_mass_maze")
def simplified_point_mass_maze(env: dmc.EnvWrapper) -> np.ndarray:
return np.array(env.physics.named.data.geom_xpos['pointmass'][:2],
dtype=np.float32)
@goal_spaces("walker")
def simplified_walker(env: dmc.EnvWrapper) -> np.ndarray:
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/walker.py
return np.array([env.physics.torso_height(),
env.physics.torso_upright(),
env.physics.horizontal_velocity()],
dtype=np.float32)
@goal_spaces("walker")
def walker_pos_speed(env: dmc.EnvWrapper) -> np.ndarray:
"""simplifed walker, with x position as additional variable"""
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/walker.py
x = env.physics.named.data.xpos['torso', 'x']
return np.concatenate([simplified_walker(env), [x]], axis=0, dtype=np.float32) # type: ignore
@goal_spaces("walker")
def walker_pos_speed_z(env: dmc.EnvWrapper) -> np.ndarray:
"""simplifed walker, with x position as additional variable"""
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/walker.py
# vz = env.physics.named.data.sensordata["torso_subtreelinvel"][-1]
# om_y = env.physics.named.data.subtree_angmom['torso'][1]
vz = env.physics.named.data.subtree_linvel['torso', 'z']
om_y = env.physics.named.data.subtree_angmom['torso', 'y']
return np.concatenate([walker_pos_speed(env), [vz, om_y]], axis=0, dtype=np.float32) # type: ignore
@goal_spaces("quadruped")
def simplified_quadruped(env: dmc.EnvWrapper) -> np.ndarray:
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/quadruped.py#L145
return np.array([env.physics.torso_upright(),
np.linalg.norm(env.physics.torso_velocity())],
dtype=np.float32)
@goal_spaces("quadruped")
def quad_pos_speed(env: dmc.EnvWrapper) -> np.ndarray:
# check the physics here:
# https://github.com/deepmind/dm_control/blob/d72c22f3bb89178bff38728957daf62965632c2f/dm_control/suite/quadruped.py#L145
x = np.array(env.physics.named.data.site_xpos['workspace'])
states = [[env.physics.torso_upright()], x, env.physics.torso_velocity()]
return np.concatenate(states, dtype=np.float32)
# @goal_spaces("quadruped") # this one needs a specific task for the ball to be present
# def quadruped_positions(env: dmc.EnvWrapper) -> np.ndarray:
# data = env.physics.named.data
# states = [data.xpos['ball'] - data.site_xpos['target'], data.xpos["torso"] - data.site_xpos['target']]
# return np.concatenate(states, dtype=np.float32)
# # # # #
# goals, defined on one goal_space, specifying target values in that space
# # # # #
@goals("simplified_walker")
def walker_stand() -> np.ndarray:
return np.array([1.2, 1.0, 0], dtype=np.float32)
@goals("simplified_walker")
def walker_walk() -> np.ndarray:
return np.array([1.2, 1.0, 2], dtype=np.float32)
@goals("simplified_walker")
def walker_run() -> np.ndarray:
return np.array([1.2, 1.0, 4], dtype=np.float32)
@goals("simplified_quadruped")
def quadruped_stand() -> np.ndarray:
return np.array([1.0, 0], dtype=np.float32)
@goals("simplified_quadruped")
def quadruped_walk() -> np.ndarray:
return np.array([1.0, 0.6], dtype=np.float32)
@goals("simplified_quadruped")
def quadruped_run() -> np.ndarray:
return np.array([1.0, 6], dtype=np.float32)
@goals("quadruped_positions")
def quadruped_fetch() -> np.ndarray:
return np.zeros((6,), dtype=np.float32)
@goals("simplified_point_mass_maze")
def point_mass_maze_reach_top_left() -> np.ndarray:
return np.array(point_mass_maze_tasks['reach_top_left'],
dtype=np.float32)
@goals("simplified_point_mass_maze")
def point_mass_maze_reach_top_right() -> np.ndarray:
return np.array(point_mass_maze_tasks['reach_top_right'],
dtype=np.float32)
@goals("simplified_point_mass_maze")
def point_mass_maze_reach_bottom_left() -> np.ndarray:
return np.array(point_mass_maze_tasks['reach_bottom_left'],
dtype=np.float32)
@goals("simplified_point_mass_maze")
def point_mass_maze_reach_bottom_right() -> np.ndarray:
return np.array(point_mass_maze_tasks['reach_bottom_right'],
dtype=np.float32)
@goals("simplified_jaco")
def jaco_reach_top_left() -> np.ndarray:
return jaco_tasks['reach_top_left'].astype(np.float32)
@goals("simplified_jaco")
def jaco_reach_top_right() -> np.ndarray:
return jaco_tasks['reach_top_right'].astype(np.float32)
@goals("simplified_jaco")
def jaco_reach_bottom_left() -> np.ndarray:
return jaco_tasks['reach_bottom_left'].astype(np.float32)
@goals("simplified_jaco")
def jaco_reach_bottom_right() -> np.ndarray:
return jaco_tasks['reach_bottom_right'].astype(np.float32)
@goals("walker_pos_speed_z")
def walker_dummy() -> np.ndarray:
return np.zeros((6,), dtype=np.float32)
# # # Custom Reward # # #
def _make_env(domain: str) -> dmc.EnvWrapper:
task = {"quadruped": "stand", "walker": "walk", "jaco": "reach_top_left", "point_mass_maze": "reach_bottom_right"}[domain]
return dmc.make(f"{domain}_{task}", obs_type="states", frame_stack=1, action_repeat=1, seed=12)
def get_goal_space_dim(name: str) -> int:
domain = {space: domain for domain, spaces in goal_spaces.funcs.items() for space in spaces}[name]
env = _make_env(domain)
return goal_spaces.funcs[domain][name](env).size
class BaseReward:
def __init__(self, seed: tp.Optional[int] = None) -> None:
self._env: dmc.EnvWrapper # to be instantiated in subclasses
self._rng = np.random.RandomState(seed)
def get_goal(self, goal_space: str) -> np.ndarray:
raise NotImplementedError
def from_physics(self, physics: np.ndarray) -> float:
"careful this is not threadsafe"
with self._env.physics.reset_context():
self._env.physics.set_state(physics)
return self.from_env(self._env)
def from_env(self, env: dmc.EnvWrapper) -> float:
raise NotImplementedError
def get_reward_function(name: str, seed: tp.Optional[int] = None) -> BaseReward:
if name == "quadruped_mix":
return QuadrupedReward(seed)
if name == "walker_random_equation":
return WalkerRandomReward(seed)
if name == "quadruped_position":
return QuadrupedPosReward(seed)
if name == "maze_multi_goal":
return MazeMultiGoal(seed)
if name == "walker_position":
return WalkerPosReward(seed)
return DmcReward(name)
def _inv(distance: float) -> float:
# print("dist", distance)
return 1 / (1 + abs(distance))
class DmcReward(BaseReward):
def __init__(self, name: str) -> None:
super().__init__()
self.name = name
env_name, task_name = name.split("_", maxsplit=1)
try:
from dm_control import suite # import
from url_benchmark import custom_dmc_tasks as cdmc
except ImportError as e:
raise dmc.UnsupportedPlatform("DMC does not run on Mac") from e
make = suite.load if (env_name, task_name) in suite.ALL_TASKS else cdmc.make
self._env = make(env_name, task_name)
def from_env(self, env: dmc.EnvWrapper) -> float:
return float(self._env.task.get_reward(env.physics))
# def from_env(self, env: dmc.EnvWrapper) -> float:
# return self.from_physics(env.physics.get_state())
#
# def from_physics(self, physics: np.ndarray) -> float:
# # pdb.set_trace()
# with self._env.physics.reset_context():
# self._env.physics.set_state(physics)
# return float(self._env.task.get_reward(self._env.physics))
class QuadrupedReward(BaseReward):
NUM_CASES = 7
def __init__(self, seed: tp.Optional[int] = None) -> None:
super().__init__(seed)
self._env = _make_env("quadruped")
self.x = self._rng.uniform(-5, 5, size=2)
self.vx = self._rng.uniform(-3, 3, size=2)
self.quadrant = self._rng.choice([1, -1], size=2, replace=True)
self.speed = float(np.linalg.norm(self.vx))
self._case = self._rng.randint(self.NUM_CASES)
def from_env(self, env: dmc.EnvWrapper) -> float:
# x = env.physics.named.data.xpos["torso"][:2]
x = env.physics.named.data.site_xpos['workspace'][:2]
vx = env.physics.torso_velocity()[:2]
up = max(0, float(env.physics.torso_upright()))
speed = float(np.linalg.norm(vx))
if not self._case: # specific speed norm
return up * _inv(speed - self.speed)
if self._case == 1: # specific position
return up * _inv(float(np.linalg.norm(x - self.x)))
if self._case == 2: # specific quadrant
return up * float(np.all(x * self.quadrant > self.x))
if self._case == 3: # specific quadrant and speed norm
return up * float(np.all(x * self.quadrant > self.x)) * _inv(self.speed - speed)
if self._case == 4: # specific speed
return up * _inv(np.linalg.norm(self.vx - vx) / np.sqrt(2))
if self._case == 5: # specific quadrant and sufficient speed
return up * float(np.all(x * self.quadrant > self.x)) * (speed > self.speed)
if self._case == 6: # sufficient speed
return up * (speed > self.speed)
else:
raise ValueError(f"No case #{self._case}")
class QuadrupedPosReward(BaseReward):
"""Deterministic positional reward"""
def __init__(self, seed: tp.Optional[int] = None) -> None:
super().__init__(seed)
self._env = _make_env("quadruped")
self.x = np.array([2, 2, 0.8])
def get_goal(self, goal_space: str) -> np.ndarray:
if goal_space != "quad_pos_speed":
raise ValueError(f"Goal space {goal_space} not supported with this reward")
states = [[1.0], self.x, [0] * 3]
return np.concatenate(states, dtype=np.float32) # type: ignore
def from_env(self, env: dmc.EnvWrapper) -> float:
x = env.physics.named.data.site_xpos['workspace']
up = float(env.physics.torso_upright())
up = (up + 1) / 2
out = 0.5 * up + 0.5 * _inv(float(np.linalg.norm(x - self.x))) # * _inv(speed)
return out
class WalkerPosReward(BaseReward):
"""Random positional reward"""
def __init__(self, seed: tp.Optional[int] = None) -> None:
super().__init__(seed)
self._env = _make_env("walker")
        self.x = self._rng.randint(-20, 20)
def get_goal(self, goal_space: str) -> np.ndarray:
if goal_space != "walker_pos_speed_z":
raise ValueError(f"Goal space {goal_space} not supported with this reward")
states = [1, 1, 0, self.x, 0, 0] # [z, up, vx, x, vz, om_y]
# states = [self.x]
return np.array(states, dtype=np.float32) # type: ignore
def from_env(self, env: dmc.EnvWrapper) -> float:
x = env.physics.named.data.xpos['torso', 'x']
target_size = 1
d = abs(x - self.x)
reward = rewards.tolerance(d, bounds=(0, target_size), margin=target_size)
return reward
class MazeMultiGoal(BaseReward):
def __init__(self, seed: tp.Optional[int] = None) -> None:
super().__init__(seed)
self.goals = np.array([
[-0.15, 0.15], # room 1: top left
[-0.22, 0.22], # room 1
[-0.08, 0.08], # room 1
[-0.22, 0.08], # room 1
[-0.08, 0.22], # room 1
[0.15, 0.15], # room 2: top right
[0.22, 0.22], # room 2
[0.08, 0.08], # room 2
[0.22, 0.08], # room 2
[0.08, 0.22], # room 2
[-0.15, -0.15], # room 3: bottom left
[-0.22, -0.22], # room 3
[-0.08, -0.08], # room 3
[-0.22, -0.08], # room 3
[-0.08, -0.22], # room 3
[0.15, -0.15], # room 4: bottom right
[0.22, -0.22], # room 4
[0.08, -0.08], # room 4
[0.22, -0.08], # room 4
[0.08, -0.22], # room 4
], dtype=np.float32)
# save images for debug
# import imageio
# self._env = dmc.make("point_mass_maze_multi_goal", obs_type="states", frame_stack=1, action_repeat=1, seed=12)
# self._env.reset()
# img = self._env.physics.render(height=256, width=256, camera_id=0)
# imageio.imsave("maze.png", img)
def from_goal(self, achieved_goal: np.ndarray, desired_goal: np.ndarray) -> tp.Tuple[float, float]:
"""returns reward and distance"""
assert achieved_goal.shape == desired_goal.shape
target_size = .03
d: np.ndarray = achieved_goal - desired_goal
distance = np.linalg.norm(d, axis=-1) if len(d.shape) > 0 else np.linalg.norm(d)
reward = rewards.tolerance(distance,
bounds=(0, target_size), margin=target_size)
return reward, distance
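def _example_maze_reward() -> None:
    # Illustrative sketch added for documentation: reaching a goal exactly
    # yields reward 1 and distance 0, given the 0.03 tolerance above.
    reward_cls = MazeMultiGoal()
    reward, distance = reward_cls.from_goal(reward_cls.goals[0], reward_cls.goals[0])
    assert distance == 0.0 and reward == 1.0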
class WalkerYogaReward:
def __init__(self) -> None:
self._env = _make_env("walker")
self._goals = get_walkeryoga_goals()
self.target_obs = {}
for key, g in self._goals.items():
self.target_obs[key] = get_obs_from_yoga_goal(self._env, g).astype(np.float32)
# save images for debug
# import imageio
# img = self._env.physics.render(height=256, width=256, camera_id=0)
# imageio.imsave(f"yoga_goals/{key}.png", img)
def compute_reward(self, phy: np.ndarray, g: str) -> float:
assert g in self._goals.keys()
distance = _oracle_distance(phy, self._goals[g])
return - distance
def _shortest_angle(angle):
if not angle.shape:
return _shortest_angle(angle[None])[0]
angle = angle % (2 * np.pi)
angle[angle > np.pi] = 2 * np.pi - angle[angle > np.pi]
return angle
def _oracle_distance(x1, x2):
    assert x1.shape[0] in [9, 18] and x2.shape[0] in [9, 18]
x1, x2 = x1[:9], x2[:9]
def get_su(_goal):
dist = np.abs(x1 - _goal)
dist = dist[..., [0, 2, 3, 4, 6, 7]]
dist[..., 1] = _shortest_angle(dist[..., 1])
return dist.max(-1)
return min(get_su(x2), get_su(x2[..., [0, 1, 2, 6, 7, 8, 3, 4, 5]]))
def get_obs_from_yoga_goal(env, goal):
new_state = np.pad(goal, (0, 9), mode="constant")
env.physics.set_state(new_state)
env.physics.forward()
obs = env.task.get_observation(env.physics)
return _flatten_obs(obs)
def _flatten_obs(obs):
obs_pieces = []
for v in obs.values():
flat = np.array([v]) if np.isscalar(v) else v.ravel()
obs_pieces.append(flat)
return np.concatenate(obs_pieces, axis=0)
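def _example_flatten_obs() -> None:
    # Illustrative sketch added for documentation: a scalar entry and a vector
    # entry are concatenated into a single 1-D observation.
    obs = OrderedDict(height=0.5, orientations=np.zeros(4))
    assert _flatten_obs(obs).shape == (5,)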
def get_walkeryoga_goals():
# pose[0] is height
# pose[1] is x
# pose[2] is global rotation
# pose[3:6] - first leg hip, knee, ankle
# pose[6:9] - second leg hip, knee, ankle
# Note: seems like walker can't bend legs backwards
lie_back = [-1.2, 0., -1.57, 0., 0., 0., 0, -0., 0.]
lie_front = [-1.2, -0, 1.57, 0., 0, 0., 0., 0., 0.]
legs_up = [-1.24, 0., -1.57, 1.57, 0., 0.0, 1.57, -0., 0.0]
kneel = [-0.5, 0., 0., 0., -1.57, -0.8, 1.57, -1.57, 0.0]
side_angle = [-0.3, 0., 0.9, 0., 0., -0.7, 1.87, -1.07, 0.0]
stand_up = [-0.15, 0., 0.34, 0.74, -1.34, -0., 1.1, -0.66, -0.1]
lean_back = [-0.27, 0., -0.45, 0.22, -1.5, 0.86, 0.6, -0.8, -0.4]
boat = [-1.04, 0., -0.8, 1.6, 0., 0.0, 1.6, -0., 0.0]
bridge = [-1.1, 0., -2.2, -0.3, -1.5, 0., -0.3, -0.8, -0.4]
head_stand = [-1, 0., -3, 0.6, -1, -0.3, 0.9, -0.5, 0.3]
one_feet = [-0.2, 0., 0, 0.7, -1.34, 0.5, 1.5, -0.6, 0.1]
arabesque = [-0.34, 0., 1.57, 1.57, 0, 0., 0, -0., 0.]
return {'lie_back': np.array(lie_back, dtype=np.float32),
'lie_front': np.array(lie_front, dtype=np.float32),
'legs_up': np.array(legs_up, dtype=np.float32),
'kneel': np.array(kneel, dtype=np.float32),
'side_angle': np.array(side_angle, dtype=np.float32),
'stand_up': np.array(stand_up, dtype=np.float32),
'lean_back': np.array(lean_back, dtype=np.float32),
'boat': np.array(boat, dtype=np.float32),
'bridge': np.array(bridge, dtype=np.float32),
'one_feet': np.array(one_feet, dtype=np.float32),
'head_stand': np.array(head_stand, dtype=np.float32),
'arabesque': np.array(arabesque, dtype=np.float32)
}
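def _example_oracle_distance() -> None:
    # Illustrative sketch added for documentation: the oracle distance between
    # a yoga pose and itself is zero (the "kneel" pose is just an example).
    pose = get_walkeryoga_goals()["kneel"]
    assert _oracle_distance(pose, pose) == 0.0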
def extract_names(string: str) -> tp.Set[str]:
rl = BytesIO(string.encode('utf-8')).readline
tokens = list(tokenize.tokenize(rl))
return {t.string for t in tokens if t.type == token.NAME}
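def _example_extract_names() -> None:
    # Illustrative sketch added for documentation: the safety check in
    # WalkerEquation below only allows identifiers found by extract_names.
    assert extract_names("exp(-(x - 5.0)**2) * up") == {"exp", "x", "up"}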
class WalkerEquation(BaseReward):
def __init__(self, string: str) -> None:
super().__init__()
self._env = _make_env("walker")
self._np = ["sin", "cos", "tan", "abs", "exp", "sqrt"]
variables = list(self._extract(self._env)) + self._np
not_allowed = extract_names(string) - set(variables)
# keep this safety measure to avoid being hacked in the demo!
if not_allowed:
raise ValueError(f"The following variables are not allowed: {not_allowed}\nPlease only use {variables}")
self.string = string
self._precomputed: tp.Dict[str, np.ndarray] = {}
def _extract(self, env: dmc.EnvWrapper) -> tp.Dict[str, float]:
data = env.physics.named.data
return dict(
x=data.xpos["torso", "x"],
z=data.xpos["torso", "z"],
vx=env.physics.horizontal_velocity(),
vz=env.physics.named.data.sensordata["torso_subtreelinvel"][-1],
up=env.physics.torso_upright(),
am=env.physics.named.data.subtree_angmom['torso', 'y']
)
def from_env(self, env: dmc.EnvWrapper) -> float:
# pylint: disable=eval-used
variables = self._extract(env)
for name in self._np:
variables[name] = getattr(np, name)
return eval(self.string, {}, variables) # type: ignore
def _precompute_for_demo(self, workspace: tp.Any) -> None:
"""special method for the demo which precomputes data
please only use in demo, since it's messy
"""
ws = workspace
if hasattr(ws, "_precomputed_"):
self._precomputed = ws._precomputed_
return
import torch # pylint: disable=import-outside-toplevel
replay: ReplayBuffer = ws.replay_loader # recover some typing
batch = replay.sample(ws.agent.cfg.num_inference_steps, with_physics=True)
with torch.no_grad():
obs = torch.Tensor(batch.goal).to(ws.cfg.device)
B = workspace.agent.backward_net(obs).detach().cpu().numpy()
precomputed = {"#B": B.astype(np.float32)}
for k, phy in enumerate(batch._physics): # type: ignore
with self._env.physics.reset_context():
self._env.physics.set_state(phy)
step_feat = self._extract(self._env)
for key, val in step_feat.items():
if key not in precomputed:
precomputed[key] = np.zeros((B.shape[0], 1), dtype=np.float32)
precomputed[key][k] = val
ws._precomputed_ = precomputed # store it for reuse
self._precomputed = precomputed
def _from_precomputed(self) -> tp.Dict[str, np.ndarray]:
variables = dict(self._precomputed)
var_name0 = [x for x in variables if not x.startswith("#")][0]
for name in self._np:
variables[name] = getattr(np, name)
rewards = eval(self.string, {}, variables) # type: ignore
if not isinstance(rewards, np.ndarray):
rewards = rewards * np.ones_like(variables[var_name0])
z = self._precomputed["#B"].T.dot(rewards).squeeze()
if True: # ASSUMING SCALED
norm = float(np.linalg.norm(z))
if not norm:
norm = 1e-9
z *= np.sqrt(z.size) / norm
meta = OrderedDict()
meta['z'] = z
return meta
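def _example_z_scaling() -> None:
    # Illustrative sketch added for documentation: the "ASSUMING SCALED" branch
    # above rescales z so that its L2 norm equals sqrt(dim).
    z = np.array([3.0, 4.0])
    z = z * (np.sqrt(z.size) / np.linalg.norm(z))
    assert np.isclose(np.linalg.norm(z), np.sqrt(2))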
class WalkerRandomReward(WalkerEquation):
"""Deterministic positional reward"""
def __init__(self, seed: tp.Optional[int] = None) -> None:
rng = np.random.RandomState(seed)
x = rng.uniform(3, 15)
nx = rng.uniform(3, 8)
# equation + weight
cases = [
(f"exp(-(x-{x:.1f})**2)", 5),
(f"exp(-(x-{x:.1f})**2) * up", 5),
(f"exp(-(x+{nx:.1f})**2)", 2),
("vx > 1", 1),
("vx > 3", 1),
("vx < -1", 1),
]
p = np.array([float(x[1]) for x in cases])
p /= p.sum()
selected = cases[rng.choice(range(p.size), p=p)][0]
super().__init__(selected)
self._rng = rng
|
controllable_agent-main
|
url_benchmark/goals.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import collections
from collections import abc
from concurrent import futures
import time
import uuid
import json
import typing as tp
import logging
from datetime import datetime
import subprocess
from pathlib import Path
try:
from typing import Protocol
except ImportError:
# backward compatible
from typing_extensions import Protocol # type: ignore
import numpy as np
import pandas as pd
# pylint: disable=import-outside-toplevel
START_LINE = "# Hiplot logs"
logger: logging.Logger = logging.getLogger(__name__)
class _StatCall(Protocol):
def __call__(self, **kwargs: float) -> "HipLog":
...
class HipLogfileError(RuntimeError):
pass
class STYLE: # pylint: disable=too-few-public-methods
metrics = "badge badge-pill badge-primary"
internal = "badge badge-pill badge-secondary"
optim = "badge badge-pill badge-dark"
model = "badge badge-pill badge-success"
other = "badge badge-pill badge-danger"
# "badge badge-pill badge-warning"
def _set_style(exp: tp.Any) -> None:
import hiplot as hip
assert isinstance(exp, hip.Experiment)
# Don't display `uid` and `from_uid` columns to the user
cols = set(x for dp in exp.datapoints for x in dp.values.keys())
internals = ["workdir", "#now", "train/episode", "eval/episode", "#time", "#reloaded", "#job_id"]
hidden = [x for x in cols if x.startswith(("eval/", "train/"))]
hidden = [x for x in hidden if not any(y in x for y in ("episode", "loss"))]
exp.display_data(hip.Displays.PARALLEL_PLOT).update(
{
"hide": ["uid", "from_uid"] + hidden,
}
)
# for the record, some more options:
exp.display_data(hip.Displays.XY).update(
{"lines_thickness": 1.4, "lines_opacity": 0.9}
)
exp.display_data(hip.Displays.XY).update(
{"axis_x": "eval/episode", "axis_y": "eval/episode_reward"}
)
# colors
styles = {}
styles.update(
{
name: STYLE.metrics
for name in cols
if name.startswith(("eval/", "train/"))
and not any(y in name for y in ("/episode", "episode_reward"))
}
)
styles.update(
{name: STYLE.other for name in ("eval/episode_reward", "train/episode_reward")}
)
styles.update({name: STYLE.internal for name in internals})
styles["experiment"] = STYLE.other
for col in cols:
for start, style in styles.items():
if col.startswith(start):
exp.parameters_definition[col].label_css = style
def create_hiplot_experiment(uri: tp.Union[str, Path]) -> tp.Any:
import hiplot as hip
# one xp case
uri = Path(uri)
assert uri.suffix == ".csv", f"Path should be a csv, got {uri}"
assert uri.is_file(), f"Path should be a valid file, but got {uri}"
df = pd.read_csv(uri)
prev_uid: tp.Optional[str] = None
exp = hip.Experiment()
base = dict(xp=uri.parent.name, date=uri.parents[1].name, mode=uri.stem)
for k, xp in enumerate(df.itertuples(index=False)):
data = xp._asdict()
data.update(base)
dp = hip.Datapoint(
uid=f"{uri.parent.name}_{uri.stem}_{k}", from_uid=prev_uid, values=data
)
prev_uid = dp.uid
exp.datapoints.append(dp)
_set_style(exp)
return exp
def load(uri: tp.Union[Path, str], step: int = 10) -> tp.Any:
"""Loader for hiplot
Running:
    python -m hiplot controllable_agent.hiplogs.load --port=XXXX
    will run a hiplot server in which you can paste one (or more) log paths
to plot them
Note
----
if you install first: "pip install -e ."
you can simplify to:
hiplot xxxx.load --port=XXXX
Then either provide the folder of the experiments in the freeform,
or their parent directory, so that all subfolders will be parsed for logs.
"""
import hiplot as hip
uri = Path(uri)
if str(uri).startswith("#"): # deactivate a line
return hip.Experiment()
assert uri.is_dir(), f"uri should be a valid directory, got {uri}"
jobs = []
with futures.ProcessPoolExecutor() as executor:
for path in uri.rglob("eval.csv"):
for hlog in HipLog.find_in_folder(path.parent):
jobs.append(executor.submit(hlog.to_hiplot_experiment, step))
# exps.append(create_hiplot_experiment(path))
# exps.append(create_hiplot_experiment(path.with_name("eval.csv")))
exps = [j.result() for j in jobs]
exp = hip.Experiment.merge({str(k): xp for k, xp in enumerate(exps)})
_set_style(exp)
return exp
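# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Minimal example of calling `load` programmatically rather than through the
# hiplot command line. The experiment root below is a hypothetical placeholder;
# it should contain run folders with an eval.csv and hiplog files.
def _load_sketch(experiment_root: str = "/path/to/experiments") -> None:
    exp = load(experiment_root, step=10)
    print(f"merged {len(exp.datapoints)} datapoints")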
class HipLog:
"""Simple object for logging hiplot compatible content
Parameters
----------
filepath: str or Path
path to the logfile. It will be created if it does not exist, otherwise
data will be appended to it.
    Usage
    -----
    Content is added by calling the instance with keyword arguments; the call
    updates the instance in place and returns it, so calls can be chained.
    `write` appends the accumulated content as one json line to the logfile and
    then clears it. When reading the file back (see `read`), values from earlier
    lines are propagated to later ones, so data logged once (eg: hyperparameters)
    is attached to every subsequent timepoint.
    Example
    -------
    hiplog = hiplogs.HipLog(filepath)
    hiplog(shared_key=12)
    hiplog(hello="world").write()  # writes shared_key and hello
    hiplog(something="blublu").write()  # writes only something
    # read() then propagates shared_key and hello onto the second row as well
    """
def __init__(self, filepath: tp.Union[Path, str]) -> None:
self._filepath = Path(filepath)
if self._filepath.suffix not in (".txt", ".log"):
raise ValueError("Filepath must have .txt or .log as extension")
self._content: tp.Dict[str, tp.Any] = {
"#start_time": f"{datetime.now():%Y-%m-%d %H:%M}"
}
self._floats: tp.Dict[str, tp.List[float]] = collections.defaultdict(list)
self._stats: tp.Dict[str, tp.Tuple[str, ...]] = {}
self._reloaded = 0
try:
self._filepath.parent.mkdir(parents=True, exist_ok=True)
if not self._filepath.exists():
self._filepath.write_text(START_LINE + " v1\n", encoding="utf8")
        except Exception as e:  # pylint: disable=broad-except
            logger.warning("Failed to create the hiplog file: %s", e)
try:
import submitit
self._content["#job_id"] = submitit.JobEnvironment().job_id
except Exception: # pylint: disable=broad-except
pass
data = self.read()
if data:
self._reloaded = data[-1].get("#reloaded", -1) + 1 # type: ignore
@classmethod
def find_in_folder(
cls, folder: tp.Union[str, Path], recursive: bool = False
) -> tp.Iterator["HipLog"]:
"""Instantiate all hiplog instances from the folder or subfolders
Parameters
----------
folder: str/Path
folder to look into
recursive: bool
instantiate all hiplog logs recursively
Yields
------
HipLog
hiplog instance
"""
folder = Path(folder)
for suffix in (".txt", ".log"):
iterator = (folder.rglob if recursive else folder.glob)("*" + suffix)
for fp in iterator:
if fp.suffix in (".log", ".txt"):
with fp.open("r", encoding="utf8") as f:
is_hiplog = START_LINE in f.readline()
if is_hiplog:
yield cls(fp)
def __call__(self, **kwargs: tp.Hashable) -> "HipLog":
sanitized = {
x: y if not isinstance(y, np.generic) else y.item()
for x, y in kwargs.items()
}
self._content.update(sanitized)
return self
    def with_stats(self, *stats: str) -> _StatCall:
return functools.partial(self._with_stats, tuple(stats))
def _with_stats(self, _internal_name_stats: tp.Tuple[str, ...], **kwargs: float) -> "HipLog":
for key, val in kwargs.items():
self._stats[key] = _internal_name_stats # overridden by last call
self._floats[key].append(float(val))
return self
def read(self, step: int = 1) -> tp.List[tp.Dict[str, tp.Hashable]]:
"""Returns the data recorded through the logger
        Parameters
        ----------
step: int
step for decimating the data if too big
Returns
-------
list of dict
all the timepoints. Data from past timepoints are used if not
provided in newer timepoints (eg: initial hyperparameters are
passed to all timepoints)
"""
with self._filepath.open("r", encoding="utf8") as f:
lines = f.readlines()
if lines and not lines[0].startswith(START_LINE):
raise HipLogfileError(
f"Did not recognize first line: {lines[0]!r} instead of {START_LINE!r}"
)
data: tp.List[tp.Dict[str, tp.Hashable]] = []
last = {}
for k, line in enumerate(lines):
if not line.startswith("#"):
line_dict = json.loads(line.strip())
last.update(line_dict)
if not k % step:
data.append(dict(last))
return data
def last_line(self) -> tp.Dict[str, tp.Hashable]:
data = self.read()
return {} if not data else data[-1]
@property
def content(self) -> tp.Dict[str, tp.Hashable]:
return dict(self._content)
def _export_floats(self) -> tp.Dict[str, float]:
out: tp.Dict[str, float] = {}
for key, vals in self._floats.items():
for stat in self._stats[key]:
out[f"{key}#{stat}"] = getattr(np, stat)(vals)
return out
def write(self) -> None:
# avoid as much as possible any disruption
self._content["#now"] = f"{datetime.now():%Y-%m-%d %H:%M}"
self._content["#time"] = time.time()
self._content["#reloaded"] = self._reloaded
self._content.update(self._export_floats())
if not self._filepath.exists():
return # initialization failed, can't do anything more
try:
string = json.dumps(self._content)
except Exception as e: # pylint: disable=broad-except
logger.warning("Failing to write data to json: %s", e)
return # can't be json-ed, stop there
# if it reaches here, it should be safe to write
with self._filepath.open("a", encoding="utf8") as f:
f.write(string + "\n")
self._content.clear()
self._floats.clear()
self._stats.clear()
def flattened(self, data: tp.Any) -> "HipLog":
"""Flattens a structured configuration and adds it to the content"""
self(**_flatten(data))
return self
def to_hiplot_experiment(self, step: int = 1) -> tp.Any:
"""Returns the Experiment recorded through the logger
        Parameters
        ----------
step: int
step for decimating the data if too big
Returns
-------
Experiment
Hiplot Experiment instance containing the logger data
"""
import hiplot as hip
exp = hip.Experiment()
prev_uid: tp.Optional[str] = None
name = uuid.uuid4().hex[:8]
for k, data in enumerate(self.read(step=step)):
# update the displayed name to something readable
if not k:
xp = data.get("experiment", "#UNKNOWN#")
job_id = data.get("#job_id", name)
name = f"{xp} / {job_id}"
dp = hip.Datapoint(uid=f"{name} / {k}", from_uid=prev_uid, values=data) # type: ignore
prev_uid = dp.uid
exp.datapoints.append(dp)
_set_style(exp)
logger.info("Finished loading %s", self._filepath)
return exp
def _flatten(data: abc.Mapping) -> tp.Dict[str, tp.Hashable]: # type: ignore
output: tp.Dict[str, tp.Hashable] = {}
if isinstance(data, abc.Mapping):
for x, y in data.items():
if isinstance(y, abc.Mapping):
content = _flatten(y)
output.update({f"{x}/{x2}": y2 for x2, y2 in content.items()})
elif isinstance(y, abc.Sequence) and not isinstance(y, str):
if y and isinstance(
y[0], (int, float, str)
): # ignoring weird structures
output[x] = ",".join(str(z) for z in y)
elif isinstance(y, abc.Hashable):
output[x] = y
return output
def repository_information() -> tp.Dict[str, str]:
commands = {
"commit": "git rev-parse --short HEAD",
"branch": "git rev-parse --abbrev-ref HEAD",
"closest_main": "git rev-parse --short main",
}
here = Path(__file__).parent
output: tp.Dict[str, str] = {}
for name, command in commands.items():
try:
output[name] = (
subprocess.check_output(command.split(), shell=False, cwd=here)
.strip()
.decode()
)
except Exception: # pylint: disable=broad-except
pass
return output
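# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# End-to-end HipLog workflow under stated assumptions: a temporary directory is
# used as a stand-in for a real working directory. Per-step floats are
# aggregated through `with_stats`, each `write` appends one json line, and
# `read` propagates earlier values (eg: the experiment name) to later rows.
def _hiplog_usage_sketch() -> None:
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        hiplog = HipLog(Path(tmp) / "hip.log")
        hiplog(experiment="demo", lr=1e-3)  # logged once, repeated on read
        for step in range(3):
            for reward in (1.0, 2.0, 3.0):
                hiplog.with_stats("mean", "max")(reward=reward)
            hiplog(step=step).write()  # one json line, content cleared afterwards
        rows = hiplog.read()
        assert rows[-1]["experiment"] == "demo"  # propagated from the first row
        assert rows[-1]["reward#mean"] == 2.0
        assert rows[-1]["reward#max"] == 3.0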
|
controllable_agent-main
|
url_benchmark/hiplogs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import sys
from pathlib import Path
import subprocess
import pytest
def _run(tmp_path: Path, **params: tp.Any) -> None:
folder = Path(__file__).parents[1] / "url_benchmark"
assert folder.exists()
if sys.platform == "darwin":
pytest.skip(reason="Does not run on Mac")
string = " ".join(f"{x}={y}" for (x, y) in params.items())
command = (
f"python -m url_benchmark.pretrain device=cpu hydra.run.dir={tmp_path} final_tests=0 "
+ string
)
print(f"Running: {command}")
subprocess.check_call(command.split())
@pytest.mark.parametrize(
"agent", ["aps", "diayn", "rnd", "proto"]
) # test most important ones
def test_pretrain_from_commandline(agent: str, tmp_path: Path) -> None:
_run(
tmp_path,
agent=agent,
num_train_frames=1011,
num_eval_episodes=1,
num_seed_frames=1010,
replay_buffer_episodes=2,
)
def test_pretrain_from_commandline_fb_with_goal(tmp_path: Path) -> None:
_run(
tmp_path,
agent="fb_ddpg",
num_train_frames=1,
num_eval_episodes=1,
replay_buffer_episodes=2,
goal_space="simplified_walker",
use_hiplog=True,
)
assert (tmp_path / "hip.log").exists()
|
controllable_agent-main
|
url_benchmark/test_pretrain.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import logging
import dataclasses
import typing as tp
from url_benchmark import pretrain # NEEDS TO BE FIRST NON-STANDARD IMPORT (sets up env variables)
import omegaconf as omgcf
import hydra
from hydra.core.config_store import ConfigStore
import torch
from url_benchmark import goals as _goals
from url_benchmark import utils
from url_benchmark.in_memory_replay_buffer import ReplayBuffer # pylint: disable=unused-import
from url_benchmark.replay_buffer import EpisodeBatch # pylint: disable=unused-import
from url_benchmark import agent as agents
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = True
from pathlib import Path
import sys
base = Path(__file__).absolute().parents[1]
for fp in [base, base / "url_benchmark"]:
assert fp.exists()
if str(fp) not in sys.path:
sys.path.append(str(fp))
@dataclasses.dataclass
class OfflineConfig(pretrain.Config):
# training
num_grad_steps: int = 1000000
num_seed_frames: int = 0
log_every_steps: int = 1000
# eval
num_eval_episodes: int = 10
eval_every_steps: int = 10000
# dataset
load_replay_buffer: tp.Optional[str] = None
expl_agent: str = "proto"
replay_buffer_dir: str = omgcf.SI("../../../../datasets") # make sure to update this if you change hydra run dir
# misc
experiment: str = "offline"
reward_free: bool = False
ConfigStore.instance().store(name="workspace_config", node=OfflineConfig)
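# --- Illustrative sketch (editor addition, not part of the original script) ---
# Shows how the structured config registered above can be built and overridden
# without launching hydra, which is handy for inspecting defaults. Fields
# inherited from pretrain.Config are not listed here and may show up as missing
# ('???') in the printed yaml; the override values below are arbitrary examples.
def _offline_config_sketch() -> None:
    cfg = omgcf.OmegaConf.structured(OfflineConfig)
    cfg = omgcf.OmegaConf.merge(cfg, {"expl_agent": "rnd", "reward_free": True})
    print(omgcf.OmegaConf.to_yaml(cfg))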
class Workspace(pretrain.BaseWorkspace[OfflineConfig]):
def __init__(self, cfg: OfflineConfig) -> None:
super().__init__(cfg)
self.agent.cfg.update_every_steps = 1
datasets_dir = self.work_dir / cfg.replay_buffer_dir
replay_dir = datasets_dir.resolve() / self.domain / cfg.expl_agent / 'buffer'
print(f'replay dir: {replay_dir}')
# self.replay_loader = ReplayBuffer([], # self._data_specs, [], # meta_specs = []
# cfg.batch_size, cfg.replay_buffer_episodes,
# cfg.discount, True)
if self.cfg.load_replay_buffer is not None:
            print(f"loading Replay from {self.cfg.load_replay_buffer}")
self.load_checkpoint(self.cfg.load_replay_buffer, only=["replay_loader"])
# with open(self.cfg.load_replay_buffer, 'rb') as f:
# content = torch.load(f)
# if isinstance(content, dict):
# content = content["replay_loader"]
# # assert isinstance(content, ReplayBuffer)
# self.replay_loader = content
else:
relabeled_replay_file_path = replay_dir / f"../relabeled_replay_{cfg.task}_{cfg.replay_buffer_episodes}.pt"
if relabeled_replay_file_path.exists():
                print(f"loading Replay from {relabeled_replay_file_path.resolve()}")
self.load_checkpoint(relabeled_replay_file_path, only=["replay_loader"])
# with relabeled_replay_file_path.open('rb') as f:
# self.replay_loader = torch.load(f)
else:
print("loading and relabeling...")
goal_func = None if cfg.goal_space is None else _goals.goal_spaces.funcs[self.domain][cfg.goal_space]
self.replay_loader.load(self.train_env, replay_dir, relabel=True, goal_func=goal_func)
print("loading is done")
with relabeled_replay_file_path.open('wb') as f:
torch.save(self.replay_loader, f)
self.replay_loader._future = cfg.future
self.replay_loader._discount = cfg.discount
# self.replay_loader._full = True
self.replay_loader._max_episodes = len(self.replay_loader._storage["discount"])
if isinstance(self.agent, agents.GoalTD3Agent) and self.agent.cfg.fb_reward:
self.agent.precompute_cov(self.replay_loader)
def train(self):
train_until_step = utils.Until(self.cfg.num_grad_steps)
eval_every_step = utils.Every(self.cfg.eval_every_steps)
log_every_step = utils.Every(self.cfg.log_every_steps)
while train_until_step(self.global_step):
# try to evaluate
if eval_every_step(self.global_step):
self.logger.log('eval_total_time', self.timer.total_time(), self.global_step)
if self.cfg.custom_reward == "maze_multi_goal":
self.eval_maze_goals()
else:
self.eval()
if isinstance(self.agent, agents.GoalTD3Agent):
metrics = self.agent.update(self.replay_loader, self.global_step, self.reward_cls)
else:
metrics = self.agent.update(self.replay_loader, self.global_step)
self.logger.log_metrics(metrics, self.global_step, ty='train')
if log_every_step(self.global_step):
elapsed_time, total_time = self.timer.reset()
with self.logger.log_and_dump_ctx(self.global_step, ty='train') as log:
log('fps', self.cfg.log_every_steps / elapsed_time)
log('total_time', total_time)
log('step', self.global_step)
self.global_step += 1
# try to save snapshot
if self.global_frame in self.cfg.snapshot_at:
self.save_checkpoint(self._checkpoint_filepath.with_name(f'snapshot_{self.global_frame}.pt'), exclude=["replay_loader"])
# save checkpoint to reload
if not self.global_frame % self.cfg.checkpoint_every:
self.save_checkpoint(self._checkpoint_filepath, exclude=["replay_loader"])
self.save_checkpoint(self._checkpoint_filepath) # make sure we save the final checkpoint
self.finalize()
# def load_checkpoint(self, fp: tp.Union[Path, str]) -> None:
# fp = Path(fp)
# with fp.open('rb') as f:
# payload = torch.load(f)
# self.agent.init_from(payload['agent'])
@hydra.main(config_path='.', config_name='base_config')
def main(cfg: omgcf.DictConfig) -> None:
workspace = Workspace(cfg) # type: ignore
# for _ in range(10):
# workspace.eval()
if isinstance(workspace.agent, agents.DDPGAgent):
if workspace.agent.reward_free:
workspace.agent.train_reward(workspace.replay_loader)
workspace.train()
if __name__ == '__main__':
main()
|
controllable_agent-main
|
url_benchmark/train_offline.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import subprocess
import sys
import tempfile
import typing as tp
from pathlib import Path
import pytest
import hydra
import numpy as np
from url_benchmark import hiplogs
from url_benchmark import utils
def test_until_repr() -> None:
until = utils.Until(3, 1)
assert str(until) == "Until(action_repeat=1, until=3)"
def test_parse_logs() -> None:
path = (
Path(__file__).parents[1]
/ "controllable_agent"
/ "data"
/ "mockpretrain"
/ "hip.log"
)
hlog = hiplogs.HipLog(path)
logs = hlog.to_hiplot_experiment().datapoints
assert len(logs) == 13
vals = logs[-1].values
assert vals["workdir"] == "054238_fb_ddpg", "Xp id not exported"
bad_type = {x: y for x, y in vals.items() if not isinstance(y, (int, float, str))}
assert not bad_type, "Found unsupported type(s)"
def test_load() -> None:
xp = hiplogs.load(str(Path(__file__).parents[1] / "controllable_agent"), step=2)
assert len(xp.datapoints) == 6
def test_hiplog(tmp_path: Path) -> None:
hiplog = hiplogs.HipLog(tmp_path / "log.txt")
hiplog(hello="world")
hiplog.write()
hiplog(hello="monde")
hiplog(number=12).write()
hiplog(something=np.int32(12)).write()
data = hiplog.read()
for d in data:
for key in list(d):
if key.startswith("#"):
d.pop(key)
expected = [
dict(hello="world"),
dict(hello="monde", number=12),
dict(hello="monde", number=12, something=12),
]
assert data == expected
# reloaded
assert not hiplog._reloaded
hiplog = hiplogs.HipLog(tmp_path / "log.txt")
assert hiplog._reloaded == 1
def test_hiplog_stats(tmp_path: Path) -> None:
hiplog = hiplogs.HipLog(tmp_path / "log.txt")
for vals in ([3, 5], [7, 8, 9]):
for val in vals:
hiplog.with_stats("mean")(val=val)
hiplog.write()
data = hiplog.read()
for d in data:
for key in list(d):
if key.startswith("#"):
d.pop(key)
expected = [{"val#mean": 4}, {"val#mean": 8}]
assert data == expected
def test_repository_information() -> None:
out = hiplogs.repository_information()
assert len(out) == 3
def test_hiplogs_from_hydra_config(tmp_path: Path) -> None:
if sys.platform == "darwin":
pytest.skip(reason="Does not run on Mac")
train_cmd = [
sys.executable,
"-m",
"url_benchmark.test_hiplogs",
f"hydra.run.dir={tmp_path}",
]
subprocess.check_call(train_cmd)
@hydra.main(config_name="base_config", config_path=".", version_base="1.1")
def main(args: tp.Any) -> None:
args.agent.obs_type = "blublu"
args.agent.obs_shape = (2, 2)
args.agent.action_shape = (2, 2)
args.agent.num_expl_steps = 12
with tempfile.TemporaryDirectory() as tmp:
log = hiplogs.HipLog(Path(tmp) / "hiplog.test.log").flattened(args)
assert "agent/obs_type" in log.content
if __name__ == "__main__":
# needed to load the config:
from url_benchmark import pretrain # pylint: disable=unused-import,import-outside-toplevel
main()
|
controllable_agent-main
|
url_benchmark/test_hiplogs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
controllable_agent-main
|
url_benchmark/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import logging
import typing as tp
from pathlib import Path
import datetime
from collections import defaultdict
import torch
import wandb
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
from url_benchmark.hiplogs import HipLog
Formating = tp.List[tp.Tuple[str, str, str]]
COMMON_TRAIN_FORMAT = [('frame', 'F', 'int'), ('step', 'S', 'int'),
('episode', 'E', 'int'), ('episode_length', 'L', 'int'),
('episode_reward', 'R', 'float'),
('fps', 'FPS', 'float'), ('total_time', 'T', 'time')]
COMMON_EVAL_FORMAT = [('frame', 'F', 'int'), ('step', 'S', 'int'),
('episode', 'E', 'int'), ('episode_length', 'L', 'int'),
('episode_reward', 'R', 'float'),
('total_time', 'T', 'time')]
pylogger = logging.getLogger(__name__)
class AverageMeter:
def __init__(self) -> None:
self._sum = 0.0
self._count = 0
def update(self, value: float, n: int = 1) -> None:
self._sum += value
self._count += n
def value(self) -> float:
return self._sum / max(1, self._count)
Metrics = tp.Dict[str, float]
class MetersGroup:
def __init__(self, csv_file_name: tp.Union[Path, str], formating: Formating, use_wandb: bool) -> None:
self._csv_file_name = Path(csv_file_name)
self._formating = formating
self._meters: tp.Dict[str, AverageMeter] = defaultdict(AverageMeter)
self._csv_file: tp.Optional[tp.TextIO] = None
self._csv_writer: tp.Optional[csv.DictWriter[str]] = None
self.use_wandb = use_wandb
def log(self, key: str, value: float, n: int = 1) -> None:
self._meters[key].update(value, n)
def _prime_meters(self) -> Metrics:
data = {}
for key, meter in self._meters.items():
if key.startswith('train'):
key = key[len('train') + 1:]
else:
key = key[len('eval') + 1:]
key = key.replace('/', '_')
data[key] = meter.value()
return data
def _remove_old_entries(self, data: Metrics) -> None:
rows = []
with self._csv_file_name.open('r') as f:
reader = csv.DictReader(f)
for row in reader:
if float(row['episode']) >= data['episode']:
break
rows.append(row)
with self._csv_file_name.open('w') as f:
writer = csv.DictWriter(f,
fieldnames=sorted(data.keys()),
restval=0.0)
writer.writeheader()
for row in rows:
writer.writerow(row)
def _dump_to_csv(self, data: Metrics) -> None:
if self._csv_writer is None:
should_write_header = True
if self._csv_file_name.exists():
self._remove_old_entries(data)
should_write_header = False
self._csv_file = self._csv_file_name.open('a')
self._csv_writer = csv.DictWriter(self._csv_file,
fieldnames=sorted(data.keys()),
restval=0.0)
if should_write_header:
self._csv_writer.writeheader()
if self._csv_writer is None or self._csv_file is None:
raise RuntimeError("CSV writer and file should have been instantiated")
self._csv_writer.writerow(data)
self._csv_file.flush()
@staticmethod
def _format(key: str, value: float, ty: str) -> str:
if ty == 'int':
value = int(value)
return f'{key}: {value}'
elif ty == 'float':
return f'{key}: {value:.04f}'
elif ty == 'time':
value_ = str(datetime.timedelta(seconds=int(value)))
return f'{key}: {value_}'
raise ValueError(f'invalid format type: {ty}')
def _dump_to_console(self, data: Metrics, prefix: str) -> None:
prefix = colored(prefix, 'yellow' if prefix == 'train' else 'green')
pieces = [f'| {prefix: <14}']
for key, disp_key, ty in self._formating:
value = data.get(key, 0)
pieces.append(self._format(disp_key, value, ty))
print(' | '.join(pieces))
@staticmethod
def _dump_to_wandb(data: Metrics) -> None:
wandb.log(data)
def dump(self, step: int, prefix: str) -> None:
if len(self._meters) == 0:
return
data = self._prime_meters()
data['frame'] = step
if self.use_wandb:
wandb_data = {prefix + '/' + key: val for key, val in data.items()}
self._dump_to_wandb(data=wandb_data)
self._dump_to_csv(data)
self._dump_to_console(data, prefix)
self._meters.clear()
class Logger:
def __init__(self, log_dir: Path, use_tb: bool, use_wandb: bool, use_hiplog: bool) -> None:
self._log_dir = log_dir
self._train_mg = MetersGroup(log_dir / 'train.csv',
formating=COMMON_TRAIN_FORMAT,
use_wandb=use_wandb)
self._eval_mg = MetersGroup(log_dir / 'eval.csv',
formating=COMMON_EVAL_FORMAT,
use_wandb=use_wandb)
self._sw: tp.Optional[SummaryWriter] = None
# self.hiplog: tp.Optional[HipLog] = None
self.use_hiplog = use_hiplog
if use_hiplog:
self.hiplog = HipLog(log_dir / "hip.log")
if use_tb:
self._sw = SummaryWriter(str(log_dir / 'tb'))
self.use_wandb = use_wandb
def _try_sw_log(self, key, value, step) -> None:
if self._sw is not None:
self._sw.add_scalar(key, value, step)
def log(self, key: str, value: tp.Union[float, torch.Tensor], step: int) -> None:
assert key.startswith('train') or key.startswith('eval')
if isinstance(value, torch.Tensor):
value = value.item()
self._try_sw_log(key, value, step)
mg = self._train_mg if key.startswith('train') else self._eval_mg
mg.log(key, value)
if self.use_hiplog:
self.hiplog(**{key: value})
def log_metrics(self, metrics: tp.Dict[str, float], step: int, ty: str) -> None:
for key, value in metrics.items():
self.log(f'{ty}/{key}', value, step)
def dump(self, step, ty=None) -> None:
try:
if ty is None or ty == 'eval':
self._eval_mg.dump(step, 'eval')
if ty is None or ty == 'train':
self._train_mg.dump(step, 'train')
except ValueError as e:
pylogger.warning(f"Could not dump metrics: {e}")
def log_and_dump_ctx(self, step: int, ty: str) -> "LogAndDumpCtx":
return LogAndDumpCtx(self, step, ty)
class LogAndDumpCtx:
def __init__(self, logger: Logger, step: int, ty: str) -> None:
self._logger = logger
self._step = step
self._ty = ty
def __enter__(self) -> "LogAndDumpCtx":
return self
def __call__(self, key: str, value: float) -> None:
self._logger.log(f'{self._ty}/{key}', value, self._step)
def __exit__(self, *args: tp.Any) -> None:
self._logger.dump(self._step, self._ty)
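# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Minimal example of the Logger API used by the training scripts: metrics are
# accumulated per prefix ('train' or 'eval') and flushed with `dump`, here via
# the `log_and_dump_ctx` context manager. Tensorboard, wandb and hiplog are all
# disabled, and `log_dir` is a hypothetical folder created on the fly.
def _logger_usage_sketch(log_dir: Path) -> None:
    log_dir.mkdir(parents=True, exist_ok=True)
    example_logger = Logger(log_dir, use_tb=False, use_wandb=False, use_hiplog=False)
    for step in range(3):
        example_logger.log('train/episode_reward', float(step), step)
        example_logger.log('train/episode', step, step)
    with example_logger.log_and_dump_ctx(step=2, ty='train') as log:
        log('fps', 30.0)
        log('step', 2)
    # exiting the context prints a console line and appends to <log_dir>/train.csv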
|
controllable_agent-main
|
url_benchmark/logger.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from url_benchmark import replay_buffer as rb
def test_batch() -> None:
shapes = dict(obs=(4, 12), action=(5, 11), next_obs=(6, 10))
meta = dict(a=np.random.rand(16), b=np.random.rand(17))
batch = rb.EpisodeBatch(
reward=np.array([1.0]),
discount=np.array([0.5]),
meta=meta,
**{x: np.random.rand(*y) for x, y in shapes.items()}
)
batches = rb.EpisodeBatch.collate_fn([batch, batch])
assert batches.obs.shape == (2, 4, 12)
assert isinstance(batches.meta, dict)
assert len(batches.meta) == 2
assert batches.meta["a"].shape == (2, 16)
# check that moving to Tensor does not change anything
cpu = batch.to("cpu")
assert cpu.reward.shape == (1,)
batches = rb.EpisodeBatch.collate_fn([cpu, cpu])
assert batches.reward.shape == (2, 1)
no_reward = batches.with_no_reward()
assert not no_reward.reward.abs().sum(), "reward should be masked"
assert batches.reward.abs().sum(), "reward should not be masked"
assert no_reward.obs is batches.obs, "Observations have been copied, which is time consuming"
|
controllable_agent-main
|
url_benchmark/test_replay_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
import re
import time
import typing as tp
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch import distributions as pyd
from torch.distributions.utils import _standard_normal
try:
from typing import Protocol
except ImportError:
# backward compatible
from typing_extensions import Protocol # type: ignore
class Trainable(Protocol): # cannot from url_benchmark import agent
@property
def training(self) -> bool:
...
def train(self, train: bool) -> None:
...
class eval_mode:
def __init__(self, *models: Trainable) -> None:
self.models = models
self.prev_states: tp.List[bool] = []
def __enter__(self) -> None:
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args: tp.Any) -> None:
for model, state in zip(self.models, self.prev_states):
model.train(state)
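# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# `eval_mode` temporarily puts any objects exposing `.training` / `.train()`
# (eg: torch modules) into eval mode and restores their previous state on exit.
def _eval_mode_sketch() -> None:
    actor = nn.Linear(4, 2)
    critic = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
    assert actor.training and critic.training
    with eval_mode(actor, critic):
        assert not actor.training and not critic.training
    assert actor.training and critic.training  # previous states restored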
def set_seed_everywhere(seed: int) -> None:
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
X = tp.TypeVar("X")
def chain(*iterables: tp.Iterable[X]) -> tp.Iterator[X]: # TODO remove
for it in iterables:
yield from it
def soft_update_params(net, target_net, tau) -> None:
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
def hard_update_params(net, target_net) -> None:
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(param.data)
def to_torch(xs, device) -> tuple:
return tuple(torch.as_tensor(x, device=device) for x in xs)
def weight_init(m) -> None:
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
if m.bias is not None:
# if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
gain = nn.init.calculate_gain('relu')
nn.init.orthogonal_(m.weight.data, gain)
if m.bias is not None:
# if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
def grad_norm(params, norm_type: float = 2.0):
params = [p for p in params if p.grad is not None]
total_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), norm_type) for p in params]),
norm_type)
return total_norm.item()
def param_norm(params, norm_type: float = 2.0):
total_norm = torch.norm(
torch.stack([torch.norm(p.detach(), norm_type) for p in params]),
norm_type)
return total_norm.item()
def _repr(obj: tp.Any) -> str:
items = {x: y for x, y in obj.__dict__.items() if not x.startswith("_")}
params = ", ".join(f"{x}={y!r}" for x, y in sorted(items.items()))
return f"{obj.__class__.__name__}({params})"
class Until:
def __init__(self, until: tp.Optional[int], action_repeat: int = 1) -> None:
self.until = until
self.action_repeat = action_repeat
def __call__(self, step: int) -> bool:
if self.until is None:
return True
until = self.until // self.action_repeat
return step < until
def __repr__(self) -> str:
return _repr(self)
class Every:
def __init__(self, every: tp.Optional[int], action_repeat: int = 1) -> None:
self.every = every
self.action_repeat = action_repeat
def __call__(self, step: int) -> bool:
if self.every is None:
return False
every = self.every // self.action_repeat
if step % every == 0:
return True
return False
def __repr__(self) -> str:
return _repr(self)
class Timer:
def __init__(self) -> None:
self._start_time = time.time()
self._last_time = time.time()
def reset(self) -> tp.Tuple[float, float]:
elapsed_time = time.time() - self._last_time
self._last_time = time.time()
total_time = time.time() - self._start_time
return elapsed_time, total_time
def total_time(self) -> float:
return time.time() - self._start_time
class TruncatedNormal(pyd.Normal):
def __init__(self, loc, scale, low=-1.0, high=1.0, eps=1e-6) -> None:
super().__init__(loc, scale, validate_args=False)
self.low = low
self.high = high
self.eps = eps
def _clamp(self, x) -> torch.Tensor:
clamped_x = torch.clamp(x, self.low + self.eps, self.high - self.eps)
x = x - x.detach() + clamped_x.detach()
return x
def sample(self, clip=None, sample_shape=torch.Size()) -> torch.Tensor: # type: ignore
shape = self._extended_shape(sample_shape)
eps = _standard_normal(shape,
dtype=self.loc.dtype,
device=self.loc.device)
eps *= self.scale
if clip is not None:
eps = torch.clamp(eps, -clip, clip)
x = self.loc + eps
return self._clamp(x)
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1) -> None:
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x) -> torch.Tensor:
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x) -> torch.Tensor:
return x.tanh()
def _inverse(self, y) -> torch.Tensor:
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
# one should use `cache_size=1` instead
return self.atanh(y)
def log_abs_det_jacobian(self, x, y) -> torch.Tensor:
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale) -> None:
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
def schedule(schdl, step) -> float:
try:
return float(schdl)
except ValueError:
match = re.match(r'linear\((.+),(.+),(.+)\)', schdl)
if match:
init, final, duration = [float(g) for g in match.groups()]
mix = np.clip(step / duration, 0.0, 1.0)
return (1.0 - mix) * init + mix * final
match = re.match(r'step_linear\((.+),(.+),(.+),(.+),(.+)\)', schdl)
if match:
init, final1, duration1, final2, duration2 = [
float(g) for g in match.groups()
]
if step <= duration1:
mix = np.clip(step / duration1, 0.0, 1.0)
return (1.0 - mix) * init + mix * final1
else:
mix = np.clip((step - duration1) / duration2, 0.0, 1.0)
return (1.0 - mix) * final1 + mix * final2
raise NotImplementedError(schdl)
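# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# `schedule` accepts either a plain float string or one of two spec formats:
#   linear(init, final, duration)
#   step_linear(init, final1, duration1, final2, duration2)
# The values below are arbitrary examples.
def _schedule_sketch() -> None:
    assert schedule("0.1", step=100) == 0.1
    assert schedule("linear(1.0,0.0,100)", step=50) == 0.5
    # ramps 1.0 -> 0.5 over the first 100 steps, then 0.5 -> 0.0 over 200 more
    assert abs(schedule("step_linear(1.0,0.5,100,0.0,200)", step=200) - 0.25) < 1e-6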
class RandomShiftsAug(nn.Module):
def __init__(self, pad) -> None:
super().__init__()
self.pad = pad
def forward(self, x) -> torch.Tensor:
x = x.float()
n, _, h, w = x.size()
assert h == w
padding = tuple([self.pad] * 4)
x = F.pad(x, padding, 'replicate')
eps = 1.0 / (h + 2 * self.pad)
arange = torch.linspace(-1.0 + eps,
1.0 - eps,
h + 2 * self.pad,
device=x.device,
dtype=x.dtype)[:h]
arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2)
base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2)
base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1)
shift = torch.randint(0,
2 * self.pad + 1,
size=(n, 1, 1, 2),
device=x.device,
dtype=x.dtype)
shift *= 2.0 / (h + 2 * self.pad)
grid = base_grid + shift
return F.grid_sample(x,
grid,
padding_mode='zeros',
align_corners=False)
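# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Applies the random-shift augmentation to a dummy batch of stacked frames; the
# tensor shape below mirrors the (batch, channels, height, width) layout expected
# by `forward`, with square images as required by its assert.
def _random_shifts_sketch() -> None:
    aug = RandomShiftsAug(pad=4)
    frames = torch.rand(8, 9, 84, 84)  # eg: 3 stacked RGB frames of 84x84
    shifted = aug(frames)
    assert shifted.shape == frames.shape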
class RMS:
"""running mean and std """
def __init__(self, device, epsilon=1e-4, shape=(1,)) -> None:
self.M = torch.zeros(shape).to(device)
self.S = torch.ones(shape).to(device)
self.n = epsilon
def __call__(self, x):
bs = x.size(0)
delta = torch.mean(x, dim=0) - self.M
new_M = self.M + delta * bs / (self.n + bs)
new_S = (self.S * self.n + torch.var(x, dim=0) * bs +
torch.square(delta) * self.n * bs /
(self.n + bs)) / (self.n + bs)
self.M = new_M
self.S = new_S
self.n += bs
return self.M, self.S
class PBE:
"""particle-based entropy based on knn normalized by running mean """
def __init__(self, rms, knn_clip, knn_k, knn_avg, knn_rms, device) -> None:
self.rms = rms
self.knn_rms = knn_rms
self.knn_k = knn_k
self.knn_avg = knn_avg
self.knn_clip = knn_clip
self.device = device
def __call__(self, rep):
source = target = rep
b1, b2 = source.size(0), target.size(0)
        # (b1, 1, c) - (1, b2, c) -> (b1, b2, c) -> (b1, b2)
sim_matrix = torch.norm(source[:, None, :].view(b1, 1, -1) -
target[None, :, :].view(1, b2, -1),
dim=-1,
p=2)
reward, _ = sim_matrix.topk(self.knn_k,
dim=1,
largest=False,
sorted=True) # (b1, k)
if not self.knn_avg: # only keep k-th nearest neighbor
reward = reward[:, -1]
reward = reward.reshape(-1, 1) # (b1, 1)
reward /= self.rms(reward)[0] if self.knn_rms else 1.0
reward = torch.maximum(
reward - self.knn_clip,
torch.zeros_like(reward).to(self.device)
) if self.knn_clip >= 0.0 else reward # (b1, 1)
else: # average over all k nearest neighbors
reward = reward.reshape(-1, 1) # (b1 * k, 1)
reward /= self.rms(reward)[0] if self.knn_rms else 1.0
reward = torch.maximum(
reward - self.knn_clip,
torch.zeros_like(reward).to(
self.device)) if self.knn_clip >= 0.0 else reward
reward = reward.reshape((b1, self.knn_k)) # (b1, k)
reward = reward.mean(dim=1, keepdim=True) # (b1, 1)
reward = torch.log(reward + 1.0)
return reward
class FloatStats:
def __init__(self) -> None:
self.min = np.inf
self.max = -np.inf
self.mean = 0.0
self.count = 0
def add(self, value: float) -> "FloatStats":
self.min = min(value, self.min)
self.max = max(value, self.max)
self.count += 1
self.mean = (self.count - 1) / self.count * self.mean + 1 / self.count * value
return self
|
controllable_agent-main
|
url_benchmark/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
DOMAINS = [
'walker',
'quadruped',
'jaco',
    'point_mass_maze',
'cheetah'
]
CHEETAH_TASKS = [
'cheetah_walk',
'cheetah_walk_backward',
'cheetah_run',
'cheetah_run_backward'
]
WALKER_TASKS = [
'walker_stand',
'walker_walk',
'walker_run',
'walker_flip',
]
QUADRUPED_TASKS = [
'quadruped_walk',
'quadruped_run',
'quadruped_stand',
'quadruped_jump',
]
JACO_TASKS = [
'jaco_reach_top_left',
'jaco_reach_top_right',
'jaco_reach_bottom_left',
'jaco_reach_bottom_right',
]
POINT_MASS_MAZE_TASKS = [
'point_mass_maze_reach_top_left',
'point_mass_maze_reach_top_right',
'point_mass_maze_reach_bottom_left',
'point_mass_maze_reach_bottom_right',
]
TASKS: List[str] = WALKER_TASKS + QUADRUPED_TASKS + JACO_TASKS + POINT_MASS_MAZE_TASKS
PRIMAL_TASKS = {
'walker': 'walker_stand',
'jaco': 'jaco_reach_top_left',
'quadruped': 'quadruped_walk'
}
|
controllable_agent-main
|
url_benchmark/dmc_benchmark.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import logging
import typing as tp
import dataclasses
import collections
from pathlib import Path
import numpy as np
import torch
from dm_env import specs, TimeStep
from tqdm import tqdm
from url_benchmark.replay_buffer import EpisodeBatch
from url_benchmark.dmc import ExtendedGoalTimeStep
Specs = tp.Sequence[specs.Array]
logger = logging.getLogger(__name__)
EpisodeTuple = tp.Tuple[np.ndarray, ...]
Episode = tp.Dict[str, np.ndarray]
T = tp.TypeVar("T", np.ndarray, torch.Tensor)
def episode_len(episode: Episode) -> int:
    # subtract 1 because of the dummy first transition
return next(iter(episode.values())).shape[0] - 1
def load_episode(fn: Path) -> tp.Dict[str, np.ndarray]:
with fn.open('rb') as f:
episode = np.load(f)
episode = {k: episode[k] for k in episode.keys()}
return episode # type: ignore
def relabel_episode(env: tp.Any, episode: tp.Dict[str, np.ndarray], goal_func: tp.Any) -> tp.Dict[str, np.ndarray]:
goals = []
rewards = []
states = episode['physics']
for i in range(states.shape[0]):
with env.physics.reset_context():
env.physics.set_state(states[i])
reward = env.task.get_reward(env.physics)
reward = np.full((1,), reward, dtype=np.float32)
rewards.append(reward)
if goal_func is not None:
goals.append(goal_func(env))
episode['reward'] = np.array(rewards, dtype=np.float32)
if goals:
episode['goal'] = np.array(goals, dtype=np.float32)
return episode
# class ReplayBufferIterable:
# def __init__(self, replay_buffer: "ReplayBuffer") -> None:
# self._replay_buffer = replay_buffer
#
# def __next__(self) -> EpisodeBatch:
# return self._replay_buffer.sample()
class ReplayBuffer:
def __init__(self,
max_episodes: int, discount: float, future: float, max_episode_length: tp.Optional[int] = None) -> None:
# data_specs: Specs,
# self._data_specs = tuple(data_specs)
# self._meta_specs = tuple(meta_specs)
# self._batch_size = batch_size
self._max_episodes = max_episodes
self._discount = discount
assert 0 <= future <= 1
self._future = future
self._current_episode: tp.Dict[str, tp.List[np.ndarray]] = collections.defaultdict(list)
self._idx = 0
self._full = False
self._num_transitions = 0
self._storage: tp.Dict[str, np.ndarray] = collections.defaultdict()
self._collected_episodes = 0
self._batch_names = set(field.name for field in dataclasses.fields(ExtendedGoalTimeStep))
self._episodes_length = np.zeros(max_episodes, dtype=np.int32)
self._episodes_selection_probability = None
self._is_fixed_episode_length = True
self._max_episode_length = max_episode_length
def __len__(self) -> int:
return self._max_episodes if self._full else self._idx
def __setstate__(self, state):
self.__dict__.update(state)
self._backward_compatibility()
def _backward_compatibility(self):
if self._storage and not hasattr(self, '_episodes_length'):
self._episodes_length = np.array([len(array) - 1 for array in self._storage["discount"]], dtype=np.int32)
self._episodes_length[len(self):] = 0
assert self._episodes_length[:len(self)].min() == self._episodes_length[:len(self)].max()
self._episodes_selection_probability = None
self._is_fixed_episode_length = True
self._max_episode_length = None
def add(self, time_step: TimeStep, meta: tp.Mapping[str, np.ndarray]) -> None:
dtype = np.float32
for key, value in meta.items():
self._current_episode[key].append(value)
for field in dataclasses.fields(time_step):
value = time_step[field.name]
if np.isscalar(value):
value = np.full((1,), value, dtype=dtype)
if isinstance(value, np.ndarray):
self._current_episode[field.name].append(np.array(value, dtype=dtype))
if time_step.last():
if not hasattr(self, "_batch_names"):
self._batch_names = set(field.name for field in dataclasses.fields(ExtendedGoalTimeStep))
for name, value_list in self._current_episode.items():
values = np.array(value_list, dtype)
if name not in self._storage:
# first iteration, the buffer is created with appropriate size
_shape = values.shape
if self._max_episode_length is not None:
_shape = (self._max_episode_length,) + _shape[1:]
self._storage[name] = np.empty((self._max_episodes,) + _shape, dtype=dtype)
self._storage[name][self._idx][:len(values)] = values
self._episodes_length[self._idx] = len(self._current_episode['discount']) - 1 # compensate for the dummy transition at the beginning
if self._episodes_length[self._idx] != self._episodes_length[self._idx - 1] and self._episodes_length[self._idx - 1] != 0:
self._is_fixed_episode_length = False
self._current_episode = collections.defaultdict(list)
self._collected_episodes += 1
self._idx = (self._idx + 1) % self._max_episodes
self._full = self._full or self._idx == 0
self._episodes_selection_probability = None
@property
def avg_episode_length(self) -> int:
return round(self._episodes_length[:len(self)].mean())
def sample(self, batch_size, custom_reward: tp.Optional[tp.Any] = None, with_physics: bool = False) -> EpisodeBatch:
if not hasattr(self, "_batch_names"):
self._batch_names = set(field.name for field in dataclasses.fields(ExtendedGoalTimeStep))
if not isinstance(self._future, float):
assert isinstance(self._future, bool)
self._future = float(self._future)
if self._is_fixed_episode_length:
ep_idx = np.random.randint(0, len(self), size=batch_size)
else:
if self._episodes_selection_probability is None:
self._episodes_selection_probability = self._episodes_length / self._episodes_length.sum()
ep_idx = np.random.choice(np.arange(len(self._episodes_length)), size=batch_size, p=self._episodes_selection_probability)
eps_lengths = self._episodes_length[ep_idx]
# add +1 for the first dummy transition
step_idx = np.random.randint(0, eps_lengths) + 1
assert (step_idx <= eps_lengths).all()
if self._future < 1:
# future_idx = step_idx + np.random.randint(0, self.episode_length - step_idx + 1, size=self._batch_size)
future_idx = step_idx + np.random.geometric(p=(1 - self._future), size=batch_size)
future_idx = np.clip(future_idx, 0, eps_lengths)
assert (future_idx <= eps_lengths).all()
meta = {name: data[ep_idx, step_idx - 1] for name, data in self._storage.items() if name not in self._batch_names}
obs = self._storage['observation'][ep_idx, step_idx - 1]
action = self._storage['action'][ep_idx, step_idx]
next_obs = self._storage['observation'][ep_idx, step_idx]
phy = self._storage['physics'][ep_idx, step_idx]
if custom_reward is not None:
reward = np.array([[custom_reward.from_physics(p)] for p in phy], dtype=np.float32)
else:
reward = self._storage['reward'][ep_idx, step_idx]
discount = self._discount * self._storage['discount'][ep_idx, step_idx]
goal: tp.Optional[np.ndarray] = None
next_goal: tp.Optional[np.ndarray] = None
future_obs: tp.Optional[np.ndarray] = None
future_goal: tp.Optional[np.ndarray] = None
if 'goal' in self._storage.keys():
goal = self._storage['goal'][ep_idx, step_idx - 1]
next_goal = self._storage['goal'][ep_idx, step_idx]
if self._future < 1:
future_goal = self._storage['goal'][ep_idx, future_idx - 1]
# elif self._future:
if self._future < 1:
future_obs = self._storage['observation'][ep_idx, future_idx - 1]
additional = {}
if with_physics:
additional["_physics"] = phy
# TODO remove type ignore when working
return EpisodeBatch(obs=obs, goal=goal, action=action, reward=reward, discount=discount,
next_obs=next_obs, next_goal=next_goal,
future_obs=future_obs, future_goal=future_goal, meta=meta, **additional)
def load(self, env: tp.Any, replay_dir: Path, relabel: bool = True, goal_func: tp.Any = None) -> None:
eps_fns = sorted(replay_dir.glob('*.npz'))
for eps_fn in tqdm(eps_fns):
if self._full:
break
episode = load_episode(eps_fn)
if relabel:
episode = relabel_episode(env, episode, goal_func)
# for field in dataclasses.fields(TimeStep):
for name, values in episode.items():
# values = episode[field.name]
if name not in self._storage:
# first iteration, the buffer is created with appropriate size
self._storage[name] = np.empty((self._max_episodes,) + values.shape, dtype=np.float32)
self._storage[name][self._idx] = np.array(values, dtype=np.float32)
self._idx = (self._idx + 1) % self._max_episodes
self._full = self._full or self._idx == 0
def relabel(self, custom_reward) -> None:
for (ep_idx, phy) in tqdm(enumerate(self._storage["physics"])):
reward = np.array([[custom_reward.from_physics(p)] for p in phy], dtype=np.float32)
self._storage["reward"][ep_idx] = reward
self._max_episodes = len(self._storage["physics"])
self._full = True
# def __iter__(self) -> ReplayBufferIterable:
# ''' Returns the Iterator object '''
# return ReplayBufferIterable(self)
# def __iter__(self) -> tp.Iterator[EpisodeBatch[np.ndarray]]:
# while True:
# yield self.sample()
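# --- Illustrative sketch (editor addition, not part of the original module) ---
# Standalone (plain numpy) illustration of the future-observation indexing rule
# used in `ReplayBuffer.sample`: the future step is drawn at a geometric offset
# with success probability (1 - future), then clipped to the episode length,
# which corresponds to an effective discount of `future` on the offset.
def _future_index_sketch(future: float = 0.98, episode_length: int = 500,
                         batch_size: int = 8) -> np.ndarray:
    step_idx = np.random.randint(0, episode_length, size=batch_size) + 1
    future_idx = step_idx + np.random.geometric(p=(1 - future), size=batch_size)
    future_idx = np.clip(future_idx, 0, episode_length)
    assert (step_idx <= future_idx).all() and (future_idx <= episode_length).all()
    return future_idx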
|
controllable_agent-main
|
url_benchmark/in_memory_replay_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import io
import random
import traceback
import typing as tp
from pathlib import Path
from collections import defaultdict
import dataclasses
import numpy as np
import torch
from torch.utils.data import IterableDataset
from dm_env import specs, TimeStep
EpisodeTuple = tp.Tuple[np.ndarray, ...]
Episode = tp.Dict[str, np.ndarray]
T = tp.TypeVar("T", np.ndarray, torch.Tensor)
B = tp.TypeVar("B", bound="EpisodeBatch")
@dataclasses.dataclass
class EpisodeBatch(tp.Generic[T]):
"""For later use
A container for batchable replayed episodes
"""
obs: T
action: T
reward: T
next_obs: T
discount: T
meta: tp.Dict[str, T] = dataclasses.field(default_factory=dict)
_physics: tp.Optional[T] = None
goal: tp.Optional[T] = None
next_goal: tp.Optional[T] = None
future_obs: tp.Optional[T] = None
future_goal: tp.Optional[T] = None
def __post_init__(self) -> None:
# some security to be removed later
assert isinstance(self.reward, (np.ndarray, torch.Tensor))
assert isinstance(self.discount, (np.ndarray, torch.Tensor))
assert isinstance(self.meta, dict)
def to(self, device: str) -> "EpisodeBatch[torch.Tensor]":
"""Creates a new instance on the appropriate device"""
out: tp.Dict[str, tp.Any] = {}
for field in dataclasses.fields(self):
data = getattr(self, field.name)
if field.name == "meta":
out[field.name] = {x: torch.as_tensor(y, device=device) for x, y in data.items()} # type: ignore
elif isinstance(data, (torch.Tensor, np.ndarray)):
out[field.name] = torch.as_tensor(data, device=device) # type: ignore
elif data is None:
out[field.name] = data
else:
raise RuntimeError(f"Not sure what to do with {field.name}: {data}")
return EpisodeBatch(**out)
@classmethod
def collate_fn(cls, batches: tp.List["EpisodeBatch[T]"]) -> "EpisodeBatch[torch.Tensor]":
"""Creates a new instance from several by stacking in a new first dimension
for all attributes
"""
out: tp.Dict[str, tp.Any] = {}
if isinstance(batches[0].obs, np.ndarray): # move everything to pytorch if first one is numpy
batches = [b.to("cpu") for b in batches] # type: ignore
for field in dataclasses.fields(cls):
data = [getattr(mf, field.name) for mf in batches]
# skip fields with None data
if data[0] is None:
if any(x is not None for x in data):
raise RuntimeError("Found a non-None value mixed with Nones")
out[field.name] = None
continue
# reward and discount can be float which should be converted to
# tensors for stacking
if field.name == "meta":
meta = {k: torch.stack([d[k] for d in data]) for k in data[0]}
out[field.name] = meta
elif isinstance(data[0], torch.Tensor):
out[field.name] = torch.stack(data)
else:
raise RuntimeError(f"Not sure what to do with {field.name}: {data}")
# out[field.name] = [x for y in data for x in y]
return EpisodeBatch(**out)
def unpack(self) -> tp.Tuple[T, T, T, T, T]:
"""Unpacks the structure into the legacy unnamed tuple.
Try to avoid it if possible, this is more likely to be wrong than using names
"""
return (self.obs, self.action, self.reward, self.discount, self.next_obs)
# return (self.obs, self.action, self.reward, self.discount, self.next_obs, *self.meta)
def with_no_reward(self: B) -> B:
reward = self.reward
reward = torch.zeros_like(reward) if isinstance(reward, torch.Tensor) else 0 * reward
return dataclasses.replace(self, reward=reward)
def episode_len(episode: Episode) -> int:
    # subtract 1 because of the dummy first transition
return next(iter(episode.values())).shape[0] - 1
def save_episode(episode: Episode, fn: Path) -> None:
with io.BytesIO() as bs:
np.savez_compressed(bs, **episode)
bs.seek(0)
with fn.open('wb') as f:
f.write(bs.read())
def load_episode(fn: Path) -> Episode:
with fn.open('rb') as f:
episode = np.load(f)
episode = {k: episode[k] for k in episode.keys()}
return episode
Specs = tp.Sequence[specs.Array]
class ReplayBufferStorage:
def __init__(self, data_specs: Specs, replay_dir: tp.Union[str, Path]) -> None:
self._data_specs = tuple(data_specs)
self._meta_specs: tp.Tuple[tp.Any, ...] = tuple() # deactivated
self._replay_dir = Path(replay_dir)
self._replay_dir.mkdir(exist_ok=True)
# probably bad annotation, let's update when it starts failing
self._current_episode: tp.Dict[str, tp.List[np.ndarray]] = defaultdict(list)
self._preload()
raise Exception("This code is dead due to missing handling of meta data")
def __len__(self) -> int:
return self._num_transitions
def add(self, time_step: TimeStep, meta: tp.Mapping[str, np.ndarray]) -> None:
for key, value in meta.items():
self._current_episode[key].append(value)
for spec in self._data_specs:
value = time_step[spec.name]
if np.isscalar(value):
value = np.full(spec.shape, value, spec.dtype)
assert spec.shape == value.shape and spec.dtype == value.dtype
self._current_episode[spec.name].append(value)
if time_step.last():
episode = {}
for spec in self._data_specs:
values = self._current_episode[spec.name]
episode[spec.name] = np.array(values, spec.dtype)
for spec in self._meta_specs:
values = self._current_episode[spec.name]
episode[spec.name] = np.array(values, spec.dtype)
self._current_episode = defaultdict(list)
self._store_episode(episode)
def _preload(self) -> None:
self._num_episodes = 0
self._num_transitions = 0
for fn in self._replay_dir.glob('*.npz'):
_, _, eps_len = fn.stem.split('_')
self._num_episodes += 1
self._num_transitions += int(eps_len)
def _store_episode(self, episode: Episode) -> None:
eps_idx = self._num_episodes
eps_len = episode_len(episode)
self._num_episodes += 1
self._num_transitions += eps_len
ts = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
eps_fn = f'{ts}_{eps_idx}_{eps_len}.npz'
save_episode(episode, self._replay_dir / eps_fn)
class ReplayBuffer(IterableDataset):
def __init__(self, storage: ReplayBufferStorage, max_size: int, num_workers: int, nstep: int, discount: float,
fetch_every: int, save_snapshot: bool, future: bool) -> None:
super().__init__()
self._storage = storage
self._size = 0
self._max_size = max_size
self._num_workers = max(1, num_workers)
self._episode_fns: tp.List[Path] = []
self._episodes: tp.Dict[Path, Episode] = {}
self._nstep = nstep
self._discount = discount
self._fetch_every = fetch_every
self._samples_since_last_fetch = fetch_every
self._save_snapshot = save_snapshot
self._future = future
def _sample_episode(self) -> Episode:
eps_fn = random.choice(self._episode_fns)
return self._episodes[eps_fn]
def _store_episode(self, eps_fn: Path) -> bool:
try:
episode = load_episode(eps_fn)
except Exception: # pylint: disable=broad-except
return False
eps_len = episode_len(episode)
while eps_len + self._size > self._max_size:
early_eps_fn = self._episode_fns.pop(0)
early_eps = self._episodes.pop(early_eps_fn)
self._size -= episode_len(early_eps)
early_eps_fn.unlink(missing_ok=True) # type: ignore
self._episode_fns.append(eps_fn)
self._episode_fns.sort()
self._episodes[eps_fn] = episode
self._size += eps_len
if not self._save_snapshot:
eps_fn.unlink(missing_ok=True) # type: ignore
return True
def _try_fetch(self) -> None:
if self._samples_since_last_fetch < self._fetch_every:
return
self._samples_since_last_fetch = 0
try:
worker_id = int(torch.utils.data.get_worker_info().id)
except Exception: # pylint: disable=broad-except
worker_id = 0
eps_fns = sorted(self._storage._replay_dir.glob('*.npz'), reverse=True)
fetched_size = 0
for eps_fn in eps_fns:
eps_idx, eps_len = [int(x) for x in eps_fn.stem.split('_')[1:]]
if eps_idx % self._num_workers != worker_id:
continue
if eps_fn in self._episodes:
break
if fetched_size + eps_len > self._max_size:
break
fetched_size += eps_len
if not self._store_episode(eps_fn):
break
def _sample(self) -> EpisodeBatch[np.ndarray]:
try:
self._try_fetch()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
self._samples_since_last_fetch += 1
episode = self._sample_episode()
# add +1 for the first dummy transition
idx = np.random.randint(0, episode_len(episode) - self._nstep + 1) + 1
meta = {spec.name: episode[spec.name][idx - 1] for spec in self._storage._meta_specs}
obs = episode['observation'][idx - 1]
action = episode['action'][idx]
next_obs = episode['observation'][idx + self._nstep - 1]
reward = np.zeros_like(episode['reward'][idx])
discount = np.ones_like(episode['discount'][idx])
for i in range(self._nstep):
step_reward = episode['reward'][idx + i]
reward += discount * step_reward
discount *= episode['discount'][idx + i] * self._discount
goal: tp.Optional[np.ndarray] = None
future_obs: tp.Optional[np.ndarray] = None
future_goal: tp.Optional[np.ndarray] = None
if 'goal' in episode.keys():
goal = episode['goal'][idx - 1]
if self._future:
future_idx = idx + np.random.randint(0, episode_len(episode) - idx + 1)
future_goal = episode['goal'][future_idx - 1]
# return (obs, goal, action, reward, discount, next_obs, *meta) # type: ignore
elif self._future:
future_idx = idx + np.random.randint(0, episode_len(episode) - idx + 1)
future_obs = episode['observation'][future_idx - 1]
# TODO remove type ignore when working
return EpisodeBatch(obs=obs, action=action, reward=reward, discount=discount,
next_obs=next_obs, goal=goal, future_obs=future_obs,
future_goal=future_goal, meta=meta)
def __iter__(self) -> tp.Iterator[EpisodeBatch[np.ndarray]]:
while True:
yield self._sample()
def _worker_init_fn(worker_id: int) -> None:
seed = np.random.get_state()[1][0] + worker_id # type: ignore
np.random.seed(seed)
random.seed(seed)
def make_replay_loader(storage: ReplayBufferStorage, max_size: int, batch_size: int, num_workers: int,
save_snapshot: bool, future: bool, nstep: int, discount: float) -> tp.Iterable[EpisodeBatch[torch.Tensor]]:
max_size_per_worker = max_size // max(1, num_workers)
iterable = ReplayBuffer(storage,
max_size_per_worker,
num_workers,
nstep,
discount,
fetch_every=1000,
save_snapshot=save_snapshot,
future=future)
loader = torch.utils.data.DataLoader(iterable,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
collate_fn=EpisodeBatch.collate_fn,
worker_init_fn=_worker_init_fn)
return loader
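# --- Illustrative sketch (editor addition, not part of the original module) ---
# Plain-numpy illustration of the n-step return accumulation performed inside
# `ReplayBuffer._sample`: rewards are summed with a running discount, and the
# running discount multiplies the per-step discounts together with gamma.
# For rewards [1, 1, 1], all discounts 1 and gamma=0.99, the result is
# 1 + 0.99 + 0.99**2 with a final running discount of 0.99**3.
def _nstep_return_sketch(rewards: np.ndarray, discounts: np.ndarray,
                         gamma: float, nstep: int) -> tp.Tuple[float, float]:
    ret = np.zeros_like(rewards[0])
    disc = np.ones_like(discounts[0])
    for i in range(nstep):
        ret += disc * rewards[i]
        disc *= discounts[i] * gamma
    return ret.item(), disc.item()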
|
controllable_agent-main
|
url_benchmark/replay_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import sys
import unittest
import dataclasses
from collections import OrderedDict, deque
import typing as tp
from typing import Any
from dm_env import Environment
from dm_env import StepType, specs
import numpy as np
class UnsupportedPlatform(unittest.SkipTest, RuntimeError):
"""The platform is not supported for running"""
try:
from dm_control import suite # , manipulation
from dm_control.suite.wrappers import action_scale, pixels
from url_benchmark import custom_dmc_tasks as cdmc
except ImportError as e:
raise UnsupportedPlatform(f"Import error (Note: DMC does not run on Mac):\n{e}") from e
S = tp.TypeVar("S", bound="TimeStep")
Env = tp.Union["EnvWrapper", Environment]
@dataclasses.dataclass
class TimeStep:
step_type: StepType
reward: float
discount: float
observation: np.ndarray
physics: np.ndarray = dataclasses.field(default=np.ndarray([]), init=False)
def first(self) -> bool:
return self.step_type == StepType.FIRST # type: ignore
def mid(self) -> bool:
return self.step_type == StepType.MID # type: ignore
def last(self) -> bool:
return self.step_type == StepType.LAST # type: ignore
def __getitem__(self, attr: str) -> tp.Any:
return getattr(self, attr)
def _replace(self: S, **kwargs: tp.Any) -> S:
for name, val in kwargs.items():
setattr(self, name, val)
return self
@dataclasses.dataclass
class GoalTimeStep(TimeStep):
goal: np.ndarray
@dataclasses.dataclass
class ExtendedGoalTimeStep(GoalTimeStep):
action: tp.Any
@dataclasses.dataclass
class ExtendedTimeStep(TimeStep):
action: tp.Any
class EnvWrapper:
def __init__(self, env: Env) -> None:
self._env = env
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
if not isinstance(time_step, TimeStep):
# dm_env time step is a named tuple
time_step = TimeStep(**time_step._asdict())
if self.physics is not None:
return time_step._replace(physics=self.physics.get_state())
else:
return time_step
def reset(self) -> TimeStep:
time_step = self._env.reset()
return self._augment_time_step(time_step)
def step(self, action: np.ndarray) -> TimeStep:
time_step = self._env.step(action)
return self._augment_time_step(time_step, action)
def observation_spec(self) -> tp.Any:
assert isinstance(self, EnvWrapper)
return self._env.observation_spec()
def action_spec(self) -> specs.Array:
return self._env.action_spec()
def render(self, *args: tp.Any, **kwargs: tp.Any) -> np.ndarray:
return self._env.render(*args, **kwargs) # type: ignore
@property
def base_env(self) -> tp.Any:
env = self._env
if isinstance(env, EnvWrapper):
            return env.base_env
return env
@property
def physics(self) -> tp.Any:
if hasattr(self._env, "physics"):
return self._env.physics
def __getattr__(self, name):
return getattr(self._env, name)
class FlattenJacoObservationWrapper(EnvWrapper):
def __init__(self, env: Env) -> None:
super().__init__(env)
self._obs_spec = OrderedDict()
wrapped_obs_spec = env.observation_spec().copy()
if 'front_close' in wrapped_obs_spec:
spec = wrapped_obs_spec['front_close']
# drop batch dim
self._obs_spec['pixels'] = specs.BoundedArray(shape=spec.shape[1:],
dtype=spec.dtype,
minimum=spec.minimum,
maximum=spec.maximum,
name='pixels')
wrapped_obs_spec.pop('front_close')
for spec in wrapped_obs_spec.values():
assert spec.dtype == np.float64
assert type(spec) == specs.Array
dim = np.sum(
np.fromiter((int(np.prod(spec.shape)) # type: ignore
for spec in wrapped_obs_spec.values()), np.int32))
self._obs_spec['observations'] = specs.Array(shape=(dim,),
dtype=np.float32,
name='observations')
def observation_spec(self) -> tp.Any:
return self._obs_spec
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
super()._augment_time_step(time_step=time_step, action=action)
obs = OrderedDict()
# TODO: this is badly typed since observation is a dict in this case
if 'front_close' in time_step.observation:
pixels = time_step.observation['front_close']
time_step.observation.pop('front_close') # type: ignore
pixels = np.squeeze(pixels)
obs['pixels'] = pixels
features = []
for feature in time_step.observation.values(): # type: ignore
features.append(feature.ravel())
obs['observations'] = np.concatenate(features, axis=0)
return time_step._replace(observation=obs)
class ActionRepeatWrapper(EnvWrapper):
def __init__(self, env: tp.Any, num_repeats: int) -> None:
super().__init__(env)
self._num_repeats = num_repeats
def step(self, action: np.ndarray) -> TimeStep:
reward = 0.0
discount = 1.0
for _ in range(self._num_repeats):
time_step = self._env.step(action)
reward += (time_step.reward or 0.0) * discount
discount *= time_step.discount
if time_step.last():
break
return time_step._replace(reward=reward, discount=discount)
class FrameStackWrapper(EnvWrapper):
def __init__(self, env: Env, num_frames: int, pixels_key: str = 'pixels') -> None:
super().__init__(env)
self._num_frames = num_frames
self._frames: tp.Deque[np.ndarray] = deque([], maxlen=num_frames)
self._pixels_key = pixels_key
wrapped_obs_spec = env.observation_spec()
assert pixels_key in wrapped_obs_spec
pixels_shape = wrapped_obs_spec[pixels_key].shape
# remove batch dim
if len(pixels_shape) == 4:
pixels_shape = pixels_shape[1:]
self._obs_spec = specs.BoundedArray(shape=np.concatenate(
[[pixels_shape[2] * num_frames], pixels_shape[:2]], axis=0),
dtype=np.uint8,
minimum=0,
maximum=255,
name='observation')
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
super()._augment_time_step(time_step=time_step, action=action)
assert len(self._frames) == self._num_frames
obs = np.concatenate(list(self._frames), axis=0)
return time_step._replace(observation=obs)
def _extract_pixels(self, time_step: TimeStep) -> np.ndarray:
pixels_ = time_step.observation[self._pixels_key]
# remove batch dim
if len(pixels_.shape) == 4:
pixels_ = pixels_[0]
return pixels_.transpose(2, 0, 1).copy()
def reset(self) -> TimeStep:
time_step = self._env.reset()
pixels_ = self._extract_pixels(time_step)
for _ in range(self._num_frames):
self._frames.append(pixels_)
return self._augment_time_step(time_step)
def step(self, action: np.ndarray) -> TimeStep:
time_step = self._env.step(action)
pixels_ = self._extract_pixels(time_step)
self._frames.append(pixels_)
return self._augment_time_step(time_step)
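# Note on the stacking above: frames are stored channel-first by
# _extract_pixels, so with 84x84 RGB pixels and num_frames=3 the observation
# spec is (9, 84, 84) uint8, i.e. (C * num_frames, H, W) after the
# np.concatenate along axis 0 in _augment_time_step.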
class GoalWrapper(EnvWrapper):
def __init__(self, env: Env, goal_func: tp.Callable[[Env], np.ndarray], append_goal_to_observation: bool = False) -> None:
"""Adds a goal space with a predefined function.
        This can also append the goal to the observation to make sure the goal is achievable.
"""
super().__init__(env)
self.append_goal_to_observation = append_goal_to_observation
self.goal_func = goal_func
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
goal = self.goal_func(self)
obs = time_step.observation.copy()
if self.append_goal_to_observation:
k = "observations"
obs[k] = np.concatenate([obs[k], goal], axis=0)
# obs[k] = np.concatenate([obs[k], np.random.normal(size=goal.shape)], axis=0)
ts = GoalTimeStep(
step_type=time_step.step_type,
reward=time_step.reward,
discount=time_step.discount,
observation=obs,
goal=goal,
)
return super()._augment_time_step(time_step=ts, action=action)
def observation_spec(self) -> specs.Array:
spec = super().observation_spec().copy()
k = "observations"
if not self.append_goal_to_observation:
return spec
goal = self.goal_func(self)
spec[k] = specs.Array((spec[k].shape[0] + goal.shape[0],), dtype=np.float32, name=k)
return spec
class ActionDTypeWrapper(EnvWrapper):
def __init__(self, env: Env, dtype) -> None:
super().__init__(env)
wrapped_action_spec = env.action_spec()
self._action_spec = specs.BoundedArray(wrapped_action_spec.shape,
dtype,
wrapped_action_spec.minimum,
wrapped_action_spec.maximum,
'action')
def action_spec(self) -> specs.BoundedArray:
return self._action_spec
def step(self, action) -> Any:
action = action.astype(self._env.action_spec().dtype)
return self._env.step(action)
class ObservationDTypeWrapper(EnvWrapper):
def __init__(self, env: Env, dtype) -> None:
super().__init__(env)
self._dtype = dtype
wrapped_obs_spec = env.observation_spec()['observations']
self._obs_spec = specs.Array(wrapped_obs_spec.shape, dtype,
'observation')
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
obs = time_step.observation['observations'].astype(self._dtype)
return time_step._replace(observation=obs)
def observation_spec(self) -> Any:
return self._obs_spec
class ExtendedGoalTimeStepWrapper(EnvWrapper):
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
if action is None:
action_spec = self.action_spec()
action = np.zeros(action_spec.shape, dtype=action_spec.dtype)
assert isinstance(time_step, GoalTimeStep)
ts = ExtendedGoalTimeStep(observation=time_step.observation,
step_type=time_step.step_type,
action=action,
reward=time_step.reward or 0.0,
discount=time_step.discount or 1.0,
goal=time_step.goal)
return super()._augment_time_step(time_step=ts, action=action)
class ExtendedTimeStepWrapper(EnvWrapper):
def _augment_time_step(self, time_step: TimeStep, action: tp.Optional[np.ndarray] = None) -> TimeStep:
if action is None:
action_spec = self.action_spec()
action = np.zeros(action_spec.shape, dtype=action_spec.dtype)
ts = ExtendedTimeStep(observation=time_step.observation,
step_type=time_step.step_type,
action=action,
reward=time_step.reward or 0.0,
discount=time_step.discount or 1.0)
return super()._augment_time_step(time_step=ts, action=action)
def _make_jaco(obs_type, domain, task, frame_stack, action_repeat, seed,
goal_space: tp.Optional[str] = None, append_goal_to_observation: bool = False
) -> FlattenJacoObservationWrapper:
env = cdmc.make_jaco(task, obs_type, seed)
if goal_space is not None:
# inline because circular import
        from url_benchmark import goals as _goals  # pylint: disable=import-outside-toplevel
funcs = _goals.goal_spaces.funcs[domain]
if goal_space not in funcs:
raise ValueError(f"No goal space {goal_space} for {domain}, avail: {list(funcs)}")
goal_func = funcs[goal_space]
env = GoalWrapper(env, goal_func, append_goal_to_observation=append_goal_to_observation)
env = ActionDTypeWrapper(env, np.float32)
env = ActionRepeatWrapper(env, action_repeat)
env = FlattenJacoObservationWrapper(env)
return env
def _make_dmc(obs_type, domain, task, frame_stack, action_repeat, seed,
goal_space: tp.Optional[str] = None, append_goal_to_observation: bool = False):
visualize_reward = False
if (domain, task) in suite.ALL_TASKS:
env = suite.load(domain,
task,
task_kwargs=dict(random=seed),
environment_kwargs=dict(flat_observation=True),
visualize_reward=visualize_reward)
else:
env = cdmc.make(domain,
task,
task_kwargs=dict(random=seed),
environment_kwargs=dict(flat_observation=True),
visualize_reward=visualize_reward)
if goal_space is not None:
# inline because circular import
        from url_benchmark import goals as _goals  # pylint: disable=import-outside-toplevel
funcs = _goals.goal_spaces.funcs[domain]
if goal_space not in funcs:
raise ValueError(f"No goal space {goal_space} for {domain}, avail: {list(funcs)}")
goal_func = funcs[goal_space]
env = GoalWrapper(env, goal_func, append_goal_to_observation=append_goal_to_observation)
env = ActionDTypeWrapper(env, np.float32)
env = ActionRepeatWrapper(env, action_repeat)
if obs_type == 'pixels':
# zoom in camera for quadruped
camera_id = dict(quadruped=2).get(domain, 0)
render_kwargs = dict(height=84, width=84, camera_id=camera_id)
env = pixels.Wrapper(env,
pixels_only=True,
render_kwargs=render_kwargs)
return env
def make(
name: str, obs_type='states', frame_stack=1, action_repeat=1,
seed=1, goal_space: tp.Optional[str] = None, append_goal_to_observation: bool = False
) -> EnvWrapper:
if append_goal_to_observation and goal_space is None:
raise ValueError("Cannot append goal space since none is defined")
assert obs_type in ['states', 'pixels']
if name.startswith('point_mass_maze'):
domain = 'point_mass_maze'
_, _, _, task = name.split('_', 3)
else:
domain, task = name.split('_', 1)
domain = dict(cup='ball_in_cup').get(domain, domain)
if sys.platform == "darwin":
raise UnsupportedPlatform("Mac platform is not supported")
make_fn = _make_jaco if domain == 'jaco' else _make_dmc
# TODO fix this when it fails (signatures differ)
env = make_fn(obs_type, domain, task, frame_stack, action_repeat, seed,
goal_space=goal_space, append_goal_to_observation=append_goal_to_observation) # type: ignore
if obs_type == 'pixels':
env = FrameStackWrapper(env, frame_stack)
else:
env = ObservationDTypeWrapper(env, np.float32)
env = action_scale.Wrapper(env, minimum=-1.0, maximum=+1.0)
if goal_space is not None:
env = ExtendedGoalTimeStepWrapper(env)
else:
env = ExtendedTimeStepWrapper(env)
return env
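# A minimal usage sketch (hypothetical helper mirroring url_benchmark/test_dmc.py):
def _make_env_demo() -> None:
    env = make("walker_walk", obs_type="states", frame_stack=1,
               action_repeat=1, seed=12)
    time_step = env.reset()
    assert time_step.observation.shape == env.observation_spec().shape
    goal_env = make("quadruped_walk", obs_type="states", frame_stack=1,
                    action_repeat=1, seed=12,
                    goal_space="simplified_quadruped",
                    append_goal_to_observation=True)
    assert goal_env.reset().goal.ndim == 1  # ExtendedGoalTimeStep carries the goal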
def extract_physics(env: Env) -> tp.Dict[str, float]:
"""Extract some physics available in the env"""
output = {}
names = ["torso_height", "torso_upright", "horizontal_velocity", "torso_velocity"]
for name in names:
if not hasattr(env.physics, name):
continue
val: tp.Union[float, np.ndarray] = getattr(env.physics, name)()
if isinstance(val, (int, float)) or not val.ndim:
output[name] = float(val)
else:
for k, v in enumerate(val):
output[f"{name}#{k}"] = float(v)
return output
class FloatStats:
"""Handle for keeping track of the statistics of a float variable"""
def __init__(self) -> None:
self.min = np.inf
self.max = -np.inf
self.mean = 0.0
self._count = 0
def add(self, value: float) -> "FloatStats":
self.min = min(value, self.min)
self.max = max(value, self.max)
self._count += 1
self.mean = (self._count - 1) / self._count * self.mean + 1 / self._count * value
return self
def items(self) -> tp.Iterator[tp.Tuple[str, float]]:
for name, val in self.__dict__.items():
if not name.startswith("_"):
yield name, val
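# The mean above is maintained with the standard incremental update
#     mean_n = (n - 1) / n * mean_{n-1} + x_n / n,
# so e.g. FloatStats().add(12).add(24).mean == 18 without storing the samples.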
class PhysicsAggregator:
"""Aggregate stats on the physics of an environment"""
def __init__(self) -> None:
self.stats: tp.Dict[str, FloatStats] = {}
def add(self, env: Env) -> "PhysicsAggregator":
phy = extract_physics(env)
for key, val in phy.items():
self.stats.setdefault(key, FloatStats()).add(val)
return self
def dump(self) -> tp.Iterator[tp.Tuple[str, float]]:
"""Exports all statistics and reset the statistics"""
for key, stats in self.stats.items():
for stat, val in stats.items():
yield (f'{key}/{stat}', val)
self.stats.clear()
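# A minimal usage sketch (hypothetical helper): aggregate physics statistics
# over a rollout and flush them for logging.
def _physics_logging_demo() -> None:
    env = make("walker_walk", obs_type="states", frame_stack=1,
               action_repeat=1, seed=12)
    agg = PhysicsAggregator()
    env.reset()
    agg.add(env)
    metrics = dict(agg.dump())  # e.g. {"torso_height/mean": ..., "torso_height/max": ...}
    assert metrics and not list(agg.dump())  # dump() also resets the aggregator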
|
controllable_agent-main
|
url_benchmark/dmc.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
from url_benchmark.dmc import TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import List
import numpy as np
from dm_env import StepType
import pytest
fixed_episode_lengths = [10, 10, 10, 10, 10]
variable_episode_lengths = [2, 3, 5, 6, 7]
@pytest.mark.parametrize('test_data', [(10, fixed_episode_lengths, None, False, 10),
(5, fixed_episode_lengths, None, True, 10),
(10, variable_episode_lengths, 8, False, 5),
(5, variable_episode_lengths, 8, True, 5)])
def test_avg_episode_length_fixed_length_not_full(test_data) -> None:
max_episodes, episode_lengths, max_episode_length, is_full, avg_episode_length = test_data
replay_storage = ReplayBuffer(
max_episodes=max_episodes, discount=1, future=1, max_episode_length=max_episode_length)
meta = {'z': np.ones((3, 3))}
for episode_length in episode_lengths:
for time_step in _create_dummy_episode(episode_length):
replay_storage.add(time_step, meta=meta)
assert replay_storage._full == is_full
assert replay_storage.avg_episode_length == avg_episode_length
@pytest.mark.parametrize('test_data', [(10, 5, 7), (10, 10, 7)])
def test_backward_compatibility(test_data) -> None:
max_episodes, episodes_count, episode_length = test_data
is_full = max_episodes == episodes_count
replay_storage = ReplayBuffer(max_episodes=max_episodes, discount=1, future=1, max_episode_length=episode_length + 1)
meta = {'z': np.ones((3, 3))}
for _ in range(episodes_count):
for time_step in _create_dummy_episode(episode_length):
replay_storage.add(time_step, meta=meta)
# remove attributes recently added
del replay_storage._episodes_length
del replay_storage._episodes_selection_probability
del replay_storage._is_fixed_episode_length
del replay_storage._max_episode_length
loaded_replay_storage = pickle.loads(pickle.dumps(replay_storage))
    assert loaded_replay_storage._idx == episodes_count % max_episodes
    assert loaded_replay_storage._full == is_full
    assert (loaded_replay_storage._episodes_length[:episodes_count] == episode_length).all()
    assert (loaded_replay_storage._episodes_length[episodes_count:] == 0).all()
assert loaded_replay_storage._max_episode_length is None
def _create_dummy_episode(episode_length: int) -> List[TimeStep]:
time_steps = []
for i in range(episode_length+1):
step_type = StepType.MID
if i == 0:
step_type = StepType.FIRST
elif i == episode_length:
step_type = StepType.LAST
time_step = TimeStep(step_type=step_type, observation=np.zeros(
(3, 3)), reward=1, discount=1)
time_steps.append(time_step)
return time_steps
|
controllable_agent-main
|
url_benchmark/test_in_memory_replay_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import collections
import numpy as np
import pytest
from url_benchmark import goals
def test_basics() -> None:
assert "simplified_walker" in goals.goal_spaces.funcs["walker"]
assert len(goals.goals.funcs["simplified_walker"]["walker_stand"]()) == 3
@pytest.mark.parametrize("domain,space", [(d, s) for d in goals.goal_spaces.funcs for s in goals.goal_spaces.funcs[d]])
def test_goal_space_extraction(domain: str, space: str) -> None:
env = goals._make_env(domain)
out = goals.goal_spaces.funcs[domain][space](env)
assert isinstance(out, np.ndarray)
assert out.dtype == np.float32
for name, func in goals.goals.funcs.get(space, {}).items():
goal = func()
assert goal.shape == out.shape, f"Wrong shape for goal {name}"
assert goal.dtype == np.float32
@pytest.mark.parametrize("case", (range(goals.QuadrupedReward.NUM_CASES)))
def test_quad_rewards(case: int) -> None:
reward = goals.QuadrupedReward()
reward._case = case
out = reward.from_physics(reward._env.physics.get_state())
assert 0 <= out <= 1
def test_quad_pos_rewards() -> None:
reward = goals.QuadrupedPosReward()
env = goals._make_env("quadruped")
env.reset()
out = reward.from_physics(env.physics.get_state())
out2 = reward.from_env(env)
assert 0 <= out <= 1
assert out == out2, "Should be deterministic"
assert reward.get_goal("quad_pos_speed").dtype == np.float32
def test_walker_equation() -> None:
reward = goals.WalkerEquation("1 / (1 + abs(x - 2))")
env = goals._make_env("walker")
env.reset()
out = reward.from_physics(env.physics.get_state())
out2 = reward.from_env(env)
assert 0 <= out <= 1
assert out == out2, "Should be deterministic"
def test_walker_bad_equation() -> None:
with pytest.raises(ValueError):
goals.WalkerEquation("1 / (1 + os(x - 2))")
def test_walker_random_equation() -> None:
env = goals._make_env("walker")
reward = goals.WalkerRandomReward()
out = reward.from_env(env)
assert 0 <= out <= 1
def test_dmc_rewards() -> None:
env = goals._make_env("quadruped")
reward = env.task.get_reward(env.physics)
rewarders = {name: goals.get_reward_function(f"quadruped_{name}") for name in ["walk", "stand"]}
rewards = {name: r.from_env(env) for name, r in rewarders.items()}
assert rewards["stand"] == reward
assert rewards["walk"] != reward
assert rewarders["stand"].from_physics(env.physics.get_state()) == reward
def test_walker_qpos() -> None:
env = goals._make_env("walker")
env.reset()
env.step(np.random.uniform(-1, 1, size=6))
out = goals.goal_spaces.funcs["walker"]["walker_pos_speed"](env)
qpos = env.physics.data.qpos
assert pytest.approx(qpos[1]) == out[-1], qpos
@pytest.mark.parametrize("name,expected", [("walker_pos_speed", 4)])
def test_goal_space_dim(name: str, expected: int) -> None:
out = goals.get_goal_space_dim(name)
assert out == expected
def test_uniquely_named_goal_space() -> None:
space_counts = collections.Counter(space for spaces in goals.goal_spaces.funcs.values() for space in spaces)
duplicated = {x for x, y in space_counts.items() if y > 1}
if duplicated:
raise RuntimeError(f"Duplicated goal space names: {duplicated}\n(goal space names need to be unique)")
@pytest.mark.parametrize(
"string,expected", [
("(x + y) * z", {"x", "y", "z"}),
("import x;os.system(stuff) # hello", {"import", "x", "os", "system", "stuff"}),
])
def test_extract_variables(string: str, expected: tp.Set[str]) -> None:
out = goals.extract_names(string)
assert out == expected
|
controllable_agent-main
|
url_benchmark/test_goals.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from url_benchmark.goals import DmcReward
import torch
name = "walker_flip"
load_replay_buffer = "/checkpoint/jrapin/ca/buffers/walker_rnd_ddpg_220803.pt"
relabeled_replay_file_path = "/private/home/atouati/controllable_agent/datasets/walker/rnd/walker_flip_rnd_ddpg.pt"
custom_reward = DmcReward(name)
print("loading Replay from %s", load_replay_buffer)
with open(load_replay_buffer, 'rb') as f:
replay_loader = torch.load(f)
replay_loader.relabel(custom_reward)
with open(relabeled_replay_file_path, 'wb') as f:
torch.save(replay_loader, f)
|
controllable_agent-main
|
url_benchmark/relabel_buffer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import os
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
os.environ['MUJOCO_GL'] = 'egl'
from pathlib import Path
import dataclasses
import typing as tp
import hydra
from hydra.core.config_store import ConfigStore
import torch
import omegaconf as omgcf
from url_benchmark.pretrain import make_agent
from url_benchmark import dmc
from url_benchmark import utils
from url_benchmark.video import VideoRecorder
from url_benchmark import agent as agents
from url_benchmark import goals as _goals
from typing import Any
torch.backends.cudnn.benchmark = True
# # # Config # # #
@dataclasses.dataclass
class PlayConfig:
agent: tp.Any
# mode
reward_free: bool = True
# task settings
task: str = "walker_stand"
obs_type: str = "states" # [states, pixels]
frame_stack: int = 3 # only works if obs_type=pixels
action_repeat: int = 1 # set to 2 for pixels
discount: float = 0.99
goal_space: str = "simplified"
# train settings
num_train_frames: int = 100010
num_seed_frames: int = 0
# eval
eval_every_frames: int = 10000
num_eval_episodes: int = 10
# snapshot
snapshot_ts: int = 2000000
snapshot_base_dir: str = omgcf.SI("./pretrained_models")
# replay buffer
replay_buffer_size: int = 1000000
replay_buffer_num_workers: int = 4
batch_size: int = omgcf.II("agent.batch_size")
nstep: int = omgcf.II("agent.nstep")
update_encoder: bool = False # should always be true for pre-training
# misc
seed: int = 1
device: str = "cuda"
save_video: bool = True
save_train_video: bool = False
use_tb: bool = False
use_wandb: bool = False
use_hiplog: bool = False
# experiment
experiment: str = "exp"
# loaded as base_finetune in finetune.yaml
# we keep the yaml since it's easier to configure plugins from it
ConfigStore.instance().store(name="workspace_config", node=PlayConfig)
# # # Implem # # #
class Workspace:
def __init__(self, cfg: PlayConfig) -> None:
self.work_dir = Path.cwd()
print(f'workspace: {self.work_dir}')
self.cfg = cfg
utils.set_seed_everywhere(cfg.seed)
self.device = torch.device(cfg.device)
# create envs
self.env = dmc.make(cfg.task, cfg.obs_type, cfg.frame_stack,
cfg.action_repeat, cfg.seed, cfg.goal_space)
# create agent
self.agent = make_agent(cfg.obs_type,
self.env.observation_spec(),
self.env.action_spec(),
cfg.num_seed_frames // cfg.action_repeat,
cfg.agent)
# initialize from pretrained
if cfg.snapshot_ts > 0:
pretrained_agent = self.load_snapshot()['agent']
self.agent.init_from(pretrained_agent)
# create video recorders
self.video_recorder = VideoRecorder(
self.work_dir if cfg.save_video else None)
def play(self) -> None:
episode, total_reward = 0, 0.0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
while eval_until_episode(episode):
total_reward = 0
if isinstance(self.agent, agents.FBDDPGAgent):
g = _goals.goals.funcs[self.cfg.goal_space][self.cfg.task]()
meta = self.agent.get_goal_meta(g)
else:
meta = self.agent.init_meta()
time_step = self.env.reset()
self.video_recorder.init(self.env)
step = 0
eval_until_step = utils.Until(1000)
while eval_until_step(step):
# print(f'episode {episode}, step {step}')
# while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
meta,
1,
eval_mode=True)
time_step = self.env.step(action)
self.video_recorder.record(self.env)
total_reward += time_step.reward
# print(time_step.goal[2])
step += 1
episode += 1
print(total_reward)
self.video_recorder.save(f'{episode}.mp4')
def load_snapshot(self) -> Any:
snapshot_base_dir = Path(self.cfg.snapshot_base_dir)
# domain, _ = self.cfg.task.split('_', 1)
# snapshot_dir = snapshot_base_dir / self.cfg.obs_type / domain / self.cfg.agent.name
snapshot_dir = snapshot_base_dir
def try_load():
# snapshot = snapshot_dir / str(
# seed) / f'snapshot_{self.cfg.snapshot_ts}.pt'
snapshot = snapshot_dir / f'snapshot_{self.cfg.snapshot_ts}.pt'
# if not snapshot.exists():
# return None
with snapshot.open('rb') as f:
payload = torch.load(f)
return payload
# try to load current seed
payload = try_load()
return payload
@hydra.main(config_path='.', config_name='base_config')
def main(cfg) -> None:
workspace = Workspace(cfg)
workspace.play()
if __name__ == '__main__':
main()
|
controllable_agent-main
|
url_benchmark/play_behaviors.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from pathlib import Path
import cv2
import imageio
import numpy as np
import wandb
class VideoRecorder:
def __init__(self,
root_dir: tp.Optional[tp.Union[str, Path]],
render_size: int = 256,
fps: int = 20,
camera_id: int = 0,
use_wandb: bool = False) -> None:
self.save_dir: tp.Optional[Path] = None
if root_dir is not None:
self.save_dir = Path(root_dir) / 'eval_video'
self.save_dir.mkdir(exist_ok=True)
self.enabled = False
self.render_size = render_size
self.fps = fps
self.frames: tp.List[np.ndarray] = []
self.camera_id = camera_id
self.use_wandb = use_wandb
def init(self, env, enabled: bool = True) -> None:
self.frames = []
self.enabled = self.save_dir is not None and enabled
self.record(env)
def record(self, env) -> None:
if self.enabled:
if hasattr(env, 'physics'):
if env.physics is not None:
frame = env.physics.render(height=self.render_size,
width=self.render_size,
camera_id=self.camera_id)
else:
frame = env.base_env.render()
else:
frame = env.render()
self.frames.append(frame)
def log_to_wandb(self) -> None:
frames = np.transpose(np.array(self.frames), (0, 3, 1, 2))
fps, skip = 6, 8
wandb.log({
'eval/video':
wandb.Video(frames[::skip, :, ::2, ::2], fps=fps, format="gif")
})
def save(self, file_name: str) -> None:
if self.enabled:
if self.use_wandb:
self.log_to_wandb()
assert self.save_dir is not None
path = self.save_dir / file_name
imageio.mimsave(str(path), self.frames, fps=self.fps) # type: ignore
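# A minimal usage sketch (hypothetical helper): record one short episode the
# same way play_behaviors.py does; `env` is assumed to be a DMC env exposing
# `physics` (or a `render()` method) and a dm_env action spec.
def _record_episode(env, num_steps: int = 100) -> None:
    recorder = VideoRecorder(root_dir=".")  # frames are written to ./eval_video
    env.reset()
    recorder.init(env)
    for _ in range(num_steps):
        env.step(env.action_spec().generate_value())
        recorder.record(env)
    recorder.save("demo.mp4")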
class TrainVideoRecorder:
def __init__(self,
root_dir: tp.Optional[tp.Union[str, Path]],
render_size: int = 256,
fps: int = 20,
camera_id: int = 0,
use_wandb: bool = False) -> None:
self.save_dir: tp.Optional[Path] = None
if root_dir is not None:
self.save_dir = Path(root_dir) / 'train_video'
self.save_dir.mkdir(exist_ok=True)
self.enabled = False
self.render_size = render_size
self.fps = fps
self.frames: tp.List[np.ndarray] = []
self.camera_id = camera_id
self.use_wandb = use_wandb
def init(self, obs, enabled=True) -> None:
self.frames = []
self.enabled = self.save_dir is not None and enabled
self.record(obs)
def record(self, obs) -> None:
if self.enabled:
frame = cv2.resize(obs[-3:].transpose(1, 2, 0),
dsize=(self.render_size, self.render_size),
interpolation=cv2.INTER_CUBIC)
self.frames.append(frame)
def log_to_wandb(self) -> None:
frames = np.transpose(np.array(self.frames), (0, 3, 1, 2))
fps, skip = 6, 8
wandb.log({
'train/video':
wandb.Video(frames[::skip, :, ::2, ::2], fps=fps, format="gif")
})
def save(self, file_name) -> None:
if self.enabled:
if self.use_wandb:
self.log_to_wandb()
assert self.save_dir is not None
path = self.save_dir / file_name
imageio.mimsave(str(path), self.frames, fps=self.fps) # type: ignore
|
controllable_agent-main
|
url_benchmark/video.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import pytest
from url_benchmark import dmc
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
@pytest.mark.parametrize("name,expected", [
("walker_walk", {'torso_height': 1.3, 'torso_upright': 1.0, 'horizontal_velocity': 0.0}),
("quadruped_walk", {'torso_upright': 1.0, 'torso_velocity#0': 0.0, 'torso_velocity#1': 0.0, 'torso_velocity#2': 0.0}),
])
def test_extract_physics(name: str, expected: tp.Dict[str, float]) -> None:
env = dmc.make(name, obs_type="states", frame_stack=1, action_repeat=1, seed=12)
phy = dmc.extract_physics(env)
assert phy == expected
time_step = env.reset()
assert time_step.physics.size > 0
# check that it works in the ReplayBuffer
rb = ReplayBuffer(12, 0.9, True)
rb.add(time_step, {})
assert "physics" in rb._current_episode
def test_goal_wrapper() -> None:
env = dmc.make("quadruped_walk", obs_type="states", frame_stack=1, action_repeat=1,
seed=12, goal_space="simplified_quadruped", append_goal_to_observation=True)
out = env.reset()
assert out.observation.shape == env.observation_spec().shape
env = dmc.make("quadruped_walk", obs_type="states", frame_stack=1, action_repeat=1,
seed=12, goal_space="simplified_quadruped", append_goal_to_observation=False)
out2 = env.reset()
assert out2.observation.shape[0] < out.observation.shape[0]
def test_physics_aggregator() -> None:
env = dmc.make("walker_walk", obs_type="states", frame_stack=1, action_repeat=1, seed=12)
agg = dmc.PhysicsAggregator()
agg.add(env)
names = [x[0] for x in agg.dump()]
assert len(names) == 9
assert not list(agg.dump())
def test_float_stats() -> None:
stats = dmc.FloatStats().add(12)
assert all(getattr(stats, name) == 12 for name in ["mean", "max", "min"])
stats.add(24)
assert stats.min == 12
assert stats.max == 24
assert stats.mean == 18
assert stats._count == 2
stats.add(24)
assert stats.mean == 20
|
controllable_agent-main
|
url_benchmark/test_dmc.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import enum
import dm_env
from dm_env import specs
import numpy as np
import matplotlib.pyplot as plt
from url_benchmark.dmc import ExtendedTimeStep
class ObservationType(enum.IntEnum):
STATE_INDEX = enum.auto()
AGENT_ONEHOT = enum.auto()
GRID = enum.auto()
AGENT_GOAL_POS = enum.auto()
AGENT_POS = enum.auto()
def build_gridworld_task(task,
discount=1.0,
penalty_for_walls=0,
observation_type=ObservationType.AGENT_POS,
max_episode_length=200):
"""Construct a particular Gridworld layout with start/goal states.
Args:
task: string name of the task to use. One of {'simple', 'obstacle',
'random_goal'}.
discount: Discounting factor included in all Timesteps.
penalty_for_walls: Reward added when hitting a wall (should be negative).
observation_type: Enum observation type to use. One of:
* ObservationType.STATE_INDEX: int32 index of agent occupied tile.
* ObservationType.AGENT_ONEHOT: NxN float32 grid, with a 1 where the
agent is and 0 elsewhere.
* ObservationType.GRID: NxNx3 float32 grid of feature channels.
First channel contains walls (1 if wall, 0 otherwise), second the
agent position (1 if agent, 0 otherwise) and third goal position
(1 if goal, 0 otherwise)
* ObservationType.AGENT_GOAL_POS: float32 tuple with
(agent_y, agent_x, goal_y, goal_x).
max_episode_length: If set, will terminate an episode after this many
steps.
"""
tasks_specifications = {
'simple': {
'layout': [
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
],
'start_state': (2, 2),
'randomize_goals': True
# 'goal_state': (7, 2)
},
'obstacle': {
'layout': [
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 0, 0, 0, 0, 0, -1, 0, 0, -1],
[-1, 0, 0, 0, -1, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
],
'start_state': (2, 2),
'goal_state': (2, 8)
},
'random_goal': {
'layout': [
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, -1, -1, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
],
'start_state': (2, 2),
# 'randomize_goals': True
},
}
return GridWorld(
discount=discount,
penalty_for_walls=penalty_for_walls,
observation_type=observation_type,
max_episode_length=max_episode_length,
**tasks_specifications[task])
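# A minimal usage sketch (hypothetical helper): build the 'simple' layout and
# take a few steps; actions are 0=up, 1=right, 2=down, 3=left, 4=stay.
def _gridworld_demo() -> None:
    env = build_gridworld_task('simple', observation_type=ObservationType.AGENT_POS)
    time_step = env.reset()
    for action in (1, 2, 4):
        time_step = env.step(action)
    print(time_step.observation, time_step.reward)  # normalized (y, x) and reward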
class GridWorld(dm_env.Environment):
def __init__(self,
layout,
start_state,
goal_state=None,
observation_type=ObservationType.STATE_INDEX,
discount=1.0,
penalty_for_walls=0,
reward_goal=1,
max_episode_length=None,
randomize_goals=False) -> None:
"""Build a grid environment.
Simple gridworld defined by a map layout, a start and a goal state.
Layout should be a NxN grid, containing:
* 0: empty
* -1: wall
* Any other positive value: value indicates reward; episode will terminate
Args:
layout: NxN array of numbers, indicating the layout of the environment.
start_state: Tuple (y, x) of starting location.
goal_state: Optional tuple (y, x) of goal location. Will be randomly
sampled once if None.
observation_type: Enum observation type to use. One of:
* ObservationType.STATE_INDEX: int32 index of agent occupied tile.
* ObservationType.AGENT_ONEHOT: NxN float32 grid, with a 1 where the
agent is and 0 elsewhere.
* ObservationType.GRID: NxNx3 float32 grid of feature channels.
First channel contains walls (1 if wall, 0 otherwise), second the
agent position (1 if agent, 0 otherwise) and third goal position
(1 if goal, 0 otherwise)
* ObservationType.AGENT_GOAL_POS: float32 tuple with
(agent_y, agent_x, goal_y, goal_x)
discount: Discounting factor included in all Timesteps.
penalty_for_walls: Reward added when hitting a wall (should be negative).
reward_goal: Reward added when finding the goal (should be positive).
max_episode_length: If set, will terminate an episode after this many
steps.
randomize_goals: If true, randomize goal at every episode.
"""
if observation_type not in ObservationType:
            raise ValueError('observation_type should be an ObservationType instance.')
self._layout = np.array(layout)
self._start_state = start_state
self._state = self._start_state
self._number_of_states = np.prod(np.shape(self._layout))
self._discount = discount
self._penalty_for_walls = penalty_for_walls
self._reward_goal = reward_goal
self._observation_type = observation_type
self._layout_dims = self._layout.shape
self._max_episode_length = max_episode_length
self._num_episode_steps = 0
self._randomize_goals = randomize_goals
self._goal_state: tp.Tuple[int, int]
if goal_state is None:
# Randomly sample goal_state if not provided
goal_state = self._sample_goal()
self.goal_state = goal_state
def _sample_goal(self):
"""Randomly sample reachable non-starting state."""
# Sample a new goal
n = 0
max_tries = 1e5
while n < max_tries:
goal_state = tuple(np.random.randint(d) for d in self._layout_dims)
if goal_state != self._state and self._layout[goal_state] == 0:
# Reachable state found!
return goal_state
n += 1
raise ValueError('Failed to sample a goal state.')
@property
def number_of_states(self):
return self._number_of_states
@property
def goal_state(self):
return self._goal_state
@goal_state.setter
def goal_state(self, new_goal):
if new_goal == self._state or self._layout[new_goal] < 0:
raise ValueError('This is not a valid goal!')
# Zero out any other goal
self._layout[self._layout > 0] = 0
# Setup new goal location
self._layout[new_goal] = self._reward_goal
self._goal_state = new_goal
def set_state(self, x, y):
self._state = (y, x)
def observation_spec(self):
if self._observation_type is ObservationType.AGENT_ONEHOT:
return specs.Array(
shape=(self._number_of_states, ),
dtype=np.float32,
name='observation_agent_onehot')
elif self._observation_type is ObservationType.GRID:
return specs.Array(
shape=self._layout_dims + (3,),
dtype=np.float32,
name='observation_grid')
elif self._observation_type is ObservationType.AGENT_POS:
return specs.Array(
shape=(2,), dtype=np.float32, name='observation_agent_pos')
elif self._observation_type is ObservationType.AGENT_GOAL_POS:
return specs.Array(
shape=(4,), dtype=np.float32, name='observation_agent_goal_pos')
elif self._observation_type is ObservationType.STATE_INDEX:
return specs.DiscreteArray(
self._number_of_states, dtype=int, name='observation_state_index')
def action_spec(self):
return specs.DiscreteArray(5, dtype=int, name='action')
def get_state(self):
return self._state
def get_goal_obs(self):
if self._observation_type is ObservationType.AGENT_ONEHOT:
obs = np.zeros(self._layout.shape, dtype=np.float32)
            # Place goal
obs[self._goal_state] = 1
return obs.flatten()
elif self._observation_type is ObservationType.AGENT_POS:
return np.array(self._goal_state, dtype=np.float32) / np.array(self._layout.shape, dtype=np.float32)
elif self._observation_type is ObservationType.STATE_INDEX:
y, x = self._goal_state
return y * self._layout.shape[1] + x
def get_obs(self):
if self._observation_type is ObservationType.AGENT_ONEHOT:
obs = np.zeros(self._layout.shape, dtype=np.float32)
# Place agent
obs[self._state] = 1
return obs.flatten()
elif self._observation_type is ObservationType.GRID:
obs = np.zeros(self._layout.shape + (3,), dtype=np.float32)
obs[..., 0] = self._layout < 0
obs[self._state[0], self._state[1], 1] = 1
obs[self._goal_state[0], self._goal_state[1], 2] = 1
return obs
elif self._observation_type is ObservationType.AGENT_POS:
return np.array(self._state, dtype=np.float32) / np.array(self._layout.shape, dtype=np.float32)
elif self._observation_type is ObservationType.AGENT_GOAL_POS:
return np.array(self._state + self._goal_state, dtype=np.float32)
elif self._observation_type is ObservationType.STATE_INDEX:
y, x = self._state
return y * self._layout.shape[1] + x
def reset(self):
self._state = self._start_state
self._num_episode_steps = 0
if self._randomize_goals:
self.goal_state = self._sample_goal()
return ExtendedTimeStep(
step_type=dm_env.StepType.FIRST,
action=0,
reward=0.0,
discount=1,
observation=self.get_obs())
def step(self, action):
y, x = self._state
if action == 0: # up
new_state = (y - 1, x)
elif action == 1: # right
new_state = (y, x + 1)
elif action == 2: # down
new_state = (y + 1, x)
elif action == 3: # left
new_state = (y, x - 1)
elif action == 4: # stay
new_state = (y, x)
else:
raise ValueError(
'Invalid action: {} is not 0, 1, 2, 3, or 4.'.format(action))
new_y, new_x = new_state
step_type = dm_env.StepType.MID
if self._layout[new_y, new_x] == -1: # wall
reward = self._penalty_for_walls
discount = self._discount
new_state = (y, x)
elif self._layout[new_y, new_x] == 0: # empty cell
reward = 0.
discount = self._discount
else: # a goal
reward = self._layout[new_y, new_x]
            # if we choose to terminate the episode at the goal
# discount = 0.
# new_state = self._start_state
# step_type = dm_env.StepType.LAST
discount = self._discount
self._state = new_state
self._num_episode_steps += 1
if (self._max_episode_length is not None and
self._num_episode_steps >= self._max_episode_length):
step_type = dm_env.StepType.LAST
return ExtendedTimeStep(
step_type=step_type,
action=action,
reward=np.float32(reward),
discount=discount,
observation=self.get_obs())
def plot_grid(self, add_start=True):
asbestos = (127 / 255, 140 / 255, 141 / 255, 0.8)
dodger_blue = (25 / 255, 140 / 255, 255 / 255, 0.8)
# carrot = (235 / 255, 137 / 255, 33 / 255, 0.8)
grid_kwargs = {'color': (220 / 255, 220 / 255, 220 / 255, 0.5)}
# marker_style = dict(linestyle=':', color=carrot, markersize=20)
plt.figure(figsize=(4, 4))
img = np.ones((self._layout.shape[0], self._layout.shape[1], 4))
wall_y, wall_x = np.where(self._layout <= -1)
for i in range(len(wall_y)):
img[wall_y[i], wall_x[i]] = np.array(asbestos)
plt.imshow(img, interpolation=None)
# plt.imshow(self._layout <= -1, interpolation='nearest')
ax = plt.gca()
ax.grid(0)
plt.xticks([])
plt.yticks([])
# Add start/goal
if add_start:
plt.text(
self._start_state[1],
self._start_state[0],
r'$\mathbf{S}$',
fontsize=16,
ha='center',
va='center')
plt.text(
self._goal_state[1],
self._goal_state[0],
r'$\mathbf{G}$',
fontsize=16,
ha='center',
va='center',
color=dodger_blue)
h, w = self._layout.shape
for y in range(h - 1):
plt.plot([-0.5, w - 0.5], [y + 0.5, y + 0.5], **grid_kwargs)
for x in range(w - 1):
plt.plot([x + 0.5, x + 0.5], [-0.5, h - 0.5], **grid_kwargs)
def render(self, return_rgb=True):
carrot = (235 / 255, 137 / 255, 33 / 255, 0.8)
self.plot_grid(add_start=False)
# Add the agent location
plt.text(
self._state[1],
self._state[0],
u'😃',
fontname='symbola',
fontsize=18,
ha='center',
va='center',
color=carrot)
if return_rgb:
fig = plt.gcf()
plt.axis('tight')
plt.subplots_adjust(0, 0, 1, 1, 0, 0)
fig.canvas.draw()
            data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
w, h = fig.canvas.get_width_height()
data = data.reshape((h, w, 3))
plt.close(fig)
return data
def plot_policy(self, policy):
action_names = [
r'$\uparrow$', r'$\rightarrow$', r'$\downarrow$', r'$\leftarrow$'
]
self.plot_grid()
plt.title('Policy Visualization')
h, w = self._layout.shape
for y in range(h):
for x in range(w):
# if ((y, x) != self._start_state) and ((y, x) != self._goal_state):
if (y, x) != self._goal_state:
action_name = action_names[policy[y, x]]
plt.text(x, y, action_name, ha='center', va='center')
def plot_greedy_policy(self, q):
greedy_actions = np.argmax(q, axis=2)
self.plot_policy(greedy_actions)
|
controllable_agent-main
|
url_benchmark/gridworld/env.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
controllable_agent-main
|
url_benchmark/gridworld/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
from pathlib import Path
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from .ddpg import MetaDict
from .ddpg import Encoder
from .fb_modules import Actor, DiagGaussianActor, ForwardMap, BackwardMap, mlp, OnlineCov
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class SFAgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.sf.SFAgent"
name: str = "sf"
# reward_free: ${reward_free}
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
lr_coef: float = 5
sf_target_tau: float = 0.01 # 0.001-0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog")  # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING # ??? # to be specified later
num_inference_steps: int = 5120
hidden_dim: int = 1024 # 128, 2048
backward_hidden_dim: int = 512 # 128, 2048
feature_dim: int = 512 # 128, 1024
z_dim: int = 100 # 30-200
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" # 0, 0.1, 0.2
stddev_clip: float = 0.3 # 1
update_z_every_step: int = 100
nstep: int = 1
batch_size: int = 1024
init_sf: bool = True
update_encoder: bool = omegaconf.II("update_encoder") # ${update_encoder}
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
# ortho_coef: float = 0.1 # 0.01-10
log_std_bounds: tp.Tuple[float, float] = (-5, 2) # param for DiagGaussianActor
temp: float = 1 # temperature for DiagGaussianActor
boltzmann: bool = False # set to true for DiagGaussianActor
debug: bool = False
preprocess: bool = True
num_sf_updates: int = 1
feature_learner: str = "icm"
mix_ratio: float = 0.0
q_loss: bool = True
update_cov_every_step: int = 1000
add_trunk: bool = False
cs = ConfigStore.instance()
cs.store(group="agent", name="sf", node=SFAgentConfig)
class FeatureLearner(nn.Module):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__()
self.feature_net: nn.Module = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
return None
class Identity(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.feature_net = nn.Identity()
class Laplacian(FeatureLearner):
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del action
del future_obs
phi = self.feature_net(obs)
next_phi = self.feature_net(next_obs)
loss = (phi - next_phi).pow(2).mean()
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
return loss
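# The Laplacian loss above combines a spectral term, E||phi(s) - phi(s')||^2,
# with an orthonormality penalty built from the in-batch Gram matrix
# Cov = phi phi^T: off-diagonal entries are penalized quadratically and the
# diagonal is encouraged via the -2 * Cov.diag().mean() term (which, up to the
# missing squared-diagonal term and constants, approximates ||Cov - I||_F^2).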
class ContrastiveFeature(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.apply(utils.weight_init)
# self.W = nn.Linear(z_dim, z_dim, bias=False)
# nn.init.orthogonal_(self.W.weight.data, 1)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del action
del next_obs
assert future_obs is not None
# phi = self.feature_net(obs)
# future_phi = self.feature_net(future_obs)
# phi = F.normalize(phi, dim=1)
# future_phi = F.normalize(future_phi, dim=1)
phi = self.feature_net(obs)
future_mu = self.mu_net(future_obs)
phi = F.normalize(phi, dim=1)
future_mu = F.normalize(future_mu, dim=1)
logits = torch.einsum('sd, td-> st', phi, future_mu) # batch x batch
I = torch.eye(*logits.size(), device=logits.device)
off_diag = ~I.bool()
logits_off_diag = logits[off_diag].reshape(logits.shape[0], logits.shape[0] - 1)
loss = - logits.diag() + torch.logsumexp(logits_off_diag, dim=1)
loss = loss.mean()
return loss
# loss = - logits.diag().mean() + 0.5 * logits[off_diag].pow(2).mean()
# orthonormality loss
# Cov = torch.matmul(phi, phi.T)
# I = torch.eye(*Cov.size(), device=Cov.device)
# off_diag = ~I.bool()
# orth_loss_diag = - 2 * Cov.diag().mean()
# orth_loss_offdiag = Cov[off_diag].pow(2).mean()
# orth_loss = orth_loss_offdiag + orth_loss_diag
# loss += orth_loss
# normalize to compute cosine distance
# phi = F.normalize(phi, dim=1)
# future_phi = F.normalize(future_phi, dim=1)
# logits = torch.einsum('sd, td-> st', phi, future_phi) # batch x batch
# labels = torch.eye(*logits.size(), out=torch.empty_like(logits))
# # - labels * torch.log(torch.sigmoid(logits)) - (1 - labels) * torch.log(1 - torch.sigmoid(logits))
# loss = F.binary_cross_entropy(torch.sigmoid(logits), labels)
class ContrastiveFeaturev2(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.apply(utils.weight_init)
# self.W = nn.Linear(z_dim, z_dim, bias=False)
# nn.init.orthogonal_(self.W.weight.data, 1)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del action
del next_obs
assert future_obs is not None
# phi = self.feature_net(obs)
# future_phi = self.feature_net(future_obs)
# phi = F.normalize(phi, dim=1)
# future_phi = F.normalize(future_phi, dim=1)
future_phi = self.feature_net(future_obs)
mu = self.mu_net(obs)
future_phi = F.normalize(future_phi, dim=1)
mu = F.normalize(mu, dim=1)
logits = torch.einsum('sd, td-> st', mu, future_phi) # batch x batch
I = torch.eye(*logits.size(), device=logits.device)
off_diag = ~I.bool()
logits_off_diag = logits[off_diag].reshape(logits.shape[0], logits.shape[0] - 1)
loss = - logits.diag() + torch.logsumexp(logits_off_diag, dim=1)
loss = loss.mean()
return loss
class ICM(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
# self.forward_dynamic_net = mlp(z_dim + action_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', z_dim)
self.inverse_dynamic_net = mlp(2 * z_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', action_dim, 'tanh')
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(obs)
next_phi = self.feature_net(next_obs)
# predicted_next_obs = self.forward_dynamic_net(torch.cat([phi, action], dim=-1))
# forward_error = (next_phi.detach() - predicted_next_obs).pow(2).mean()
predicted_action = self.inverse_dynamic_net(torch.cat([phi, next_phi], dim=-1))
backward_error = (action - predicted_action).pow(2).mean()
icm_loss = backward_error
# icm_loss = forward_error + backward_error
return icm_loss
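# As written, this ICM variant trains features only through the inverse
# dynamics head: phi(s) and phi(s') are concatenated and must reconstruct the
# action, and the squared action-prediction error is the feature loss (the
# forward-dynamics branch is left commented out above).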
class TransitionModel(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.forward_dynamic_net = mlp(z_dim + action_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', obs_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(obs)
predicted_next_obs = self.forward_dynamic_net(torch.cat([phi, action], dim=-1))
forward_error = (predicted_next_obs - next_obs).pow(2).mean()
return forward_error
class TransitionLatentModel(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.forward_dynamic_net = mlp(z_dim + action_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', z_dim)
self.target_feature_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(obs)
with torch.no_grad():
next_phi = self.target_feature_net(next_obs)
predicted_next_obs = self.forward_dynamic_net(torch.cat([phi, action], dim=-1))
forward_error = (predicted_next_obs - next_phi.detach()).pow(2).mean()
utils.soft_update_params(self.feature_net, self.target_feature_net, 0.01)
return forward_error
class AutoEncoder(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.decoder = mlp(z_dim, hidden_dim, 'irelu', hidden_dim, 'irelu', obs_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
del next_obs
del action
phi = self.feature_net(obs)
predicted_obs = self.decoder(phi)
reconstruction_error = (predicted_obs - obs).pow(2).mean()
return reconstruction_error
class SVDSR(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.target_feature_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.target_mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(obs)
mu = self.mu_net(next_obs)
SR = torch.einsum("sd, td -> st", phi, mu)
with torch.no_grad():
target_phi = self.target_feature_net(next_obs)
target_mu = self.target_mu_net(next_obs)
target_SR = torch.einsum("sd, td -> st", target_phi, target_mu)
I = torch.eye(*SR.size(), device=SR.device)
off_diag = ~I.bool()
loss = - 2 * SR.diag().mean() + (SR - 0.99 * target_SR.detach())[off_diag].pow(2).mean()
# orthonormality loss
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
utils.soft_update_params(self.feature_net, self.target_feature_net, 0.01)
utils.soft_update_params(self.mu_net, self.target_mu_net, 0.01)
return loss
class SVDSRv2(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.target_feature_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.target_mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(next_obs)
mu = self.mu_net(obs)
SR = torch.einsum("sd, td -> st", mu, phi)
with torch.no_grad():
target_phi = self.target_feature_net(next_obs)
target_mu = self.target_mu_net(next_obs)
target_SR = torch.einsum("sd, td -> st", target_mu, target_phi)
I = torch.eye(*SR.size(), device=SR.device)
off_diag = ~I.bool()
loss = - 2 * SR.diag().mean() + (SR - 0.98 * target_SR.detach())[off_diag].pow(2).mean()
# orthonormality loss
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
utils.soft_update_params(self.feature_net, self.target_feature_net, 0.01)
utils.soft_update_params(self.mu_net, self.target_mu_net, 0.01)
return loss
class SVDP(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
self.mu_net = mlp(obs_dim + action_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(next_obs)
mu = self.mu_net(torch.cat([obs, action], dim=1))
P = torch.einsum("sd, td -> st", mu, phi)
I = torch.eye(*P.size(), device=P.device)
off_diag = ~I.bool()
loss = - 2 * P.diag().mean() + P[off_diag].pow(2).mean()
# orthonormality loss
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
return loss
class FBFeatures(FeatureLearner):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__(obs_dim, action_dim, z_dim, hidden_dim)
pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_sweep/2022.08.03/"
"161531_fb_ddpg_point_mass_maze_reach_top_right_offline/1/models/snapshot_1000000.pt")
print(f"loading {pt.resolve()}")
with pt.open("rb") as f:
payload = torch.load(f)
self.fb_agent = payload["agent"]
self.feature_net = self.fb_agent.backward_net
self.feature_net.eval()
class SFAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = SFAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
goal_dim = len(g)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
if cfg.feature_learner == "identity":
cfg.z_dim = goal_dim
self.cfg.z_dim = goal_dim
# create the network
if self.cfg.boltzmann:
self.actor: nn.Module = DiagGaussianActor(cfg.obs_type, self.obs_dim, cfg.z_dim, self.action_dim,
cfg.hidden_dim, cfg.log_std_bounds).to(cfg.device)
else:
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.successor_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# build up the target network
self.successor_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
learner = dict(icm=ICM, transition=TransitionModel, latent=TransitionLatentModel,
contrastive=ContrastiveFeature, autoencoder=AutoEncoder, lap=Laplacian,
random=FeatureLearner, FB=FBFeatures, svd_sr=SVDSR, svd_p=SVDP,
contrastivev2=ContrastiveFeaturev2, svd_srv2=SVDSRv2,
identity=Identity)[self.cfg.feature_learner]
self.feature_learner = learner(goal_dim, self.action_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# if cfg.debug:
# self.feature_learner: nn.Module = IdentityMap().to(cfg.device)
# self.feature_net = BackwardMap(cfg.obs_type, goal_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# load the weights into the target networks
self.successor_target_net.load_state_dict(self.successor_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.sf_opt = torch.optim.Adam(self.successor_net.parameters(), lr=cfg.lr)
self.phi_opt: tp.Optional[torch.optim.Adam] = None
if cfg.feature_learner not in ["random", "FB", "identity"]:
self.phi_opt = torch.optim.Adam(self.feature_learner.parameters(), lr=cfg.lr_coef * cfg.lr)
self.train()
self.successor_target_net.train()
self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
# self.online_cov = OnlineCov(mom=0.99, dim=self.cfg.z_dim).to(self.cfg.device)
# self.online_cov.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.successor_net]:
net.train(training)
if self.phi_opt is not None:
self.feature_learner.train()
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder", "actor"]
if self.cfg.init_sf:
names += ["successor_net", "feature_learner", "successor_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def precompute_cov(self, replay_loader: ReplayBuffer) -> None:
print("computing Cov of phi to be used at inference")
obs_list = []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.next_goal if self.cfg.goal_space is not None else batch.next_obs
if obs is None:
raise ValueError("Obs should never be None")
obs_list.append(obs)
batch_size += batch.next_obs.size(0)
obs = torch.cat(obs_list, 0)
self.inv_cov = self._compute_cov(obs)
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# cov = torch.matmul(phi.T, phi) / phi.shape[0]
# self.inv_cov = torch.linalg.pinv(cov)
def _compute_cov(self, goal: torch.Tensor) -> torch.Tensor:
# compute inverse of cov of phi
with torch.no_grad():
phi = self.feature_learner.feature_net(goal)
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.linalg.pinv(cov)
return inv_cov
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
# assert self.cfg.feature_learner in ["FB"]
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.feature_learner.feature_net(desired_goal)
z = torch.matmul(z, self.inv_cov) # 1 x z_dim
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
with torch.no_grad():
phi = self.feature_learner.feature_net(obs)
z = torch.linalg.lstsq(phi, reward).solution # z_dim x 1
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=0) # be careful to the dimension
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def sample_z(self, size):
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32)
z = math.sqrt(self.cfg.z_dim) * F.normalize(gaussian_rdv, dim=1)
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
if self.cfg.boltzmann:
dist = self.actor(h, z)
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def update_sf(
self,
obs: torch.Tensor,
goal: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
future_goal: tp.Optional[torch.Tensor],
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
if self.cfg.boltzmann:
dist = self.actor(next_obs, z)
next_action = dist.sample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
next_F1, next_F2 = self.successor_target_net(next_obs, z, next_action) # batch x z_dim
target_phi = self.feature_learner.feature_net(next_goal).detach() # batch x z_dim
next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', next_Fi, z) for next_Fi in [next_F1, next_F2]]
next_F = torch.where((next_Q1 < next_Q2).reshape(-1, 1), next_F1, next_F2)
target_F = target_phi + discount * next_F
F1, F2 = self.successor_net(obs, z, action)
if not self.cfg.q_loss:
# compute SF loss
sf_loss = F.mse_loss(F1, target_F) + F.mse_loss(F2, target_F)
else:
# alternative loss
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
target_Q = torch.einsum('sd, sd -> s', target_F, z)
sf_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
# sf_loss /= self.cfg.z_dim
# compute feature loss
phi_loss = self.feature_learner(obs=goal, action=action, next_obs=next_goal, future_obs=future_goal)
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_F'] = target_F.mean().item()
metrics['F1'] = F1.mean().item()
metrics['phi'] = target_phi.mean().item()
metrics['phi_norm'] = torch.norm(target_phi, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['sf_loss'] = sf_loss.item()
if phi_loss is not None:
metrics['phi_loss'] = phi_loss.item()
if isinstance(self.sf_opt, torch.optim.Adam):
metrics["sf_opt_lr"] = self.sf_opt.param_groups[0]["lr"]
# if self.cfg.goal_space in ["simplified_walker", "simplified_quadruped"]:
# metrics['max_velocity'] = goal_prime[:, -1].max().item()
# optimize SF
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.sf_opt.zero_grad(set_to_none=True)
sf_loss.backward()
self.sf_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
# optimise phi
if self.phi_opt is not None:
self.phi_opt.zero_grad(set_to_none=True)
phi_loss.backward()
self.phi_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if self.cfg.boltzmann:
dist = self.actor(obs, z)
action = dist.rsample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.successor_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
Q = torch.min(Q1, Q2)
actor_loss = (self.cfg.temp * log_prob - Q).mean() if self.cfg.boltzmann else -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
# metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def aug_and_encode(self, obs: torch.Tensor) -> torch.Tensor:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
for _ in range(self.cfg.num_sf_updates):
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = goal = batch.obs
action = batch.action
discount = batch.discount
next_obs = next_goal = batch.next_obs
future_goal = batch.future_obs
if self.cfg.goal_space:
assert batch.goal is not None
assert batch.next_goal is not None
goal = batch.goal
next_goal = batch.next_goal
future_goal = batch.future_goal
z = self.sample_z(self.cfg.batch_size).to(self.cfg.device)
if not z.shape[-1] == self.cfg.z_dim:
raise RuntimeError("There's something wrong with the logic here")
if self.cfg.mix_ratio > 0:
perm = torch.randperm(self.cfg.batch_size)
desired_goal = next_goal[perm]
with torch.no_grad():
phi = self.feature_learner.feature_net(desired_goal)
# compute inverse of cov of phi
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.linalg.pinv(cov)
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
with torch.no_grad():
new_z = phi[mix_idxs]
new_z = torch.matmul(new_z, inv_cov) # batch_size x z_dim
new_z = math.sqrt(self.cfg.z_dim) * F.normalize(new_z, dim=1)
z[mix_idxs] = new_z
metrics.update(self.update_sf(obs=obs, goal=goal, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, future_goal=future_goal,
z=z, step=step))
# update actor
metrics.update(self.update_actor(obs, z, step))
# update critic target
utils.soft_update_params(self.successor_net, self.successor_target_net,
self.cfg.sf_target_tau)
# update inv cov
# if step % self.cfg.update_cov_every_step == 0:
# logger.info("update online cov")
# obs_list = list()
# batch_size = 0
# while batch_size < 10000:
# batch = next(replay_loader)
# batch = batch.to(self.cfg.device)
# obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
# batch_size += batch.next_obs.size(0)
# obs = torch.cat(obs_list, 0)
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# self.inv_cov = torch.inverse(self.online_cov(phi))
return metrics
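# A minimal, self-contained sketch of the task-inference step performed by
# infer_meta_from_obs_and_rewards above: given features phi(s') and observed
# rewards r, solve the least-squares problem r ~ <phi(s'), z> for z, then
# rescale z to the sphere of radius sqrt(z_dim). The helper name `_toy_infer_z`
# is illustrative only and is not referenced by the agent.
def _toy_infer_z(phi: torch.Tensor, reward: torch.Tensor, z_dim: int) -> torch.Tensor:
    # phi: (batch, z_dim), reward: (batch, 1)
    z = torch.linalg.lstsq(phi, reward).solution  # (z_dim, 1)
    return math.sqrt(z_dim) * F.normalize(z, dim=0)
# Example with random data: _toy_infer_z(torch.randn(128, 50), torch.randn(128, 1), 50)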
|
controllable_agent-main
|
url_benchmark/agent/sf.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import dataclasses
import typing as tp
import torch
from hydra.core.config_store import ConfigStore
from .sf import SFAgent, SFAgentConfig
@dataclasses.dataclass
class DiscreteSFAgentConfig(SFAgentConfig):
# @package agent
_target_: str = "url_benchmark.agent.discrete_sf.DiscreteSFAgent"
name: str = "discrete_sf"
cs = ConfigStore.instance()
cs.store(group="agent", name="discrete_sf", node=DiscreteSFAgentConfig)
class DiscreteSFAgent(SFAgent):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
num_xy = 5
x = y = torch.linspace(-1, 1, num_xy, dtype=torch.float32, device=self.cfg.device)
XX, YY = torch.meshgrid(x, y)
X = XX.reshape(-1, 1)
Y = YY.reshape(-1, 1)
self.ACTION_GRID = torch.cat([X, Y], dim=1)
def greedy_action(self, obs, z):
OBS = obs.repeat(1, self.ACTION_GRID.shape[0]).reshape(self.ACTION_GRID.shape[0] * obs.shape[0], obs.shape[1])
Z = z.repeat(1, self.ACTION_GRID.shape[0]).reshape(self.ACTION_GRID.shape[0] * z.shape[0], z.shape[1])
ACTION = self.ACTION_GRID.repeat(obs.shape[0], 1)
F1, F2 = self.successor_net(OBS, Z, ACTION)
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, Z) for Fi in [F1, F2]]
Q = torch.min(Q1, Q2)
max_idx = Q.reshape(obs.shape[0], self.ACTION_GRID.shape[0]).max(dim=1)[1]
return self.ACTION_GRID[max_idx]
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
obs = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
action = self.greedy_action(obs, z)
if not eval_mode:
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
return metrics
def update_sf( # type: ignore
self,
obs: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
future_obs: tp.Optional[torch.Tensor],
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
next_action = self.greedy_action(next_obs, z)
target_F1, target_F2 = self.successor_target_net(next_obs, z, next_action) # batch x z_dim
target_phi = self.feature_learner.feature_net(next_goal).detach() # batch x z_dim
target_F = torch.min(target_F1, target_F2)
target_F = target_phi + discount * target_F
# compute SF loss
F1, F2 = self.successor_net(obs, z, action)
# sf_loss = torch.norm(F1 - target_F, dim=-1, p='fro').mean()
# sf_loss += torch.norm(F2 - target_F, dim=-1, p='fro').mean()
sf_loss = (F1 - target_F).pow(2).mean()
sf_loss += (F2 - target_F).pow(2).mean()
# compute feature loss
phi_loss = self.feature_learner(obs=obs, action=action, next_obs=next_obs, future_obs=future_obs)
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_F'] = target_F.mean().item()
metrics['F1'] = F1.mean().item()
metrics['phi'] = target_phi.mean().item()
metrics['phi_norm'] = torch.norm(target_phi, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['sf_loss'] = sf_loss.item()
if phi_loss is not None:
metrics['phi_loss'] = phi_loss.item()
if isinstance(self.sf_opt, torch.optim.Adam):
metrics["sf_opt_lr"] = self.sf_opt.param_groups[0]["lr"]
# if self.cfg.goal_space in ["simplified_walker", "simplified_quadruped"]:
# metrics['max_velocity'] = goal_prime[:, -1].max().item()
# optimize SF
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.sf_opt.zero_grad(set_to_none=True)
sf_loss.backward()
self.sf_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
# optimise phi
if self.phi_opt is not None:
self.phi_opt.zero_grad(set_to_none=True)
phi_loss.backward()
self.phi_opt.step()
return metrics
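# A minimal, self-contained sketch (not used by the agent) of the broadcasting
# pattern inside greedy_action above: every observation/z pair is matched with
# every action of a fixed grid, the Q-values are reshaped to
# (batch, num_actions) and the argmax action is selected. `q_fn` stands for a
# hypothetical callable mapping (OBS, Z, ACTION) to a flat tensor of Q-values;
# the helper name `_toy_greedy_action` is illustrative only.
def _toy_greedy_action(q_fn, obs: torch.Tensor, z: torch.Tensor,
                       grid: torch.Tensor) -> torch.Tensor:
    num_actions = grid.shape[0]
    OBS = obs.repeat_interleave(num_actions, dim=0)  # (batch * num_actions, obs_dim)
    Z = z.repeat_interleave(num_actions, dim=0)  # (batch * num_actions, z_dim)
    ACTION = grid.repeat(obs.shape[0], 1)  # (batch * num_actions, action_dim)
    Q = q_fn(OBS, Z, ACTION).reshape(obs.shape[0], num_actions)
    return grid[Q.argmax(dim=1)]  # (batch, action_dim)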
|
controllable_agent-main
|
url_benchmark/agent/discrete_sf.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
import dataclasses
from collections import OrderedDict
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from url_benchmark import utils
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from .ddpg import MetaDict
from .ddpg import Encoder
from .fb_modules import Actor, ForwardMap, mlp
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark import goals as _goals
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class APSAgentConfig:
_target_: str = "url_benchmark.agent.new_aps.NEWAPSAgent"
name: str = "new_aps"
reward_free: bool = omegaconf.II("reward_free")
custom_reward: tp.Optional[str] = omegaconf.II("custom_reward")
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
sf_target_tau: float = 0.01
    update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING
hidden_dim: int = 1024
feature_dim: int = 512
backward_hidden_dim: int = 512
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" # "0.2"
    stddev_clip: float = 0.3 # 1
nstep: int = 1
batch_size: int = 512 # 256 for pixels
init_critic: bool = True
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
preprocess: bool = False
update_encoder: bool = omegaconf.II("update_encoder")
z_dim: int = 10
update_z_every_step: int = 100
knn_rms: bool = True
knn_k: int = 12
knn_avg: bool = True
knn_clip: float = 0.0001
num_init_steps: int = 4096 # set to ${num_train_frames} to disable finetune policy parameters
num_inference_steps: int = 5120
add_trunk: bool = False
lr_coef: float = 1
future_ratio: float = 0
cs = ConfigStore.instance()
cs.store(group="agent", name="new_aps", node=APSAgentConfig)
class FeatureNet(nn.Module):
def __init__(self, obs_dim, z_dim, hidden_dim) -> None:
super().__init__()
self.net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs, norm=True):
phi = self.net(obs)
return F.normalize(phi, dim=1) if norm else phi
class FeatureLearner(nn.Module):
def __init__(self, obs_dim, z_dim, hidden_dim) -> None:
super().__init__()
self.feature_net = FeatureNet(obs_dim, z_dim, hidden_dim)
def forward(self, obs: torch.Tensor, z: torch.Tensor):
"""MLE loss"""
phi = self.feature_net(obs)
loss = -torch.einsum("bd,bd->b", phi, z).mean()
return loss
class NEWAPSAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
) -> None:
cfg = APSAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
goal_dim = len(g)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
# create the network
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.successor_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# build up the target network
self.successor_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.feature_learner = FeatureLearner(goal_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# load the weights into the target networks
self.successor_target_net.load_state_dict(self.successor_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.sf_opt = torch.optim.Adam(self.successor_net.parameters(), lr=cfg.lr)
self.phi_opt = torch.optim.Adam(self.feature_learner.parameters(), lr=cfg.lr_coef * cfg.lr)
self.train()
self.successor_target_net.train()
# particle-based entropy
rms = utils.RMS(self.cfg.device)
self.pbe = utils.PBE(rms, cfg.knn_clip, cfg.knn_k, cfg.knn_avg, cfg.knn_rms,
cfg.device)
self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.successor_net, self.feature_learner]:
net.train(training)
def sample_z(self, size):
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32)
z = F.normalize(gaussian_rdv, dim=1)
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def precompute_cov(self, replay_loader: ReplayBuffer) -> None:
print("computing Cov of phi to be used at inference")
        obs_list = []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.next_goal if self.cfg.goal_space is not None else batch.next_obs
if obs is None:
raise ValueError("Obs should never be None")
obs_list.append(obs)
batch_size += batch.next_obs.size(0)
obs = torch.cat(obs_list, 0)
self.inv_cov = self._compute_cov(obs)
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# cov = torch.matmul(phi.T, phi) / phi.shape[0]
# self.inv_cov = torch.linalg.pinv(cov)
def _compute_cov(self, goal: torch.Tensor) -> torch.Tensor:
# compute inverse of cov of phi
with torch.no_grad():
phi = self.feature_learner.feature_net(goal)
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.inverse(cov)
return inv_cov
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.feature_learner.feature_net(desired_goal)
z = torch.matmul(z, self.inv_cov) # 1 x z_dim
z = F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
with torch.no_grad():
phi = self.feature_learner.feature_net(obs)
z = torch.linalg.lstsq(phi, reward).solution # z_dim x 1
z = F.normalize(z, dim=0) # be careful to the dimension
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def update_phi(self, obs, z, step) -> tp.Dict[str, tp.Any]:
metrics: tp.Dict[str, float] = {}
loss = self.feature_learner(obs, z)
self.phi_opt.zero_grad(set_to_none=True)
loss.backward()
self.phi_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['phi_loss'] = loss.item()
return metrics
def compute_intrinsic_reward(self, next_obs, z, step) -> tp.Tuple[tp.Any, tp.Any]:
# maxent reward
with torch.no_grad():
phi = self.feature_learner.feature_net(next_obs, norm=False)
reward = self.pbe(phi)
entropy_reward = reward.reshape(-1, 1)
# successor feature reward
phi = F.normalize(phi, dim=1)
diayn_reward = torch.einsum("bi,bi->b", phi, z).reshape(-1, 1)
return entropy_reward, diayn_reward
def update_critic(self,
obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
z: torch.Tensor,
step: int) -> tp.Dict[str, tp.Any]:
"""diff is critic takes task as input"""
metrics: tp.Dict[str, float] = {}
# compute target critic
with torch.no_grad():
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
next_F1, next_F2 = self.successor_target_net(next_obs, z, next_action) # batch x z_dim
next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', next_Fi, z) for next_Fi in [next_F1, next_F2]]
next_Q = torch.min(next_Q1, next_Q2)
target_Q = reward + discount * next_Q.reshape(-1, 1)
target_Q = target_Q.squeeze(1)
F1, F2 = self.successor_net(obs, z, action)
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
sf_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
# sf_loss /= self.cfg.z_dim
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_Q'] = target_Q.mean().item()
metrics['Q1'] = Q1.mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['sf_loss'] = sf_loss.item()
# optimize SF
self.sf_opt.zero_grad(set_to_none=True)
sf_loss.backward()
self.sf_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.successor_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.obs
action = batch.action
discount = batch.discount
reward = batch.reward
next_obs = next_goal = batch.next_obs
if self.cfg.goal_space is not None:
assert batch.next_goal is not None
next_goal = batch.next_goal
z = batch.meta["z"]
assert z.shape[-1] == self.cfg.z_dim
if self.cfg.reward_free:
# freeze successor features at finetuning phase
metrics.update(self.update_phi(next_goal, z, step))
with torch.no_grad():
entropy_reward, diayn_reward = self.compute_intrinsic_reward(next_goal, z, step)
intrinsic_reward = entropy_reward + diayn_reward
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['intrinsic_reward'] = intrinsic_reward.mean().item()
metrics['entropy_reward'] = entropy_reward.mean().item()
metrics['diayn_reward'] = diayn_reward.mean().item()
reward = intrinsic_reward
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['extrinsic_reward'] = batch.reward.mean().item()
# hindsight replay
if self.cfg.future_ratio > 0:
future_goal = batch.future_goal if self.cfg.goal_space else batch.future_obs
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)
with torch.no_grad():
phi = self.feature_learner.feature_net(future_goal)
# compute inverse of cov of phi
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.linalg.pinv(cov)
new_z = phi[future_idxs]
new_z = torch.matmul(new_z, inv_cov) # batch_size x z_dim
new_z = F.normalize(new_z, dim=1)
z[future_idxs] = new_z
# update critic
metrics.update(
self.update_critic(obs=obs, action=action, reward=reward, discount=discount,
next_obs=next_obs, z=z, step=step))
# update actor
metrics.update(self.update_actor(obs=obs, z=z, step=step))
# update critic target
utils.soft_update_params(self.successor_net, self.successor_target_net,
self.cfg.sf_target_tau)
return metrics
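# A minimal, self-contained sketch (not used by the agent) of the two
# intrinsic-reward terms combined in update() above: a particle-based entropy
# bonus (distance to the k-th nearest neighbour of phi within the batch, a
# simplified stand-in for utils.PBE) plus the successor-feature term <phi, z>.
# Assumes the batch is larger than k; the helper name `_toy_intrinsic_reward`
# is illustrative only.
def _toy_intrinsic_reward(phi: torch.Tensor, z: torch.Tensor, k: int = 12) -> torch.Tensor:
    dists = torch.cdist(phi, phi)  # (batch, batch) pairwise Euclidean distances
    knn_dist = dists.topk(k + 1, dim=1, largest=False).values[:, -1]  # k-th neighbour, skipping self
    entropy_reward = torch.log(1.0 + knn_dist).reshape(-1, 1)
    sf_reward = torch.einsum("bd,bd->b", F.normalize(phi, dim=1), z).reshape(-1, 1)
    return entropy_reward + sf_reward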
|
controllable_agent-main
|
url_benchmark/agent/new_aps.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
from url_benchmark import utils
# from url_benchmark import replay_buffer as rb
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
from .ddpg import MetaDict
from .fb_modules import IdentityMap
from .ddpg import Encoder
from .fb_modules import BackwardMap, mlp
logger = logging.getLogger(__name__)
from .fb_ddpg import FBDDPGAgentConfig
@dataclasses.dataclass
class DiscreteFBAgentConfig(FBDDPGAgentConfig):
# @package agent
_target_: str = "url_benchmark.agent.discrete_fb.DiscreteFBAgent"
name: str = "discrete_fb"
preprocess: bool = False
expl_eps: float = 0.2
    boltzmann: bool = True
    temp: float = 100
cs = ConfigStore.instance()
cs.store(group="agent", name="discrete_fb", node=DiscreteFBAgentConfig)
class ForwardMap(nn.Module):
""" forward representation class"""
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.action_dim = action_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_net = mlp(self.obs_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh",
hidden_dim, "irelu",
hidden_dim, "irelu")
feature_dim = hidden_dim
seq = [feature_dim, hidden_dim, "irelu", self.z_dim * self.action_dim]
self.F1 = mlp(*seq)
self.F2 = mlp(*seq)
self.apply(utils.weight_init)
def forward(self, obs, z):
assert z.shape[-1] == self.z_dim
if self.preprocess:
            obs = self.obs_net(obs)
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
h = torch.cat([obs, obs_z], dim=-1)
else:
h = torch.cat([obs, z], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
F1 = self.F1(h)
F2 = self.F2(h)
return F1.reshape(-1, self.z_dim, self.action_dim), F2.reshape(-1, self.z_dim, self.action_dim)
class DiscreteFBAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = DiscreteFBAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
goal_dim = _goals.get_goal_space_dim(cfg.goal_space)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
self.forward_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
if cfg.debug:
self.backward_net: nn.Module = IdentityMap().to(cfg.device)
self.backward_target_net: nn.Module = IdentityMap().to(cfg.device)
else:
self.backward_net = BackwardMap(goal_dim, cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(
cfg.device)
self.backward_target_net = BackwardMap(goal_dim,
cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
# build up the target network
self.forward_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# load the weights into the target networks
self.forward_target_net.load_state_dict(self.forward_net.state_dict())
self.backward_target_net.load_state_dict(self.backward_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.fb_opt = torch.optim.Adam([{'params': self.forward_net.parameters()}, # type: ignore
{'params': self.backward_net.parameters(), 'lr': cfg.lr_coef * cfg.lr}],
lr=cfg.lr)
self.train()
self.forward_target_net.train()
self.backward_target_net.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.forward_net, self.backward_net]:
net.train(training)
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder"]
if self.cfg.init_fb:
names += ["forward_net", "backward_net", "backward_target_net", "forward_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.backward_net(desired_goal)
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
# filter out small reward
# pdb.set_trace()
# idx = torch.where(reward >= torch.quantile(reward, 0.99))[0]
# obs = obs[idx]
# reward = reward[idx]
with torch.no_grad():
B = self.backward_net(obs)
z = torch.matmul(reward.T, B) / reward.shape[0]
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def sample_z(self, size, device: str = "cpu"):
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32, device=device)
gaussian_rdv = F.normalize(gaussian_rdv, dim=1)
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * gaussian_rdv
else:
uniform_rdv = torch.rand((size, self.cfg.z_dim), dtype=torch.float32, device=device)
z = np.sqrt(self.cfg.z_dim) * uniform_rdv * gaussian_rdv
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0 and np.random.rand() < self.cfg.update_z_proba:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device, dtype=torch.float32).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
F1, F2 = self.forward_net(h, z)
Q1, Q2 = [torch.einsum('sda, sd -> sa', Fi, z) for Fi in [F1, F2]]
Q = torch.min(Q1, Q2)
action = Q.max(1)[1]
if not eval_mode:
if step < self.cfg.num_expl_steps:
action = torch.randint_like(action, self.action_dim)
else:
action = torch.randint_like(action, self.action_dim) \
if np.random.rand() < self.cfg.expl_eps else action
return action.item()
def update_fb(
self,
obs: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
# compute greedy action
target_F1, target_F2 = self.forward_target_net(next_obs, z)
next_Q1, next_Q2 = [torch.einsum('sda, sd -> sa', Fi, z) for Fi in [target_F1, target_F2]]
next_Q = torch.min(next_Q1, next_Q2)
if self.cfg.boltzmann:
pi = F.softmax(next_Q / self.cfg.temp, dim=-1)
target_F1, target_F2 = [torch.einsum("sa, sda -> sd", pi, Fi) for Fi in [target_F1, target_F2]] # batch x z_dim
next_Q = torch.einsum("sa, sa -> s", pi, next_Q)
else:
next_action = next_Q.max(1)[1]
next_idx = next_action[:, None].repeat(1, self.cfg.z_dim)[:, :, None]
target_F1, target_F2 = [Fi.gather(-1, next_idx).squeeze() for Fi in [target_F1, target_F2]] # batch x z_dim
next_Q = next_Q.max(1)[0]
target_B = self.backward_target_net(next_goal) # batch x z_dim
target_M1, target_M2 = [torch.einsum('sd, td -> st', Fi, target_B) \
for Fi in [target_F1, target_F2]] # batch x batch
target_M = torch.min(target_M1, target_M2)
# compute FB loss
idxs = action.repeat(1, self.cfg.z_dim)[:, :, None]
F1, F2 = [Fi.gather(-1, idxs).squeeze() for Fi in self.forward_net(obs, z)]
B = self.backward_net(next_goal)
M1 = torch.einsum('sd, td -> st', F1, B) # batch x batch
M2 = torch.einsum('sd, td -> st', F2, B) # batch x batch
I = torch.eye(*M1.size(), device=M1.device)
off_diag = ~I.bool()
fb_offdiag: tp.Any = 0.5 * sum((M - discount * target_M)[off_diag].pow(2).mean() for M in [M1, M2])
fb_diag: tp.Any = -sum(M.diag().mean() for M in [M1, M2])
fb_loss = fb_offdiag + fb_diag
# Q LOSS
if self.cfg.q_loss:
with torch.no_grad():
# next_Q1, nextQ2 = [torch.einsum('sd, sd -> s', target_Fi, z) for target_Fi in [target_F1, target_F2]]
# next_Q = torch.min(next_Q1, nextQ2)
cov = torch.matmul(B.T, B) / B.shape[0]
inv_cov = torch.linalg.pinv(cov)
implicit_reward = (torch.matmul(B, inv_cov) * z).sum(dim=1) # batch_size
target_Q = implicit_reward.detach() + discount.squeeze(1) * next_Q # batch_size
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
q_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
fb_loss += self.cfg.q_loss_coef * q_loss
# ORTHONORMALITY LOSS FOR BACKWARD EMBEDDING
Cov = torch.matmul(B, B.T)
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
fb_loss += self.cfg.ortho_coef * orth_loss
# Cov = torch.cov(B.T) # Vicreg loss
# var_loss = F.relu(1 - Cov.diag().clamp(1e-4, 1).sqrt()).mean() # eps avoids inf. sqrt gradient at 0
# cov_loss = 2 * torch.triu(Cov, diagonal=1).pow(2).mean() # 2x upper triangular part
# orth_loss = var_loss + cov_loss
# fb_loss += self.cfg.ortho_coef * orth_loss
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_M'] = target_M.mean().item()
metrics['M1'] = M1.mean().item()
metrics['F1'] = F1.mean().item()
metrics['B'] = B.mean().item()
metrics['B_norm'] = torch.norm(B, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['fb_loss'] = fb_loss.item()
metrics['fb_diag'] = fb_diag.item()
metrics['fb_offdiag'] = fb_offdiag.item()
if self.cfg.q_loss:
metrics['q_loss'] = q_loss.item()
metrics['orth_loss'] = orth_loss.item()
metrics['orth_loss_diag'] = orth_loss_diag.item()
metrics['orth_loss_offdiag'] = orth_loss_offdiag.item()
if self.cfg.q_loss:
metrics['q_loss'] = q_loss.item()
eye_diff = torch.matmul(B.T, B) / B.shape[0] - torch.eye(B.shape[1], device=B.device)
metrics['orth_linf'] = torch.max(torch.abs(eye_diff)).item()
metrics['orth_l2'] = eye_diff.norm().item() / math.sqrt(B.shape[1])
if isinstance(self.fb_opt, torch.optim.Adam):
metrics["fb_opt_lr"] = self.fb_opt.param_groups[0]["lr"]
# optimize FB
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.fb_opt.zero_grad(set_to_none=True)
fb_loss.backward()
self.fb_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
return metrics
def aug_and_encode(self, obs: torch.Tensor) -> torch.Tensor:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
# pdb.set_trace()
obs = batch.obs
action = batch.action.type(torch.int64)
discount = batch.discount
next_obs = next_goal = batch.next_obs
if self.cfg.goal_space is not None:
assert batch.next_goal is not None
next_goal = batch.next_goal
# if len(batch.meta) == 1 and batch.meta[0].shape[-1] == self.cfg.z_dim:
# z = batch.meta[0]
# invalid = torch.linalg.norm(z, dim=1) < 1e-15
# if sum(invalid):
# z[invalid, :] = self.sample_z(sum(invalid)).to(self.cfg.device)
# else:
z = self.sample_z(self.cfg.batch_size, device=self.cfg.device)
if not z.shape[-1] == self.cfg.z_dim:
raise RuntimeError("There's something wrong with the logic here")
# obs = self.aug_and_encode(batch.obs)
# next_obs = self.aug_and_encode(batch.next_obs)
# if not self.cfg.update_encoder:
# obs = obs.detach()
# next_obs = next_obs.detach()
backward_input = batch.obs
future_goal = batch.future_obs
if self.cfg.goal_space is not None:
assert batch.goal is not None
backward_input = batch.goal
future_goal = batch.future_goal
perm = torch.randperm(self.cfg.batch_size)
backward_input = backward_input[perm]
if self.cfg.mix_ratio > 0:
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
if not self.cfg.rand_weight:
with torch.no_grad():
mix_z = self.backward_net(backward_input[mix_idxs]).detach()
else:
# generate random weight
weight = torch.rand(size=(mix_idxs.shape[0], self.cfg.batch_size)).to(self.cfg.device)
weight = F.normalize(weight, dim=1)
uniform_rdv = torch.rand(mix_idxs.shape[0], 1).to(self.cfg.device)
weight = uniform_rdv * weight
with torch.no_grad():
mix_z = torch.matmul(weight, self.backward_net(backward_input).detach())
if self.cfg.norm_z:
mix_z = math.sqrt(self.cfg.z_dim) * F.normalize(mix_z, dim=1)
z[mix_idxs] = mix_z
# hindsight replay
if self.cfg.future_ratio > 0:
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)
z[future_idxs] = self.backward_net(future_goal[future_idxs]).detach()
metrics.update(self.update_fb(obs=obs, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, z=z, step=step))
# update critic target
utils.soft_update_params(self.forward_net, self.forward_target_net,
self.cfg.fb_target_tau)
utils.soft_update_params(self.backward_net, self.backward_target_net,
self.cfg.fb_target_tau)
return metrics
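# A minimal, self-contained sketch (not part of the agent) of the core FB
# measure loss assembled in update_fb() above: with forward embeddings F_mat,
# backward embeddings B_mat and a bootstrapped target measure target_M, the
# diagonal of M = F B^T (pairs coming from the same transition) is pushed up
# while the off-diagonal entries regress the discounted target. The helper
# name `_toy_fb_loss` is illustrative only.
def _toy_fb_loss(F_mat: torch.Tensor, B_mat: torch.Tensor,
                 target_M: torch.Tensor, discount: torch.Tensor) -> torch.Tensor:
    M = torch.einsum("sd, td -> st", F_mat, B_mat)  # batch x batch
    off_diag = ~torch.eye(*M.size(), device=M.device).bool()
    return 0.5 * (M - discount * target_M)[off_diag].pow(2).mean() - M.diag().mean()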
|
controllable_agent-main
|
url_benchmark/agent/discrete_fb.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import typing as tp
import torch
from torch import nn
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from .ddpg import DDPGAgent
from .ddpg import DDPGAgentConfig as _BaseConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, Tuple
@dataclasses.dataclass
class ICMAPTAgentConfig(_BaseConfig):
_target_: str = "url_benchmark.agent.icm_apt.ICMAPTAgent"
name: str = "icm_apt"
update_encoder: bool = omegaconf.II("update_encoder")
icm_rep_dim: int = 512
icm_scale: float = 1.0
knn_rms: bool = False
knn_k: int = 12
knn_avg: bool = True
knn_clip: float = 0.0
cs = ConfigStore.instance()
cs.store(group="agent", name="icm_apt", node=ICMAPTAgentConfig)
class ICM(nn.Module):
"""
Same as ICM, with a trunk to save memory for KNN
"""
def __init__(self, obs_dim, action_dim, hidden_dim, icm_rep_dim) -> None:
super().__init__()
self.trunk = nn.Sequential(nn.Linear(obs_dim, icm_rep_dim),
nn.LayerNorm(icm_rep_dim), nn.Tanh())
self.forward_net = nn.Sequential(
nn.Linear(icm_rep_dim + action_dim, hidden_dim), nn.ReLU(),
nn.Linear(hidden_dim, icm_rep_dim))
self.backward_net = nn.Sequential(
nn.Linear(2 * icm_rep_dim, hidden_dim), nn.ReLU(),
nn.Linear(hidden_dim, action_dim), nn.Tanh())
self.apply(utils.weight_init)
def forward(self, obs, action, next_obs) -> Tuple[Any, Any]:
assert obs.shape[0] == next_obs.shape[0]
assert obs.shape[0] == action.shape[0]
obs = self.trunk(obs)
next_obs = self.trunk(next_obs)
next_obs_hat = self.forward_net(torch.cat([obs, action], dim=-1))
action_hat = self.backward_net(torch.cat([obs, next_obs], dim=-1))
forward_error = torch.norm(next_obs - next_obs_hat,
dim=-1,
p=2,
keepdim=True)
backward_error = torch.norm(action - action_hat,
dim=-1,
p=2,
keepdim=True)
return forward_error, backward_error
def get_rep(self, obs, action) -> Any:
rep = self.trunk(obs)
return rep
class ICMAPTAgent(DDPGAgent):
def __init__(self, **kwargs) -> None:
cfg = ICMAPTAgentConfig(**kwargs)
super().__init__(**kwargs)
self.cfg = cfg # override base ddpg cfg type
self.icm = ICM(self.obs_dim, self.action_dim, self.hidden_dim,
cfg.icm_rep_dim).to(self.device)
# optimizers
self.icm_opt = torch.optim.Adam(self.icm.parameters(), lr=self.lr)
self.icm.train()
# particle-based entropy
rms = utils.RMS(self.device)
self.pbe = utils.PBE(rms, cfg.knn_clip, cfg.knn_k, cfg.knn_avg, cfg.knn_rms,
self.device)
def update_icm(self, obs, action, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
forward_error, backward_error = self.icm(obs, action, next_obs)
loss = forward_error.mean() + backward_error.mean()
self.icm_opt.zero_grad()
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.icm_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['icm_loss'] = loss.item()
return metrics
def compute_intr_reward(self, obs, action, next_obs, step) -> Any:
rep = self.icm.get_rep(obs, action)
reward = self.pbe(rep)
reward = reward.reshape(-1, 1)
return reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, extr_reward, discount, next_obs = batch.to(self.device).unpack()
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
metrics.update(self.update_icm(obs, action, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(obs, action, next_obs,
step)
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
            if self.reward_free:
                metrics['intr_reward'] = intr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
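# A minimal sketch (not used by this agent, which instead derives its intrinsic
# reward from the particle-based entropy of the ICM representation) of the
# classic ICM curiosity signal, where the forward-model prediction error itself
# is the reward. The helper name `_toy_icm_curiosity` is illustrative only.
def _toy_icm_curiosity(icm: ICM, obs: torch.Tensor, action: torch.Tensor,
                       next_obs: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    with torch.no_grad():
        forward_error, _ = icm(obs, action, next_obs)
    return scale * forward_error  # (batch, 1)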
|
controllable_agent-main
|
url_benchmark/agent/icm_apt.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from dm_env import specs
from url_benchmark import utils
# from url_benchmark import replay_buffer as rb
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
from .ddpg import MetaDict
from .fb_modules import IdentityMap
from .ddpg import Encoder
from .fb_modules import Actor, DiagGaussianActor, ForwardMap, BackwardMap, OnlineCov
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class FBDDPGAgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.fb_ddpg.FBDDPGAgent"
name: str = "fb_ddpg"
# reward_free: ${reward_free}
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
lr_coef: float = 1
fb_target_tau: float = 0.01 # 0.001-0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING # ??? # to be specified later
num_inference_steps: int = 5120
hidden_dim: int = 1024 # 128, 2048
backward_hidden_dim: int = 526 # 512
feature_dim: int = 512 # 128, 1024
z_dim: int = 50 # 100
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" #
stddev_clip: float = 0.3 # 1
update_z_every_step: int = 300
update_z_proba: float = 1.0
nstep: int = 1
batch_size: int = 1024 # 512
init_fb: bool = True
update_encoder: bool = omegaconf.II("update_encoder") # ${update_encoder}
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
ortho_coef: float = 1.0 # 0.01-10
log_std_bounds: tp.Tuple[float, float] = (-5, 2) # param for DiagGaussianActor
temp: float = 1 # temperature for DiagGaussianActor
boltzmann: bool = False # set to true for DiagGaussianActor
debug: bool = False
future_ratio: float = 0.0
mix_ratio: float = 0.5 # 0-1
rand_weight: bool = False # True, False
preprocess: bool = True
norm_z: bool = True
q_loss: bool = False
q_loss_coef: float = 0.01
additional_metric: bool = False
add_trunk: bool = False
cs = ConfigStore.instance()
cs.store(group="agent", name="fb_ddpg", node=FBDDPGAgentConfig)
class FBDDPGAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = FBDDPGAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
goal_dim = _goals.get_goal_space_dim(cfg.goal_space)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
# create the network
if self.cfg.boltzmann:
self.actor: nn.Module = DiagGaussianActor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.hidden_dim, cfg.log_std_bounds).to(cfg.device)
else:
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.forward_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
if cfg.debug:
self.backward_net: nn.Module = IdentityMap().to(cfg.device)
self.backward_target_net: nn.Module = IdentityMap().to(cfg.device)
else:
self.backward_net = BackwardMap(goal_dim, cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
self.backward_target_net = BackwardMap(goal_dim,
cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
# build up the target network
self.forward_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# load the weights into the target networks
self.forward_target_net.load_state_dict(self.forward_net.state_dict())
self.backward_target_net.load_state_dict(self.backward_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
# params = [p for net in [self.forward_net, self.backward_net] for p in net.parameters()]
# self.fb_opt = torch.optim.Adam(params, lr=cfg.lr)
self.fb_opt = torch.optim.Adam([{'params': self.forward_net.parameters()}, # type: ignore
{'params': self.backward_net.parameters(), 'lr': cfg.lr_coef * cfg.lr}],
lr=cfg.lr)
self.train()
self.forward_target_net.train()
self.backward_target_net.train()
self.actor_success: tp.List[float] = [] # only for debugging, can be removed eventually
# self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
# self.online_cov = OnlineCov(mom=0.99, dim=self.cfg.z_dim).to(self.cfg.device)
# self.online_cov.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.forward_net, self.backward_net]:
net.train(training)
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder", "actor"]
if self.cfg.init_fb:
names += ["forward_net", "backward_net", "backward_target_net", "forward_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.backward_net(desired_goal)
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
# filter out small reward
# pdb.set_trace()
# idx = torch.where(reward >= torch.quantile(reward, 0.99))[0]
# obs = obs[idx]
# reward = reward[idx]
with torch.no_grad():
B = self.backward_net(obs)
z = torch.matmul(reward.T, B) / reward.shape[0]
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def sample_z(self, size, device: str = "cpu"):
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32, device=device)
gaussian_rdv = F.normalize(gaussian_rdv, dim=1)
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * gaussian_rdv
else:
uniform_rdv = torch.rand((size, self.cfg.z_dim), dtype=torch.float32, device=device)
z = np.sqrt(self.cfg.z_dim) * uniform_rdv * gaussian_rdv
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0 and np.random.rand() < self.cfg.update_z_proba:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device, dtype=torch.float32).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
if self.cfg.boltzmann:
dist = self.actor(h, z)
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
if self.cfg.additional_metric:
# the following is doing extra computation only used for metrics,
# it should be deactivated eventually
F_mean_s = self.forward_net(obs, z, action)
# F_samp_s = self.forward_net(obs, z, dist.sample())
F_rand_s = self.forward_net(obs, z, torch.zeros_like(action).uniform_(-1.0, 1.0))
Qs = [torch.min(*(torch.einsum('sd, sd -> s', F, z) for F in Fs)) for Fs in [F_mean_s, F_rand_s]]
self.actor_success = (Qs[0] > Qs[1]).cpu().numpy().tolist()
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def compute_z_correl(self, time_step: TimeStep, meta: MetaDict) -> float:
goal = time_step.goal if self.cfg.goal_space is not None else time_step.observation # type: ignore
with torch.no_grad():
zs = [torch.Tensor(x).unsqueeze(0).float().to(self.cfg.device) for x in [goal, meta["z"]]]
zs[0] = self.backward_net(zs[0])
zs = [F.normalize(z, dim=1) for z in zs]
return torch.matmul(zs[0], zs[1].T).item()
def update_fb(
self,
obs: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
if self.cfg.boltzmann:
dist = self.actor(next_obs, z)
next_action = dist.sample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
target_F1, target_F2 = self.forward_target_net(next_obs, z, next_action) # batch x z_dim
target_B = self.backward_target_net(next_goal) # batch x z_dim
target_M1 = torch.einsum('sd, td -> st', target_F1, target_B) # batch x batch
target_M2 = torch.einsum('sd, td -> st', target_F2, target_B) # batch x batch
target_M = torch.min(target_M1, target_M2)
# compute FB loss
F1, F2 = self.forward_net(obs, z, action)
B = self.backward_net(next_goal)
M1 = torch.einsum('sd, td -> st', F1, B) # batch x batch
M2 = torch.einsum('sd, td -> st', F2, B) # batch x batch
I = torch.eye(*M1.size(), device=M1.device)
off_diag = ~I.bool()
fb_offdiag: tp.Any = 0.5 * sum((M - discount * target_M)[off_diag].pow(2).mean() for M in [M1, M2])
fb_diag: tp.Any = -sum(M.diag().mean() for M in [M1, M2])
fb_loss = fb_offdiag + fb_diag
# Q LOSS
if self.cfg.q_loss:
with torch.no_grad():
next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', target_Fi, z) for target_Fi in [target_F1, target_F2]]
next_Q = torch.min(next_Q1, next_Q2)
cov = torch.matmul(B.T, B) / B.shape[0]
inv_cov = torch.inverse(cov)
implicit_reward = (torch.matmul(B, inv_cov) * z).sum(dim=1) # batch_size
target_Q = implicit_reward.detach() + discount.squeeze(1) * next_Q # batch_size
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
q_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
fb_loss += self.cfg.q_loss_coef * q_loss
# ORTHONORMALITY LOSS FOR BACKWARD EMBEDDING
Cov = torch.matmul(B, B.T)
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
fb_loss += self.cfg.ortho_coef * orth_loss
# Cov = torch.cov(B.T) # Vicreg loss
# var_loss = F.relu(1 - Cov.diag().clamp(1e-4, 1).sqrt()).mean() # eps avoids inf. sqrt gradient at 0
# cov_loss = 2 * torch.triu(Cov, diagonal=1).pow(2).mean() # 2x upper triangular part
# orth_loss = var_loss + cov_loss
# fb_loss += self.cfg.ortho_coef * orth_loss
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_M'] = target_M.mean().item()
metrics['M1'] = M1.mean().item()
metrics['F1'] = F1.mean().item()
metrics['B'] = B.mean().item()
metrics['B_norm'] = torch.norm(B, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['fb_loss'] = fb_loss.item()
metrics['fb_diag'] = fb_diag.item()
metrics['fb_offdiag'] = fb_offdiag.item()
if self.cfg.q_loss:
metrics['q_loss'] = q_loss.item()
metrics['orth_loss'] = orth_loss.item()
metrics['orth_loss_diag'] = orth_loss_diag.item()
metrics['orth_loss_offdiag'] = orth_loss_offdiag.item()
eye_diff = torch.matmul(B.T, B) / B.shape[0] - torch.eye(B.shape[1], device=B.device)
metrics['orth_linf'] = torch.max(torch.abs(eye_diff)).item()
metrics['orth_l2'] = eye_diff.norm().item() / math.sqrt(B.shape[1])
if isinstance(self.fb_opt, torch.optim.Adam):
metrics["fb_opt_lr"] = self.fb_opt.param_groups[0]["lr"]
# optimize FB
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.fb_opt.zero_grad(set_to_none=True)
fb_loss.backward()
self.fb_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if self.cfg.boltzmann:
dist = self.actor(obs, z)
action = dist.rsample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.forward_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
if self.cfg.additional_metric:
q1_success = Q1 > Q2
Q = torch.min(Q1, Q2)
actor_loss = (self.cfg.temp * log_prob - Q).mean() if self.cfg.boltzmann else -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['q'] = Q.mean().item()
if self.cfg.additional_metric:
metrics['q1_success'] = q1_success.float().mean().item()
metrics['actor_logprob'] = log_prob.mean().item()
# metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def aug_and_encode(self, obs: torch.Tensor) -> torch.Tensor:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
# pdb.set_trace()
obs = batch.obs
action = batch.action
discount = batch.discount
next_obs = next_goal = batch.next_obs
if self.cfg.goal_space is not None:
assert batch.next_goal is not None
next_goal = batch.next_goal
# if len(batch.meta) == 1 and batch.meta[0].shape[-1] == self.cfg.z_dim:
# z = batch.meta[0]
# invalid = torch.linalg.norm(z, dim=1) < 1e-15
# if sum(invalid):
# z[invalid, :] = self.sample_z(sum(invalid)).to(self.cfg.device)
# else:
z = self.sample_z(self.cfg.batch_size, device=self.cfg.device)
if not z.shape[-1] == self.cfg.z_dim:
raise RuntimeError("There's something wrong with the logic here")
# obs = self.aug_and_encode(batch.obs)
# next_obs = self.aug_and_encode(batch.next_obs)
# if not self.cfg.update_encoder:
# obs = obs.detach()
# next_obs = next_obs.detach()
backward_input = batch.obs
future_goal = batch.future_obs
if self.cfg.goal_space is not None:
assert batch.goal is not None
backward_input = batch.goal
future_goal = batch.future_goal
perm = torch.randperm(self.cfg.batch_size)
backward_input = backward_input[perm]
if self.cfg.mix_ratio > 0:
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
if not self.cfg.rand_weight:
with torch.no_grad():
mix_z = self.backward_net(backward_input[mix_idxs]).detach()
else:
# generate random weight
weight = torch.rand(size=(mix_idxs.shape[0], self.cfg.batch_size)).to(self.cfg.device)
weight = F.normalize(weight, dim=1)
uniform_rdv = torch.rand(mix_idxs.shape[0], 1).to(self.cfg.device)
weight = uniform_rdv * weight
with torch.no_grad():
mix_z = torch.matmul(weight, self.backward_net(backward_input).detach())
if self.cfg.norm_z:
mix_z = math.sqrt(self.cfg.z_dim) * F.normalize(mix_z, dim=1)
z[mix_idxs] = mix_z
# hindsight replay
if self.cfg.future_ratio > 0:
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)
z[future_idxs] = self.backward_net(future_goal[future_idxs]).detach()
metrics.update(self.update_fb(obs=obs, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, z=z, step=step))
# update actor
metrics.update(self.update_actor(obs, z, step))
# update critic target
utils.soft_update_params(self.forward_net, self.forward_target_net,
self.cfg.fb_target_tau)
utils.soft_update_params(self.backward_net, self.backward_target_net,
self.cfg.fb_target_tau)
# update inv cov
# if step % self.cfg.update_cov_every_step == 0:
# logger.info("update online cov")
# obs_list = list()
# batch_size = 0
# while batch_size < 10000:
# batch = next(replay_loader)
# batch = batch.to(self.cfg.device)
# obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
# batch_size += batch.next_obs.size(0)
# obs = torch.cat(obs_list, 0)
# with torch.no_grad():
# B = self.backward_net(obs)
# self.inv_cov = torch.inverse(self.online_cov(B))
return metrics
# def update(self, replay_loader: tp.Iterator[rb.EpisodeBatch], step: int) -> tp.Dict[str, float]:
# metrics: tp.Dict[str, float] = {}
#
# if step % self.cfg.update_every_steps != 0:
# return metrics
#
# for _ in range(self.cfg.num_fb_updates):
# batch = next(replay_loader)
# batch = batch.to(self.cfg.device)
# if self.cfg.mix_ratio > 0:
# assert self.cfg.batch_size % 3 == 0
# mini_batch_size = self.cfg.batch_size // 3
# else:
# assert self.cfg.batch_size % 2 == 0
# mini_batch_size = self.cfg.batch_size // 2
# idxs = list(range(mini_batch_size))
# idxs_prime = list(range(mini_batch_size, 2 * mini_batch_size))
#
# # pdb.set_trace()
# obs = batch.obs[idxs]
# action = batch.action[idxs]
# discount = batch.discount[idxs]
# next_obs = next_goal = batch.next_obs[idxs]
# if self.cfg.goal_space is not None:
# assert batch.next_goal is not None
# next_goal = batch.next_goal[idxs]
# if len(batch.meta) == 1 and batch.meta[0].shape[-1] == self.cfg.z_dim:
# z = batch.meta[0][idxs]
# invalid = torch.linalg.norm(z, dim=1) < 1e-15
# if sum(invalid):
# z[invalid, :] = self.sample_z(sum(invalid)).to(self.cfg.device)
# else:
# z = self.sample_z(mini_batch_size).to(self.cfg.device)
# if not z.shape[-1] == self.cfg.z_dim:
# raise RuntimeError("There's something wrong with the logic here")
# # obs = self.aug_and_encode(batch.obs)
# # next_obs = self.aug_and_encode(batch.next_obs)
# # if not self.cfg.update_encoder:
# # obs = obs.detach()
# # next_obs = next_obs.detach()
#
# backward_input = batch.obs
# future_goal = batch.future_obs
# if self.cfg.goal_space is not None:
# assert batch.goal is not None
# backward_input = batch.goal
# future_goal = batch.future_goal
#
# # goal = backward_input[idxs]
# goal_prime = backward_input[idxs_prime]
#
# if self.cfg.mix_ratio > 0:
# mix_idxs: tp.Any = np.where(np.random.uniform(size=mini_batch_size) < self.cfg.mix_ratio)[0]
# part = backward_input[2 * mini_batch_size:]
# if not self.cfg.rand_weight:
# mix_z = self.backward_net(part[mix_idxs]).detach()
# else:
# # generate random weight
# weight = torch.rand(size=(mix_idxs.shape[0], mini_batch_size)).to(self.cfg.device)
# weight = F.normalize(weight, dim=1)
# uniform_rdv = torch.rand(mix_idxs.shape[0], 1).to(self.cfg.device)
# weight = uniform_rdv * weight
# mix_z = torch.matmul(weight, self.backward_net(part).detach())
# if self.cfg.norm_z:
# mix_z = math.sqrt(self.cfg.z_dim) * F.normalize(mix_z, dim=1)
# z[mix_idxs] = mix_z
#
# # hindsight replay
# if self.cfg.future_ratio > 0:
# assert future_goal is not None
# future_idxs = np.where(np.random.uniform(size=mini_batch_size) < self.cfg.future_ratio)
# future_goal = future_goal[idxs][future_idxs]
# z[future_idxs] = self.backward_net(future_goal).detach()
# goal_prime[future_idxs] = future_goal
# metrics.update(self.update_fb(obs=obs, action=action, discount=discount,
# next_obs=next_obs, next_goal=next_goal, goal_prime=goal_prime, z=z, step=step))
#
# # update actor
# metrics.update(self.update_actor(obs, z, step))
#
# # update critic target
# utils.soft_update_params(self.forward_net, self.forward_target_net,
# self.cfg.fb_target_tau)
# utils.soft_update_params(self.backward_net, self.backward_target_net,
# self.cfg.fb_target_tau)
#
# return metrics
# def update_fb(
# self,
# obs: torch.Tensor,
# action: torch.Tensor,
# discount: torch.Tensor,
# next_obs: torch.Tensor,
# next_goal: torch.Tensor,
# goal_prime: torch.Tensor,
# z: torch.Tensor,
# step: int
# ) -> tp.Dict[str, float]:
# metrics: tp.Dict[str, float] = {}
# # compute target successor measure
# with torch.no_grad():
# if self.cfg.boltzmann:
# dist = self.actor(next_obs, z)
# next_action = dist.sample()
# else:
# stddev = utils.schedule(self.cfg.stddev_schedule, step)
# dist = self.actor(next_obs, z, stddev)
# next_action = dist.sample(clip=self.cfg.stddev_clip)
# target_F1, target_F2 = self.forward_target_net(next_obs, z, next_action) # batch x z_dim
# target_B = self.backward_target_net(goal_prime) # batch x z_dim
# target_M1 = torch.einsum('sd, td -> st', target_F1, target_B) # batch x batch
# target_M2 = torch.einsum('sd, td -> st', target_F2, target_B) # batch x batch
# target_M = torch.min(target_M1, target_M2)
#
# # compute FB loss
# F1, F2 = self.forward_net(obs, z, action)
# B = self.backward_net(next_goal)
# B_prime = self.backward_net(goal_prime)
# M1_diag = torch.einsum('sd, sd -> s', F1, B) # batch
# M2_diag = torch.einsum('sd, sd -> s', F2, B) # batch
# M1 = torch.einsum('sd, td -> st', F1, B_prime) # batch x batch
# M2 = torch.einsum('sd, td -> st', F2, B_prime) # batch x batch
# fb_loss = 0.5 * (M1 - discount * target_M).pow(2).mean() - M1_diag.mean()
# fb_loss += 0.5 * (M2 - discount * target_M).pow(2).mean() - M2_diag.mean()
#
# # ORTHONORMALITY LOSS FOR BACKWARD EMBEDDING
#
# B_B_prime = torch.matmul(B, B_prime.T)
# B_diag = torch.einsum('sd, sd -> s', B, B)
# B_prime_diag = torch.einsum('sd, sd -> s', B_prime, B_prime)
# orth_loss = B_B_prime.pow(2).mean() - (B_diag.mean() + B_prime_diag.mean())
# fb_loss += self.cfg.ortho_coef * orth_loss
#
# if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
# metrics['target_M'] = target_M.mean().item()
# metrics['M1'] = M1.mean().item()
# metrics['F1'] = F1.mean().item()
# metrics['B'] = B.mean().item()
# metrics['B_norm'] = torch.norm(B, dim=-1).mean().item()
# metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
# metrics['fb_loss'] = fb_loss.item()
# metrics['orth_loss'] = orth_loss.item()
# eye_diff = torch.matmul(B.T, B) / B.shape[0] - torch.eye(B.shape[1], device=B.device)
# metrics['orth_linf'] = torch.max(torch.abs(eye_diff)).item()
# metrics['orth_l2'] = eye_diff.norm().item() / math.sqrt(B.shape[1])
# if isinstance(self.fb_opt, torch.optim.Adam):
# metrics["fb_opt_lr"] = self.fb_opt.param_groups[0]["lr"]
# if self.cfg.goal_space in ["simplified_walker", "simplified_quadruped"]:
# metrics['max_velocity'] = goal_prime[:, -1].max().item()
#
# # optimize FB
# if self.encoder_opt is not None:
# self.encoder_opt.zero_grad(set_to_none=True)
# self.fb_opt.zero_grad(set_to_none=True)
# fb_loss.backward()
# self.fb_opt.step()
# if self.encoder_opt is not None:
# self.encoder_opt.step()
# return metrics
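# --- Illustrative sketch (not part of the original module) -----------------
# Minimal, self-contained rundown of the FB quantities used in update_fb and
# infer_meta_from_obs_and_rewards, on random tensors only. The batch size,
# z dimension and discount below are arbitrary assumptions for the demo.
if __name__ == "__main__":
    import torch as _torch
    _b, _zd = 8, 16
    _F1 = _torch.randn(_b, _zd)                  # forward embedding F(s, a, z)
    _B = _torch.randn(_b, _zd)                   # backward embedding B(s')
    _target_M = _torch.randn(_b, _b)             # stand-in for the target successor measure
    _discount = _torch.full((_b, 1), 0.98)
    _M = _torch.einsum('sd, td -> st', _F1, _B)  # successor-measure estimate, batch x batch
    _off_diag = ~_torch.eye(_b, dtype=_torch.bool)
    _fb_offdiag = 0.5 * (_M - _discount * _target_M)[_off_diag].pow(2).mean()
    _fb_diag = -_M.diag().mean()                 # diagonal term plays the role of the reward
    print("fb loss (sketch):", (_fb_offdiag + _fb_diag).item())
    # reward relabelling: z is (up to scaling) the reward-weighted mean of B(s')
    _reward = _torch.randn(_b, 1)
    _z = _torch.nn.functional.normalize(_reward.T @ _B / _b, dim=1) * (_zd ** 0.5)
    print("inferred z norm (sketch):", _z.norm().item())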
|
controllable_agent-main
|
url_benchmark/agent/fb_ddpg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import dataclasses
from types import ModuleType
import numpy as np
import torch
from url_benchmark import replay_buffer as rb
from url_benchmark import agent as agents
from . import fb_ddpg
from . import fb_modules
def get_cfg() -> fb_ddpg.FBDDPGAgentConfig:
# hopefully this can get simpler soon
return fb_ddpg.FBDDPGAgentConfig(
obs_shape=(4,), action_shape=(3,), obs_type="state", device="cpu", num_expl_steps=1, goal_space=None
)
def test_agent_init() -> None:
cfg = get_cfg()
agent = fb_ddpg.FBDDPGAgent(**dataclasses.asdict(cfg))
b = 12
shapes = dict(obs=(b, 4), next_obs=(b, 4), action=(b, 3), reward=(b,), discount=(b,))
iterator = (rb.EpisodeBatch(**{x: np.random.rand(*y).astype(np.float32)
for x, y in shapes.items()}) for _ in range(100)) # type: ignore
meta = agent.init_meta()
with torch.no_grad():
action = agent.act(next(iterator).obs[0], meta, 0, eval_mode=False)
assert action.shape == (3,)
def test_agents_config() -> None:
cfgs = []
for module in agents.__dict__.values():
if isinstance(module, ModuleType):
for obj in module.__dict__.values():
if inspect.isclass(obj) and issubclass(obj, agents.DDPGAgentConfig):
if obj not in cfgs:
cfgs.append(obj)
assert len(cfgs) >= 3
for cfg in cfgs:
# check that target and name have been updated to match the algo
assert cfg.name.replace("_", "") in cfg.__name__.lower()
assert cfg.name in cfg._target_
def test_multiinputs() -> None:
m, n = [10, 12]
x, y = (torch.rand([16, z]) for z in [m, n])
mip = fb_modules.MultinputNet([m, n], [100, 100, 32])
out = mip(x, y)
assert out.shape == (16, 32)
|
controllable_agent-main
|
url_benchmark/agent/test_agent.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from collections import OrderedDict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from url_benchmark import utils
from url_benchmark.dmc import TimeStep
from .ddpg import DDPGAgent, MetaDict
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, NoReturn, Tuple
"""
Reimplementation of https://github.com/RLAgent/state-marginal-matching:
- Removed redundant forward passes
- No updating p_z
- Added finetuning procedure from what's described in DIAYN
- VAE encodes and decodes from the encoding from DDPG when n > 1
as the paper does not make it clear how to include skills with pixel input
- When n=1, obs_type=pixel, remove the False from line 144
to input pixels into the vae
- TODO: when using pixel-based vae (n=1), gpu may run out of memory.
"""
class VAE(nn.Module):
def __init__(self, obs_dim, z_dim, code_dim, vae_beta, device) -> None:
super().__init__()
self.z_dim = z_dim
self.code_dim = code_dim
self.make_networks(obs_dim, z_dim, code_dim)
self.beta = vae_beta
self.apply(utils.weight_init)
self.device = device
def make_networks(self, obs_dim, z_dim, code_dim) -> None:
self.enc = nn.Sequential(nn.Linear(obs_dim + z_dim, 150), nn.ReLU(),
nn.Linear(150, 150), nn.ReLU())
self.enc_mu = nn.Linear(150, code_dim)
self.enc_logvar = nn.Linear(150, code_dim)
self.dec = nn.Sequential(nn.Linear(code_dim, 150), nn.ReLU(),
nn.Linear(150, 150), nn.ReLU(),
nn.Linear(150, obs_dim + z_dim))
def encode(self, obs_z) -> Tuple[Any, Any, Any]:
enc_features = self.enc(obs_z)
mu = self.enc_mu(enc_features)
logvar = self.enc_logvar(enc_features)
stds = (0.5 * logvar).exp()
return mu, logvar, stds
def forward(self, obs_z, epsilon) -> Tuple[Any, Tuple[Any, Any, Any]]:
mu, logvar, stds = self.encode(obs_z)
code = epsilon * stds + mu
obs_distr_params = self.dec(code)
return obs_distr_params, (mu, logvar, stds)
def loss(self, obs_z) -> Tuple[Any, Any]:
epsilon = torch.randn([obs_z.shape[0], self.code_dim]).to(self.device)
# pylint: disable=unused-variable
obs_distr_params, (mu, logvar, stds) = self(obs_z, epsilon)
kle = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(),
dim=1).mean()
log_prob = F.mse_loss(obs_z, obs_distr_params, reduction='none')
loss = self.beta * kle + log_prob.mean()
return loss, log_prob.sum(list(range(1, len(log_prob.shape)))).view(
log_prob.shape[0], 1)
class PVae(VAE):
def make_networks(self, obs_shape, z_dim, code_dim) -> None:
self.enc = nn.Sequential(nn.Conv2d(obs_shape[0], 32, 3, stride=2),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Flatten(),
nn.Linear(32 * 35 * 35, 150), nn.ReLU())
self.enc_mu = nn.Linear(150, code_dim)
self.enc_logvar = nn.Linear(150, code_dim)
self.dec = nn.Sequential(
nn.Linear(code_dim, 32 * 35 * 35), nn.ReLU(),
nn.Unflatten(dim=1, unflattened_size=(32, 35, 35)),
nn.ConvTranspose2d(32, 32, 3, stride=1), nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, stride=1), nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, stride=1), nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, stride=1), nn.ReLU(),
nn.ConvTranspose2d(32, 32, 3, stride=2), nn.ReLU(),
nn.Conv2d(32, obs_shape[0], 4, stride=1))
class SMM(nn.Module):
def __init__(self, obs_dim, z_dim, hidden_dim, vae_beta, device) -> None:
super().__init__()
self.z_dim = z_dim
self.z_pred_net = nn.Sequential(nn.Linear(obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, z_dim))
self.vae = VAE(obs_dim=obs_dim,
z_dim=z_dim,
code_dim=128,
vae_beta=vae_beta,
device=device)
self.apply(utils.weight_init)
def predict_logits(self, obs) -> Any:
z_pred_logits = self.z_pred_net(obs)
return z_pred_logits
def loss(self, logits, z) -> Any:
z_labels = torch.argmax(z, 1)
return nn.CrossEntropyLoss(reduction='none')(logits, z_labels)
class PSMM(nn.Module):
# pylint: disable=unused-argument
def __init__(self, obs_shape, z_dim, hidden_dim, vae_beta, device) -> None:
super().__init__()
self.z_dim = z_dim
self.vae = PVae(obs_dim=obs_shape,
z_dim=z_dim,
code_dim=128,
vae_beta=vae_beta,
device=device)
self.apply(utils.weight_init)
# discriminator not needed when n=1, as z is degenerate
def predict_logits(self, obs) -> NoReturn:
raise NotImplementedError
def loss(self, logits, z) -> NoReturn:
raise NotImplementedError
class SMMAgent(DDPGAgent):
def __init__(self, z_dim, sp_lr, vae_lr, vae_beta, state_ent_coef,
latent_ent_coef, latent_cond_ent_coef, update_encoder,
**kwargs) -> None:
self.z_dim = z_dim
self.state_ent_coef = state_ent_coef
self.latent_ent_coef = latent_ent_coef
self.latent_cond_ent_coef = latent_cond_ent_coef
self.update_encoder = update_encoder
kwargs["meta_dim"] = self.z_dim
super().__init__(**kwargs)
# self.obs_dim is now the real obs_dim (or repr_dim) + z_dim
self.smm = SMM(self.obs_dim - z_dim,
z_dim,
hidden_dim=kwargs['hidden_dim'],
vae_beta=vae_beta,
device=kwargs['device']).to(kwargs['device'])
self.pred_optimizer = torch.optim.Adam(
self.smm.z_pred_net.parameters(), lr=sp_lr)
self.vae_optimizer = torch.optim.Adam(self.smm.vae.parameters(),
lr=vae_lr)
self.smm.train()
# fine tuning SMM agent
self.ft_returns = np.zeros(z_dim, dtype=np.float32)
self.ft_not_finished = [True for z in range(z_dim)]
def init_meta(self) -> tp.Dict[str, np.ndarray]:
z = np.zeros(self.z_dim, dtype=np.float32)
z[np.random.choice(self.z_dim)] = 1.0
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
# during fine-tuning, find the best skill and fine-tune that one only.
if self.reward_free:
return self.update_meta_ft(meta, global_step, time_step)
# during training, change to randomly sampled z at the end of the episode
if time_step.last():
return self.init_meta()
return meta
def update_meta_ft(self, meta: MetaDict, global_step, time_step) -> MetaDict:
z_ind: tp.Any = meta['z'].argmax()
if any(self.ft_not_finished):
self.ft_returns[z_ind] += time_step.reward
if time_step.last():
if not any(self.ft_not_finished):
# choose the best
new_z_ind: int = self.ft_returns.argmax() # type: ignore
else:
# or the next z to try
self.ft_not_finished[z_ind] = False
not_tried_z: int = sum(self.ft_not_finished)
# uniformly sample from the remaining unused z
for i in range(self.z_dim):
if self.ft_not_finished[i]:
if np.random.random() < 1 / not_tried_z:
new_z_ind = i
break
not_tried_z -= 1
new_z = np.zeros(self.z_dim, dtype=np.float32)
new_z[new_z_ind] = 1.0
meta['z'] = new_z # type: ignore
return meta
def update_vae(self, obs_z) -> Tuple[Dict[str, Any], Any]:
metrics: tp.Dict[str, float] = {}
loss, h_s_z = self.smm.vae.loss(obs_z)
self.vae_optimizer.zero_grad()
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.vae_optimizer.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
metrics['loss_vae'] = loss.cpu().item()
return metrics, h_s_z
def update_pred(self, obs, z) -> Tuple[Dict[str, Any], Any]:
metrics: tp.Dict[str, float] = {}
logits = self.smm.predict_logits(obs)
h_z_s = self.smm.loss(logits, z).unsqueeze(-1)
loss = h_z_s.mean()
self.pred_optimizer.zero_grad()
loss.backward()
self.pred_optimizer.step()
metrics['loss_pred'] = loss.cpu().item()
return metrics, h_z_s
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size).to(self.device)
obs, action, extr_reward, discount, next_obs = batch.unpack()
z = batch.meta["z"]
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
obs_z = torch.cat([obs, z], dim=1) # do not learn encoder in the VAE
next_obs_z = torch.cat([next_obs, z], dim=1)
if self.reward_free:
vae_metrics, h_s_z = self.update_vae(obs_z)
pred_metrics, h_z_s = self.update_pred(obs.detach(), z)
h_z = np.log(self.z_dim) # One-hot z encoding
h_z *= torch.ones_like(extr_reward).to(self.device)
pred_log_ratios = self.state_ent_coef * h_s_z.detach(
) # p^*(s) is ignored, as state space dimension is inaccessible from pixel input
intr_reward = pred_log_ratios + self.latent_ent_coef * h_z + self.latent_cond_ent_coef * h_z_s.detach(
)
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics.update(vae_metrics)
metrics.update(pred_metrics)
metrics['intr_reward'] = intr_reward.mean().item()
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs_z = obs_z.detach()
next_obs_z = next_obs_z.detach()
# update critic
metrics.update(
self.update_critic(obs_z.detach(), action, reward, discount,
next_obs_z.detach(), step))
# update actor
metrics.update(self.update_actor(obs_z.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
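# --- Illustrative sketch (not part of the original module) -----------------
# Minimal demo of how the SMM intrinsic reward is assembled from the three
# entropy terms above, using the VAE and SMM modules defined in this file on
# random data. Batch size, dims and the unit coefficients are arbitrary
# assumptions for the demo.
if __name__ == "__main__":
    _b, _obs_dim, _z_dim = 16, 4, 3
    _vae = VAE(obs_dim=_obs_dim, z_dim=_z_dim, code_dim=8, vae_beta=0.5, device="cpu")
    _smm = SMM(_obs_dim, _z_dim, hidden_dim=32, vae_beta=0.5, device="cpu")
    _obs = torch.randn(_b, _obs_dim)
    _z = torch.zeros(_b, _z_dim)
    _z[torch.arange(_b), torch.randint(_z_dim, (_b,))] = 1.0   # one-hot skills
    _obs_z = torch.cat([_obs, _z], dim=1)
    _, _h_s_z = _vae.loss(_obs_z)                              # reconstruction term ~ h(s|z), shape (b, 1)
    _h_z = float(np.log(_z_dim))                               # entropy of the uniform skill prior
    _h_z_s = _smm.loss(_smm.predict_logits(_obs), _z).unsqueeze(-1)  # discriminator term ~ h(z|s)
    _intr_reward = _h_s_z.detach() + _h_z + _h_z_s.detach()    # coefficients set to 1 here
    print("intrinsic reward (sketch):", _intr_reward.mean().item())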
|
controllable_agent-main
|
url_benchmark/agent/smm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import torch
from torch import nn
from url_benchmark import utils
from .ddpg import DDPGAgent
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, Tuple
class ICM(nn.Module):
def __init__(self, obs_dim, action_dim, hidden_dim) -> None:
super().__init__()
self.forward_net = nn.Sequential(
nn.Linear(obs_dim + action_dim, hidden_dim), nn.ReLU(),
nn.Linear(hidden_dim, obs_dim))
self.backward_net = nn.Sequential(nn.Linear(2 * obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, action_dim),
nn.Tanh())
self.apply(utils.weight_init)
def forward(self, obs, action, next_obs) -> Tuple[Any, Any]:
assert obs.shape[0] == next_obs.shape[0]
assert obs.shape[0] == action.shape[0]
next_obs_hat = self.forward_net(torch.cat([obs, action], dim=-1))
action_hat = self.backward_net(torch.cat([obs, next_obs], dim=-1))
forward_error = torch.norm(next_obs - next_obs_hat,
dim=-1,
p=2,
keepdim=True)
backward_error = torch.norm(action - action_hat,
dim=-1,
p=2,
keepdim=True)
return forward_error, backward_error
class ICMAgent(DDPGAgent):
def __init__(self, icm_scale, update_encoder, **kwargs) -> None:
super().__init__(**kwargs)
self.icm_scale = icm_scale
self.update_encoder = update_encoder
self.icm = ICM(self.obs_dim, self.action_dim,
self.hidden_dim).to(self.device)
# optimizers
self.icm_opt = torch.optim.Adam(self.icm.parameters(), lr=self.lr)
self.icm.train()
# pylint: disable=unused-argument
def update_icm(self, obs, action, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
forward_error, backward_error = self.icm(obs, action, next_obs)
loss = forward_error.mean() + backward_error.mean()
self.icm_opt.zero_grad(set_to_none=True)
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.icm_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['icm_loss'] = loss.item()
return metrics
# pylint: disable=unused-argument
def compute_intr_reward(self, obs, action, next_obs, step) -> Any:
forward_error, _ = self.icm(obs, action, next_obs)
reward = forward_error * self.icm_scale
reward = torch.log(reward + 1.0)
return reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, extr_reward, discount, next_obs = batch.to(self.device).unpack()
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
metrics.update(self.update_icm(obs, action, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(obs, action, next_obs,
step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
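# --- Illustrative sketch (not part of the original module) -----------------
# Minimal demo of the curiosity signal above on random tensors: the forward
# model's prediction error is turned into an intrinsic reward. Dimensions and
# the 0.1 scale are arbitrary assumptions for the demo (the agent uses
# icm_scale from its config).
if __name__ == "__main__":
    _b, _obs_dim, _action_dim = 8, 6, 3
    _icm = ICM(_obs_dim, _action_dim, hidden_dim=32)
    _obs = torch.randn(_b, _obs_dim)
    _action = torch.rand(_b, _action_dim) * 2 - 1
    _next_obs = torch.randn(_b, _obs_dim)
    _fwd_err, _bwd_err = _icm(_obs, _action, _next_obs)        # both (b, 1)
    _icm_loss = _fwd_err.mean() + _bwd_err.mean()              # training objective
    _intr_reward = torch.log(1.0 + 0.1 * _fwd_err)             # same shaping as compute_intr_reward
    print("icm loss (sketch):", _icm_loss.item())
    print("intrinsic reward (sketch):", _intr_reward.mean().item())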
|
controllable_agent-main
|
url_benchmark/agent/icm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
from collections import OrderedDict
import dataclasses
import logging
import numpy as np
import torch
from torch import nn
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark import utils
from .fb_modules import mlp, Actor
import url_benchmark.goals as _goals
logger = logging.getLogger(__name__)
# MetaDict = tp.Mapping[str, tp.Union[np.ndarray, torch.Tensor]]
MetaDict = tp.Mapping[str, np.ndarray]
@dataclasses.dataclass
class GoalSMConfig:
# @package agent
_target_: str = "url_benchmark.agent.goal_sm.GoalSMAgent"
name: str = "goal_sm"
reward_free: bool = omegaconf.II("reward_free")
custom_reward: tp.Optional[str] = omegaconf.II("custom_reward")
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
critic_target_tau: float = 0.01
update_every_steps: float = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
use_hiplog: bool = omegaconf.II("use_hiplog") # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING
hidden_dim: int = 1024
feature_dim: int = 512
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)"
stddev_clip: float = 0.3 # 1.0
nstep: int = 1
batch_size: int = 1024 # 256 for pixels
init_critic: bool = True
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
future_ratio: float = 0
preprocess: bool = False
add_trunk: bool = False
update_meta_every_step: int = 500
cs = ConfigStore.instance()
cs.store(group="agent", name="goal_sm", node=GoalSMConfig)
class Critic(nn.Module):
""" forward representation class"""
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.action_dim = action_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_action_net = mlp(self.obs_dim + self.action_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim + self.action_dim, hidden_dim, "ntanh",
hidden_dim, "irelu",
hidden_dim, "irelu")
feature_dim = hidden_dim
seq = [feature_dim, hidden_dim, "irelu", 1]
self.F1 = mlp(*seq)
self.F2 = mlp(*seq)
self.apply(utils.weight_init)
def forward(self, obs, z, action):
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs_action = self.obs_action_net(torch.cat([obs, action], dim=-1))
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
h = torch.cat([obs_action, obs_z], dim=-1)
else:
h = torch.cat([obs, z, action], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
F1 = self.F1(h)
F2 = self.F2(h)
return F1, F2
class GoalSMAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = GoalSMConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
self.goal_dim = 0
if cfg.goal_space is not None:
if cfg.goal_space == "quad_pos_speed":
self.goal_dim = 7 # ugly hack
else:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
self.goal_dim = len(g)
self.actor = Actor(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic: nn.Module = Critic(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic_target: nn.Module = Critic(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# optimizers
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=cfg.lr)
self.train()
self.critic_target.train()
def train(self, training: bool = True) -> None:
self.training = training
self.actor.train(training)
self.critic.train(training)
def init_from(self, other) -> None:
# copy parameters over
utils.hard_update_params(other.actor, self.actor)
utils.hard_update_params(other.critic, self.critic)
def init_meta(self, replay_loader: tp.Optional[ReplayBuffer] = None) -> MetaDict:
if replay_loader is not None:
batch = replay_loader.sample(self.cfg.batch_size)
assert batch.next_goal is not None
g = batch.next_goal[0]
else:
g = np.zeros((self.goal_dim,), dtype=np.float32)
meta = OrderedDict()
meta['g'] = g
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_meta_every_step == 0 and global_step > 1000: # skip first trajectory
return self.init_meta(replay_loader)
return meta
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
meta = OrderedDict()
meta['g'] = goal_array
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
# Not used, only for compatibility with pretrain.eval !!!
batch = replay_loader.sample(self.cfg.batch_size)
assert batch.next_goal is not None
g = batch.next_goal[0]
return self.get_goal_meta(g)
def act(self, obs, meta, step, eval_mode) -> np.ndarray:
device = torch.device(self.cfg.device)
obs = torch.as_tensor(obs, device=device).unsqueeze(0)
goals = []
for value in meta.values():
value = torch.as_tensor(value, device=device).unsqueeze(0)
goals.append(value)
goal = torch.cat(goals, dim=-1)
#assert obs.shape[-1] == self.obs_shape[-1]
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, goal, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample(clip=None)
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
@tp.no_type_check # TODO remove
def update_critic(self,
obs: torch.Tensor,
desired_goal: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
achieved_goal: torch.Tensor,
step: int) -> tp.Dict[str, float]:
metrics = {}
with torch.no_grad():
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, desired_goal, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
target_Q1, target_Q2 = self.critic_target(next_obs, desired_goal, next_action)
target_Q = torch.min(target_Q1, target_Q2)
Q1, Q2 = self.critic(obs, desired_goal, action)
Q1_diag, Q2_diag = self.critic(obs, achieved_goal, action)
loss_offdiag: tp.Any = 0.5 * sum((Q - discount * target_Q).pow(2).mean() for Q in [Q1, Q2])
loss_diag: tp.Any = -sum(Q.diag().mean() for Q in [Q1_diag, Q2_diag])
critic_loss = loss_offdiag + loss_diag
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['critic_target_q'] = target_Q.mean().item()
metrics['critic_q1'] = Q1.mean().item()
metrics['critic_q2'] = Q2.mean().item()
metrics['critic_loss'] = critic_loss.item()
metrics['stdev'] = stddev
# optimize critic
self.critic_opt.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_opt.step()
return metrics
@tp.no_type_check # TODO remove
def update_actor(self,
obs: torch.Tensor,
goal: torch.Tensor,
step: int) -> tp.Dict[str, float]:
metrics = {}
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, goal, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
Q1, Q2 = self.critic(obs, goal, action)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
#import ipdb; ipdb.set_trace()
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
achieved_goal = batch.next_goal
future_goal = batch.future_obs
if self.cfg.goal_space:
future_goal = batch.future_goal
obs = batch.obs
action = batch.action
discount = batch.discount
next_obs = batch.next_obs
desired_goal = batch.meta["g"]
# sample goal from replay
# new_batch = next(replay_loader)
# new_batch = new_batch.to(self.cfg.device)
# desired_goal = new_batch.next_goal # type: ignore
# perm = torch.randperm(self.cfg.batch_size)
# desired_goal = achieved_goal[perm]
if self.cfg.future_ratio > 0:
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)[0]
desired_goal[future_idxs] = future_goal[future_idxs] # type: ignore
# update critic
metrics.update(
self.update_critic(obs=obs, desired_goal=desired_goal, action=action,
discount=discount, next_obs=next_obs, achieved_goal=achieved_goal, step=step))
# update actor
metrics.update(self.update_actor(obs=obs, goal=desired_goal, step=step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.cfg.critic_target_tau)
return metrics
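# --- Illustrative sketch (not part of the original module) -----------------
# Shape check for the goal-conditioned Critic defined above, on random
# tensors. The dimensions below are arbitrary assumptions for the demo; the
# quad_pos_speed goal space in this file uses goal_dim = 7.
if __name__ == "__main__":
    _b, _obs_dim, _goal_dim, _action_dim = 8, 24, 7, 6
    _critic = Critic(_obs_dim, _goal_dim, _action_dim, feature_dim=32, hidden_dim=64,
                     preprocess=False, add_trunk=True)
    _obs = torch.randn(_b, _obs_dim)
    _goal = torch.randn(_b, _goal_dim)
    _action = torch.rand(_b, _action_dim) * 2 - 1
    _Q1, _Q2 = _critic(_obs, _goal, _action)
    assert _Q1.shape == _Q2.shape == (_b, 1)
    print("Q1 mean (sketch):", _Q1.mean().item())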
|
controllable_agent-main
|
url_benchmark/agent/goal_sm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
import dataclasses
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from collections import OrderedDict
from url_benchmark import utils
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from .ddpg import DDPGAgent, MetaDict, DDPGAgentConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, Tuple
# TODO(HL): how to include GPI for continuous domain?
@dataclasses.dataclass
class APSAgentConfig(DDPGAgentConfig):
_target_: str = "url_benchmark.agent.aps.APSAgent"
name: str = "aps"
update_encoder: bool = omegaconf.II("update_encoder")
sf_dim: int = 10
update_task_every_step: int = 5
knn_rms: bool = True
knn_k: int = 12
knn_avg: bool = True
knn_clip: float = 0.0001
num_init_steps: int = 4096 # set to ${num_train_frames} to disable finetune policy parameters
lstsq_batch_size: int = 4096
num_inference_steps: int = 10000
cs = ConfigStore.instance()
cs.store(group="agent", name="aps", node=APSAgentConfig)
class CriticSF(nn.Module):
def __init__(self, obs_type, obs_dim, action_dim, feature_dim, hidden_dim,
sf_dim) -> None:
super().__init__()
self.obs_type = obs_type
if obs_type == 'pixels':
# for pixels actions will be added after trunk
self.trunk = nn.Sequential(nn.Linear(obs_dim, feature_dim),
nn.LayerNorm(feature_dim), nn.Tanh())
trunk_dim = feature_dim + action_dim
else:
# for states actions come in the beginning
self.trunk = nn.Sequential(
nn.Linear(obs_dim + action_dim, hidden_dim),
nn.LayerNorm(hidden_dim), nn.Tanh())
trunk_dim = hidden_dim
def make_q():
q_layers = []
q_layers += [
nn.Linear(trunk_dim, hidden_dim),
nn.ReLU(inplace=True)
]
if obs_type == 'pixels':
q_layers += [
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
]
q_layers += [nn.Linear(hidden_dim, sf_dim)]
return nn.Sequential(*q_layers)
self.Q1 = make_q()
self.Q2 = make_q()
self.apply(utils.weight_init)
def forward(self, obs, action, task) -> Tuple[Any, Any]:
inpt = obs if self.obs_type == 'pixels' else torch.cat([obs, action],
dim=-1)
h = self.trunk(inpt)
h = torch.cat([h, action], dim=-1) if self.obs_type == 'pixels' else h
q1 = self.Q1(h)
q2 = self.Q2(h)
q1 = torch.einsum("bi,bi->b", task, q1).reshape(-1, 1)
q2 = torch.einsum("bi,bi->b", task, q2).reshape(-1, 1)
return q1, q2
class APS(nn.Module):
def __init__(self, obs_dim, sf_dim, hidden_dim) -> None:
super().__init__()
self.state_feat_net = nn.Sequential(nn.Linear(obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, sf_dim))
self.apply(utils.weight_init)
def forward(self, obs, norm=True) -> Any:
state_feat = self.state_feat_net(obs)
state_feat = F.normalize(state_feat, dim=-1) if norm else state_feat
return state_feat
class APSAgent(DDPGAgent):
def __init__(self, **kwargs: tp.Any) -> None:
cfg = APSAgentConfig(**kwargs)
# create actor and critic
# increase obs shape to include task dim (through meta_dim)
super().__init__(**kwargs, meta_dim=cfg.sf_dim)
self.cfg: APSAgentConfig = cfg # override base ddpg cfg type
# overwrite critic with critic sf
self.critic = CriticSF(cfg.obs_type, self.obs_dim, self.action_dim,
self.feature_dim, self.hidden_dim,
self.sf_dim).to(self.device)
self.critic_target = CriticSF(self.obs_type, self.obs_dim,
self.action_dim, self.feature_dim,
self.hidden_dim,
self.sf_dim).to(self.device)
self.critic_target.load_state_dict(self.critic.state_dict())
self.critic_opt = torch.optim.Adam(self.critic.parameters(),
lr=self.lr)
self.aps = APS(self.obs_dim - self.sf_dim, self.sf_dim,
kwargs['hidden_dim']).to(kwargs['device'])
# particle-based entropy
rms = utils.RMS(self.device)
self.pbe = utils.PBE(rms, cfg.knn_clip, cfg.knn_k, cfg.knn_avg, cfg.knn_rms,
cfg.device)
# optimizers
self.aps_opt = torch.optim.Adam(self.aps.parameters(), lr=self.lr)
self.train()
self.critic_target.train()
self.aps.train()
def init_meta(self) -> tp.Dict[str, np.ndarray]:
if self.solved_meta is not None:
return self.solved_meta
task = torch.randn(self.sf_dim)
task = task / torch.norm(task)
task_array = task.cpu().numpy()
meta = OrderedDict()
meta['task'] = task_array
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.update_task_every_step == 0:
return self.init_meta()
return meta
def update_aps(self, task, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
loss = self.compute_aps_loss(next_obs, task)
self.aps_opt.zero_grad(set_to_none=True)
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.aps_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['aps_loss'] = loss.item()
return metrics
def compute_intr_reward(self, task, next_obs, step) -> Tuple[Any, Any]:
# maxent reward
with torch.no_grad():
rep = self.aps(next_obs, norm=False)
reward = self.pbe(rep)
intr_ent_reward = reward.reshape(-1, 1)
# successor feature reward
rep = rep / torch.norm(rep, dim=1, keepdim=True)
intr_sf_reward = torch.einsum("bi,bi->b", task, rep).reshape(-1, 1)
return intr_ent_reward, intr_sf_reward
def compute_aps_loss(self, next_obs, task) -> Any:
"""MLE loss"""
loss = -torch.einsum("bi,bi->b", task, self.aps(next_obs)).mean()
return loss
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size).to(self.device)
obs, action, extr_reward, discount, next_obs = batch.unpack()
task = batch.meta["task"]
# augment and encode
obs = self.aug_and_encode(obs)
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
# freeze successor features at finetuning phase
metrics.update(self.update_aps(task, next_obs, step))
with torch.no_grad():
intr_ent_reward, intr_sf_reward = self.compute_intr_reward(
task, next_obs, step)
intr_reward = intr_ent_reward + intr_sf_reward
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
metrics['intr_ent_reward'] = intr_ent_reward.mean().item()
metrics['intr_sf_reward'] = intr_sf_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# extend observations with task
obs = torch.cat([obs, task], dim=1)
next_obs = torch.cat([next_obs, task], dim=1)
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), task, step))
# update actor
metrics.update(self.update_actor(obs.detach(), task, step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
@torch.no_grad()
def regress_meta(self, replay_loader, step):
obs, reward = [], []
batch_size = 0
while batch_size < self.lstsq_batch_size:
batch = replay_loader.sample(self.cfg.batch_size)
batch_obs, _, batch_reward, *_ = utils.to_torch(batch, self.device)
obs.append(batch_obs)
reward.append(batch_reward)
batch_size += batch_obs.size(0)
obs, reward = torch.cat(obs, 0), torch.cat(reward, 0)
obs = self.aug_and_encode(obs)
rep = self.aps(obs)
task = torch.linalg.lstsq(reward, rep)[0][:rep.size(1), :][0]
task = task / torch.norm(task)
task = task.cpu().numpy()
meta = OrderedDict()
meta['task'] = task
# save for evaluation
self.solved_meta = meta
return meta
@torch.no_grad()
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
@torch.no_grad()
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
rep = self.aps(obs)
# task = torch.linalg.lstsq(reward, rep)[0][:rep.size(1), :][0]
task = torch.linalg.lstsq(rep, reward)[0].squeeze()
task = task / torch.norm(task)
task = task.cpu().numpy()
meta = OrderedDict()
meta['task'] = task
# self.solved_meta = meta
return meta
def update_critic(self, obs, action, reward, discount, next_obs, task,
step) -> Dict[str, Any]:
"""diff is critic takes task as input"""
metrics: tp.Dict[str, float] = {}
with torch.no_grad():
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(next_obs, stddev)
next_action = dist.sample(clip=self.stddev_clip)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action,
task)
target_V = torch.min(target_Q1, target_Q2)
target_Q = reward + (discount * target_V)
Q1, Q2 = self.critic(obs, action, task)
critic_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
if self.use_tb or self.use_wandb:
metrics['critic_target_q'] = target_Q.mean().item()
metrics['critic_q1'] = Q1.mean().item()
metrics['critic_q2'] = Q2.mean().item()
metrics['critic_loss'] = critic_loss.item()
# optimize critic
self.critic_opt.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_opt.step()
return metrics
def update_actor(self, obs, task, step) -> Dict[str, Any]:
"""diff is critic takes task as input"""
metrics: tp.Dict[str, float] = {}
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(obs, stddev)
action = dist.sample(clip=self.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
Q1, Q2 = self.critic(obs, action, task)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.use_tb or self.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
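# --- Illustrative sketch (not part of the original module) -----------------
# Minimal demo of the least-squares task inference used in
# infer_meta_from_obs_and_rewards, with the APS feature net defined above on
# random data. Dimensions are arbitrary assumptions for the demo.
if __name__ == "__main__":
    _b, _obs_dim, _sf_dim = 256, 12, 5
    _aps = APS(_obs_dim, _sf_dim, hidden_dim=64)
    _obs = torch.randn(_b, _obs_dim)
    _reward = torch.randn(_b, 1)
    with torch.no_grad():
        _rep = _aps(_obs)                                       # (b, sf_dim), rows L2-normalized
        _task = torch.linalg.lstsq(_rep, _reward)[0].squeeze()  # solve rep @ task ~= reward
        _task = _task / torch.norm(_task)
        # successor-feature reward for the inferred task, as in compute_intr_reward
        _sf_reward = torch.einsum("bi,bi->b", _task.expand(_b, -1), _rep)
    print("task (sketch):", _task.numpy())
    print("sf reward mean (sketch):", _sf_reward.mean().item())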
|
controllable_agent-main
|
url_benchmark/agent/aps.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .ddpg import DDPGAgent as DDPGAgent
from .ddpg import DDPGAgentConfig as DDPGAgentConfig
from .fb_ddpg import FBDDPGAgent as FBDDPGAgent
from .aps import APSAgent as APSAgent
from .ddpg import MetaDict as MetaDict
# register agents for hydra
from .sf import SFAgent
from .goal_td3 import GoalTD3Agent
from .discrete_sf import DiscreteSFAgent
from .rnd import RNDAgent
from .diayn import DIAYNAgent
from .aps import APSAgent
from .proto import ProtoAgent
from .icm_apt import ICMAPTAgent
from .sf_svd import SFSVDAgent
from .new_aps import NEWAPSAgent
from .goal_sm import GoalSMAgent
from .max_ent import MaxEntAgent
from .uvf import UVFAgent
from .discrete_fb import DiscreteFBAgent
|
controllable_agent-main
|
url_benchmark/agent/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import typing as tp
import torch
from url_benchmark import utils
from .ddpg import DDPGAgent
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
import dataclasses
from url_benchmark.agent.ddpg import DDPGAgentConfig
from hydra.core.config_store import ConfigStore
import omegaconf
@dataclasses.dataclass
class MaxEntAgentConfig(DDPGAgentConfig):
_target_: str = "url_benchmark.agent.max_ent.MaxEntAgent"
name: str = "max_ent"
knn_rms: bool = True
knn_k: int = 12
knn_avg: bool = True
knn_clip: float = 0.0001
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
cs = ConfigStore.instance()
cs.store(group="agent", name="max_ent", node=MaxEntAgentConfig)
class MaxEntAgent(DDPGAgent):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
cfg = MaxEntAgentConfig(**kwargs)
self.cfg = cfg
# particle-based entropy
rms = utils.RMS(self.cfg.device)
self.pbe = utils.PBE(rms, self.cfg.knn_clip, self.cfg.knn_k, self.cfg.knn_avg, cfg.knn_rms,
self.cfg.device)
def compute_intr_reward(self, goal: torch.Tensor, step: int) -> torch.Tensor:
reward = self.pbe(goal)
intr_ent_reward = reward.reshape(-1, 1)
return intr_ent_reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.obs
action = batch.action
discount = batch.discount
next_goal = next_obs = batch.next_obs
if self.cfg.goal_space is not None: # type: ignore
assert batch.next_goal is not None
next_goal = batch.next_goal
with torch.no_grad():
reward = self.compute_intr_reward(goal=next_goal, step=step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = reward.mean().item()
# update critic
metrics.update(
self.update_critic(obs, action, reward, discount,
next_obs, step))
# update actor
metrics.update(self.update_actor(obs, step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
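# Minimal usage sketch of the particle-based entropy reward used above: it only
# relies on the utils.RMS / utils.PBE call pattern visible in __init__ and
# compute_intr_reward; the "cpu" device and the toy goal dimension are
# illustrative assumptions, not values from the original config.
if __name__ == "__main__":
    rms = utils.RMS("cpu")
    pbe = utils.PBE(rms, 0.0001, 12, True, True, "cpu")
    goals = torch.rand(64, 3)  # 64 random 3-d goals
    reward = pbe(goals).reshape(-1, 1)  # same reshaping as compute_intr_reward
    print(reward.shape)  # expected: torch.Size([64, 1])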
|
controllable_agent-main
|
url_benchmark/agent/max_ent.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import torch
from torch import nn
from url_benchmark import utils
from .ddpg import DDPGAgent
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict
class Disagreement(nn.Module):
def __init__(self, obs_dim, action_dim, hidden_dim, n_models=5) -> None:
super().__init__()
self.ensemble = nn.ModuleList([
nn.Sequential(nn.Linear(obs_dim + action_dim, hidden_dim),
nn.ReLU(), nn.Linear(hidden_dim, obs_dim))
for _ in range(n_models)
])
def forward(self, obs, action, next_obs) -> Any:
#import ipdb; ipdb.set_trace()
assert obs.shape[0] == next_obs.shape[0]
assert obs.shape[0] == action.shape[0]
errors = []
for model in self.ensemble:
next_obs_hat = model(torch.cat([obs, action], dim=-1))
model_error = torch.norm(next_obs - next_obs_hat,
dim=-1,
p=2,
keepdim=True)
errors.append(model_error)
return torch.cat(errors, dim=1)
def get_disagreement(self, obs, action, next_obs) -> Any:
assert obs.shape[0] == next_obs.shape[0]
assert obs.shape[0] == action.shape[0]
preds = []
for model in self.ensemble:
next_obs_hat = model(torch.cat([obs, action], dim=-1))
preds.append(next_obs_hat)
preds_tensor = torch.stack(preds, dim=0)
return torch.var(preds_tensor, dim=0).mean(dim=-1)
class DisagreementAgent(DDPGAgent):
def __init__(self, update_encoder, **kwargs) -> None:
super().__init__(**kwargs)
self.update_encoder = update_encoder
self.disagreement = Disagreement(self.obs_dim, self.action_dim,
self.hidden_dim).to(self.device)
# optimizers
self.disagreement_opt = torch.optim.Adam(
self.disagreement.parameters(), lr=self.lr)
self.disagreement.train()
def update_disagreement(self, obs, action, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
error = self.disagreement(obs, action, next_obs)
loss = error.mean()
self.disagreement_opt.zero_grad(set_to_none=True)
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.disagreement_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['disagreement_loss'] = loss.item()
return metrics
def compute_intr_reward(self, obs, action, next_obs, step) -> Any:
reward = self.disagreement.get_disagreement(obs, action,
next_obs).unsqueeze(1)
return reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, extr_reward, discount, next_obs = batch.to(self.device).unpack()
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
metrics.update(
self.update_disagreement(obs, action, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(obs, action, next_obs,
step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
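# Minimal usage sketch for the Disagreement ensemble above (toy dimensions,
# runs only when executed as a script): forward() returns one prediction error
# per ensemble member, while get_disagreement() returns the per-sample variance
# of the members' next-state predictions.
if __name__ == "__main__":
    model = Disagreement(obs_dim=4, action_dim=2, hidden_dim=32)
    obs = torch.randn(8, 4)
    action = torch.randn(8, 2)
    next_obs = torch.randn(8, 4)
    print(model(obs, action, next_obs).shape)  # torch.Size([8, 5]) with the default 5 models
    print(model.get_disagreement(obs, action, next_obs).shape)  # torch.Size([8])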
|
controllable_agent-main
|
url_benchmark/agent/disagreement.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
from typing import Any, Dict
import copy
import dataclasses
import torch
from torch import nn
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from .ddpg import DDPGAgent, DDPGAgentConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
import url_benchmark.goals as _goals
@dataclasses.dataclass
class RNDAgentConfig(DDPGAgentConfig):
_target_: str = "url_benchmark.agent.rnd.RNDAgent"
name: str = "rnd"
rnd_rep_dim: int = 512
rnd_scale: float = 1.0
update_encoder: bool = omegaconf.II("update_encoder")
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
cs = ConfigStore.instance()
cs.store(group="agent", name="rnd", node=RNDAgentConfig)
class RND(nn.Module):
def __init__(self,
obs_dim,
hidden_dim,
rnd_rep_dim,
encoder,
aug,
obs_shape,
obs_type,
clip_val=5.) -> None:
super().__init__()
self.clip_val = clip_val
self.aug = aug
if obs_type == "pixels":
self.normalize_obs: nn.Module = nn.BatchNorm2d(obs_shape[0], affine=False)
else:
self.normalize_obs = nn.BatchNorm1d(obs_shape[0], affine=False)
self.predictor = nn.Sequential(encoder, nn.Linear(obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, rnd_rep_dim))
self.target = nn.Sequential(copy.deepcopy(encoder),
nn.Linear(obs_dim, hidden_dim), nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, rnd_rep_dim))
for param in self.target.parameters():
param.requires_grad = False
self.apply(utils.weight_init)
def forward(self, obs) -> Any:
obs = self.aug(obs)
obs = self.normalize_obs(obs)
obs = torch.clamp(obs, -self.clip_val, self.clip_val)
prediction, target = self.predictor(obs), self.target(obs)
prediction_error = torch.square(target.detach() - prediction).mean(
dim=-1, keepdim=True)
return prediction_error
class RNDAgent(DDPGAgent):
def __init__(self, **kwargs: tp.Any) -> None:
super().__init__(**kwargs)
cfg = RNDAgentConfig(**kwargs)
self.cfg = cfg
goal_dim = self.obs_dim
if self.cfg.goal_space is not None:
goal_dim = _goals.get_goal_space_dim(self.cfg.goal_space)
self.rnd = RND(goal_dim, cfg.hidden_dim, cfg.rnd_rep_dim,
self.encoder, self.aug, (goal_dim, ),
cfg.obs_type).to(self.device)
self.intrinsic_reward_rms = utils.RMS(device=self.device)
# optimizers
self.rnd_opt = torch.optim.Adam(self.rnd.parameters(), lr=self.lr)
self.rnd.train()
# pylint: disable=unused-argument
def update_rnd(self, obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
prediction_error = self.rnd(obs)
loss = prediction_error.mean()
self.rnd_opt.zero_grad(set_to_none=True)
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.rnd_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['rnd_loss'] = loss.item()
return metrics
def compute_intr_reward(self, obs, step) -> Any:
prediction_error = self.rnd(obs)
_, intr_reward_var = self.intrinsic_reward_rms(prediction_error)
reward = self.rnd_scale * prediction_error / (
torch.sqrt(intr_reward_var) + 1e-8)
return reward
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
goal = obs = batch.obs
if self.cfg.goal_space is not None: # type: ignore
assert batch.goal is not None
goal = batch.goal
action = batch.action
extr_reward = batch.reward
discount = batch.discount
next_obs = batch.next_obs
# update RND first
if self.reward_free:
# note: one difference is that the RND module is updated off policy
metrics.update(self.update_rnd(goal, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(goal, step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
metrics['pred_error_mean'] = self.intrinsic_reward_rms.M.item()
metrics['pred_error_std'] = torch.sqrt(self.intrinsic_reward_rms.S).item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
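# Minimal usage sketch for the RND module above with state-based inputs: an
# identity encoder/augmentation is enough to exercise the predictor/target pair,
# and the forward pass returns the per-sample prediction error. The toy
# dimensions and the "states" obs_type are illustrative assumptions.
if __name__ == "__main__":
    obs_dim = 6
    rnd = RND(obs_dim=obs_dim, hidden_dim=32, rnd_rep_dim=16,
              encoder=nn.Identity(), aug=nn.Identity(),
              obs_shape=(obs_dim,), obs_type="states")
    obs = torch.randn(16, obs_dim)  # batch > 1 so BatchNorm1d statistics are defined
    print(rnd(obs).shape)  # expected: torch.Size([16, 1])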
|
controllable_agent-main
|
url_benchmark/agent/rnd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import dataclasses
from typing import Any, Tuple
from collections import OrderedDict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark import utils
from .fb_modules import mlp
# MetaDict = tp.Mapping[str, tp.Union[np.ndarray, torch.Tensor]]
MetaDict = tp.Mapping[str, np.ndarray]
@dataclasses.dataclass
class DDPGAgentConfig:
_target_: str = "url_benchmark.agent.ddpg.DDPGAgent"
name: str = "ddpg"
reward_free: bool = omegaconf.II("reward_free")
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device")
lr: float = 1e-4
critic_target_tau: float = 0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb")
use_wandb: bool = omegaconf.II("use_wandb")
num_expl_steps: int = omegaconf.MISSING # to be specified later
hidden_dim: int = 1024
feature_dim: int = 50
stddev_schedule: float = 0.2
stddev_clip: float = 0.3
nstep: int = 3
batch_size: int = 1024 # 256 for pixels
init_critic: bool = True
# update_encoder: ${update_encoder} # not in the config
cs = ConfigStore.instance()
cs.store(group="agent", name="ddpg", node=DDPGAgentConfig)
class Encoder(nn.Module):
def __init__(self, obs_shape) -> None:
super().__init__()
assert len(obs_shape) == 3
self.repr_dim = 32 * 35 * 35
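        # 35x35 assumes 84x84 pixel inputs: 84 -> 41 -> 39 -> 37 -> 35 through the convs below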
self.convnet = nn.Sequential(nn.Conv2d(obs_shape[0], 32, 3, stride=2),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1),
nn.ReLU())
self.apply(utils.weight_init)
def forward(self, obs) -> Any:
obs = obs / 255.0 - 0.5
h = self.convnet(obs)
h = h.view(h.shape[0], -1)
return h
class Actor(nn.Module):
def __init__(self, obs_type, obs_dim, action_dim, feature_dim, hidden_dim) -> None:
super().__init__()
feature_dim = feature_dim if obs_type == 'pixels' else hidden_dim
self.trunk = nn.Sequential(nn.Linear(obs_dim, feature_dim),
nn.LayerNorm(feature_dim), nn.Tanh())
policy_layers = []
policy_layers += [
nn.Linear(feature_dim, hidden_dim),
nn.ReLU(inplace=True)
]
# add additional hidden layer for pixels
if obs_type == 'pixels':
policy_layers += [
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
]
policy_layers += [nn.Linear(hidden_dim, action_dim)]
self.policy = nn.Sequential(*policy_layers)
self.apply(utils.weight_init)
def forward(self, obs, std) -> utils.TruncatedNormal:
h = self.trunk(obs)
mu = self.policy(h)
mu = torch.tanh(mu)
std = torch.ones_like(mu) * std
dist = utils.TruncatedNormal(mu, std)
return dist
class Critic(nn.Module):
def __init__(self, obs_type, obs_dim, action_dim, feature_dim, hidden_dim) -> None:
super().__init__()
self.obs_type = obs_type
if obs_type == 'pixels':
# for pixels actions will be added after trunk
self.trunk = nn.Sequential(nn.Linear(obs_dim, feature_dim),
nn.LayerNorm(feature_dim), nn.Tanh())
trunk_dim = feature_dim + action_dim
else:
# for states actions come in the beginning
self.trunk = nn.Sequential(
nn.Linear(obs_dim + action_dim, hidden_dim),
nn.LayerNorm(hidden_dim), nn.Tanh())
trunk_dim = hidden_dim
def make_q():
q_layers = []
q_layers += [
nn.Linear(trunk_dim, hidden_dim),
nn.ReLU(inplace=True)
]
if obs_type == 'pixels':
q_layers += [
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True)
]
q_layers += [nn.Linear(hidden_dim, 1)]
return nn.Sequential(*q_layers)
self.Q1 = make_q()
self.Q2 = make_q()
self.apply(utils.weight_init)
def forward(self, obs, action) -> Tuple[Any, Any]:
inpt = obs if self.obs_type == 'pixels' else torch.cat([obs, action],
dim=-1)
h = self.trunk(inpt)
h = torch.cat([h, action], dim=-1) if self.obs_type == 'pixels' else h
q1 = self.Q1(h)
q2 = self.Q2(h)
return q1, q2
class DDPGAgent:
# pylint: disable=unused-argument
def __init__(self, meta_dim: int = 0, **kwargs: tp.Any) -> None:
if self.__class__.__name__.startswith(("DIAYN", "APS", "RND", "Proto", "ICMAPT", "MaxEnt")): # HACK
cfg_fields = {field.name for field in dataclasses.fields(DDPGAgentConfig)}
# those have their own config, so lets curate the fields
# others will need to be ported in time
kwargs = {x: y for x, y in kwargs.items() if x in cfg_fields}
cfg = DDPGAgentConfig(**kwargs)
self.cfg = cfg
self.action_dim = cfg.action_shape[0]
self.solved_meta = None
# self.update_encoder = update_encoder # used in subclasses
# models
if cfg.obs_type == 'pixels':
self.aug: tp.Union[utils.RandomShiftsAug, nn.Identity] = utils.RandomShiftsAug(pad=4)
self.encoder: tp.Union[Encoder, nn.Identity] = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim + meta_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0] + meta_dim
self.actor = Actor(cfg.obs_type, self.obs_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim).to(cfg.device)
self.critic: nn.Module = Critic(cfg.obs_type, self.obs_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim).to(cfg.device)
self.critic_target: nn.Module = Critic(cfg.obs_type, self.obs_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim).to(cfg.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=cfg.lr)
self.reward_model: tp.Optional[torch.nn.Module] = None
self.reward_opt: tp.Optional[torch.optim.Adam] = None
if self.reward_free:
self.reward_model = mlp(self.obs_dim, cfg.hidden_dim, "ntanh", cfg.hidden_dim, # type: ignore
"relu", cfg.hidden_dim, "relu", 1).to(cfg.device) # type: ignore
self.reward_opt = torch.optim.Adam(self.reward_model.parameters(), lr=1e-3)
self.train()
self.critic_target.train()
def __getattr__(self, name: str) -> tp.Any:
# LEGACY: allow accessing the config directly as attribute
# to avoid having to rewrite everything at once
# cost: less type safety
if "cfg" in self.__dict__:
return getattr(self.cfg, name)
raise AttributeError
def train(self, training: bool = True) -> None:
self.training = training
self.encoder.train(training)
self.actor.train(training)
self.critic.train(training)
def init_from(self, other) -> None:
# copy parameters over
utils.hard_update_params(other.encoder, self.encoder)
utils.hard_update_params(other.actor, self.actor)
if self.init_critic:
utils.hard_update_params(other.critic.trunk, self.critic.trunk)
def init_meta(self) -> MetaDict:
return OrderedDict()
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
return meta
def act(self, obs, meta, step, eval_mode) -> np.ndarray:
obs = torch.as_tensor(obs, device=self.device).unsqueeze(0)
h = self.encoder(obs)
inputs = [h]
for value in meta.values():
value = torch.as_tensor(value, device=self.device).unsqueeze(0)
inputs.append(value)
inpt = torch.cat(inputs, dim=-1)
#assert obs.shape[-1] == self.obs_shape[-1]
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(inpt, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample(clip=None)
if step < self.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def train_reward(self, replay_loader: ReplayBuffer) -> None:
obs_list, reward_list = [], []
batch_size = 0
num_inference_steps = 10000
while batch_size < num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs = batch.to(self.device).unpack()
del obs, action, discount
obs_list.append(next_obs)
reward_list.append(reward)
batch_size += next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[: num_inference_steps], reward[: num_inference_steps]
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
assert self.reward_model is not None
for i in range(2000):
reward_loss = (self.reward_model(obs) - reward).pow(2).mean()
assert self.reward_opt is not None
self.reward_opt.zero_grad(set_to_none=True)
reward_loss.backward()
self.reward_opt.step()
print(f"iteration: {i}, reward_loss: {reward_loss.item()}")
        # compute test loss on freshly sampled batches (reset the accumulators,
        # otherwise the loop below never runs because batch_size already reached
        # num_inference_steps above and the "test" loss would reuse the training data)
        obs_list, reward_list = [], []
        batch_size = 0
        while batch_size < num_inference_steps:

batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs = batch.to(self.device).unpack()
del obs, action, discount
obs_list.append(next_obs)
reward_list.append(reward)
batch_size += next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[: num_inference_steps], reward[: num_inference_steps]
test_loss = (self.reward_model(obs) - reward).pow(2).mean()
print(f"Test Loss: {test_loss.item()}")
@tp.no_type_check # TODO remove
def update_critic(self, obs, action, reward, discount, next_obs, step) -> tp.Dict[str, float]:
metrics = {}
with torch.no_grad():
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(next_obs, stddev)
next_action = dist.sample(clip=self.stddev_clip)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1, target_Q2)
target_Q = reward + (discount * target_V)
Q1, Q2 = self.critic(obs, action)
critic_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
if self.use_tb or self.use_wandb:
metrics['critic_target_q'] = target_Q.mean().item()
metrics['critic_q1'] = Q1.mean().item()
metrics['critic_q2'] = Q2.mean().item()
metrics['critic_loss'] = critic_loss.item()
# optimize critic
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.critic_opt.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
return metrics
@tp.no_type_check # TODO remove
def update_actor(self, obs, step) -> tp.Dict[str, float]:
metrics = {}
stddev = utils.schedule(self.stddev_schedule, step)
dist = self.actor(obs, stddev)
action = dist.sample(clip=self.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
Q1, Q2 = self.critic(obs, action)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.use_tb or self.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def aug_and_encode(self, obs) -> Any:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
#import ipdb; ipdb.set_trace()
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs, *_ = batch.to(self.device).unpack()
if self.reward_free:
del reward
assert self.reward_model is not None
reward = self.reward_model(next_obs)
# augment and encode
obs = self.aug_and_encode(obs)
with torch.no_grad():
next_obs = self.aug_and_encode(next_obs)
if self.use_tb or self.use_wandb:
metrics['batch_reward'] = reward.mean().item()
# update critic
metrics.update(
self.update_critic(obs, action, reward, discount, next_obs, step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
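# Minimal shape-check sketch for the state-based Actor/Critic above: the actor
# maps an observation and an exploration stddev to a TruncatedNormal over
# actions, and the critic returns twin Q-values. Toy dimensions are illustrative.
if __name__ == "__main__":
    obs_dim, action_dim = 6, 2
    actor = Actor('states', obs_dim, action_dim, feature_dim=50, hidden_dim=128)
    critic = Critic('states', obs_dim, action_dim, feature_dim=50, hidden_dim=128)
    obs = torch.randn(8, obs_dim)
    dist = actor(obs, 0.2)
    action = dist.sample(clip=None)
    q1, q2 = critic(obs, action)
    print(action.shape, q1.shape, q2.shape)  # (8, 2), (8, 1), (8, 1)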
|
controllable_agent-main
|
url_benchmark/agent/ddpg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
from pathlib import Path
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from .ddpg import MetaDict
from .fb_modules import IdentityMap
from .ddpg import Encoder
from .fb_modules import Actor, DiagGaussianActor, ForwardMap, BackwardMap, mlp, OnlineCov
from dm_env import specs
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class SFSVDAgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.sf_svd.SFSVDAgent"
name: str = "sf_svd"
# reward_free: ${reward_free}
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
lr_coef: float = 5
sf_target_tau: float = 0.01 # 0.001-0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog")  # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING # ??? # to be specified later
num_inference_steps: int = 5120
hidden_dim: int = 1024 # 128, 2048
backward_hidden_dim: int = 512 # 128, 2048
feature_dim: int = 512 # 128, 1024
z_dim: int = 100 # 30-200
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" # 0, 0.1, 0.2
stddev_clip: float = 0.3 # 1
update_z_every_step: int = 100
nstep: int = 1
batch_size: int = 1024
init_sf: bool = True
update_encoder: bool = omegaconf.II("update_encoder") # ${update_encoder}
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
# ortho_coef: float = 0.1 # 0.01-10
log_std_bounds: tp.Tuple[float, float] = (-5, 2) # param for DiagGaussianActor
temp: float = 1 # temperature for DiagGaussianActor
boltzmann: bool = False # set to true for DiagGaussianActor
debug: bool = False
preprocess: bool = True
num_sf_updates: int = 1
feature_learner: str = "p"
mix_ratio: float = 0.0
q_loss: bool = True
update_cov_every_step: int = 1000
add_trunk: bool = False
cs = ConfigStore.instance()
cs.store(group="agent", name="sf_svd", node=SFSVDAgentConfig)
class SVDLearner(nn.Module):
def __init__(self, obs_dim, action_dim, z_dim, hidden_dim) -> None:
super().__init__()
self.feature_net = mlp(obs_dim + action_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim, "L2")
self.mu_net = mlp(obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", z_dim)
self.apply(utils.weight_init)
def forward(self, obs: torch.Tensor, action: torch.Tensor, next_obs: torch.Tensor, future_obs: torch.Tensor):
del future_obs
phi = self.feature_net(torch.cat([obs, action], dim=1))
mu = self.mu_net(next_obs)
P = torch.einsum("sd, td -> st", phi, mu)
I = torch.eye(*P.size(), device=P.device)
off_diag = ~I.bool()
loss = - 2 * P.diag().mean() + P[off_diag].pow(2).mean()
# orthonormality loss
Cov = torch.matmul(phi, phi.T)
I = torch.eye(*Cov.size(), device=Cov.device)
off_diag = ~I.bool()
orth_loss_diag = - 2 * Cov.diag().mean()
orth_loss_offdiag = Cov[off_diag].pow(2).mean()
orth_loss = orth_loss_offdiag + orth_loss_diag
loss += orth_loss
return loss
class SFSVDAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = SFSVDAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
goal_dim = len(g)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
# create the network
if self.cfg.boltzmann:
self.actor: nn.Module = DiagGaussianActor(cfg.obs_type, self.obs_dim, cfg.z_dim, self.action_dim,
cfg.hidden_dim, cfg.log_std_bounds).to(cfg.device)
else:
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=cfg.add_trunk).to(cfg.device)
self.successor_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=cfg.add_trunk).to(cfg.device)
# build up the target network
self.successor_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=cfg.add_trunk).to(cfg.device)
self.feature_learner = SVDLearner(goal_dim, self.action_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# if cfg.debug:
# self.feature_learner: nn.Module = IdentityMap().to(cfg.device)
# self.feature_net = BackwardMap(cfg.obs_type, goal_dim, cfg.z_dim, cfg.backward_hidden_dim).to(cfg.device)
# load the weights into the target networks
self.successor_target_net.load_state_dict(self.successor_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.sf_opt = torch.optim.Adam(self.successor_net.parameters(), lr=cfg.lr)
self.phi_opt: tp.Optional[torch.optim.Adam] = None
self.phi_opt = torch.optim.Adam(self.feature_learner.parameters(), lr=cfg.lr_coef * cfg.lr)
self.train()
self.successor_target_net.train()
self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
# self.online_cov = OnlineCov(mom=0.99, dim=self.cfg.z_dim).to(self.cfg.device)
# self.online_cov.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.successor_net]:
net.train(training)
if self.phi_opt is not None:
self.feature_learner.train()
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder", "actor"]
if self.cfg.init_sf:
names += ["successor_net", "feature_learner", "successor_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def precompute_cov(self, replay_loader: ReplayBuffer) -> None:
print("computing Cov of phi to be used at inference")
obs_list = []
action_list = []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = batch.goal if self.cfg.goal_space is not None else batch.obs
if obs is None:
raise ValueError("Obs should never be None")
obs_list.append(obs)
action_list.append(batch.action)
batch_size += batch.next_obs.size(0)
obs = torch.cat(obs_list, 0)
action = torch.cat(action_list, 0)
self.inv_cov = self._compute_cov(torch.cat([obs, action], dim=1))
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# cov = torch.matmul(phi.T, phi) / phi.shape[0]
# self.inv_cov = torch.linalg.pinv(cov)
def _compute_cov(self, goal: torch.Tensor) -> torch.Tensor:
# compute inverse of cov of phi
with torch.no_grad():
phi = self.feature_learner.feature_net(goal)
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.inverse(cov)
return inv_cov
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
# assert self.cfg.feature_learner in ["FB"]
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
dummy_action = torch.zeros((1, self.action_dim), dtype=torch.float32).to(self.cfg.device)
with torch.no_grad():
z = self.feature_learner.feature_net(torch.cat([desired_goal, dummy_action], dim=1))
z = torch.matmul(z, self.inv_cov) # 1 x z_dim
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list, action_list = [], [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.goal if self.cfg.goal_space is not None else batch.obs)
action_list.append(batch.action)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward, action = torch.cat(obs_list, 0), torch.cat(reward_list, 0), torch.cat(action_list, 0) # type: ignore
obs, reward, action = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps], action[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_action_and_rewards(obs, action, reward)
def infer_meta_from_obs_action_and_rewards(self, obs: torch.Tensor, action: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
with torch.no_grad():
phi = self.feature_learner.feature_net(torch.cat([obs, action], dim=1))
z = torch.linalg.lstsq(phi, reward).solution # z_dim x 1
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=0) # be careful to the dimension
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
def sample_z(self, size):
gaussian_rdv = torch.randn((size, self.cfg.z_dim), dtype=torch.float32)
z = math.sqrt(self.cfg.z_dim) * F.normalize(gaussian_rdv, dim=1)
return z
def init_meta(self) -> MetaDict:
if self.solved_meta is not None:
print('solved_meta')
return self.solved_meta
else:
z = self.sample_z(1)
z = z.squeeze().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
if self.cfg.boltzmann:
dist = self.actor(h, z)
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def update_sf(
self,
obs: torch.Tensor,
goal: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
future_goal: tp.Optional[torch.Tensor],
z: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# compute target successor measure
with torch.no_grad():
if self.cfg.boltzmann:
dist = self.actor(next_obs, z)
next_action = dist.sample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
next_F1, next_F2 = self.successor_target_net(next_obs, z, next_action) # batch x z_dim
target_phi = self.feature_learner.feature_net(torch.cat([goal, action], dim=1)).detach() # batch x z_dim
next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', next_Fi, z) for next_Fi in [next_F1, next_F2]]
next_F = torch.where((next_Q1 < next_Q2).reshape(-1, 1), next_F1, next_F2)
target_F = target_phi + discount * next_F
F1, F2 = self.successor_net(obs, z, action)
if not self.cfg.q_loss:
# compute SF loss
sf_loss = F.mse_loss(F1, target_F) + F.mse_loss(F2, target_F)
else:
# alternative loss
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
target_Q = torch.einsum('sd, sd -> s', target_F, z)
sf_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
sf_loss /= self.cfg.z_dim
# compute feature loss
phi_loss = self.feature_learner(obs=goal, action=action, next_obs=next_goal, future_obs=future_goal)
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['target_F'] = target_F.mean().item()
metrics['F1'] = F1.mean().item()
metrics['phi'] = target_phi.mean().item()
metrics['phi_norm'] = torch.norm(target_phi, dim=-1).mean().item()
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['sf_loss'] = sf_loss.item()
if phi_loss is not None:
metrics['phi_loss'] = phi_loss.item()
if isinstance(self.sf_opt, torch.optim.Adam):
metrics["sf_opt_lr"] = self.sf_opt.param_groups[0]["lr"]
# if self.cfg.goal_space in ["simplified_walker", "simplified_quadruped"]:
# metrics['max_velocity'] = goal_prime[:, -1].max().item()
# optimize SF
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.sf_opt.zero_grad(set_to_none=True)
sf_loss.backward()
self.sf_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
# optimise phi
if self.phi_opt is not None:
self.phi_opt.zero_grad(set_to_none=True)
phi_loss.backward()
self.phi_opt.step()
return metrics
def update_actor(self, obs: torch.Tensor, z: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if self.cfg.boltzmann:
dist = self.actor(obs, z)
action = dist.rsample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.successor_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
Q = torch.min(Q1, Q2)
actor_loss = (self.cfg.temp * log_prob - Q).mean() if self.cfg.boltzmann else -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
# metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def aug_and_encode(self, obs: torch.Tensor) -> torch.Tensor:
obs = self.aug(obs)
return self.encoder(obs)
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
goal_list = []
for _ in range(self.cfg.num_sf_updates):
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs = goal = batch.obs
action = batch.action
discount = batch.discount
next_obs = next_goal = batch.next_obs
future_goal = batch.future_obs
if self.cfg.goal_space:
assert batch.goal is not None
assert batch.next_goal is not None
goal = batch.goal
next_goal = batch.next_goal
future_goal = batch.future_goal
goal_list.append(next_goal)
z = self.sample_z(self.cfg.batch_size).to(self.cfg.device)
if not z.shape[-1] == self.cfg.z_dim:
raise RuntimeError("There's something wrong with the logic here")
if self.cfg.mix_ratio > 0:
perm = torch.randperm(self.cfg.batch_size)
desired_goal = next_goal[perm]
dummy_action = torch.zeros((desired_goal.shape[0], self.action_dim), dtype=torch.float32).to(self.cfg.device)
with torch.no_grad():
phi = self.feature_learner.feature_net(torch.cat([desired_goal, dummy_action], dim=1))
# compute inverse of cov of phi
cov = torch.matmul(phi.T, phi) / phi.shape[0]
inv_cov = torch.inverse(cov)
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
with torch.no_grad():
new_z = phi[mix_idxs]
new_z = torch.matmul(new_z, inv_cov) # batch_size x z_dim
new_z = math.sqrt(self.cfg.z_dim) * F.normalize(new_z, dim=1)
z[mix_idxs] = new_z
metrics.update(self.update_sf(obs=obs, goal=goal, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, future_goal=future_goal,
z=z, step=step))
# update actor
metrics.update(self.update_actor(obs, z, step))
# update critic target
utils.soft_update_params(self.successor_net, self.successor_target_net,
self.cfg.sf_target_tau)
# update inv cov
# if step % self.cfg.update_cov_every_step == 0:
# logger.info("update online cov")
# obs_list = list()
# batch_size = 0
# while batch_size < 10000:
# batch = next(replay_loader)
# batch = batch.to(self.cfg.device)
# obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
# batch_size += batch.next_obs.size(0)
# obs = torch.cat(obs_list, 0)
# with torch.no_grad():
# phi = self.feature_learner.feature_net(obs)
# self.inv_cov = torch.inverse(self.online_cov(phi))
return metrics
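# Minimal usage sketch of the SVDLearner loss above: phi(s, a) and mu(s') are
# the two low-rank factors, and the returned scalar combines the factorization
# objective with the orthonormality penalty on phi. Toy dimensions are
# illustrative assumptions.
if __name__ == "__main__":
    learner = SVDLearner(obs_dim=4, action_dim=2, z_dim=8, hidden_dim=64)
    obs = torch.randn(32, 4)
    action = torch.randn(32, 2)
    next_obs = torch.randn(32, 4)
    loss = learner(obs, action, next_obs, future_obs=None)
    print(loss.shape, float(loss))  # scalar loss: torch.Size([])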
|
controllable_agent-main
|
url_benchmark/agent/sf_svd.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import typing as tp
from collections import OrderedDict
import dataclasses
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark.dmc import TimeStep
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark import utils
from .fb_modules import mlp, Actor
from url_benchmark import goals as _goals
from pathlib import Path
logger = logging.getLogger(__name__)
# MetaDict = tp.Mapping[str, tp.Union[np.ndarray, torch.Tensor]]
MetaDict = tp.Mapping[str, np.ndarray]
@dataclasses.dataclass
class GoalTD3AgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.goal_td3.GoalTD3Agent"
name: str = "goal_td3"
reward_free: bool = omegaconf.II("reward_free")
custom_reward: tp.Optional[str] = omegaconf.II("custom_reward")
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
critic_target_tau: float = 0.01
update_every_steps: float = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog")  # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING
hidden_dim: int = 1024
feature_dim: int = 512
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)"
stddev_clip: float = 0.3 # 1.0
nstep: int = 1
batch_size: int = 1024 # 256 for pixels
init_critic: bool = True
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
fb_reward: bool = False
future_ratio: float = 0
preprocess: bool = False
add_trunk: bool = False
supervised: bool = True
cs = ConfigStore.instance()
cs.store(group="agent", name="goal_td3", node=GoalTD3AgentConfig)
class Critic(nn.Module):
""" forward representation class"""
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.action_dim = action_dim
self.z_dim = z_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_action_net = mlp(self.obs_dim + self.action_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim + self.action_dim, hidden_dim, "ntanh")
feature_dim = hidden_dim
seq = [feature_dim, hidden_dim, "irelu", self.z_dim]
self.F1 = mlp(*seq)
self.F2 = mlp(*seq)
self.apply(utils.weight_init)
def forward(self, obs, z, action):
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs_action = self.obs_action_net(torch.cat([obs, action], dim=-1))
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
h = torch.cat([obs_action, obs_z], dim=-1)
else:
h = torch.cat([obs, z, action], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
F1 = self.F1(h)
F2 = self.F2(h)
return F1, F2
class GoalTD3Agent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = GoalTD3AgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
self.goal_dim = 0
if cfg.goal_space is not None:
if cfg.goal_space == "quad_pos_speed":
self.goal_dim = 7 # ugly hack
else:
g = next(iter(_goals.goals.funcs[cfg.goal_space].values()))()
assert len(g.shape) == 1
self.goal_dim = len(g)
if self.cfg.fb_reward:
# FB
pt = Path("/checkpoint/atouati/ca/2022-09-09_proto_maze/results/fb_ddpg_5e-05/9/models/snapshot_1000000.pt")
# pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_sweep/2022.08.03/"
# "161531_fb_ddpg_point_mass_maze_reach_top_right_offline/1/models/snapshot_1000000.pt")
# Contr
# pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_paper/"
# "2022.08.22_point_mass_maze_reach_top_right/100239_sf_contrastive/0/models/snapshot_1000000.pt")
# Lap
# pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_paper/"
# "2022.08.23_point_mass_maze_reach_top_right/072210_sf_lap/1/models/snapshot_1000000.pt")
# pt = Path("/private/home/atouati/controllable_agent/url_benchmark/exp_paper/"
# "2022.08.25_point_mass_maze_reach_top_right/161812_new_aps/0/models/snapshot_2000000.pt")
print(f"loading {pt.resolve()}")
with pt.open("rb") as f:
payload = torch.load(f)
fb_agent = payload["agent"]
if hasattr(fb_agent, "feature_learner"):
self.feature_net = fb_agent.feature_learner.feature_net
else:
self.feature_net = fb_agent.backward_net
self.feature_net.eval()
self.goal_dim = fb_agent.cfg.z_dim
if "replay_loader" in payload.keys():
self.precompute_cov(payload["replay_loader"])
self.actor = Actor(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic: nn.Module = Critic(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic_target: nn.Module = Critic(self.obs_dim, self.goal_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.critic_target.load_state_dict(self.critic.state_dict())
# optimizers
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=cfg.lr)
self.reward_model: tp.Optional[torch.nn.Module] = None
self.reward_opt: tp.Optional[torch.optim.Adam] = None
if cfg.reward_free:
self.reward_model = mlp(self.obs_dim, cfg.hidden_dim, "ntanh", cfg.hidden_dim, # type: ignore
"relu", cfg.hidden_dim, "relu", 1).to(cfg.device) # type: ignore
self.reward_opt = torch.optim.Adam(self.reward_model.parameters(), lr=1e-3)
self.train()
self.critic_target.train()
def train(self, training: bool = True) -> None:
self.training = training
self.actor.train(training)
self.critic.train(training)
def precompute_cov(self, replay_loader: ReplayBuffer) -> None:
if not self.cfg.fb_reward:
return None
logger.info("computing Cov of phi to be used at inference")
obs_list: tp.List[torch.Tensor] = []
batch_size = 0
while batch_size < 100000:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs) # type: ignore
batch_size += batch.next_obs.size(0)
obs = torch.cat(obs_list, 0)
with torch.no_grad():
phi = self.feature_net(obs)
cov = torch.matmul(phi.T, phi) / phi.shape[0]
self.inv_cov = torch.linalg.pinv(cov)
def init_from(self, other) -> None:
# copy parameters over
utils.hard_update_params(other.actor, self.actor)
utils.hard_update_params(other.critic, self.critic)
def init_meta(self, custom_reward: tp.Optional[_goals.BaseReward] = None) -> MetaDict:
if isinstance(custom_reward, _goals.MazeMultiGoal):
idx = np.random.choice(len(custom_reward.goals))
desired_goal = custom_reward.goals[idx]
meta = OrderedDict()
meta["g"] = desired_goal
return meta
else:
return OrderedDict()
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
return meta
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
meta = OrderedDict()
meta['g'] = goal_array
return meta
def act(self, obs, meta, step, eval_mode) -> np.ndarray:
device = torch.device(self.cfg.device)
obs = torch.as_tensor(obs, device=device).unsqueeze(0)
goals = []
for value in meta.values():
value = torch.as_tensor(value, device=device).unsqueeze(0)
if self.cfg.fb_reward:
with torch.no_grad():
goals.append(torch.matmul(self.feature_net(value), self.inv_cov))
else:
goals.append(value)
goal = torch.cat(goals, dim=-1)
#assert obs.shape[-1] == self.obs_shape[-1]
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, goal, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample(clip=None)
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def train_reward(self, replay_loader: ReplayBuffer) -> None:
obs_list, reward_list = [], []
batch_size = 0
num_inference_steps = 10000
while batch_size < num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs = batch.to(self.cfg.device).unpack()
del obs, action, discount
obs_list.append(next_obs)
reward_list.append(reward)
batch_size += next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[: num_inference_steps], reward[: num_inference_steps]
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
assert self.reward_model is not None
for i in range(2000):
reward_loss = (self.reward_model(obs) - reward).pow(2).mean()
assert self.reward_opt is not None
self.reward_opt.zero_grad(set_to_none=True)
reward_loss.backward()
self.reward_opt.step()
print(f"iteration: {i}, reward_loss: {reward_loss.item()}")
        # compute test loss on freshly sampled batches (reset the accumulators,
        # otherwise the loop below never runs because batch_size already reached
        # num_inference_steps above and the "test" loss would reuse the training data)
        obs_list, reward_list = [], []
        batch_size = 0
        while batch_size < num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, reward, discount, next_obs = batch.to(self.cfg.device).unpack()
del obs, action, discount
obs_list.append(next_obs)
reward_list.append(reward)
batch_size += next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[: num_inference_steps], reward[: num_inference_steps]
test_loss = (self.reward_model(obs) - reward).pow(2).mean()
print(f"Test Loss: {test_loss.item()}")
@tp.no_type_check # TODO remove
def update_critic(self,
obs: torch.Tensor,
goal: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
step: int) -> tp.Dict[str, float]:
metrics = {}
with torch.no_grad():
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, goal, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
target_Q1, target_Q2 = self.critic_target(next_obs, goal, next_action)
target_V = torch.min(target_Q1, target_Q2)
target_Q = reward + (discount * target_V)
Q1, Q2 = self.critic(obs, goal, action)
critic_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['critic_target_q'] = target_Q.mean().item()
metrics['critic_q1'] = Q1.mean().item()
metrics['critic_q2'] = Q2.mean().item()
metrics['critic_loss'] = critic_loss.item()
metrics['stdev'] = stddev
# optimize critic
self.critic_opt.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_opt.step()
return metrics
@tp.no_type_check # TODO remove
def update_actor(self,
obs: torch.Tensor,
goal: torch.Tensor,
step: int) -> tp.Dict[str, float]:
metrics = {}
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, goal, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
Q1, Q2 = self.critic(obs, goal, action)
Q = torch.min(Q1, Q2)
actor_loss = -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['actor_logprob'] = log_prob.mean().item()
metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int,
custom_reward: tp.Optional[_goals.BaseReward] = None) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
#import ipdb; ipdb.set_trace()
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
achieved_goal = batch.next_goal
future_goal = batch.future_obs
if self.cfg.goal_space:
future_goal = batch.future_goal
obs = batch.obs
action = batch.action
discount = batch.discount
reward = batch.reward
next_obs = batch.next_obs
if self.cfg.reward_free:
del reward
assert self.reward_model is not None
reward = self.reward_model(next_obs)
desired_goal: torch.Tensor = torch.tensor([], dtype=torch.float32, device=self.cfg.device)
device = torch.device(self.cfg.device)
if isinstance(custom_reward, _goals.MazeMultiGoal):
del reward
if self.cfg.supervised:
# sample uniform goal
idx = np.random.choice(len(custom_reward.goals), size=self.cfg.batch_size)
desired_goal = custom_reward.goals[idx]
# convert to tensor
desired_goal = torch.as_tensor(desired_goal, device=device)
else:
# sample goal from replay
new_batch = replay_loader.sample(self.cfg.batch_size)
new_batch = new_batch.to(self.cfg.device)
desired_goal = new_batch.next_goal # type: ignore
# perm = torch.randperm(self.cfg.batch_size)
# desired_goal = achieved_goal[perm]
if self.cfg.future_ratio > 0:
assert future_goal is not None
future_idxs = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.future_ratio)[0]
desired_goal[future_idxs] = future_goal[future_idxs] # type: ignore
if self.cfg.fb_reward:
# reward = (self.feature_net(achieved_goals) *
# torch.matmul(self.feature_net(desired_goals), self.inv_cov)).sum(dim=1, keepdims=True)
with torch.no_grad():
desired_goal = torch.matmul(self.feature_net(desired_goal), self.inv_cov)
reward = (self.feature_net(achieved_goal) * desired_goal).sum(dim=1, keepdims=True)
else:
reward, _ = custom_reward.from_goal(achieved_goal.cpu().numpy(), desired_goal.cpu().numpy()) # type: ignore
reward = torch.as_tensor(reward, device=device).unsqueeze(1) # type: ignore
# # augment obs
# obs = torch.cat([obs, desired_goal], dim=-1)
# next_obs = torch.cat([next_obs, desired_goal], dim=-1)
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['batch_reward'] = reward.mean().item()
# update critic
metrics.update(
self.update_critic(obs=obs, goal=desired_goal, action=action, reward=reward,
discount=discount, next_obs=next_obs, step=step))
# update actor
metrics.update(self.update_actor(obs=obs, goal=desired_goal, step=step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.cfg.critic_target_tau)
return metrics
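# Minimal shape-check sketch for the goal-conditioned Critic above with
# preprocess=False: note that each head outputs z_dim values, which
# update_critic compares against a TD target broadcast from the (batch, 1)
# reward. Toy dimensions are illustrative assumptions.
if __name__ == "__main__":
    critic = Critic(obs_dim=4, z_dim=2, action_dim=2, feature_dim=32,
                    hidden_dim=64, preprocess=False)
    obs, goal, action = torch.randn(8, 4), torch.randn(8, 2), torch.randn(8, 2)
    q1, q2 = critic(obs, goal, action)
    print(q1.shape, q2.shape)  # torch.Size([8, 2]) torch.Size([8, 2])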
|
controllable_agent-main
|
url_benchmark/agent/goal_td3.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
import dataclasses
from copy import deepcopy
import torch
from torch import nn
import torch.nn.functional as F
from torch import distributions as pyd
from torch import jit
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from .ddpg import DDPGAgent
from .ddpg import DDPGAgentConfig as _BaseConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from typing import Any, Dict, TypeVar
_T = TypeVar('_T')
@jit.script
def sinkhorn_knopp(Q):
Q -= Q.max()
Q = torch.exp(Q).T
Q /= Q.sum()
r = torch.ones(Q.shape[0], device=Q.device) / Q.shape[0]
c = torch.ones(Q.shape[1], device=Q.device) / Q.shape[1]
for _ in range(3):
u = Q.sum(dim=1)
u = r / u
Q *= u.unsqueeze(dim=1)
Q *= (c / Q.sum(dim=0)).unsqueeze(dim=0)
Q = Q / Q.sum(dim=0, keepdim=True)
return Q.T
class Projector(nn.Module):
def __init__(self, pred_dim, proj_dim) -> None:
super().__init__()
self.trunk = nn.Sequential(nn.Linear(pred_dim, proj_dim), nn.ReLU(),
nn.Linear(proj_dim, pred_dim))
self.apply(utils.weight_init)
def forward(self, x) -> Any:
return self.trunk(x)
@dataclasses.dataclass
class ProtoAgentConfig(_BaseConfig):
_target_: str = "url_benchmark.agent.proto.ProtoAgent"
name: str = "proto"
update_encoder: bool = omegaconf.II("update_encoder")
pred_dim: int = 128
proj_dim: int = 512
num_protos: int = 512
tau: float = 0.1
topk: int = 3
queue_size: int = 2048
encoder_target_tau: float = 0.05
cs = ConfigStore.instance()
cs.store(group="agent", name="proto", node=ProtoAgentConfig)
class ProtoAgent(DDPGAgent):
def __init__(self, **kwargs) -> None:
cfg = ProtoAgentConfig(**kwargs)
super().__init__(**kwargs)
self.cfg = cfg # override base ddpg cfg type
# models
self.encoder_target = deepcopy(self.encoder)
self.predictor = nn.Linear(self.obs_dim, cfg.pred_dim).to(self.device)
self.predictor.apply(utils.weight_init)
self.predictor_target = deepcopy(self.predictor)
self.projector = Projector(cfg.pred_dim, cfg.proj_dim).to(self.device)
self.projector.apply(utils.weight_init)
# prototypes
self.protos = nn.Linear(cfg.pred_dim, cfg.num_protos,
bias=False).to(self.device)
self.protos.apply(utils.weight_init)
# candidate queue
self.queue = torch.zeros(cfg.queue_size, cfg.pred_dim, device=self.device)
self.queue_ptr = 0
# optimizers
self.proto_opt = torch.optim.Adam(utils.chain(
self.encoder.parameters(), self.predictor.parameters(),
self.projector.parameters(), self.protos.parameters()),
lr=self.lr)
self.predictor.train()
self.projector.train()
self.protos.train()
def init_from(self, other) -> None:
# copy parameters over
utils.hard_update_params(other.encoder, self.encoder)
utils.hard_update_params(other.actor, self.actor)
utils.hard_update_params(other.predictor, self.predictor)
utils.hard_update_params(other.projector, self.projector)
utils.hard_update_params(other.protos, self.protos)
if self.init_critic:
utils.hard_update_params(other.critic, self.critic)
def normalize_protos(self) -> None:
C = self.protos.weight.data.clone()
C = F.normalize(C, dim=1, p=2)
self.protos.weight.data.copy_(C)
# pylint: disable=unused-argument
def compute_intr_reward(self, obs, step) -> Any:
self.normalize_protos()
# find a candidate for each prototype
with torch.no_grad():
z = self.encoder(obs)
z = self.predictor(z)
z = F.normalize(z, dim=1, p=2)
scores = self.protos(z).T
prob = F.softmax(scores, dim=1)
candidates = pyd.Categorical(prob).sample()
# enqueue candidates
ptr = self.queue_ptr
self.queue[ptr:ptr + self.num_protos] = z[candidates]
self.queue_ptr = (ptr + self.num_protos) % self.queue.shape[0]
# compute distances between the batch and the queue of candidates
z_to_q = torch.norm(z[:, None, :] - self.queue[None, :, :], dim=2, p=2)
all_dists, _ = torch.topk(z_to_q, self.topk, dim=1, largest=False)
dist = all_dists[:, -1:]
reward = dist
return reward
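    # Editorial note: the intrinsic reward above is the distance to the
    # topk-th nearest neighbour among the queued candidate embeddings, a
    # particle-based novelty estimate: states far from all recently queued
    # candidates receive a larger exploration bonus.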
# pylint: disable=unused-argument
def update_proto(self, obs, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
# normalize prototypes
self.normalize_protos()
# online network
s = self.encoder(obs)
s = self.predictor(s)
s = self.projector(s)
s = F.normalize(s, dim=1, p=2)
scores_s = self.protos(s)
log_p_s = F.log_softmax(scores_s / self.tau, dim=1)
# target network
with torch.no_grad():
t = self.encoder_target(next_obs)
t = self.predictor_target(t)
t = F.normalize(t, dim=1, p=2)
scores_t = self.protos(t)
q_t = sinkhorn_knopp(scores_t / self.tau)
# loss
loss = -(q_t * log_p_s).sum(dim=1).mean()
if self.use_tb or self.use_wandb:
metrics['repr_loss'] = loss.item()
self.proto_opt.zero_grad(set_to_none=True)
loss.backward()
self.proto_opt.step()
return metrics
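    # Editorial note: update_proto is a SwAV-style swapped-assignment loss:
    # Sinkhorn-normalized targets q_t come from the target encoder on
    # next_obs, and the online branch on obs is trained to predict them via
    # the cross-entropy -(q_t * log_p_s), with both sides sharpened by tau.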
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
obs, action, extr_reward, discount, next_obs = batch.to(self.device).unpack()
# augment and encode
with torch.no_grad():
obs = self.aug(obs)
next_obs = self.aug(next_obs)
if self.reward_free:
metrics.update(self.update_proto(obs, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(next_obs, step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
obs = self.encoder(obs)
next_obs = self.encoder(next_obs)
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.encoder, self.encoder_target,
self.encoder_target_tau)
utils.soft_update_params(self.predictor, self.predictor_target,
self.encoder_target_tau)
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/proto.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pdb # pylint: disable=unused-import
import math
import typing as tp
import torch
from torch import nn
import torch.nn.functional as F
from url_benchmark import utils
class OnlineCov(nn.Module):
def __init__(self, mom: float, dim: int) -> None:
super().__init__()
self.mom = mom # momentum
self.count = torch.nn.Parameter(torch.LongTensor([0]), requires_grad=False)
self.cov: tp.Any = torch.nn.Parameter(torch.zeros((dim, dim), dtype=torch.float32), requires_grad=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.training:
self.count += 1 # type: ignore
self.cov.data *= self.mom
self.cov.data += (1 - self.mom) * torch.matmul(x.T, x) / x.shape[0]
count = self.count.item()
cov = self.cov / (1 - self.mom**count)
return cov
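# --- Illustrative sketch (added for exposition; not part of the original module) ---
# OnlineCov keeps an exponential moving average of x^T x / batch_size and
# divides by (1 - mom**count) to correct the bias of the zero-initialised
# buffer, mirroring Adam-style bias correction.
def _online_cov_demo() -> None:
    tracker = OnlineCov(mom=0.99, dim=3)
    tracker.train()
    for _ in range(100):
        estimate = tracker(torch.randn(4096, 3))
    # for standard-normal inputs the estimate approaches the identity matrix
    assert estimate.shape == (3, 3)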
class _L2(nn.Module):
def __init__(self, dim) -> None:
super().__init__()
self.dim = dim
def forward(self, x):
y = math.sqrt(self.dim) * F.normalize(x, dim=1)
return y
def _nl(name: str, dim: int) -> tp.List[nn.Module]:
"""Returns a non-linearity given name and dimension"""
if name == "irelu":
return [nn.ReLU(inplace=True)]
if name == "relu":
return [nn.ReLU()]
if name == "ntanh":
return [nn.LayerNorm(dim), nn.Tanh()]
if name == "layernorm":
return [nn.LayerNorm(dim)]
if name == "tanh":
return [nn.Tanh()]
if name == "L2":
return [_L2(dim)]
raise ValueError(f"Unknown non-linearity {name}")
def mlp(*layers: tp.Sequence[tp.Union[int, str]]) -> nn.Sequential:
"""Provides a sequence of linear layers and non-linearities
providing a sequence of dimension for the neurons, or name of
the non-linearities
Eg: mlp(10, 12, "relu", 15) returns:
Sequential(Linear(10, 12), ReLU(), Linear(12, 15))
"""
assert len(layers) >= 2
sequence: tp.List[nn.Module] = []
assert isinstance(layers[0], int), "First input must provide the dimension"
prev_dim: int = layers[0]
for layer in layers[1:]:
if isinstance(layer, str):
sequence.extend(_nl(layer, prev_dim))
else:
assert isinstance(layer, int)
sequence.append(nn.Linear(prev_dim, layer))
prev_dim = layer
return nn.Sequential(*sequence)
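# --- Illustrative sketch (added for exposition; not part of the original module) ---
# In mlp(), integers are layer widths and strings name the non-linearity
# inserted after the preceding linear layer.
def _mlp_demo() -> None:
    net = mlp(10, 12, "relu", 15)  # Linear(10, 12) -> ReLU() -> Linear(12, 15)
    out = net(torch.zeros(4, 10))
    assert out.shape == (4, 15)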
class Actor(nn.Module):
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.action_dim = action_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_net = mlp(self.obs_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh",
hidden_dim, "irelu",
hidden_dim, "irelu")
feature_dim = hidden_dim
self.policy = mlp(feature_dim, hidden_dim, "irelu", self.action_dim)
self.apply(utils.weight_init)
# initialize the last layer by zero
# self.policy[-1].weight.data.fill_(0.0)
def forward(self, obs, z, std):
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
obs = self.obs_net(obs)
h = torch.cat([obs, obs_z], dim=-1)
else:
h = torch.cat([obs, z], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
mu = self.policy(h)
mu = torch.tanh(mu)
std = torch.ones_like(mu) * std
dist = utils.TruncatedNormal(mu, std)
return dist
class DiagGaussianActor(nn.Module):
def __init__(self, obs_dim, z_dim, action_dim, hidden_dim, log_std_bounds,
preprocess=False) -> None:
super().__init__()
self.z_dim = z_dim
self.log_std_bounds = log_std_bounds
self.preprocess = preprocess
feature_dim = obs_dim + z_dim
self.policy = mlp(feature_dim, hidden_dim, "ntanh", hidden_dim, "relu", 2 * action_dim)
self.apply(utils.weight_init)
def forward(self, obs, z):
assert z.shape[-1] == self.z_dim
h = torch.cat([obs, z], dim=-1)
mu, log_std = self.policy(h).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
log_std = torch.tanh(log_std)
log_std_min, log_std_max = self.log_std_bounds
log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std + 1)
std = log_std.exp()
dist = utils.SquashedNormal(mu, std)
return dist
class ForwardMap(nn.Module):
""" forward representation class"""
def __init__(self, obs_dim, z_dim, action_dim, feature_dim, hidden_dim,
preprocess=False, add_trunk=True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.action_dim = action_dim
self.preprocess = preprocess
if self.preprocess:
self.obs_action_net = mlp(self.obs_dim + self.action_dim, hidden_dim, "ntanh", feature_dim, "irelu")
self.obs_z_net = mlp(self.obs_dim + self.z_dim, hidden_dim, "ntanh", feature_dim, "irelu")
if not add_trunk:
self.trunk: nn.Module = nn.Identity()
feature_dim = 2 * feature_dim
else:
self.trunk = mlp(2 * feature_dim, hidden_dim, "irelu")
feature_dim = hidden_dim
else:
self.trunk = mlp(self.obs_dim + self.z_dim + self.action_dim, hidden_dim, "ntanh",
hidden_dim, "irelu",
hidden_dim, "irelu")
feature_dim = hidden_dim
seq = [feature_dim, hidden_dim, "irelu", self.z_dim]
self.F1 = mlp(*seq)
self.F2 = mlp(*seq)
self.apply(utils.weight_init)
def forward(self, obs, z, action):
assert z.shape[-1] == self.z_dim
if self.preprocess:
obs_action = self.obs_action_net(torch.cat([obs, action], dim=-1))
obs_z = self.obs_z_net(torch.cat([obs, z], dim=-1))
h = torch.cat([obs_action, obs_z], dim=-1)
else:
h = torch.cat([obs, z, action], dim=-1)
if hasattr(self, "trunk"):
h = self.trunk(h)
F1 = self.F1(h)
F2 = self.F2(h)
return F1, F2
class IdentityMap(nn.Module):
def __init__(self) -> None:
super().__init__()
self.B = nn.Identity()
def forward(self, obs):
return self.B(obs)
class BackwardMap(nn.Module):
""" backward representation class"""
def __init__(self, obs_dim, z_dim, hidden_dim, norm_z: bool = True) -> None:
super().__init__()
self.obs_dim = obs_dim
self.z_dim = z_dim
self.norm_z = norm_z
self.B = mlp(self.obs_dim, hidden_dim, "ntanh", hidden_dim, "relu", self.z_dim)
self.apply(utils.weight_init)
def forward(self, obs):
if not hasattr(self, "norm_z"): # backward compatiblity
self.norm_z = True
B = self.B(obs)
if self.norm_z:
B = math.sqrt(self.z_dim) * F.normalize(B, dim=1)
return B
class MultinputNet(nn.Module):
"""Network with multiple inputs"""
def __init__(self, input_dims: tp.Sequence[int], sequence_dims: tp.Sequence[int]) -> None:
super().__init__()
input_dims = list(input_dims)
sequence_dims = list(sequence_dims)
dim0 = sequence_dims[0]
self.innets = nn.ModuleList([mlp(indim, dim0, "relu", dim0, "layernorm") for indim in input_dims]) # type: ignore
sequence: tp.List[tp.Union[str, int]] = [dim0]
for dim in sequence_dims[1:]:
sequence.extend(["relu", dim])
self.outnet = mlp(*sequence) # type: ignore
def forward(self, *tensors: torch.Tensor) -> torch.Tensor:
assert len(tensors) == len(self.innets)
out = sum(net(x) for net, x in zip(self.innets, tensors)) / len(self.innets)
        return self.outnet(out)  # type: ignore
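# --- Illustrative sketch (added for exposition; not part of the original module) ---
# MultinputNet embeds each input with its own small MLP into the shared
# dimension sequence_dims[0], averages the embeddings and feeds the result
# to a final MLP whose output width is the last entry of sequence_dims.
def _multinput_net_demo() -> None:
    net = MultinputNet(input_dims=[4, 6], sequence_dims=[16, 8])
    out = net(torch.zeros(2, 4), torch.zeros(2, 6))
    assert out.shape == (2, 8)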
|
controllable_agent-main
|
url_benchmark/agent/fb_modules.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import typing as tp
from typing import Any, Dict, Tuple
import math
from collections import OrderedDict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from url_benchmark import utils
from url_benchmark.dmc import TimeStep
from .ddpg import DDPGAgent, MetaDict, DDPGAgentConfig
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
@dataclasses.dataclass
class DIAYNAgentConfig(DDPGAgentConfig):
_target_: str = "url_benchmark.agent.diayn.DIAYNAgent"
name: str = "diayn"
update_encoder: bool = omegaconf.II("update_encoder")
skill_dim: int = 16
diayn_scale: float = 1.0
update_skill_every_step: int = 50
cs = ConfigStore.instance()
cs.store(group="agent", name="diayn", node=DIAYNAgentConfig)
class DIAYN(nn.Module):
def __init__(self, obs_dim: int, skill_dim: int, hidden_dim: int) -> None:
super().__init__()
self.skill_pred_net = nn.Sequential(nn.Linear(obs_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, skill_dim))
self.apply(utils.weight_init)
def forward(self, obs) -> Any:
skill_pred = self.skill_pred_net(obs)
return skill_pred
class DIAYNAgent(DDPGAgent):
def __init__(self, **kwargs) -> None:
cfg = DIAYNAgentConfig(**kwargs)
# create actor and critic
# increase obs shape to include skill dim (through meta_dim)
super().__init__(**kwargs, meta_dim=cfg.skill_dim)
self.cfg = cfg # override base ddpg cfg type
# create diayn
self.diayn = DIAYN(self.obs_dim - self.skill_dim, self.skill_dim,
kwargs['hidden_dim']).to(kwargs['device'])
# loss criterion
self.diayn_criterion = nn.CrossEntropyLoss()
# optimizers
self.diayn_opt = torch.optim.Adam(self.diayn.parameters(), lr=self.lr)
self.diayn.train()
def init_meta(self) -> tp.Dict[str, np.ndarray]:
skill = np.zeros(self.skill_dim, dtype=np.float32)
skill[np.random.choice(self.skill_dim)] = 1.0
meta = OrderedDict()
meta['skill'] = skill
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.update_skill_every_step == 0:
return self.init_meta()
return meta
def update_diayn(self, skill, next_obs, step) -> Dict[str, Any]:
metrics: tp.Dict[str, float] = {}
loss, df_accuracy = self.compute_diayn_loss(next_obs, skill)
self.diayn_opt.zero_grad()
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
loss.backward()
self.diayn_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
if self.use_tb or self.use_wandb:
metrics['diayn_loss'] = loss.item()
metrics['diayn_acc'] = df_accuracy
return metrics
def compute_intr_reward(self, skill, next_obs, step) -> Any:
z_hat = torch.argmax(skill, dim=1)
d_pred = self.diayn(next_obs)
d_pred_log_softmax = F.log_softmax(d_pred, dim=1)
# TODO pred_z unused, is that normal?
# _, pred_z = torch.max(d_pred_log_softmax, dim=1, keepdim=True)
reward = d_pred_log_softmax[torch.arange(d_pred.shape[0]),
z_hat] - math.log(1 / self.skill_dim)
reward = reward.reshape(-1, 1)
return reward * self.diayn_scale
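    # Editorial note: this intrinsic reward is log q(z | s') - log p(z), where
    # q is the discriminator softmax for the active skill z and
    # p(z) = 1 / skill_dim is the uniform prior; it is positive whenever the
    # discriminator identifies the skill better than chance, which is the
    # variational lower bound maximised by DIAYN.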
def compute_diayn_loss(self, next_state, skill) -> Tuple[Any, Any]:
"""
DF Loss
"""
z_hat = torch.argmax(skill, dim=1)
d_pred = self.diayn(next_state)
d_pred_log_softmax = F.log_softmax(d_pred, dim=1)
_, pred_z = torch.max(d_pred_log_softmax, dim=1, keepdim=True)
d_loss = self.diayn_criterion(d_pred, z_hat)
df_accuracy = torch.sum(
torch.eq(z_hat,
pred_z.reshape(1,
list(
pred_z.size())[0])[0])).float() / list(
pred_z.size())[0]
return d_loss, df_accuracy
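    # Editorial note: df_accuracy is the fraction of samples whose
    # discriminator argmax matches the active skill index z_hat; an
    # equivalent, more direct form is (pred_z.squeeze(1) == z_hat).float().mean().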
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size).to(self.device)
obs, action, extr_reward, discount, next_obs = batch.unpack()
skill = batch.meta["skill"]
# augment and encode
obs = self.aug_and_encode(obs)
next_obs = self.aug_and_encode(next_obs)
if self.reward_free:
metrics.update(self.update_diayn(skill, next_obs, step))
with torch.no_grad():
intr_reward = self.compute_intr_reward(skill, next_obs, step)
if self.use_tb or self.use_wandb:
metrics['intr_reward'] = intr_reward.mean().item()
reward = intr_reward
else:
reward = extr_reward
if self.use_tb or self.use_wandb:
metrics['extr_reward'] = extr_reward.mean().item()
metrics['batch_reward'] = reward.mean().item()
if not self.update_encoder:
obs = obs.detach()
next_obs = next_obs.detach()
# extend observations with skill
obs = torch.cat([obs, skill], dim=1)
next_obs = torch.cat([next_obs, skill], dim=1)
# update critic
metrics.update(
self.update_critic(obs.detach(), action, reward, discount,
next_obs.detach(), step))
# update actor
metrics.update(self.update_actor(obs.detach(), step))
# update critic target
utils.soft_update_params(self.critic, self.critic_target,
self.critic_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/diayn.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=unused-import
import pdb
import copy
import math
import logging
import dataclasses
from collections import OrderedDict
import typing as tp
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from hydra.core.config_store import ConfigStore
import omegaconf
from dm_env import specs
from url_benchmark import utils
# from url_benchmark import replay_buffer as rb
from url_benchmark.in_memory_replay_buffer import ReplayBuffer
from url_benchmark.dmc import TimeStep
from url_benchmark import goals as _goals
from .ddpg import MetaDict
from .fb_modules import IdentityMap
from .ddpg import Encoder
from .fb_modules import Actor, DiagGaussianActor, ForwardMap, BackwardMap, OnlineCov
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class UVFAgentConfig:
# @package agent
_target_: str = "url_benchmark.agent.uvf.UVFAgent"
name: str = "uvf"
# reward_free: ${reward_free}
obs_type: str = omegaconf.MISSING # to be specified later
obs_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
action_shape: tp.Tuple[int, ...] = omegaconf.MISSING # to be specified later
device: str = omegaconf.II("device") # ${device}
lr: float = 1e-4
lr_coef: float = 1
fb_target_tau: float = 0.01 # 0.001-0.01
update_every_steps: int = 2
use_tb: bool = omegaconf.II("use_tb") # ${use_tb}
use_wandb: bool = omegaconf.II("use_wandb") # ${use_wandb}
    use_hiplog: bool = omegaconf.II("use_hiplog")  # ${use_hiplog}
num_expl_steps: int = omegaconf.MISSING # ??? # to be specified later
num_inference_steps: int = 5120
hidden_dim: int = 1024 # 128, 2048
backward_hidden_dim: int = 512 # 128, 2048
feature_dim: int = 512 # 128, 1024
z_dim: int = 100 # 30-200
stddev_schedule: str = "0.2" # "linear(1,0.2,200000)" #
stddev_clip: float = 0.3 # 1
update_z_every_step: int = 300
update_z_proba: float = 1.0
nstep: int = 1
batch_size: int = 512 # multiple de 3, 500-5000
init_fb: bool = True
update_encoder: bool = omegaconf.II("update_encoder") # ${update_encoder}
goal_space: tp.Optional[str] = omegaconf.II("goal_space")
ortho_coef: float = 1.0 # 0.01-10
log_std_bounds: tp.Tuple[float, float] = (-5, 2) # param for DiagGaussianActor
temp: float = 1 # temperature for DiagGaussianActor
boltzmann: bool = False # set to true for DiagGaussianActor
debug: bool = False
future_ratio: float = 0.0
mix_ratio: float = 0.5 # 0-1
rand_weight: bool = False # True, False
preprocess: bool = True
norm_z: bool = True
q_loss: bool = False
additional_metric: bool = False
add_trunk: bool = False
cs = ConfigStore.instance()
cs.store(group="agent", name="uvf", node=UVFAgentConfig)
class UVFAgent:
# pylint: disable=unused-argument
def __init__(self,
**kwargs: tp.Any
):
cfg = UVFAgentConfig(**kwargs)
self.cfg = cfg
assert len(cfg.action_shape) == 1
self.action_dim = cfg.action_shape[0]
self.solved_meta: tp.Any = None
# models
if cfg.obs_type == 'pixels':
self.aug: nn.Module = utils.RandomShiftsAug(pad=4)
self.encoder: nn.Module = Encoder(cfg.obs_shape).to(cfg.device)
self.obs_dim = self.encoder.repr_dim
else:
self.aug = nn.Identity()
self.encoder = nn.Identity()
self.obs_dim = cfg.obs_shape[0]
if cfg.feature_dim < self.obs_dim:
logger.warning(f"feature_dim {cfg.feature_dim} should not be smaller that obs_dim {self.obs_dim}")
goal_dim = self.obs_dim
if cfg.goal_space is not None:
goal_dim = _goals.get_goal_space_dim(cfg.goal_space)
if cfg.z_dim < goal_dim:
logger.warning(f"z_dim {cfg.z_dim} should not be smaller that goal_dim {goal_dim}")
# create the network
if self.cfg.boltzmann:
self.actor: nn.Module = DiagGaussianActor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.hidden_dim, cfg.log_std_bounds).to(cfg.device)
else:
self.actor = Actor(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
self.forward_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
if cfg.debug:
self.backward_net: nn.Module = IdentityMap().to(cfg.device)
# self.backward_target_net: nn.Module = IdentityMap().to(cfg.device)
else:
self.backward_net = BackwardMap(goal_dim, cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
# self.backward_target_net = BackwardMap(goal_dim,
# cfg.z_dim, cfg.backward_hidden_dim, norm_z=cfg.norm_z).to(cfg.device)
# build up the target network
self.forward_target_net = ForwardMap(self.obs_dim, cfg.z_dim, self.action_dim,
cfg.feature_dim, cfg.hidden_dim,
preprocess=cfg.preprocess, add_trunk=self.cfg.add_trunk).to(cfg.device)
# load the weights into the target networks
self.forward_target_net.load_state_dict(self.forward_net.state_dict())
# self.backward_target_net.load_state_dict(self.backward_net.state_dict())
# optimizers
self.encoder_opt: tp.Optional[torch.optim.Adam] = None
if cfg.obs_type == 'pixels':
self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=cfg.lr)
self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=cfg.lr)
# params = [p for net in [self.forward_net, self.backward_net] for p in net.parameters()]
# self.fb_opt = torch.optim.Adam(params, lr=cfg.lr)
self.fb_opt = torch.optim.Adam([{'params': self.forward_net.parameters()}, # type: ignore
{'params': self.backward_net.parameters(), 'lr': cfg.lr_coef * cfg.lr}],
lr=cfg.lr)
self.train()
self.forward_target_net.train()
# self.backward_target_net.train()
self.actor_success: tp.List[float] = [] # only for debugging, can be removed eventually
# self.inv_cov = torch.eye(self.cfg.z_dim, dtype=torch.float32, device=self.cfg.device)
# self.online_cov = OnlineCov(mom=0.99, dim=self.cfg.z_dim).to(self.cfg.device)
# self.online_cov.train()
def train(self, training: bool = True) -> None:
self.training = training
for net in [self.encoder, self.actor, self.forward_net, self.backward_net]:
net.train(training)
def init_from(self, other) -> None:
# copy parameters over
names = ["encoder", "actor"]
if self.cfg.init_fb:
names += ["forward_net", "backward_net", "forward_target_net"] # + ["backward_target_net"]
for name in names:
utils.hard_update_params(getattr(other, name), getattr(self, name))
for key, val in self.__dict__.items():
if isinstance(val, torch.optim.Optimizer):
val.load_state_dict(copy.deepcopy(getattr(other, key).state_dict()))
def get_goal_meta(self, goal_array: np.ndarray) -> MetaDict:
desired_goal = torch.tensor(goal_array).unsqueeze(0).to(self.cfg.device)
with torch.no_grad():
z = self.backward_net(desired_goal)
# if self.cfg.norm_z:
# z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
z = z.squeeze(0).cpu().numpy()
meta = OrderedDict()
meta['z'] = z
return meta
def infer_meta(self, replay_loader: ReplayBuffer) -> MetaDict:
obs_list, reward_list = [], []
batch_size = 0
while batch_size < self.cfg.num_inference_steps:
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
obs_list.append(batch.next_goal if self.cfg.goal_space is not None else batch.next_obs)
reward_list.append(batch.reward)
batch_size += batch.next_obs.size(0)
obs, reward = torch.cat(obs_list, 0), torch.cat(reward_list, 0) # type: ignore
obs, reward = obs[:self.cfg.num_inference_steps], reward[:self.cfg.num_inference_steps]
return self.infer_meta_from_obs_and_rewards(obs, reward)
def infer_meta_from_obs_and_rewards(self, obs: torch.Tensor, reward: torch.Tensor) -> MetaDict:
print('max reward: ', reward.max().cpu().item())
print('99 percentile: ', torch.quantile(reward, 0.99).cpu().item())
print('median reward: ', reward.median().cpu().item())
print('min reward: ', reward.min().cpu().item())
print('mean reward: ', reward.mean().cpu().item())
print('num reward: ', reward.shape[0])
# filter out small reward
# pdb.set_trace()
# idx = torch.where(reward >= torch.quantile(reward, 0.99))[0]
# obs = obs[idx]
# reward = reward[idx]
with torch.no_grad():
B = self.backward_net(obs)
z = torch.matmul(reward.T, B) / reward.shape[0]
if self.cfg.norm_z:
z = math.sqrt(self.cfg.z_dim) * F.normalize(z, dim=1)
meta = OrderedDict()
meta['z'] = z.squeeze().cpu().numpy()
# self.solved_meta = meta
return meta
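    # Editorial note: the inferred task vector is the empirical estimate
    # z = (1/N) * sum_i r_i * B(s_i), i.e. a regression of the reward onto the
    # backward features; with norm_z it is rescaled to the sphere of radius
    # sqrt(z_dim), matching the scaling used inside BackwardMap.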
def init_meta(self, replay_loader: tp.Optional[ReplayBuffer] = None) -> MetaDict:
if replay_loader is not None:
batch = replay_loader.sample(self.cfg.batch_size)
assert batch.next_goal is not None
g = batch.next_goal[0]
meta = self.get_goal_meta(g)
else:
z = np.zeros((self.cfg.z_dim,), dtype=np.float32)
meta = OrderedDict()
meta['z'] = z
return meta
# pylint: disable=unused-argument
def update_meta(
self,
meta: MetaDict,
global_step: int,
time_step: TimeStep,
finetune: bool = False,
replay_loader: tp.Optional[ReplayBuffer] = None
) -> MetaDict:
if global_step % self.cfg.update_z_every_step == 0 and np.random.rand() < self.cfg.update_z_proba:
return self.init_meta()
return meta
def act(self, obs, meta, step, eval_mode) -> tp.Any:
obs = torch.as_tensor(obs, device=self.cfg.device).unsqueeze(0) # type: ignore
h = self.encoder(obs)
z = torch.as_tensor(meta['z'], device=self.cfg.device).unsqueeze(0) # type: ignore
if self.cfg.boltzmann:
dist = self.actor(h, z)
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(h, z, stddev)
if eval_mode:
action = dist.mean
else:
action = dist.sample()
if step < self.cfg.num_expl_steps:
action.uniform_(-1.0, 1.0)
return action.cpu().numpy()[0]
def update_fb(
self,
obs: torch.Tensor,
action: torch.Tensor,
discount: torch.Tensor,
next_obs: torch.Tensor,
next_goal: torch.Tensor,
desired_goal: torch.Tensor,
step: int
) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
# Q LOSS
epsilon = 1e-6
z = self.backward_net(desired_goal)
reward = (torch.norm(next_goal - desired_goal, dim=1, keepdim=False) < epsilon).float() # batch_size
with torch.no_grad():
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(next_obs, z, stddev)
next_action = dist.sample(clip=self.cfg.stddev_clip)
target_F1, target_F2 = self.forward_target_net(next_obs, z, next_action) # batch x z_dim
            next_Q1, next_Q2 = [torch.einsum('sd, sd -> s', target_Fi, z) for target_Fi in [target_F1, target_F2]]
            next_Q = torch.min(next_Q1, next_Q2)
target_Q = reward + discount.squeeze(1) * next_Q # batch_size
target_Q = target_Q.detach()
F1, F2 = self.forward_net(obs, z, action)
Q1, Q2 = [torch.einsum('sd, sd -> s', Fi, z) for Fi in [F1, F2]]
fb_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q)
if self.cfg.use_tb or self.cfg.use_wandb or self.cfg.use_hiplog:
metrics['z_norm'] = torch.norm(z, dim=-1).mean().item()
metrics['fb_loss'] = fb_loss.item()
if isinstance(self.fb_opt, torch.optim.Adam):
metrics["fb_opt_lr"] = self.fb_opt.param_groups[0]["lr"]
# optimize FB
if self.encoder_opt is not None:
self.encoder_opt.zero_grad(set_to_none=True)
self.fb_opt.zero_grad(set_to_none=True)
fb_loss.backward()
self.fb_opt.step()
if self.encoder_opt is not None:
self.encoder_opt.step()
return metrics
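    # Editorial note: update_fb is a goal-conditioned TD update. The goal is
    # embedded as z = B(desired_goal), the reward is the sparse indicator that
    # next_goal lies within epsilon of desired_goal, and the twin critics
    # Q_i(s, a, z) = <F_i(s, a, z), z> are regressed towards
    # r + discount * min_i <F_target_i(s', a', z), z>, with a' sampled from the actor.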
def update_actor(self, obs: torch.Tensor, desired_goal: torch.Tensor, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
z = self.backward_net(desired_goal)
if self.cfg.boltzmann:
dist = self.actor(obs, z)
action = dist.rsample()
else:
stddev = utils.schedule(self.cfg.stddev_schedule, step)
dist = self.actor(obs, z, stddev)
action = dist.sample(clip=self.cfg.stddev_clip)
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
F1, F2 = self.forward_net(obs, z, action)
Q1 = torch.einsum('sd, sd -> s', F1, z)
Q2 = torch.einsum('sd, sd -> s', F2, z)
Q = torch.min(Q1, Q2)
actor_loss = (self.cfg.temp * log_prob - Q).mean() if self.cfg.boltzmann else -Q.mean()
# optimize actor
self.actor_opt.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_opt.step()
if self.cfg.use_tb or self.cfg.use_wandb:
metrics['actor_loss'] = actor_loss.item()
metrics['q'] = Q.mean().item()
metrics['actor_logprob'] = log_prob.mean().item()
return metrics
def update(self, replay_loader: ReplayBuffer, step: int) -> tp.Dict[str, float]:
metrics: tp.Dict[str, float] = {}
if step % self.cfg.update_every_steps != 0:
return metrics
batch = replay_loader.sample(self.cfg.batch_size)
batch = batch.to(self.cfg.device)
# pdb.set_trace()
obs = batch.obs
action = batch.action
discount = batch.discount
next_obs = next_goal = batch.next_obs
if self.cfg.goal_space is not None:
assert batch.next_goal is not None
next_goal = batch.next_goal
# second_batch = replay_loader.sample(self.cfg.batch_size)
# second_batch = second_batch.to(self.cfg.device)
#
# desired_goal = second_batch.next_obs
# if self.cfg.goal_space is not None:
# assert second_batch.next_goal is not None
# desired_goal = second_batch.next_goal
perm = torch.randperm(self.cfg.batch_size)
desired_goal = next_goal[perm]
if self.cfg.mix_ratio > 0:
mix_idxs: tp.Any = np.where(np.random.uniform(size=self.cfg.batch_size) < self.cfg.mix_ratio)[0]
desired_goal[mix_idxs] = next_goal[mix_idxs]
metrics.update(self.update_fb(obs=obs, action=action, discount=discount,
next_obs=next_obs, next_goal=next_goal, desired_goal=desired_goal, step=step))
# update actor
metrics.update(self.update_actor(obs, desired_goal, step))
# update critic target
utils.soft_update_params(self.forward_net, self.forward_target_net,
self.cfg.fb_target_tau)
# utils.soft_update_params(self.backward_net, self.backward_target_net,
# self.cfg.fb_target_tau)
return metrics
|
controllable_agent-main
|
url_benchmark/agent/uvf.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Quadruped Domain."""
import collections
import typing as tp
from typing import Any
import os
from dm_control import mujoco
from dm_control.mujoco.wrapper import mjbindings
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import xml_tools
from lxml import etree
import numpy as np
from scipy import ndimage
enums = mjbindings.enums
mjlib = mjbindings.mjlib
_DEFAULT_TIME_LIMIT = 20
_CONTROL_TIMESTEP = .02
# Horizontal speeds above which the move reward is 1.
_RUN_SPEED = 5
_WALK_SPEED = 0.5
_JUMP_HEIGHT = 1.0
# Constants related to terrain generation.
_HEIGHTFIELD_ID = 0
_TERRAIN_SMOOTHNESS = 0.15 # 0.0: maximally bumpy; 1.0: completely smooth.
_TERRAIN_BUMP_SCALE = 2 # Spatial scale of terrain bumps (in meters).
# Named model elements.
_TOES = ['toe_front_left', 'toe_back_left', 'toe_back_right', 'toe_front_right']
_WALLS = ['wall_px', 'wall_py', 'wall_nx', 'wall_ny']
SUITE = containers.TaggedTasks()
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
# REMOVED since resources is undefined
# def get_model_and_assets() -> Tuple[Any, Any]:
# """Returns a tuple containing the model XML string and a dict of assets."""
# root_dir = os.path.dirname(os.path.dirname(__file__))
# xml = resources.GetResource(
# os.path.join(root_dir, 'custom_dmc_tasks', 'quadruped.xml'))
# return xml, common.ASSETS
def make_model(floor_size=None, terrain: bool = False, rangefinders: bool = False,
walls_and_ball: bool = False):
"""Returns the model XML string."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml_string = common.read_model(os.path.join(root_dir, 'custom_dmc_tasks', 'quadruped.xml'))
parser = etree.XMLParser(remove_blank_text=True)
mjcf = etree.XML(xml_string, parser)
# Set floor size.
if floor_size is not None:
floor_geom = mjcf.find('.//geom[@name=\'floor\']')
floor_geom.attrib['size'] = f'{floor_size} {floor_size} .5'
# Remove walls, ball and target.
if not walls_and_ball:
for wall in _WALLS:
wall_geom = xml_tools.find_element(mjcf, 'geom', wall)
wall_geom.getparent().remove(wall_geom)
# Remove ball.
ball_body = xml_tools.find_element(mjcf, 'body', 'ball')
ball_body.getparent().remove(ball_body)
# Remove target.
target_site = xml_tools.find_element(mjcf, 'site', 'target')
target_site.getparent().remove(target_site)
# Remove terrain.
if not terrain:
terrain_geom = xml_tools.find_element(mjcf, 'geom', 'terrain')
terrain_geom.getparent().remove(terrain_geom)
# Remove rangefinders if they're not used, as range computations can be
# expensive, especially in a scene with heightfields.
if not rangefinders:
rangefinder_sensors = mjcf.findall('.//rangefinder')
for rf in rangefinder_sensors:
rf.getparent().remove(rf)
return etree.tostring(mjcf, pretty_print=True)
@SUITE.add()
def stand(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Stand(random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def jump(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Jump(desired_height=_JUMP_HEIGHT, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def roll(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Roll(desired_speed=_WALK_SPEED, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def roll_fast(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Walk task."""
xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Roll(desired_speed=_RUN_SPEED, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def escape(time_limit: int = _DEFAULT_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns the Escape task."""
xml_string = make_model(floor_size=40, terrain=True, rangefinders=True)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Escape(random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add()
def fetch(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the Fetch task."""
xml_string = make_model(walls_and_ball=True)
physics = Physics.from_xml_string(xml_string, common.ASSETS)
task = Fetch(random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
# pylint: disable=attribute-defined-outside-init
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Quadruped domain."""
def _reload_from_data(self, data) -> None:
super()._reload_from_data(data)
# Clear cached sensor names when the physics is reloaded.
self._sensor_types_to_names: tp.Dict[tp.Tuple[tp.Any, ...], tp.List[str]] = {}
self._hinge_names: tp.List[str] = []
def _get_sensor_names(self, *sensor_types) -> Any:
try:
sensor_names = self._sensor_types_to_names[sensor_types]
except KeyError:
[sensor_ids] = np.where(np.in1d(self.model.sensor_type, sensor_types))
sensor_names = [self.model.id2name(s_id, 'sensor') for s_id in sensor_ids]
self._sensor_types_to_names[sensor_types] = sensor_names
return sensor_names
def torso_upright(self) -> np.ndarray:
"""Returns the dot-product of the torso z-axis and the global z-axis."""
return np.asarray(self.named.data.xmat['torso', 'zz'])
def torso_velocity(self) -> Any:
"""Returns the velocity of the torso, in the local frame."""
return self.named.data.sensordata['velocimeter'].copy()
def com_height(self) -> Any:
return self.named.data.sensordata['center_of_mass'].copy()[2]
def egocentric_state(self) -> Any:
"""Returns the state without global orientation or position."""
if not self._hinge_names:
[hinge_ids] = np.nonzero(self.model.jnt_type ==
enums.mjtJoint.mjJNT_HINGE)
self._hinge_names = [self.model.id2name(j_id, 'joint')
for j_id in hinge_ids]
return np.hstack((self.named.data.qpos[self._hinge_names],
self.named.data.qvel[self._hinge_names],
self.data.act))
def toe_positions(self) -> Any:
"""Returns toe positions in egocentric frame."""
torso_frame = self.named.data.xmat['torso'].reshape(3, 3)
torso_pos = self.named.data.xpos['torso']
torso_to_toe = self.named.data.xpos[_TOES] - torso_pos
return torso_to_toe.dot(torso_frame)
def force_torque(self) -> Any:
"""Returns scaled force/torque sensor readings at the toes."""
force_torque_sensors = self._get_sensor_names(enums.mjtSensor.mjSENS_FORCE,
enums.mjtSensor.mjSENS_TORQUE)
return np.arcsinh(self.named.data.sensordata[force_torque_sensors])
def imu(self) -> Any:
"""Returns IMU-like sensor readings."""
imu_sensors = self._get_sensor_names(enums.mjtSensor.mjSENS_GYRO,
enums.mjtSensor.mjSENS_ACCELEROMETER)
return self.named.data.sensordata[imu_sensors]
def rangefinder(self) -> Any:
"""Returns scaled rangefinder sensor readings."""
rf_sensors = self._get_sensor_names(enums.mjtSensor.mjSENS_RANGEFINDER)
rf_readings = self.named.data.sensordata[rf_sensors]
no_intersection = -1.0
return np.where(rf_readings == no_intersection, 1.0, np.tanh(rf_readings))
def origin_distance(self) -> np.ndarray:
"""Returns the distance from the origin to the workspace."""
return np.asarray(np.linalg.norm(self.named.data.site_xpos['workspace']))
def origin(self) -> Any:
"""Returns origin position in the torso frame."""
torso_frame = self.named.data.xmat['torso'].reshape(3, 3)
torso_pos = self.named.data.xpos['torso']
return -torso_pos.dot(torso_frame)
def ball_state(self) -> Any:
"""Returns ball position and velocity relative to the torso frame."""
data = self.named.data
torso_frame = data.xmat['torso'].reshape(3, 3)
ball_rel_pos = data.xpos['ball'] - data.xpos['torso']
ball_rel_vel = data.qvel['ball_root'][:3] - data.qvel['root'][:3]
ball_rot_vel = data.qvel['ball_root'][3:]
ball_state = np.vstack((ball_rel_pos, ball_rel_vel, ball_rot_vel))
return ball_state.dot(torso_frame).ravel()
def target_position(self) -> Any:
"""Returns target position in torso frame."""
torso_frame = self.named.data.xmat['torso'].reshape(3, 3)
torso_pos = self.named.data.xpos['torso']
torso_to_target = self.named.data.site_xpos['target'] - torso_pos
return torso_to_target.dot(torso_frame)
def ball_to_target_distance(self) -> Any:
"""Returns horizontal distance from the ball to the target."""
ball_to_target = (self.named.data.site_xpos['target'] -
self.named.data.xpos['ball'])
return np.linalg.norm(ball_to_target[:2])
def self_to_ball_distance(self) -> Any:
"""Returns horizontal distance from the quadruped workspace to the ball."""
self_to_ball = (self.named.data.site_xpos['workspace']
- self.named.data.xpos['ball'])
return np.linalg.norm(self_to_ball[:2])
def _find_non_contacting_height(physics, orientation, x_pos: float = 0.0, y_pos: float = 0.0) -> None:
"""Find a height with no contacts given a body orientation.
Args:
physics: An instance of `Physics`.
orientation: A quaternion.
x_pos: A float. Position along global x-axis.
y_pos: A float. Position along global y-axis.
Raises:
RuntimeError: If a non-contacting configuration has not been found after
10,000 attempts.
"""
z_pos = 0.0 # Start embedded in the floor.
num_contacts = 1
num_attempts = 0
# Move up in 1cm increments until no contacts.
while num_contacts > 0:
try:
with physics.reset_context():
physics.named.data.qpos['root'][:3] = x_pos, y_pos, z_pos
physics.named.data.qpos['root'][3:] = orientation
except control.PhysicsError:
# We may encounter a PhysicsError here due to filling the contact
# buffer, in which case we simply increment the height and continue.
pass
num_contacts = physics.data.ncon
z_pos += 0.01
num_attempts += 1
if num_attempts > 10000:
raise RuntimeError('Failed to find a non-contacting configuration.')
def _common_observations(physics) -> tp.Dict[str, Any]:
"""Returns the observations common to all tasks."""
obs = collections.OrderedDict()
obs['egocentric_state'] = physics.egocentric_state()
obs['torso_velocity'] = physics.torso_velocity()
obs['torso_upright'] = physics.torso_upright()
obs['imu'] = physics.imu()
obs['force_torque'] = physics.force_torque()
return obs
def _upright_reward(physics, deviation_angle: int = 0):
"""Returns a reward proportional to how upright the torso is.
Args:
physics: an instance of `Physics`.
deviation_angle: A float, in degrees. The reward is 0 when the torso is
exactly upside-down and 1 when the torso's z-axis is less than
`deviation_angle` away from the global z-axis.
"""
deviation = np.cos(np.deg2rad(deviation_angle))
return rewards.tolerance(
physics.torso_upright(),
bounds=(deviation, float('inf')),
sigmoid='linear',
margin=1 + deviation,
value_at_margin=0)
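# --- Illustrative sketch (added for exposition; not part of the original module) ---
# _upright_reward is 1 while the torso z-axis stays within `deviation_angle`
# of the global z-axis and decays linearly to 0 as the torso turns upside
# down. A rough check of the two extremes, assuming the standard
# dm_control rewards.tolerance semantics:
def _upright_reward_demo() -> None:
    deviation = np.cos(np.deg2rad(20))
    kwargs = dict(bounds=(deviation, float('inf')), sigmoid='linear',
                  margin=1 + deviation, value_at_margin=0)
    upright = rewards.tolerance(1.0, **kwargs)
    upside_down = rewards.tolerance(-1.0, **kwargs)
    assert upright == 1.0
    assert upside_down < upright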
class Move(base.Task):
"""A quadruped task solved by moving forward at a designated speed."""
def __init__(self, desired_speed, random=None) -> None:
"""Initializes an instance of `Move`.
Args:
desired_speed: A float. If this value is zero, reward is given simply
for standing upright. Otherwise this specifies the horizontal velocity
at which the velocity-dependent reward component is maximized.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._desired_speed = desired_speed
super().__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
return _common_observations(physics)
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Move reward term.
move_reward = rewards.tolerance(
physics.torso_velocity()[0],
bounds=(self._desired_speed, float('inf')),
margin=self._desired_speed,
value_at_margin=0.5,
sigmoid='linear')
return _upright_reward(physics) * move_reward
class Stand(base.Task):
"""A quadruped task solved by moving forward at a designated speed."""
def __init__(self, random=None) -> None:
"""Initializes an instance of `Move`.
Args:
desired_speed: A float. If this value is zero, reward is given simply
for standing upright. Otherwise this specifies the horizontal velocity
at which the velocity-dependent reward component is maximized.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
super().__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
return _common_observations(physics)
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
return _upright_reward(physics)
class Jump(base.Task):
"""A quadruped task solved by moving forward at a designated speed."""
def __init__(self, desired_height, random=None) -> None:
"""Initializes an instance of `Move`.
Args:
desired_speed: A float. If this value is zero, reward is given simply
for standing upright. Otherwise this specifies the horizontal velocity
at which the velocity-dependent reward component is maximized.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._desired_height = desired_height
super().__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
return _common_observations(physics)
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Move reward term.
jump_up = rewards.tolerance(
physics.com_height(),
bounds=(self._desired_height, float('inf')),
margin=self._desired_height,
value_at_margin=0.5,
sigmoid='linear')
return _upright_reward(physics) * jump_up
class Roll(base.Task):
"""A quadruped task solved by moving forward at a designated speed."""
def __init__(self, desired_speed, random=None) -> None:
"""Initializes an instance of `Move`.
Args:
desired_speed: A float. If this value is zero, reward is given simply
for standing upright. Otherwise this specifies the horizontal velocity
at which the velocity-dependent reward component is maximized.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._desired_speed = desired_speed
super().__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
return _common_observations(physics)
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Move reward term.
move_reward = rewards.tolerance(
np.linalg.norm(physics.torso_velocity()),
bounds=(self._desired_speed, float('inf')),
margin=self._desired_speed,
value_at_margin=0.5,
sigmoid='linear')
return _upright_reward(physics) * move_reward
class Escape(base.Task):
"""A quadruped task solved by escaping a bowl-shaped terrain."""
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Get heightfield resolution, assert that it is square.
res = physics.model.hfield_nrow[_HEIGHTFIELD_ID]
assert res == physics.model.hfield_ncol[_HEIGHTFIELD_ID]
# Sinusoidal bowl shape.
row_grid, col_grid = np.ogrid[-1:1:res * 1j, -1:1:res * 1j]
radius = np.clip(np.sqrt(col_grid**2 + row_grid**2), .04, 1)
bowl_shape = .5 - np.cos(2 * np.pi * radius) / 2
# Random smooth bumps.
terrain_size = 2 * physics.model.hfield_size[_HEIGHTFIELD_ID, 0]
bump_res = int(terrain_size / _TERRAIN_BUMP_SCALE)
bumps = self.random.uniform(_TERRAIN_SMOOTHNESS, 1, (bump_res, bump_res))
smooth_bumps = ndimage.zoom(bumps, res / float(bump_res))
# Terrain is elementwise product.
terrain = bowl_shape * smooth_bumps
start_idx = physics.model.hfield_adr[_HEIGHTFIELD_ID]
physics.model.hfield_data[start_idx:start_idx + res**2] = terrain.ravel()
super().initialize_episode(physics)
# If we have a rendering context, we need to re-upload the modified
# heightfield data.
if physics.contexts:
with physics.contexts.gl.make_current() as ctx:
ctx.call(mjlib.mjr_uploadHField,
physics.model.ptr,
physics.contexts.mujoco.ptr,
_HEIGHTFIELD_ID)
# Initial configuration.
orientation = self.random.randn(4)
orientation /= np.linalg.norm(orientation)
_find_non_contacting_height(physics, orientation)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
obs = _common_observations(physics)
obs['origin'] = physics.origin()
obs['rangefinder'] = physics.rangefinder()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Escape reward term.
terrain_size = physics.model.hfield_size[_HEIGHTFIELD_ID, 0]
escape_reward = rewards.tolerance(
physics.origin_distance(),
bounds=(terrain_size, float('inf')),
margin=terrain_size,
value_at_margin=0,
sigmoid='linear')
return _upright_reward(physics, deviation_angle=20) * escape_reward
class Fetch(base.Task):
"""A quadruped task solved by bringing a ball to the origin."""
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
Args:
physics: An instance of `Physics`.
"""
# Initial configuration, random azimuth and horizontal position.
azimuth = self.random.uniform(0, 2 * np.pi)
orientation = np.array((np.cos(azimuth / 2), 0, 0, np.sin(azimuth / 2)))
spawn_radius = 0.9 * physics.named.model.geom_size['floor', 0]
x_pos, y_pos = self.random.uniform(-spawn_radius, spawn_radius, size=(2,))
_find_non_contacting_height(physics, orientation, x_pos, y_pos)
# Initial ball state.
physics.named.data.qpos['ball_root'][:2] = self.random.uniform(
-spawn_radius, spawn_radius, size=(2,))
physics.named.data.qpos['ball_root'][2] = 2
physics.named.data.qvel['ball_root'][:2] = 5 * self.random.randn(2)
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation to the agent."""
obs = _common_observations(physics)
obs['ball_state'] = physics.ball_state()
obs['target_position'] = physics.target_position()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
# Reward for moving close to the ball.
arena_radius = physics.named.model.geom_size['floor', 0] * np.sqrt(2)
workspace_radius = physics.named.model.site_size['workspace', 0]
ball_radius = physics.named.model.geom_size['ball', 0]
reach_reward = rewards.tolerance(
physics.self_to_ball_distance(),
bounds=(0, workspace_radius + ball_radius),
sigmoid='linear',
margin=arena_radius, value_at_margin=0)
# Reward for bringing the ball to the target.
target_radius = physics.named.model.site_size['target', 0]
fetch_reward = rewards.tolerance(
physics.ball_to_target_distance(),
bounds=(0, target_radius),
sigmoid='linear',
margin=arena_radius, value_at_margin=0)
reach_then_fetch = reach_reward * (0.5 + 0.5 * fetch_reward)
return _upright_reward(physics) * reach_then_fetch
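    # Editorial note: reach_then_fetch gates the fetch reward by the reach
    # reward: getting the workspace close to the ball earns at most half
    # credit, and the remaining half requires also bringing the ball to the
    # target, with everything scaled by the upright reward.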
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/quadruped.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Point-mass domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
from dm_env import specs
import numpy as np
import os
_DEFAULT_TIME_LIMIT = 20
SUITE = containers.TaggedTasks()
TASKS = [('reach_top_left', np.array([-0.15, 0.15])),
('reach_top_right', np.array([0.15, 0.15])),
('reach_bottom_left', np.array([-0.15, -0.15])),
('reach_bottom_right', np.array([0.15, -0.15]))]
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward=False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets(task):
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(
os.path.join(root_dir, 'custom_dmc_tasks', f'point_mass_maze_{task}.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def multi_goal(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('multi_goal'))
task = MultiTaskPointMassMaze(target_id=0, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def reach_top_left(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('reach_top_left'))
task = MultiTaskPointMassMaze(target_id=0, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def reach_top_right(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('reach_top_right'))
task = MultiTaskPointMassMaze(target_id=1, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def reach_bottom_left(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('reach_bottom_left'))
task = MultiTaskPointMassMaze(target_id=2, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def reach_bottom_right(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets('reach_bottom_right'))
task = MultiTaskPointMassMaze(target_id=3, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
class Physics(mujoco.Physics):
"""physics for the point_mass domain."""
def mass_to_target_dist(self, target):
"""Returns the distance from mass to the target."""
d = target - self.named.data.geom_xpos['pointmass'][:2]
return np.linalg.norm(d)
class MultiTaskPointMassMaze(base.Task):
"""A point_mass `Task` to reach target with smooth reward."""
def __init__(self, target_id, random=None) -> None:
"""Initialize an instance of `PointMassMaze`.
Args:
randomize_gains: A `bool`, whether to randomize the actuator gains.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._target = TASKS[target_id][1]
super().__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode.
If _randomize_gains is True, the relationship between the controls and
the joints is randomized, so that each control actuates a random linear
combination of joints.
Args:
physics: An instance of `mujoco.Physics`.
"""
randomizers.randomize_limited_and_rotational_joints(
physics, self.random)
physics.data.qpos[0] = np.random.uniform(-0.29, -0.15)
physics.data.qpos[1] = np.random.uniform(0.15, 0.29)
# import ipdb; ipdb.set_trace()
physics.named.data.geom_xpos['target'][:2] = self._target
super().initialize_episode(physics)
def get_observation(self, physics):
"""Returns an observation of the state."""
obs = collections.OrderedDict()
obs['position'] = physics.position()
obs['velocity'] = physics.velocity()
return obs
def get_reward_spec(self):
return specs.Array(shape=(1,), dtype=np.float32, name='reward')
def get_reward(self, physics):
"""Returns a reward to the agent."""
target_size = .015
control_reward = rewards.tolerance(physics.control(), margin=1,
value_at_margin=0,
sigmoid='quadratic').mean()
small_control = (control_reward + 4) / 5
near_target = rewards.tolerance(physics.mass_to_target_dist(self._target),
bounds=(0, target_size), margin=target_size)
reward = near_target * small_control
return reward
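# Illustrative usage (added sketch, not part of the original module): build one
# of the registered tasks through `make`; assumes MuJoCo and the
# point_mass_maze_*.xml assets referenced above are available.
if __name__ == '__main__':
    demo_env = make('reach_top_left')
    demo_timestep = demo_env.reset()
    print(sorted(demo_timestep.observation.keys()))  # ['position', 'velocity']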
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/point_mass_maze.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A task where the goal is to move the hand close to a target prop or site."""
import collections
from dm_control import composer
from dm_control.composer import initializers
from dm_control.composer.variation import distributions
from dm_control.entities import props
from dm_control.manipulation.shared import arenas
from dm_control.manipulation.shared import cameras
from dm_control.manipulation.shared import constants
from dm_control.manipulation.shared import observations
from dm_control.manipulation.shared import robots
from dm_control.manipulation.shared import workspaces
from dm_control.utils import rewards
from dm_env import specs
import numpy as np
_ReachWorkspace = collections.namedtuple(
'_ReachWorkspace', ['target_bbox', 'tcp_bbox', 'arm_offset'])
# Ensures that the props are not touching the table before settling.
_PROP_Z_OFFSET = 0.001
_DUPLO_WORKSPACE = _ReachWorkspace(
target_bbox=workspaces.BoundingBox(lower=(-0.1, -0.1, _PROP_Z_OFFSET),
upper=(0.1, 0.1, _PROP_Z_OFFSET)),
tcp_bbox=workspaces.BoundingBox(lower=(-0.1, -0.1, 0.2),
upper=(0.1, 0.1, 0.4)),
arm_offset=robots.ARM_OFFSET)
_SITE_WORKSPACE = _ReachWorkspace(
target_bbox=workspaces.BoundingBox(lower=(-0.2, -0.2, 0.02),
upper=(0.2, 0.2, 0.4)),
tcp_bbox=workspaces.BoundingBox(lower=(-0.2, -0.2, 0.02),
upper=(0.2, 0.2, 0.4)),
arm_offset=robots.ARM_OFFSET)
_TARGET_RADIUS = 0.05
_TIME_LIMIT = 10.
TASKS = [('reach_top_left', np.array([-0.09, 0.09, _PROP_Z_OFFSET])),
('reach_top_right', np.array([0.09, 0.09, _PROP_Z_OFFSET])),
('reach_bottom_left', np.array([-0.09, -0.09, _PROP_Z_OFFSET])),
('reach_bottom_right', np.array([0.09, -0.09, _PROP_Z_OFFSET]))]
def make(task_id, obs_type, seed):
obs_settings = observations.VISION if obs_type == 'pixels' else observations.PERFECT_FEATURES
task = _reach(task_id, obs_settings=obs_settings, use_site=True)
return composer.Environment(task,
time_limit=_TIME_LIMIT,
random_state=seed)
class MultiTaskReach(composer.Task):
"""Bring the hand close to a target prop or site."""
def __init__(self, task_id, arena, arm, hand, prop, obs_settings,
workspace, control_timestep):
"""Initializes a new `Reach` task.
Args:
arena: `composer.Entity` instance.
arm: `robot_base.RobotArm` instance.
hand: `robot_base.RobotHand` instance.
prop: `composer.Entity` instance specifying the prop to reach to, or None
in which case the target is a fixed site whose position is specified by
the workspace.
obs_settings: `observations.ObservationSettings` instance.
workspace: `_ReachWorkspace` specifying the placement of the prop and TCP.
control_timestep: Float specifying the control timestep in seconds.
"""
self._arena = arena
self._arm = arm
self._hand = hand
self._arm.attach(self._hand)
self._arena.attach_offset(self._arm, offset=workspace.arm_offset)
self.control_timestep = control_timestep
self._tcp_initializer = initializers.ToolCenterPointInitializer(
self._hand,
self._arm,
position=distributions.Uniform(*workspace.tcp_bbox),
quaternion=workspaces.DOWN_QUATERNION)
# Add custom camera observable.
self._task_observables = cameras.add_camera_observables(
arena, obs_settings, cameras.FRONT_CLOSE)
if task_id == 'reach_multitask':
self._targets = [target for (_, target) in TASKS]
else:
self._targets = [
target for (task, target) in TASKS if task == task_id
]
assert len(self._targets) > 0
#target_pos_distribution = distributions.Uniform(*TASKS[task_id])
self._prop = prop
if prop:
# The prop itself is used to visualize the target location.
self._make_target_site(parent_entity=prop, visible=False)
self._target = self._arena.add_free_entity(prop)
self._prop_placer = initializers.PropPlacer(
props=[prop],
            position=distributions.Uniform(*workspace.target_bbox),  # target spawn region, as in the upstream dm_control reach task
quaternion=workspaces.uniform_z_rotation,
settle_physics=True)
else:
if len(self._targets) == 1:
self._target = self._make_target_site(parent_entity=arena,
visible=True)
#obs = observable.MJCFFeature('pos', self._target)
# obs.configure(**obs_settings.prop_pose._asdict())
#self._task_observables['target_position'] = obs
# Add sites for visualizing the prop and target bounding boxes.
workspaces.add_bbox_site(body=self.root_entity.mjcf_model.worldbody,
lower=workspace.tcp_bbox.lower,
upper=workspace.tcp_bbox.upper,
rgba=constants.GREEN,
name='tcp_spawn_area')
workspaces.add_bbox_site(body=self.root_entity.mjcf_model.worldbody,
lower=workspace.target_bbox.lower,
upper=workspace.target_bbox.upper,
rgba=constants.BLUE,
name='target_spawn_area')
def _make_target_site(self, parent_entity, visible):
return workspaces.add_target_site(
body=parent_entity.mjcf_model.worldbody,
radius=_TARGET_RADIUS,
visible=visible,
rgba=constants.RED,
name='target_site')
@property
def root_entity(self):
return self._arena
@property
def arm(self):
return self._arm
@property
def hand(self):
return self._hand
def get_reward_spec(self):
n = len(self._targets)
return specs.Array(shape=(n,), dtype=np.float32, name='reward')
@property
def task_observables(self):
return self._task_observables
def get_reward(self, physics):
hand_pos = physics.bind(self._hand.tool_center_point).xpos
rews = []
for target_pos in self._targets:
distance = np.linalg.norm(hand_pos - target_pos)
reward = rewards.tolerance(distance,
bounds=(0, _TARGET_RADIUS),
margin=_TARGET_RADIUS)
rews.append(reward)
rews = np.array(rews).astype(np.float32)
if len(self._targets) == 1:
return rews[0]
return rews
def initialize_episode(self, physics, random_state):
self._hand.set_grasp(physics, close_factors=random_state.uniform())
self._tcp_initializer(physics, random_state)
if self._prop:
self._prop_placer(physics, random_state)
else:
if len(self._targets) == 1:
physics.bind(self._target).pos = self._targets[0]
def _reach(task_id, obs_settings, use_site):
"""Configure and instantiate a `Reach` task.
Args:
obs_settings: An `observations.ObservationSettings` instance.
use_site: Boolean, if True then the target will be a fixed site, otherwise
it will be a moveable Duplo brick.
Returns:
An instance of `reach.Reach`.
"""
arena = arenas.Standard()
arm = robots.make_arm(obs_settings=obs_settings)
hand = robots.make_hand(obs_settings=obs_settings)
if use_site:
workspace = _SITE_WORKSPACE
prop = None
else:
workspace = _DUPLO_WORKSPACE
prop = props.Duplo(observable_options=observations.make_options(
obs_settings, observations.FREEPROP_OBSERVABLES))
task = MultiTaskReach(task_id,
arena=arena,
arm=arm,
hand=hand,
prop=prop,
obs_settings=obs_settings,
workspace=workspace,
control_timestep=constants.CONTROL_TIMESTEP)
return task
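# Illustrative usage (added sketch, not part of the original module): the
# 'reach_multitask' id selects all four targets, so the task's reward spec is a
# length-4 vector; assumes MuJoCo and the dm_control manipulation assets.
if __name__ == '__main__':
    demo_env = make('reach_multitask', obs_type='states', seed=0)
    print(demo_env.task.get_reward_spec())  # expects shape=(4,), dtype=float32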
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/jaco.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Planar Walker Domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Any, Tuple
import typing as tp
import os
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
_CONTROL_TIMESTEP: float
_DEFAULT_TIME_LIMIT: int
_RUN_SPEED: int
_SPIN_SPEED: int
_STAND_HEIGHT: float
_WALK_SPEED: int
# from dm_control import suite # TODO useless?
_DEFAULT_TIME_LIMIT = 25
_CONTROL_TIMESTEP = .025
# Minimal height of torso over foot above which stand reward is 1.
_STAND_HEIGHT = 1.2
# Horizontal speeds (meters/second) above which move reward is 1.
_WALK_SPEED = 1
_RUN_SPEED = 8
_SPIN_SPEED = 5
SUITE = containers.TaggedTasks()
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets() -> Tuple[Any, Any]:
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(os.path.join(root_dir, 'custom_dmc_tasks',
'walker.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def flip(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = PlanarWalker(move_speed=_RUN_SPEED,
forward=True,
flip=True,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Walker domain."""
def torso_upright(self) -> Any:
"""Returns projection from z-axes of torso to the z-axes of world."""
return self.named.data.xmat['torso', 'zz']
def torso_height(self) -> Any:
"""Returns the height of the torso."""
return self.named.data.xpos['torso', 'z']
def horizontal_velocity(self) -> Any:
"""Returns the horizontal velocity of the center-of-mass."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def orientations(self) -> Any:
"""Returns planar orientations of all bodies."""
return self.named.data.xmat[1:, ['xx', 'xz']].ravel()
def angmomentum(self) -> Any:
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class PlanarWalker(base.Task):
"""A planar walker task."""
def __init__(self, move_speed, forward=True, flip=False, random=None) -> None:
"""Initializes an instance of `PlanarWalker`.
Args:
move_speed: A float. If this value is zero, reward is given simply for
standing up. Otherwise this specifies a target horizontal velocity for
the walking task.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._move_speed = move_speed
self._forward = 1 if forward else -1
self._flip = flip
super(PlanarWalker, self).__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode.
In 'standing' mode, use initial orientation and small velocities.
In 'random' mode, randomize joint angles and let fall to the floor.
Args:
physics: An instance of `Physics`.
"""
randomizers.randomize_limited_and_rotational_joints(
physics, self.random)
super(PlanarWalker, self).initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation of body orientations, height and velocites."""
obs = collections.OrderedDict()
obs['orientations'] = physics.orientations()
obs['height'] = physics.torso_height()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
standing = rewards.tolerance(physics.torso_height(),
bounds=(_STAND_HEIGHT, float('inf')),
margin=_STAND_HEIGHT / 2)
upright = (1 + physics.torso_upright()) / 2
stand_reward = (3 * standing + upright) / 4
if self._flip:
move_reward = rewards.tolerance(self._forward *
physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
move_reward = rewards.tolerance(
self._forward * physics.horizontal_velocity(),
bounds=(self._move_speed, float('inf')),
margin=self._move_speed / 2,
value_at_margin=0.5,
sigmoid='linear')
return stand_reward * (5 * move_reward + 1) / 6
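# Illustrative usage (added sketch, not part of the original module): the flip
# task rewards angular momentum above _SPIN_SPEED while the torso stays tall
# and upright; assumes MuJoCo and the custom walker.xml asset are available.
if __name__ == '__main__':
    demo_env = flip()
    demo_env.reset()
    print(demo_env.task.get_reward(demo_env.physics))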
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/walker.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import typing as tp
from . import cheetah
from . import walker
from . import hopper
from . import quadruped
from . import jaco
from . import point_mass_maze
def make(domain, task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
if domain == 'cheetah':
return cheetah.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'walker':
return walker.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'hopper':
return hopper.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'quadruped':
return quadruped.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'point_mass_maze':
return point_mass_maze.make(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
else:
        raise ValueError(f'domain {domain!r} not found')
def make_jaco(task, obs_type, seed) -> tp.Any:
return jaco.make(task, obs_type, seed)
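# Illustrative usage (added sketch, not part of the original module): dispatch
# to one of the wrapped domains; the keyword arguments mirror the per-domain
# `make` helpers imported above.
if __name__ == '__main__':
    demo_env = make('walker', 'flip')
    print(demo_env.action_spec())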
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Cheetah Domain."""
import collections
import os
import typing as tp
from typing import Any, Tuple
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
_DEFAULT_TIME_LIMIT: int
_RUN_SPEED: int
_SPIN_SPEED: int
# How long the simulation will run, in seconds.
_DEFAULT_TIME_LIMIT = 10
# Running speed above which reward is 1.
_RUN_SPEED = 10
_WALK_SPEED = 2
_SPIN_SPEED = 5
SUITE = containers.TaggedTasks()
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets() -> Tuple[Any, Any]:
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(
os.path.join(root_dir, 'custom_dmc_tasks', 'cheetah.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def walk(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(move_speed=_WALK_SPEED, forward=True, flip=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def walk_backward(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(move_speed=_WALK_SPEED, forward=False, flip=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def run_backward(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=False, flip=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(move_speed=_WALK_SPEED, forward=True, flip=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip_backward(time_limit: int = _DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(move_speed=_WALK_SPEED, forward=False, flip=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Cheetah domain."""
def speed(self) -> Any:
"""Returns the horizontal speed of the Cheetah."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def angmomentum(self) -> Any:
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class Cheetah(base.Task):
"""A `Task` to train a running Cheetah."""
def __init__(self, move_speed=_RUN_SPEED, forward=True, flip=False, random=None) -> None:
self._move_speed = move_speed
self._forward = 1 if forward else -1
self._flip = flip
super(Cheetah, self).__init__(random=random)
self._timeout_progress = 0
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode."""
# The indexing below assumes that all joints have a single DOF.
assert physics.model.nq == physics.model.njnt
is_limited = physics.model.jnt_limited == 1
lower, upper = physics.model.jnt_range[is_limited].T
physics.data.qpos[is_limited] = self.random.uniform(lower, upper)
# Stabilize the model before the actual simulation.
for _ in range(200):
physics.step()
physics.data.time = 0
self._timeout_progress = 0
super().initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation of the state, ignoring horizontal position."""
obs = collections.OrderedDict()
# Ignores horizontal position to maintain translational invariance.
obs['position'] = physics.data.qpos[1:].copy()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward to the agent."""
if self._flip:
reward = rewards.tolerance(self._forward * physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
reward = rewards.tolerance(self._forward * physics.speed(),
bounds=(self._move_speed, float('inf')),
margin=self._move_speed,
value_at_margin=0,
sigmoid='linear')
return reward
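# Illustrative usage (added sketch, not part of the original module): the
# backward variants simply negate the speed/momentum term via `forward=False`;
# assumes MuJoCo and the custom cheetah.xml asset are available.
if __name__ == '__main__':
    demo_env = walk_backward()
    demo_timestep = demo_env.reset()
    print(demo_timestep.observation['position'].shape)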
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/cheetah.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Hopper domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import typing as tp
from typing import Any, Tuple
import numpy as np
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
_CONTROL_TIMESTEP: float
_DEFAULT_TIME_LIMIT: int
_HOP_SPEED: int
_SPIN_SPEED: int
_STAND_HEIGHT: float
SUITE = containers.TaggedTasks()
_CONTROL_TIMESTEP = .02 # (Seconds)
# Default duration of an episode, in seconds.
_DEFAULT_TIME_LIMIT = 20
# Minimal height of torso over foot above which stand reward is 1.
_STAND_HEIGHT = 0.6
# Hopping speed above which hop reward is 1.
_HOP_SPEED = 2
_SPIN_SPEED = 5
def make(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward: bool = False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets() -> Tuple[Any, Any]:
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(
os.path.join(root_dir, 'custom_dmc_tasks', 'hopper.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def hop_backward(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to hop forward."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=True, forward=False, flip=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to hop forward."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=True, forward=True, flip=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip_backward(time_limit: int = _DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to hop forward."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=True, forward=False, flip=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Hopper domain."""
def height(self) -> Any:
"""Returns height of torso with respect to foot."""
return (self.named.data.xipos['torso', 'z'] -
self.named.data.xipos['foot', 'z'])
def speed(self) -> Any:
"""Returns horizontal speed of the Hopper."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def touch(self) -> Any:
"""Returns the signals from two foot touch sensors."""
return np.log1p(self.named.data.sensordata[['touch_toe',
'touch_heel']])
def angmomentum(self) -> Any:
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class Hopper(base.Task):
"""A Hopper's `Task` to train a standing and a jumping Hopper."""
def __init__(self, hopping, forward=True, flip=False, random=None) -> None:
"""Initialize an instance of `Hopper`.
Args:
hopping: Boolean, if True the task is to hop forwards, otherwise it is to
balance upright.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._hopping = hopping
self._forward = 1 if forward else -1
self._flip = flip
self._timeout_progress = 0
super(Hopper, self).__init__(random=random)
def initialize_episode(self, physics) -> None:
"""Sets the state of the environment at the start of each episode."""
randomizers.randomize_limited_and_rotational_joints(
physics, self.random)
self._timeout_progress = 0
super(Hopper, self).initialize_episode(physics)
def get_observation(self, physics) -> tp.Dict[str, Any]:
"""Returns an observation of positions, velocities and touch sensors."""
obs = collections.OrderedDict()
# Ignores horizontal position to maintain translational invariance:
obs['position'] = physics.data.qpos[1:].copy()
obs['velocity'] = physics.velocity()
obs['touch'] = physics.touch()
return obs
def get_reward(self, physics) -> Any:
"""Returns a reward applicable to the performed task."""
standing = rewards.tolerance(physics.height(), (_STAND_HEIGHT, 2))
assert self._hopping
if self._flip:
hopping = rewards.tolerance(self._forward * physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
hopping = rewards.tolerance(self._forward * physics.speed(),
bounds=(_HOP_SPEED, float('inf')),
margin=_HOP_SPEED / 2,
value_at_margin=0.5,
sigmoid='linear')
return standing * hopping
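# Illustrative usage (added sketch, not part of the original module): the touch
# observation exposes the log-scaled toe/heel contact sensors read above;
# assumes MuJoCo and the custom hopper.xml asset are available.
if __name__ == '__main__':
    demo_env = flip()
    demo_timestep = demo_env.reset()
    print(demo_timestep.observation['touch'])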
|
controllable_agent-main
|
url_benchmark/custom_dmc_tasks/hopper.py
|
import sys
import numpy as np
def makeColorwheel():
# color encoding scheme
# adapted from the color circle idea described at
# http://members.shaw.ca/quadibloc/other/colint.htm
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3]) # r g b
col = 0
#RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0, RY, 1)/RY)
col += RY
    # YG
    colorwheel[col:YG+col, 0] = 255 - np.floor(255*np.arange(0, YG, 1)/YG)
    colorwheel[col:YG+col, 1] = 255
    col += YG
    # GC
    colorwheel[col:GC+col, 1] = 255
    colorwheel[col:GC+col, 2] = np.floor(255*np.arange(0, GC, 1)/GC)
    col += GC
    # CB
    colorwheel[col:CB+col, 1] = 255 - np.floor(255*np.arange(0, CB, 1)/CB)
    colorwheel[col:CB+col, 2] = 255
    col += CB
    # BM
    colorwheel[col:BM+col, 2] = 255
    colorwheel[col:BM+col, 0] = np.floor(255*np.arange(0, BM, 1)/BM)
    col += BM
    # MR
    colorwheel[col:MR+col, 2] = 255 - np.floor(255*np.arange(0, MR, 1)/MR)
    colorwheel[col:MR+col, 0] = 255
return colorwheel
def computeColor(u, v):
    colorwheel = makeColorwheel()
nan_u = np.isnan(u)
nan_v = np.isnan(v)
nan_u = np.where(nan_u)
nan_v = np.where(nan_v)
u[nan_u] = 0
u[nan_v] = 0
v[nan_u] = 0
v[nan_v] = 0
ncols = colorwheel.shape[0]
radius = np.sqrt(u**2 + v**2)
a = np.arctan2(-v, -u) / np.pi
    fk = (a+1) / 2 * (ncols-1)  # [-1, 1] mapped onto [0, ncols-1]
    k0 = fk.astype(np.uint8)  # lower colorwheel index for interpolation
    k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
img = np.empty([k1.shape[0], k1.shape[1],3])
ncolors = colorwheel.shape[1]
for i in range(ncolors):
tmp = colorwheel[:,i]
col0 = tmp[k0]/255
col1 = tmp[k1]/255
col = (1-f)*col0 + f*col1
idx = radius <= 1
col[idx] = 1 - radius[idx]*(1-col[idx]) # increase saturation with radius
col[~idx] *= 0.75 # out of range
img[:,:,2-i] = np.floor(255*col).astype(np.uint8)
return img.astype(np.uint8)
def computeImg(flow):
eps = sys.float_info.epsilon
UNKNOWN_FLOW_THRESH = 1e9
UNKNOWN_FLOW = 1e10
u = flow[: , : , 0]
v = flow[: , : , 1]
maxu = -999
maxv = -999
minu = 999
minv = 999
maxrad = -1
#fix unknown flow
greater_u = np.where(u > UNKNOWN_FLOW_THRESH)
greater_v = np.where(v > UNKNOWN_FLOW_THRESH)
u[greater_u] = 0
u[greater_v] = 0
v[greater_u] = 0
v[greater_v] = 0
maxu = max([maxu, np.amax(u)])
minu = min([minu, np.amin(u)])
maxv = max([maxv, np.amax(v)])
minv = min([minv, np.amin(v)])
rad = np.sqrt(np.multiply(u,u)+np.multiply(v,v))
maxrad = max([maxrad, np.amax(rad)])
u = u/(maxrad+eps)
v = v/(maxrad+eps)
img = computeColor(u, v)
return img
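# Illustrative usage (added sketch, not part of the original script): color-code
# a synthetic HxWx2 flow field; only numpy from the imports above is assumed.
if __name__ == '__main__':
    yy, xx = np.mgrid[-1.0:1.0:64j, -1.0:1.0:64j]
    demo_flow = np.stack([xx, yy], axis=-1)
    demo_img = computeImg(demo_flow)  # HxWx3 uint8 image (BGR channel order)
    print(demo_img.shape, demo_img.dtype)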
|
AR-Depth-main
|
flow_color.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
        method.
Returns:
    The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    A 6-tuple of (BLEU score, list of n-gram precisions, brevity penalty,
    length ratio, translation length, reference length).
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
def _bleu(ref_file, trans_file, subword_option=None):
max_order = 4
smooth = True
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with open(reference_filename) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference_list.append(reference.strip().split())
per_segment_references.append(reference_list)
translations = []
with open(trans_file) as fh:
for line in fh:
translations.append(line.strip().split())
bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)
return round(100 * bleu_score,2)
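# Illustrative usage (added sketch, not part of the original module): score a
# single toy sentence pair with smoothing; inputs are pre-tokenized as the
# docstrings above describe.
if __name__ == "__main__":
  demo_refs = [[["the", "cat", "sat", "on", "the", "mat"]]]
  demo_hyps = [["the", "cat", "sat", "on", "mat"]]
  demo_bleu, _, demo_bp, _, _, _ = compute_bleu(demo_refs, demo_hyps, smooth=True)
  print("BLEU: %.4f, brevity penalty: %.4f" % (demo_bleu, demo_bp))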
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/evaluator/bleu.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import logging
import argparse
from bleu import _bleu
import json
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for code completion (line level).')
parser.add_argument('--answers', '-a', required=True, help="filename of the labels, in json format.")
parser.add_argument('--predictions', '-p', required=True, help="filename of the leaderboard predictions, in txt format.")
args = parser.parse_args()
preds = open(args.predictions, "r").readlines()
gts = open(args.answers, "r").readlines()
assert len(preds) == len(gts), f"Samples of predictions and answers are not equal, {len(preds)}: {len(gts)}"
total = len(gts)
EM = 0.0
wf = open("ground_truth.txt", "w")
for pred, gt in zip(preds, gts):
pred = pred.strip()
gt = json.loads(gt)["code"]
wf.write(gt+"\n")
if pred.split() == gt.split():
EM += 1
bleu_score = round(_bleu("ground_truth.txt", args.predictions), 2)
logger.info(f"BLEU: {bleu_score}, EM: {round(EM/total*100, 2)}")
try:
os.remove("ground_truth.txt")
except Exception:
pass
if __name__ == "__main__":
main()
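# Example invocation (illustrative; file names are placeholders):
#   python evaluator.py --answers dev.json --predictions predictions.txt
# Each line of the answers file is a JSON object with a "code" field, and the
# predictions file holds one generated code line per sample.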
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/evaluator/evaluator.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text to code generation pipeline in CodeXGLUE
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import json
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from dataset import concodeDataset
from beam import Beam
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from bleu import _bleu
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
}
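# Note (added for clarity): each entry maps a --model_type value to its
# (config, model, tokenizer) classes, e.g. MODEL_CLASSES['gpt2'] resolves to
# (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer).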
def load_and_cache_examples(args, tokenizer, evaluate=False):
dataset = concodeDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train',
block_size=args.block_size)
return dataset
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def update_config(model, tokenizer):
model.config.bos_token_id = tokenizer.bos_token_id
model.config.eos_token_id = tokenizer.eos_token_id
model.config.pad_token_id = tokenizer.pad_token_id
def train(args, train_dataset, model, tokenizer, fh, pool):
""" Train the model """
if args.local_rank in [-1, 0]:
args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard')
if not os.path.exists(args.tensorboard_dir):
os.makedirs(args.tensorboard_dir)
tb_writer = SummaryWriter(args.tensorboard_dir)
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True)
total_examples = len(train_dataset) * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1)
batch_size = args.batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1)
# if args.max_steps > 0:
# t_total = args.max_steps
# args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
if args.num_train_epochs > 0:
t_total = total_examples // batch_size * args.num_train_epochs
args.max_steps = t_total
model.to(args.device)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu"))
if os.path.exists(optimizer_last):
optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu"))
if args.local_rank == 0:
torch.distributed.barrier()
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node],
output_device=args.local_rank%args.gpu_per_node,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", total_examples )
logger.info(" Num epoch = %d", t_total*batch_size//total_examples)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", batch_size)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = args.start_step
tr_loss, logging_loss,avg_loss,tr_nb = 0.0, 0.0,0.0,0
# model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
best_bleu = 0.0
for idx in range(args.start_epoch, int(args.num_train_epochs)):
for step, (batch, token_labels) in enumerate(train_dataloader):
inputs = batch.to(args.device)
attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
model.train()
# outputs = model(inputs, attention_mask=attn_mask, labels=inputs, loss_mask=loss_mask)
# loss = outputs[0]
outputs = model(inputs, attention_mask=attn_mask)
logits = outputs[0]
labels = inputs
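            # Next-token objective: logits at position t score the token at t+1,
            # so the last logit and the first label are dropped before flattening;
            # loss_mask (token_labels == 2) then restricts the loss to the target
            # positions selected below.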
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
flatten_shift_loss_mask = loss_mask[..., :-1].contiguous().view(-1)
ids = torch.nonzero(flatten_shift_loss_mask).view(-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[ids], shift_labels.view(-1)[ids])
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
output_flag=True
avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
if global_step % args.logging_steps == 0:
logger.info(" steps: %s ppl: %s", global_step, round(avg_loss,5))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
tb_writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
tr_nb=global_step
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
checkpoint_prefix = "checkpoint"
# Save model checkpoint
if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
# results = evaluate(args, model, tokenizer, eval_when_training=True)
# for key, value in results.items():
# tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
# logger.info(" %s = %s", key, round(value,4))
# output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'],4)))
dev_bleu, dev_EM = eval_bleu(args, model, tokenizer, file_type='dev', num=100)
logger.info(f"dev bleu: {dev_bleu}, dev EM: {dev_EM}")
output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(dev_bleu,2)))
if dev_bleu > best_bleu:
best_bleu = dev_bleu
logger.info(f"best bleu updated. saved in {output_dir}")
logger.info(f"best bleu: {best_bleu}")
else:
output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
# _rotate_checkpoints(args, checkpoint_prefix)
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
model_to_save.save_pretrained(last_output_dir)
tokenizer.save_pretrained(last_output_dir)
idx_file = os.path.join(last_output_dir, 'idx_file.txt')
with open(idx_file, 'w', encoding='utf-8') as idxf:
idxf.write(str(0) + '\n')
torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", last_output_dir)
step_file = os.path.join(last_output_dir, 'step_file.txt')
with open(step_file, 'w', encoding='utf-8') as stepf:
stepf.write(str(global_step) + '\n')
# torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
# torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
# logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
break
if args.max_steps > 0 and global_step > args.max_steps:
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="", eval_when_training=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
#logger.info("***** Running evaluation {} *****".format(prefix))
#logger.info(" Num examples = %d", len(eval_dataset))
#logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for step, (batch, token_labels) in enumerate(eval_dataloader):
inputs = batch.to(args.device)
attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
with torch.no_grad():
outputs = model(inputs, attention_mask=attn_mask, labels=inputs, loss_mask=loss_mask)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {
"perplexity": float(perplexity)
}
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
#logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
#logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
def eval_bleu(args, model, tokenizer, file_type='test', num=2000):
dataset = concodeDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size, mode='test')
test_sampler = SequentialSampler(dataset)
test_dataloader = DataLoader(dataset, sampler=test_sampler, batch_size=1)
model.to(args.device)
model.zero_grad()
model.eval()
preds = []
for step, (batch, token_labels) in enumerate(test_dataloader):
if step >= num:
break
inputs = batch.to(args.device)
# with torch.no_grad():
# outputs = model.generate(inputs, max_length=args.block_size, num_beams=10, temperature=0.7, early_stopping=False, top_k=70, \
# bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.pad_token_id, pad_token_id=tokenizer.pad_token_id)
# # outputs = model.generate(inputs, max_length=args.block_size, do_sample=True, temperature=0.7, top_k=70, top_p=0.95, \
# # bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.pad_token_id, pad_token_id=tokenizer.pad_token_id)
# # outputs = model.generate(inputs, max_length=args.block_size, num_beams=10, temperature=0.7, early_stopping=False, top_k=70)
# # outputs = model.generate(inputs, max_length=args.block_size, do_sample=True, temperature=0.7, top_k=70, top_p=0.95)
# generation = tokenizer.decode(outputs[0])[len(tokenizer.decode(inputs[0])):]
# preds.append(generation.rstrip("<pad>"))
with torch.no_grad():
beam_size = 10
m = torch.nn.LogSoftmax(dim=-1)
outputs = model(inputs)[1]
p = []
zero = torch.cuda.LongTensor(1).fill_(0)
for i in range(inputs.shape[0]):
past_hidden = [x[:, i:i+1].expand(-1, beam_size, -1, -1, -1) for x in outputs]
# context_mask=source_mask[i:i+1,:].expand(beam_size,-1)
beam = Beam(beam_size, tokenizer.bos_token_id, tokenizer.eos_token_id)
input_ids = None
for _ in range(162):
if beam.done():
break
input_ids = beam.getCurrentState()
# context_mask=torch.cat((context_mask,input_ids*0+1),-1)
# mask=context_mask.unsqueeze(0).unsqueeze(-2).unsqueeze(-2).expand(self.config.n_layer, -1, -1, -1, -1)
transformer_outputs = model(input_ids, past=past_hidden)
out = m(transformer_outputs[0][:, -1, :]).data
# out = self.lsm(self.lm_head(transformer_outputs[0][:,-1,:])).data
beam.advance(out)
past_hidden = [x.data.index_select(1, beam.getCurrentOrigin()) for x in transformer_outputs[1]]
hyp = beam.getHyp(beam.getFinal())
pred =beam.buildTargetTokens(hyp)[:beam_size]
pred = [torch.cat([x.view(-1) for x in p]+[zero]*(162-len(p))).view(1,-1) for p in pred]
p.append(torch.cat(pred, 0).unsqueeze(0))
p = torch.cat(p, 0)
for pred in p:
t = pred[0].cpu().numpy()
t = list(t)
if 0 in t:
t = t[:t.index(0)]
text = tokenizer.decode(t, clean_up_tokenization_spaces=False)
# print(text)
preds.append(text)
if step % args.logging_steps == 0:
logger.info(f"{step} are done!")
golds = []
datafile = os.path.join(args.data_dir, f"{file_type}.json")
datas = open(datafile).readlines()
for x in datas[:num]:
x = json.loads(x)
golds.append(x["code"])
assert len(preds) == len(golds)
EM = []
with open(os.path.join(args.output_dir, f"{file_type}.output"), 'w') as f, open(os.path.join(args.output_dir, f"{file_type}.gold"), 'w') as f1:
for pred, gold in zip(preds, golds):
f.write(pred+'\n')
f1.write(gold+'\n')
EM.append(pred.split() == gold.split())
if file_type == "test":
return 0, 0
bleu_score = round(_bleu(os.path.join(args.output_dir, f"{file_type}.gold"), os.path.join(args.output_dir, f"{file_type}.output")), 2)
EM = round(np.mean(EM) * 100, 2)
return bleu_score, EM
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data path.")
parser.add_argument("--langs", default=None, type=str, required=True,
help="Languages to train, if all, train all languages in data_dir")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--model_type", default="gpt2", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--pretrain_dir", default="", type=str,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--config_dir", type=str,
help="config name. Required when training from scratch")
parser.add_argument("--tokenizer_dir", type=str,
help="Pre-trained tokenizer dir. Required when training from scratch")
parser.add_argument("--load_name", type=str, default="pretrained",
help="Load pretrained model name")
parser.add_argument("--mlm", action='store_true',
help="Train with masked-language modeling loss instead of language modeling.")
parser.add_argument("--mlm_probability", type=float, default=0.15,
help="Ratio of tokens to mask for masked language modeling loss")
parser.add_argument("--cache_dir", default="", type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
parser.add_argument("--block_size", default=1024, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_infer", action='store_true',
help="Whether to run inference on test set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=2, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=10,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument('--save_total_limit', type=int, default=None,
help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument("--node_index", type=int, default=-1,
help="node index if multi-node running")
parser.add_argument("--gpu_per_node", type=int, default=-1,
help="num of gpus per node")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
parser.add_argument('--log_file', type=str, default='')
parser.add_argument('--tensorboard_dir', type=str)
pool = None
args = parser.parse_args()
# args.output_dir = os.path.join(args.output_dir, args.dataset)
if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling).")
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
logger.warning("local_rank: %d, node_index: %d, gpu_per_node: %d"%(args.local_rank, args.node_index, args.gpu_per_node))
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.local_rank += args.node_index * args.gpu_per_node
args.n_gpu = 1
args.device = device
# args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,
torch.distributed.get_world_size() if args.local_rank != -1 else 1)
# Use a FileHandler to also write the logs to a file
fh = logging.FileHandler(args.log_file)
logger.addHandler(fh)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
args.start_epoch = 0
args.start_step = 0
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
if args.do_train and os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
args.pretrain_dir = os.path.join(checkpoint_last)
args.config_name = os.path.join(checkpoint_last, 'config.json')
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
with open(idx_file, encoding='utf-8') as idxf:
args.start_epoch = int(idxf.readlines()[0].strip()) + 1
step_file = os.path.join(checkpoint_last, 'step_file.txt')
if os.path.exists(step_file):
with open(step_file, encoding='utf-8') as stepf:
args.start_step = int(stepf.readlines()[0].strip())
logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
# Load pre-trained model
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
pretrained = args.pretrain_dir
if pretrained:
tokenizer = tokenizer_class.from_pretrained(pretrained, do_lower_case=args.do_lower_case, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', sep_token='concode_elem_sep')
logger.info(tokenizer.encode("<s> hello world <pad> </s>"))
model = model_class.from_pretrained(pretrained)
model.resize_token_embeddings(len(tokenizer))
update_config(model, tokenizer)
logger.info(model.config)
else:
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_dir, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', sep_token='concode_elem_sep')
args.vocab_size = tokenizer.vocab_size
config = config_class.from_pretrained(args.config_dir)
model = model_class(config)
model.resize_token_embeddings(len(tokenizer))
update_config(model, tokenizer)
model_parameters = model.parameters()
num_params = sum([np.prod(p.size()) for p in model_parameters])
logger.info(f"Model has a total of {num_params} trainable parameters")
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer, fh, pool)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if args.do_eval: # only works on 1 GPU
dev_bleu, dev_EM = eval_bleu(args, model, tokenizer, file_type='dev', num=2000)
logger.info(f"dev bleu: {dev_bleu}, dev EM: {dev_EM}")
if args.do_infer: # only works on 1 GPU
test_bleu, test_EM = eval_bleu(args, model, tokenizer, file_type='test', num=2000)
logger.info(f"test bleu: {test_bleu}, test EM: {test_EM}")
if __name__ == "__main__":
main()
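# Hypothetical launch sketch (the flags are the ones parsed above; the paths and
# pretrained checkpoint are illustrative only):
#
#   python run.py --output_dir=./save --log_file=./save/train.log \
#       --model_type=gpt2 --pretrain_dir=<path-to-pretrained-checkpoint> \
#       --do_train --do_eval --per_gpu_train_batch_size=2 --save_steps=5000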
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/code/run.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
6-tuple with the BLEU score, n-gram precisions, brevity penalty, length
ratio, translation length and reference length.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
def _bleu(ref_file, trans_file, subword_option=None):
max_order = 4
smooth = True
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with open(reference_filename) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference_list.append(reference.strip().split())
per_segment_references.append(reference_list)
translations = []
with open(trans_file) as fh:
for line in fh:
translations.append(line.strip().split())
bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)
return round(100 * bleu_score,2)
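# Hypothetical usage sketch (not part of the original module). compute_bleu
# expects pre-tokenized corpora: a list of reference lists per example and a
# list of hypothesis token lists. All names and tokens below are illustrative.
if __name__ == "__main__":
    example_refs = [[["return", "the", "maximum", "value"]]]
    example_hyps = [["return", "the", "max", "value"]]
    score, precisions, bp, ratio, hyp_len, ref_len = compute_bleu(
        example_refs, example_hyps, max_order=4, smooth=True)
    print("smoothed BLEU-4: %.2f" % (100 * score))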
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/code/bleu.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import gc
import shutil
import json
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
class concodeDataset(Dataset):
def __init__(self, tokenizer, args, logger, file_type='train', block_size=512, mode='train'):
if args.local_rank==-1:
local_rank=0
world_size=1
else:
local_rank=args.local_rank
world_size=torch.distributed.get_world_size()
self.block_size = block_size
self.mode = mode
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
if mode != 'test' and os.path.exists(cached_file) and not args.overwrite_cache:
if file_type == 'train':
logger.warning("Loading features from cached file %s", cached_file)
with open(cached_file, 'rb') as handle:
data = pickle.load(handle)
self.inputs = data['inputs']
self.token_labels = data['token_labels']
else:
self.inputs = []
self.token_labels = []
datafile = os.path.join(args.data_dir, f"{file_type}.json")
if file_type == 'train':
logger.warning("Creating features from dataset file at %s", datafile)
datas = open(datafile).readlines()
length = len(datas)
logger.info("Data size: %d"%(length))
for idx, x in enumerate(datas):
if idx % (length//10) == 0:
percent = idx / (length//10) * 10
logger.warning("Rank %d, load %d"%(local_rank, percent))
if idx % world_size != local_rank:
continue
x = json.loads(x)
code = tokenizer.encode(x["code"])
nl = tokenizer.encode(x["nl"])
input_ids, input_labels = self.pad_and_get_mask(code, nl, tokenizer)
self.inputs.append(input_ids)
self.token_labels.append(input_labels)
if file_type == 'train':
logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
logger.warning("Saving features into cached file %s", cached_file)
if mode != 'test':
with open(cached_file, 'wb') as handle:
pickle.dump({'inputs': self.inputs, 'token_labels': self.token_labels}, handle, protocol=pickle.HIGHEST_PROTOCOL)
def pad_and_get_mask(self, code, nl, tokenizer):
if self.mode == 'test':
code = []
while (len(code) + len(nl) + 2 > self.block_size):
if (len(code) > len(nl)):
code = code[:-1]
else:
nl = nl[:-1]
if self.mode == 'train':
inputs = nl + [tokenizer.bos_token_id] + code + [tokenizer.eos_token_id]
labels = [1] * len(nl) + [2] * (len(code)+1) + [0]
else:
inputs = nl + [tokenizer.bos_token_id]
labels = [1] * len(nl) + [2]
return inputs, labels
assert len(inputs) <= self.block_size
pad_len = self.block_size - len(inputs)
inputs += [tokenizer.pad_token_id] * pad_len
labels += [0] * pad_len
assert len(inputs) == len(labels)
return inputs, labels
def __len__(self):
return len(self.inputs)
def __getitem__(self, item):
return torch.tensor(self.inputs[item]), torch.tensor(self.token_labels[item])
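# Illustrative note (not in the original file): for mode='train' each sample is
# laid out as  nl_tokens + <s> + code_tokens + </s> + padding, and token_labels
# marks NL positions with 1, <s> plus the code span with 2, and </s>/padding
# with 0, so a training loop can restrict the LM loss to positions labelled 2.
# A minimal hypothetical use, assuming `tokenizer` and `args` are set up as in
# run.py:
#
#   dataset = concodeDataset(tokenizer, args, logger, file_type='dev',
#                            block_size=512, mode='train')
#   input_ids, token_labels = dataset[0]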
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/code/dataset.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
from torch.autograd import Variable
import copy
class Seq2Seq(nn.Module):
"""
Build Sequence-to-Sequence.
Parameters:
* `encoder`- encoder of seq2seq model. e.g. roberta
* `decoder`- decoder of seq2seq model. e.g. transformer
* `config`- configuration of encoder model.
* `beam_size`- beam size for beam search.
* `max_length`- max length of target for beam search.
* `sos_id`- start-of-sequence symbol id in target for beam search.
* `eos_id`- end-of-sequence symbol id in target for beam search.
"""
def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder=decoder
self.config=config
self.register_buffer("bias", torch.tril(torch.ones(2048, 2048)))
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.lsm = nn.LogSoftmax(dim=-1)
self.tie_weights()
self.beam_size=beam_size
self.max_length=max_length
self.sos_id=sos_id
self.eos_id=eos_id
def _tie_or_clone_weights(self, first_module, second_module):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.encoder.embeddings.word_embeddings)
def forward(self, source_ids=None,source_mask=None,target_ids=None,target_mask=None,args=None):
outputs = self.encoder(source_ids, attention_mask=source_mask)
encoder_output = outputs[0].permute([1,0,2]).contiguous()
if target_ids is not None:
attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool())
hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous()
lm_logits = self.lm_head(hidden_states)
# Shift so that tokens < n predict n
active_loss = target_mask[..., 1:].ne(0).view(-1) == 1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = target_ids[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss],
shift_labels.view(-1)[active_loss])
outputs = loss,loss*active_loss.sum(),active_loss.sum()
return outputs
else:
#Predict
preds=[]
zero=torch.cuda.LongTensor(1).fill_(0)
for i in range(source_ids.shape[0]):
context=encoder_output[:,i:i+1]
context_mask=source_mask[i:i+1,:]
beam = Beam(self.beam_size,self.sos_id,self.eos_id)
input_ids=beam.getCurrentState()
context=context.repeat(1, self.beam_size,1)
context_mask=context_mask.repeat(self.beam_size,1)
for _ in range(self.max_length):
if beam.done():
break
attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool())
out = torch.tanh(self.dense(out))
hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:]
out = self.lsm(self.lm_head(hidden_states)).data
beam.advance(out)
input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin()))
input_ids=torch.cat((input_ids,beam.getCurrentState()),-1)
hyp= beam.getHyp(beam.getFinal())
pred=beam.buildTargetTokens(hyp)[:self.beam_size]
pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred]
preds.append(torch.cat(pred,0).unsqueeze(0))
preds=torch.cat(preds,0)
return preds
class Beam(object):
def __init__(self, size,sos,eos):
self.size = size
self.tt = torch.cuda
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size)
.fill_(0)]
self.nextYs[0][0] = sos
# Has EOS topped the beam yet.
self._eos = eos
self.eosTop = False
# Time and k pair for finished.
self.finished = []
def getCurrentState(self):
"Get the outputs for the current timestep."
batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
return batch
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk):
"""
Given the word probabilities for the last step of every hypothesis on
the beam (`wordLk`), compute and update the beam search.
Parameters:
* `wordLk`- probs of advancing from the last step (K x words)
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId // numWords
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self._eos:
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >=self.size
def getFinal(self):
if len(self.finished) == 0:
self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
self.finished.sort(key=lambda a: -a[0])
if len(self.finished) != self.size:
unfinished=[]
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] != self._eos:
s = self.scores[i]
unfinished.append((s, len(self.nextYs) - 1, i))
unfinished.sort(key=lambda a: -a[0])
self.finished+=unfinished[:self.size-len(self.finished)]
return self.finished[:self.size]
def getHyp(self, beam_res):
"""
Walk back to construct the full hypothesis.
"""
hyps=[]
for _,timestep, k in beam_res:
hyp = []
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
k = self.prevKs[j][k]
hyps.append(hyp[::-1])
return hyps
def buildTargetTokens(self, preds):
sentence=[]
for pred in preds:
tokens = []
for tok in pred:
if tok==self._eos:
break
tokens.append(tok)
sentence.append(tokens)
return sentence
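# Illustrative sketch (hypothetical, mirroring the prediction branch of
# Seq2Seq.forward) of how the Beam class above is driven:
#
#   beam = Beam(size=beam_size, sos=sos_id, eos=eos_id)
#   while not beam.done():
#       log_probs = ...              # (beam_size, vocab_size) scores for the last token
#       beam.advance(log_probs)      # extend every hypothesis by one token
#   best_tokens = beam.buildTargetTokens(beam.getHyp(beam.getFinal()))[0]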
|
CodeGen-main
|
CodeXGLUE/Text-Code/text-to-code/code/beam.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import sys,json
import numpy as np
def read_answers(filename):
answers={}
with open(filename) as f:
for line in f:
line=line.strip()
js=json.loads(line)
answers[js['url']]=js['idx']
return answers
def read_predictions(filename):
predictions={}
with open(filename) as f:
for line in f:
line=line.strip()
js=json.loads(line)
predictions[js['url']]=js['answers']
return predictions
def calculate_scores(answers,predictions):
scores=[]
for key in answers:
if key not in predictions:
logging.error("Missing prediction for url {}.".format(key))
sys.exit()
flag=False
for rank,idx in enumerate(predictions[key]):
if idx==answers[key]:
scores.append(1/(rank+1))
flag=True
break
if flag is False:
scores.append(0)
result={}
result['MRR']=round(np.mean(scores),4)
return result
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for the NL-code-search-Adv dataset.')
parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.")
parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.")
args = parser.parse_args()
answers=read_answers(args.answers)
predictions=read_predictions(args.predictions)
scores=calculate_scores(answers,predictions)
print(scores)
if __name__ == '__main__':
main()
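# Worked example (illustrative): if the gold index is ranked 1st for one query
# and 3rd for another, MRR = (1/1 + 1/3) / 2 = 0.6667, i.e.
#   calculate_scores({'u1': 'a', 'u2': 'b'},
#                    {'u1': ['a', 'x'], 'u2': ['x', 'y', 'b']})
# returns {'MRR': 0.6667}.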
|
CodeGen-main
|
CodeXGLUE/Text-Code/NL-code-search-Adv/evaluator/evaluator.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import sys
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import json
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
import multiprocessing
from model import Model
cpu_cont = multiprocessing.cpu_count()
from transformers import (AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaModel, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
from codegen_sources.wrappers.models import ModelPython, ModelConfig, ModelPythonFunc
from codegen_sources.wrappers.tokenizer import PythonTokenizer, RobertaPythonTokenizer
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'xlm_python': (ModelConfig, ModelPython, PythonTokenizer),
'roberta_python': (ModelConfig, ModelPython, RobertaPythonTokenizer),
'xlm_python_func': (ModelConfig, ModelPythonFunc, PythonTokenizer),
'roberta_python_func': (ModelConfig, ModelPythonFunc, RobertaPythonTokenizer),
}
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
code_tokens,
code_ids,
nl_tokens,
nl_ids,
url,
idx,
):
self.code_tokens = code_tokens
self.code_ids = code_ids
self.nl_tokens = nl_tokens
self.nl_ids = nl_ids
self.url=url
self.idx=idx
def convert_examples_to_features(js,tokenizer,args):
#code
if args.model_type in ['xlm_python', 'xlm_java', 'xlm_java_func', 'xlm_python_func']:
if 'code' in js:
code = js['code']
else:
code = js['function']
code_tokens = tokenizer.tokenize(code, keep_comments=False)
else:
if 'code_tokens' in js:
code=' '.join(js['code_tokens'])
else:
code=' '.join(js['function_tokens'])
code_tokens=tokenizer.tokenize(code)
code_tokens = code_tokens[:args.block_size-2]
if len(code_tokens) == 0:
return None
code_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
code_ids = tokenizer.convert_tokens_to_ids(code_tokens)
padding_length = args.block_size - len(code_ids)
code_ids+=[tokenizer.pad_token_id]*padding_length
nl=' '.join(js['docstring_tokens'])
nl_tokens=tokenizer.tokenize(nl, is_text=True)[:args.block_size-2]
nl_tokens =[tokenizer.cls_token]+nl_tokens+[tokenizer.sep_token]
nl_ids = tokenizer.convert_tokens_to_ids(nl_tokens)
padding_length = args.block_size - len(nl_ids)
nl_ids+=[tokenizer.pad_token_id]*padding_length
return InputFeatures(code_tokens,code_ids,nl_tokens,nl_ids,js['url'],js['idx'])
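# Illustrative note (not in the original file): both sides are padded to
# args.block_size and laid out as  cls_token + tokens + sep_token + padding,
# giving parallel code_ids and nl_ids that model.py later scores against each
# other with a dot product.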
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path=None):
self.examples = []
data=[]
with open(file_path) as f:
for line in f:
line=line.strip()
js=json.loads(line)
data.append(js)
for i,js in enumerate(data):
features = convert_examples_to_features(js,tokenizer,args)
if features is None:
print(f" rm 1 example could not tokenized")
continue
self.examples.append(features)
if 'train' in file_path:
for idx, example in enumerate(self.examples[:3]):
logger.info("*** Example ***")
logger.info("idx: {}".format(idx))
logger.info("code_tokens: {}".format([x.replace('\u0120','_') for x in example.code_tokens]))
logger.info("code_ids: {}".format(' '.join(map(str, example.code_ids))))
logger.info("nl_tokens: {}".format([x.replace('\u0120','_') for x in example.nl_tokens]))
logger.info("nl_ids: {}".format(' '.join(map(str, example.nl_ids))))
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return (torch.tensor(self.examples[i].code_ids),torch.tensor(self.examples[i].nl_ids))
def set_seed(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def train(args, train_dataset, model, tokenizer):
""" Train the model """
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
batch_size=args.train_batch_size,num_workers=4,pin_memory=True)
args.max_steps=args.epoch*len( train_dataloader)
args.save_steps=len( train_dataloader)//10
args.warmup_steps=len( train_dataloader)
args.logging_steps=len( train_dataloader)
args.num_train_epochs=args.epoch
model.to(args.device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
num_training_steps=args.max_steps)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last))
if os.path.exists(optimizer_last):
optimizer.load_state_dict(torch.load(optimizer_last))
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.max_steps)
global_step = args.start_step
tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
best_mrr=0.0
best_acc=0.0
# model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
for idx in range(args.start_epoch, int(args.num_train_epochs)):
bar = train_dataloader
tr_num=0
train_loss=0
for step, batch in enumerate(bar):
code_inputs = batch[0].to(args.device)
nl_inputs = batch[1].to(args.device)
model.train()
loss,code_vec,nl_vec = model(code_inputs,nl_inputs)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
tr_num+=1
train_loss+=loss.item()
if avg_loss==0:
avg_loss=tr_loss
avg_loss=round(train_loss/tr_num,5)
if (step+1)% 100==0:
logger.info("epoch {} step {} loss {}".format(idx,step+1,avg_loss))
#bar.set_description("epoch {} loss {}".format(idx,avg_loss))
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
output_flag=True
avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logging_loss = tr_loss
tr_nb=global_step
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer,eval_when_training=True)
for key, value in results.items():
logger.info(" %s = %s", key, round(value,4))
# Save model checkpoint
tr_num=0
train_loss=0
if results['eval_mrr']>best_acc:
best_acc=results['eval_mrr']
logger.info(" "+"*"*20)
logger.info(" Best mrr:%s",round(best_acc,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-mrr'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
eval_dataset=None
def evaluate(args, model, tokenizer,eval_when_training=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
global eval_dataset
if eval_dataset is None:
eval_dataset = TextDataset(tokenizer, args,args.eval_data_file)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
code_vecs=[]
nl_vecs=[]
for batch in eval_dataloader:
code_inputs = batch[0].to(args.device)
nl_inputs = batch[1].to(args.device)
with torch.no_grad():
lm_loss,code_vec,nl_vec = model(code_inputs,nl_inputs)
eval_loss += lm_loss.mean().item()
code_vecs.append(code_vec.cpu().numpy())
nl_vecs.append(nl_vec.cpu().numpy())
nb_eval_steps += 1
code_vecs=np.concatenate(code_vecs,0)
nl_vecs=np.concatenate(nl_vecs,0)
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.tensor(eval_loss)
scores=np.matmul(nl_vecs,code_vecs.T)
ranks=[]
for i in range(len(scores)):
score=scores[i,i]
rank=1
for j in range(len(scores)):
if i!=j and scores[i,j]>=score:
rank+=1
ranks.append(1/rank)
result = {
"eval_loss": float(perplexity),
"eval_mrr":float(np.mean(ranks))
}
return result
def test(args, model, tokenizer):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_dataset = TextDataset(tokenizer, args,args.test_data_file)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running Test *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
code_vecs=[]
nl_vecs=[]
for batch in eval_dataloader:
code_inputs = batch[0].to(args.device)
nl_inputs = batch[1].to(args.device)
with torch.no_grad():
lm_loss,code_vec,nl_vec = model(code_inputs,nl_inputs)
eval_loss += lm_loss.mean().item()
code_vecs.append(code_vec.cpu().numpy())
nl_vecs.append(nl_vec.cpu().numpy())
nb_eval_steps += 1
code_vecs=np.concatenate(code_vecs,0)
nl_vecs=np.concatenate(nl_vecs,0)
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.tensor(eval_loss)
scores=np.matmul(nl_vecs,code_vecs.T)
sort_ids=np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1]
indexs=[]
urls=[]
for example in eval_dataset.examples:
indexs.append(example.idx)
urls.append(example.url)
with open(os.path.join(args.output_dir,"predictions.jsonl"),'w') as f:
for index,url,sort_id in zip(indexs,urls,sort_ids):
js={}
js['url']=url
js['answers']=[]
for idx in sort_id[:100]:
js['answers'].append(indexs[int(idx)])
f.write(json.dumps(js)+'\n')
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--test_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--model_type", default="bert", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--mlm", action='store_true',
help="Train with masked-language modeling loss instead of language modeling.")
parser.add_argument("--mlm_probability", type=float, default=0.15,
help="Ratio of tokens to mask for masked language modeling loss")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--cache_dir", default="", type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument('--save_total_limit', type=int, default=None,
help='Limit the total number of checkpoints; delete the older checkpoints in the output_dir (does not delete by default)')
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--epoch', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args.seed)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
args.start_epoch = 0
args.start_step = 0
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
args.config_name = os.path.join(checkpoint_last, 'config.json')
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
with open(idx_file, encoding='utf-8') as idxf:
args.start_epoch = int(idxf.readlines()[0].strip()) + 1
step_file = os.path.join(checkpoint_last, 'step_file.txt')
if os.path.exists(step_file):
with open(step_file, encoding='utf-8') as stepf:
args.start_step = int(stepf.readlines()[0].strip())
logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
config.num_labels=1
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.block_size <= 0:
args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
if args.model_name_or_path:
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
else:
model = model_class(config)
model=Model(model,config,tokenizer,args)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = TextDataset(tokenizer, args,args.train_data_file)
if args.local_rank == 0:
torch.distributed.barrier()
train(args, train_dataset, model, tokenizer)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoint_prefix = 'checkpoint-best-mrr/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
result=evaluate(args, model, tokenizer)
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],4)))
if args.do_test and args.local_rank in [-1, 0]:
checkpoint_prefix = 'checkpoint-best-mrr/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
test(args, model, tokenizer)
return results
if __name__ == "__main__":
main()
|
CodeGen-main
|
CodeXGLUE/Text-Code/NL-code-search-Adv/code/run.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
from torch.autograd import Variable
import copy
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
class Model(nn.Module):
def __init__(self, encoder,config,tokenizer,args):
super(Model, self).__init__()
self.encoder = encoder
self.config=config
self.tokenizer=tokenizer
self.args=args
def forward(self, code_inputs,nl_inputs,return_vec=False):
bs=code_inputs.shape[0]
inputs=torch.cat((code_inputs,nl_inputs),0)
outputs=self.encoder(inputs,attention_mask=inputs.ne(1))[1]
code_vec=outputs[:bs]
nl_vec=outputs[bs:]
if return_vec:
return code_vec,nl_vec
scores=(nl_vec[:,None,:]*code_vec[None,:,:]).sum(-1)
loss_fct = CrossEntropyLoss()
loss = loss_fct(scores, torch.arange(bs, device=scores.device))
return loss,code_vec,nl_vec
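# Illustrative note (not in the original file): `scores[i, j]` is the dot
# product between the i-th NL vector and the j-th code vector, so the
# CrossEntropyLoss over torch.arange(bs) treats the paired code snippet as the
# positive and every other snippet in the batch as an in-batch negative.
# A minimal hypothetical call, assuming both inputs are of shape (bs, block_size):
#
#   loss, code_vec, nl_vec = model(code_inputs, nl_inputs)
#   code_vec, nl_vec = model(code_inputs, nl_inputs, return_vec=True)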
|
CodeGen-main
|
CodeXGLUE/Text-Code/NL-code-search-Adv/code/model.py
|
#!/usr/bin/python
'''
This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
'''
# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
score_cooked(alltest, n=4): Score a list of cooked test sentences.
score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
'''
import sys, math, re, xml.sax.saxutils
import subprocess
import os
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
nonorm = 0
preserve_case = False
eff_ref_len = "shortest"
normalize1 = [
('<skipped>', ''), # strip "skipped" tags
(r'-\n', ''), # strip end-of-line hyphenation and join lines
(r'\n', ' '), # join lines
# (r'(\d)\s+(?=\d)', r'\1'), # join digits
]
normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
normalize2 = [
(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing
(r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit
(r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit
(r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit
]
normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
def normalize(s):
'''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
if (nonorm):
return s.split()
if type(s) is not str:
s = " ".join(s)
# language-independent part:
for (pattern, replace) in normalize1:
s = re.sub(pattern, replace, s)
s = xml.sax.saxutils.unescape(s, {'&quot;':'"'})
# language-dependent part (assuming Western languages):
s = " %s " % s
if not preserve_case:
s = s.lower() # this might not be identical to the original
for (pattern, replace) in normalize2:
s = re.sub(pattern, replace, s)
return s.split()
def count_ngrams(words, n=4):
counts = {}
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] = counts.get(ngram, 0)+1
return counts
def cook_refs(refs, n=4):
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
refs = [normalize(ref) for ref in refs]
maxcounts = {}
for ref in refs:
counts = count_ngrams(ref, n)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
return ([len(ref) for ref in refs], maxcounts)
def cook_test(test, item, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflens, refmaxcounts)=item
test = normalize(test)
result = {}
result["testlen"] = len(test)
# Calculate effective reference sentence length.
if eff_ref_len == "shortest":
result["reflen"] = min(reflens)
elif eff_ref_len == "average":
result["reflen"] = float(sum(reflens))/len(reflens)
elif eff_ref_len == "closest":
min_diff = None
for reflen in reflens:
if min_diff is None or abs(reflen-len(test)) < min_diff:
min_diff = abs(reflen-len(test))
result['reflen'] = reflen
result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)]
result['correct'] = [0]*n
counts = count_ngrams(test, n)
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result
def score_cooked(allcomps, n=4, ground=0, smooth=1):
totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
for comps in allcomps:
for key in ['testlen','reflen']:
totalcomps[key] += comps[key]
for key in ['guess','correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
logbleu = 0.0
all_bleus = []
for k in range(n):
correct = totalcomps['correct'][k]
guess = totalcomps['guess'][k]
addsmooth = 0
if smooth == 1 and k > 0:
addsmooth = 1
logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min)
if guess == 0:
all_bleus.append(-10000000)
else:
all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess ))
logbleu /= float(n)
all_bleus.insert(0, logbleu)
brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1))
for i in range(len(all_bleus)):
if i ==0:
all_bleus[i] += brevPenalty
all_bleus[i] = math.exp(all_bleus[i])
return all_bleus
def bleu(refs, candidate, ground=0, smooth=1):
refs = cook_refs(refs)
test = cook_test(candidate, refs)
return score_cooked([test], ground=ground, smooth=smooth)
def splitPuncts(line):
return ' '.join(re.findall(r"[\w]+|[^\s\w]", line))
def computeMaps(predictions, goldfile):
predictionMap = {}
goldMap = {}
gf = open(goldfile, 'r')
for row in predictions:
cols = row.strip().split('\t')
if len(cols) == 1:
(rid, pred) = (cols[0], '')
else:
(rid, pred) = (cols[0], cols[1])
predictionMap[rid] = [splitPuncts(pred.strip().lower())]
for row in gf:
(rid, pred) = row.split('\t')
if rid in predictionMap: # Only insert if the id exists for the method
if rid not in goldMap:
goldMap[rid] = []
goldMap[rid].append(splitPuncts(pred.strip().lower()))
sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
return (goldMap, predictionMap)
#m1 is the reference map
#m2 is the prediction map
def bleuFromMaps(m1, m2):
score = [0] * 5
num = 0.0
for key in m1:
if key in m2:
bl = bleu(m1[key], m2[key][0])
score = [ score[i] + bl[i] for i in range(0, len(bl))]
num += 1
return [s * 100.0 / num for s in score]
if __name__ == '__main__':
reference_file = sys.argv[1]
predictions = []
for row in sys.stdin:
predictions.append(row)
(goldMap, predictionMap) = computeMaps(predictions, reference_file)
print (bleuFromMaps(goldMap, predictionMap)[0])
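# Hypothetical usage sketch (names illustrative): predictions and the gold file
# are tab-separated "<id>\t<summary>" rows, so the smoothed BLEU-4 printed above
# can also be obtained programmatically:
#
#   gold_map, pred_map = computeMaps(prediction_rows, "gold.txt")
#   smoothed_bleu4 = bleuFromMaps(gold_map, pred_map)[0]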
|
CodeGen-main
|
CodeXGLUE/Code-Text/code-to-text/evaluator/evaluator.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import
import numpy as np
import os
import sys
from torch import LongTensor
import bleu
import pickle
import torch
import json
import random
import logging
import argparse
from io import open
from itertools import cycle
import torch.nn as nn
from model import Seq2Seq
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
from pathlib import Path
from codegen_sources.wrappers.models import Model, ModelPython, ModelConfig, ModelPythonFunc, ModelJava
from codegen_sources.wrappers.tokenizer import JavaTokenizer, PythonTokenizer, RobertaPythonTokenizer, RobertaJavaTokenizer, Tokenizer
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
'xlm_python': (ModelConfig, ModelPython, PythonTokenizer),
'xlm_java': (ModelConfig, ModelJava, JavaTokenizer),
'roberta_python': (ModelConfig, ModelPython, RobertaPythonTokenizer),
'roberta_java': (ModelConfig, ModelJava, RobertaJavaTokenizer),
'xlm_python_func': (ModelConfig, ModelPythonFunc, PythonTokenizer),
'roberta_python_func': (ModelConfig, ModelPythonFunc, RobertaPythonTokenizer),
}
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class Example(object):
"""A single training/test example."""
def __init__(self,
idx,
source,
target,
):
self.idx = idx
self.source = source
self.target = target
def read_examples(filename, args):
"""Read examples from filename."""
examples=[]
with open(filename,encoding="utf-8") as f:
for idx, line in enumerate(f):
line=line.strip()
js=json.loads(line)
if 'idx' not in js:
js['idx']=idx
if 'xlm' in args.model_type:
code=js['code']
docstring = js['docstring']
if "python" in filename:
assert docstring in code
code = code.replace(docstring, "")
else:
code=' '.join(js['code_tokens']).replace('\n',' ')
code=' '.join(code.strip().split())
nl=' '.join(js['docstring_tokens']).replace('\n','')
nl=' '.join(nl.strip().split())
examples.append(
Example(
idx = idx,
source=code,
target = nl,
)
)
return examples
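# Data-format note (an assumption inferred from the fields accessed above): each
# line of the .jsonl file is a JSON object along the lines of
#   {"idx": 0, "code": "def add(a, b): ...", "docstring": "...",
#    "code_tokens": ["def", "add", ...], "docstring_tokens": ["Add", ...]}
# 'xlm' model types use the raw 'code'/'docstring' strings for the source
# (stripping the docstring from Python code), the other model types join the
# pre-tokenized 'code_tokens'; the target is always built from 'docstring_tokens'.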
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
example_id,
source_ids,
target_ids,
source_mask,
target_mask,
):
self.example_id = example_id
self.source_ids = source_ids
self.target_ids = target_ids
self.source_mask = source_mask
self.target_mask = target_mask
def convert_examples_to_features(examples, tokenizer, args,stage=None):
features = []
for example_index, example in enumerate(examples):
#source
source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2]
source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token]
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
source_mask = [1] * (len(source_tokens))
padding_length = args.max_source_length - len(source_ids)
source_ids+=[tokenizer.pad_token_id]*padding_length
source_mask+=[0]*padding_length
#target
if stage=="test":
target_tokens = tokenizer.tokenize("None", is_text=True) if isinstance(tokenizer, Tokenizer) else tokenizer.tokenize("None")
else:
target_tokens = tokenizer.tokenize(example.target, is_text=True)[:args.max_target_length-2] if isinstance(tokenizer, Tokenizer) else tokenizer.tokenize(example.target)[:args.max_target_length-2]
target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token]
target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
target_mask = [1] *len(target_ids)
padding_length = args.max_target_length - len(target_ids)
target_ids+=[tokenizer.pad_token_id]*padding_length
target_mask+=[0]*padding_length
if example_index < 5:
if stage=='train':
logger.info("*** Example ***")
logger.info("idx: {}".format(example.idx))
logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens]))
logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
logger.info("source_mask: {}".format(' '.join(map(str, source_mask))))
logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens]))
logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
logger.info("target_mask: {}".format(' '.join(map(str, target_mask))))
features.append(
InputFeatures(
example_index,
source_ids,
target_ids,
source_mask,
target_mask,
)
)
return features
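# Shape sketch (illustrative, assuming max_source_length=8 and a hypothetical
# 4-token source): after adding [CLS]/[SEP] and padding,
#   source_ids  -> [cls, t1, t2, t3, t4, sep, pad, pad]   (length 8)
#   source_mask -> [1,   1,  1,  1,  1,  1,   0,   0]
# Target sequences are truncated and padded the same way to max_target_length.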
def set_seed(seed=42):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type: e.g. roberta")
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model: e.g. roberta-base" )
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--load_model_path", default=None, type=str,
help="Path to trained model: Should contain the .bin files" )
## Other parameters
parser.add_argument("--train_filename", default=None, type=str,
help="The train filename. Should contain the .jsonl files for this task.")
parser.add_argument("--dev_filename", default=None, type=str,
help="The dev filename. Should contain the .jsonl files for this task.")
parser.add_argument("--test_filename", default=None, type=str,
help="The test filename. Should contain the .jsonl files for this task.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--max_source_length", default=64, type=int,
help="The maximum total source sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--max_target_length", default=32, type=int,
help="The maximum total target sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument("--train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--beam_size", default=10, type=int,
help="beam size for beam search")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--eval_steps", default=-1, type=int,
help="")
parser.add_argument("--train_steps", default=-1, type=int,
help="")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
# print arguments
args = parser.parse_args()
logger.info(args)
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1))
args.device = device
# Set seed
set_seed(args.seed)
# make dir if output_dir not exist
    if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,do_lower_case=args.do_lower_case)
    # build model
encoder = model_class.from_pretrained(args.model_name_or_path,from_tf=bool('.ckpt' in args.model_name_or_path),config=config)
decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=8)
decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,
beam_size=args.beam_size,max_length=args.max_target_length,
sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)
if args.load_model_path is not None:
logger.info("reload model from {}".format(args.load_model_path))
model.load_state_dict(torch.load(args.load_model_path))
model.to(device)
if args.local_rank != -1:
# Distributed training
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif args.n_gpu > 1:
# multi-gpu training
model = torch.nn.DataParallel(model)
if args.do_train:
# Prepare training data loader
train_examples = read_examples(args.train_filename, args)
train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')
all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)
all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps)
num_train_optimization_steps = args.train_steps
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=int(len(train_dataloader)*args.num_train_epochs*0.1),
num_training_steps=len(train_dataloader)*args.num_train_epochs)
#Start training
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num epoch = %d", args.num_train_epochs)
model.train()
dev_dataset={}
nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6
for epoch in range(args.num_train_epochs):
bar = train_dataloader
for step, batch in enumerate(bar):
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,target_ids,target_mask = batch
loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4)
if step % 100 == 0:
print("step {}: epoch {} loss {}".format(step, epoch,train_loss))
nb_tr_examples += source_ids.size(0)
nb_tr_steps += 1
loss.backward()
if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:
#Update parameters
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
if args.do_eval:
#Eval model with dev dataset
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
eval_flag=False
if 'dev_loss' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_loss']
else:
eval_examples = read_examples(args.dev_filename, args)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long)
all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
dev_dataset['dev_loss']=eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
logger.info("\n***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
                # Start evaluating the model
model.eval()
eval_loss,tokens_num = 0,0
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,target_ids,target_mask = batch
with torch.no_grad():
_,loss,num = model(source_ids=source_ids,source_mask=source_mask,
target_ids=target_ids,target_mask=target_mask)
eval_loss += loss.sum().item()
tokens_num += num.sum().item()
                # Print loss of dev dataset
model.train()
eval_loss = eval_loss / tokens_num
result = {'eval_ppl': round(np.exp(eval_loss),5),
'global_step': global_step+1,
'train_loss': round(train_loss,5)}
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
logger.info(" "+"*"*20)
#save last checkpoint
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
                model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = os.path.join(last_output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if eval_loss<best_loss:
logger.info(" Best ppl:%s",round(np.exp(eval_loss),5))
logger.info(" "+"*"*20)
best_loss=eval_loss
# Save best checkpoint for best ppl
output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
#Calculate bleu
if 'dev_bleu' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_bleu']
else:
eval_examples = read_examples(args.dev_filename, args)
eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask)
dev_dataset['dev_bleu']=eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
p=[]
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask= batch
with torch.no_grad():
preds = model(source_ids=source_ids,source_mask=source_mask)
for pred in preds:
t=pred[0].cpu().numpy()
t=list(t)
if 0 in t:
t=t[:t.index(0)]
text = tokenizer.decode(t, clean_up_tokenization_spaces=False, text=True)\
if isinstance(tokenizer, JavaTokenizer) or isinstance(tokenizer, PythonTokenizer)\
else tokenizer.decode(t,clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions=[]
with open(os.path.join(args.output_dir,"dev.output"),'w') as f, open(os.path.join(args.output_dir,"dev.gold"),'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(str(gold.idx)+'\t'+ref)
f.write(str(gold.idx)+'\t'+ref+'\n')
f1.write(str(gold.idx)+'\t'+gold.target+'\n')
(goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, "dev.gold"))
dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2)
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" "+"*"*20)
if dev_bleu>best_bleu:
logger.info(" Best bleu:%s",dev_bleu)
logger.info(" "+"*"*20)
best_bleu=dev_bleu
# Save best checkpoint for best bleu
output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if args.do_test:
files=[]
if args.dev_filename is not None:
files.append(args.dev_filename)
if args.test_filename is not None:
files.append(args.test_filename)
for idx,file in enumerate(files):
logger.info("Test file: {}".format(file))
eval_examples = read_examples(file, args)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_source_mask)
# Calculate bleu
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
p=[]
for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask= batch
with torch.no_grad():
preds = model(source_ids=source_ids,source_mask=source_mask)
for pred in preds:
t=pred[0].cpu().numpy()
t=list(t)
if 0 in t:
t=t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False, text=True) \
if isinstance(tokenizer, JavaTokenizer) or isinstance(tokenizer, PythonTokenizer) \
else tokenizer.decode(t, clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions=[]
with open(os.path.join(args.output_dir,"test_{}.output".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,"test_{}.gold".format(str(idx))),'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(str(gold.idx)+'\t'+ref)
f.write(str(gold.idx)+'\t'+ref+'\n')
f1.write(str(gold.idx)+'\t'+gold.target+'\n')
(goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, "test_{}.gold".format(idx)))
dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2)
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" "+"*"*20)
if __name__ == "__main__":
main()
|
CodeGen-main
|
CodeXGLUE/Code-Text/code-to-text/code/run.py
|
#!/usr/bin/python
'''
This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
'''
# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
score_cooked(alltest, n=4): Score a list of cooked test sentences.
score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
'''
import sys, math, re, xml.sax.saxutils
import subprocess
import os
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
nonorm = 0
preserve_case = False
eff_ref_len = "shortest"
normalize1 = [
('<skipped>', ''), # strip "skipped" tags
(r'-\n', ''), # strip end-of-line hyphenation and join lines
(r'\n', ' '), # join lines
# (r'(\d)\s+(?=\d)', r'\1'), # join digits
]
normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
normalize2 = [
(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing
(r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit
(r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit
(r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit
]
normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
def normalize(s):
'''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
if (nonorm):
return s.split()
if type(s) is not str:
s = " ".join(s)
# language-independent part:
for (pattern, replace) in normalize1:
s = re.sub(pattern, replace, s)
    s = xml.sax.saxutils.unescape(s, {'&quot;':'"'})
# language-dependent part (assuming Western languages):
s = " %s " % s
if not preserve_case:
s = s.lower() # this might not be identical to the original
for (pattern, replace) in normalize2:
s = re.sub(pattern, replace, s)
return s.split()
def count_ngrams(words, n=4):
counts = {}
for k in range(1,n+1):
for i in range(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] = counts.get(ngram, 0)+1
return counts
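# Example (illustrative): count_ngrams(['the', 'cat', 'the'], n=2) returns
#   {('the',): 2, ('cat',): 1, ('the', 'cat'): 1, ('cat', 'the'): 1}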
def cook_refs(refs, n=4):
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
refs = [normalize(ref) for ref in refs]
maxcounts = {}
for ref in refs:
counts = count_ngrams(ref, n)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
return ([len(ref) for ref in refs], maxcounts)
def cook_test(test, item, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflens, refmaxcounts)=item
test = normalize(test)
result = {}
result["testlen"] = len(test)
# Calculate effective reference sentence length.
if eff_ref_len == "shortest":
result["reflen"] = min(reflens)
elif eff_ref_len == "average":
result["reflen"] = float(sum(reflens))/len(reflens)
elif eff_ref_len == "closest":
min_diff = None
for reflen in reflens:
if min_diff is None or abs(reflen-len(test)) < min_diff:
min_diff = abs(reflen-len(test))
result['reflen'] = reflen
result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)]
result['correct'] = [0]*n
counts = count_ngrams(test, n)
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result
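# Illustrative note: cook_test returns the per-sentence statistics that
# score_cooked aggregates, e.g. (values depend entirely on the inputs)
#   {'testlen': 5, 'reflen': 5, 'guess': [5, 4, 3, 2], 'correct': [4, 2, 1, 0]}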
def score_cooked(allcomps, n=4, ground=0, smooth=1):
totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
for comps in allcomps:
for key in ['testlen','reflen']:
totalcomps[key] += comps[key]
for key in ['guess','correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
logbleu = 0.0
all_bleus = []
for k in range(n):
correct = totalcomps['correct'][k]
guess = totalcomps['guess'][k]
addsmooth = 0
if smooth == 1 and k > 0:
addsmooth = 1
logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min)
if guess == 0:
all_bleus.append(-10000000)
else:
all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess ))
logbleu /= float(n)
all_bleus.insert(0, logbleu)
brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1))
for i in range(len(all_bleus)):
if i ==0:
all_bleus[i] += brevPenalty
all_bleus[i] = math.exp(all_bleus[i])
return all_bleus
def bleu(refs, candidate, ground=0, smooth=1):
refs = cook_refs(refs)
test = cook_test(candidate, refs)
return score_cooked([test], ground=ground, smooth=smooth)
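# Usage sketch (illustrative, with made-up strings): bleu returns
# [BLEU, p1, p2, p3, p4] in the 0-1 range; bleuFromMaps below scales by 100.
#   bleu(["the cat sat on the mat"], "the cat sat on the mat")[0]  # ~1.0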
def splitPuncts(line):
return ' '.join(re.findall(r"[\w]+|[^\s\w]", line))
def computeMaps(predictions, goldfile):
predictionMap = {}
goldMap = {}
gf = open(goldfile, 'r')
for row in predictions:
cols = row.strip().split('\t')
if len(cols) == 1:
(rid, pred) = (cols[0], '')
else:
(rid, pred) = (cols[0], cols[1])
predictionMap[rid] = [splitPuncts(pred.strip().lower())]
for row in gf:
(rid, pred) = row.split('\t')
if rid in predictionMap: # Only insert if the id exists for the method
if rid not in goldMap:
goldMap[rid] = []
goldMap[rid].append(splitPuncts(pred.strip().lower()))
sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
return (goldMap, predictionMap)
#m1 is the reference map
#m2 is the prediction map
def bleuFromMaps(m1, m2):
score = [0] * 5
num = 0.0
for key in m1:
if key in m2:
bl = bleu(m1[key], m2[key][0])
score = [ score[i] + bl[i] for i in range(0, len(bl))]
num += 1
return [s * 100.0 / num for s in score]
if __name__ == '__main__':
reference_file = sys.argv[1]
predictions = []
for row in sys.stdin:
predictions.append(row)
(goldMap, predictionMap) = computeMaps(predictions, reference_file)
print (bleuFromMaps(goldMap, predictionMap)[0])
|
CodeGen-main
|
CodeXGLUE/Code-Text/code-to-text/code/bleu.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch
from torch.autograd import Variable
import copy
class Seq2Seq(nn.Module):
"""
    Build Sequence-to-Sequence.
Parameters:
* `encoder`- encoder of seq2seq model. e.g. roberta
* `decoder`- decoder of seq2seq model. e.g. transformer
* `config`- configuration of encoder model.
* `beam_size`- beam size for beam search.
* `max_length`- max length of target for beam search.
        * `sos_id`- start-of-sequence symbol id in target for beam search.
        * `eos_id`- end-of-sequence symbol id in target for beam search.
"""
def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None):
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder=decoder
self.config=config
self.register_buffer("bias", torch.tril(torch.ones(2048, 2048)))
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.lsm = nn.LogSoftmax(dim=-1)
self.tie_weights()
self.beam_size=beam_size
self.max_length=max_length
self.sos_id=sos_id
self.eos_id=eos_id
def _tie_or_clone_weights(self, first_module, second_module):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head,
self.encoder.embeddings.word_embeddings)
def forward(self, source_ids=None,source_mask=None,target_ids=None,target_mask=None,args=None):
outputs = self.encoder(source_ids, attention_mask=source_mask)
encoder_output = outputs[0].permute([1,0,2]).contiguous()
if target_ids is not None:
attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool())
hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous()
lm_logits = self.lm_head(hidden_states)
# Shift so that tokens < n predict n
active_loss = target_mask[..., 1:].ne(0).view(-1) == 1
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = target_ids[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss],
shift_labels.view(-1)[active_loss])
outputs = loss,loss*active_loss.sum(),active_loss.sum()
return outputs
else:
#Predict
preds=[]
zero=torch.cuda.LongTensor(1).fill_(0)
for i in range(source_ids.shape[0]):
context=encoder_output[:,i:i+1]
context_mask=source_mask[i:i+1,:]
beam = Beam(self.beam_size,self.sos_id,self.eos_id)
input_ids=beam.getCurrentState()
context=context.repeat(1, self.beam_size,1)
context_mask=context_mask.repeat(self.beam_size,1)
for _ in range(self.max_length):
if beam.done():
break
attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]])
tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous()
out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool())
out = torch.tanh(self.dense(out))
hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:]
out = self.lsm(self.lm_head(hidden_states)).data
beam.advance(out)
# print(input_ids)
# print(beam)
beam_origin = beam.getCurrentOrigin()
if input_ids.dtype != torch.int64 or beam_origin.dtype != torch.int64:
print('type error, casting to long')
print(f"input ids {input_ids}")
print(f"beam current origin {beam_origin}")
select = input_ids.data.index_select(0, beam_origin)
input_ids.data.copy_(select)
input_ids=torch.cat((input_ids,beam.getCurrentState()),-1)
hyp= beam.getHyp(beam.getFinal())
pred=beam.buildTargetTokens(hyp)[:self.beam_size]
pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred]
preds.append(torch.cat(pred,0).unsqueeze(0))
preds=torch.cat(preds,0)
return preds
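# Shape note for Seq2Seq generation above (illustrative): the returned `preds`
# tensor has shape (batch_size, beam_size, max_length); each row holds the token
# ids of one beam hypothesis, zero-padded after the end-of-sequence position.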
class Beam(object):
def __init__(self, size,sos,eos):
self.size = size
self.tt = torch.cuda
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size)
.fill_(0)]
self.nextYs[0][0] = sos
# Has EOS topped the beam yet.
self._eos = eos
self.eosTop = False
# Time and k pair for finished.
self.finished = []
def getCurrentState(self):
"Get the outputs for the current timestep."
batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
return batch
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk):
"""
        Given the word log-probabilities for every last beam `wordLk`,
        compute and update the beam search.
        Parameters:
        * `wordLk`- probs of advancing from the last step (K x words)
        (Use `done()` to check whether the search is complete.)
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId // numWords
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self._eos:
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >=self.size
def getFinal(self):
if len(self.finished) == 0:
self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
self.finished.sort(key=lambda a: -a[0])
if len(self.finished) != self.size:
unfinished=[]
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] != self._eos:
s = self.scores[i]
unfinished.append((s, len(self.nextYs) - 1, i))
unfinished.sort(key=lambda a: -a[0])
self.finished+=unfinished[:self.size-len(self.finished)]
return self.finished[:self.size]
def getHyp(self, beam_res):
"""
Walk back to construct the full hypothesis.
"""
hyps=[]
for _,timestep, k in beam_res:
hyp = []
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
k = self.prevKs[j][k]
hyps.append(hyp[::-1])
return hyps
def buildTargetTokens(self, preds):
sentence=[]
for pred in preds:
tokens = []
for tok in pred:
if tok==self._eos:
break
tokens.append(tok)
sentence.append(tokens)
return sentence
|
CodeGen-main
|
CodeXGLUE/Code-Text/code-to-text/code/model.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import sys
from sklearn.metrics import recall_score,precision_score,f1_score
def read_answers(filename):
answers={}
with open(filename) as f:
for line in f:
line=line.strip()
idx1,idx2,label=line.split()
answers[(idx1,idx2)]=label
return answers
def read_predictions(filename):
predictions={}
with open(filename) as f:
for line in f:
line=line.strip()
idx1,idx2,label=line.split()
predictions[(idx1,idx2)]=label
return predictions
def calculate_scores(answers,predictions):
y_trues,y_preds=[],[]
for key in answers:
if key not in predictions:
logging.error("Missing prediction for ({},{}) pair.".format(key[0],key[1]))
sys.exit()
y_trues.append(answers[key])
y_preds.append(predictions[key])
scores={}
scores['Recall']=recall_score(y_trues, y_preds, average='macro')
    scores['Precision']=precision_score(y_trues, y_preds, average='macro')
scores['F1']=f1_score(y_trues, y_preds, average='macro')
return scores
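# Format note (an assumption inferred from the parsing above): both the answers
# and predictions files contain whitespace-separated lines of the form
# "<idx1> <idx2> <label>", e.g. "13 42 1".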
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for BigCloneBench dataset.')
parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.")
parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.")
args = parser.parse_args()
answers=read_answers(args.answers)
predictions=read_predictions(args.predictions)
scores=calculate_scores(answers,predictions)
print(scores)
if __name__ == '__main__':
main()
|
CodeGen-main
|
CodeXGLUE/Code-Code/Clone-detection-BigCloneBench/evaluator/evaluator.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
from concurrent.futures.thread import ThreadPoolExecutor
from multiprocessing import cpu_count
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import sys
from codegen_sources.wrappers.models import Model, ModelConfig, ModelJava
from codegen_sources.wrappers.tokenizer import JavaTokenizer, RobertaJavaTokenizer
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm
import multiprocessing
from model import Model
cpu_cont = 16
from transformers import (AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaModel, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'xlm_java': (ModelConfig, ModelJava, JavaTokenizer),
'roberta_java': (ModelConfig, ModelJava, RobertaJavaTokenizer),
}
def get_example(item):
url1,url2,label,tokenizer,args,cache,url_to_code=item
if url1 in cache:
code1=cache[url1].copy()
else:
try:
code=' '.join(url_to_code[url1].split())
except:
code=""
code1=tokenizer.tokenize(code)
if url2 in cache:
code2=cache[url2].copy()
else:
try:
code=' '.join(url_to_code[url2].split())
except:
code=""
code2=tokenizer.tokenize(code)
return convert_examples_to_features(code1,code2,label,url1,url2,tokenizer,args,cache)
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
input_tokens,
input_ids,
label,
url1,
url2
):
self.input_tokens = input_tokens
self.input_ids = input_ids
self.label=label
self.url1=url1
self.url2=url2
def convert_examples_to_features(code1_tokens,code2_tokens,label,url1,url2,tokenizer,args,cache):
#source
code1_tokens=code1_tokens[:args.block_size-2]
code1_tokens =[tokenizer.cls_token]+code1_tokens+[tokenizer.sep_token]
code2_tokens=code2_tokens[:args.block_size-2]
code2_tokens =[tokenizer.cls_token]+code2_tokens+[tokenizer.sep_token]
code1_ids=tokenizer.convert_tokens_to_ids(code1_tokens)
padding_length = args.block_size - len(code1_ids)
code1_ids+=[tokenizer.pad_token_id]*padding_length
code2_ids=tokenizer.convert_tokens_to_ids(code2_tokens)
padding_length = args.block_size - len(code2_ids)
code2_ids+=[tokenizer.pad_token_id]*padding_length
source_tokens=code1_tokens+code2_tokens
source_ids=code1_ids+code2_ids
return InputFeatures(source_tokens,source_ids,label,url1,url2)
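# Shape note (illustrative): each snippet is truncated and padded to
# args.block_size, so the concatenated pair gives source_ids of length
# 2 * args.block_size, which the pair classifier consumes.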
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path='train', block_size=512,pool=None):
postfix=file_path.split('/')[-1].split('.txt')[0]
self.examples = []
index_filename=file_path
logger.info("Creating features from index file at %s ", index_filename)
url_to_code={}
with open('/'.join(index_filename.split('/')[:-1])+'/data.jsonl') as f:
for line in f:
line=line.strip()
js=json.loads(line)
url_to_code[js['idx']]=js['func']
data=[]
cache={}
        with open(index_filename) as f:
for line in f:
line=line.strip()
url1,url2,label=line.split('\t')
if url1 not in url_to_code or url2 not in url_to_code:
continue
if label=='0':
label=0
else:
label=1
data.append((url1,url2,label,tokenizer, args,cache,url_to_code))
if 'test' not in postfix:
data=random.sample(data,int(len(data)*0.1))
executor = ThreadPoolExecutor(max_workers=cpu_count())
self.examples=list(executor.map(get_example, data))
if 'train' in postfix:
for idx, example in enumerate(self.examples[:3]):
logger.info("*** Example ***")
logger.info("idx: {}".format(idx))
logger.info("label: {}".format(example.label))
logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
return torch.tensor(self.examples[item].input_ids),torch.tensor(self.examples[item].label)
def load_and_cache_examples(args, tokenizer, evaluate=False,test=False,pool=None):
dataset = TextDataset(tokenizer, args, file_path=args.test_data_file if test else (args.eval_data_file if evaluate else args.train_data_file),block_size=args.block_size,pool=pool)
return dataset
def set_seed(seed=42):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def train(args, train_dataset, model, tokenizer,pool):
""" Train the model """
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
args.max_steps=args.epoch*len( train_dataloader)
args.save_steps=len( train_dataloader)
args.warmup_steps=len( train_dataloader)
args.logging_steps=len( train_dataloader)
args.num_train_epochs=args.epoch
model.to(args.device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last))
if os.path.exists(optimizer_last):
optimizer.load_state_dict(torch.load(optimizer_last))
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.max_steps)
global_step = args.start_step
tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
best_mrr=0.0
best_f1=0
# model.resize_token_embeddings(len(tokenizer))
model.zero_grad()
set_seed(args.seed) # Added here for reproducibility (even between python 2 and 3)
for idx in range(args.start_epoch, int(args.num_train_epochs)):
bar = train_dataloader
tr_num=0
train_loss=0
for step, batch in enumerate(bar):
inputs = batch[0].to(args.device)
labels=batch[1].to(args.device)
model.train()
loss,logits = model(inputs,labels)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
tr_num+=1
train_loss+=loss.item()
if avg_loss==0:
avg_loss=tr_loss
avg_loss=round(train_loss/tr_num,5)
if step % 100 == 0:
logger.info("step {}: epoch {} loss {}".format(step, idx,avg_loss))
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
output_flag=True
avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logging_loss = tr_loss
tr_nb=global_step
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer,pool=pool,eval_when_training=True)
# Save model checkpoint
if results['eval_f1']>best_f1:
best_f1=results['eval_f1']
logger.info(" "+"*"*20)
logger.info(" Best f1:%s",round(best_f1,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-f1'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
break
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="",pool=None,eval_when_training=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True,pool=pool)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits=[]
y_trues=[]
for batch in eval_dataloader:
inputs = batch[0].to(args.device)
labels=batch[1].to(args.device)
with torch.no_grad():
lm_loss,logit = model(inputs,labels)
eval_loss += lm_loss.mean().item()
logits.append(logit.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
logits=np.concatenate(logits,0)
y_trues=np.concatenate(y_trues,0)
best_threshold=0
best_f1=0
for i in range(1,100):
threshold=i/100
y_preds=logits[:,1]>threshold
from sklearn.metrics import recall_score
recall=recall_score(y_trues, y_preds, average='macro')
from sklearn.metrics import precision_score
precision=precision_score(y_trues, y_preds, average='macro')
from sklearn.metrics import f1_score
f1=f1_score(y_trues, y_preds, average='macro')
if f1>best_f1:
best_f1=f1
best_threshold=threshold
y_preds=logits[:,1]>best_threshold
from sklearn.metrics import recall_score
recall=recall_score(y_trues, y_preds, average='macro')
from sklearn.metrics import precision_score
precision=precision_score(y_trues, y_preds, average='macro')
from sklearn.metrics import f1_score
f1=f1_score(y_trues, y_preds, average='macro')
result = {
"eval_recall": float(recall),
"eval_precision": float(precision),
"eval_f1": float(f1),
"eval_threshold":best_threshold,
}
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],4)))
return result
def test(args, model, tokenizer, prefix="",pool=None,best_threshold=0):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_dataset = load_and_cache_examples(args, tokenizer, test=True,pool=pool)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running Test {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits=[]
y_trues=[]
for batch in eval_dataloader:
inputs = batch[0].to(args.device)
labels=batch[1].to(args.device)
with torch.no_grad():
lm_loss,logit = model(inputs,labels)
eval_loss += lm_loss.mean().item()
logits.append(logit.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
logits=np.concatenate(logits,0)
y_preds=logits[:,1]>best_threshold
with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f:
for example,pred in zip(eval_dataset.examples,y_preds):
if pred:
f.write(example.url1+'\t'+example.url2+'\t'+'1'+'\n')
else:
f.write(example.url1+'\t'+example.url2+'\t'+'0'+'\n')
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--test_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--model_type", default="bert", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--mlm", action='store_true',
help="Train with masked-language modeling loss instead of language modeling.")
parser.add_argument("--mlm_probability", type=float, default=0.15,
help="Ratio of tokens to mask for masked language modeling loss")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--cache_dir", default="", type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=1.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=50,
help="Save checkpoint every X updates steps.")
parser.add_argument('--save_total_limit', type=int, default=None,
                        help='Limit the total number of checkpoints; older checkpoints in output_dir are deleted. No deletion by default.')
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--epoch', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
pool = multiprocessing.Pool(cpu_cont)
args = parser.parse_args()
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args.seed)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training downloads the model & vocab
args.start_epoch = 0
args.start_step = 0
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
args.config_name = os.path.join(checkpoint_last, 'config.json')
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
with open(idx_file, encoding='utf-8') as idxf:
args.start_epoch = int(idxf.readlines()[0].strip()) + 1
step_file = os.path.join(checkpoint_last, 'step_file.txt')
if os.path.exists(step_file):
with open(step_file, encoding='utf-8') as stepf:
args.start_step = int(stepf.readlines()[0].strip())
logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
config.num_labels=2
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.block_size <= 0:
args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
if args.model_name_or_path:
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
else:
model = model_class(config)
model=Model(model,config,tokenizer,args)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False,pool=pool)
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, train_dataset, model, tokenizer,pool)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoint_prefix = 'checkpoint-best-f1/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
result=evaluate(args, model, tokenizer,pool=pool)
if args.do_test and args.local_rank in [-1, 0]:
checkpoint_prefix = 'checkpoint-best-f1/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
test(args, model, tokenizer,pool=pool,best_threshold=0.5)
return results
if __name__ == "__main__":
main()
|
CodeGen-main
|
CodeXGLUE/Code-Code/Clone-detection-BigCloneBench/code/run.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch
from torch.autograd import Variable
import copy
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size*2, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, 2)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
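        # Consecutive rows hold the <s> embeddings of the two snippets in a clone pair;
        # the reshape below concatenates them into one (batch, 2 * hidden_size) feature.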
x = x.reshape(-1,x.size(-1)*2)
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class Model(nn.Module):
def __init__(self, encoder,config,tokenizer,args):
super(Model, self).__init__()
self.encoder = encoder
self.config=config
self.tokenizer=tokenizer
self.classifier=RobertaClassificationHead(config)
self.args=args
def forward(self, input_ids=None,labels=None):
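        # Each example packs the two code snippets of a clone pair; reshaping to
        # (2 * batch, block_size) lets the encoder process each snippet separately.
        # attention_mask = input_ids.ne(1) masks padding (RoBERTa's pad token id is 1).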
input_ids=input_ids.view(-1,self.args.block_size)
outputs = self.encoder(input_ids= input_ids,attention_mask=input_ids.ne(1))[0]
logits=self.classifier(outputs)
        prob = F.softmax(logits, dim=-1)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits, labels)
return loss,prob
else:
return prob
|
CodeGen-main
|
CodeXGLUE/Code-Code/Clone-detection-BigCloneBench/code/model.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
if n <= 1 :
return False
for i in range ( 2 , n ) :
if n % i == 0 :
return False ;
return True
#TOFILL
if __name__ == '__main__':
param = [
(37,),
(39,),
(73,),
(8,),
(28,),
(66,),
(20,),
(36,),
(6,),
(51,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/PRIMALITY_TEST_SET_1_INTRODUCTION_AND_SCHOOL_METHOD.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( S , n ) :
found = False
S.sort ( )
for i in range ( n - 1 , - 1 , - 1 ) :
for j in range ( 0 , n ) :
if ( i == j ) :
continue
for k in range ( j + 1 , n ) :
if ( i == k ) :
continue
for l in range ( k + 1 , n ) :
if ( i == l ) :
continue
if ( S [ i ] == S [ j ] + S [ k ] + S [ l ] ) :
found = True
return S [ i ]
if ( found == False ) :
return - 1
#TOFILL
if __name__ == '__main__':
param = [
([8, 12, 14, 15, 16, 20, 27, 28, 29, 30, 35, 41, 46, 51, 53, 55, 55, 58, 63, 64, 72, 73, 75, 75, 75, 82, 82, 86, 89, 91, 92, 94, 95, 95, 97, 97, 98],24,),
([-62, 48, -22, -44, -58, -50, -82, 34, 26, -2, 86, -44, 92, -96, 42, -20, 10, 74, -56, -12, -28, -40],19,),
([0, 0, 0, 0, 0, 0, 0, 1, 1, 1],8,),
([84, 58, 10, 67, 77, 66, 10, 47, 65, 55, 54],5,),
([-46, -28, -20, -18, 4, 8, 18, 38, 90, 90],6,),
([0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0],35,),
([11, 13, 14, 21, 26, 28, 36, 39, 41, 42, 43, 44, 49, 49, 57, 58, 59, 59, 63, 64, 67, 69, 70, 75, 78, 79, 83, 83, 86, 91, 92, 93, 96, 96, 96, 97],30,),
([74, 52, -16, 34, -88, 62, 54, 46, -82, 76, -48, 54, 50, -66, -18, 78, -48, 38, 96, -32, -82, 0, -76, 46, -56, 4, -30, -70, -62],16,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],17,),
([55, 74, 18, 4, 68, 66, 33, 61, 66, 92, 21, 9, 49, 14, 99, 87, 74, 6, 11, 25, 5, 58, 56, 20],23,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/FIND_LARGEST_D_IN_ARRAY_SUCH_THAT_A_B_C_D.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( s ) :
n = len ( s ) ;
sub_count = ( n * ( n + 1 ) ) // 2 ;
arr = [ 0 ] * sub_count ;
index = 0 ;
for i in range ( n ) :
for j in range ( 1 , n - i + 1 ) :
arr [ index ] = s [ i : i + j ] ;
index += 1 ;
arr.sort ( ) ;
res = "" ;
for i in range ( sub_count ) :
res += arr [ i ] ;
return res ;
#TOFILL
if __name__ == '__main__':
param = [
('sqGOi',),
('848580',),
('01001110011001',),
('ZhWXUKmeiI',),
('0917296541285',),
('01101001111100',),
('tjP kR',),
('999907',),
('011100',),
('qJPHNSJOUj',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/LEXICOGRAPHICAL_CONCATENATION_SUBSTRINGS_STRING.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(str1, str2):
if (len(str1) > len(str2)):
t = str1
str1 = str2
str2 = t
str = ""
n1 = len(str1)
n2 = len(str2)
str1 = str1[:: - 1]
str2 = str2[:: - 1]
carry = 0
for i in range(n1):
sum = ((ord(str1[i]) - 48) + ((ord(str2[i]) - 48) + carry))
str += chr(sum % 10 + 48)
carry = int(sum / 10)
for i in range(n1, n2):
sum = ((ord(str2[i]) - 48) + carry)
str += chr(sum % 10 + 48)
carry = (int)(sum / 10)
if (carry):
str += chr(carry + 48)
str = str[:: - 1]
return str
#TOFILL
if __name__ == '__main__':
param = [
('VkfzrPG', 'rKZ',),
('0526110506447', '903',),
('011010010', '110100000',),
('sPAwZACc ', 'liYMsojPiinOV',),
('3', '611',),
('0101', '01110101011',),
('VTtNu', 'Wsmc',),
('2317170', '898421173423',),
('111111000010', '01100001110111',),
('Ktt', 'CTbbVX wGBkE',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success += 1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/SUM_TWO_LARGE_NUMBERS.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( num , divisor ) :
while ( num >= divisor ) :
num -= divisor ;
return num ;
#TOFILL
if __name__ == '__main__':
param = [
(70,13,),
(77,3,),
(77,73,),
(88,54,),
(96,39,),
(6,10,),
(79,95,),
(44,32,),
(26,86,),
(82,91,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/PROGRAM_TO_FIND_REMAINDER_WITHOUT_USING_MODULO_OR_OPERATOR_2.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
while ( int ( n / 100 ) ) :
last_digit = int ( n % 10 )
n = int ( n / 10 )
n += last_digit * 3
return ( n % 29 == 0 )
#TOFILL
if __name__ == '__main__':
param = [
(29,),
(0,),
(65,),
(1419,),
(54,),
(7,),
(44,),
(34,),
(1160,),
(292929002929,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/NUMBER_IS_DIVISIBLE_BY_29_OR_NOT.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
num = n ;
dec_value = 0 ;
base1 = 1 ;
len1 = len ( num ) ;
for i in range ( len1 - 1 , - 1 , - 1 ) :
if ( num [ i ] == '1' ) :
dec_value += base1 ;
base1 = base1 * 2 ;
return dec_value ;
#TOFILL
if __name__ == '__main__':
param = [
('uEmIAgF',),
('753310137',),
('010011010',),
('kNi',),
('04562016903312',),
('000111101',),
('bk',),
('9',),
('1',),
('XxT nXLlk',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/PROGRAM_BINARY_DECIMAL_CONVERSION_1.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , n ) :
found = False
arr.sort ( )
for i in range ( 0 , n - 1 ) :
l = i + 1
r = n - 1
x = arr [ i ]
while ( l < r ) :
if ( x + arr [ l ] + arr [ r ] == 0 ) :
print ( x , arr [ l ] , arr [ r ] )
l += 1
r -= 1
found = True
elif ( x + arr [ l ] + arr [ r ] < 0 ) :
l += 1
else :
r -= 1
if ( found == False ) :
print ( " No Triplet Found" )
#TOFILL
if __name__ == '__main__':
param = [
([4, 24, 27, 34, 39, 41, 67, 69, 84, 91, 94],7,),
([14, 8, 92, 46, 62, 8, 8, 70, 98, -20, -16, -6, -2, -36, 46, 46, -26, 50, 76, 96, -32, 2, -32, 72, 48, 24, 64, 42, 40, 92],29,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],15,),
([47, 69, 42, 36, 82, 65, 84],3,),
([-98, -74, -62, -60, -60, -32],5,),
([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0],35,),
([1, 4, 4, 9, 20, 23, 24, 27, 28, 29, 31, 35, 42, 45, 46, 47, 49, 52, 55, 57, 62, 67, 72, 78, 79, 82, 86, 86, 88],26,),
([92, 0, 56, 90, -10, -46, 44, -86, -16, -90, -92, -44, -88, 24, -80, -98, 68, -86, 98, -10, 18, -40, 98, 40, -58, -6, -38, 72, 90],15,),
([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],17,),
([7, 3, 37, 60, 6, 26, 30, 21, 7, 59, 18, 69, 40, 47, 34, 19, 51, 27, 4, 7, 56, 4, 57, 62, 54, 9, 93, 31, 9, 85],28,)
]
filled_function_param = [
([4, 24, 27, 34, 39, 41, 67, 69, 84, 91, 94],7,),
([14, 8, 92, 46, 62, 8, 8, 70, 98, -20, -16, -6, -2, -36, 46, 46, -26, 50, 76, 96, -32, 2, -32, 72, 48, 24, 64, 42, 40, 92],29,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],15,),
([47, 69, 42, 36, 82, 65, 84],3,),
([-98, -74, -62, -60, -60, -32],5,),
([1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0],35,),
([1, 4, 4, 9, 20, 23, 24, 27, 28, 29, 31, 35, 42, 45, 46, 47, 49, 52, 55, 57, 62, 67, 72, 78, 79, 82, 86, 86, 88],26,),
([92, 0, 56, 90, -10, -46, 44, -86, -16, -90, -92, -44, -88, 24, -80, -98, 68, -86, 98, -10, 18, -40, 98, 40, -58, -6, -38, 72, 90],15,),
([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],17,),
([7, 3, 37, 60, 6, 26, 30, 21, 7, 59, 18, 69, 40, 47, 34, 19, 51, 27, 4, 7, 56, 4, 57, 62, 54, 9, 93, 31, 9, 85],28,)
]
n_success = 0
for i, parameters_set in enumerate(param):
f_filled(*(filled_function_param[i]))
f_gold(*parameters_set)
if parameters_set == filled_function_param[i]:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/FIND_TRIPLETS_ARRAY_WHOSE_SUM_EQUAL_ZERO_2.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , n ) :
inc , dcr = dict ( ) , dict ( )
len_inc , len_dcr = [ 0 ] * n , [ 0 ] * n
longLen = 0
for i in range ( n ) :
len = 0
if inc.get ( arr [ i ] - 1 ) in inc.values ( ) :
len = inc.get ( arr [ i ] - 1 )
inc [ arr [ i ] ] = len_inc [ i ] = len + 1
for i in range ( n - 1 , - 1 , - 1 ) :
len = 0
if dcr.get ( arr [ i ] - 1 ) in dcr.values ( ) :
len = dcr.get ( arr [ i ] - 1 )
dcr [ arr [ i ] ] = len_dcr [ i ] = len + 1
for i in range ( n ) :
if longLen < ( len_inc [ i ] + len_dcr [ i ] - 1 ) :
longLen = len_inc [ i ] + len_dcr [ i ] - 1
return longLen
#TOFILL
if __name__ == '__main__':
param = [
([78],0,),
([-6, -18, -48, 58, -54, 76, 80, -56, 86, 58, -86, -86, -88, 32, 12, 58, 58, -16, 86, -24, 84, 86, 36, 18, 30, -32, -4, -36, -72, -4, 42, 94],18,),
([0, 1],1,),
([92, 26, 72, 8, 66, 28, 34, 61, 28],5,),
([-86, -82, -76, -68, -66, -64, -62, -56, -48, -42, -38, -30, -22, -18, -10, -10, -4, -2, 4, 28, 42, 44, 50, 50, 56, 58, 60, 76, 82, 86, 86, 98],25,),
([0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0],17,),
([3, 4, 8, 9, 12, 13, 16, 19, 23, 25, 29, 31, 34, 36, 38, 41, 42, 47, 49, 50, 51, 51, 58, 63, 66, 70, 73, 74, 75, 75, 75, 76, 76, 80, 82, 83, 83, 84, 86, 89, 90, 91, 91, 95, 96],44,),
([4, -76, 60, 48, -14, 72],3,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],17,),
([66, 80, 79, 72, 1, 67, 20, 67, 32, 40, 22, 64, 58, 67, 10, 21, 37, 49],15,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/LENGTH_LONGEST_STRICT_BITONIC_SUBSEQUENCE.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( a , b ) :
if ( a < 0 ) :
a = - a
if ( b < 0 ) :
b = - b
mod = a
while ( mod >= b ) :
mod = mod - b
if ( a < 0 ) :
return - mod
return mod
#TOFILL
if __name__ == '__main__':
param = [
(3243.229719038493,5659.926861939672,),
(-4362.665881044217,-9196.507113304497,),
(7255.066257575837,2623.200060506935,),
(-6929.554320261099,-3009.0234530313287,),
(3569.942027998315,6920.809419868375,),
(-6513.849053096595,-70.95992406437102,),
(7333.183189243961,580.3500610971768,),
(-2856.1752826258803,-9625.97442825802,),
(9787.228111241662,2419.6844962423256,),
(-1722.873699288031,-8370.700544254058,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if abs(1 - (0.0000001 + abs(f_gold(*parameters_set))) / (abs(f_filled(*parameters_set)) + 0.0000001)) < 0.001:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/MODULUS_TWO_FLOAT_DOUBLE_NUMBERS.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( str ) :
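    # A string of length n has n * (n + 1) / 2 non-empty contiguous substrings.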
n = len ( str ) ;
return int ( n * ( n + 1 ) / 2 ) ;
#TOFILL
if __name__ == '__main__':
param = [
('gZFGZsHCimLf',),
('505357',),
('011011101',),
('ovfwP Osauz',),
('92132238746026',),
('01100',),
('RaOWYQRfiWKSyC',),
('861330202',),
('001100010',),
('uvpKlGUBLOMba',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/NUMBER_SUBSTRINGS_STRING.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr , l , r , x ) :
if ( r >= l ) :
mid = int ( l + ( r - l ) / 2 )
if ( arr [ mid ] == x ) : return mid
if ( mid > l and arr [ mid - 1 ] == x ) :
return ( mid - 1 )
if ( mid < r and arr [ mid + 1 ] == x ) :
return ( mid + 1 )
if ( arr [ mid ] > x ) :
return f_gold ( arr , l , mid - 2 , x )
return f_gold ( arr , mid + 2 , r , x )
return - 1
#TOFILL
if __name__ == '__main__':
param = [
([6,7,15,42,47,54,56,59,59,64,68,70,71,75,91,93], 0, 15, 71),
([6,7,15,42,47,56,54,59,59,64,68,71,70, 75,91,93], 0, 15, 71),
    ([-92,-96,-68,-40,70], 0, 4, -96),
([-92,-86,-68,-40,70], 0, 4, 20),
([-3,-1,0,30,10,45,70,60], 0, 7, 0),
([-3,-1,0,10,5,45,60,50], 0, 7, 12),
([-3,-1,0,10,30,45,60,70], 0, 7, 18),
([0,0,1], 0, 2, 20),
([1,1,1], 0, 2, 17),
([30,2,30,45], 0, 3, 28)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/SEARCH_ALMOST_SORTED_ARRAY.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( x ) :
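    # Classic "snoob" bit trick: isolate the lowest set bit, add it to ripple the
    # carry, then shift the displaced trailing ones back to the low-order end so the
    # result is the next larger integer with the same number of set bits.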
next = 0
if ( x ) :
rightOne = x & - ( x )
nextHigherOneBit = x + int ( rightOne )
rightOnesPattern = x ^ int ( nextHigherOneBit )
rightOnesPattern = ( int ( rightOnesPattern ) / int ( rightOne ) )
rightOnesPattern = int ( rightOnesPattern ) >> 2
next = nextHigherOneBit | rightOnesPattern
return next
#TOFILL
if __name__ == '__main__':
param = [
(42,),
(75,),
(94,),
(5,),
(52,),
(22,),
(77,),
(44,),
(85,),
(59,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/NEXT_HIGHER_NUMBER_WITH_SAME_NUMBER_OF_SET_BITS.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
def f_gold(arr, n, A, B, C):
for i in range(n):
arr[i] = (A * arr[i] * arr[i] + B * arr[i] + C)
index = - (sys.maxsize - 1)
maximum = - (sys.maxsize - 1)
for i in range(n):
if maximum < arr[i]:
index = i
maximum = arr[i]
i = 0
j = n - 1
new_arr = [0] * n
k = 0
while i < index and j > index:
if arr[i] < arr[j]:
new_arr[k] = arr[i]
k += 1
i += 1
else:
new_arr[k] = arr[j]
k += 1
j -= 1
while i < index:
new_arr[k] = arr[i]
k += 1
i += 1
while j > index:
new_arr[k] = arr[j]
k += 1
j -= 1
new_arr[n - 1] = maximum
for i in range(n):
arr[i] = new_arr[i]
#TOFILL
if __name__ == '__main__':
param = [
([9, 30, 49, 65, 78, 85, 85, 92], 4, 4, 5, 4,),
([-48, 89, -60, 66, 71, -37, 47, -50, 61, 41, -22, -3, 90, -57, 77, -64, 22,
8, -90, -5, -94, -43, 29, -29, 86, -79, -8, 27, -20, -44, 16], 18, 20, 20, 23,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 25, 26, 15, 18,),
([87, 70, 77, 87, 73, 81, 66, 19, 83, 7, 63, 42, 42, 59, 20, 73, 17, 27, 47, 2, 63, 62, 19, 17, 69, 39,
82, 71, 81, 39, 36, 40, 45, 4, 25, 69, 30, 76, 68, 88, 29, 73, 68, 51, 24, 14, 69, 18], 33, 42, 35, 41,),
([-91, -85, -77, -73, -70, -68, -24, -21, -12, -
1, 9, 29, 48, 52, 56, 63, 88], 8, 12, 8, 8,),
([0, 0, 0, 1, 1, 0, 1, 1, 1, 1], 7, 8, 6, 7,),
([4, 5, 9, 14, 18, 20, 22, 23, 25, 28, 30, 31, 34, 35, 36, 38, 38, 39, 44, 48, 49, 51,
54, 55, 59, 64, 66, 71, 72, 72, 73, 76, 78, 82, 82, 84, 92, 93, 95], 22, 33, 19, 25,),
([40, 6, 33, 8, 78, -58, 2, 24, 40, 3, 46, 94, -26, 8, 22, -83, 96, -29, -
38, -59, 19, 62, 98, -55, -42, 79, 26, 62, -56, -85, -22], 20, 16, 19, 16,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 23, 21, 19, 23,),
([3, 68, 40, 48, 54, 35, 95, 56, 89, 40, 77, 68, 46, 78, 13, 27, 6, 17, 36, 99,
81, 2, 77, 52, 66, 52, 92, 43, 90, 22, 55, 67, 99, 60, 58], 28, 21, 23, 23,)
]
filled_function_param = [
([9, 30, 49, 65, 78, 85, 85, 92], 4, 4, 5, 4,),
([-48, 89, -60, 66, 71, -37, 47, -50, 61, 41, -22, -3, 90, -57, 77, -64, 22,
8, -90, -5, -94, -43, 29, -29, 86, -79, -8, 27, -20, -44, 16], 18, 20, 20, 23,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 25, 26, 15, 18,),
([87, 70, 77, 87, 73, 81, 66, 19, 83, 7, 63, 42, 42, 59, 20, 73, 17, 27, 47, 2, 63, 62, 19, 17, 69, 39,
82, 71, 81, 39, 36, 40, 45, 4, 25, 69, 30, 76, 68, 88, 29, 73, 68, 51, 24, 14, 69, 18], 33, 42, 35, 41,),
([-91, -85, -77, -73, -70, -68, -24, -21, -12, -
1, 9, 29, 48, 52, 56, 63, 88], 8, 12, 8, 8,),
([0, 0, 0, 1, 1, 0, 1, 1, 1, 1], 7, 8, 6, 7,),
([4, 5, 9, 14, 18, 20, 22, 23, 25, 28, 30, 31, 34, 35, 36, 38, 38, 39, 44, 48, 49, 51,
54, 55, 59, 64, 66, 71, 72, 72, 73, 76, 78, 82, 82, 84, 92, 93, 95], 22, 33, 19, 25,),
([40, 6, 33, 8, 78, -58, 2, 24, 40, 3, 46, 94, -26, 8, 22, -83, 96, -29, -
38, -59, 19, 62, 98, -55, -42, 79, 26, 62, -56, -85, -22], 20, 16, 19, 16,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 23, 21, 19, 23,),
([3, 68, 40, 48, 54, 35, 95, 56, 89, 40, 77, 68, 46, 78, 13, 27, 6, 17, 36, 99,
81, 2, 77, 52, 66, 52, 92, 43, 90, 22, 55, 67, 99, 60, 58], 28, 21, 23, 23,)
]
n_success = 0
for i, parameters_set in enumerate(param):
f_filled(*(filled_function_param[i]))
f_gold(*parameters_set)
if parameters_set == filled_function_param[i]:
n_success += 1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/SORT_ARRAY_APPLYING_GIVEN_EQUATION.py
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
if n < 3 :
return n
elif n >= 3 and n < 10 :
return n - 1
po = 1
while n / po > 9 :
po = po * 10
    msd = n // po
if msd != 3 :
return f_gold ( msd ) * f_gold ( po - 1 ) + f_gold ( msd ) + f_gold ( n % po )
else :
return f_gold ( msd * po - 1 )
#TOFILL
if __name__ == '__main__':
param = [
(85,),
(86,),
(3,),
(35,),
(59,),
(38,),
(33,),
(15,),
(75,),
(74,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
|
CodeGen-main
|
data/transcoder_evaluation_gfg/python/COUNT_NUMBERS_THAT_DONT_CONTAIN_3.py
|