file_path (string, 20-207 chars) | content (string, 5-3.85M chars) | size (int64, 5-3.85M) | lang (9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93)
---|---|---|---|---|---|---
NVlabs/DiffRL/externals/rl_games/rl_games/envs/test/test_asymmetric_env.py | import gym
import numpy as np
from rl_games.common.wrappers import MaskVelocityWrapper
class TestAsymmetricCritic(gym.Env):
def __init__(self, wrapped_env_name, **kwargs):
gym.Env.__init__(self)
self.apply_mask = kwargs.pop('apply_mask', True)
self.use_central_value = kwargs.pop('use_central_value', True)
self.env = gym.make(wrapped_env_name)
if self.apply_mask:
if wrapped_env_name not in ["CartPole-v1", "Pendulum-v0", "LunarLander-v2", "LunarLanderContinuous-v2"]:
raise ValueError('unsupported env')
self.mask = MaskVelocityWrapper(self.env, wrapped_env_name).mask
else:
self.mask = 1
self.n_agents = 1
self.use_central_value = True
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.state_space = self.env.observation_space
def get_number_of_agents(self):
return self.n_agents
def reset(self):
obs = self.env.reset()
obs_dict = {}
obs_dict["obs"] = obs * self.mask
obs_dict["state"] = obs
if self.use_central_value:
obses = obs_dict
else:
obses = obs_dict["obs"].astype(np.float32)
return obses
def step(self, actions):
obs, rewards, dones, info = self.env.step(actions)
obs_dict = {}
obs_dict["obs"] = obs * self.mask
obs_dict["state"] = obs
if self.use_central_value:
obses = obs_dict
else:
obses = obs_dict["obs"].astype(np.float32)
return obses, rewards, dones, info
def has_action_mask(self):
return False
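# Illustrative usage sketch (added; not part of the original file): the wrapper is meant
# for asymmetric actor-critic training, where the policy sees the velocity-masked
# observation and the central value function sees the full state.
def _example_usage():
    env = TestAsymmetricCritic("CartPole-v1", apply_mask=True, use_central_value=True)
    obs_dict = env.reset()
    masked_obs = obs_dict["obs"]      # velocity components zeroed out by the mask
    full_state = obs_dict["state"]    # unmasked observation for the critic
    return masked_obs, full_state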
| 1,715 | Python | 31.377358 | 116 | 0.580758 |
NVlabs/DiffRL/externals/rl_games/rl_games/envs/diambra/diambra.py | import gym
import numpy as np
import os
import random
from diambra_environment.diambraGym import diambraGym
from diambra_environment.makeDiambraEnv import make_diambra_env
class DiambraEnv(gym.Env):
def __init__(self, **kwargs):
gym.Env.__init__(self)
self.seed = kwargs.pop('seed', None)
self.difficulty = kwargs.pop('difficulty', 3)
self.env_path = kwargs.pop('env_path', "/home/trrrrr/Documents/github/ml/diambra/DIAMBRAenvironment-main")
self.character = kwargs.pop('character', 'Raidou')
self.frame_stack = kwargs.pop('frame_stack', 3)
self.attacks_buttons = kwargs.pop('attacks_buttons', False)
self._game_num = 0
self.n_agents = 1
self.rank = random.randint(0, 100500)
repo_base_path = os.path.abspath(self.env_path) # Absolute path to your DIAMBRA environment
env_kwargs = {}
env_kwargs["gameId"] = "doapp"
env_kwargs["roms_path"] = os.path.join(repo_base_path, "roms/") # Absolute path to roms
env_kwargs["mame_diambra_step_ratio"] = 6
env_kwargs["render"] = False
env_kwargs["lock_fps"] = False # Locks to 60 FPS
env_kwargs["sound"] = env_kwargs["lock_fps"] and env_kwargs["render"]
env_kwargs["player"] = "Random"
env_kwargs["difficulty"] = self.difficulty
env_kwargs["characters"] = [[self.character, "Random"], [self.character, "Random"]]
env_kwargs["charOutfits"] = [2, 2]
gym_kwargs = {}
gym_kwargs["P2brain"] = None
gym_kwargs["continue_game"] = 0.0
gym_kwargs["show_final"] = False
gym_kwargs["gamePads"] = [None, None]
gym_kwargs["actionSpace"] = ["discrete", "multiDiscrete"]
#gym_kwargs["attackButCombinations"] = [False, False]
gym_kwargs["attackButCombinations"] = [self.attacks_buttons, self.attacks_buttons]
gym_kwargs["actBufLen"] = 12
wrapper_kwargs = {}
wrapper_kwargs["hwc_obs_resize"] = [128, 128, 1]
wrapper_kwargs["normalize_rewards"] = True
wrapper_kwargs["clip_rewards"] = False
wrapper_kwargs["frame_stack"] = self.frame_stack
wrapper_kwargs["dilation"] = 1
wrapper_kwargs["scale"] = True
wrapper_kwargs["scale_mod"] = 0
key_to_add = []
key_to_add.append("actionsBuf")
key_to_add.append("ownHealth")
key_to_add.append("oppHealth")
key_to_add.append("ownPosition")
key_to_add.append("oppPosition")
key_to_add.append("stage")
key_to_add.append("character")
self.env = make_diambra_env(diambraGym, env_prefix="Train" + str(self.rank), seed= self.rank,
diambra_kwargs=env_kwargs,
diambra_gym_kwargs=gym_kwargs,
wrapper_kwargs=wrapper_kwargs,
key_to_add=key_to_add)
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
def _preproc_state_obs(self, obs):
return obs
def reset(self):
self._game_num += 1
obs = self.env.reset() # TODO: consider renaming or removing this
obs_dict = self._preproc_state_obs(obs)
return obs_dict
def step(self, actions):
obs, reward, done, info = self.env.step(actions)
return obs, reward, done, info
def has_action_mask(self):
return False | 3,496 | Python | 38.292134 | 114 | 0.588673 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/torch_ext.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.optimizer import Optimizer
import time
numpy_to_torch_dtype_dict = {
np.dtype('bool') : torch.bool,
np.dtype('uint8') : torch.uint8,
np.dtype('int8') : torch.int8,
np.dtype('int16') : torch.int16,
np.dtype('int32') : torch.int32,
np.dtype('int64') : torch.int64,
np.dtype('float16') : torch.float16,
np.dtype('float32') : torch.float32,
np.dtype('float64') : torch.float64,
np.dtype('complex64') : torch.complex64,
np.dtype('complex128') : torch.complex128,
}
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma, reduce=True):
c1 = torch.log(p1_sigma/p0_sigma + 1e-5)
c2 = (p0_sigma**2 + (p1_mu - p0_mu)**2)/(2.0 * (p1_sigma**2 + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = kl.sum(dim=-1) # sum over action dims; the batch mean is taken below when reduce=True
if reduce:
return kl.mean()
else:
return kl
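# Quick sanity check (illustrative addition, not in the original module): the KL between
# identical diagonal Gaussians is ~0, and it grows with the distance between the means.
def _policy_kl_sanity_check():
    mu = torch.zeros(4, 6)
    sigma = torch.ones(4, 6)
    kl_same = policy_kl(mu, sigma, mu, sigma)          # ~0 (up to the 1e-5 stabilizers)
    kl_shifted = policy_kl(mu, sigma, mu + 1.0, sigma) # ~0.5 per dim, summed over 6 dims -> ~3.0
    return kl_same, kl_shifted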
def mean_mask(input, mask, sum_mask):
return (input * mask).sum() / sum_mask
def shape_whc_to_cwh(shape):
#if len(shape) == 2:
# return (shape[1], shape[0])
if len(shape) == 3:
return (shape[2], shape[0], shape[1])
return shape
def safe_filesystem_op(func, *args, **kwargs):
"""
This is to prevent spurious crashes related to saving checkpoints or restoring from checkpoints in a Network
Filesystem environment (i.e. NGC cloud or SLURM)
"""
num_attempts = 5
for attempt in range(num_attempts):
try:
return func(*args, **kwargs)
except Exception as exc:
print(f'Exception {exc} when trying to execute {func} with args:{args} and kwargs:{kwargs}...')
wait_sec = 2 ** attempt
print(f'Waiting {wait_sec} before trying again...')
time.sleep(wait_sec)
raise RuntimeError(f'Could not execute {func}, give up after {num_attempts} attempts...')
def safe_save(state, filename):
return safe_filesystem_op(torch.save, state, filename)
def safe_load(filename):
return safe_filesystem_op(torch.load, filename)
def save_checkpoint(filename, state):
print("=> saving checkpoint '{}'".format(filename + '.pth'))
safe_save(state, filename + '.pth')
def load_checkpoint(filename):
print("=> loading checkpoint '{}'".format(filename))
state = safe_load(filename)
return state
def parameterized_truncated_normal(uniform, mu, sigma, a, b):
normal = torch.distributions.normal.Normal(0, 1)
alpha = (a - mu) / sigma
beta = (b - mu) / sigma
alpha_normal_cdf = normal.cdf(torch.from_numpy(np.array(alpha)))
p = alpha_normal_cdf + (normal.cdf(torch.from_numpy(np.array(beta))) - alpha_normal_cdf) * uniform
p = p.numpy()
one = np.array(1, dtype=p.dtype)
epsilon = np.array(np.finfo(p.dtype).eps, dtype=p.dtype)
v = np.clip(2 * p - 1, -one + epsilon, one - epsilon)
x = mu + sigma * np.sqrt(2) * torch.erfinv(torch.from_numpy(v))
x = torch.clamp(x, a, b)
return x
def truncated_normal(uniform, mu=0.0, sigma=1.0, a=-2, b=2):
return parameterized_truncated_normal(uniform, mu, sigma, a, b)
def sample_truncated_normal(shape=(), mu=0.0, sigma=1.0, a=-2, b=2):
return truncated_normal(torch.from_numpy(np.random.uniform(0, 1, shape)), mu, sigma, a, b)
def variance_scaling_initializer(tensor, mode='fan_in',scale = 2.0):
fan = torch.nn.init._calculate_correct_fan(tensor, mode)
print(fan, scale)
sigma = np.sqrt(scale / fan)
with torch.no_grad():
tensor[:] = sample_truncated_normal(tensor.size(), sigma=sigma)
return tensor
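# Illustrative use of the initializer above (added example; assumes a plain nn.Linear):
# weights are drawn from a truncated normal with std ~ sqrt(scale / fan_in).
def _variance_scaling_example():
    layer = nn.Linear(128, 64)
    variance_scaling_initializer(layer.weight, mode='fan_in', scale=2.0)
    return layer.weight.std().item()   # roughly sqrt(2 / 128) ~= 0.125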
def random_sample(obs_batch, prob):
num_batches = obs_batch.size()[0]
permutation = torch.randperm(num_batches, device=obs_batch.device)
start = 0
end = int(prob * num_batches)
indices = permutation[start:end]
return torch.index_select(obs_batch, 0, indices)
def mean_list(val):
return torch.mean(torch.stack(val))
def apply_masks(losses, mask=None):
sum_mask = None
if mask is not None:
mask = mask.unsqueeze(1)
sum_mask = mask.numel()
#sum_mask = mask.sum()
res_losses = [(l * mask).sum() / sum_mask for l in losses]
else:
res_losses = [torch.mean(l) for l in losses]
return res_losses, sum_mask
def normalization_with_masks(values, masks):
sum_mask = masks.sum()
values_mask = values * masks
values_mean = values_mask.sum() / sum_mask
min_sqr = ((((values_mask)**2)/sum_mask).sum() - ((values_mask/sum_mask).sum())**2)
values_std = torch.sqrt(min_sqr * sum_mask / (sum_mask-1))
normalized_values = (values_mask - values_mean) / (values_std + 1e-8)
return normalized_values
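# Worked example for the masked normalization above (illustrative addition): only the
# unmasked entries contribute to the mean/std; masked entries are expected to be masked
# out again by the caller.
def _masked_normalization_example():
    values = torch.tensor([1.0, 2.0, 3.0, 100.0])
    masks = torch.tensor([1.0, 1.0, 1.0, 0.0])       # ignore the padded last entry
    return normalization_with_masks(values, masks)   # ~[-1, 0, 1, -2]; stats use only the first three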
class CoordConv2d(nn.Conv2d):
pool = {}
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__(in_channels + 2, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
@staticmethod
def get_coord(x):
key = int(x.size(0)), int(x.size(2)), int(x.size(3)), x.type()
if key not in CoordConv2d.pool:
theta = torch.Tensor([[[1, 0, 0], [0, 1, 0]]])
coord = torch.nn.functional.affine_grid(theta, torch.Size([1, 1, x.size(2), x.size(3)])).permute([0, 3, 1, 2]).repeat(
x.size(0), 1, 1, 1).type_as(x)
CoordConv2d.pool[key] = coord
return CoordConv2d.pool[key]
def forward(self, x):
return torch.nn.functional.conv2d(torch.cat([x, self.get_coord(x).type_as(x)], 1), self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
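# Small usage sketch for CoordConv2d (illustrative addition): two coordinate channels are
# appended to the input internally, so only the original channel count is passed in.
def _coordconv_example():
    conv = CoordConv2d(in_channels=3, out_channels=8, kernel_size=3, stride=1, padding=1)
    x = torch.rand(2, 3, 16, 16)
    return conv(x).shape   # torch.Size([2, 8, 16, 16])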
class LayerNorm2d(nn.Module):
"""
Layer norm that just works on the channel axis for a Conv2d
Ref:
- code modified from https://github.com/Scitator/Run-Skeleton-Run/blob/master/common/modules/LayerNorm.py
- paper: https://arxiv.org/abs/1607.06450
Usage:
ln = LayerNorm2d(3)
x = Variable(torch.rand((1,3,4,2)))
ln(x).size()
"""
def __init__(self, features, eps=1e-6):
super().__init__()
self.register_buffer("gamma", torch.ones(features).unsqueeze(-1).unsqueeze(-1))
self.register_buffer("beta", torch.ones(features).unsqueeze(-1).unsqueeze(-1))
self.eps = eps
self.features = features
def _check_input_dim(self, input):
if input.size(1) != self.gamma.nelement():
raise ValueError('got {}-feature tensor, expected {}'
.format(input.size(1), self.features))
def forward(self, x):
self._check_input_dim(x)
x_flat = x.transpose(1,-1).contiguous().view((-1, x.size(1)))
mean = x_flat.mean(0).unsqueeze(-1).unsqueeze(-1).expand_as(x)
std = x_flat.std(0).unsqueeze(-1).unsqueeze(-1).expand_as(x)
return self.gamma.expand_as(x) * (x - mean) / (std + self.eps) + self.beta.expand_as(x)
class DiscreteActionsEncoder(nn.Module):
def __init__(self, actions_max, mlp_out, emb_size, num_agents, use_embedding):
super().__init__()
self.actions_max = actions_max
self.emb_size = emb_size
self.num_agents = num_agents
self.use_embedding = use_embedding
if use_embedding:
self.embedding = torch.nn.Embedding(actions_max, emb_size)
else:
self.emb_size = actions_max
self.linear = torch.nn.Linear(self.emb_size * num_agents, mlp_out)
def forward(self, discrete_actions):
if self.use_embedding:
emb = self.embedding(discrete_actions)
else:
emb = torch.nn.functional.one_hot(discrete_actions, num_classes=self.actions_max)
emb = emb.view( -1, self.emb_size * self.num_agents).float()
emb = self.linear(emb)
return emb
def get_model_gradients(model):
grad_list = []
for param in model.parameters():
grad_list.append(param.grad)
return grad_list
def get_mean(v):
if len(v) > 0:
mean = np.mean(v)
else:
mean = 0
return mean
class CategoricalMaskedNaive(torch.distributions.Categorical):
def __init__(self, probs=None, logits=None, validate_args=None, masks=None):
self.masks = masks
if self.masks is None:
super(CategoricalMaskedNaive, self).__init__(probs, logits, validate_args)
else:
inf_mask = torch.log(masks.float())
logits = logits + inf_mask
super(CategoricalMaskedNaive, self).__init__(probs, logits, validate_args)
def entropy(self):
if self.masks is None:
return super(CategoricalMaskedNaive, self).entropy()
p_log_p = self.logits * self.probs
p_log_p[p_log_p != p_log_p] = 0
return -p_log_p.sum(-1)
class CategoricalMasked(torch.distributions.Categorical):
def __init__(self, probs=None, logits=None, validate_args=None, masks=None):
self.masks = masks
if masks is None:
super(CategoricalMasked, self).__init__(probs, logits, validate_args)
else:
self.device = self.masks.device
logits = torch.where(self.masks, logits, torch.tensor(-1e+8).to(self.device))
super(CategoricalMasked, self).__init__(probs, logits, validate_args)
def rsample(self):
u = torch.distributions.Uniform(low=torch.zeros_like(self.logits, device = self.logits.device), high=torch.ones_like(self.logits, device = self.logits.device)).sample()
#print(u.size(), self.logits.size())
rand_logits = self.logits - (-u.log()).log()  # Gumbel-max trick: argmax(logits + Gumbel noise)
return torch.max(rand_logits, axis=-1)[1]
def entropy(self):
if self.masks is None:
return super(CategoricalMasked, self).entropy()
p_log_p = self.logits * self.probs
p_log_p = torch.where(self.masks, p_log_p, torch.tensor(0.0).to(self.device))
return -p_log_p.sum(-1)
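# Illustrative example (added): invalid actions are masked by pushing their logits to -1e8,
# so samples and the entropy only consider the allowed actions.
def _categorical_masked_example():
    logits = torch.zeros(2, 4)
    masks = torch.tensor([[True, True, False, False],
                          [True, False, False, False]])
    dist = CategoricalMasked(logits=logits, masks=masks)
    return dist.sample(), dist.entropy()   # entropies ~[ln 2, 0]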
class AverageMeter(nn.Module):
def __init__(self, in_shape, max_size):
super(AverageMeter, self).__init__()
self.max_size = max_size
self.current_size = 0
self.register_buffer("mean", torch.zeros(in_shape, dtype = torch.float32))
def update(self, values):
size = values.size()[0]
if size == 0:
return
new_mean = torch.mean(values.float(), dim=0)
size = np.clip(size, 0, self.max_size)
old_size = min(self.max_size - size, self.current_size)
size_sum = old_size + size
self.current_size = size_sum
self.mean = (self.mean * old_size + new_mean * size) / size_sum
def clear(self):
self.current_size = 0
self.mean.fill_(0)
def __len__(self):
return self.current_size
def get_mean(self):
return self.mean.squeeze(0).cpu().numpy()
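# Illustrative usage of AverageMeter (added example): it keeps a windowed running mean
# over at most `max_size` recent values, which is how game rewards/lengths are tracked.
def _average_meter_example():
    meter = AverageMeter(in_shape=1, max_size=100)
    meter.update(torch.tensor([[1.0], [3.0]]))
    meter.update(torch.tensor([[5.0]]))
    return meter.get_mean()   # -> 3.0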
class IdentityRNN(nn.Module):
def __init__(self, in_shape, out_shape):
super(IdentityRNN, self).__init__()
assert(in_shape == out_shape)
self.identity = torch.nn.Identity()
def forward(self, x, h):
return self.identity(x), h
| 11,332 | Python | 35.092357 | 176 | 0.607395 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/sac_agent.py | from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import vecenv
from rl_games.common import schedulers
from rl_games.common import experience
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from torch import optim
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import time
import os
class SACAgent:
def __init__(self, base_name, config):
print(config)
# TODO: Get obs shape and self.network
self.base_init(base_name, config)
self.num_seed_steps = config["num_seed_steps"]
self.gamma = config["gamma"]
self.critic_tau = config["critic_tau"]
self.batch_size = config["batch_size"]
self.init_alpha = config["init_alpha"]
self.learnable_temperature = config["learnable_temperature"]
self.replay_buffer_size = config["replay_buffer_size"]
self.num_steps_per_episode = config.get("num_steps_per_episode", 1)
self.normalize_input = config.get("normalize_input", False)
self.max_env_steps = config.get("max_env_steps", 1000) # temporary, in future we will use other approach
print(self.batch_size, self.num_actors, self.num_agents)
self.num_frames_per_epoch = self.num_actors * self.num_steps_per_episode
self.log_alpha = torch.tensor(np.log(self.init_alpha)).float().to(self.sac_device)
self.log_alpha.requires_grad = True
action_space = self.env_info['action_space']
self.actions_num = action_space.shape[0]
self.action_range = [
float(self.env_info['action_space'].low.min()),
float(self.env_info['action_space'].high.max())
]
obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape)
net_config = {
'obs_dim': self.env_info["observation_space"].shape[0],
'action_dim': self.env_info["action_space"].shape[0],
'actions_num' : self.actions_num,
'input_shape' : obs_shape
}
self.model = self.network.build(net_config)
self.model.to(self.sac_device)
print("Number of Agents", self.num_actors, "Batch Size", self.batch_size)
self.actor_optimizer = torch.optim.Adam(self.model.sac_network.actor.parameters(),
lr=self.config['actor_lr'],
betas=self.config.get("actor_betas", [0.9, 0.999]))
self.critic_optimizer = torch.optim.Adam(self.model.sac_network.critic.parameters(),
lr=self.config["critic_lr"],
betas=self.config.get("critic_betas", [0.9, 0.999]))
self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha],
lr=self.config["alpha_lr"],
betas=self.config.get("alphas_betas", [0.9, 0.999]))
self.replay_buffer = experience.VectorizedReplayBuffer(self.env_info['observation_space'].shape,
self.env_info['action_space'].shape,
self.replay_buffer_size,
self.sac_device)
self.target_entropy_coef = config.get("target_entropy_coef", 0.5)
self.target_entropy = self.target_entropy_coef * -self.env_info['action_space'].shape[0]
print("Target entropy", self.target_entropy)
self.step = 0
self.algo_observer = config['features']['observer']
# TODO: Is there a better way to get the maximum number of episodes?
self.max_episodes = torch.ones(self.num_actors, device=self.sac_device)*self.num_steps_per_episode
# self.episode_lengths = np.zeros(self.num_actors, dtype=int)
if self.normalize_input:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.sac_device)
def base_init(self, base_name, config):
self.config = config
self.env_config = config.get('env_config', {})
self.num_actors = config.get('num_actors', 1)
self.env_name = config['env_name']
print("Env name:", self.env_name)
self.env_info = config.get('env_info')
if self.env_info is None:
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.env_info = self.vec_env.get_env_info()
self.sac_device = config.get('device', 'cuda:0')
#temporary:
self.ppo_device = self.sac_device
print('Env info:')
print(self.env_info)
self.rewards_shaper = config['reward_shaper']
self.observation_space = self.env_info['observation_space']
self.weight_decay = config.get('weight_decay', 0.0)
#self.use_action_masks = config.get('use_action_masks', False)
self.is_train = config.get('is_train', True)
self.c_loss = nn.MSELoss()
# self.c2_loss = nn.SmoothL1Loss()
self.save_best_after = config.get('save_best_after', 500)
self.print_stats = config.get('print_stats', True)
self.rnn_states = None
self.name = base_name
self.max_epochs = self.config.get('max_epochs', 1e6)
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_agents = self.env_info.get('agents', 1)
self.obs_shape = self.observation_space.shape
self.games_to_track = self.config.get('games_to_track', 100)
self.game_rewards = torch_ext.AverageMeter(1, self.games_to_track).to(self.sac_device)
self.game_lengths = torch_ext.AverageMeter(1, self.games_to_track).to(self.sac_device)
self.obs = None
self.min_alpha = torch.tensor(np.log(1)).float().to(self.sac_device)
self.frame = 0
self.update_time = 0
self.last_mean_rewards = -100500
self.play_time = 0
self.epoch_num = 0
# self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
# print("Run Directory:", config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.experiment_dir = config.get('logdir', './')
self.nn_dir = os.path.join(self.experiment_dir, 'nn')
self.summaries_dir = os.path.join(self.experiment_dir, 'runs')
os.makedirs(self.experiment_dir, exist_ok=True)
os.makedirs(self.nn_dir, exist_ok=True)
os.makedirs(self.summaries_dir, exist_ok=True)
self.writer = SummaryWriter(self.summaries_dir)
print("Run Directory:", self.summaries_dir)
self.is_tensor_obses = None
self.is_rnn = False
self.last_rnn_indices = None
self.last_state_indices = None
def init_tensors(self):
if self.observation_space.dtype == np.uint8:
torch_dtype = torch.uint8
else:
torch_dtype = torch.float32
batch_size = self.num_agents * self.num_actors
self.current_rewards = torch.zeros(batch_size, dtype=torch.float32, device=self.sac_device)
self.current_lengths = torch.zeros(batch_size, dtype=torch.long, device=self.sac_device)
self.dones = torch.zeros((batch_size,), dtype=torch.uint8, device=self.sac_device)
@property
def alpha(self):
return self.log_alpha.exp()
@property
def device(self):
return self.sac_device
def get_full_state_weights(self):
state = self.get_weights()
state['steps'] = self.step
state['actor_optimizer'] = self.actor_optimizer.state_dict()
state['critic_optimizer'] = self.critic_optimizer.state_dict()
state['log_alpha_optimizer'] = self.log_alpha_optimizer.state_dict()
return state
def get_weights(self):
state = {'actor': self.model.sac_network.actor.state_dict(),
'critic': self.model.sac_network.critic.state_dict(),
'critic_target': self.model.sac_network.critic_target.state_dict()}
if self.normalize_input:
state['running_mean_std'] = self.running_mean_std.state_dict()
return state
def save(self, fn):
state = self.get_full_state_weights()
torch_ext.save_checkpoint(fn, state)
def set_weights(self, weights):
self.model.sac_network.actor.load_state_dict(weights['actor'])
self.model.sac_network.critic.load_state_dict(weights['critic'])
self.model.sac_network.critic_target.load_state_dict(weights['critic_target'])
if self.normalize_input:
self.running_mean_std.load_state_dict(weights['running_mean_std'])
def set_full_state_weights(self, weights):
self.set_weights(weights)
self.step = weights['steps']
self.actor_optimizer.load_state_dict(weights['actor_optimizer'])
self.critic_optimizer.load_state_dict(weights['critic_optimizer'])
self.log_alpha_optimizer.load_state_dict(weights['log_alpha_optimizer'])
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.set_full_state_weights(checkpoint)
def get_masked_action_values(self, obs, action_masks):
assert False
def set_eval(self):
self.model.eval()
if self.normalize_input:
self.running_mean_std.eval()
def set_train(self):
self.model.train()
if self.normalize_input:
self.running_mean_std.train()
def update_critic(self, obs, action, reward, next_obs, not_done,
step):
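# Soft Bellman backup for the twin critics (explanatory comment, added for clarity):
#   target_Q = r + gamma * not_done * (min(Q1'(s', a'), Q2'(s', a')) - alpha * log pi(a'|s'))
# where a' is sampled from the current policy at the next state s'.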
with torch.no_grad():
dist = self.model.actor(next_obs)
next_action = dist.rsample()
log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
target_Q1, target_Q2 = self.model.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1, target_Q2) - self.alpha * log_prob
target_Q = reward + (not_done * self.gamma * target_V)
target_Q = target_Q.detach()
# get current Q estimates
current_Q1, current_Q2 = self.model.critic(obs, action)
critic1_loss = self.c_loss(current_Q1, target_Q)
critic2_loss = self.c_loss(current_Q2, target_Q)
critic_loss = critic1_loss + critic2_loss
self.critic_optimizer.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_optimizer.step()
return critic_loss.detach(), critic1_loss.detach(), critic2_loss.detach()
def update_actor_and_alpha(self, obs, step):
for p in self.model.sac_network.critic.parameters():
p.requires_grad = False
dist = self.model.actor(obs)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
entropy = dist.entropy().sum(-1, keepdim=True).mean()
actor_Q1, actor_Q2 = self.model.critic(obs, action)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (torch.max(self.alpha.detach(), self.min_alpha) * log_prob - actor_Q)
actor_loss = actor_loss.mean()
self.actor_optimizer.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_optimizer.step()
for p in self.model.sac_network.critic.parameters():
p.requires_grad = True
if self.learnable_temperature:
alpha_loss = (self.alpha *
(-log_prob - self.target_entropy).detach()).mean()
self.log_alpha_optimizer.zero_grad(set_to_none=True)
alpha_loss.backward()
self.log_alpha_optimizer.step()
else:
alpha_loss = None
return actor_loss.detach(), entropy.detach(), self.alpha.detach(), alpha_loss # TODO: maybe not self.alpha
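# Polyak (exponential moving average) update of the target critic, applied after each
# gradient step with tau = critic_tau (explanatory comment, added for clarity):
#   theta_target <- tau * theta + (1 - tau) * theta_target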
def soft_update_params(self, net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
def update(self, step):
obs, action, reward, next_obs, done = self.replay_buffer.sample(self.batch_size)
not_done = ~done
obs = self.preproc_obs(obs)
next_obs = self.preproc_obs(next_obs)
critic_loss, critic1_loss, critic2_loss = self.update_critic(obs, action, reward, next_obs, not_done, step)
actor_loss, entropy, alpha, alpha_loss = self.update_actor_and_alpha(obs, step)
actor_loss_info = actor_loss, entropy, alpha, alpha_loss
self.soft_update_params(self.model.sac_network.critic, self.model.sac_network.critic_target,
self.critic_tau)
return actor_loss_info, critic1_loss, critic2_loss
def preproc_obs(self, obs):
if isinstance(obs, dict):
obs = obs['obs']
if self.normalize_input:
obs = self.running_mean_std(obs)
return obs
def env_step(self, actions):
obs, rewards, dones, infos = self.vec_env.step(actions) # (obs_space) -> (n, obs_space)
self.step += self.num_actors
if self.is_tensor_obses:
return obs, rewards, dones, infos
else:
return torch.from_numpy(obs).to(self.sac_device), torch.from_numpy(rewards).to(self.sac_device), torch.from_numpy(dones).to(self.sac_device), infos
def env_reset(self):
with torch.no_grad():
obs = self.vec_env.reset()
if self.is_tensor_obses is None:
self.is_tensor_obses = torch.is_tensor(obs)
print("Observations are tensors:", self.is_tensor_obses)
if self.is_tensor_obses:
return obs.to(self.sac_device)
else:
return torch.from_numpy(obs).to(self.sac_device)
def act(self, obs, action_dim, sample=False):
obs = self.preproc_obs(obs)
dist = self.model.actor(obs)
actions = dist.sample() if sample else dist.mean
actions = actions.clamp(*self.action_range)
assert actions.ndim == 2
return actions
def extract_actor_stats(self, actor_losses, entropies, alphas, alpha_losses, actor_loss_info):
actor_loss, entropy, alpha, alpha_loss = actor_loss_info
actor_losses.append(actor_loss)
entropies.append(entropy)
if alpha_losses is not None:
alphas.append(alpha)
alpha_losses.append(alpha_loss)
def play_steps(self, random_exploration=False):
total_time_start = time.time()
total_update_time = 0
total_time = 0
step_time = 0.0
actor_losses = []
entropies = []
alphas = []
alpha_losses = []
critic1_losses = []
critic2_losses = []
obs = self.obs
for _ in range(self.num_steps_per_episode):
self.set_eval()
if random_exploration:
action = torch.rand((self.num_actors, *self.env_info["action_space"].shape), device=self.sac_device) * 2 - 1
else:
with torch.no_grad():
action = self.act(obs.float(), self.env_info["action_space"].shape, sample=True)
step_start = time.time()
with torch.no_grad():
next_obs, rewards, dones, infos = self.env_step(action)
step_end = time.time()
self.current_rewards += rewards
self.current_lengths += 1
total_time += step_end - step_start
step_time += (step_end - step_start)
all_done_indices = dones.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
self.game_rewards.update(self.current_rewards[done_indices])
self.game_lengths.update(self.current_lengths[done_indices])
not_dones = 1.0 - dones.float()
self.algo_observer.process_infos(infos, done_indices)
no_timeouts = self.current_lengths != self.max_env_steps
dones = dones * no_timeouts
self.current_rewards = self.current_rewards * not_dones
self.current_lengths = self.current_lengths * not_dones
if isinstance(obs, dict):
obs = obs['obs']
if isinstance(next_obs, dict):
next_obs = next_obs['obs']
rewards = self.rewards_shaper(rewards)
#if torch.min(obs) < -150 or torch.max(obs) > 150:
# print('ATATATA')
#else:
self.replay_buffer.add(obs, action, torch.unsqueeze(rewards, 1), next_obs, torch.unsqueeze(dones, 1))
self.obs = obs = next_obs.clone()
if not random_exploration:
self.set_train()
update_time_start = time.time()
actor_loss_info, critic1_loss, critic2_loss = self.update(self.epoch_num)
update_time_end = time.time()
update_time = update_time_end - update_time_start
self.extract_actor_stats(actor_losses, entropies, alphas, alpha_losses, actor_loss_info)
critic1_losses.append(critic1_loss)
critic2_losses.append(critic2_loss)
else:
update_time = 0
total_update_time += update_time
total_time_end = time.time()
total_time = total_time_end - total_time_start
play_time = total_time - total_update_time
return step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses
def train_epoch(self):
if self.epoch_num < self.num_seed_steps:
step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.play_steps(random_exploration=True)
else:
step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.play_steps(random_exploration=False)
return step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses
def train(self):
self.init_tensors()
self.algo_observer.after_init(self)
self.last_mean_rewards = -100500
total_time = 0
# rep_count = 0
self.frame = 0
self.obs = self.env_reset()
while True:
self.epoch_num += 1
step_time, play_time, update_time, epoch_total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.train_epoch()
total_time += epoch_total_time
scaled_time = epoch_total_time
scaled_play_time = play_time
curr_frames = self.num_frames_per_epoch
self.frame += curr_frames
frame = self.frame #TODO: Fix frame
# print(frame)
self.writer.add_scalar('performance/step_inference_rl_update_fps', curr_frames / scaled_time, frame)
self.writer.add_scalar('performance/step_inference_fps', curr_frames / scaled_play_time, frame)
self.writer.add_scalar('performance/step_fps', curr_frames / step_time, frame)
self.writer.add_scalar('performance/rl_update_time', update_time, frame)
self.writer.add_scalar('performance/step_inference_time', play_time, frame)
self.writer.add_scalar('performance/step_time', step_time, frame)
if self.epoch_num >= self.num_seed_steps:
self.writer.add_scalar('losses/a_loss', torch_ext.mean_list(actor_losses).item(), frame)
self.writer.add_scalar('losses/c1_loss', torch_ext.mean_list(critic1_losses).item(), frame)
self.writer.add_scalar('losses/c2_loss', torch_ext.mean_list(critic2_losses).item(), frame)
self.writer.add_scalar('losses/entropy', torch_ext.mean_list(entropies).item(), frame)
if alpha_losses[0] is not None:
self.writer.add_scalar('losses/alpha_loss', torch_ext.mean_list(alpha_losses).item(), frame)
self.writer.add_scalar('info/alpha', torch_ext.mean_list(alphas).item(), frame)
self.writer.add_scalar('info/epochs', self.epoch_num, frame)
self.algo_observer.after_print_stats(frame, self.epoch_num, total_time)
mean_rewards = 0
mean_lengths = 0
if self.game_rewards.current_size > 0:
mean_rewards = self.game_rewards.get_mean()
mean_lengths = self.game_lengths.get_mean()
self.writer.add_scalar('rewards/step', mean_rewards, frame)
self.writer.add_scalar('rewards/iter', mean_rewards, self.epoch_num)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/step', mean_lengths, frame)
# self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if mean_rewards > self.last_mean_rewards and self.epoch_num >= self.save_best_after:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
# self.save("./nn/" + self.config['name'])
self.save(os.path.join(self.nn_dir, self.config['name']))
# if self.last_mean_rewards > self.config.get('score_to_win', float('inf')):
# print('Network won!')
# self.save("./nn/" + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards))
# return self.last_mean_rewards, self.epoch_num
if self.epoch_num > self.max_epochs:
# self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards))
self.save(os.path.join(self.nn_dir, 'last_' + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards)))
print('MAX EPOCHS NUM!')
return self.last_mean_rewards, self.epoch_num
update_time = 0
if self.print_stats:
fps_step = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'epoch: {self.epoch_num} fps step: {fps_step:.1f} fps total: {fps_total:.1f} reward: {mean_rewards:.3f} episode len: {mean_lengths:.3f}') | 22,630 | Python | 41.943074 | 186 | 0.595095 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/d2rl.py | import torch
class D2RLNet(torch.nn.Module):
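# D2RL-style MLP (explanatory comment, added for clarity): the raw network input is
# concatenated onto the output of every hidden layer before the next Linear, which is
# why each layer after the first takes units[i-1] + input_size features.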
def __init__(self, input_size,
units,
activations,
norm_func_name = None):
torch.nn.Module.__init__(self)
self.activations = torch.nn.ModuleList(activations)
self.linears = torch.nn.ModuleList([])
self.norm_layers = torch.nn.ModuleList([])
self.num_layers = len(units)
last_size = input_size
for i in range(self.num_layers):
self.linears.append(torch.nn.Linear(last_size, units[i]))
last_size = units[i] + input_size
if norm_func_name == 'layer_norm':
self.norm_layers.append(torch.nn.LayerNorm(units[i]))
elif norm_func_name == 'batch_norm':
self.norm_layers.append(torch.nn.BatchNorm1d(units[i]))
else:
self.norm_layers.append(torch.nn.Identity())
def forward(self, input):
x = self.linears[0](input)
x = self.activations[0](x)
x = self.norm_layers[0](x)
for i in range(1,self.num_layers):
x = torch.cat([x,input], dim=1)
x = self.linears[i](x)
x = self.norm_layers[i](x)
x = self.activations[i](x)
return x | 1,259 | Python | 37.181817 | 71 | 0.544083 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/players.py | from rl_games.common.player import BasePlayer
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.tr_helpers import unsqueeze_obs
import gym
import torch
from torch import nn
import numpy as np
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
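# Worked example for rescale_actions (illustrative addition): with low = -2, high = 4,
# a policy output of 0.5 maps to 0.5 * (4 - (-2)) / 2 + (4 + (-2)) / 2 = 2.5.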
class PpoPlayerContinuous(BasePlayer):
def __init__(self, config):
BasePlayer.__init__(self, config)
self.network = config['network']
self.actions_num = self.action_space.shape[0]
self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
self.mask = [False]
self.normalize_input = self.config['normalize_input']
obs_shape = self.obs_shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_agents
}
self.model = self.network.build(config)
self.model.to(self.device)
self.model.eval()
self.is_rnn = self.model.is_rnn()
if self.normalize_input:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
self.running_mean_std.eval()
def get_action(self, obs, is_determenistic = False):
if self.has_batch_dimension == False:
obs = unsqueeze_obs(obs)
obs = self._preproc_obs(obs)
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : obs,
'rnn_states' : self.states
}
with torch.no_grad():
res_dict = self.model(input_dict)
mu = res_dict['mus']
action = res_dict['actions']
self.states = res_dict['rnn_states']
if is_determenistic:
current_action = mu
else:
current_action = action
current_action = torch.squeeze(current_action.detach())
return rescale_actions(self.actions_low, self.actions_high, torch.clamp(current_action, -1.0, 1.0))
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.model.load_state_dict(checkpoint['model'])
if self.normalize_input:
self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
def reset(self):
self.init_rnn()
class PpoPlayerDiscrete(BasePlayer):
def __init__(self, config):
BasePlayer.__init__(self, config)
self.network = config['network']
if type(self.action_space) is gym.spaces.Discrete:
self.actions_num = self.action_space.n
self.is_multi_discrete = False
if type(self.action_space) is gym.spaces.Tuple:
self.actions_num = [action.n for action in self.action_space]
self.is_multi_discrete = True
self.mask = [False]
self.normalize_input = self.config['normalize_input']
obs_shape = self.obs_shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_agents,
'value_size': self.value_size
}
self.model = self.network.build(config)
self.model.to(self.device)
self.model.eval()
self.is_rnn = self.model.is_rnn()
if self.normalize_input:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
self.running_mean_std.eval()
def get_masked_action(self, obs, action_masks, is_determenistic = True):
if self.has_batch_dimension == False:
obs = unsqueeze_obs(obs)
obs = self._preproc_obs(obs)
action_masks = torch.Tensor(action_masks).to(self.device)
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : obs,
'action_masks' : action_masks,
'rnn_states' : self.states
}
self.model.eval()
with torch.no_grad():
res_dict = self.model(input_dict)
logits = res_dict['logits']
action = res_dict['actions']
self.states = res_dict['rnn_states']
if self.is_multi_discrete:
if is_determenistic:
action = [torch.argmax(logit.detach(), axis=-1).squeeze() for logit in logits]
return torch.stack(action,dim=-1)
else:
return action.squeeze().detach()
else:
if is_determenistic:
return torch.argmax(logits.detach(), axis=-1).squeeze()
else:
return action.squeeze().detach()
def get_action(self, obs, is_determenistic = False):
if self.has_batch_dimension == False:
obs = unsqueeze_obs(obs)
obs = self._preproc_obs(obs)
self.model.eval()
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : obs,
'rnn_states' : self.states
}
with torch.no_grad():
res_dict = self.model(input_dict)
logits = res_dict['logits']
action = res_dict['actions']
self.states = res_dict['rnn_states']
if self.is_multi_discrete:
if is_determenistic:
action = [torch.argmax(logit.detach(), axis=1).squeeze() for logit in logits]
return torch.stack(action,dim=-1)
else:
return action.squeeze().detach()
else:
if is_determenistic:
return torch.argmax(logits.detach(), axis=-1).squeeze()
else:
return action.squeeze().detach()
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.model.load_state_dict(checkpoint['model'])
if self.normalize_input:
self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
def reset(self):
self.init_rnn()
class SACPlayer(BasePlayer):
def __init__(self, config):
BasePlayer.__init__(self, config)
self.network = config['network']
self.actions_num = self.action_space.shape[0]
self.action_range = [
float(self.env_info['action_space'].low.min()),
float(self.env_info['action_space'].high.max())
]
obs_shape = torch_ext.shape_whc_to_cwh(self.state_shape)
self.normalize_input = False
config = {
'obs_dim': self.env_info["observation_space"].shape[0],
'action_dim': self.env_info["action_space"].shape[0],
'actions_num' : self.actions_num,
'input_shape' : obs_shape
}
self.model = self.network.build(config)
self.model.to(self.device)
self.model.eval()
self.is_rnn = self.model.is_rnn()
# if self.normalize_input:
# self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
# self.running_mean_std.eval()
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.model.sac_network.actor.load_state_dict(checkpoint['actor'])
self.model.sac_network.critic.load_state_dict(checkpoint['critic'])
self.model.sac_network.critic_target.load_state_dict(checkpoint['critic_target'])
if self.normalize_input:
self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
def get_action(self, obs, sample=False):
dist = self.model.actor(obs)
actions = dist.sample() if sample else dist.mean
actions = actions.clamp(*self.action_range).to(self.device)
assert actions.ndim == 2
return actions
def reset(self):
pass | 7,933 | Python | 36.074766 | 108 | 0.576831 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/self_play_manager.py | import numpy as np
class SelfPlayManager:
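# Self-play weight sync (explanatory comment, added for clarity): once the tracked score
# over the last `games_to_check` games exceeds `update_score`, the agent's current weights
# are pushed to a rotating subset of `env_update_num` opponent environments and the
# statistics are cleared.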
def __init__(self, config, writter):
self.config = config
self.writter = writter
self.update_score = self.config['update_score']
self.games_to_check = self.config['games_to_check']
self.check_scores = self.config.get('check_scores', False)
self.env_update_num = self.config.get('env_update_num', 1)
self.env_indexes = np.arange(start=0, stop=self.env_update_num)
self.updates_num = 0
def update(self, algo):
self.updates_num += 1
if self.check_scores:
data = algo.game_scores
else:
data = algo.game_rewards
if len(data) >= self.games_to_check:
mean_scores = data.get_mean()
mean_rewards = algo.game_rewards.get_mean()
if mean_scores > self.update_score:
print('Mean scores: ', mean_scores, ' mean rewards: ', mean_rewards, ' updating weights')
algo.clear_stats()
self.writter.add_scalar('selfplay/iters_update_weigths', self.updates_num, algo.frame)
algo.vec_env.set_weights(self.env_indexes, algo.get_weights())
self.env_indexes = (self.env_indexes + 1) % (algo.num_actors)
self.updates_num = 0
| 1,332 | Python | 40.656249 | 105 | 0.572072 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/sac_helper.py | # from rl_games.algos_torch.network_builder import NetworkBuilder
from torch import distributions as pyd
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x):
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
# One should use `cache_size=1` instead.
return self.atanh(y)
def log_abs_det_jacobian(self, x, y):
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
def entropy(self):
return self.base_dist.entropy()
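# Illustrative usage (added example): a tanh-squashed Gaussian keeps samples strictly in
# (-1, 1) while log_prob accounts for the tanh change of variables via TanhTransform.
def _squashed_normal_example():
    dist = SquashedNormal(loc=torch.zeros(2, 3), scale=torch.ones(2, 3))
    actions = dist.rsample()                 # differentiable samples in (-1, 1)
    log_prob = dist.log_prob(actions).sum(-1, keepdim=True)
    return actions, log_prob, dist.mean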
| 1,720 | Python | 28.169491 | 137 | 0.647093 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/a2c_discrete.py | from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch import ppg_aux
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class DiscreteA2CAgent(a2c_common.DiscreteA2CBase):
def __init__(self, base_name, config):
a2c_common.DiscreteA2CBase.__init__(self, base_name, config)
obs_shape = self.obs_shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_actors * self.num_agents,
'value_size': self.env_info.get('value_size',1)
}
self.model = self.network.build(config)
self.model.to(self.ppo_device)
self.init_rnn_from_model(self.model)
self.last_lr = float(self.last_lr)
self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
if self.normalize_input:
if isinstance(self.observation_space, gym.spaces.Dict):
self.running_mean_std = RunningMeanStdObs(obs_shape).to(self.ppo_device)
else:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.ppo_device)
if self.has_central_value:
cv_config = {
'state_shape' : self.state_shape,
'value_size' : self.value_size,
'ppo_device' : self.ppo_device,
'num_agents' : self.num_agents,
'num_steps' : self.horizon_length,
'num_actors' : self.num_actors,
'num_actions' : self.actions_num,
'seq_len' : self.seq_len,
'model' : self.central_value_config['network'],
'config' : self.central_value_config,
'writter' : self.writer,
'max_epochs' : self.max_epochs,
'multi_gpu' : self.multi_gpu
}
self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)
self.use_experimental_cv = self.config.get('use_experimental_cv', False)
self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
if 'phasic_policy_gradients' in self.config:
self.has_phasic_policy_gradients = True
self.ppg_aux_loss = ppg_aux.PPGAux(self, self.config['phasic_policy_gradients'])
self.has_value_loss = (self.has_central_value \
and self.use_experimental_cv) \
or not self.has_phasic_policy_gradients
self.algo_observer.after_init(self)
def update_epoch(self):
self.epoch_num += 1
return self.epoch_num
def save(self, fn):
state = self.get_full_state_weights()
torch_ext.save_checkpoint(fn, state)
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.set_full_state_weights(checkpoint)
def get_masked_action_values(self, obs, action_masks):
processed_obs = self._preproc_obs(obs['obs'])
action_masks = torch.BoolTensor(action_masks).to(self.ppo_device)
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : processed_obs,
'action_masks' : action_masks,
'rnn_states' : self.rnn_states
}
with torch.no_grad():
res_dict = self.model(input_dict)
if self.has_central_value:
input_dict = {
'is_train': False,
'states' : obs['states'],
#'actions' : action,
}
value = self.get_central_value(input_dict)
res_dict['values'] = value
if self.normalize_value:
value = self.value_mean_std(value, True)
if self.is_multi_discrete:
action_masks = torch.cat(action_masks, dim=-1)
res_dict['action_masks'] = action_masks
return res_dict
def train_actor_critic(self, input_dict):
self.set_train()
self.calc_gradients(input_dict)
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.last_lr
return self.train_result
def calc_gradients(self, input_dict):
value_preds_batch = input_dict['old_values']
old_action_log_probs_batch = input_dict['old_logp_actions']
advantage = input_dict['advantages']
return_batch = input_dict['returns']
actions_batch = input_dict['actions']
obs_batch = input_dict['obs']
obs_batch = self._preproc_obs(obs_batch)
lr = self.last_lr
kl = 1.0
lr_mul = 1.0
curr_e_clip = lr_mul * self.e_clip
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
}
if self.use_action_masks:
batch_dict['action_masks'] = input_dict['action_masks']
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
batch_dict['seq_length'] = self.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
res_dict = self.model(batch_dict)
action_log_probs = res_dict['prev_neglogp']
values = res_dict['values']
entropy = res_dict['entropy']
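# Standard PPO clipped-surrogate objective (explanatory comment, added for clarity):
# actor_loss ~ -min(ratio * A, clip(ratio, 1 - e_clip, 1 + e_clip) * A), where the ratio
# is exp(old_neglogp - new_neglogp) = pi_new(a|s) / pi_old(a|s); the critic uses a value
# loss that is optionally clipped via clip_value, and entropy is added as a bonus below.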
a_loss = common_losses.actor_loss(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
if self.has_value_loss:
c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
else:
c_loss = torch.zeros(1, device=self.ppo_device)
losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1)], rnn_masks)
a_loss, c_loss, entropy = losses[0], losses[1], losses[2]
loss = a_loss + 0.5 *c_loss * self.critic_coef - entropy * self.entropy_coef
if self.multi_gpu:
self.optimizer.zero_grad()
else:
for param in self.model.parameters():
param.grad = None
self.scaler.scale(loss).backward()
if self.truncate_grads:
if self.multi_gpu:
self.optimizer.synchronize()
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
with self.optimizer.skip_synchronize():
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.step(self.optimizer)
self.scaler.update()
with torch.no_grad():
kl_dist = 0.5 * ((old_action_log_probs_batch - action_log_probs)**2)
if self.is_rnn:
kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() # / sum_mask
else:
kl_dist = kl_dist.mean()
if self.has_phasic_policy_gradients:
c_loss = self.ppg_aux_loss.train_value(self,input_dict)
self.train_result = (a_loss, c_loss, entropy, kl_dist,self.last_lr, lr_mul)
| 7,889 | Python | 38.848485 | 142 | 0.566865 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/ppg_aux.py |
from rl_games.common import tr_helpers
from rl_games.algos_torch import torch_ext
from rl_games.common import common_losses
from rl_games.common.datasets import DatasetList
import torch
from torch import nn
from torch import optim
import copy
class PPGAux:
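# Phasic Policy Gradient auxiliary phase (explanatory comment, added for clarity): rollout
# datasets are accumulated every epoch, and every `n_aux` epochs the stored batches are
# replayed for `mini_epochs` passes to fit the value head (critic loss) while a KL-style
# penalty against a frozen copy of the pre-update policy (an absolute mean difference in
# the continuous case) keeps the policy from drifting.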
def __init__(self, algo, config):
self.config = config
self.writer = algo.writer
self.mini_epoch = config['mini_epochs']
self.mini_batch = config['minibatch_size']
self.mixed_precision = algo.mixed_precision
self.is_rnn = algo.network.is_rnn()
self.kl_coef = config.get('kl_coef', 1.0)
self.n_aux = config.get('n_aux', 16)
self.is_continuous = True
self.last_lr = config['learning_rate']
self.optimizer = optim.Adam(algo.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=algo.weight_decay)
self.scaler = torch.cuda.amp.GradScaler(enabled=self.mixed_precision)
self._freeze_grads(algo.model)
self.value_optimizer = optim.Adam(filter(lambda p: p.requires_grad, algo.model.parameters()), float(self.last_lr), eps=1e-08, weight_decay=algo.weight_decay)
self.value_scaler = torch.cuda.amp.GradScaler(enabled=self.mixed_precision)
self._unfreeze_grads(algo.model)
self.dataset_list = DatasetList()
def _freeze_grads(self, model):
for param in model.parameters():
param.requires_grad = False
model.a2c_network.value.weight.requires_grad = True
model.a2c_network.value.bias.requires_grad = True
def _unfreeze_grads(self, model):
for param in model.parameters():
param.requires_grad = True
def train_value(self, algo, input_dict):
value_preds_batch = input_dict['old_values']
return_batch = input_dict['returns']
obs_batch = input_dict['obs']
actions_batch = input_dict['actions']
obs_batch = algo._preproc_obs(obs_batch)
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
}
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
batch_dict['seq_length'] = self.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
res_dict = algo.model(batch_dict)
values = res_dict['values']
c_loss = common_losses.critic_loss(value_preds_batch, values, algo.e_clip, return_batch, algo.clip_value)
losses, sum_mask = torch_ext.apply_masks([c_loss], rnn_masks)
c_loss = losses[0]
loss = c_loss
if algo.multi_gpu:
self.value_optimizer.zero_grad()
else:
for param in algo.model.parameters():
param.grad = None
self.value_scaler.scale(loss).backward()
if algo.truncate_grads:
if algo.multi_gpu:
self.value_optimizer.synchronize()
self.value_scaler.unscale_(self.value_optimizer)
nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
with self.value_optimizer.skip_synchronize():
self.value_scaler.step(self.value_optimizer)
self.value_scaler.update()
else:
self.value_scaler.unscale_(self.value_optimizer)
nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
self.value_scaler.step(self.value_optimizer)
self.value_scaler.update()
else:
self.value_scaler.step(self.value_optimizer)
self.value_scaler.update()
return loss.detach()
def update(self, algo):
self.dataset_list.add_dataset(algo.dataset)
def train_net(self, algo):
self.update(algo)
if algo.epoch_num % self.n_aux != 0:
return
self.old_model = copy.deepcopy(algo.model)
self.old_model.eval()
dataset = self.dataset_list
for _ in range(self.mini_epoch):
for idx in range(len(dataset)):
loss_c, loss_kl = self.calc_gradients(algo, dataset[idx])
avg_loss_c = loss_c / len(dataset)
avg_loss_kl = loss_kl / len(dataset)
if self.writer != None:
self.writer.add_scalar('losses/pgg_loss_c', avg_loss_c, algo.frame)
self.writer.add_scalar('losses/pgg_loss_kl', avg_loss_kl, algo.frame)
self.dataset_list.clear()
def calc_gradients(self, algo, input_dict):
value_preds_batch = input_dict['old_values']
return_batch = input_dict['returns']
obs_batch = input_dict['obs']
actions_batch = input_dict['actions']
obs_batch = algo._preproc_obs(obs_batch)
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
}
#if self.use_action_masks:
# batch_dict['action_masks'] = input_dict['action_masks']
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
batch_dict['seq_length'] = self.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
with torch.no_grad():
old_dict = self.old_model(batch_dict.copy())
res_dict = algo.model(batch_dict)
values = res_dict['values']
if 'mu' in res_dict:
old_mu_batch = input_dict['mu']
old_sigma_batch = input_dict['sigma']
mu = res_dict['mus']
sigma = res_dict['sigmas']
#kl_loss = torch_ext.policy_kl(mu, sigma.detach(), old_mu_batch, old_sigma_batch, False)
kl_loss = torch.abs(mu - old_mu_batch)
else:
kl_loss = algo.model.kl(res_dict, old_dict)
c_loss = common_losses.critic_loss(value_preds_batch, values, algo.e_clip, return_batch, algo.clip_value)
losses, sum_mask = torch_ext.apply_masks([c_loss, kl_loss.unsqueeze(1)], rnn_masks)
c_loss, kl_loss = losses[0], losses[1]
loss = c_loss + kl_loss * self.kl_coef
if algo.multi_gpu:
self.optimizer.zero_grad()
else:
for param in algo.model.parameters():
param.grad = None
self.scaler.scale(loss).backward()
if algo.truncate_grads:
if algo.multi_gpu:
self.optimizer.synchronize()
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
with self.optimizer.skip_synchronize():
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.step(self.optimizer)
self.scaler.update()
return c_loss, kl_loss
| 7,361 | Python | 39.010869 | 165 | 0.570439 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/central_value.py | import torch
from torch import nn
import numpy as np
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.common import schedulers
class CentralValueTrain(nn.Module):
def __init__(self, state_shape, value_size, ppo_device, num_agents, num_steps, num_actors, num_actions, seq_len, model, config, writter, max_epochs, multi_gpu):
nn.Module.__init__(self)
self.ppo_device = ppo_device
self.num_agents, self.num_steps, self.num_actors, self.seq_len = num_agents, num_steps, num_actors, seq_len
self.num_actions = num_actions
self.state_shape = state_shape
self.value_size = value_size
self.max_epochs = max_epochs
self.multi_gpu = multi_gpu
self.truncate_grads = config.get('truncate_grads', False)
state_config = {
'value_size' : value_size,
'input_shape' : state_shape,
'actions_num' : num_actions,
'num_agents' : num_agents,
'num_seqs' : num_actors
}
self.config = config
self.model = model.build('cvalue', **state_config)
self.lr = float(config['learning_rate'])
self.linear_lr = config.get('lr_schedule') == 'linear'
if self.linear_lr:
self.scheduler = schedulers.LinearScheduler(self.lr,
max_steps=self.max_epochs,
apply_to_entropy=False,
start_entropy_coef=0)
else:
self.scheduler = schedulers.IdentityScheduler()
self.mini_epoch = config['mini_epochs']
self.mini_batch = config['minibatch_size']
self.num_minibatches = self.num_steps * self.num_actors // self.mini_batch
self.clip_value = config['clip_value']
self.normalize_input = config['normalize_input']
self.writter = writter
self.weight_decay = config.get('weight_decay', 0.0)
self.optimizer = torch.optim.Adam(self.model.parameters(), float(self.lr), eps=1e-08, weight_decay=self.weight_decay)
self.frame = 0
self.epoch_num = 0
self.running_mean_std = None
self.grad_norm = config.get('grad_norm', 1)
self.truncate_grads = config.get('truncate_grads', False)
self.e_clip = config.get('e_clip', 0.2)
self.truncate_grad = self.config.get('truncate_grads', False)
if self.normalize_input:
self.running_mean_std = RunningMeanStd(state_shape)
self.is_rnn = self.model.is_rnn()
self.rnn_states = None
self.batch_size = self.num_steps * self.num_actors
if self.is_rnn:
self.rnn_states = self.model.get_default_rnn_state()
self.rnn_states = [s.to(self.ppo_device) for s in self.rnn_states]
num_seqs = self.num_steps * self.num_actors // self.seq_len
assert((self.num_steps * self.num_actors // self.num_minibatches) % self.seq_len == 0)
self.mb_rnn_states = [torch.zeros((s.size()[0], num_seqs, s.size()[2]), dtype = torch.float32, device=self.ppo_device) for s in self.rnn_states]
self.dataset = datasets.PPODataset(self.batch_size, self.mini_batch, True, self.is_rnn, self.ppo_device, self.seq_len)
def update_lr(self, lr):
if self.multi_gpu:
lr_tensor = torch.tensor([lr])
self.hvd.broadcast_value(lr_tensor, 'cv_learning_rate')
lr = lr_tensor.item()
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def get_stats_weights(self):
if self.normalize_input:
return self.running_mean_std.state_dict()
else:
return {}
def set_stats_weights(self, weights):
        if self.normalize_input:
            self.running_mean_std.load_state_dict(weights)
def update_dataset(self, batch_dict):
value_preds = batch_dict['old_values']
returns = batch_dict['returns']
actions = batch_dict['actions']
rnn_masks = batch_dict['rnn_masks']
if self.num_agents > 1:
res = self.update_multiagent_tensors(value_preds, returns, actions, rnn_masks)
batch_dict['old_values'] = res[0]
batch_dict['returns'] = res[1]
batch_dict['actions'] = res[2]
if self.is_rnn:
batch_dict['rnn_states'] = self.mb_rnn_states
if self.num_agents > 1:
rnn_masks = res[3]
batch_dict['rnn_masks'] = rnn_masks
self.dataset.update_values_dict(batch_dict)
def _preproc_obs(self, obs_batch):
if type(obs_batch) is dict:
for k,v in obs_batch.items():
obs_batch[k] = self._preproc_obs(v)
else:
if obs_batch.dtype == torch.uint8:
obs_batch = obs_batch.float() / 255.0
if self.normalize_input:
obs_batch = self.running_mean_std(obs_batch)
return obs_batch
def pre_step_rnn(self, rnn_indices, state_indices):
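        # stash the current RNN hidden states into the minibatch state buffer for the upcoming sequence;
        # with multiple agents, indices are collapsed to one entry per environment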
if self.num_agents > 1:
rnn_indices = rnn_indices[::self.num_agents]
shifts = rnn_indices % (self.num_steps // self.seq_len)
rnn_indices = (rnn_indices - shifts) // self.num_agents + shifts
state_indices = state_indices[::self.num_agents] // self.num_agents
for s, mb_s in zip(self.rnn_states, self.mb_rnn_states):
mb_s[:, rnn_indices, :] = s[:, state_indices, :]
def post_step_rnn(self, all_done_indices):
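        # zero the RNN hidden states of environments whose episodes just finished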
all_done_indices = all_done_indices[::self.num_agents] // self.num_agents
for s in self.rnn_states:
s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0
def forward(self, input_dict):
value, rnn_states = self.model(input_dict)
return value, rnn_states
def get_value(self, input_dict):
self.eval()
obs_batch = input_dict['states']
actions = input_dict.get('actions', None)
obs_batch = self._preproc_obs(obs_batch)
value, self.rnn_states = self.forward({'obs' : obs_batch, 'actions': actions,
'rnn_states': self.rnn_states})
if self.num_agents > 1:
value = value.repeat(1, self.num_agents)
value = value.view(value.size()[0]*self.num_agents, -1)
return value
def train_critic(self, input_dict):
self.train()
loss = self.calc_gradients(input_dict)
return loss.item()
def update_multiagent_tensors(self, value_preds, returns, actions, rnn_masks):
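        # reshape per-agent value predictions and returns agent-major and keep a single
        # environment-sized slice, so the central critic is trained once per environment step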
batch_size = self.batch_size
ma_batch_size = self.num_actors * self.num_agents * self.num_steps
value_preds = value_preds.view(self.num_actors, self.num_agents, self.num_steps, self.value_size).transpose(0,1)
returns = returns.view(self.num_actors, self.num_agents, self.num_steps, self.value_size).transpose(0,1)
value_preds = value_preds.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
returns = returns.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
if self.is_rnn:
rnn_masks = rnn_masks.view(self.num_actors, self.num_agents, self.num_steps).transpose(0,1)
rnn_masks = rnn_masks.flatten(0)[:batch_size]
return value_preds, returns, actions, rnn_masks
def train_net(self):
self.train()
loss = 0
for _ in range(self.mini_epoch):
for idx in range(len(self.dataset)):
loss += self.train_critic(self.dataset[idx])
avg_loss = loss / (self.mini_epoch * self.num_minibatches)
self.epoch_num += 1
self.lr, _ = self.scheduler.update(self.lr, 0, self.epoch_num, 0, 0)
self.update_lr(self.lr)
self.frame += self.batch_size
if self.writter != None:
self.writter.add_scalar('losses/cval_loss', avg_loss, self.frame)
self.writter.add_scalar('info/cval_lr', self.lr, self.frame)
return avg_loss
def calc_gradients(self, batch):
obs_batch = self._preproc_obs(batch['obs'])
value_preds_batch = batch['old_values']
returns_batch = batch['returns']
actions_batch = batch['actions']
rnn_masks_batch = batch.get('rnn_masks')
batch_dict = {'obs' : obs_batch,
'actions' : actions_batch,
'seq_length' : self.seq_len }
if self.is_rnn:
batch_dict['rnn_states'] = batch['rnn_states']
values, _ = self.forward(batch_dict)
loss = common_losses.critic_loss(value_preds_batch, values, self.e_clip, returns_batch, self.clip_value)
losses, _ = torch_ext.apply_masks([loss], rnn_masks_batch)
loss = losses[0]
if self.multi_gpu:
self.optimizer.zero_grad()
else:
for param in self.model.parameters():
param.grad = None
loss.backward()
        #TODO: Refactor this ugliest code of the year
if self.truncate_grads:
if self.multi_gpu:
self.optimizer.synchronize()
#self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
with self.optimizer.skip_synchronize():
self.optimizer.step()
else:
#self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
self.optimizer.step()
else:
self.optimizer.step()
return loss
| 9,703 | Python | 41.561403 | 164 | 0.587241 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/models.py | import rl_games.algos_torch.layers
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
import rl_games.common.divergence as divergence
from rl_games.algos_torch.torch_ext import CategoricalMasked
from torch.distributions import Categorical
from rl_games.algos_torch.sac_helper import SquashedNormal
class BaseModel():
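    # Base class for the model wrappers below, which pair a built network with the
    # sampling / log-prob / KL logic for a given action-space type.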
def __init__(self):
pass
def is_rnn(self):
return False
def is_separate_critic(self):
return False
class ModelA2C(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
return ModelA2C.Network(self.network_builder.build('a2c', **config))
class Network(nn.Module):
def __init__(self, a2c_network):
nn.Module.__init__(self)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['logits']
q = q_dict['logits']
return divergence.d_kl_discrete(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
action_masks = input_dict.get('action_masks', None)
prev_actions = input_dict.get('prev_actions', None)
logits, value, states = self.a2c_network(input_dict)
if is_train:
categorical = CategoricalMasked(logits=logits, masks=action_masks)
prev_neglogp = -categorical.log_prob(prev_actions)
entropy = categorical.entropy()
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'logits' : categorical.logits,
'values' : value,
'entropy' : entropy,
'rnn_states' : states
}
return result
else:
categorical = CategoricalMasked(logits=logits, masks=action_masks)
selected_action = categorical.sample().long()
neglogp = -categorical.log_prob(selected_action)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : value,
'actions' : selected_action,
'logits' : categorical.logits,
'rnn_states' : states
}
return result
class ModelA2CMultiDiscrete(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
return ModelA2CMultiDiscrete.Network(self.network_builder.build('a2c', **config))
class Network(nn.Module):
def __init__(self, a2c_network):
nn.Module.__init__(self)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['logits']
q = q_dict['logits']
return divergence.d_kl_discrete_list(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
action_masks = input_dict.get('action_masks', None)
prev_actions = input_dict.get('prev_actions', None)
logits, value, states = self.a2c_network(input_dict)
if is_train:
if action_masks is None:
categorical = [Categorical(logits=logit) for logit in logits]
else:
categorical = [CategoricalMasked(logits=logit, masks=mask) for logit, mask in zip(logits, action_masks)]
prev_actions = torch.split(prev_actions, 1, dim=-1)
prev_neglogp = [-c.log_prob(a.squeeze()) for c,a in zip(categorical, prev_actions)]
prev_neglogp = torch.stack(prev_neglogp, dim=-1).sum(dim=-1)
entropy = [c.entropy() for c in categorical]
entropy = torch.stack(entropy, dim=-1).sum(dim=-1)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'logits' : [c.logits for c in categorical],
'values' : value,
'entropy' : torch.squeeze(entropy),
'rnn_states' : states
}
return result
else:
if action_masks is None:
categorical = [Categorical(logits=logit) for logit in logits]
else:
categorical = [CategoricalMasked(logits=logit, masks=mask) for logit, mask in zip(logits, action_masks)]
selected_action = [c.sample().long() for c in categorical]
neglogp = [-c.log_prob(a.squeeze()) for c,a in zip(categorical, selected_action)]
selected_action = torch.stack(selected_action, dim=-1)
neglogp = torch.stack(neglogp, dim=-1).sum(dim=-1)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : value,
'actions' : selected_action,
'logits' : [c.logits for c in categorical],
'rnn_states' : states
}
return result
class ModelA2CContinuous(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
return ModelA2CContinuous.Network(self.network_builder.build('a2c', **config))
class Network(nn.Module):
def __init__(self, a2c_network):
nn.Module.__init__(self)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['mu'], p_dict['sigma']
q = q_dict['mu'], q_dict['sigma']
return divergence.d_kl_normal(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
prev_actions = input_dict.get('prev_actions', None)
mu, sigma, value, states = self.a2c_network(input_dict)
distr = torch.distributions.Normal(mu, sigma)
if is_train:
entropy = distr.entropy().sum(dim=-1)
prev_neglogp = -distr.log_prob(prev_actions).sum(dim=-1)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
                    'values' : value,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
else:
                selected_action = distr.sample().squeeze()
                neglogp = -distr.log_prob(selected_action).sum(dim=-1)
                entropy = distr.entropy().sum(dim=-1)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : torch.squeeze(value),
'actions' : selected_action,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
class ModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
net = self.network_builder.build('a2c', **config)
for name, _ in net.named_parameters():
print(name)
return ModelA2CContinuousLogStd.Network(net)
class Network(nn.Module):
def __init__(self, a2c_network):
nn.Module.__init__(self)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
prev_actions = input_dict.get('prev_actions', None)
mu, logstd, value, states = self.a2c_network(input_dict)
sigma = torch.exp(logstd)
distr = torch.distributions.Normal(mu, sigma)
if is_train:
entropy = distr.entropy().sum(dim=-1)
prev_neglogp = self.neglogp(prev_actions, mu, sigma, logstd)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'values' : value,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
else:
selected_action = distr.sample()
neglogp = self.neglogp(selected_action, mu, sigma, logstd)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : value,
'actions' : selected_action,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
def neglogp(self, x, mean, std, logstd):
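            # negative log-likelihood of a diagonal Gaussian parameterized by mean and log-std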
return 0.5 * (((x - mean) / std)**2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ logstd.sum(dim=-1)
class ModelSACContinuous(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
return ModelSACContinuous.Network(self.network_builder.build('sac', **config))
class Network(nn.Module):
def __init__(self, sac_network):
nn.Module.__init__(self)
self.sac_network = sac_network
def critic(self, obs, action):
return self.sac_network.critic(obs, action)
def critic_target(self, obs, action):
return self.sac_network.critic_target(obs, action)
def actor(self, obs):
return self.sac_network.actor(obs)
def is_rnn(self):
return False
def forward(self, input_dict):
is_train = input_dict.pop('is_train', True)
mu, sigma = self.sac_network(input_dict)
dist = SquashedNormal(mu, sigma)
return dist
| 10,919 | Python | 36.142857 | 140 | 0.514699 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/model_builder.py | from rl_games.common import object_factory
import rl_games.algos_torch
from rl_games.algos_torch import network_builder
from rl_games.algos_torch import models
NETWORK_REGISTRY = {}
def register_network(name, target_class):
NETWORK_REGISTRY[name] = lambda **kwargs : target_class()
class ModelBuilder:
def __init__(self):
self.model_factory = object_factory.ObjectFactory()
self.model_factory.register_builder('discrete_a2c', lambda network, **kwargs : models.ModelA2C(network))
self.model_factory.register_builder('multi_discrete_a2c', lambda network, **kwargs : models.ModelA2CMultiDiscrete(network))
self.model_factory.register_builder('continuous_a2c', lambda network, **kwargs : models.ModelA2CContinuous(network))
self.model_factory.register_builder('continuous_a2c_logstd', lambda network, **kwargs : models.ModelA2CContinuousLogStd(network))
self.model_factory.register_builder('soft_actor_critic', lambda network, **kwargs : models.ModelSACContinuous(network))
#self.model_factory.register_builder('dqn', lambda network, **kwargs : models.AtariDQN(network))
self.network_factory = object_factory.ObjectFactory()
self.network_factory.set_builders(NETWORK_REGISTRY)
self.network_factory.register_builder('actor_critic', lambda **kwargs : network_builder.A2CBuilder())
self.network_factory.register_builder('resnet_actor_critic', lambda **kwargs : network_builder.A2CResnetBuilder())
self.network_factory.register_builder('rnd_curiosity', lambda **kwargs : network_builder.RNDCuriosityBuilder())
self.network_factory.register_builder('soft_actor_critic', lambda **kwargs: network_builder.SACBuilder())
def load(self, params):
self.model_name = params['model']['name']
self.network_name = params['network']['name']
network = self.network_factory.create(self.network_name)
network.load(params['network'])
model = self.model_factory.create(self.model_name, network=network)
        return model
| 2,062 | Python | 53.289472 | 137 | 0.723084 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/moving_mean_std.py | import torch
import torch.nn as nn
import numpy as np
'''
updates moving statistics with momentum
'''
class MovingMeanStd(nn.Module):
def __init__(self, insize, momentum = 0.9998, epsilon=1e-05, per_channel=False, norm_only=False):
super(MovingMeanStd, self).__init__()
self.insize = insize
self.epsilon = epsilon
self.momentum = momentum
self.norm_only = norm_only
self.per_channel = per_channel
if per_channel:
if len(self.insize) == 3:
self.axis = [0,2,3]
if len(self.insize) == 2:
self.axis = [0,2]
if len(self.insize) == 1:
self.axis = [0]
in_size = self.insize[0]
else:
self.axis = [0]
in_size = insize
self.register_buffer("moving_mean", torch.zeros(in_size, dtype = torch.float64))
self.register_buffer("moving_var", torch.ones(in_size, dtype = torch.float64))
def forward(self, input, unnorm=False):
if self.training:
mean = input.mean(self.axis) # along channel axis
var = input.var(self.axis)
self.moving_mean = self.moving_mean * self.momentum + mean * (1 - self.momentum)
self.moving_var = self.moving_var * self.momentum + var * (1 - self.momentum)
# change shape
if self.per_channel:
if len(self.insize) == 3:
current_mean = self.moving_mean.view([1, self.insize[0], 1, 1]).expand_as(input)
current_var = self.moving_var.view([1, self.insize[0], 1, 1]).expand_as(input)
if len(self.insize) == 2:
current_mean = self.moving_mean.view([1, self.insize[0], 1]).expand_as(input)
current_var = self.moving_var.view([1, self.insize[0], 1]).expand_as(input)
if len(self.insize) == 1:
current_mean = self.moving_mean.view([1, self.insize[0]]).expand_as(input)
current_var = self.moving_var.view([1, self.insize[0]]).expand_as(input)
else:
current_mean = self.moving_mean
current_var = self.moving_var
# get output
if unnorm:
y = torch.clamp(input, min=-5.0, max=5.0)
y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
else:
y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
y = torch.clamp(y, min=-5.0, max=5.0)
        return y
| 2,521 | Python | 42.482758 | 101 | 0.554145 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/layers.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class NoisyLinear(nn.Linear):
def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):
super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)
self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
self.register_buffer("epsilon_weight", torch.zeros(out_features, in_features))
if bias:
self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))
self.register_buffer("epsilon_bias", torch.zeros(out_features))
self.reset_parameters()
def reset_parameters(self):
std = math.sqrt(3 / self.in_features)
self.weight.data.uniform_(-std, std)
        if self.bias is not None:
            self.bias.data.uniform_(-std, std)
def forward(self, input):
self.epsilon_weight.normal_()
bias = self.bias
if bias is not None:
self.epsilon_bias.normal_()
bias = bias + self.sigma_bias * self.epsilon_bias.data
return F.linear(input, self.weight + self.sigma_weight * self.epsilon_weight.data, bias)
class NoisyFactorizedLinear(nn.Linear):
def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
super(NoisyFactorizedLinear, self).__init__(in_features, out_features, bias=bias)
sigma_init = sigma_zero / math.sqrt(in_features)
self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
self.register_buffer("epsilon_input", torch.zeros(1, in_features))
self.register_buffer("epsilon_output", torch.zeros(out_features, 1))
if bias:
self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))
def forward(self, input):
        self.epsilon_input.normal_()
self.epsilon_output.normal_()
func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))
eps_in = func(self.epsilon_input.data)
eps_out = func(self.epsilon_output.data)
bias = self.bias
if bias is not None:
bias = bias + self.sigma_bias * eps_out.t()
noise_v = torch.mul(eps_in, eps_out)
return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)
class LSTMWithDones(nn.Module):
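    # LSTM unrolled step by step so the hidden and cell states can be reset wherever
    # dones marks an episode boundary inside the sequence.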
def __init__(self, input_sz: int, hidden_sz: int):
super().__init__()
self.input_sz = input_sz
self.hidden_size = hidden_sz
self.weight_ih = nn.Parameter(torch.Tensor(input_sz, hidden_sz * 4))
self.weight_hh = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz * 4))
self.bias = nn.Parameter(torch.Tensor(hidden_sz * 4))
self.init_weights()
def init_weights(self):
for p in self.parameters():
if p.data.ndimension() >= 2:
nn.init.xavier_uniform_(p.data)
else:
nn.init.zeros_(p.data)
def forward(self, x, dones, init_states):
"""Assumes x is of shape (batch, sequence, feature)"""
bs, seq_sz, _ = x.size()
hidden_seq = []
assert(init_states)
h_t, c_t = init_states
HS = self.hidden_size
for t in range(seq_sz):
d = dones[:, t]
h_t = h_t * (1 - d)
c_t = c_t * (1 - d)
x_t = x[:, t, :]
# batch the computations into a single matrix multiplication
gates = x_t @ self.weight_ih + h_t @ self.weight_hh + self.bias
i_t, f_t, g_t, o_t = (
torch.sigmoid(gates[:, :HS]), # input
torch.sigmoid(gates[:, HS:HS*2]), # forget
torch.tanh(gates[:, HS*2:HS*3]),
torch.sigmoid(gates[:, HS*3:]), # output
)
c_t = f_t * c_t + i_t * g_t
h_t = o_t * torch.tanh(c_t)
hidden_seq.append(h_t.unsqueeze(0))
        hidden_seq = torch.cat(hidden_seq, dim=0)
# reshape from shape (sequence, batch, feature) to (batch, sequence, feature)
hidden_seq = hidden_seq.transpose(1, 0).contiguous()
        return hidden_seq, (h_t, c_t)
| 4,148 | Python | 39.67647 | 96 | 0.578833 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/network_builder.py | from rl_games.common import object_factory
from rl_games.algos_torch import torch_ext
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
import numpy as np
from rl_games.algos_torch.d2rl import D2RLNet
from rl_games.algos_torch.sac_helper import SquashedNormal
from rl_games.algos_torch import layers
def _create_initializer(func, **kwargs):
return lambda v : func(v, **kwargs)
class NetworkBuilder:
def __init__(self, **kwargs):
pass
def load(self, params):
pass
def build(self, name, **kwargs):
pass
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
class BaseNetwork(nn.Module):
def __init__(self, **kwargs):
nn.Module.__init__(self, **kwargs)
self.activations_factory = object_factory.ObjectFactory()
self.activations_factory.register_builder('relu', lambda **kwargs : nn.ReLU(**kwargs))
self.activations_factory.register_builder('tanh', lambda **kwargs : nn.Tanh(**kwargs))
self.activations_factory.register_builder('sigmoid', lambda **kwargs : nn.Sigmoid(**kwargs))
self.activations_factory.register_builder('elu', lambda **kwargs : nn.ELU(**kwargs))
self.activations_factory.register_builder('selu', lambda **kwargs : nn.SELU(**kwargs))
self.activations_factory.register_builder('softplus', lambda **kwargs : nn.Softplus(**kwargs))
self.activations_factory.register_builder('None', lambda **kwargs : nn.Identity())
self.init_factory = object_factory.ObjectFactory()
#self.init_factory.register_builder('normc_initializer', lambda **kwargs : normc_initializer(**kwargs))
self.init_factory.register_builder('const_initializer', lambda **kwargs : _create_initializer(nn.init.constant_,**kwargs))
self.init_factory.register_builder('orthogonal_initializer', lambda **kwargs : _create_initializer(nn.init.orthogonal_,**kwargs))
self.init_factory.register_builder('glorot_normal_initializer', lambda **kwargs : _create_initializer(nn.init.xavier_normal_,**kwargs))
self.init_factory.register_builder('glorot_uniform_initializer', lambda **kwargs : _create_initializer(nn.init.xavier_uniform_,**kwargs))
self.init_factory.register_builder('variance_scaling_initializer', lambda **kwargs : _create_initializer(torch_ext.variance_scaling_initializer,**kwargs))
self.init_factory.register_builder('random_uniform_initializer', lambda **kwargs : _create_initializer(nn.init.uniform_,**kwargs))
self.init_factory.register_builder('kaiming_normal', lambda **kwargs : _create_initializer(nn.init.kaiming_normal_,**kwargs))
self.init_factory.register_builder('orthogonal', lambda **kwargs : _create_initializer(nn.init.orthogonal_,**kwargs))
self.init_factory.register_builder('default', lambda **kwargs : nn.Identity() )
def is_separate_critic(self):
return False
def is_rnn(self):
return False
def get_default_rnn_state(self):
return None
def _calc_input_size(self, input_shape,cnn_layers=None):
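            # infer the flattened feature size by pushing a dummy batch through the conv stack
            # (or just use the raw 1D observation size when there is no CNN)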
if cnn_layers is None:
assert(len(input_shape) == 1)
return input_shape[0]
else:
return nn.Sequential(*cnn_layers)(torch.rand(1, *(input_shape))).flatten(1).data.size(1)
def _noisy_dense(self, inputs, units):
return layers.NoisyFactorizedLinear(inputs, units)
def _build_rnn(self, name, input, units, layers):
if name == 'identity':
return torch_ext.IdentityRNN(input, units)
if name == 'lstm':
return torch.nn.LSTM(input, units, layers, batch_first=True)
if name == 'gru':
return torch.nn.GRU(input, units, layers, batch_first=True)
if name == 'sru':
from sru import SRU
return SRU(input, units, layers, dropout=0, layer_norm=False)
def _build_sequential_mlp(self,
input_size,
units,
activation,
dense_func,
norm_only_first_layer=False,
norm_func_name = None):
print('build mlp:', input_size)
in_size = input_size
layers = []
need_norm = True
for unit in units:
layers.append(dense_func(in_size, unit))
layers.append(self.activations_factory.create(activation))
if not need_norm:
continue
if norm_only_first_layer and norm_func_name is not None:
need_norm = False
if norm_func_name == 'layer_norm':
layers.append(torch.nn.LayerNorm(unit))
elif norm_func_name == 'batch_norm':
layers.append(torch.nn.BatchNorm1d(unit))
in_size = unit
return nn.Sequential(*layers)
def _build_mlp(self,
input_size,
units,
activation,
dense_func,
norm_only_first_layer=False,
norm_func_name = None,
d2rl=False):
if d2rl:
act_layers = [self.activations_factory.create(activation) for i in range(len(units))]
return D2RLNet(input_size, units, act_layers, norm_func_name)
else:
                return self._build_sequential_mlp(input_size, units, activation, dense_func, norm_only_first_layer=norm_only_first_layer, norm_func_name=norm_func_name)
def _build_conv(self, ctype, **kwargs):
print('conv_name:', ctype)
if ctype == 'conv2d':
return self._build_cnn2d(**kwargs)
if ctype == 'coord_conv2d':
return self._build_cnn2d(conv_func=torch_ext.CoordConv2d, **kwargs)
if ctype == 'conv1d':
return self._build_cnn1d(**kwargs)
def _build_cnn2d(self, input_shape, convs, activation, conv_func=torch.nn.Conv2d, norm_func_name=None):
in_channels = input_shape[0]
layers = []
for conv in convs:
layers.append(conv_func(in_channels=in_channels,
out_channels=conv['filters'],
kernel_size=conv['kernel_size'],
stride=conv['strides'], padding=conv['padding']))
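                # fall back to a plain Conv2d after the first layer, so only the first conv
                # can be the coordinate-augmented variant passed in via conv_func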
conv_func=torch.nn.Conv2d
act = self.activations_factory.create(activation)
layers.append(act)
in_channels = conv['filters']
if norm_func_name == 'layer_norm':
layers.append(torch_ext.LayerNorm2d(in_channels))
elif norm_func_name == 'batch_norm':
layers.append(torch.nn.BatchNorm2d(in_channels))
return nn.Sequential(*layers)
def _build_cnn1d(self, input_shape, convs, activation, norm_func_name=None):
print('conv1d input shape:', input_shape)
in_channels = input_shape[0]
layers = []
for conv in convs:
layers.append(torch.nn.Conv1d(in_channels, conv['filters'], conv['kernel_size'], conv['strides'], conv['padding']))
act = self.activations_factory.create(activation)
layers.append(act)
in_channels = conv['filters']
if norm_func_name == 'layer_norm':
layers.append(torch.nn.LayerNorm(in_channels))
elif norm_func_name == 'batch_norm':
                    layers.append(torch.nn.BatchNorm1d(in_channels))
return nn.Sequential(*layers)
class A2CBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
class Network(NetworkBuilder.BaseNetwork):
def __init__(self, params, **kwargs):
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
self.value_size = kwargs.pop('value_size', 1)
self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
NetworkBuilder.BaseNetwork.__init__(self)
self.load(params)
self.actor_cnn = nn.Sequential()
self.critic_cnn = nn.Sequential()
self.actor_mlp = nn.Sequential()
self.critic_mlp = nn.Sequential()
if self.has_cnn:
input_shape = torch_ext.shape_whc_to_cwh(input_shape)
cnn_args = {
'ctype' : self.cnn['type'],
'input_shape' : input_shape,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'norm_func_name' : self.normalization,
}
self.actor_cnn = self._build_conv(**cnn_args)
if self.separate:
self.critic_cnn = self._build_conv( **cnn_args)
mlp_input_shape = self._calc_input_size(input_shape, self.actor_cnn)
in_mlp_shape = mlp_input_shape
if len(self.units) == 0:
out_size = mlp_input_shape
else:
out_size = self.units[-1]
if self.has_rnn:
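                # the RNN can sit either before or after the MLP; wire the input/output sizes accordingly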
if not self.is_rnn_before_mlp:
rnn_in_size = out_size
out_size = self.rnn_units
if self.rnn_concat_input:
rnn_in_size += in_mlp_shape
else:
rnn_in_size = in_mlp_shape
in_mlp_shape = self.rnn_units
if self.separate:
self.a_rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
self.c_rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
if self.rnn_ln:
self.a_layer_norm = torch.nn.LayerNorm(self.rnn_units)
self.c_layer_norm = torch.nn.LayerNorm(self.rnn_units)
else:
self.rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
if self.rnn_ln:
self.layer_norm = torch.nn.LayerNorm(self.rnn_units)
mlp_args = {
'input_size' : in_mlp_shape,
'units' : self.units,
'activation' : self.activation,
'norm_func_name' : self.normalization,
'dense_func' : torch.nn.Linear,
'd2rl' : self.is_d2rl,
'norm_only_first_layer' : self.norm_only_first_layer
}
self.actor_mlp = self._build_mlp(**mlp_args)
if self.separate:
self.critic_mlp = self._build_mlp(**mlp_args)
self.value = torch.nn.Linear(out_size, self.value_size)
self.value_act = self.activations_factory.create(self.value_activation)
if self.is_discrete:
self.logits = torch.nn.Linear(out_size, actions_num)
'''
                for multi-discrete action spaces, actions_num is a tuple of per-dimension sizes
'''
if self.is_multi_discrete:
self.logits = torch.nn.ModuleList([torch.nn.Linear(out_size, num) for num in actions_num])
if self.is_continuous:
self.mu = torch.nn.Linear(out_size, actions_num)
self.mu_act = self.activations_factory.create(self.space_config['mu_activation'])
mu_init = self.init_factory.create(**self.space_config['mu_init'])
self.sigma_act = self.activations_factory.create(self.space_config['sigma_activation'])
sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
if self.space_config['fixed_sigma']:
self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=True, dtype=torch.float32), requires_grad=True)
else:
self.sigma = torch.nn.Linear(out_size, actions_num)
mlp_init = self.init_factory.create(**self.initializer)
if self.has_cnn:
cnn_init = self.init_factory.create(**self.cnn['initializer'])
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
cnn_init(m.weight)
if getattr(m, "bias", None) is not None:
torch.nn.init.zeros_(m.bias)
if isinstance(m, nn.Linear):
mlp_init(m.weight)
if getattr(m, "bias", None) is not None:
torch.nn.init.zeros_(m.bias)
if self.is_continuous:
mu_init(self.mu.weight)
if self.space_config['fixed_sigma']:
sigma_init(self.sigma)
else:
sigma_init(self.sigma.weight)
def forward(self, obs_dict):
obs = obs_dict['obs']
states = obs_dict.get('rnn_states', None)
seq_length = obs_dict.get('seq_length', 1)
if self.has_cnn:
# for obs shape 4
# input expected shape (B, W, H, C)
# convert to (B, C, W, H)
if len(obs.shape) == 4:
obs = obs.permute((0, 3, 1, 2))
if self.separate:
a_out = c_out = obs
a_out = self.actor_cnn(a_out)
a_out = a_out.contiguous().view(a_out.size(0), -1)
c_out = self.critic_cnn(c_out)
c_out = c_out.contiguous().view(c_out.size(0), -1)
if self.has_rnn:
if not self.is_rnn_before_mlp:
a_out_in = a_out
c_out_in = c_out
a_out = self.actor_mlp(a_out_in)
c_out = self.critic_mlp(c_out_in)
if self.rnn_concat_input:
a_out = torch.cat([a_out, a_out_in], dim=1)
c_out = torch.cat([c_out, c_out_in], dim=1)
batch_size = a_out.size()[0]
num_seqs = batch_size // seq_length
a_out = a_out.reshape(num_seqs, seq_length, -1)
c_out = c_out.reshape(num_seqs, seq_length, -1)
if self.rnn_name == 'sru':
a_out =a_out.transpose(0,1)
c_out =c_out.transpose(0,1)
if len(states) == 2:
a_states = states[0]
c_states = states[1]
else:
a_states = states[:2]
c_states = states[2:]
a_out, a_states = self.a_rnn(a_out, a_states)
c_out, c_states = self.c_rnn(c_out, c_states)
if self.rnn_name == 'sru':
a_out = a_out.transpose(0,1)
c_out = c_out.transpose(0,1)
else:
if self.rnn_ln:
a_out = self.a_layer_norm(a_out)
c_out = self.c_layer_norm(c_out)
a_out = a_out.contiguous().reshape(a_out.size()[0] * a_out.size()[1], -1)
c_out = c_out.contiguous().reshape(c_out.size()[0] * c_out.size()[1], -1)
if type(a_states) is not tuple:
a_states = (a_states,)
c_states = (c_states,)
states = a_states + c_states
if self.is_rnn_before_mlp:
a_out = self.actor_mlp(a_out)
c_out = self.critic_mlp(c_out)
else:
a_out = self.actor_mlp(a_out)
c_out = self.critic_mlp(c_out)
value = self.value_act(self.value(c_out))
if self.is_discrete:
logits = self.logits(a_out)
return logits, value, states
if self.is_multi_discrete:
logits = [logit(a_out) for logit in self.logits]
return logits, value, states
if self.is_continuous:
mu = self.mu_act(self.mu(a_out))
if self.space_config['fixed_sigma']:
sigma = mu * 0.0 + self.sigma_act(self.sigma)
else:
sigma = self.sigma_act(self.sigma(a_out))
return mu, sigma, value, states
else:
out = obs
out = self.actor_cnn(out)
out = out.flatten(1)
if self.has_rnn:
out_in = out
if not self.is_rnn_before_mlp:
out_in = out
out = self.actor_mlp(out)
if self.rnn_concat_input:
out = torch.cat([out, out_in], dim=1)
batch_size = out.size()[0]
num_seqs = batch_size // seq_length
out = out.reshape(num_seqs, seq_length, -1)
if len(states) == 1:
states = states[0]
if self.rnn_name == 'sru':
out = out.transpose(0,1)
out, states = self.rnn(out, states)
out = out.contiguous().reshape(out.size()[0] * out.size()[1], -1)
if self.rnn_name == 'sru':
out = out.transpose(0,1)
if self.rnn_ln:
out = self.layer_norm(out)
if self.is_rnn_before_mlp:
out = self.actor_mlp(out)
if type(states) is not tuple:
states = (states,)
else:
out = self.actor_mlp(out)
value = self.value_act(self.value(out))
if self.central_value:
return value, states
if self.is_discrete:
logits = self.logits(out)
return logits, value, states
if self.is_multi_discrete:
logits = [logit(out) for logit in self.logits]
return logits, value, states
if self.is_continuous:
mu = self.mu_act(self.mu(out))
if self.space_config['fixed_sigma']:
sigma = self.sigma_act(self.sigma)
else:
sigma = self.sigma_act(self.sigma(out))
return mu, mu*0 + sigma, value, states
def is_separate_critic(self):
return self.separate
def is_rnn(self):
return self.has_rnn
def get_default_rnn_state(self):
if not self.has_rnn:
return None
num_layers = self.rnn_layers
if self.rnn_name == 'identity':
rnn_units = 1
else:
rnn_units = self.rnn_units
if self.rnn_name == 'lstm':
if self.separate:
return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)))
else:
return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)))
else:
if self.separate:
return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)))
else:
return (torch.zeros((num_layers, self.num_seqs, rnn_units)),)
def load(self, params):
self.separate = params.get('separate', False)
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.is_d2rl = params['mlp'].get('d2rl', False)
self.norm_only_first_layer = params['mlp'].get('norm_only_first_layer', False)
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
self.has_rnn = 'rnn' in params
self.has_space = 'space' in params
self.central_value = params.get('central_value', False)
self.joint_obs_actions_config = params.get('joint_obs_actions', None)
if self.has_space:
self.is_multi_discrete = 'multi_discrete'in params['space']
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous'in params['space']
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
elif self.is_multi_discrete:
self.space_config = params['space']['multi_discrete']
else:
self.is_discrete = False
self.is_continuous = False
self.is_multi_discrete = False
if self.has_rnn:
self.rnn_units = params['rnn']['units']
self.rnn_layers = params['rnn']['layers']
self.rnn_name = params['rnn']['name']
self.rnn_ln = params['rnn'].get('layer_norm', False)
self.is_rnn_before_mlp = params['rnn'].get('before_mlp', False)
self.rnn_concat_input = params['rnn'].get('concat_input', False)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
net = A2CBuilder.Network(self.params, **kwargs)
return net
class Conv2dAuto(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.padding = (self.kernel_size[0] // 2, self.kernel_size[1] // 2)  # dynamically add padding based on the kernel size
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False):
super().__init__()
self.use_bn = use_bn
self.conv = Conv2dAuto(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, bias=not use_bn)
if use_bn:
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
return x
class ResidualBlock(nn.Module):
def __init__(self, channels, activation='relu', use_bn=False, use_zero_init=True, use_attention=False):
super().__init__()
self.use_zero_init=use_zero_init
self.use_attention = use_attention
if use_zero_init:
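            # zero-initialized residual scale, so the block initially behaves as an identity mapping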
self.alpha = nn.Parameter(torch.zeros(1))
self.activation = activation
self.conv1 = ConvBlock(channels, channels, use_bn)
self.conv2 = ConvBlock(channels, channels, use_bn)
self.activate1 = nn.ELU()
self.activate2 = nn.ELU()
if use_attention:
self.ca = ChannelAttention(channels)
self.sa = SpatialAttention()
def forward(self, x):
residual = x
x = self.activate1(x)
x = self.conv1(x)
x = self.activate2(x)
x = self.conv2(x)
if self.use_attention:
x = self.ca(x) * x
x = self.sa(x) * x
if self.use_zero_init:
x = x * self.alpha + residual
else:
x = x + residual
return x
class ImpalaSequential(nn.Module):
def __init__(self, in_channels, out_channels, activation='elu', use_bn=True, use_zero_init=False):
super().__init__()
self.conv = ConvBlock(in_channels, out_channels, use_bn)
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.res_block1 = ResidualBlock(out_channels, activation=activation, use_bn=use_bn, use_zero_init=use_zero_init)
self.res_block2 = ResidualBlock(out_channels, activation=activation, use_bn=use_bn, use_zero_init=use_zero_init)
def forward(self, x):
x = self.conv(x)
x = self.max_pool(x)
x = self.res_block1(x)
x = self.res_block2(x)
return x
class A2CResnetBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
class Network(NetworkBuilder.BaseNetwork):
def __init__(self, params, **kwargs):
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
input_shape = torch_ext.shape_whc_to_cwh(input_shape)
self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
self.value_size = kwargs.pop('value_size', 1)
NetworkBuilder.BaseNetwork.__init__(self, **kwargs)
self.load(params)
self.cnn = self._build_impala(input_shape, self.conv_depths)
mlp_input_shape = self._calc_input_size(input_shape, self.cnn)
in_mlp_shape = mlp_input_shape
if len(self.units) == 0:
out_size = mlp_input_shape
else:
out_size = self.units[-1]
if self.has_rnn:
if not self.is_rnn_before_mlp:
rnn_in_size = out_size
out_size = self.rnn_units
else:
rnn_in_size = in_mlp_shape
in_mlp_shape = self.rnn_units
self.rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
#self.layer_norm = torch.nn.LayerNorm(self.rnn_units)
mlp_args = {
'input_size' : in_mlp_shape,
'units' :self.units,
'activation' : self.activation,
'norm_func_name' : self.normalization,
'dense_func' : torch.nn.Linear
}
self.mlp = self._build_mlp(**mlp_args)
self.value = torch.nn.Linear(out_size, self.value_size)
self.value_act = self.activations_factory.create(self.value_activation)
self.flatten_act = self.activations_factory.create(self.activation)
if self.is_discrete:
self.logits = torch.nn.Linear(out_size, actions_num)
if self.is_continuous:
self.mu = torch.nn.Linear(out_size, actions_num)
self.mu_act = self.activations_factory.create(self.space_config['mu_activation'])
mu_init = self.init_factory.create(**self.space_config['mu_init'])
self.sigma_act = self.activations_factory.create(self.space_config['sigma_activation'])
sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
if self.space_config['fixed_sigma']:
self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=True, dtype=torch.float32), requires_grad=True)
else:
self.sigma = torch.nn.Linear(out_size, actions_num)
mlp_init = self.init_factory.create(**self.initializer)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
#nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('elu'))
for m in self.mlp:
if isinstance(m, nn.Linear):
mlp_init(m.weight)
if self.is_discrete:
mlp_init(self.logits.weight)
if self.is_continuous:
mu_init(self.mu.weight)
if self.space_config['fixed_sigma']:
sigma_init(self.sigma)
else:
sigma_init(self.sigma.weight)
mlp_init(self.value.weight)
def forward(self, obs_dict):
obs = obs_dict['obs']
obs = obs.permute((0, 3, 1, 2))
states = obs_dict.get('rnn_states', None)
seq_length = obs_dict.get('seq_length', 1)
out = obs
out = self.cnn(out)
out = out.flatten(1)
out = self.flatten_act(out)
if self.has_rnn:
if not self.is_rnn_before_mlp:
out = self.mlp(out)
batch_size = out.size()[0]
num_seqs = batch_size // seq_length
out = out.reshape(num_seqs, seq_length, -1)
if len(states) == 1:
states = states[0]
out, states = self.rnn(out, states)
out = out.contiguous().reshape(out.size()[0] * out.size()[1], -1)
#out = self.layer_norm(out)
if type(states) is not tuple:
states = (states,)
if self.is_rnn_before_mlp:
for l in self.mlp:
out = l(out)
else:
for l in self.mlp:
out = l(out)
value = self.value_act(self.value(out))
if self.is_discrete:
logits = self.logits(out)
return logits, value, states
if self.is_continuous:
mu = self.mu_act(self.mu(out))
if self.space_config['fixed_sigma']:
sigma = self.sigma_act(self.sigma)
else:
sigma = self.sigma_act(self.sigma(out))
return mu, mu*0 + sigma, value, states
def load(self, params):
self.separate = params['separate']
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous' in params['space']
self.is_multi_discrete = 'multi_discrete'in params['space']
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
elif self.is_multi_discrete:
self.space_config = params['space']['multi_discrete']
self.has_rnn = 'rnn' in params
if self.has_rnn:
self.rnn_units = params['rnn']['units']
self.rnn_layers = params['rnn']['layers']
self.rnn_name = params['rnn']['name']
self.is_rnn_before_mlp = params['rnn'].get('before_mlp', False)
self.has_cnn = True
self.conv_depths = params['cnn']['conv_depths']
def _build_impala(self, input_shape, depths):
in_channels = input_shape[0]
layers = nn.ModuleList()
for d in depths:
layers.append(ImpalaSequential(in_channels, d))
in_channels = d
return nn.Sequential(*layers)
def is_separate_critic(self):
return False
def is_rnn(self):
return self.has_rnn
def get_default_rnn_state(self):
num_layers = self.rnn_layers
if self.rnn_name == 'lstm':
return (torch.zeros((num_layers, self.num_seqs, self.rnn_units)),
torch.zeros((num_layers, self.num_seqs, self.rnn_units)))
else:
return (torch.zeros((num_layers, self.num_seqs, self.rnn_units)))
def build(self, name, **kwargs):
net = A2CResnetBuilder.Network(self.params, **kwargs)
return net
class DiagGaussianActor(NetworkBuilder.BaseNetwork):
"""torch.distributions implementation of an diagonal Gaussian policy."""
def __init__(self, output_dim, log_std_bounds, **mlp_args):
super().__init__()
self.log_std_bounds = log_std_bounds
self.trunk = self._build_mlp(**mlp_args)
last_layer = list(self.trunk.children())[-2].out_features
self.trunk = nn.Sequential(*list(self.trunk.children()), nn.Linear(last_layer, output_dim))
def forward(self, obs):
mu, log_std = self.trunk(obs).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
#log_std = torch.tanh(log_std)
log_std_min, log_std_max = self.log_std_bounds
log_std = torch.clamp(log_std, log_std_min, log_std_max)
#log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std + 1)
std = log_std.exp()
# TODO: Refactor
dist = SquashedNormal(mu, std)
# Modify to only return mu and std
return dist
class DoubleQCritic(NetworkBuilder.BaseNetwork):
"""Critic network, employes double Q-learning."""
def __init__(self, output_dim, **mlp_args):
super().__init__()
self.Q1 = self._build_mlp(**mlp_args)
last_layer = list(self.Q1.children())[-2].out_features
self.Q1 = nn.Sequential(*list(self.Q1.children()), nn.Linear(last_layer, output_dim))
self.Q2 = self._build_mlp(**mlp_args)
last_layer = list(self.Q2.children())[-2].out_features
self.Q2 = nn.Sequential(*list(self.Q2.children()), nn.Linear(last_layer, output_dim))
def forward(self, obs, action):
assert obs.size(0) == action.size(0)
obs_action = torch.cat([obs, action], dim=-1)
q1 = self.Q1(obs_action)
q2 = self.Q2(obs_action)
return q1, q2
class SACBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
def build(self, name, **kwargs):
net = SACBuilder.Network(self.params, **kwargs)
return net
class Network(NetworkBuilder.BaseNetwork):
def __init__(self, params, **kwargs):
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
obs_dim = kwargs.pop('obs_dim')
action_dim = kwargs.pop('action_dim')
self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
NetworkBuilder.BaseNetwork.__init__(self)
self.load(params)
mlp_input_shape = input_shape
actor_mlp_args = {
'input_size' : obs_dim,
'units' : self.units,
'activation' : self.activation,
'norm_func_name' : self.normalization,
'dense_func' : torch.nn.Linear,
'd2rl' : self.is_d2rl,
'norm_only_first_layer' : self.norm_only_first_layer
}
critic_mlp_args = {
'input_size' : obs_dim + action_dim,
'units' : self.units,
'activation' : self.activation,
'norm_func_name' : self.normalization,
'dense_func' : torch.nn.Linear,
'd2rl' : self.is_d2rl,
'norm_only_first_layer' : self.norm_only_first_layer
}
print("Building Actor")
self.actor = self._build_actor(2*action_dim, self.log_std_bounds, **actor_mlp_args)
if self.separate:
print("Building Critic")
self.critic = self._build_critic(1, **critic_mlp_args)
print("Building Critic Target")
self.critic_target = self._build_critic(1, **critic_mlp_args)
self.critic_target.load_state_dict(self.critic.state_dict())
mlp_init = self.init_factory.create(**self.initializer)
            # the actor and critics built here are MLP-only, so only Linear layers need explicit initialization
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    mlp_init(m.weight)
                    if getattr(m, "bias", None) is not None:
                        torch.nn.init.zeros_(m.bias)
def _build_critic(self, output_dim, **mlp_args):
return DoubleQCritic(output_dim, **mlp_args)
def _build_actor(self, output_dim, log_std_bounds, **mlp_args):
return DiagGaussianActor(output_dim, log_std_bounds, **mlp_args)
def forward(self, obs_dict):
"""TODO"""
obs = obs_dict['obs']
mu, sigma = self.actor(obs)
return mu, sigma
def is_separate_critic(self):
return self.separate
def load(self, params):
self.separate = params.get('separate', True)
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.is_d2rl = params['mlp'].get('d2rl', False)
self.norm_only_first_layer = params['mlp'].get('norm_only_first_layer', False)
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
self.has_space = 'space' in params
self.value_shape = params.get('value_shape', 1)
self.central_value = params.get('central_value', False)
self.joint_obs_actions_config = params.get('joint_obs_actions', None)
self.log_std_bounds = params.get('log_std_bounds', None)
if self.has_space:
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous'in params['space']
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
else:
self.is_discrete = False
self.is_continuous = False
'''
class DQNBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_dueling = params['dueling']
self.atoms = params['atoms']
self.is_noisy = params['noisy']
self.normalization = params.get('normalization', None)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
is_train = kwargs.pop('is_train', True)
if self.is_noisy:
dense_layer = self._noisy_dense
else:
dense_layer = torch.nn.Linear
with tf.variable_scope(name, reuse=reuse):
out = input
if self.has_cnn:
cnn_args = {
'name' :'dqn_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out = self._build_conv(**cnn_args)
out = tf.contrib.layers.flatten(out)
mlp_args = {
'name' :'dqn_mlp',
'input' : out,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train,
'dense_func' : dense_layer
}
if self.is_dueling:
if len(self.units) > 1:
mlp_args['units'] = self.units[:-1]
out = self._build_mlp(**mlp_args)
hidden_value = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_val')
hidden_advantage = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_adv')
value = dense_layer(inputs=hidden_value, units=self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), activation=tf.identity, kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='value')
advantage = dense_layer(inputs=hidden_advantage, units= actions_num * self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='advantage')
advantage = tf.reshape(advantage, shape = [-1, actions_num, self.atoms])
value = tf.reshape(value, shape = [-1, 1, self.atoms])
q_values = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
else:
mlp_args['units'] = self.units
out = self._build_mlp('dqn_mlp', out, self.units, self.activation, self.initializer, self.regularizer)
q_values = dense_layer(inputs=out, units=actions_num *self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='q_vals')
q_values = tf.reshape(q_values, shape = [-1, actions_num, self.atoms])
if self.atoms == 1:
return tf.squeeze(q_values)
else:
return q_values
'''
| 43,505 | Python | 42.160714 | 301 | 0.520607 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/a2c_continuous.py | from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch import ppg_aux
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class A2CAgent(a2c_common.ContinuousA2CBase):
def __init__(self, base_name, config):
a2c_common.ContinuousA2CBase.__init__(self, base_name, config)
obs_shape = self.obs_shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_actors * self.num_agents,
'value_size': self.env_info.get('value_size',1)
}
self.model = self.network.build(config)
self.model.to(self.ppo_device)
self.states = None
self.init_rnn_from_model(self.model)
self.last_lr = float(self.last_lr)
self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
if self.normalize_input:
if isinstance(self.observation_space,gym.spaces.Dict):
self.running_mean_std = RunningMeanStdObs(obs_shape).to(self.ppo_device)
else:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.ppo_device)
if self.has_central_value:
cv_config = {
'state_shape' : self.state_shape,
'value_size' : self.value_size,
'ppo_device' : self.ppo_device,
'num_agents' : self.num_agents,
'num_steps' : self.horizon_length,
'num_actors' : self.num_actors,
'num_actions' : self.actions_num,
'seq_len' : self.seq_len,
'model' : self.central_value_config['network'],
'config' : self.central_value_config,
'writter' : self.writer,
'max_epochs' : self.max_epochs,
'multi_gpu' : self.multi_gpu
}
self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)
self.use_experimental_cv = self.config.get('use_experimental_cv', True)
self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
if 'phasic_policy_gradients' in self.config:
self.has_phasic_policy_gradients = True
self.ppg_aux_loss = ppg_aux.PPGAux(self, self.config['phasic_policy_gradients'])
self.has_value_loss = (self.has_central_value \
and self.use_experimental_cv) \
or not self.has_phasic_policy_gradients
self.algo_observer.after_init(self)
def update_epoch(self):
self.epoch_num += 1
return self.epoch_num
def save(self, fn):
state = self.get_full_state_weights()
torch_ext.save_checkpoint(fn, state)
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.set_full_state_weights(checkpoint)
def get_masked_action_values(self, obs, action_masks):
assert False
def calc_gradients(self, input_dict):
value_preds_batch = input_dict['old_values']
old_action_log_probs_batch = input_dict['old_logp_actions']
advantage = input_dict['advantages']
old_mu_batch = input_dict['mu']
old_sigma_batch = input_dict['sigma']
return_batch = input_dict['returns']
actions_batch = input_dict['actions']
obs_batch = input_dict['obs']
obs_batch = self._preproc_obs(obs_batch)
lr = self.last_lr
kl = 1.0
lr_mul = 1.0
curr_e_clip = lr_mul * self.e_clip
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
}
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
batch_dict['seq_length'] = self.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
res_dict = self.model(batch_dict)
action_log_probs = res_dict['prev_neglogp']
values = res_dict['values']
entropy = res_dict['entropy']
mu = res_dict['mus']
sigma = res_dict['sigmas']
a_loss = common_losses.actor_loss(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
if self.has_value_loss:
c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
else:
c_loss = torch.zeros(1, device=self.ppo_device)
b_loss = self.bound_loss(mu)
losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks)
a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]
loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * self.entropy_coef + b_loss * self.bounds_loss_coef
if self.multi_gpu:
self.optimizer.zero_grad()
else:
for param in self.model.parameters():
param.grad = None
self.scaler.scale(loss).backward()
        #TODO: Refactor this, ugliest code of the year
if self.truncate_grads:
if self.multi_gpu:
self.optimizer.synchronize()
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
with self.optimizer.skip_synchronize():
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.step(self.optimizer)
self.scaler.update()
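        # Approximate KL between the old and new Gaussian policies, used for logging and
        # the KL-based lr schedule; for RNNs the per-step KL is zeroed by the sequence masks.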
with torch.no_grad():
reduce_kl = not self.is_rnn
kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
if self.is_rnn:
kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask
self.train_result = (a_loss, c_loss, entropy, \
kl_dist, self.last_lr, lr_mul, \
mu.detach(), sigma.detach(), b_loss)
def train_actor_critic(self, input_dict):
self.calc_gradients(input_dict)
return self.train_result
def bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.1
            # Penalize only the part of the action mean that lies outside [-soft_bound, soft_bound]
            mu_loss_high = torch.clamp_min(mu - soft_bound, 0.0)**2
            mu_loss_low = torch.clamp_max(mu + soft_bound, 0.0)**2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
else:
b_loss = 0
return b_loss
| 7,399 | Python | 39.217391 | 142 | 0.570347 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_torch/running_mean_std.py | import torch
import torch.nn as nn
import numpy as np
'''
Updates running statistics from full batches of data.
'''
class RunningMeanStd(nn.Module):
def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
super(RunningMeanStd, self).__init__()
print('RunningMeanStd: ', insize)
self.insize = insize
self.epsilon = epsilon
self.norm_only = norm_only
self.per_channel = per_channel
if per_channel:
if len(self.insize) == 3:
self.axis = [0,2,3]
if len(self.insize) == 2:
self.axis = [0,2]
if len(self.insize) == 1:
self.axis = [0]
in_size = self.insize[0]
else:
self.axis = [0]
in_size = insize
self.register_buffer("running_mean", torch.zeros(in_size, dtype = torch.float64))
self.register_buffer("running_var", torch.ones(in_size, dtype = torch.float64))
self.register_buffer("count", torch.ones((), dtype = torch.float64))
def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + delta**2 * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
def forward(self, input, unnorm=False):
if self.training:
mean = input.mean(self.axis) # along channel axis
var = input.var(self.axis)
self.running_mean, self.running_var, self.count = self._update_mean_var_count_from_moments(self.running_mean, self.running_var, self.count,
mean, var, input.size()[0] )
# change shape
if self.per_channel:
if len(self.insize) == 3:
current_mean = self.running_mean.view([1, self.insize[0], 1, 1]).expand_as(input)
current_var = self.running_var.view([1, self.insize[0], 1, 1]).expand_as(input)
if len(self.insize) == 2:
current_mean = self.running_mean.view([1, self.insize[0], 1]).expand_as(input)
current_var = self.running_var.view([1, self.insize[0], 1]).expand_as(input)
if len(self.insize) == 1:
current_mean = self.running_mean.view([1, self.insize[0]]).expand_as(input)
current_var = self.running_var.view([1, self.insize[0]]).expand_as(input)
else:
current_mean = self.running_mean
current_var = self.running_var
# get output
if unnorm:
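            # unnorm=True applies the inverse transform: a normalized (and clamped) value
            # is mapped back to the original scale.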
y = torch.clamp(input, min=-5.0, max=5.0)
y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
else:
if self.norm_only:
y = input/ torch.sqrt(current_var.float() + self.epsilon)
else:
y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
class RunningMeanStdObs(nn.Module):
def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        assert isinstance(insize, dict)
super(RunningMeanStdObs, self).__init__()
self.running_mean_std = nn.ModuleDict({
k : RunningMeanStd(v, epsilon, per_channel, norm_only) for k,v in insize.items()
})
def forward(self, input, unnorm=False):
        res = {k : self.running_mean_std[k](v, unnorm) for k,v in input.items()}
return res | 3,757 | Python | 41.224719 | 152 | 0.558957 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/tensorflow_utils.py | import tensorflow as tf
import numpy as np
import collections
from collections import deque, OrderedDict
def unflatten(vector, shapes):
i = 0
arrays = []
for shape in shapes:
size = np.prod(shape, dtype=np.int)
array = vector[i:(i + size)].reshape(shape)
arrays.append(array)
i += size
assert len(vector) == i, "Passed weight does not have the correct shape."
return arrays
class TensorFlowVariables(object):
"""A class used to set and get weights for Tensorflow networks.
Attributes:
sess (tf.Session): The tensorflow session used to run assignment.
variables (Dict[str, tf.Variable]): Extracted variables from the loss
or additional variables that are passed in.
placeholders (Dict[str, tf.placeholders]): Placeholders for weights.
assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.
"""
def __init__(self, output, sess=None, input_variables=None):
"""Creates TensorFlowVariables containing extracted variables.
The variables are extracted by performing a BFS search on the
dependency graph with loss as the root node. After the tree is
traversed and those variables are collected, we append input_variables
to the collected variables. For each variable in the list, the
variable has a placeholder and assignment operation created for it.
Args:
output (tf.Operation, List[tf.Operation]): The tensorflow
operation to extract all variables from.
sess (tf.Session): Session used for running the get and set
methods.
input_variables (List[tf.Variables]): Variables to include in the
list.
"""
self.sess = sess
if not isinstance(output, (list, tuple)):
output = [output]
queue = deque(output)
variable_names = []
explored_inputs = set(output)
# We do a BFS on the dependency graph of the input function to find
# the variables.
while len(queue) != 0:
tf_obj = queue.popleft()
if tf_obj is None:
continue
# The object put into the queue is not necessarily an operation,
# so we want the op attribute to get the operation underlying the
# object. Only operations contain the inputs that we can explore.
if hasattr(tf_obj, "op"):
tf_obj = tf_obj.op
for input_op in tf_obj.inputs:
if input_op not in explored_inputs:
queue.append(input_op)
explored_inputs.add(input_op)
# Tensorflow control inputs can be circular, so we keep track of
# explored operations.
for control in tf_obj.control_inputs:
if control not in explored_inputs:
queue.append(control)
explored_inputs.add(control)
if "Variable" in tf_obj.node_def.op:
variable_names.append(tf_obj.node_def.name)
self.variables = OrderedDict()
variable_list = [
v for v in tf.global_variables()
if v.op.node_def.name in variable_names
]
if input_variables is not None:
variable_list += input_variables
for v in variable_list:
self.variables[v.op.node_def.name] = v
self.placeholders = {}
self.assignment_nodes = {}
# Create new placeholders to put in custom weights.
for k, var in self.variables.items():
self.placeholders[k] = tf.placeholder(
var.value().dtype,
var.get_shape().as_list(),
name="Placeholder_" + k)
self.assignment_nodes[k] = var.assign(self.placeholders[k])
def set_session(self, sess):
"""Sets the current session used by the class.
Args:
sess (tf.Session): Session to set the attribute with.
"""
self.sess = sess
def get_flat_size(self):
"""Returns the total length of all of the flattened variables.
Returns:
The length of all flattened variables concatenated.
"""
return sum(
np.prod(v.get_shape().as_list()) for v in self.variables.values())
def _check_sess(self):
"""Checks if the session is set, and if not throw an error message."""
assert self.sess is not None, ("The session is not set. Set the "
"session either by passing it into the "
"TensorFlowVariables constructor or by "
"calling set_session(sess).")
def get_flat(self):
"""Gets the weights and returns them as a flat array.
Returns:
1D Array containing the flattened weights.
"""
self._check_sess()
return np.concatenate([
v.eval(session=self.sess).flatten()
for v in self.variables.values()
])
def set_flat(self, new_weights):
"""Sets the weights to new_weights, converting from a flat array.
Note:
You can only set all weights in the network using this function,
i.e., the length of the array must match get_flat_size.
Args:
new_weights (np.ndarray): Flat array containing weights.
"""
self._check_sess()
shapes = [v.get_shape().as_list() for v in self.variables.values()]
arrays = unflatten(new_weights, shapes)
placeholders = [
self.placeholders[k] for k, v in self.variables.items()
]
self.sess.run(
list(self.assignment_nodes.values()),
feed_dict=dict(zip(placeholders, arrays)))
def get_weights(self):
"""Returns a dictionary containing the weights of the network.
Returns:
Dictionary mapping variable names to their weights.
"""
self._check_sess()
return {
k: v.eval(session=self.sess)
for k, v in self.variables.items()
}
def set_weights(self, new_weights):
"""Sets the weights to new_weights.
Note:
Can set subsets of variables as well, by only passing in the
variables you want to be set.
Args:
new_weights (Dict): Dictionary mapping variable names to their
weights.
"""
self._check_sess()
assign_list = [
self.assignment_nodes[name] for name in new_weights.keys()
if name in self.assignment_nodes
]
assert assign_list, ("No variables in the input matched those in the "
"network. Possible cause: Two networks were "
"defined in the same TensorFlow graph. To fix "
"this, place each network definition in its own "
"tf.Graph.")
self.sess.run(
assign_list,
feed_dict={
self.placeholders[name]: value
for (name, value) in new_weights.items()
if name in self.placeholders
}) | 7,289 | Python | 39.5 | 79 | 0.571409 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/tf_moving_mean_std.py | import tensorflow as tf
from tensorflow.python.training.moving_averages import assign_moving_average
class MovingMeanStd(object):
def __init__(self, shape, epsilon, decay, clamp = 5.0):
self.moving_mean = tf.Variable(tf.constant(0.0, shape=shape, dtype=tf.float64), trainable=False)#, name='moving_mean')
self.moving_variance = tf.Variable(tf.constant(1.0, shape=shape, dtype=tf.float64), trainable=False)#, name='moving_variance' )
self.epsilon = epsilon
self.shape = shape
self.decay = decay
self.count = tf.Variable(tf.constant(epsilon, shape=shape, dtype=tf.float64), trainable=False)
self.clamp = clamp
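    # Same parallel mean/variance update as the torch RunningMeanStd, expressed with
    # TensorFlow ops so it can run inside the graph.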
def update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + tf.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
def normalize(self, x, train=True):
x64 = tf.cast(x, tf.float64)
if train:
shape = x.get_shape().as_list()
if (len(shape) == 2):
axis = [0]
if (len(shape) == 3):
axis = [0, 1]
if (len(shape) == 4):
axis = [0, 1, 2]
mean, var = tf.nn.moments(x64, axis)
new_mean, new_var, new_count = self.update_mean_var_count_from_moments(self.moving_mean, self.moving_variance, self.count, mean, var, tf.cast(tf.shape(x)[0], tf.float64))
mean_op = self.moving_mean.assign(new_mean)
var_op = self.moving_variance.assign(tf.maximum(new_var, 1e-2))
count_op = self.count.assign(new_count)
with tf.control_dependencies([mean_op, var_op, count_op]):
res = tf.cast((x64 - self.moving_mean) / (tf.sqrt(self.moving_variance)), tf.float32)
return tf.clip_by_value(res, -self.clamp, self.clamp)
else:
res = tf.cast((x64 - self.moving_mean) / (tf.sqrt(self.moving_variance)), tf.float32)
return tf.clip_by_value(res, -self.clamp, self.clamp) | 2,361 | Python | 49.255318 | 182 | 0.581957 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/players.py | from rl_games.common import env_configurations
from rl_games.algos_tf14 import dqnagent
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
class BasePlayer(object):
def __init__(self, sess, config):
self.config = config
self.sess = sess
self.env_name = self.config['env_name']
self.env_spaces = env_configurations.get_env_info(self.config)
self.obs_space, self.action_space, self.num_agents = self.env_spaces['observation_space'], self.env_spaces['action_space'], self.env_spaces['agents']
self.env = None
self.env_config = self.config.get('env_config', None)
def restore(self, fn):
raise NotImplementedError('restore')
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def create_env(self):
return env_configurations.configurations[self.env_name]['env_creator']()
def get_action(self, obs, is_determenistic = False):
raise NotImplementedError('step')
def get_masked_action(self, obs, mask, is_determenistic = False):
raise NotImplementedError('step')
def reset(self):
raise NotImplementedError('raise')
def run(self, n_games=1000, n_game_life = 1, render= False):
self.env = self.create_env()
sum_rewards = 0
sum_steps = 0
sum_game_res = 0
n_games = n_games * n_game_life
has_masks = False
has_masks_func = getattr(self.env, "has_action_mask", None) is not None
if has_masks_func:
has_masks = self.env.has_action_mask()
is_determenistic = True
for _ in range(n_games):
cr = 0
steps = 0
s = self.env.reset()
for _ in range(5000):
if has_masks:
masks = self.env.get_action_mask()
action = self.get_masked_action(s, masks, is_determenistic)
else:
action = self.get_action(s, is_determenistic)
s, r, done, info = self.env.step(action)
cr += r
steps += 1
if render:
self.env.render(mode = 'human')
if not np.isscalar(done):
done = done.any()
if done:
game_res = 0.0
if isinstance(info, dict):
if 'battle_won' in info:
game_res = info['battle_won']
if 'scores' in info:
game_res = info['scores']
print('reward:', np.mean(cr), 'steps:', steps, 'scores:', game_res)
sum_game_res += game_res
sum_rewards += np.mean(cr)
sum_steps += steps
break
print('av reward:', sum_rewards / n_games * n_game_life, 'av steps:', sum_steps / n_games * n_game_life, 'scores:', sum_game_res / n_games * n_game_life)
class PpoPlayerContinuous(BasePlayer):
def __init__(self, sess, config):
BasePlayer.__init__(self, sess, config)
self.network = config['network']
self.obs_ph = tf.placeholder('float32', (None, ) + self.obs_space.shape, name = 'obs')
self.actions_num = self.action_space.shape[0]
self.actions_low = self.action_space.low
self.actions_high = self.action_space.high
self.mask = [False]
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.normalize_input = self.config['normalize_input']
self.input_obs = self.obs_ph
if self.obs_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = self.obs_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=False)
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : 1,
'games_num' : 1,
'actions_num' : self.actions_num,
'prev_actions_ph' : None
}
self.last_state = None
if self.network.is_rnn():
self.neglop, self.value, self.action, _, self.mu, _, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.run_dict, reuse=False)
self.last_state = self.initial_state
else:
self.neglop, self.value, self.action, _, self.mu, _ = self.network(self.run_dict, reuse=False)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
def get_action(self, obs, is_determenistic = True):
if is_determenistic:
ret_action = self.mu
else:
ret_action = self.action
if self.network.is_rnn():
action, self.last_state = self.sess.run([ret_action, self.lstm_state], {self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
else:
action = self.sess.run([ret_action], {self.obs_ph : obs})
action = np.squeeze(action)
return rescale_actions(self.actions_low, self.actions_high, np.clip(action, -1.0, 1.0))
def restore(self, fn):
self.saver.restore(self.sess, fn)
def reset(self):
if self.network.is_rnn():
self.last_state = self.initial_state
#self.mask = [True]
class PpoPlayerDiscrete(BasePlayer):
def __init__(self, sess, config):
BasePlayer.__init__(self, sess, config)
self.network = config['network']
self.use_action_masks = config.get('use_action_masks', False)
self.obs_ph = tf.placeholder(self.obs_space.dtype, (None, ) + self.obs_space.shape, name = 'obs')
self.actions_num = self.action_space.n
if self.use_action_masks:
print('using masks for action')
self.action_mask_ph = tf.placeholder('int32', (None, self.actions_num), name = 'actions_mask')
else:
self.action_mask_ph = None
self.mask = [False] * self.num_agents
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.normalize_input = self.config['normalize_input']
self.input_obs = self.obs_ph
if self.obs_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = self.obs_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=False)
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : self.num_agents,
'games_num' : self.num_agents,
'actions_num' : self.actions_num,
'prev_actions_ph' : None,
'action_mask_ph' : self.action_mask_ph
}
self.last_state = None
if self.network.is_rnn():
self.neglop , self.value, self.action, _,self.states_ph, self.masks_ph, self.lstm_state, self.initial_state, self.logits = self.network(self.run_dict, reuse=False)
self.last_state = self.initial_state * self.num_agents
else:
self.neglop , self.value, self.action, _, self.logits = self.network(self.run_dict, reuse=False)
self.variables = TensorFlowVariables([self.neglop, self.value, self.action], self.sess)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
def get_action(self, obs, is_determenistic = True):
ret_action = self.action
if self.network.is_rnn():
action, self.last_state, logits = self.sess.run([ret_action, self.lstm_state, self.logits], {self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
else:
action, logits = self.sess.run([ret_action, self.logits], {self.obs_ph : obs})
if is_determenistic:
return np.argmax(logits, axis = -1).astype(np.int32)
else:
return int(np.squeeze(action))
def get_masked_action(self, obs, mask, is_determenistic = False):
#if is_determenistic:
ret_action = self.action
if self.network.is_rnn():
action, self.last_state, logits = self.sess.run([ret_action, self.lstm_state, self.logits], {self.action_mask_ph : mask, self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
else:
action, logits = self.sess.run([ret_action, self.logits], {self.action_mask_ph : mask, self.obs_ph : obs})
if is_determenistic:
logits = np.array(logits)
return np.argmax(logits, axis = -1).astype(np.int32)
else:
return np.squeeze(action).astype(np.int32)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def reset(self):
if self.network.is_rnn():
self.last_state = self.initial_state
class DQNPlayer(BasePlayer):
def __init__(self, sess, config):
BasePlayer.__init__(self, sess, config)
self.dqn = dqnagent.DQNAgent(sess, 'player', self.obs_space, self.action_space, config)
def get_action(self, obs, is_determenistic = False):
return self.dqn.get_action(np.squeeze(obs), 0.0)
def restore(self, fn):
self.dqn.restore(fn)
def reset(self):
        # The DQN player keeps no recurrent state, so reset is a no-op.
        pass | 10,057 | Python | 38.754941 | 213 | 0.577608 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/networks.py | import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def sample_noise(shape, mean = 0.0, std = 1.0):
noise = tf.random_normal(shape, mean = mean, stddev = std)
return noise
# Added by Andrew Liao
# for NoisyNet-DQN (using Factorised Gaussian noise)
# modified from ```dense``` function
def noisy_dense(inputs, units, name, bias=True, activation=tf.identity, mean = 0.0, std = 1.0):
# the function used in eq.7,8
def f(x):
return tf.multiply(tf.sign(x), tf.pow(tf.abs(x), 0.5))
# Initializer of \mu and \sigma
mu_init = tf.random_uniform_initializer(minval=-1*1/np.power(inputs.get_shape().as_list()[1], 0.5),
maxval=1*1/np.power(inputs.get_shape().as_list()[1], 0.5))
sigma_init = tf.constant_initializer(0.4/np.power(inputs.get_shape().as_list()[1], 0.5))
# Sample noise from gaussian
p = sample_noise([inputs.get_shape().as_list()[1], 1], mean = 0.0, std = 1.0)
q = sample_noise([1, units], mean = 0.0, std = 1.0)
f_p = f(p); f_q = f(q)
w_epsilon = f_p*f_q; b_epsilon = tf.squeeze(f_q)
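    # Factorised noise: the weight noise is the outer product f(p) * f(q)^T, so only
    # (in + out) noise samples are drawn instead of in * out independent ones.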
# w = w_mu + w_sigma*w_epsilon
w_mu = tf.get_variable(name + "/w_mu", [inputs.get_shape()[1], units], initializer=mu_init)
w_sigma = tf.get_variable(name + "/w_sigma", [inputs.get_shape()[1], units], initializer=sigma_init)
w = w_mu + tf.multiply(w_sigma, w_epsilon)
ret = tf.matmul(inputs, w)
if bias:
# b = b_mu + b_sigma*b_epsilon
b_mu = tf.get_variable(name + "/b_mu", [units], initializer=mu_init)
b_sigma = tf.get_variable(name + "/b_sigma", [units], initializer=sigma_init)
b = b_mu + tf.multiply(b_sigma, b_epsilon)
return activation(ret + b)
else:
return activation(ret)
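# Helpers converting between a flat [env_num * nsteps, ...] batch and a per-step list of
# tensors (and back), as expected by the OpenAI-Baselines-style LSTM implementation below.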
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def lstm(xs, ms, s, scope, nh, nin):
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(), dtype=tf.float32 )
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init() )
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
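        # m is the done flag for the step; multiplying by (1 - m) resets the cell and
        # hidden state at episode boundaries before the recurrent update.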
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/tf.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, nin):
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init())
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init())
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
tk = 0
for idx, (x, m) in enumerate(zip(xs, ms)):
print(tk)
tk = tk + 1
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
'''
Uses the LSTM implementation from OpenAI Baselines as the most convenient way to handle dones.
TODO: try a more efficient TensorFlow implementation
'''
def openai_lstm(name, inputs, states_ph, dones_ph, units, env_num, batch_num, layer_norm=True):
nbatch = batch_num
nsteps = nbatch // env_num
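    # The flat batch is assumed to be env-major: the nsteps time steps of each
    # environment are contiguous, which is what batch_to_seq/seq_to_batch expect.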
print('nbatch: ', nbatch)
print('env_num: ', env_num)
dones_ph = tf.to_float(dones_ph)
inputs_seq = batch_to_seq(inputs, env_num, nsteps)
dones_seq = batch_to_seq(dones_ph, env_num, nsteps)
nin = inputs.get_shape()[1].value
with tf.variable_scope(name):
if layer_norm:
hidden_seq, final_state = lnlstm(inputs_seq, dones_seq, states_ph, scope='lnlstm', nin=nin, nh=units)
else:
hidden_seq, final_state = lstm(inputs_seq, dones_seq, states_ph, scope='lstm', nin=nin, nh=units)
hidden = seq_to_batch(hidden_seq)
initial_state = np.zeros(states_ph.shape.as_list(), dtype=float)
return [hidden, final_state, initial_state]
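# Distributional (C51-style) heads: each action gets a categorical distribution over a
# fixed set of value atoms instead of a single Q-value.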
def distributional_output(inputs, actions_num, atoms_num):
distributed_qs = tf.layers.dense(inputs=inputs, activation=tf.nn.softmax, units=atoms_num * actions_num)
distributed_qs = tf.reshape(distributed_qs, shape = [-1, actions_num, atoms_num])
distributed_qs = tf.nn.softmax(distributed_qs, dim = -1)
return distributed_qs
def distributional_noisy_output(inputs, actions_num, atoms_num, name, mean = 0.0, std = 1.0):
distributed_qs = noisy_dense(inputs=inputs, name=name, activation=tf.nn.softmax, units=atoms_num * actions_num, mean=mean, std=std)
distributed_qs = tf.reshape(distributed_qs, shape = [-1, actions_num, atoms_num])
distributed_qs = tf.nn.softmax(distributed_qs, dim = -1)
return distributed_qs
def atari_conv_net(inputs):
NUM_FILTERS_1 = 32
NUM_FILTERS_2 = 64
NUM_FILTERS_3 = 64
conv1 = tf.layers.conv2d(inputs=inputs,
filters=NUM_FILTERS_1,
kernel_size=[8, 8],
strides=(4, 4),
activation=tf.nn.relu)
conv2 = tf.layers.conv2d(inputs=conv1,
filters=NUM_FILTERS_2,
kernel_size=[4, 4],
strides=(2, 2),
activation=tf.nn.relu)
conv3 = tf.layers.conv2d(inputs=conv2,
filters=NUM_FILTERS_3,
kernel_size=[3, 3],
strides=(1, 1),
activation=tf.nn.relu)
return conv3
def dqn_network(name, inputs, actions_num, atoms_num = 1, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden = tf.layers.dense(inputs=flatten,
units=NUM_HIDDEN_NODES,
activation=tf.nn.relu)
if atoms_num == 1:
logits = tf.layers.dense(inputs=hidden, units=actions_num)
else:
logits = distributional_output(inputs=hidden, actions_num=actions_num, atoms_num=atoms_num)
return logits
'''
dueling_type = 'SIMPLE', 'AVERAGE', 'MAX'
'''
def dueling_dqn_network(name, inputs, actions_num, reuse=False, dueling_type = 'AVERAGE'):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden_value = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
hidden_advantage = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden_value, units=1)
advantage = tf.layers.dense(inputs=hidden_advantage, units=actions_num)
outputs = None
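        # Dueling aggregation: Q(s, a) = V(s) + A(s, a) - agg_a A(s, a); subtracting the
        # mean (or max) advantage keeps the value/advantage decomposition identifiable.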
if dueling_type == 'SIMPLE':
outputs = value + advantage
if dueling_type == 'AVERAGE':
outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
if dueling_type == 'MAX':
outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
return outputs
def dueling_dqn_network_with_batch_norm(name, inputs, actions_num, reuse=False, dueling_type = 'AVERAGE', is_train=True):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net_batch_norm(inputs, is_train)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden_value = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
hidden_advantage = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden_value, units=1)
advantage = tf.layers.dense(inputs=hidden_advantage, units=actions_num)
outputs = None
if dueling_type == 'SIMPLE':
outputs = value + advantage
if dueling_type == 'AVERAGE':
outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
if dueling_type == 'MAX':
outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
return outputs
def noisy_dqn_network(name, inputs, actions_num, mean, std, atoms_num = 1, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden = noisy_dense(inputs=flatten,
units=NUM_HIDDEN_NODES,
activation=tf.nn.relu, name = 'noisy_fc1')
if atoms_num == 1:
logits = noisy_dense(inputs=hidden, units=actions_num, name = 'noisy_fc2', mean = mean, std = std)
else:
logits = distributional_noisy_output(inputs=hidden, actions_num=actions_num, atoms_num = atoms_num, name = 'noisy_fc2', mean = mean, std = std)
return logits
'''
dueling_type = 'SIMPLE', 'AVERAGE', 'MAX'
'''
def noisy_dueling_dqn_network(name, inputs, actions_num, mean, std, reuse=False, dueling_type = 'AVERAGE'):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden_value = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_v1', mean = mean, std = std)
hidden_advantage = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_a1', mean = mean, std = std)
value = noisy_dense(inputs=hidden_value, units=1, name = 'noisy_v2', mean = mean, std = std)
advantage = noisy_dense(inputs=hidden_advantage, units=actions_num, name = 'noisy_a2', mean = mean, std = std)
outputs = None
if dueling_type == 'SIMPLE':
outputs = value + advantage
if dueling_type == 'AVERAGE':
outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
if dueling_type == 'MAX':
outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
return outputs
def noisy_dueling_dqn_network_with_batch_norm(name, inputs, actions_num, mean, std, reuse=False, dueling_type = 'AVERAGE', is_train=True):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net_batch_norm(inputs, is_train)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden_value = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_v1', mean = mean, std = std)
hidden_advantage = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_a1', mean = mean, std = std)
value = noisy_dense(inputs=hidden_value, units=1, name = 'noisy_v2', mean = mean, std = std)
advantage = noisy_dense(inputs=hidden_advantage, units=actions_num, name = 'noisy_a2', mean = mean, std = std)
outputs = None
if dueling_type == 'SIMPLE':
outputs = value + advantage
if dueling_type == 'AVERAGE':
outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
if dueling_type == 'MAX':
outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
return outputs
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def default_small_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False, activation=tf.nn.elu):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 128
NUM_HIDDEN_NODES1 = 64
NUM_HIDDEN_NODES2 = 32
hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
value = tf.layers.dense(inputs=hidden2c, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden2a, units=actions_num, kernel_initializer=normc_initializer(0.01), activation=None)
var = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
return logits, value
def default_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False, activation=tf.nn.elu):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 256
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
        value = tf.layers.dense(inputs=hidden2c, units=1, activation=None, kernel_initializer=normc_initializer(1.0))
if continuous:
mu = tf.layers.dense(inputs=hidden2a, units=actions_num, kernel_initializer=normc_initializer(0.01), activation=None)
var = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
return logits, value
def default_a2c_network_separated_logstd(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 256
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
hidden_init = normc_initializer(1.0) # tf.random_normal_initializer(stddev= 1.0)
hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, activation=tf.nn.elu, kernel_initializer=hidden_init)
value = tf.layers.dense(inputs=hidden2c, units=1, activation=None, kernel_initializer=hidden_init)
if continuous:
mu = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None,)
#std = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
#logstd = tf.layers.dense(inputs=hidden2a, units=actions_num)
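            # State-independent log-std: a single trainable vector shared across states,
            # broadcast to the batch shape via mu * 0 + logstd below.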
logstd = tf.get_variable(name='log_std', shape=(actions_num), initializer=tf.constant_initializer(0.0), trainable=True)
return mu, mu * 0 + logstd, value
else:
logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
return logits, value
def default_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 256
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
hidden0 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.relu)
hidden1 = tf.layers.dense(inputs=hidden0, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden2, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden2, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hidden2, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2, units=actions_num, activation=None)
return logits, value
def default_a2c_lstm_network(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
env_num = games_num
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 128
NUM_HIDDEN_NODES1 = 64
NUM_HIDDEN_NODES2 = 64
LSTM_UNITS = 64
hidden0 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.relu)
hidden1 = tf.layers.dense(inputs=hidden0, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
dones_ph = tf.placeholder(tf.float32, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden2, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
def default_a2c_lstm_network_separated(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
env_num = games_num
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 256
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
LSTM_UNITS = 128
hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu)
hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu)
hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu)
hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu)
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
hidden = tf.concat((hidden1a, hidden1c), axis=1)
lstm_out, lstm_state, initial_state = openai_lstm('lstm_a', hidden, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
value = tf.layers.dense(inputs=lstm_outc, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None, kernel_initializer=tf.random_uniform_initializer(-0.01, 0.01))
var = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_lstm_network_separated(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
env_num = games_num
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES1 = 32
NUM_HIDDEN_NODES2 = 32
#NUM_HIDDEN_NODES3 = 16
LSTM_UNITS = 16
hidden1c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
hidden1a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2* 2*LSTM_UNITS])
states_a, states_c = tf.split(states_ph, 2, axis=1)
lstm_outa, lstm_statae, initial_statea = openai_lstm('lstm_actions', hidden2a, dones_ph=dones_ph, states_ph=states_a, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
lstm_outc, lstm_statec, initial_statec = openai_lstm('lstm_critics', hidden2c, dones_ph=dones_ph, states_ph=states_c, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
initial_state = np.concatenate((initial_statea, initial_statec), axis=1)
lstm_state = tf.concat( values=(lstm_statae, lstm_statec), axis=1)
#lstm_outa = tf.layers.dense(inputs=lstm_outa, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
#lstm_outc = tf.layers.dense(inputs=lstm_outc, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
value = tf.layers.dense(inputs=lstm_outc, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_lstm_network(name, inputs, actions_num, env_num, batch_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES1 = 32
NUM_HIDDEN_NODES2 = 32
LSTM_UNITS = 16
hidden1 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden2, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_network_separated(name, inputs, actions_num, activation = tf.nn.elu, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES1 = 64
NUM_HIDDEN_NODES2 = 64
hidden1c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=activation)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, activation=activation)
hidden1a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=activation)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, activation=activation)
value = tf.layers.dense(inputs=hidden2c, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
return logits, value
def simple_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
hidden1 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden2, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden2, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hidden2, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2, units=actions_num, activation=None)
return logits, value
def atari_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3a = atari_conv_net(inputs)
conv3c = atari_conv_net(inputs)
flattena = tf.contrib.layers.flatten(inputs = conv3a)
flattenc = tf.contrib.layers.flatten(inputs = conv3c)
hiddena = tf.layers.dense(inputs=flattena, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
hiddenc = tf.layers.dense(inputs=flattenc, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hiddenc, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hiddena, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hiddena, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hiddena, units=actions_num, activation=None)
return logits, value
def atari_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hidden, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden, units=actions_num, activation=None)
return logits, value
def atari_a2c_network_lstm(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
env_num = games_num
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
LSTM_UNITS = 256
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
| 32,489 | Python | 50.984 | 181 | 0.645141 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/a2c_discrete.py | from rl_games.common import tr_helpers, vecenv
#from rl_games.algos_tf14 import networks
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque, OrderedDict
from tensorboardX import SummaryWriter
import gym
from datetime import datetime
def swap_and_flatten01(arr):
"""
swap and then flatten axes 0 and 1
"""
if arr is None:
return arr
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
class A2CAgent:
def __init__(self, sess, base_name, observation_space, action_space, config):
observation_shape = observation_space.shape
self.use_action_masks = config.get('use_action_masks', False)
self.is_train = config.get('is_train', True)
self.self_play = config.get('self_play', False)
self.name = base_name
self.config = config
self.env_name = config['env_name']
self.ppo = config['ppo']
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.e_clip = config['e_clip']
self.clip_value = config['clip_value']
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_actors = config['num_actors']
self.env_config = self.config.get('env_config', {})
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.num_agents = self.vec_env.get_number_of_agents()
self.horizon_length = config['horizon_length']
self.seq_len = self.config['seq_length']
self.normalize_advantage = config['normalize_advantage']
self.normalize_input = self.config['normalize_input']
self.state_shape = observation_shape
self.critic_coef = config['critic_coef']
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.sess = sess
self.grad_norm = config['grad_norm']
self.gamma = self.config['gamma']
self.tau = self.config['tau']
self.ignore_dead_batches = self.config.get('ignore_dead_batches', False)
self.dones = np.asarray([False]*self.num_actors *self.num_agents, dtype=np.bool)
self.current_rewards = np.asarray([0]*self.num_actors *self.num_agents, dtype=np.float32)
self.current_lengths = np.asarray([0]*self.num_actors *self.num_agents, dtype=np.float32)
self.games_to_track = self.config.get('games_to_track', 100)
self.game_rewards = deque([], maxlen=self.games_to_track)
self.game_lengths = deque([], maxlen=self.games_to_track)
self.game_scores = deque([], maxlen=self.games_to_track)
self.obs_ph = tf.placeholder(observation_space.dtype, (None, ) + observation_shape, name = 'obs')
self.target_obs_ph = tf.placeholder(observation_space.dtype, (None, ) + observation_shape, name = 'target_obs')
self.actions_num = action_space.n
self.actions_ph = tf.placeholder('int32', (None,), name = 'actions')
if self.use_action_masks:
self.action_mask_ph = tf.placeholder('int32', (None, self.actions_num), name = 'actions_mask')
else:
self.action_mask_ph = None
self.old_logp_actions_ph = tf.placeholder('float32', (None, ), name = 'old_logpactions')
self.rewards_ph = tf.placeholder('float32', (None,), name = 'rewards')
self.old_values_ph = tf.placeholder('float32', (None,), name = 'old_values')
self.advantages_ph = tf.placeholder('float32', (None,), name = 'advantages')
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
self.input_obs = self.obs_ph
self.input_target_obs = self.target_obs_ph
if observation_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_target_obs = tf.to_float(self.input_target_obs) / 255.0
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
if self.is_polynom_decay_lr:
self.lr_multiplier = tf.train.polynomial_decay(1.0, self.epoch_num, config['max_epochs'], end_learning_rate=0.001, power=config.get('decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, self.epoch_num,config['max_epochs'], decay_rate = config['decay_rate'])
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = observation_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=True)
self.input_target_obs = self.moving_mean_std.normalize(self.input_target_obs, train=False)
games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
self.train_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : self.config['minibatch_size'],
'games_num' : games_num,
'actions_num' : self.actions_num,
'prev_actions_ph' : self.actions_ph,
'action_mask_ph' : None
}
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_target_obs,
'batch_num' : self.num_actors * self.num_agents,
'games_num' : self.num_actors * self.num_agents,
'actions_num' : self.actions_num,
'prev_actions_ph' : None,
'action_mask_ph' : self.action_mask_ph
}
self.states = None
if self.network.is_rnn():
self.logp_actions ,self.state_values, self.action, self.entropy, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_states_ph, self.target_masks_ph, self.target_lstm_state, self.target_initial_state, self.logits = self.network(self.run_dict, reuse=True)
self.states = self.target_initial_state
else:
self.logp_actions ,self.state_values, self.action, self.entropy = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.logits = self.network(self.run_dict, reuse=True)
self.saver = tf.train.Saver()
self.variables = TensorFlowVariables([self.target_action, self.target_state_values, self.target_neglogp], self.sess)
if self.is_train:
self.setup_losses()
self.sess.run(tf.global_variables_initializer())
def setup_losses(self):
curr_e_clip = self.e_clip * self.lr_multiplier
if (self.ppo):
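            # Both log-prob tensors hold negative log-probabilities, so
            # exp(old - new) equals the PPO probability ratio pi_new(a|s) / pi_old(a|s).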
self.prob_ratio = tf.exp(self.old_logp_actions_ph - self.logp_actions)
self.pg_loss_unclipped = -tf.multiply(self.advantages_ph, self.prob_ratio)
self.pg_loss_clipped = -tf.multiply(self.advantages_ph, tf.clip_by_value(self.prob_ratio, 1.- curr_e_clip, 1.+ curr_e_clip))
self.actor_loss = tf.maximum(self.pg_loss_unclipped, self.pg_loss_clipped)
else:
self.actor_loss = self.logp_actions * self.advantages_ph
self.actor_loss = tf.reduce_mean(self.actor_loss)
self.c_loss = (tf.squeeze(self.state_values) - self.rewards_ph)**2
if self.clip_value:
self.cliped_values = self.old_values_ph + tf.clip_by_value(tf.squeeze(self.state_values) - self.old_values_ph, - curr_e_clip, curr_e_clip)
self.c_loss_clipped = tf.square(self.cliped_values - self.rewards_ph)
self.critic_loss = tf.maximum(self.c_loss, self.c_loss_clipped)
else:
self.critic_loss = self.c_loss
self.critic_loss = tf.reduce_mean(self.critic_loss)
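        # Cheap KL proxy: 0.5 * E[(logp_old - logp_new)^2]; it is not differentiated
        # through and only drives the adaptive learning-rate rule below.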
self.kl_approx = 0.5 * tf.stop_gradient(tf.reduce_mean((self.old_logp_actions_ph - self.logp_actions)**2))
if self.is_adaptive_lr:
self.current_lr = tf.where(self.kl_approx > (2.0 * self.kl_threshold), tf.maximum(self.current_lr / 1.5, 1e-6), self.current_lr)
self.current_lr = tf.where(self.kl_approx < (0.5 * self.kl_threshold), tf.minimum(self.current_lr * 1.5, 1e-2), self.current_lr)
self.loss = self.actor_loss + 0.5 * self.critic_coef * self.critic_loss - self.config['entropy_coef'] * self.entropy
self.reg_loss = tf.losses.get_regularization_loss()
self.loss += self.reg_loss
self.train_step = tf.train.AdamOptimizer(self.current_lr * self.lr_multiplier)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
grads = tf.gradients(self.loss, self.weights)
if self.config['truncate_grads']:
grads, _ = tf.clip_by_global_norm(grads, self.grad_norm)
grads = list(zip(grads, self.weights))
self.train_op = self.train_step.apply_gradients(grads)
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def get_action_values(self, obs):
run_ops = [self.target_action, self.target_state_values, self.target_neglogp]
if self.network.is_rnn():
run_ops.append(self.target_lstm_state)
return self.sess.run(run_ops, {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return (*self.sess.run(run_ops, {self.target_obs_ph : obs}), None)
def get_masked_action_values(self, obs, action_masks):
run_ops = [self.target_action, self.target_state_values, self.target_neglogp, self.logits]
if self.network.is_rnn():
run_ops.append(self.target_lstm_state)
return self.sess.run(run_ops, {self.action_mask_ph: action_masks, self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return (*self.sess.run(run_ops, {self.action_mask_ph: action_masks, self.target_obs_ph : obs}), None)
def get_values(self, obs):
if self.network.is_rnn():
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs})
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def play_steps(self):
# here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = []
epinfos = []
# for n in range number of steps
for _ in range(self.horizon_length):
if self.network.is_rnn():
mb_states.append(self.states)
if self.use_action_masks:
masks = self.vec_env.get_action_masks()
if self.use_action_masks:
actions, values, neglogpacs, _, self.states = self.get_masked_action_values(self.obs, masks)
else:
actions, values, neglogpacs, self.states = self.get_action_values(self.obs)
actions = np.squeeze(actions)
values = np.squeeze(values)
neglogpacs = np.squeeze(neglogpacs)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones.copy())
self.obs[:], rewards, self.dones, infos = self.vec_env.step(actions)
self.current_rewards += rewards
self.current_lengths += 1
for reward, length, done, info in zip(self.current_rewards[::self.num_agents], self.current_lengths[::self.num_agents], self.dones[::self.num_agents], infos):
if done:
self.game_rewards.append(reward)
self.game_lengths.append(length)
game_res = 1.0
if isinstance(info, dict):
game_res = info.get('battle_won', 0.5)
self.game_scores.append(game_res)
self.current_rewards = self.current_rewards * (1.0 - self.dones)
self.current_lengths = self.current_lengths * (1.0 - self.dones)
shaped_rewards = self.rewards_shaper(rewards)
epinfos.append(infos)
mb_rewards.append(shaped_rewards)
#using openai baseline approach
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
mb_states = np.asarray(mb_states, dtype=np.float32)
last_values = self.get_values(self.obs)
last_values = np.squeeze(last_values)
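        # Generalized Advantage Estimation (GAE): delta_t = r_t + gamma * V_{t+1} * nonterminal - V_t,
        # A_t = delta_t + gamma * tau * nonterminal * A_{t+1}; returns are advantages plus values.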
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
if self.network.is_rnn():
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states )), epinfos)
else:
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), None, epinfos)
return result
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def train(self):
self.obs = self.vec_env.reset()
batch_size = self.horizon_length * self.num_actors * self.num_agents
batch_size_envs = self.horizon_length * self.num_actors
minibatch_size = self.config['minibatch_size']
mini_epochs_num = self.config['mini_epochs']
num_minibatches = batch_size // minibatch_size
last_lr = self.config['learning_rate']
frame = 0
update_time = 0
self.last_mean_rewards = -100500
play_time = 0
epoch_num = 0
max_epochs = self.config.get('max_epochs', 1e6)
start_time = time.time()
total_time = 0
rep_count = 0
while True:
play_time_start = time.time()
epoch_num = self.update_epoch()
frame += batch_size_envs
obses, returns, dones, actions, values, neglogpacs, lstm_states, _ = self.play_steps()
advantages = returns - values
if self.normalize_advantage:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
a_losses = []
c_losses = []
entropies = []
kls = []
play_time_end = time.time()
play_time = play_time_end - play_time_start
update_time_start = time.time()
if self.network.is_rnn():
total_games = batch_size // self.seq_len
num_games_batch = minibatch_size // self.seq_len
game_indexes = np.arange(total_games)
flat_indexes = np.arange(total_games * self.seq_len).reshape(total_games, self.seq_len)
lstm_states = lstm_states[::self.seq_len]
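                # Minibatches are assembled from whole sequences of length seq_len; keeping only
                # every seq_len-th LSTM state aligns each stored state with the start of its sequence.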
for _ in range(0, mini_epochs_num):
np.random.shuffle(game_indexes)
for i in range(0, num_minibatches):
batch = range(i * num_games_batch, (i + 1) * num_games_batch)
mb_indexes = game_indexes[batch]
mbatch = flat_indexes[mb_indexes].ravel()
                        feed_dict = {}
                        feed_dict[self.old_values_ph] = values[mbatch]
                        feed_dict[self.old_logp_actions_ph] = neglogpacs[mbatch]
                        feed_dict[self.advantages_ph] = advantages[mbatch]
                        feed_dict[self.rewards_ph] = returns[mbatch]
                        feed_dict[self.actions_ph] = actions[mbatch]
                        feed_dict[self.obs_ph] = obses[mbatch]
                        feed_dict[self.masks_ph] = dones[mbatch]
                        feed_dict[self.states_ph] = lstm_states[mb_indexes]
                        feed_dict[self.learning_rate_ph] = last_lr
                        run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_approx, self.current_lr, self.lr_multiplier, self.train_op]
                        run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
                        a_loss, c_loss, entropy, kl, last_lr, lr_mul, _, _ = self.sess.run(run_ops, feed_dict)
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
else:
for _ in range(0, mini_epochs_num):
permutation = np.random.permutation(batch_size)
obses = obses[permutation]
returns = returns[permutation]
actions = actions[permutation]
values = values[permutation]
neglogpacs = neglogpacs[permutation]
advantages = advantages[permutation]
for i in range(0, num_minibatches):
batch = range(i * minibatch_size, (i + 1) * minibatch_size)
                        feed_dict = {self.obs_ph: obses[batch], self.actions_ph : actions[batch], self.rewards_ph : returns[batch],
                                     self.advantages_ph : advantages[batch], self.old_logp_actions_ph : neglogpacs[batch], self.old_values_ph : values[batch]}
                        feed_dict[self.learning_rate_ph] = last_lr
                        run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_approx, self.current_lr, self.lr_multiplier, self.train_op]
                        run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
                        a_loss, c_loss, entropy, kl, last_lr, lr_mul, _, _ = self.sess.run(run_ops, feed_dict)
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
update_time_end = time.time()
update_time = update_time_end - update_time_start
sum_time = update_time + play_time
total_time = update_time_end - start_time
if True:
scaled_time = self.num_agents * sum_time
                print('frames per second: ', batch_size / scaled_time)
self.writer.add_scalar('performance/fps', batch_size / scaled_time, frame)
self.writer.add_scalar('performance/update_time', update_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('losses/a_loss', np.mean(a_losses), frame)
self.writer.add_scalar('losses/c_loss', np.mean(c_losses), frame)
self.writer.add_scalar('losses/entropy', np.mean(entropies), frame)
self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
self.writer.add_scalar('info/kl', np.mean(kls), frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
if len(self.game_rewards) > 0:
mean_rewards = np.mean(self.game_rewards)
mean_lengths = np.mean(self.game_lengths)
mean_scores = np.mean(self.game_scores)
self.writer.add_scalar('rewards/mean', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/mean', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
self.writer.add_scalar('scores/mean', mean_scores, frame)
self.writer.add_scalar('scores/time', mean_scores, total_time)
if rep_count % 10 == 0:
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
rep_count += 1
if mean_rewards > self.last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
self.save("./nn/" + self.config['name'])
if self.last_mean_rewards > self.config['score_to_win']:
print('Network won!')
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
if epoch_num > max_epochs:
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
print('MAX EPOCHS NUM!')
return self.last_mean_rewards, epoch_num
update_time = 0 | 22,809 | Python | 49.688889 | 232 | 0.577404 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/dqnagent.py | from rl_games.common import tr_helpers, vecenv, experience, env_configurations
from rl_games.common.categorical import CategoricalQ
from rl_games.algos_tf14 import networks, models
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque
from tensorboardX import SummaryWriter
from datetime import datetime
class DQNAgent:
def __init__(self, sess, base_name, observation_space, action_space, config):
observation_shape = observation_space.shape
actions_num = action_space.n
self.config = config
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.games_to_track = config.get('games_to_track', 100)
self.max_epochs = config.get('max_epochs', 1e6)
self.game_rewards = deque([], maxlen=self.games_to_track)
self.game_lengths = deque([], maxlen=self.games_to_track)
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
if self.is_polynom_decay_lr:
            self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, end_learning_rate=0.001, power=config.get('decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, decay_rate = config['decay_rate'])
self.env_name = config['env_name']
self.network = config['network']
self.state_shape = observation_shape
self.actions_num = actions_num
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.epsilon = self.config['epsilon']
self.rewards_shaper = self.config['reward_shaper']
self.epsilon_processor = tr_helpers.LinearValueProcessor(self.config['epsilon'], self.config['min_epsilon'], self.config['epsilon_decay_frames'])
self.beta_processor = tr_helpers.LinearValueProcessor(self.config['priority_beta'], self.config['max_beta'], self.config['beta_decay_frames'])
if self.env_name:
self.env = env_configurations.configurations[self.env_name]['env_creator']()
self.sess = sess
self.horizon_length = self.config['horizon_length']
self.states = deque([], maxlen=self.horizon_length)
self.is_prioritized = config['replay_buffer_type'] != 'normal'
self.atoms_num = self.config['atoms_num']
self.is_categorical = self.atoms_num > 1
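        # Distributional (C51-style) DQN: with atoms_num > 1 the network outputs a categorical
        # distribution over atoms evenly spaced in [v_min, v_max]; Q-values are its expectation.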
if self.is_categorical:
self.v_min = self.config['v_min']
self.v_max = self.config['v_max']
self.delta_z = (self.v_max - self.v_min) / (self.atoms_num - 1)
self.all_z = tf.range(self.v_min, self.v_max + self.delta_z, self.delta_z)
self.categorical = CategoricalQ(self.atoms_num, self.v_min, self.v_max)
if not self.is_prioritized:
self.exp_buffer = experience.ReplayBuffer(config['replay_buffer_size'], observation_space)
else:
self.exp_buffer = experience.PrioritizedReplayBuffer(config['replay_buffer_size'], config['priority_alpha'], observation_space)
self.sample_weights_ph = tf.placeholder(tf.float32, shape= [None,] , name='sample_weights')
self.obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'obs_ph')
self.actions_ph = tf.placeholder(tf.int32, shape=[None,], name = 'actions_ph')
self.rewards_ph = tf.placeholder(tf.float32, shape=[None,], name = 'rewards_ph')
self.next_obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'next_obs_ph')
self.is_done_ph = tf.placeholder(tf.float32, shape=[None,], name = 'is_done_ph')
self.is_not_done = 1 - self.is_done_ph
self.name = base_name
self.gamma = self.config['gamma']
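        # n-step returns: rewards are accumulated over horizon_length steps in play_steps,
        # so the bootstrap term is discounted by gamma ** horizon_length.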
self.gamma_step = self.gamma**self.horizon_length
self.input_obs = self.obs_ph
self.input_next_obs = self.next_obs_ph
if observation_space.dtype == np.uint8:
print('scaling obs')
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_next_obs = tf.to_float(self.input_next_obs) / 255.0
if self.atoms_num == 1:
self.setup_qvalues(actions_num)
else:
self.setup_cat_qvalues(actions_num)
self.reg_loss = tf.losses.get_regularization_loss()
self.td_loss_mean += self.reg_loss
self.learning_rate = self.config['learning_rate']
self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
self.saver = tf.train.Saver()
self.assigns_op = [tf.assign(w_target, w_self, validate_shape=True) for w_self, w_target in zip(self.weights, self.target_weights)]
self.variables = TensorFlowVariables(self.qvalues, self.sess)
if self.env_name:
sess.run(tf.global_variables_initializer())
self._reset()
def _get_q(self, probs):
res = probs * self.all_z
return tf.reduce_sum(res, axis=2)
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def setup_cat_qvalues(self, actions_num):
config = {
'name' : 'agent',
'inputs' : self.input_obs,
'actions_num' : actions_num,
}
self.logits = self.network(config, reuse=False)
self.qvalues_c = tf.nn.softmax(self.logits, axis = 2)
self.qvalues = self._get_q(self.qvalues_c)
config = {
'name' : 'target',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.target_logits = self.network(config, reuse=False)
self.target_qvalues_c = tf.nn.softmax(self.target_logits, axis = 2)
self.target_qvalues = self._get_q(self.target_qvalues_c)
        if self.config['is_double']:
config = {
'name' : 'agent',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.next_logits = tf.stop_gradient(self.network(config, reuse=True))
self.next_qvalues_c = tf.nn.softmax(self.next_logits, axis = 2)
self.next_qvalues = self._get_q(self.next_qvalues_c)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
self.current_action_values = tf.reduce_sum(tf.expand_dims(tf.one_hot(self.actions_ph, actions_num), -1) * self.logits, reduction_indices = (1,))
        if self.config['is_double']:
self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
else:
self.next_selected_actions = tf.argmax(self.target_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
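        # The projected target distribution is computed outside the graph
        # (categorical.distr_projection in train()) and fed through proj_dir_ph; the loss below is
        # the cross-entropy between it and the predicted distribution for the taken action.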
self.proj_dir_ph = tf.placeholder(tf.float32, shape=[None, self.atoms_num], name = 'best_proj_dir')
log_probs = tf.nn.log_softmax( self.current_action_values, axis=1)
if self.is_prioritized:
# we need to return loss to update priority buffer
self.abs_errors = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1) + 1e-5
self.td_loss = self.abs_errors * self.sample_weights_ph
else:
self.td_loss = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1)
self.td_loss_mean = tf.reduce_mean(self.td_loss)
def setup_qvalues(self, actions_num):
config = {
'name' : 'agent',
'inputs' : self.input_obs,
'actions_num' : actions_num,
}
self.qvalues = self.network(config, reuse=False)
config = {
'name' : 'target',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.target_qvalues = tf.stop_gradient(self.network(config, reuse=False))
        if self.config['is_double']:
config = {
'name' : 'agent',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.next_qvalues = tf.stop_gradient(self.network(config, reuse=True))
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
self.current_action_qvalues = tf.reduce_sum(tf.one_hot(self.actions_ph, actions_num) * self.qvalues, reduction_indices = 1)
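        # Double DQN: the online network selects argmax_a Q(s', a) and the target network
        # evaluates it; in the plain-DQN branch the target is max_a of the target network itself.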
        if self.config['is_double']:
self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( self.target_qvalues * self.next_selected_actions_onehot , reduction_indices=[1,] ))
else:
self.next_state_values_target = tf.stop_gradient(tf.reduce_max(self.target_qvalues, reduction_indices=1))
        self.reference_qvalues = self.rewards_ph + self.gamma_step * self.is_not_done * self.next_state_values_target
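        # Prioritized replay: |TD error| is exposed to refresh priorities, and the Huber loss is
        # weighted by the importance-sampling weights fed via sample_weights_ph.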
if self.is_prioritized:
# we need to return l1 loss to update priority buffer
self.abs_errors = tf.abs(self.current_action_qvalues - self.reference_qvalues) + 1e-5
# the same as multiply gradients later (other way is used in different examples over internet)
self.td_loss = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.NONE) * self.sample_weights_ph
self.td_loss_mean = tf.reduce_mean(self.td_loss)
else:
self.td_loss_mean = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.MEAN)
self.reg_loss = tf.losses.get_regularization_loss()
self.td_loss_mean += self.reg_loss
self.learning_rate = self.config['learning_rate']
if self.env_name:
self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def _reset(self):
self.states.clear()
if self.env_name:
self.state = self.env.reset()
self.total_reward = 0.0
self.total_shaped_reward = 0.0
self.step_count = 0
def get_qvalues(self, state):
return self.sess.run(self.qvalues, {self.obs_ph: state})
def get_action(self, state, epsilon=0.0):
if np.random.random() < epsilon:
action = self.env.action_space.sample()
else:
qvals = self.get_qvalues([state])
action = np.argmax(qvals)
return action
def play_steps(self, steps, epsilon=0.0):
done_reward = None
done_shaped_reward = None
done_steps = None
steps_rewards = 0
cur_gamma = 1
cur_states_len = len(self.states)
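        # self.states buffers the last horizon_length (state, action, shaped_reward) entries;
        # once it is full, their discounted sum is stored as a single n-step transition.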
# always break after one
while True:
if cur_states_len > 0:
state = self.states[-1][0]
else:
state = self.state
action = self.get_action(state, epsilon)
new_state, reward, is_done, _ = self.env.step(action)
#reward = reward * (1 - is_done)
self.step_count += 1
self.total_reward += reward
shaped_reward = self.rewards_shaper(reward)
self.total_shaped_reward += shaped_reward
self.states.append([new_state, action, shaped_reward])
if len(self.states) < steps:
break
for i in range(steps):
sreward = self.states[i][2]
steps_rewards += sreward * cur_gamma
cur_gamma = cur_gamma * self.gamma
next_state, current_action, _ = self.states[0]
self.exp_buffer.add(self.state, current_action, steps_rewards, new_state, is_done)
self.state = next_state
break
if is_done:
done_reward = self.total_reward
done_steps = self.step_count
done_shaped_reward = self.total_shaped_reward
self._reset()
return done_reward, done_shaped_reward, done_steps
def load_weigths_into_target_network(self):
self.sess.run(self.assigns_op)
def sample_batch(self, exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch
}
def sample_prioritized_batch(self, exp_replay, batch_size, beta):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch, sample_weights, sample_idxes = exp_replay.sample(batch_size, beta)
batch = { self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch, self.sample_weights_ph: sample_weights }
return [batch , sample_idxes]
def train(self):
mem_free_steps = 0
self.last_mean_rewards = -100500
epoch_num = 0
frame = 0
update_time = 0
play_time = 0
start_time = time.time()
total_time = 0
self.load_weigths_into_target_network()
for _ in range(0, self.config['num_steps_fill_buffer']):
self.play_steps(self.horizon_length, self.epsilon)
steps_per_epoch = self.config['steps_per_epoch']
num_epochs_to_copy = self.config['num_epochs_to_copy']
batch_size = self.config['batch_size']
lives_reward = self.config['lives_reward']
episodes_to_log = self.config['episodes_to_log']
frame = 0
play_time = 0
update_time = 0
rewards = []
shaped_rewards = []
steps = []
losses = deque([], maxlen=100)
while True:
epoch_num = self.update_epoch()
t_play_start = time.time()
self.epsilon = self.epsilon_processor(frame)
self.beta = self.beta_processor(frame)
for _ in range(0, steps_per_epoch):
reward, shaped_reward, step = self.play_steps(self.horizon_length, self.epsilon)
                if reward is not None:
self.game_lengths.append(step)
self.game_rewards.append(reward)
#shaped_rewards.append(shaped_reward)
t_play_end = time.time()
play_time += t_play_end - t_play_start
# train
frame = frame + steps_per_epoch
t_start = time.time()
if self.is_categorical:
if self.is_prioritized:
batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.horizon_length)
batch[self.proj_dir_ph] = projected
_, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
self.exp_buffer.update_priorities(idxes, errors_update)
else:
batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.horizon_length)
batch[self.proj_dir_ph] = projected
_, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
else:
if self.is_prioritized:
batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
_, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
self.exp_buffer.update_priorities(idxes, errors_update)
else:
batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
_, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
losses.append(loss_t)
t_end = time.time()
update_time += t_end - t_start
total_time += update_time
if frame % 1000 == 0:
mem_free_steps += 1
if mem_free_steps == 10:
mem_free_steps = 0
tr_helpers.free_mem()
sum_time = update_time + play_time
            print('frames per second: ', 1000 / sum_time)
self.writer.add_scalar('performance/fps', 1000 / sum_time, frame)
self.writer.add_scalar('performance/upd_time', update_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('losses/td_loss', np.mean(losses), frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/lr', self.learning_rate*lr_mul, frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
self.writer.add_scalar('info/epsilon', self.epsilon, frame)
if self.is_prioritized:
self.writer.add_scalar('beta', self.beta, frame)
update_time = 0
play_time = 0
num_games = len(self.game_rewards)
if num_games > 10:
d = num_games / lives_reward
mean_rewards = np.sum(self.game_rewards) / d
mean_lengths = np.sum(self.game_lengths) / d
self.writer.add_scalar('rewards/mean', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/mean', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if mean_rewards > self.last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
if self.last_mean_rewards > self.config['score_to_win']:
print('network won!')
return self.last_mean_rewards, epoch_num
#clear_output(True)
# adjust agent parameters
if frame % num_epochs_to_copy == 0:
self.load_weigths_into_target_network()
if epoch_num >= self.max_epochs:
print('Max epochs reached')
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(np.sum(self.game_rewards) * lives_reward / len(self.game_rewards)))
return self.last_mean_rewards, epoch_num
| 21,405 | Python | 48.322581 | 191 | 0.592245 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/models.py | import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
from rl_games.algos_tf14 import networks
tfd = tfp.distributions
def entry_stop_gradients(target, mask):
mask_h = tf.abs(mask-1)
return tf.stop_gradient(mask_h * target) + mask * target
class BaseModel(object):
def is_rnn(self):
return False
class ModelA2C(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
action_mask_ph = dict.get('action_mask_ph', None)
is_train = prev_actions_ph is not None
        logits, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=False, is_train=is_train, reuse=reuse)
#if action_mask_ph is not None:
#masks = tf.layers.dense(tf.to_float(action_mask_ph), actions_num, activation=tf.nn.elu)
#logits = masks + logits
#logits = entry_stop_gradients(logits, tf.to_float(action_mask_ph))
probs = tf.nn.softmax(logits)
        # Sample actions with the Gumbel-max trick: argmax(logits + Gumbel noise) is a draw from
        # the categorical distribution defined by the (optionally masked) logits.
if not is_train:
u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
rand_logits = logits - tf.log(-tf.log(u))
if action_mask_ph is not None:
inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
rand_logits = rand_logits + inf_mask
logits = logits + inf_mask
action = tf.argmax(rand_logits, axis=-1)
one_hot_actions = tf.one_hot(action, actions_num)
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=probs)
if not is_train:
neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.stop_gradient(one_hot_actions))
return neglogp, value, action, entropy, logits
else:
prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
return prev_neglogp, value, None, entropy
class ModelA2CContinuous(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
is_train = prev_actions_ph is not None
mu, sigma, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train = is_train, reuse=reuse)
norm_dist = tfd.Normal(mu, sigma)
action = tf.squeeze(norm_dist.sample(1), axis=0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, sigma
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, sigma
class ModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
is_train = prev_actions_ph is not None
mean, logstd, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train=True, reuse=reuse)
std = tf.exp(logstd)
norm_dist = tfd.Normal(mean, std)
action = mean + std * tf.random_normal(tf.shape(mean))
#action = tf.squeeze(norm_dist.sample(1), axis=0)
#action = tf.clip_by_value(action, -1.0, 1.0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
if prev_actions_ph is None:
neglogp = self.neglogp(action, mean, std, logstd)
return neglogp, value, action, entropy, mean, std
prev_neglogp = self.neglogp(prev_actions_ph, mean, std, logstd)
return prev_neglogp, value, action, entropy, mean, std
def neglogp(self, x, mean, std, logstd):
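        # Negative log-likelihood of a diagonal Gaussian:
        # 0.5 * sum(((x - mean) / std)^2) + 0.5 * d * log(2 * pi) + sum(logstd)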
return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf.reduce_sum(logstd, axis=-1)
class LSTMModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def is_single_batched(self):
return False
def neglogp(self, x, mean, std, logstd):
return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf.reduce_sum(logstd, axis=-1)
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
is_train = prev_actions_ph is not None
mu, logstd, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
std = tf.exp(logstd)
action = mu + std * tf.random_normal(tf.shape(mu))
norm_dist = tfd.Normal(mu, std)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2CContinuous(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def is_single_batched(self):
return False
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
is_train = prev_actions_ph is not None
mu, var, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
sigma = tf.sqrt(var)
norm_dist = tfd.Normal(mu, sigma)
action = tf.squeeze(norm_dist.sample(1), axis=0)
#action = tf.clip_by_value(action, -1.0, 1.0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2C(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
action_mask_ph = dict.get('action_mask_ph', None)
is_train = prev_actions_ph is not None
logits, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=False, is_train=is_train, reuse=reuse)
if not is_train:
u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
rand_logits = logits - tf.log(-tf.log(u))
if action_mask_ph is not None:
inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
rand_logits = rand_logits + inf_mask
logits = logits + inf_mask
action = tf.argmax(rand_logits, axis=-1)
one_hot_actions = tf.one_hot(action, actions_num)
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.nn.softmax(logits))
if not is_train:
neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot_actions)
return neglogp, value, action, entropy, states_ph, masks_ph, lstm_state, initial_state, logits
prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
return prev_neglogp, value, None, entropy, states_ph, masks_ph, lstm_state, initial_state
class AtariDQN(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
'''
TODO: fix is_train
'''
is_train = name == 'agent'
return self.network(name=name, inputs=inputs, actions_num=actions_num, is_train=is_train, reuse=reuse)
| 10,090 | Python | 40.356557 | 167 | 0.599405 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/model_builder.py | from rl_games.common import object_factory
import rl_games.algos_tf14
from rl_games.algos_tf14 import network_builder
from rl_games.algos_tf14 import models
class ModelBuilder:
def __init__(self):
self.model_factory = object_factory.ObjectFactory()
self.model_factory.register_builder('discrete_a2c', lambda network, **kwargs : models.ModelA2C(network))
self.model_factory.register_builder('discrete_a2c_lstm', lambda network, **kwargs : models.LSTMModelA2C(network))
self.model_factory.register_builder('continuous_a2c', lambda network, **kwargs : models.ModelA2CContinuous(network))
self.model_factory.register_builder('continuous_a2c_logstd', lambda network, **kwargs : models.ModelA2CContinuousLogStd(network))
self.model_factory.register_builder('continuous_a2c_lstm', lambda network, **kwargs : models.LSTMModelA2CContinuous(network))
self.model_factory.register_builder('continuous_a2c_lstm_logstd', lambda network, **kwargs : models.LSTMModelA2CContinuousLogStd(network))
self.model_factory.register_builder('dqn', lambda network, **kwargs : models.AtariDQN(network))
self.network_factory = object_factory.ObjectFactory()
self.network_factory.register_builder('actor_critic', lambda **kwargs : network_builder.A2CBuilder())
self.network_factory.register_builder('dqn', lambda **kwargs : network_builder.DQNBuilder())
def load(self, params):
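        # Illustrative params layout (assuming a YAML config in the style shipped with rl_games):
        #   model:   {name: continuous_a2c_logstd}
        #   network: {name: actor_critic, separate: ..., space: {...}, mlp: {...}}
        # Only model.name and network.name are read here; the 'network' section is forwarded
        # to the selected network builder's load().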
self.model_name = params['model']['name']
self.network_name = params['network']['name']
network = self.network_factory.create(self.network_name)
network.load(params['network'])
model = self.model_factory.create(self.model_name, network=network)
return model
| 1,761 | Python | 49.342856 | 146 | 0.721181 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/network_builder.py | import tensorflow as tf
import numpy as np
from rl_games.algos_tf14 import networks
from rl_games.common import object_factory
def normc_initializer(std=1.0):
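    # Samples a normal matrix and rescales every column to have L2 norm `std`
    # (the "normc" initializer used by OpenAI Baselines).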
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
class NetworkBuilder:
def __init__(self, **kwargs):
self.activations_factory = object_factory.ObjectFactory()
self.activations_factory.register_builder('relu', lambda **kwargs : tf.nn.relu)
self.activations_factory.register_builder('tanh', lambda **kwargs : tf.nn.tanh)
self.activations_factory.register_builder('sigmoid', lambda **kwargs : tf.nn.sigmoid)
self.activations_factory.register_builder('elu', lambda **kwargs : tf.nn.elu)
self.activations_factory.register_builder('selu', lambda **kwargs : tf.nn.selu)
self.activations_factory.register_builder('softplus', lambda **kwargs : tf.nn.softplus)
self.activations_factory.register_builder('None', lambda **kwargs : None)
self.init_factory = object_factory.ObjectFactory()
self.init_factory.register_builder('normc_initializer', lambda **kwargs : normc_initializer(**kwargs))
self.init_factory.register_builder('const_initializer', lambda **kwargs : tf.constant_initializer(**kwargs))
self.init_factory.register_builder('orthogonal_initializer', lambda **kwargs : tf.orthogonal_initializer(**kwargs))
self.init_factory.register_builder('glorot_normal_initializer', lambda **kwargs : tf.glorot_normal_initializer(**kwargs))
self.init_factory.register_builder('glorot_uniform_initializer', lambda **kwargs : tf.glorot_uniform_initializer(**kwargs))
self.init_factory.register_builder('variance_scaling_initializer', lambda **kwargs : tf.variance_scaling_initializer(**kwargs))
self.init_factory.register_builder('random_uniform_initializer', lambda **kwargs : tf.random_uniform_initializer(**kwargs))
self.init_factory.register_builder('None', lambda **kwargs : None)
self.regularizer_factory = object_factory.ObjectFactory()
self.regularizer_factory.register_builder('l1_regularizer', lambda **kwargs : tf.contrib.layers.l1_regularizer(**kwargs))
self.regularizer_factory.register_builder('l2_regularizer', lambda **kwargs : tf.contrib.layers.l2_regularizer(**kwargs))
self.regularizer_factory.register_builder('l1l2_regularizer', lambda **kwargs : tf.contrib.layers.l1l2_regularizer(**kwargs))
self.regularizer_factory.register_builder('None', lambda **kwargs : None)
def load(self, params):
pass
def build(self, name, **kwargs):
pass
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
def _noisy_dense(self, inputs, units, activation, kernel_initializer, kernel_regularizer, name):
return networks.noisy_dense(inputs, units, name, True, activation)
def _build_mlp(self,
name,
input,
units,
activation,
initializer,
regularizer,
norm_func_name = None,
dense_func = tf.layers.dense,
is_train=True):
out = input
ind = 0
for unit in units:
ind += 1
out = dense_func(out, units=unit,
activation=self.activations_factory.create(activation),
kernel_initializer = self.init_factory.create(**initializer),
kernel_regularizer = self.regularizer_factory.create(**regularizer),
#bias_initializer=tf.random_uniform_initializer(-0.1, 0.1),
name=name + str(ind))
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, training=is_train)
return out
def _build_lstm(self, name, input, units, batch_num, games_num):
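        # OpenAI-baselines style LSTM: dones_ph masks (resets) the recurrent state at episode
        # boundaries, and states_ph carries the concatenated cell/hidden state (hence 2 * units).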
dones_ph = tf.placeholder(tf.float32, [batch_num])
states_ph = tf.placeholder(tf.float32, [games_num, 2*units])
lstm_out, lstm_state, initial_state = networks.openai_lstm(name, input, dones_ph=dones_ph, states_ph=states_ph, units=units, env_num=games_num, batch_num=batch_num)
return lstm_out, lstm_state, initial_state, dones_ph, states_ph
def _build_lstm2(self, name, inputs, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [games_num, 2*units])
hidden = tf.concat((inputs[0], inputs[1]), axis=1)
lstm_out, lstm_state, initial_state = networks.openai_lstm(name, hidden, dones_ph=dones_ph, states_ph=states_ph, units=units, env_num=games_num, batch_num=batch_num)
#lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
return lstm_out, lstm_state, initial_state, dones_ph, states_ph
def _build_lstm_sep(self, name, inputs, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.bool, [batch_num], name='lstm_masks')
states_ph = tf.placeholder(tf.float32, [games_num, 4*units], name='lstm_states')
statesa, statesc = tf.split(states_ph, 2, axis=1)
a_out, lstm_statea, initial_statea = networks.openai_lstm(name +'a', inputs[0], dones_ph=dones_ph, states_ph=statesa, units=units, env_num=games_num, batch_num=batch_num)
c_out, lstm_statec, initial_statec = networks.openai_lstm(name + 'c', inputs[1], dones_ph=dones_ph, states_ph=statesc, units=units, env_num=games_num, batch_num=batch_num)
lstm_state = tf.concat([lstm_statea, lstm_statec], axis=1)
initial_state = np.concatenate([initial_statea, initial_statec], axis=1)
#lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
return a_out, c_out, lstm_state, initial_state, dones_ph, states_ph
def _build_conv(self, ctype, **kwargs):
print('conv_name:', ctype)
if ctype == 'conv2d':
return self._build_cnn(**kwargs)
if ctype == 'conv1d':
return self._build_cnn1d(**kwargs)
def _build_cnn(self, name, input, convs, activation, initializer, regularizer, norm_func_name=None, is_train=True):
out = input
ind = 0
for conv in convs:
print(out.shape.as_list())
ind += 1
config = conv.copy()
config['filters'] = conv['filters']
config['padding'] = conv['padding']
config['kernel_size'] = [conv['kernel_size']] * 2
config['strides'] = [conv['strides']] * 2
config['activation'] = self.activations_factory.create(activation)
config['kernel_initializer'] = self.init_factory.create(**initializer)
config['kernel_regularizer'] = self.regularizer_factory.create(**regularizer)
config['name'] = name + str(ind)
out = tf.layers.conv2d(inputs=out, **config)
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, name='bn_'+ config['name'], training=is_train)
return out
def _build_cnn1d(self, name, input, convs, activation, initializer, regularizer, norm_func_name=None, is_train=True):
out = input
ind = 0
print('_build_cnn1d')
for conv in convs:
ind += 1
config = conv.copy()
config['activation'] = self.activations_factory.create(activation)
config['kernel_initializer'] = self.init_factory.create(**initializer)
config['kernel_regularizer'] = self.regularizer_factory.create(**regularizer)
config['name'] = name + str(ind)
#config['bias_initializer'] = tf.random_uniform_initializer,
# bias_initializer=tf.random_uniform_initializer(-0.1, 0.1)
out = tf.layers.conv1d(inputs=out, **config)
print('shapes of layer_' + str(ind), str(out.get_shape().as_list()))
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, training=is_train)
return out
class A2CBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.separate = params['separate']
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_discrete = 'discrete' in params['space']
        self.is_continuous = 'continuous' in params['space']
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
self.has_lstm = 'lstm' in params
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
if self.has_lstm:
self.lstm_units = params['lstm']['units']
self.concated = params['lstm']['concated']
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
batch_num = kwargs.pop('batch_num', 1)
games_num = kwargs.pop('games_num', 1)
is_train = kwargs.pop('is_train', True)
with tf.variable_scope(name, reuse=reuse):
actor_input = critic_input = input
if self.has_cnn:
cnn_args = {
'name' :'actor_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
actor_input = self._build_conv(**cnn_args)
actor_input = tf.contrib.layers.flatten(actor_input)
critic_input = actor_input
if self.separate:
cnn_args['name'] = 'critic_cnn'
critic_input = self._build_conv( **cnn_args)
critic_input = tf.contrib.layers.flatten(critic_input)
mlp_args = {
'name' :'actor_fc',
'input' : actor_input,
'units' :self.units,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out_actor = self._build_mlp(**mlp_args)
if self.separate:
mlp_args['name'] = 'critic_fc'
mlp_args['input'] = critic_input
out_critic = self._build_mlp(**mlp_args)
if self.has_lstm:
if self.concated:
out_actor, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm2('lstm', [out_actor, out_critic], self.lstm_units, batch_num, games_num)
out_critic = out_actor
else:
out_actor, out_critic, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm_sep('lstm_', [out_actor, out_critic], self.lstm_units, batch_num, games_num)
else:
if self.has_lstm:
out_actor, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm('lstm', out_actor, self.lstm_units, batch_num, games_num)
out_critic = out_actor
value = tf.layers.dense(out_critic, units = 1, kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.value_activation), name='value')
if self.is_continuous:
mu = tf.layers.dense(out_actor, units = actions_num, activation=self.activations_factory.create(self.space_config['mu_activation']),
kernel_initializer = self.init_factory.create(**self.space_config['mu_init']), name='mu')
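                # fixed_sigma: a single trainable sigma vector shared across all states
                # (interpreted as std or log-std by the model wrapper); otherwise sigma is
                # predicted per state by a dense head.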
if self.space_config['fixed_sigma']:
sigma_out = tf.get_variable(name='sigma_out', shape=(actions_num), initializer=self.init_factory.create(**self.space_config['sigma_init']), trainable=True)
else:
sigma_out = tf.layers.dense(out_actor, units = actions_num, kernel_initializer=self.init_factory.create(**self.space_config['sigma_init']), activation=self.activations_factory.create(self.space_config['sigma_activation']), name='sigma_out')
if self.has_lstm:
return mu, mu * 0 + sigma_out, value, states_ph, dones_ph, lstm_state, initial_state
return mu, mu * 0 + sigma_out, value
if self.is_discrete:
logits = tf.layers.dense(inputs=out_actor, units=actions_num, name='logits', kernel_initializer = self.init_factory.create(**self.initializer))
if self.has_lstm:
return logits, value, states_ph, dones_ph, lstm_state, initial_state
return logits, value
class DQNBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_dueling = params['dueling']
self.atoms = params['atoms']
self.is_noisy = params['noisy']
self.normalization = params.get('normalization', None)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
is_train = kwargs.pop('is_train', True)
if self.is_noisy:
dense_layer = self._noisy_dense
else:
dense_layer = tf.layers.dense
with tf.variable_scope(name, reuse=reuse):
out = input
if self.has_cnn:
cnn_args = {
'name' :'dqn_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out = self._build_conv(**cnn_args)
out = tf.contrib.layers.flatten(out)
mlp_args = {
'name' :'dqn_mlp',
'input' : out,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train,
'dense_func' : dense_layer
}
if self.is_dueling:
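                # Dueling head: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), computed per atom,
                # with separate hidden layers for the value and advantage streams.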
if len(self.units) > 1:
mlp_args['units'] = self.units[:-1]
out = self._build_mlp(**mlp_args)
hidden_value = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_val')
hidden_advantage = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_adv')
value = dense_layer(inputs=hidden_value, units=self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), activation=tf.identity, kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='value')
advantage = dense_layer(inputs=hidden_advantage, units= actions_num * self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='advantage')
advantage = tf.reshape(advantage, shape = [-1, actions_num, self.atoms])
value = tf.reshape(value, shape = [-1, 1, self.atoms])
q_values = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
else:
mlp_args['units'] = self.units
                out = self._build_mlp(**mlp_args)
q_values = dense_layer(inputs=out, units=actions_num *self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='q_vals')
q_values = tf.reshape(q_values, shape = [-1, actions_num, self.atoms])
if self.atoms == 1:
return tf.squeeze(q_values)
else:
return q_values
| 18,263 | Python | 51.034188 | 301 | 0.592345 |
NVlabs/DiffRL/externals/rl_games/rl_games/algos_tf14/a2c_continuous.py | from rl_games.common import tr_helpers, vecenv
from rl_games.algos_tf14 import networks
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque, OrderedDict
from tensorboardX import SummaryWriter
import gym
import ray
from datetime import datetime
def swap_and_flatten01(arr):
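    # Converts a rollout tensor of shape (horizon, num_actors, ...) into
    # (num_actors * horizon, ...), keeping each actor's timesteps contiguous so that
    # seq_len-sized slices never cross environment boundaries.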
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
#(-1, 1) -> (low, high)
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
#(horizon_length, actions_num)
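# Closed-form KL divergence between diagonal Gaussians, KL(p0 || p1) =
# sum_i [ log(sigma1_i / sigma0_i) + (sigma0_i^2 + (mu1_i - mu0_i)^2) / (2 * sigma1_i^2) - 1/2 ],
# averaged over the batch; implemented below in numpy and in TF.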
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
    c1 = np.log(p1_sigma/p0_sigma + 1e-5)
c2 = (np.square(p0_sigma) + np.square(p1_mu - p0_mu))/(2.0 *(np.square(p1_sigma) + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = np.mean(np.sum(kl, axis = -1)) # returning mean between all steps of sum between all actions
return kl
def policy_kl_tf(p0_mu, p0_sigma, p1_mu, p1_sigma):
c1 = tf.log(p1_sigma/p0_sigma + 1e-5)
c2 = (tf.square(p0_sigma) + tf.square(p1_mu - p0_mu))/(2.0 * (tf.square(p1_sigma) + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = tf.reduce_mean(tf.reduce_sum(kl, axis=-1)) # returning mean between all steps of sum between all actions
return kl
class A2CAgent:
def __init__(self, sess, base_name, observation_space, action_space, config):
self.name = base_name
self.actions_low = action_space.low
self.actions_high = action_space.high
self.env_name = config['env_name']
self.ppo = config['ppo']
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.e_clip = config['e_clip']
self.clip_value = config['clip_value']
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_actors = config['num_actors']
self.env_config = config.get('env_config', {})
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.num_agents = self.vec_env.get_number_of_agents()
self.horizon_length = config['horizon_length']
self.normalize_advantage = config['normalize_advantage']
self.config = config
self.state_shape = observation_space.shape
self.critic_coef = config['critic_coef']
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.sess = sess
self.grad_norm = config['grad_norm']
self.gamma = self.config['gamma']
self.tau = self.config['tau']
self.normalize_input = self.config['normalize_input']
self.seq_len = self.config['seq_length']
self.dones = np.asarray([False]*self.num_actors, dtype=np.bool)
self.current_rewards = np.asarray([0]*self.num_actors, dtype=np.float32)
self.current_lengths = np.asarray([0]*self.num_actors, dtype=np.float32)
self.game_rewards = deque([], maxlen=100)
self.game_lengths = deque([], maxlen=100)
self.obs_ph = tf.placeholder('float32', (None, ) + self.state_shape, name = 'obs')
self.target_obs_ph = tf.placeholder('float32', (None, ) + self.state_shape, name = 'target_obs')
self.actions_num = action_space.shape[0]
self.actions_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'actions')
self.old_mu_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'old_mu_ph')
self.old_sigma_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'old_sigma_ph')
self.old_neglogp_actions_ph = tf.placeholder('float32', (None, ), name = 'old_logpactions')
self.rewards_ph = tf.placeholder('float32', (None,), name = 'rewards')
self.old_values_ph = tf.placeholder('float32', (None,), name = 'old_values')
self.advantages_ph = tf.placeholder('float32', (None,), name = 'advantages')
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.epoch_num = tf.Variable(tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
self.bounds_loss_coef = config.get('bounds_loss_coef', None)
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
if self.is_polynom_decay_lr:
self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=config['max_epochs'], end_learning_rate=0.001, power=config.get('decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=config['max_epochs'], decay_rate = config['decay_rate'])
self.input_obs = self.obs_ph
self.input_target_obs = self.target_obs_ph
if observation_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_target_obs = tf.to_float(self.input_target_obs) / 255.0
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = observation_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=True)
self.input_target_obs = self.moving_mean_std.normalize(self.input_target_obs, train=False)
games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
self.train_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : self.config['minibatch_size'],
'games_num' : games_num,
'actions_num' : self.actions_num,
'prev_actions_ph' : self.actions_ph,
}
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_target_obs,
'batch_num' : self.num_actors,
'games_num' : self.num_actors,
'actions_num' : self.actions_num,
'prev_actions_ph' : None,
}
self.states = None
if self.network.is_rnn():
self.neglogp_actions ,self.state_values, self.action, self.entropy, self.mu, self.sigma, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_mu, self.target_sigma, self.target_states_ph, self.target_masks_ph, self.target_lstm_state, self.target_initial_state = self.network(self.run_dict, reuse=True)
self.states = self.target_initial_state
else:
self.neglogp_actions ,self.state_values, self.action, self.entropy, self.mu, self.sigma = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_mu, self.target_sigma = self.network(self.run_dict, reuse=True)
curr_e_clip = self.e_clip * self.lr_multiplier
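        # Policy loss: with `ppo` enabled this is the clipped-surrogate objective -- the
        # probability ratio exp(old_neglogp - neglogp) is clamped to [1 - e_clip, 1 + e_clip]
        # and the worse (maximum) of the clipped/unclipped losses is taken; otherwise a
        # plain policy-gradient loss is used.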
if (self.ppo):
self.prob_ratio = tf.exp(self.old_neglogp_actions_ph - self.neglogp_actions)
self.prob_ratio = tf.clip_by_value(self.prob_ratio, 0.0, 16.0)
self.pg_loss_unclipped = -tf.multiply(self.advantages_ph, self.prob_ratio)
self.pg_loss_clipped = -tf.multiply(self.advantages_ph, tf.clip_by_value(self.prob_ratio, 1.- curr_e_clip, 1.+ curr_e_clip))
self.actor_loss = tf.reduce_mean(tf.maximum(self.pg_loss_unclipped, self.pg_loss_clipped))
else:
self.actor_loss = tf.reduce_mean(self.neglogp_actions * self.advantages_ph)
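        # Critic loss: squared error against the returns, optionally clipping the new value
        # predictions to stay within e_clip of the old ones (mirroring the policy clipping).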
self.c_loss = (tf.squeeze(self.state_values) - self.rewards_ph)**2
if self.clip_value:
self.cliped_values = self.old_values_ph + tf.clip_by_value(tf.squeeze(self.state_values) - self.old_values_ph, -curr_e_clip, curr_e_clip)
self.c_loss_clipped = tf.square(self.cliped_values - self.rewards_ph)
self.critic_loss = tf.reduce_mean(tf.maximum(self.c_loss, self.c_loss_clipped))
else:
self.critic_loss = tf.reduce_mean(self.c_loss)
self._calc_kl_dist()
self.loss = self.actor_loss + 0.5 * self.critic_coef * self.critic_loss - self.config['entropy_coef'] * self.entropy
self._apply_bound_loss()
self.reg_loss = tf.losses.get_regularization_loss()
self.loss += self.reg_loss
self.train_step = tf.train.AdamOptimizer(self.current_lr * self.lr_multiplier)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
grads = tf.gradients(self.loss, self.weights)
if self.config['truncate_grads']:
grads, _ = tf.clip_by_global_norm(grads, self.grad_norm)
grads = list(zip(grads, self.weights))
self.train_op = self.train_step.apply_gradients(grads)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
def _calc_kl_dist(self):
self.kl_dist = policy_kl_tf(self.mu, self.sigma, self.old_mu_ph, self.old_sigma_ph)
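        # Adaptive LR: divide the learning rate by 1.5 when the KL to the old policy exceeds
        # 2x the threshold, multiply it by 1.5 when the KL drops below 0.5x the threshold,
        # clamped to [1e-6, 1e-2].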
if self.is_adaptive_lr:
self.current_lr = tf.where(self.kl_dist > (2.0 * self.kl_threshold), tf.maximum(self.current_lr / 1.5, 1e-6), self.current_lr)
self.current_lr = tf.where(self.kl_dist < (0.5 * self.kl_threshold), tf.minimum(self.current_lr * 1.5, 1e-2), self.current_lr)
def _apply_bound_loss(self):
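        # Soft-bound penalty on the action mean: mu is pushed back towards [-1.1, 1.1];
        # sampled actions are later clipped to [-1, 1] and rescaled to the env limits in
        # play_steps, so this keeps the policy mean from saturating far outside that range.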
if self.bounds_loss_coef:
soft_bound = 1.1
mu_loss_high = tf.square(tf.maximum(0.0, self.mu - soft_bound))
mu_loss_low = tf.square(tf.maximum(0.0, -soft_bound - self.mu))
self.bounds_loss = tf.reduce_sum(mu_loss_high + mu_loss_low, axis=1)
self.loss += self.bounds_loss * self.bounds_loss_coef
else:
self.bounds_loss = None
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def get_action_values(self, obs):
run_ops = [self.target_action, self.target_state_values, self.target_neglogp, self.target_mu, self.target_sigma]
if self.network.is_rnn():
run_ops.append(self.target_lstm_state)
return self.sess.run(run_ops, {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return (*self.sess.run(run_ops, {self.target_obs_ph : obs}), None)
def get_values(self, obs):
if self.network.is_rnn():
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs})
def play_steps(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs, mb_mus, mb_sigmas = [],[],[],[],[],[],[],[]
mb_states = []
epinfos = []
# For n in range number of steps
for _ in range(self.horizon_length):
if self.network.is_rnn():
mb_states.append(self.states)
actions, values, neglogpacs, mu, sigma, self.states = self.get_action_values(self.obs)
#actions = np.squeeze(actions)
values = np.squeeze(values)
neglogpacs = np.squeeze(neglogpacs)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones.copy())
mb_mus.append(mu)
mb_sigmas.append(sigma)
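            # Actions are sampled in [-1, 1]; clip and rescale them to the env's native bounds.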
self.obs[:], rewards, self.dones, infos = self.vec_env.step(rescale_actions(self.actions_low, self.actions_high, np.clip(actions, -1.0, 1.0)))
self.current_rewards += rewards
self.current_lengths += 1
for reward, length, done in zip(self.current_rewards, self.current_lengths, self.dones):
if done:
self.game_rewards.append(reward)
self.game_lengths.append(length)
shaped_rewards = self.rewards_shaper(rewards)
epinfos.append(infos)
mb_rewards.append(shaped_rewards)
self.current_rewards = self.current_rewards * (1.0 - self.dones)
self.current_lengths = self.current_lengths * (1.0 - self.dones)
#using openai baseline approach
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_mus = np.asarray(mb_mus, dtype=np.float32)
mb_sigmas = np.asarray(mb_sigmas, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
mb_states = np.asarray(mb_states, dtype=np.float32)
last_values = self.get_values(self.obs)
last_values = np.squeeze(last_values)
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
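        # Generalized Advantage Estimation, computed backwards through the rollout:
        #   delta_t = r_t + gamma * nextnonterminal * V(s_{t+1}) - V(s_t)
        #   A_t     = delta_t + gamma * tau * nextnonterminal * A_{t+1}
        # with tau playing the role of the GAE lambda.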
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
if self.network.is_rnn():
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_mus, mb_sigmas, mb_states )), epinfos)
else:
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_mus, mb_sigmas)), None, epinfos)
return result
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def train(self):
max_epochs = self.config.get('max_epochs', 1e6)
self.obs = self.vec_env.reset()
batch_size = self.horizon_length * self.num_actors * self.num_agents
minibatch_size = self.config['minibatch_size']
mini_epochs_num = self.config['mini_epochs']
num_minibatches = batch_size // minibatch_size
last_lr = self.config['learning_rate']
self.last_mean_rewards = -100500
epoch_num = 0
frame = 0
update_time = 0
play_time = 0
start_time = time.time()
total_time = 0
while True:
play_time_start = time.time()
epoch_num = self.update_epoch()
frame += batch_size
obses, returns, dones, actions, values, neglogpacs, mus, sigmas, lstm_states, _ = self.play_steps()
advantages = returns - values
if self.normalize_advantage:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
a_losses = []
c_losses = []
b_losses = []
entropies = []
kls = []
play_time_end = time.time()
play_time = play_time_end - play_time_start
update_time_start = time.time()
if self.network.is_rnn():
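                # RNN path: minibatches are assembled from whole sequences of length seq_len,
                # shuffling at the game/sequence level so the stored hidden states (kept every
                # seq_len steps) stay aligned with their observations.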
total_games = batch_size // self.seq_len
num_games_batch = minibatch_size // self.seq_len
game_indexes = np.arange(total_games)
flat_indexes = np.arange(total_games * self.seq_len).reshape(total_games, self.seq_len)
lstm_states = lstm_states[::self.seq_len]
for _ in range(0, mini_epochs_num):
np.random.shuffle(game_indexes)
for i in range(0, num_minibatches):
batch = range(i * num_games_batch, (i + 1) * num_games_batch)
mb_indexes = game_indexes[batch]
mbatch = flat_indexes[mb_indexes].ravel()
dict = {}
dict[self.old_values_ph] = values[mbatch]
dict[self.old_neglogp_actions_ph] = neglogpacs[mbatch]
dict[self.advantages_ph] = advantages[mbatch]
dict[self.rewards_ph] = returns[mbatch]
dict[self.actions_ph] = actions[mbatch]
dict[self.obs_ph] = obses[mbatch]
dict[self.old_mu_ph] = mus[mbatch]
dict[self.old_sigma_ph] = sigmas[mbatch]
dict[self.masks_ph] = dones[mbatch]
dict[self.states_ph] = lstm_states[mb_indexes]
dict[self.learning_rate_ph] = last_lr
run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_dist, self.current_lr, self.mu, self.sigma, self.lr_multiplier]
if self.bounds_loss is not None:
run_ops.append(self.bounds_loss)
run_ops.append(self.train_op)
run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
res_dict = self.sess.run(run_ops, dict)
a_loss = res_dict[0]
c_loss = res_dict[1]
entropy = res_dict[2]
kl = res_dict[3]
last_lr = res_dict[4]
cmu = res_dict[5]
csigma = res_dict[6]
lr_mul = res_dict[7]
if self.bounds_loss is not None:
b_loss = res_dict[8]
b_losses.append(b_loss)
mus[mbatch] = cmu
sigmas[mbatch] = csigma
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
else:
for _ in range(0, mini_epochs_num):
permutation = np.random.permutation(batch_size)
obses = obses[permutation]
returns = returns[permutation]
actions = actions[permutation]
values = values[permutation]
neglogpacs = neglogpacs[permutation]
advantages = advantages[permutation]
mus = mus[permutation]
sigmas = sigmas[permutation]
for i in range(0, num_minibatches):
batch = range(i * minibatch_size, (i + 1) * minibatch_size)
dict = {self.obs_ph: obses[batch], self.actions_ph : actions[batch], self.rewards_ph : returns[batch],
self.advantages_ph : advantages[batch], self.old_neglogp_actions_ph : neglogpacs[batch], self.old_values_ph : values[batch]}
dict[self.old_mu_ph] = mus[batch]
dict[self.old_sigma_ph] = sigmas[batch]
dict[self.learning_rate_ph] = last_lr
run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_dist, self.current_lr, self.mu, self.sigma, self.lr_multiplier]
if self.bounds_loss is not None:
run_ops.append(self.bounds_loss)
run_ops.append(self.train_op)
run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
res_dict = self.sess.run(run_ops, dict)
a_loss = res_dict[0]
c_loss = res_dict[1]
entropy = res_dict[2]
kl = res_dict[3]
last_lr = res_dict[4]
cmu = res_dict[5]
csigma = res_dict[6]
lr_mul = res_dict[7]
if self.bounds_loss is not None:
b_loss = res_dict[8]
b_losses.append(b_loss)
mus[batch] = cmu
sigmas[batch] = csigma
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
update_time_end = time.time()
update_time = update_time_end - update_time_start
sum_time = update_time + play_time
total_time = update_time_end - start_time
if self.rank == 0:
scaled_time = sum_time # self.num_agents *
scaled_play_time = play_time # self.num_agents *
if self.print_stats:
fps_step = batch_size / scaled_play_time
fps_total = batch_size / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
# performance
self.writer.add_scalar('performance/total_fps', batch_size / sum_time, frame)
self.writer.add_scalar('performance/step_fps', batch_size / play_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('performance/update_time', update_time, frame)
# losses
self.writer.add_scalar('losses/a_loss', np.mean(a_losses), frame)
self.writer.add_scalar('losses/c_loss', np.mean(c_losses), frame)
if len(b_losses) > 0:
self.writer.add_scalar('losses/bounds_loss', np.mean(b_losses), frame)
self.writer.add_scalar('losses/entropy', np.mean(entropies), frame)
# info
self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
self.writer.add_scalar('info/kl', np.mean(kls), frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
if len(self.game_rewards) > 0:
mean_rewards = np.mean(self.game_rewards)
mean_lengths = np.mean(self.game_lengths)
self.writer.add_scalar('rewards/frame', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/frame', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if mean_rewards > self.last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
self.save("./nn/" + self.name)
if self.last_mean_rewards > self.config['score_to_win']:
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
if epoch_num > max_epochs:
print('MAX EPOCHS NUM!')
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
update_time = 0
| 24,499 | Python | 48.295775 | 253 | 0.561982 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_pendulum_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: glorot_normal_initializer
gain: 0.01
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 32]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 2
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-3
name: pendulum
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: Pendulum-v0
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.016
normalize_input: False
bounds_loss_coef: 0
| 1,266 | YAML | 18.796875 | 41 | 0.559242 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_lunar.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: glorot_normal_initializer
          #scale: 0.01
sigma_init:
name: const_initializer
value: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: relu
initializer:
name: glorot_normal_initializer
#gain: 2
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: LunarLanderContinuous-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0
| 1,271 | YAML | 18.875 | 41 | 0.558615 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_cartpole_masked_velocity_rnn.yaml |
#Cartpole without velocities lstm test
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [64, 64]
activation: relu
normalization: 'layer_norm'
norm_only_first_layer: True
initializer:
name: default
regularizer:
name: None
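    # The wrapped env masks out the velocity terms, so the LSTM below has to infer them
    # from the history of position-only observations.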
rnn:
name: 'lstm'
units: 64
layers: 1
before_mlp: False
concat_input: True
layer_norm: True
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: cartpole_vel_info
score_to_win: 500
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: CartPoleMaskedVelocity-v1
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 4 | 1,117 | YAML | 17.327869 | 39 | 0.598926 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppg_walker.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_ppg
score_to_win: 290
grad_norm: 0.5
entropy_coef: 0 #-0.005
truncate_grads: False
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 1
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.004
normalize_input: False
bounds_loss_coef: 0.0005
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
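    # Settings for the PPG auxiliary phase: its own learning rate, minibatch size and
    # number of epochs, used on top of the single PPO mini-epoch above.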
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 1024
mini_epochs: 6
player:
render: True
determenistic: True
games_num: 200
| 1,536 | YAML | 20.347222 | 56 | 0.558594 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_continuous.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: openai_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: BipedalWalkerHardcore-v3
| 1,271 | YAML | 18.272727 | 39 | 0.552321 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_flex_humanoid_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [256,128,64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: 'nn/humanoid_torch.pth'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'humanoid_torch'
score_to_win : 20000
grad_norm : 0.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexHumanoid
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 32
minibatch_size : 4096
mini_epochs : 4
critic_coef : 1
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : False
normalize_value : True
bounds_loss_coef: 0.000
max_epochs: 12000 | 1,468 | YAML | 18.851351 | 39 | 0.547684 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_reacher.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128]
activation: relu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn1:
name: lstm
units: 64
layers: 1
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: ReacherPyBulletEnv-v0
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: none
kl_threshold: 0.008
normalize_input: True
seq_length: 16
bounds_loss_coef: 0.00
max_epochs: 10000
weight_decay: 0.0001
player:
render: True
games_num: 200
experiment_config1:
start_exp: 0
start_sub_exp: 0
experiments:
- exp:
- path: config.bounds_loss_coef
value: [0.5]
| 1,593 | YAML | 18.925 | 56 | 0.549278 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_walker.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 256
mini_epochs: 4
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.005
normalize_input: True
bounds_loss_coef: 0.00
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
player:
render: True
determenistic: True
games_num: 200
| 1,408 | YAML | 19.720588 | 56 | 0.555398 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_pendulum.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.01
sigma_init:
name: const_initializer
value: 0
fixed_sigma: False
mlp:
units: [32, 32]
activation: elu
initializer:
name: default
scale: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: Pendulum-v0
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0
| 1,223 | YAML | 18.125 | 39 | 0.546198 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_revenge_rnd.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
value_shape: 2
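    # Two value outputs -- presumably one per reward stream (the extrinsic return and the
    # RND intrinsic return configured under rnd_config below).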
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
config:
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.999
tau: 0.9
learning_rate: 1e-4
name: atari
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.002
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.1
clip_value: True
num_actors: 32
horizon_length: 512
minibatch_size: 4096
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
env_config:
name: MontezumaRevengeNoFrameskip-v4
rnd_config:
scale_value: 1.0
episodic: True
episode_length: 256
gamma: 0.99
mini_epochs: 2
minibatch_size: 1024
learning_rate: 1e-4
network:
name: rnd_curiosity
cnn:
type: conv2d
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnd:
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
net:
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
rnd:
units: [512,512, 512]
net:
units: [512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
scale: 2 | 3,072 | YAML | 21.762963 | 42 | 0.427083 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_flex_humanoid_torch_rnn.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
normalization: 'layer_norm'
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.01
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [256,128]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: lstm
units: 64
layers: 1
before_mlp: False
load_checkpoint: True
load_path: 'nn/humanoid_torch_rnn.pth'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 8e-4
name : 'humanoid_torch_rnn'
score_to_win : 20000
grad_norm : 5
entropy_coef : 0
truncate_grads : True
env_name : FlexHumanoid
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 256
minibatch_size : 8192
mini_epochs : 4
critic_coef : 1
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length: 16
bounds_loss_coef: 0.000
weight_decay: 0.001
max_epochs: 6000 | 1,608 | YAML | 18.621951 | 40 | 0.54291 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_cartpole.yaml |
#Cartpole MLP
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [32, 32]
activation: relu
initializer:
name: default
regularizer:
name: None
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: cartpole_vel_info
score_to_win: 500
grad_norm: 1.0
entropy_coef: 0.01
truncate_grads: True
env_name: CartPole-v1
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 32
minibatch_size: 64
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
device: 'cuda:0' | 878 | YAML | 16.235294 | 29 | 0.592255 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_flex_ant_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [128, 64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 16
minibatch_size : 4096
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
normalize_value : True
bounds_loss_coef: 0.0001
| 1,425 | YAML | 18.27027 | 39 | 0.53614 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_continuous_lstm.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_lstm_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: normc_initializer
std: 0.01
sigma_init:
name: const_initializer
value: 0.0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: relu
initializer:
name: normc_initializer
std: 1
regularizer:
name: 'None'
lstm:
units: 128
concated: False
load_checkpoint: False
load_path: 'nn/runBipedalWalkerHardcore-v2'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: walker_lstm
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.000
truncate_grads: True
env_name: BipedalWalkerHardcore-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.5
max_epochs: 5000
| 1,334 | YAML | 19.227272 | 45 | 0.561469 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/carracing_ppo.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
load_checkpoint: False
load_path: 'nn/runCarRacing-v0'
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: racing
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.000
truncate_grads: True
env_name: CarRacing-v0
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: True
#lr_schedule: adaptive
# kl_threshold: 0.008
bounds_loss_coef: 0.001
# max_epochs: 5000
player:
render: True
deterministic: True | 1,684 | YAML | 18.593023 | 33 | 0.541568 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppg_walker_hardcore.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
load_checkpoint: True
load_path: './nn/walker_hc_ppg.pth'
config:
reward_shaper:
#min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_hc_ppg
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0 #-0.005
truncate_grads: False
env_name: BipedalWalkerHardcore-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 4096
minibatch_size: 8192
mini_epochs: 1
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.004
normalize_input: False
bounds_loss_coef: 0.0005
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 1024
mini_epochs: 6
player:
render: True
determenistic: True
games_num: 200
| 1,510 | YAML | 20.28169 | 41 | 0.559603 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/rainbow_dqn_breakout.yaml | params:
algo:
name: dqn
model:
name: dqn
load_checkpoint: False
load_path: 'nn/breakoutep=3638750.0rew=201.75'
network:
name: dqn
dueling: True
atoms: 51
noisy: True
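    # Rainbow-style components: dueling heads, a 51-atom distributional value head and
    # noisy linear layers for exploration (epsilon is kept at 0 below).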
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 'valid'
- filters: 64
kernel_size: 4
strides: 2
padding: 'valid'
- filters: 64
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
gamma : 0.99
learning_rate : 0.0001
steps_per_epoch : 4
batch_size : 32
epsilon : 0.00
min_epsilon : 0.00
epsilon_decay_frames : 1000000
num_epochs_to_copy : 10000
name : 'breakout'
env_name: BreakoutNoFrameskip-v4
is_double : True
score_to_win : 600
num_steps_fill_buffer : 100000
replay_buffer_type : 'prioritized'
replay_buffer_size : 1000000
priority_beta : 0.4
priority_alpha : 0.6
beta_decay_frames : 1000000
max_beta : 1
horizon_length : 3
episodes_to_log : 100
lives_reward : 5
atoms_num : 51
v_min : -10
v_max : 10
games_to_track : 100
lr_schedule : None
max_epochs: 10000000
| 1,525 | YAML | 19.346666 | 48 | 0.550164 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_smac.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 6h_vs_8z
frames: 2
random_invalid_step: False | 979 | YAML | 17.148148 | 32 | 0.581205 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_multiwalker.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128, 64]
d2rl: False
activation: relu
initializer:
name: default
load_checkpoint: False
load_path: './nn/multiwalker.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 1e-4
name: multiwalker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: multiwalker_env
ppo: True
e_clip: 0.2
use_experimental_cv: False
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 3072 #768 #3072 #1536
mini_epochs: 4
critic_coef: 1
schedule_type: 'standard'
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
normalize_value: True
bounds_loss_coef: 0.0001
max_epochs: 10000
weight_decay: 0.0000
player:
render: True
games_num: 200
env_config:
central_value: True
use_prev_actions: True
apply_agent_ids: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 3e-4
clip_value: False
normalize_input: True
truncate_grads: False
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256, 128]
activation: elu
initializer:
name: default | 1,881 | YAML | 20.632184 | 43 | 0.549176 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_walker_hardcore.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128, 64]
d2rl: False
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
load_checkpoint: False
load_path: './nn/walker_hc.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_hc
score_to_win: 300
grad_norm: 1.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalkerHardcore-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 4096
minibatch_size: 8192
mini_epochs: 4
critic_coef: 1
schedule_type: 'standard'
lr_schedule: 'adaptive' #None #
kl_threshold: 0.008
normalize_input: True
seq_length: 4
bounds_loss_coef: 0.00
max_epochs: 100000
weight_decay: 0
player:
render: False
games_num: 200
determenistic: True
| 1,420 | YAML | 19.897059 | 41 | 0.554225 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_flex_ant_torch_rnn.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
normalization: 'layer_norm'
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: False
mlp:
units: [128]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: 'lstm'
units: 64
layers: 1
before_mlp: False
load_checkpoint: False
load_path: 'nn/ant_torch.pth'
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch_rnn'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0
weight_decay: 0.001
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 256
minibatch_size : 8192
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length : 32
bounds_loss_coef: 0.000
| 1,580 | YAML | 18.280488 | 39 | 0.533544 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_smac_cnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/5m_vs_6m2smac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 5m_vs_6m2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 5m_vs_6m
frames: 4
transpose: True
random_invalid_step: False | 1,512 | YAML | 18.64935 | 35 | 0.547619 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_flex_ant_torch_rnn_copy.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: 'lstm'
units: 128
layers: 1
before_mlp: True
load_checkpoint: False
load_path: 'nn/ant_torch.pth'
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 128
minibatch_size : 4096
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length : 16
bounds_loss_coef: 0.0
| 1,509 | YAML | 18.113924 | 39 | 0.530152 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/dqn.yaml | params:
algo:
name: dqn
model:
name: dqn
load_checkpoint: False
load_path: path
network:
name: dqn
dueling: True
atoms: 1
noisy: False
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 'valid'
- filters: 64
kernel_size: 4
strides: 2
padding: 'valid'
- filters: 64
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 0.1
gamma : 0.99
learning_rate : 0.0005
steps_per_epoch : 4
batch_size : 128
epsilon : 0.90
min_epsilon : 0.02
epsilon_decay_frames : 100000
num_epochs_to_copy : 10000
name : 'pong_dddqn_config1'
env_name: PongNoFrameskip-v4
is_double : True
score_to_win : 20.9
num_steps_fill_buffer : 10000
replay_buffer_type : 'normal'
replay_buffer_size : 100000
priority_beta : 0.4
priority_alpha : 0.6
beta_decay_frames : 100000
max_beta : 1
horizon_length : 3
episodes_to_log : 10
lives_reward : 1
atoms_num : 1
games_to_track : 20
lr_schedule : polynom_decay
max_epochs: 100000
experiment_config:
start_exp: 0
start_sub_exp: 3
experiments:
# - exp:
# - path: config.learning_rate
# value: [0.0005, 0.0002]
- exp:
- path: network.initializer
value:
- name: variance_scaling_initializer
scale: 2
- name: glorot_normal_initializer
- name: glorot_uniform_initializer
- name: orthogonal_initializer
gain: 1.41421356237
- path: network.cnn.initializer
value:
- name: variance_scaling_initializer
scale: 2
- name: glorot_normal_initializer
- name: glorot_uniform_initializer
- name: orthogonal_initializer
gain: 1.41421356237
| 2,195 | YAML | 20.742574 | 46 | 0.553531 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/ppo_lunar_continiuos_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64]
activation: relu
initializer:
name: default
scale: 2
rnn:
name: 'lstm'
units: 64
layers: 1
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-3
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: LunarLanderContinuous-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
schedule_type: standard
normalize_input: True
seq_length: 4
bounds_loss_coef: 0
player:
render: True
| 1,276 | YAML | 17.779412 | 41 | 0.544671 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_discrete.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [32,32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: False
multi_head_value: False
player:
games_num: 100
determenistic: True
| 1,207 | YAML | 17.584615 | 33 | 0.589892 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [64]
#normalization: 'layer_norm'
activation: elu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
units: 64
layers: 1
layer_norm: True
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: test_asymmetric
score_to_win: 100000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: openai_gym
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
seq_length: 4
weight_decay: 0.0000
env_config:
name: TestAsymmetricEnv-v0
wrapped_env_name: "LunarLander-v2"
apply_mask: False
use_central_value: True
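      # Asymmetric-critic test: the actor gets the (optionally masked) observation while
      # the central value network configured below is trained on the separate state input.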
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
before_mlp: False
| 1,707 | YAML | 18.632184 | 40 | 0.557704 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_rnn_multidiscrete.yaml | params:
seed: 322
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [64, 64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: test_rnn_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: False
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: True
player:
games_num: 100
determenistic: True
central_value_config1:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: False
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64,64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
before_mlp: False | 1,898 | YAML | 18.989473 | 32 | 0.555848 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_discrete_multidiscrete_mhv.yaml | params:
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [32,32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md_mhv
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
multi_discrete_space: True
multi_head_value: True
player:
games_num: 100
determenistic: True
| 1,223 | YAML | 17.830769 | 32 | 0.592805 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_ppo_walker_truncated_time.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/walker_truncated_step_1000.pth'
config:
name: walker_truncated_step_1000
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_input: True
normalize_advantage: True
normalize_value: True
value_bootstrap: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
schedule_type: standard
lr_schedule: adaptive
kl_threshold: 0.005
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 256
mini_epochs: 4
critic_coef: 2
bounds_loss_coef: 0.00
max_epochs: 10000
#weight_decay: 0.0001
env_config:
steps_limit: 1000
player:
render: True
determenistic: True
games_num: 200
| 1,426 | YAML | 17.776316 | 50 | 0.585554 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_rnn_multidiscrete_mhv.yaml | params:
seed: 322
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_rnn_md_mhv
score_to_win: 0.99
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
multi_discrete_space: True
multi_head_value: True
player:
games_num: 100
determenistic: True
| 1,362 | YAML | 17.671233 | 32 | 0.5837 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete_mhv_mops.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: testnet
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md_multi_obs
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: False
weight_decay: 0.0000
max_epochs: 10000
seq_length: 16
save_best_after: 10
save_frequency: 20
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: False
min_dist: 2
max_dist: 8
use_central_value: True
multi_obs_space: True
multi_head_value: False
player:
games_num: 100
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: False
truncate_grads: True
grad_norm: 10
network:
name: testnet
central_value: True
mlp:
units: [64,32]
activation: relu
initializer:
name: default | 1,461 | YAML | 19.885714 | 30 | 0.588638 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete_mhv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 32
layers: 1
layer_norm: False
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: True
weight_decay: 0.0000
max_epochs: 10000
seq_length: 16
save_best_after: 10
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: False
multi_head_value: False
player:
games_num: 100
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
before_mlp: False | 1,941 | YAML | 19.020618 | 33 | 0.55796 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/test/test_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_rnn
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
player:
games_num: 100
determenistic: True
| 1,270 | YAML | 16.901408 | 32 | 0.577165 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/27m_vs_30m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/27msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 27m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3456
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 27m_vs_30m
frames: 4
transpose: False
random_invalid_step: False | 1,459 | YAML | 18.466666 | 33 | 0.544894 |
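
The conv1d block in the config above specifies a 1-D convolutional encoder applied over the stacked observation frames before the [256, 128] MLP head. A rough PyTorch equivalent of that spec, assuming the 4 stacked frames act as the input channels (the wiring is illustrative and not taken from the rl_games network builder):

import torch.nn as nn

# filters / kernel_size / strides / padding copied from the 'convs' list above;
# in_channels=4 is an assumption derived from 'frames: 4' in env_config.
cnn = nn.Sequential(
    nn.Conv1d(in_channels=4, out_channels=256, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.Conv1d(256, 512, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.Conv1d(512, 1024, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.Flatten(),  # flattened features then go into the [256, 128] MLP
)
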
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s_vs_5z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: 3s_vs_5z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 3s_vs_5z
frames: 1
random_invalid_step: False | 1,040 | YAML | 17.263158 | 32 | 0.577885 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/last_3s_vs_5z_cvep=10001rew=9.585825.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s_vs_5z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 24
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
max_epochs: 50000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
| 1,579 | YAML | 18.75 | 58 | 0.569981 |
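
The 'minibatch_size: 1536 # 3 * 512' comments that recur in these files follow from the rollout size: 8 actors running a 128-step horizon with 3 controlled agents collect 8 * 128 * 3 = 3072 samples per PPO update, and the minibatch is chosen as an exact divisor of that batch (the divisibility requirement is the upstream rl_games behaviour, stated here as context rather than quoted from this file):

# Sanity check of the batch arithmetic behind the minibatch comments.
num_actors, horizon_length, num_agents = 8, 128, 3      # 3 Stalkers in 3s_vs_5z
batch_size = num_actors * horizon_length * num_agents   # 3072 samples per update
minibatch_size = 1536                                    # the '3 * 512' in the config
assert batch_size % minibatch_size == 0
print(batch_size // minibatch_size, "minibatches per mini-epoch")  # -> 2
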
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/6h_vs_8z_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z_separate
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.002
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 2
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
central_value: False
reward_only_positive: False
obs_last_action: True
frames: 1
#flatten: False | 1,108 | YAML | 18.120689 | 34 | 0.590253 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/8m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 8m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
max_epochs: 10000
env_config:
name: 8m
frames: 1
transpose: False
random_invalid_step: False | 1,061 | YAML | 17.631579 | 32 | 0.589067 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/2c_vs_64zg.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2c_vs_64zg_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2c_vs_64zg
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 512
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 2c_vs_64zg
frames: 4
transpose: True
random_invalid_step: False
| 1,512 | YAML | 18.397436 | 32 | 0.546958 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
seed: 322
load_checkpoint: False
load_path: 'nn/3s5z_vs_3s6zsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1.4241
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1.4241
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
frames: 4
transpose: False
random_invalid_step: False | 1,600 | YAML | 19.265823 | 40 | 0.555 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: ''
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096 # 8 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
#reward_negative_scale: 0.9
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: True
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,580 | YAML | 19.269231 | 34 | 0.567722 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/5m_vs_6m_rnn_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5m_vs_6m_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 5m_vs_6m_rnn_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
entropy_coef: 0.02
truncate_grads: True
grad_norm: 10
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560 # 5 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length: 8
#max_epochs: 10000
env_config:
name: 5m_vs_6m
central_value: True
reward_only_positive: True
obs_last_action: False
apply_agent_ids: True
player:
render: False
games_num: 200
n_game_life: 1
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
#normalization: layer_norm
name: actor_critic
central_value: True
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
#reward_negative_scale: 0.1 | 1,962 | YAML | 18.828283 | 34 | 0.553517 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3m_torch_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False | 1,099 | YAML | 17.032787 | 32 | 0.579618 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3m_torch_cv_joint.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
env_config:
name: 3m
frames: 1
transpose: False
central_value: True
reward_only_positive: True
state_last_action: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
joint_obs_actions:
embedding: False
embedding_scale: 1 #(actions // embedding_scale)
mlp_scale: 4 # (mlp from obs size) // mlp_out_scale
mlp:
units: [256, 128]
activation: relu
initializer:
#name: default
name: default
scale: 2
regularizer:
name: 'None' | 1,706 | YAML | 19.817073 | 63 | 0.559789 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3m_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,532 | YAML | 18.909091 | 34 | 0.567885 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv_joint.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s_vs_5z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 24
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
max_epochs: 50000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
joint_obs_actions:
embedding: False
embedding_scale: 1 #(actions // embedding_scale)
mlp_scale: 4 # (mlp from obs size) // mlp_out_scale
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
| 1,762 | YAML | 19.741176 | 63 | 0.565834 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s_vs_4z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/3s_vs_4z_lstm'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: sc2_fc
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 1536
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 3s_vs_4z
frames: 1
random_invalid_step: False | 1,036 | YAML | 17.192982 | 32 | 0.578185 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/MMM2_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
scale: 1.3
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 0
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: MMM2_cnn
reward_shaper:
scale_value: 1.3
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
use_action_masks: True
env_config:
name: MMM2
frames: 4
      transpose: False # in pytorch this flag is the inverse of 'Transpose' in the tf version
random_invalid_step: False
replay_save_freq: 100 | 1,531 | YAML | 18.896104 | 69 | 0.548661 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
seed: 322
load_checkpoint: False
load_path: 'nn/3s5z_vs_3s6z_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6zaa
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
frames: 4
transpose: True
random_invalid_step: False | 1,532 | YAML | 18.909091 | 34 | 0.550914 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/5m_vs_6m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 5m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 5m_vs_6m
frames: 4
transpose: False
random_invalid_step: False | 1,455 | YAML | 18.413333 | 32 | 0.543643 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s_vs_5z_torch_lstm.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
before_mlp: False
config:
name: 3s_vs_5z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 256
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 32
use_action_masks: True
max_epochs: 20000
env_config:
name: 3s_vs_5z
frames: 1
random_invalid_step: False | 1,120 | YAML | 17.683333 | 32 | 0.576786 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/2s_vs_1c.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/2s_vs_1c_lstm'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: 2m_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
random_invalid_step: False | 1,039 | YAML | 17.245614 | 32 | 0.578441 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/MMM2.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/MMM_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: MMM2_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
use_action_masks: True
ignore_dead_batches : False
seq_length: 4
env_config:
name: MMM
frames: 4
transpose: True
random_invalid_step: False | 1,500 | YAML | 18.493506 | 32 | 0.544667 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/8m_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 8m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
max_epochs: 10000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 8m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: False
obs_last_action: True
| 1,581 | YAML | 18.292683 | 34 | 0.56673 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3m_torch_cv_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3m_cv_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
truncate_grads: True
grad_norm: 0.5
entropy_coef: 0.001
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length : 8
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 0.5
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1 | 1,711 | YAML | 18.906977 | 34 | 0.549971 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3m_cnn_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnnsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 1
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 3m
frames: 4
transpose: True
random_invalid_step: True
| 1,523 | YAML | 17.814815 | 40 | 0.545634 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3m_torch_sparse.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnnsmac_cnn'
network:
name: actor_critic
separate: True
value_shape: 2
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
reward_sparse: True
transpose: False
random_invalid_step: False
rnd_config:
scale_value: 1
episodic: True
episode_length: 128
gamma: 0.99
mini_epochs: 2
minibatch_size: 1536
learning_rate: 5e-4
network:
name: rnd_curiosity
mlp:
rnd:
units: [512, 256,128,64]
net:
units: [128, 64, 64]
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,625 | YAML | 19.074074 | 38 | 0.536 |
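
The rnd_config block above adds an RND-style curiosity bonus for this sparse-reward variant ('reward_sparse: True'). A minimal sketch of the underlying idea, assuming the usual random-network-distillation setup; this is not the rl_games 'rnd_curiosity' network, and the sizes are placeholders:

import torch
import torch.nn as nn

obs_dim, emb_dim = 48, 64  # placeholder sizes

# A fixed random 'target' network and a trained 'predictor'; the prediction error on a
# state serves both as the predictor's training loss and as the intrinsic reward, so
# rarely-visited states (large error) earn a bonus on top of the sparse env reward.
target = nn.Sequential(nn.Linear(obs_dim, 128), nn.ELU(), nn.Linear(128, emb_dim))
predictor = nn.Sequential(nn.Linear(obs_dim, 128), nn.ELU(), nn.Linear(128, emb_dim))
for p in target.parameters():
    p.requires_grad_(False)  # the target stays random and is never updated

def intrinsic_reward(obs: torch.Tensor) -> torch.Tensor:
    with torch.no_grad():
        t = target(obs)
    return ((predictor(obs) - t) ** 2).mean(dim=-1)
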
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/2m_vs_1z_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2m_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
random_invalid_step: False | 978 | YAML | 17.129629 | 32 | 0.580777 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/5m_vs_6m_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5m_vs_6m_cv.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 5m_vs_6m_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
entropy_coef: 0.005
truncate_grads: True
grad_norm: 1.5
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560 # 5 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length: 8
#max_epochs: 10000
env_config:
name: 5m_vs_6m
central_value: False
reward_only_positive: True
obs_last_action: True
apply_agent_ids: False
player:
render: False
games_num: 200
n_game_life: 1
determenistic: True
#reward_negative_scale: 0.1 | 1,365 | YAML | 18.239436 | 34 | 0.58022 |
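
In the recurrent configs, seq_length sets the truncated-BPTT window: the 128-step rollout is cut into chunks of 8 transitions, and the LSTM is unrolled with gradients only inside a chunk. A rough illustration of that chunking with hypothetical tensor shapes (not code from rl_games):

import torch

horizon_length, num_envs, obs_dim, seq_length = 128, 8, 72, 8  # obs_dim is a placeholder
rollout = torch.randn(horizon_length, num_envs, obs_dim)

# Split the time axis into windows of seq_length steps; each window is processed by the
# LSTM starting from its stored hidden state, and gradients stop at window boundaries.
windows = rollout.split(seq_length, dim=0)
assert len(windows) == horizon_length // seq_length  # 16 windows of 8 steps each
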
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3s_vs_5z_cv_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
truncate_grads: True
grad_norm: 0.5
entropy_coef: 0.005
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length : 4
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 0.5
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1 | 1,745 | YAML | 19.068965 | 34 | 0.553582 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/corridor.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/corridor_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
frames: 4
transpose: True
random_invalid_step: False | 1,511 | YAML | 18.636363 | 32 | 0.550629 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/10m_vs_11m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/27msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 10m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 10m_vs_11m
frames: 14
transpose: False
random_invalid_step: False | 1,460 | YAML | 18.48 | 33 | 0.545205 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/corridor_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/corridor_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
reward_negative_scale: 0.05
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 3e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,598 | YAML | 19.5 | 34 | 0.571339 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/27m_vs_30m_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 27m_vs_30m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3456
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 8
use_action_masks: True
ignore_dead_batches : False
#max_epochs: 10000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
env_config:
name: 27m_vs_30m
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
apply_agent_ids: True | 1,776 | YAML | 19.193182 | 32 | 0.556869 |
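
'apply_agent_ids: True' asks the environment wrapper to tag each agent's observation with an identifier, so the shared policy and central-value networks can condition on which of the 27 marines they are acting for. A small illustration of the usual one-hot scheme (shapes are placeholders; the exact encoding used by the SMAC wrapper is not shown in this file):

import torch

num_agents, obs_dim = 27, 72  # placeholders for 27m_vs_30m
obs = torch.randn(num_agents, obs_dim)

agent_ids = torch.eye(num_agents)                    # one-hot id per agent
obs_with_ids = torch.cat([obs, agent_ids], dim=-1)   # shape: (27, obs_dim + 27)
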
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/2m_vs_1z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2m_vs_1z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2s_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
random_invalid_step: False | 972 | YAML | 17.35849 | 32 | 0.583333 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False | 1,022 | YAML | 17.267857 | 32 | 0.588063 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/corridor_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2c_vs_64zgsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1.4241
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
frames: 4
transpose: False
random_invalid_step: False | 1,542 | YAML | 18.782051 | 40 | 0.552529 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/6h_vs_8z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
frames: 4
transpose: True
random_invalid_step: False
| 1,512 | YAML | 18.397436 | 32 | 0.546296 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3m.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 4
transpose: True
random_invalid_step: False | 1,493 | YAML | 18.402597 | 32 | 0.545211 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/6h_vs_8z_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: ''
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: False
config:
name: 6h_vs_8z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
#reward_negative_scale: 0.9
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: True
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: False | 1,734 | YAML | 18.942529 | 34 | 0.553633 |
NVlabs/DiffRL/externals/rl_games/rl_games/configs/smac/3s_vs_5z_torch_lstm2.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
before_mlp: False
config:
name: 3s_vs_5z2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
max_epochs: 20000
env_config:
name: 3s_vs_5z
frames: 1
random_invalid_step: False | 1,093 | YAML | 17.542373 | 32 | 0.573651 |