file_path (string, lengths 20-207) | content (string, lengths 5-3.85M) | size (int64, 5-3.85M) | lang (9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93)
---|---|---|---|---|---|---|
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/models.py | import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
from rl_games.algos_tf14 import networks
tfd = tfp.distributions
def entry_stop_gradients(target, mask):
mask_h = tf.abs(mask-1)
return tf.stop_gradient(mask_h * target) + mask * target
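# Editor note (not in the original source): for a 0/1 mask this returns the same
# forward value as `target`, but gradients flow only through entries where
# mask == 1; masked-out entries are wrapped in tf.stop_gradient.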
class BaseModel(object):
def is_rnn(self):
return False
class ModelA2C(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
action_mask_ph = dict.get('action_mask_ph', None)
is_train = prev_actions_ph is not None
logits, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=False, is_train=is_train,reuse=reuse)
#if action_mask_ph is not None:
#masks = tf.layers.dense(tf.to_float(action_mask_ph), actions_num, activation=tf.nn.elu)
#logits = masks + logits
#logits = entry_stop_gradients(logits, tf.to_float(action_mask_ph))
probs = tf.nn.softmax(logits)
# Gumbel Softmax
if not is_train:
u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
rand_logits = logits - tf.log(-tf.log(u))
if action_mask_ph is not None:
inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
rand_logits = rand_logits + inf_mask
logits = logits + inf_mask
action = tf.argmax(rand_logits, axis=-1)
one_hot_actions = tf.one_hot(action, actions_num)
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=probs)
if not is_train:
neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.stop_gradient(one_hot_actions))
return neglogp, value, action, entropy, logits
else:
prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
return prev_neglogp, value, None, entropy
class ModelA2CContinuous(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
is_train = prev_actions_ph is not None
mu, sigma, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train = is_train, reuse=reuse)
norm_dist = tfd.Normal(mu, sigma)
action = tf.squeeze(norm_dist.sample(1), axis=0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, sigma
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, sigma
class ModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
is_train = prev_actions_ph is not None
mean, logstd, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train=True, reuse=reuse)
std = tf.exp(logstd)
norm_dist = tfd.Normal(mean, std)
action = mean + std * tf.random_normal(tf.shape(mean))
#action = tf.squeeze(norm_dist.sample(1), axis=0)
#action = tf.clip_by_value(action, -1.0, 1.0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
if prev_actions_ph is None:
neglogp = self.neglogp(action, mean, std, logstd)
return neglogp, value, action, entropy, mean, std
prev_neglogp = self.neglogp(prev_actions_ph, mean, std, logstd)
return prev_neglogp, value, action, entropy, mean, std
def neglogp(self, x, mean, std, logstd):
return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf.reduce_sum(logstd, axis=-1)
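# Editor note (not in the original source): the method above is the standard
# negative log-likelihood of a diagonal Gaussian,
#   -log N(x | mean, std) = 0.5 * sum(((x - mean) / std)^2)
#                           + 0.5 * d * log(2 * pi) + sum(log(std)),
# where d is the action dimension; the same expression is repeated in
# LSTMModelA2CContinuousLogStd below.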
class LSTMModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def is_single_batched(self):
return False
def neglogp(self, x, mean, std, logstd):
return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf.reduce_sum(logstd, axis=-1)
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
is_train = prev_actions_ph is not None
mu, logstd, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
std = tf.exp(logstd)
action = mu + std * tf.random_normal(tf.shape(mu))
norm_dist = tfd.Normal(mu, std)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2CContinuous(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def is_single_batched(self):
return False
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
is_train = prev_actions_ph is not None
mu, var, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
sigma = tf.sqrt(var)
norm_dist = tfd.Normal(mu, sigma)
action = tf.squeeze(norm_dist.sample(1), axis=0)
#action = tf.clip_by_value(action, -1.0, 1.0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2C(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
action_mask_ph = dict.get('action_mask_ph', None)
is_train = prev_actions_ph is not None
logits, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=False, is_train=is_train, reuse=reuse)
if not is_train:
u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
rand_logits = logits - tf.log(-tf.log(u))
if action_mask_ph is not None:
inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
rand_logits = rand_logits + inf_mask
logits = logits + inf_mask
action = tf.argmax(rand_logits, axis=-1)
one_hot_actions = tf.one_hot(action, actions_num)
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.nn.softmax(logits))
if not is_train:
neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot_actions)
return neglogp, value, action, entropy, states_ph, masks_ph, lstm_state, initial_state, logits
prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
return prev_neglogp, value, None, entropy, states_ph, masks_ph, lstm_state, initial_state
class AtariDQN(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
'''
TODO: fix is_train
'''
is_train = name == 'agent'
return self.network(name=name, inputs=inputs, actions_num=actions_num, is_train=is_train, reuse=reuse)
| 10,090 | Python | 40.356557 | 167 | 0.599405 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/model_builder.py | from rl_games.common import object_factory
import rl_games.algos_tf14
from rl_games.algos_tf14 import network_builder
from rl_games.algos_tf14 import models
class ModelBuilder:
def __init__(self):
self.model_factory = object_factory.ObjectFactory()
self.model_factory.register_builder('discrete_a2c', lambda network, **kwargs : models.ModelA2C(network))
self.model_factory.register_builder('discrete_a2c_lstm', lambda network, **kwargs : models.LSTMModelA2C(network))
self.model_factory.register_builder('continuous_a2c', lambda network, **kwargs : models.ModelA2CContinuous(network))
self.model_factory.register_builder('continuous_a2c_logstd', lambda network, **kwargs : models.ModelA2CContinuousLogStd(network))
self.model_factory.register_builder('continuous_a2c_lstm', lambda network, **kwargs : models.LSTMModelA2CContinuous(network))
self.model_factory.register_builder('continuous_a2c_lstm_logstd', lambda network, **kwargs : models.LSTMModelA2CContinuousLogStd(network))
self.model_factory.register_builder('dqn', lambda network, **kwargs : models.AtariDQN(network))
self.network_factory = object_factory.ObjectFactory()
self.network_factory.register_builder('actor_critic', lambda **kwargs : network_builder.A2CBuilder())
self.network_factory.register_builder('dqn', lambda **kwargs : network_builder.DQNBuilder())
def load(self, params):
self.model_name = params['model']['name']
self.network_name = params['network']['name']
network = self.network_factory.create(self.network_name)
network.load(params['network'])
model = self.model_factory.create(self.model_name, network=network)
return model
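# Hedged usage sketch (added by the editor, not part of the original file):
# ModelBuilder.load consumes the 'model' and 'network' sections of the YAML
# configs shipped with rl_games, e.g. roughly:
#
#   params = {
#       'model':   {'name': 'continuous_a2c_logstd'},
#       'network': {'name': 'actor_critic', 'separate': True,
#                   'space': {'continuous': {'mu_activation': 'None',
#                                            'sigma_activation': 'None',
#                                            'mu_init': {'name': 'glorot_normal_initializer'},
#                                            'sigma_init': {'name': 'const_initializer', 'value': 0},
#                                            'fixed_sigma': True}},
#                   'mlp': {'units': [64, 64], 'activation': 'relu',
#                           'initializer': {'name': 'glorot_normal_initializer'},
#                           'regularizer': {'name': 'None'}}},
#   }
#   model = ModelBuilder().load(params)   # -> models.ModelA2CContinuousLogStd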
| 1,761 | Python | 49.342856 | 146 | 0.721181 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/network_builder.py | import tensorflow as tf
import numpy as np
from rl_games.algos_tf14 import networks
from rl_games.common import object_factory
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
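# Editor note (not in the original source): normc_initializer draws Gaussian
# weights and rescales each column to L2 norm `std`, the column-normalized
# initialization used in the OpenAI baselines policy networks.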
class NetworkBuilder:
def __init__(self, **kwargs):
self.activations_factory = object_factory.ObjectFactory()
self.activations_factory.register_builder('relu', lambda **kwargs : tf.nn.relu)
self.activations_factory.register_builder('tanh', lambda **kwargs : tf.nn.tanh)
self.activations_factory.register_builder('sigmoid', lambda **kwargs : tf.nn.sigmoid)
self.activations_factory.register_builder('elu', lambda **kwargs : tf.nn.elu)
self.activations_factory.register_builder('selu', lambda **kwargs : tf.nn.selu)
self.activations_factory.register_builder('softplus', lambda **kwargs : tf.nn.softplus)
self.activations_factory.register_builder('None', lambda **kwargs : None)
self.init_factory = object_factory.ObjectFactory()
self.init_factory.register_builder('normc_initializer', lambda **kwargs : normc_initializer(**kwargs))
self.init_factory.register_builder('const_initializer', lambda **kwargs : tf.constant_initializer(**kwargs))
self.init_factory.register_builder('orthogonal_initializer', lambda **kwargs : tf.orthogonal_initializer(**kwargs))
self.init_factory.register_builder('glorot_normal_initializer', lambda **kwargs : tf.glorot_normal_initializer(**kwargs))
self.init_factory.register_builder('glorot_uniform_initializer', lambda **kwargs : tf.glorot_uniform_initializer(**kwargs))
self.init_factory.register_builder('variance_scaling_initializer', lambda **kwargs : tf.variance_scaling_initializer(**kwargs))
self.init_factory.register_builder('random_uniform_initializer', lambda **kwargs : tf.random_uniform_initializer(**kwargs))
self.init_factory.register_builder('None', lambda **kwargs : None)
self.regularizer_factory = object_factory.ObjectFactory()
self.regularizer_factory.register_builder('l1_regularizer', lambda **kwargs : tf.contrib.layers.l1_regularizer(**kwargs))
self.regularizer_factory.register_builder('l2_regularizer', lambda **kwargs : tf.contrib.layers.l2_regularizer(**kwargs))
self.regularizer_factory.register_builder('l1l2_regularizer', lambda **kwargs : tf.contrib.layers.l1l2_regularizer(**kwargs))
self.regularizer_factory.register_builder('None', lambda **kwargs : None)
def load(self, params):
pass
def build(self, name, **kwargs):
pass
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
def _noisy_dense(self, inputs, units, activation, kernel_initializer, kernel_regularizer, name):
return networks.noisy_dense(inputs, units, name, True, activation)
def _build_mlp(self,
name,
input,
units,
activation,
initializer,
regularizer,
norm_func_name = None,
dense_func = tf.layers.dense,
is_train=True):
out = input
ind = 0
for unit in units:
ind += 1
out = dense_func(out, units=unit,
activation=self.activations_factory.create(activation),
kernel_initializer = self.init_factory.create(**initializer),
kernel_regularizer = self.regularizer_factory.create(**regularizer),
#bias_initializer=tf.random_uniform_initializer(-0.1, 0.1),
name=name + str(ind))
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, training=is_train)
return out
def _build_lstm(self, name, input, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.float32, [batch_num])
states_ph = tf.placeholder(tf.float32, [games_num, 2*units])
lstm_out, lstm_state, initial_state = networks.openai_lstm(name, input, dones_ph=dones_ph, states_ph=states_ph, units=units, env_num=games_num, batch_num=batch_num)
return lstm_out, lstm_state, initial_state, dones_ph, states_ph
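# Editor note (not in the original source): states_ph carries the LSTM cell and
# hidden state concatenated along the feature axis, hence the 2*units width per
# game; _build_lstm_sep below doubles that again for separate actor/critic LSTMs.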
def _build_lstm2(self, name, inputs, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [games_num, 2*units])
hidden = tf.concat((inputs[0], inputs[1]), axis=1)
lstm_out, lstm_state, initial_state = networks.openai_lstm(name, hidden, dones_ph=dones_ph, states_ph=states_ph, units=units, env_num=games_num, batch_num=batch_num)
#lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
return lstm_out, lstm_state, initial_state, dones_ph, states_ph
def _build_lstm_sep(self, name, inputs, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.bool, [batch_num], name='lstm_masks')
states_ph = tf.placeholder(tf.float32, [games_num, 4*units], name='lstm_states')
statesa, statesc = tf.split(states_ph, 2, axis=1)
a_out, lstm_statea, initial_statea = networks.openai_lstm(name +'a', inputs[0], dones_ph=dones_ph, states_ph=statesa, units=units, env_num=games_num, batch_num=batch_num)
c_out, lstm_statec, initial_statec = networks.openai_lstm(name + 'c', inputs[1], dones_ph=dones_ph, states_ph=statesc, units=units, env_num=games_num, batch_num=batch_num)
lstm_state = tf.concat([lstm_statea, lstm_statec], axis=1)
initial_state = np.concatenate([initial_statea, initial_statec], axis=1)
#lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
return a_out, c_out, lstm_state, initial_state, dones_ph, states_ph
def _build_conv(self, ctype, **kwargs):
print('conv_name:', ctype)
if ctype == 'conv2d':
return self._build_cnn(**kwargs)
if ctype == 'conv1d':
return self._build_cnn1d(**kwargs)
def _build_cnn(self, name, input, convs, activation, initializer, regularizer, norm_func_name=None, is_train=True):
out = input
ind = 0
for conv in convs:
print(out.shape.as_list())
ind += 1
config = conv.copy()
config['filters'] = conv['filters']
config['padding'] = conv['padding']
config['kernel_size'] = [conv['kernel_size']] * 2
config['strides'] = [conv['strides']] * 2
config['activation'] = self.activations_factory.create(activation)
config['kernel_initializer'] = self.init_factory.create(**initializer)
config['kernel_regularizer'] = self.regularizer_factory.create(**regularizer)
config['name'] = name + str(ind)
out = tf.layers.conv2d(inputs=out, **config)
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, name='bn_'+ config['name'], training=is_train)
return out
def _build_cnn1d(self, name, input, convs, activation, initializer, regularizer, norm_func_name=None, is_train=True):
out = input
ind = 0
print('_build_cnn1d')
for conv in convs:
ind += 1
config = conv.copy()
config['activation'] = self.activations_factory.create(activation)
config['kernel_initializer'] = self.init_factory.create(**initializer)
config['kernel_regularizer'] = self.regularizer_factory.create(**regularizer)
config['name'] = name + str(ind)
#config['bias_initializer'] = tf.random_uniform_initializer,
# bias_initializer=tf.random_uniform_initializer(-0.1, 0.1)
out = tf.layers.conv1d(inputs=out, **config)
print('shapes of layer_' + str(ind), str(out.get_shape().as_list()))
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, training=is_train)
return out
class A2CBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.separate = params['separate']
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous' in params['space']
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
self.has_lstm = 'lstm' in params
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
if self.has_lstm:
self.lstm_units = params['lstm']['units']
self.concated = params['lstm']['concated']
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
batch_num = kwargs.pop('batch_num', 1)
games_num = kwargs.pop('games_num', 1)
is_train = kwargs.pop('is_train', True)
with tf.variable_scope(name, reuse=reuse):
actor_input = critic_input = input
if self.has_cnn:
cnn_args = {
'name' :'actor_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
actor_input = self._build_conv(**cnn_args)
actor_input = tf.contrib.layers.flatten(actor_input)
critic_input = actor_input
if self.separate:
cnn_args['name'] = 'critic_cnn'
critic_input = self._build_conv( **cnn_args)
critic_input = tf.contrib.layers.flatten(critic_input)
mlp_args = {
'name' :'actor_fc',
'input' : actor_input,
'units' :self.units,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out_actor = self._build_mlp(**mlp_args)
if self.separate:
mlp_args['name'] = 'critic_fc'
mlp_args['input'] = critic_input
out_critic = self._build_mlp(**mlp_args)
if self.has_lstm:
if self.concated:
out_actor, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm2('lstm', [out_actor, out_critic], self.lstm_units, batch_num, games_num)
out_critic = out_actor
else:
out_actor, out_critic, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm_sep('lstm_', [out_actor, out_critic], self.lstm_units, batch_num, games_num)
else:
if self.has_lstm:
out_actor, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm('lstm', out_actor, self.lstm_units, batch_num, games_num)
out_critic = out_actor
value = tf.layers.dense(out_critic, units = 1, kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.value_activation), name='value')
if self.is_continuous:
mu = tf.layers.dense(out_actor, units = actions_num, activation=self.activations_factory.create(self.space_config['mu_activation']),
kernel_initializer = self.init_factory.create(**self.space_config['mu_init']), name='mu')
if self.space_config['fixed_sigma']:
sigma_out = tf.get_variable(name='sigma_out', shape=(actions_num), initializer=self.init_factory.create(**self.space_config['sigma_init']), trainable=True)
else:
sigma_out = tf.layers.dense(out_actor, units = actions_num, kernel_initializer=self.init_factory.create(**self.space_config['sigma_init']), activation=self.activations_factory.create(self.space_config['sigma_activation']), name='sigma_out')
if self.has_lstm:
return mu, mu * 0 + sigma_out, value, states_ph, dones_ph, lstm_state, initial_state
return mu, mu * 0 + sigma_out, value
if self.is_discrete:
logits = tf.layers.dense(inputs=out_actor, units=actions_num, name='logits', kernel_initializer = self.init_factory.create(**self.initializer))
if self.has_lstm:
return logits, value, states_ph, dones_ph, lstm_state, initial_state
return logits, value
class DQNBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_dueling = params['dueling']
self.atoms = params['atoms']
self.is_noisy = params['noisy']
self.normalization = params.get('normalization', None)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
is_train = kwargs.pop('is_train', True)
if self.is_noisy:
dense_layer = self._noisy_dense
else:
dense_layer = tf.layers.dense
with tf.variable_scope(name, reuse=reuse):
out = input
if self.has_cnn:
cnn_args = {
'name' :'dqn_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out = self._build_conv(**cnn_args)
out = tf.contrib.layers.flatten(out)
mlp_args = {
'name' :'dqn_mlp',
'input' : out,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train,
'dense_func' : dense_layer
}
if self.is_dueling:
if len(self.units) > 1:
mlp_args['units'] = self.units[:-1]
out = self._build_mlp(**mlp_args)
hidden_value = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_val')
hidden_advantage = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_adv')
value = dense_layer(inputs=hidden_value, units=self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), activation=tf.identity, kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='value')
advantage = dense_layer(inputs=hidden_advantage, units= actions_num * self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='advantage')
advantage = tf.reshape(advantage, shape = [-1, actions_num, self.atoms])
value = tf.reshape(value, shape = [-1, 1, self.atoms])
# dueling aggregation: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)
q_values = value + advantage - tf.reduce_mean(advantage, axis=1, keepdims=True)
else:
mlp_args['units'] = self.units
out = self._build_mlp(**mlp_args)
q_values = dense_layer(inputs=out, units=actions_num *self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='q_vals')
q_values = tf.reshape(q_values, shape = [-1, actions_num, self.atoms])
if self.atoms == 1:
return tf.squeeze(q_values)
else:
return q_values
| 18,263 | Python | 51.034188 | 301 | 0.592345 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/a2c_continuous.py | from rl_games.common import tr_helpers, vecenv
from rl_games.algos_tf14 import networks
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque, OrderedDict
from tensorboardX import SummaryWriter
import gym
import ray
from datetime import datetime
def swap_and_flatten01(arr):
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
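# Editor note (not in the original source): swap_and_flatten01 turns
# (horizon, num_actors, ...) rollout tensors into (horizon * num_actors, ...)
# batches by swapping the first two axes before flattening.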
#(-1, 1) -> (low, high)
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
#(horizon_length, actions_num)
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
c1 = np.log(p1_sigma/p0_sigma + 1e-5)
c2 = (np.square(p0_sigma) + np.square(p1_mu - p0_mu))/(2.0 *(np.square(p1_sigma) + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = np.mean(np.sum(kl, axis = -1)) # returning mean between all steps of sum between all actions
return kl
def policy_kl_tf(p0_mu, p0_sigma, p1_mu, p1_sigma):
c1 = tf.log(p1_sigma/p0_sigma + 1e-5)
c2 = (tf.square(p0_sigma) + tf.square(p1_mu - p0_mu))/(2.0 * (tf.square(p1_sigma) + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = tf.reduce_mean(tf.reduce_sum(kl, axis=-1)) # returning mean between all steps of sum between all actions
return kl
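# Editor note (not in the original source): both functions above implement the
# closed-form KL divergence between diagonal Gaussians,
#   KL(p0 || p1) = log(sigma1 / sigma0)
#                  + (sigma0^2 + (mu0 - mu1)^2) / (2 * sigma1^2) - 1/2,
# summed over action dimensions and averaged over the batch.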
class A2CAgent:
def __init__(self, sess, base_name, observation_space, action_space, config):
self.name = base_name
self.actions_low = action_space.low
self.actions_high = action_space.high
self.env_name = config['env_name']
self.ppo = config['ppo']
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.e_clip = config['e_clip']
self.clip_value = config['clip_value']
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_actors = config['num_actors']
self.env_config = config.get('env_config', {})
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.num_agents = self.vec_env.get_number_of_agents()
self.horizon_length = config['horizon_length']
self.normalize_advantage = config['normalize_advantage']
self.config = config
self.state_shape = observation_space.shape
self.critic_coef = config['critic_coef']
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.sess = sess
self.grad_norm = config['grad_norm']
self.gamma = self.config['gamma']
self.tau = self.config['tau']
self.normalize_input = self.config['normalize_input']
self.seq_len = self.config['seq_length']
self.dones = np.asarray([False]*self.num_actors, dtype=np.bool)
self.current_rewards = np.asarray([0]*self.num_actors, dtype=np.float32)
self.current_lengths = np.asarray([0]*self.num_actors, dtype=np.float32)
self.game_rewards = deque([], maxlen=100)
self.game_lengths = deque([], maxlen=100)
self.obs_ph = tf.placeholder('float32', (None, ) + self.state_shape, name = 'obs')
self.target_obs_ph = tf.placeholder('float32', (None, ) + self.state_shape, name = 'target_obs')
self.actions_num = action_space.shape[0]
self.actions_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'actions')
self.old_mu_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'old_mu_ph')
self.old_sigma_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'old_sigma_ph')
self.old_neglogp_actions_ph = tf.placeholder('float32', (None, ), name = 'old_logpactions')
self.rewards_ph = tf.placeholder('float32', (None,), name = 'rewards')
self.old_values_ph = tf.placeholder('float32', (None,), name = 'old_values')
self.advantages_ph = tf.placeholder('float32', (None,), name = 'advantages')
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.epoch_num = tf.Variable(tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
self.bounds_loss_coef = config.get('bounds_loss_coef', None)
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
if self.is_polynom_decay_lr:
self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=config['max_epochs'], end_learning_rate=0.001, power=config.get('decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=config['max_epochs'], decay_rate = config['decay_rate'])
self.input_obs = self.obs_ph
self.input_target_obs = self.target_obs_ph
if observation_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_target_obs = tf.to_float(self.input_target_obs) / 255.0
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = observation_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=True)
self.input_target_obs = self.moving_mean_std.normalize(self.input_target_obs, train=False)
games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
self.train_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : self.config['minibatch_size'],
'games_num' : games_num,
'actions_num' : self.actions_num,
'prev_actions_ph' : self.actions_ph,
}
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_target_obs,
'batch_num' : self.num_actors,
'games_num' : self.num_actors,
'actions_num' : self.actions_num,
'prev_actions_ph' : None,
}
self.states = None
if self.network.is_rnn():
self.neglogp_actions, self.state_values, self.action, self.entropy, self.mu, self.sigma, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_mu, self.target_sigma, self.target_states_ph, self.target_masks_ph, self.target_lstm_state, self.target_initial_state = self.network(self.run_dict, reuse=True)
self.states = self.target_initial_state
else:
self.neglogp_actions, self.state_values, self.action, self.entropy, self.mu, self.sigma = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_mu, self.target_sigma = self.network(self.run_dict, reuse=True)
curr_e_clip = self.e_clip * self.lr_multiplier
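# Editor note (not in the original source): the branch below builds the PPO
# clipped surrogate, L = E[max(-r_t * A_t, -clip(r_t, 1-eps, 1+eps) * A_t)]
# with r_t = exp(old_neglogp - neglogp), and falls back to the plain
# advantage-weighted policy-gradient loss when ppo is disabled.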
if (self.ppo):
self.prob_ratio = tf.exp(self.old_neglogp_actions_ph - self.neglogp_actions)
self.prob_ratio = tf.clip_by_value(self.prob_ratio, 0.0, 16.0)
self.pg_loss_unclipped = -tf.multiply(self.advantages_ph, self.prob_ratio)
self.pg_loss_clipped = -tf.multiply(self.advantages_ph, tf.clip_by_value(self.prob_ratio, 1.- curr_e_clip, 1.+ curr_e_clip))
self.actor_loss = tf.reduce_mean(tf.maximum(self.pg_loss_unclipped, self.pg_loss_clipped))
else:
self.actor_loss = tf.reduce_mean(self.neglogp_actions * self.advantages_ph)
self.c_loss = (tf.squeeze(self.state_values) - self.rewards_ph)**2
if self.clip_value:
self.cliped_values = self.old_values_ph + tf.clip_by_value(tf.squeeze(self.state_values) - self.old_values_ph, -curr_e_clip, curr_e_clip)
self.c_loss_clipped = tf.square(self.cliped_values - self.rewards_ph)
self.critic_loss = tf.reduce_mean(tf.maximum(self.c_loss, self.c_loss_clipped))
else:
self.critic_loss = tf.reduce_mean(self.c_loss)
self._calc_kl_dist()
self.loss = self.actor_loss + 0.5 * self.critic_coef * self.critic_loss - self.config['entropy_coef'] * self.entropy
self._apply_bound_loss()
self.reg_loss = tf.losses.get_regularization_loss()
self.loss += self.reg_loss
self.train_step = tf.train.AdamOptimizer(self.current_lr * self.lr_multiplier)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
grads = tf.gradients(self.loss, self.weights)
if self.config['truncate_grads']:
grads, _ = tf.clip_by_global_norm(grads, self.grad_norm)
grads = list(zip(grads, self.weights))
self.train_op = self.train_step.apply_gradients(grads)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
def _calc_kl_dist(self):
self.kl_dist = policy_kl_tf(self.mu, self.sigma, self.old_mu_ph, self.old_sigma_ph)
if self.is_adaptive_lr:
self.current_lr = tf.where(self.kl_dist > (2.0 * self.kl_threshold), tf.maximum(self.current_lr / 1.5, 1e-6), self.current_lr)
self.current_lr = tf.where(self.kl_dist < (0.5 * self.kl_threshold), tf.minimum(self.current_lr * 1.5, 1e-2), self.current_lr)
def _apply_bound_loss(self):
if self.bounds_loss_coef:
soft_bound = 1.1
mu_loss_high = tf.square(tf.maximum(0.0, self.mu - soft_bound))
mu_loss_low = tf.square(tf.maximum(0.0, -soft_bound - self.mu))
self.bounds_loss = tf.reduce_sum(mu_loss_high + mu_loss_low, axis=1)
self.loss += self.bounds_loss * self.bounds_loss_coef
else:
self.bounds_loss = None
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def get_action_values(self, obs):
run_ops = [self.target_action, self.target_state_values, self.target_neglogp, self.target_mu, self.target_sigma]
if self.network.is_rnn():
run_ops.append(self.target_lstm_state)
return self.sess.run(run_ops, {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return (*self.sess.run(run_ops, {self.target_obs_ph : obs}), None)
def get_values(self, obs):
if self.network.is_rnn():
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs})
def play_steps(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs, mb_mus, mb_sigmas = [],[],[],[],[],[],[],[]
mb_states = []
epinfos = []
# For n in range number of steps
for _ in range(self.horizon_length):
if self.network.is_rnn():
mb_states.append(self.states)
actions, values, neglogpacs, mu, sigma, self.states = self.get_action_values(self.obs)
#actions = np.squeeze(actions)
values = np.squeeze(values)
neglogpacs = np.squeeze(neglogpacs)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones.copy())
mb_mus.append(mu)
mb_sigmas.append(sigma)
self.obs[:], rewards, self.dones, infos = self.vec_env.step(rescale_actions(self.actions_low, self.actions_high, np.clip(actions, -1.0, 1.0)))
self.current_rewards += rewards
self.current_lengths += 1
for reward, length, done in zip(self.current_rewards, self.current_lengths, self.dones):
if done:
self.game_rewards.append(reward)
self.game_lengths.append(length)
shaped_rewards = self.rewards_shaper(rewards)
epinfos.append(infos)
mb_rewards.append(shaped_rewards)
self.current_rewards = self.current_rewards * (1.0 - self.dones)
self.current_lengths = self.current_lengths * (1.0 - self.dones)
#using openai baseline approach
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_mus = np.asarray(mb_mus, dtype=np.float32)
mb_sigmas = np.asarray(mb_sigmas, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
mb_states = np.asarray(mb_states, dtype=np.float32)
last_values = self.get_values(self.obs)
last_values = np.squeeze(last_values)
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
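# Editor note (not in the original source): standard GAE(lambda) recursion,
#   delta_t = reward_t + gamma * nextvalues * nextnonterminal - values_t
#   adv_t   = delta_t + gamma * tau * nextnonterminal * adv_{t+1},
# with self.tau playing the role of lambda.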
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
if self.network.is_rnn():
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_mus, mb_sigmas, mb_states )), epinfos)
else:
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_mus, mb_sigmas)), None, epinfos)
return result
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def train(self):
max_epochs = self.config.get('max_epochs', 1e6)
self.obs = self.vec_env.reset()
batch_size = self.horizon_length * self.num_actors * self.num_agents
minibatch_size = self.config['minibatch_size']
mini_epochs_num = self.config['mini_epochs']
num_minibatches = batch_size // minibatch_size
last_lr = self.config['learning_rate']
self.last_mean_rewards = -100500
epoch_num = 0
frame = 0
update_time = 0
play_time = 0
start_time = time.time()
total_time = 0
while True:
play_time_start = time.time()
epoch_num = self.update_epoch()
frame += batch_size
obses, returns, dones, actions, values, neglogpacs, mus, sigmas, lstm_states, _ = self.play_steps()
advantages = returns - values
if self.normalize_advantage:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
a_losses = []
c_losses = []
b_losses = []
entropies = []
kls = []
play_time_end = time.time()
play_time = play_time_end - play_time_start
update_time_start = time.time()
if self.network.is_rnn():
total_games = batch_size // self.seq_len
num_games_batch = minibatch_size // self.seq_len
game_indexes = np.arange(total_games)
flat_indexes = np.arange(total_games * self.seq_len).reshape(total_games, self.seq_len)
lstm_states = lstm_states[::self.seq_len]
for _ in range(0, mini_epochs_num):
np.random.shuffle(game_indexes)
for i in range(0, num_minibatches):
batch = range(i * num_games_batch, (i + 1) * num_games_batch)
mb_indexes = game_indexes[batch]
mbatch = flat_indexes[mb_indexes].ravel()
dict = {}
dict[self.old_values_ph] = values[mbatch]
dict[self.old_neglogp_actions_ph] = neglogpacs[mbatch]
dict[self.advantages_ph] = advantages[mbatch]
dict[self.rewards_ph] = returns[mbatch]
dict[self.actions_ph] = actions[mbatch]
dict[self.obs_ph] = obses[mbatch]
dict[self.old_mu_ph] = mus[mbatch]
dict[self.old_sigma_ph] = sigmas[mbatch]
dict[self.masks_ph] = dones[mbatch]
dict[self.states_ph] = lstm_states[mb_indexes]
dict[self.learning_rate_ph] = last_lr
run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_dist, self.current_lr, self.mu, self.sigma, self.lr_multiplier]
if self.bounds_loss is not None:
run_ops.append(self.bounds_loss)
run_ops.append(self.train_op)
run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
res_dict = self.sess.run(run_ops, dict)
a_loss = res_dict[0]
c_loss = res_dict[1]
entropy = res_dict[2]
kl = res_dict[3]
last_lr = res_dict[4]
cmu = res_dict[5]
csigma = res_dict[6]
lr_mul = res_dict[7]
if self.bounds_loss is not None:
b_loss = res_dict[8]
b_losses.append(b_loss)
mus[mbatch] = cmu
sigmas[mbatch] = csigma
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
else:
for _ in range(0, mini_epochs_num):
permutation = np.random.permutation(batch_size)
obses = obses[permutation]
returns = returns[permutation]
actions = actions[permutation]
values = values[permutation]
neglogpacs = neglogpacs[permutation]
advantages = advantages[permutation]
mus = mus[permutation]
sigmas = sigmas[permutation]
for i in range(0, num_minibatches):
batch = range(i * minibatch_size, (i + 1) * minibatch_size)
dict = {self.obs_ph: obses[batch], self.actions_ph : actions[batch], self.rewards_ph : returns[batch],
self.advantages_ph : advantages[batch], self.old_neglogp_actions_ph : neglogpacs[batch], self.old_values_ph : values[batch]}
dict[self.old_mu_ph] = mus[batch]
dict[self.old_sigma_ph] = sigmas[batch]
dict[self.learning_rate_ph] = last_lr
run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_dist, self.current_lr, self.mu, self.sigma, self.lr_multiplier]
if self.bounds_loss is not None:
run_ops.append(self.bounds_loss)
run_ops.append(self.train_op)
run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
res_dict = self.sess.run(run_ops, dict)
a_loss = res_dict[0]
c_loss = res_dict[1]
entropy = res_dict[2]
kl = res_dict[3]
last_lr = res_dict[4]
cmu = res_dict[5]
csigma = res_dict[6]
lr_mul = res_dict[7]
if self.bounds_loss is not None:
b_loss = res_dict[8]
b_losses.append(b_loss)
mus[batch] = cmu
sigmas[batch] = csigma
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
update_time_end = time.time()
update_time = update_time_end - update_time_start
sum_time = update_time + play_time
total_time = update_time_end - start_time
if self.rank == 0:
scaled_time = sum_time # self.num_agents *
scaled_play_time = play_time # self.num_agents *
if self.print_stats:
fps_step = batch_size / scaled_play_time
fps_total = batch_size / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
# performance
self.writer.add_scalar('performance/total_fps', batch_size / sum_time, frame)
self.writer.add_scalar('performance/step_fps', batch_size / play_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('performance/update_time', update_time, frame)
# losses
self.writer.add_scalar('losses/a_loss', np.mean(a_losses), frame)
self.writer.add_scalar('losses/c_loss', np.mean(c_losses), frame)
if len(b_losses) > 0:
self.writer.add_scalar('losses/bounds_loss', np.mean(b_losses), frame)
self.writer.add_scalar('losses/entropy', np.mean(entropies), frame)
# info
self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
self.writer.add_scalar('info/kl', np.mean(kls), frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
if len(self.game_rewards) > 0:
mean_rewards = np.mean(self.game_rewards)
mean_lengths = np.mean(self.game_lengths)
self.writer.add_scalar('rewards/frame', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/frame', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if mean_rewards > self.last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
self.save("./nn/" + self.name)
if self.last_mean_rewards > self.config['score_to_win']:
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
if epoch_num > max_epochs:
print('MAX EPOCHS NUM!')
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
update_time = 0
| 24,499 | Python | 48.295775 | 253 | 0.561982 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_pendulum_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: glorot_normal_initializer
gain: 0.01
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 32]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 2
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-3
name: pendulum
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: Pendulum-v0
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.016
normalize_input: False
bounds_loss_coef: 0
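# Editor note (not part of the original config): the keys under `config` mirror
# the attributes read by A2CAgent.__init__ in algos_tf14/a2c_continuous.py above
# (e.g. horizon_length, minibatch_size, lr_schedule, kl_threshold), while the
# `model`/`network` sections feed ModelBuilder.load in model_builder.py.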
| 1,266 | YAML | 18.796875 | 41 | 0.559242 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_lunar.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: glorot_normal_initializer
#scale: 0.01
sigma_init:
name: const_initializer
value: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: relu
initializer:
name: glorot_normal_initializer
#gain: 2
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: LunarLanderContinuous-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0
| 1,271 | YAML | 18.875 | 41 | 0.558615 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_cartpole_masked_velocity_rnn.yaml |
#Cartpole without velocities lstm test
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [64, 64]
activation: relu
normalization: 'layer_norm'
norm_only_first_layer: True
initializer:
name: default
regularizer:
name: None
rnn:
name: 'lstm'
units: 64
layers: 1
before_mlp: False
concat_input: True
layer_norm: True
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: cartpole_vel_info
score_to_win: 500
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: CartPoleMaskedVelocity-v1
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 4 | 1,117 | YAML | 17.327869 | 39 | 0.598926 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppg_walker.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_ppg
score_to_win: 290
grad_norm: 0.5
entropy_coef: 0 #-0.005
truncate_grads: False
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 1
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.004
normalize_input: False
bounds_loss_coef: 0.0005
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 1024
mini_epochs: 6
player:
render: True
determenistic: True
games_num: 200
| 1,536 | YAML | 20.347222 | 56 | 0.558594 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_continuous.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: openai_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: BipedalWalkerHardcore-v3
| 1,271 | YAML | 18.272727 | 39 | 0.552321 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_humanoid_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [256,128,64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: 'nn/humanoid_torch.pth'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'humanoid_torch'
score_to_win : 20000
grad_norm : 0.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexHumanoid
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 32
minibatch_size : 4096
mini_epochs : 4
critic_coef : 1
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : False
normalize_value : True
bounds_loss_coef: 0.000
max_epochs: 12000 | 1,468 | YAML | 18.851351 | 39 | 0.547684 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_reacher.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128]
activation: relu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn1:
name: lstm
units: 64
layers: 1
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: ReacherPyBulletEnv-v0
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: none
kl_threshold: 0.008
normalize_input: True
seq_length: 16
bounds_loss_coef: 0.00
max_epochs: 10000
weight_decay: 0.0001
player:
render: True
games_num: 200
experiment_config1:
start_exp: 0
start_sub_exp: 0
experiments:
- exp:
- path: config.bounds_loss_coef
value: [0.5]
| 1,593 | YAML | 18.925 | 56 | 0.549278 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_walker.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 256
mini_epochs: 4
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.005
normalize_input: True
bounds_loss_coef: 0.00
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
player:
render: True
determenistic: True
games_num: 200
| 1,408 | YAML | 19.720588 | 56 | 0.555398 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_pendulum.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.01
sigma_init:
name: const_initializer
value: 0
fixed_sigma: False
mlp:
units: [32, 32]
activation: elu
initializer:
name: default
scale: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: Pendulum-v0
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0
| 1,223 | YAML | 18.125 | 39 | 0.546198 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_revenge_rnd.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
value_shape: 2
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
config:
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.999
tau: 0.9
learning_rate: 1e-4
name: atari
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.002
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.1
clip_value: True
num_actors: 32
horizon_length: 512
minibatch_size: 4096
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
env_config:
name: MontezumaRevengeNoFrameskip-v4
rnd_config:
scale_value: 1.0
episodic: True
episode_length: 256
gamma: 0.99
mini_epochs: 2
minibatch_size: 1024
learning_rate: 1e-4
network:
name: rnd_curiosity
cnn:
type: conv2d
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnd:
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
net:
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
rnd:
units: [512,512, 512]
net:
units: [512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
scale: 2 | 3,072 | YAML | 21.762963 | 42 | 0.427083 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_humanoid_torch_rnn.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
normalization: 'layer_norm'
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.01
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [256,128]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: lstm
units: 64
layers: 1
before_mlp: False
load_checkpoint: True
load_path: 'nn/humanoid_torch_rnn.pth'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 8e-4
name : 'humanoid_torch_rnn'
score_to_win : 20000
grad_norm : 5
entropy_coef : 0
truncate_grads : True
env_name : FlexHumanoid
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 256
minibatch_size : 8192
mini_epochs : 4
critic_coef : 1
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length: 16
bounds_loss_coef: 0.000
weight_decay: 0.001
    max_epochs: 6000
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_cartpole.yaml
#Cartpole MLP
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [32, 32]
activation: relu
initializer:
name: default
regularizer:
name: None
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: cartpole_vel_info
score_to_win: 500
grad_norm: 1.0
entropy_coef: 0.01
truncate_grads: True
env_name: CartPole-v1
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 32
minibatch_size: 64
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
    device: 'cuda:0'
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_ant_torch.yaml
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [128, 64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 16
minibatch_size : 4096
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
normalize_value : True
bounds_loss_coef: 0.0001
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_continuous_lstm.yaml
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_lstm_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: normc_initializer
std: 0.01
sigma_init:
name: const_initializer
value: 0.0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: relu
initializer:
name: normc_initializer
std: 1
regularizer:
name: 'None'
lstm:
units: 128
concated: False
load_checkpoint: False
load_path: 'nn/runBipedalWalkerHardcore-v2'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: walker_lstm
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.000
truncate_grads: True
env_name: BipedalWalkerHardcore-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.5
max_epochs: 5000
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/carracing_ppo.yaml
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
load_checkpoint: False
load_path: 'nn/runCarRacing-v0'
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: racing
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.000
truncate_grads: True
env_name: CarRacing-v0
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: True
#lr_schedule: adaptive
# kl_threshold: 0.008
bounds_loss_coef: 0.001
# max_epochs: 5000
player:
render: True
    deterministic: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppg_walker_hardcore.yaml
params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
load_checkpoint: True
load_path: './nn/walker_hc_ppg.pth'
config:
reward_shaper:
#min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_hc_ppg
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0 #-0.005
truncate_grads: False
env_name: BipedalWalkerHardcore-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 4096
minibatch_size: 8192
mini_epochs: 1
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.004
normalize_input: False
bounds_loss_coef: 0.0005
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 1024
mini_epochs: 6
player:
render: True
determenistic: True
games_num: 200
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/rainbow_dqn_breakout.yaml
params:
algo:
name: dqn
model:
name: dqn
load_checkpoint: False
load_path: 'nn/breakoutep=3638750.0rew=201.75'
network:
name: dqn
dueling: True
atoms: 51
noisy: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 'valid'
- filters: 64
kernel_size: 4
strides: 2
padding: 'valid'
- filters: 64
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
gamma : 0.99
learning_rate : 0.0001
steps_per_epoch : 4
batch_size : 32
epsilon : 0.00
min_epsilon : 0.00
epsilon_decay_frames : 1000000
num_epochs_to_copy : 10000
name : 'breakout'
env_name: BreakoutNoFrameskip-v4
is_double : True
score_to_win : 600
num_steps_fill_buffer : 100000
replay_buffer_type : 'prioritized'
replay_buffer_size : 1000000
priority_beta : 0.4
priority_alpha : 0.6
beta_decay_frames : 1000000
max_beta : 1
horizon_length : 3
episodes_to_log : 100
lives_reward : 5
atoms_num : 51
v_min : -10
v_max : 10
games_to_track : 100
lr_schedule : None
max_epochs: 10000000
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_smac.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 6h_vs_8z
frames: 2
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_multiwalker.yaml
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128, 64]
d2rl: False
activation: relu
initializer:
name: default
load_checkpoint: False
load_path: './nn/multiwalker.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 1e-4
name: multiwalker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: multiwalker_env
ppo: True
e_clip: 0.2
use_experimental_cv: False
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 3072 #768 #3072 #1536
mini_epochs: 4
critic_coef: 1
schedule_type: 'standard'
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
normalize_value: True
bounds_loss_coef: 0.0001
max_epochs: 10000
weight_decay: 0.0000
player:
render: True
games_num: 200
env_config:
central_value: True
use_prev_actions: True
apply_agent_ids: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 3e-4
clip_value: False
normalize_input: True
truncate_grads: False
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256, 128]
activation: elu
initializer:
            name: default
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_walker_hardcore.yaml
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128, 64]
d2rl: False
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
load_checkpoint: False
load_path: './nn/walker_hc.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_hc
score_to_win: 300
grad_norm: 1.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalkerHardcore-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 4096
minibatch_size: 8192
mini_epochs: 4
critic_coef: 1
schedule_type: 'standard'
lr_schedule: 'adaptive' #None #
kl_threshold: 0.008
normalize_input: True
seq_length: 4
bounds_loss_coef: 0.00
max_epochs: 100000
weight_decay: 0
player:
render: False
games_num: 200
determenistic: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_ant_torch_rnn.yaml
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
normalization: 'layer_norm'
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: False
mlp:
units: [128]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: 'lstm'
units: 64
layers: 1
before_mlp: False
load_checkpoint: False
load_path: 'nn/ant_torch.pth'
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch_rnn'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0
weight_decay: 0.001
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 256
minibatch_size : 8192
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length : 32
bounds_loss_coef: 0.000
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_smac_cnn.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/5m_vs_6m2smac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 5m_vs_6m2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 5m_vs_6m
frames: 4
transpose: True
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_ant_torch_rnn_copy.yaml
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: 'lstm'
units: 128
layers: 1
before_mlp: True
load_checkpoint: False
load_path: 'nn/ant_torch.pth'
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 128
minibatch_size : 4096
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length : 16
bounds_loss_coef: 0.0
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dqn.yaml
params:
algo:
name: dqn
model:
name: dqn
load_checkpoint: False
load_path: path
network:
name: dqn
dueling: True
atoms: 1
noisy: False
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 'valid'
- filters: 64
kernel_size: 4
strides: 2
padding: 'valid'
- filters: 64
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 0.1
gamma : 0.99
learning_rate : 0.0005
steps_per_epoch : 4
batch_size : 128
epsilon : 0.90
min_epsilon : 0.02
epsilon_decay_frames : 100000
num_epochs_to_copy : 10000
name : 'pong_dddqn_config1'
env_name: PongNoFrameskip-v4
is_double : True
score_to_win : 20.9
num_steps_fill_buffer : 10000
replay_buffer_type : 'normal'
replay_buffer_size : 100000
priority_beta : 0.4
priority_alpha : 0.6
beta_decay_frames : 100000
max_beta : 1
horizon_length : 3
episodes_to_log : 10
lives_reward : 1
atoms_num : 1
games_to_track : 20
lr_schedule : polynom_decay
max_epochs: 100000
experiment_config:
start_exp: 0
start_sub_exp: 3
experiments:
# - exp:
# - path: config.learning_rate
# value: [0.0005, 0.0002]
- exp:
- path: network.initializer
value:
- name: variance_scaling_initializer
scale: 2
- name: glorot_normal_initializer
- name: glorot_uniform_initializer
- name: orthogonal_initializer
gain: 1.41421356237
- path: network.cnn.initializer
value:
- name: variance_scaling_initializer
scale: 2
- name: glorot_normal_initializer
- name: glorot_uniform_initializer
- name: orthogonal_initializer
gain: 1.41421356237
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_lunar_continiuos_torch.yaml
params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64]
activation: relu
initializer:
name: default
scale: 2
rnn:
name: 'lstm'
units: 64
layers: 1
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-3
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: LunarLanderContinuous-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
schedule_type: standard
normalize_input: True
seq_length: 4
bounds_loss_coef: 0
player:
render: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_discrete.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [32,32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: False
multi_head_value: False
player:
games_num: 100
determenistic: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [64]
#normalization: 'layer_norm'
activation: elu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
units: 64
layers: 1
layer_norm: True
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: test_asymmetric
score_to_win: 100000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: openai_gym
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
seq_length: 4
weight_decay: 0.0000
env_config:
name: TestAsymmetricEnv-v0
wrapped_env_name: "LunarLander-v2"
apply_mask: False
use_central_value: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
before_mlp: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_rnn_multidiscrete.yaml
params:
seed: 322
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [64, 64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: test_rnn_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: False
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: True
player:
games_num: 100
determenistic: True
central_value_config1:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: False
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64,64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
          before_mlp: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_discrete_multidiscrete_mhv.yaml
params:
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [32,32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md_mhv
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
multi_discrete_space: True
multi_head_value: True
player:
games_num: 100
determenistic: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_ppo_walker_truncated_time.yaml
params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/walker_truncated_step_1000.pth'
config:
name: walker_truncated_step_1000
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_input: True
normalize_advantage: True
normalize_value: True
value_bootstrap: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
schedule_type: standard
lr_schedule: adaptive
kl_threshold: 0.005
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 256
mini_epochs: 4
critic_coef: 2
bounds_loss_coef: 0.00
max_epochs: 10000
#weight_decay: 0.0001
env_config:
steps_limit: 1000
player:
render: True
determenistic: True
games_num: 200
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_rnn_multidiscrete_mhv.yaml
params:
seed: 322
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_rnn_md_mhv
score_to_win: 0.99
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
multi_discrete_space: True
multi_head_value: True
player:
games_num: 100
determenistic: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete_mhv_mops.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: testnet
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md_multi_obs
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: False
weight_decay: 0.0000
max_epochs: 10000
seq_length: 16
save_best_after: 10
save_frequency: 20
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: False
min_dist: 2
max_dist: 8
use_central_value: True
multi_obs_space: True
multi_head_value: False
player:
games_num: 100
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: False
truncate_grads: True
grad_norm: 10
network:
name: testnet
central_value: True
mlp:
units: [64,32]
activation: relu
initializer:
            name: default
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete_mhv.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 32
layers: 1
layer_norm: False
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: True
weight_decay: 0.0000
max_epochs: 10000
seq_length: 16
save_best_after: 10
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: False
multi_head_value: False
player:
games_num: 100
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
          before_mlp: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_rnn.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_rnn
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
player:
games_num: 100
determenistic: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/27m_vs_30m_torch.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/27msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 27m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3456
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 27m_vs_30m
frames: 4
transpose: False
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: 3s_vs_5z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 3s_vs_5z
frames: 1
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/last_3s_vs_5z_cvep=10001rew=9.585825.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s_vs_5z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 24
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
max_epochs: 50000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/6h_vs_8z_torch.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z_separate
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.002
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 2
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
central_value: False
reward_only_positive: False
obs_last_action: True
frames: 1
      #flatten: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/8m_torch.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 8m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
max_epochs: 10000
env_config:
name: 8m
frames: 1
transpose: False
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/2c_vs_64zg.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2c_vs_64zg_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2c_vs_64zg
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 512
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 2c_vs_64zg
frames: 4
transpose: True
random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z_torch.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
seed: 322
load_checkpoint: False
load_path: 'nn/3s5z_vs_3s6zsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1.4241
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1.4241
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
frames: 4
transpose: False
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z_torch_cv.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: ''
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096 # 8 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
#reward_negative_scale: 0.9
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: True
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
scale: 2
regularizer:
            name: 'None'
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/5m_vs_6m_rnn_cv.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5m_vs_6m_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 5m_vs_6m_rnn_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
entropy_coef: 0.02
truncate_grads: True
grad_norm: 10
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560 # 5 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length: 8
#max_epochs: 10000
env_config:
name: 5m_vs_6m
central_value: True
reward_only_positive: True
obs_last_action: False
apply_agent_ids: True
player:
render: False
games_num: 200
n_game_life: 1
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
#normalization: layer_norm
name: actor_critic
central_value: True
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
    #reward_negative_scale: 0.1
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_rnn.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_cv_joint.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
env_config:
name: 3m
frames: 1
transpose: False
central_value: True
reward_only_positive: True
state_last_action: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
joint_obs_actions:
embedding: False
embedding_scale: 1 #(actions // embedding_scale)
mlp_scale: 4 # (mlp from obs size) // mlp_out_scale
mlp:
units: [256, 128]
activation: relu
initializer:
#name: default
name: default
scale: 2
regularizer:
            name: 'None'
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_cv.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
            name: 'None'
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv_joint.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s_vs_5z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 24
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
max_epochs: 50000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
joint_obs_actions:
embedding: False
embedding_scale: 1 #(actions // embedding_scale)
mlp_scale: 4 # (mlp from obs size) // mlp_out_scale
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_4z.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/3s_vs_4z_lstm'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: sc2_fc
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 1536
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 3s_vs_4z
frames: 1
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/MMM2_torch.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
scale: 1.3
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 0
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: MMM2_cnn
reward_shaper:
scale_value: 1.3
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
use_action_masks: True
env_config:
name: MMM2
frames: 4
transpose: False # for pytorch transpose == not Transpose in tf
random_invalid_step: False
      replay_save_freq: 100
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
seed: 322
load_checkpoint: False
load_path: 'nn/3s5z_vs_3s6z_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6zaa
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
frames: 4
transpose: True
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/5m_vs_6m_torch.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 5m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 5m_vs_6m
frames: 4
transpose: False
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_torch_lstm.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
before_mlp: False
config:
name: 3s_vs_5z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 256
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 32
use_action_masks: True
max_epochs: 20000
env_config:
name: 3s_vs_5z
frames: 1
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/2s_vs_1c.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/2s_vs_1c_lstm'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: 2m_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/MMM2.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/MMM_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: MMM2_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
use_action_masks: True
ignore_dead_batches : False
seq_length: 4
env_config:
name: MMM
frames: 4
transpose: True
      random_invalid_step: False
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/8m_torch_cv.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 8m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
max_epochs: 10000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 8m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: False
obs_last_action: True
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_cv_rnn.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3m_cv_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
truncate_grads: True
grad_norm: 0.5
entropy_coef: 0.001
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length : 8
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 0.5
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
            layers: 1
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_cnn_torch.yaml
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnnsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 1
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 3m
frames: 4
transpose: True
random_invalid_step: True
| 1,523 | YAML | 17.814815 | 40 | 0.545634 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_sparse.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnnsmac_cnn'
network:
name: actor_critic
separate: True
value_shape: 2
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
reward_sparse: True
transpose: False
random_invalid_step: False
rnd_config:
scale_value: 1
episodic: True
episode_length: 128
gamma: 0.99
mini_epochs: 2
minibatch_size: 1536
learning_rate: 5e-4
network:
name: rnd_curiosity
mlp:
rnd:
            units: [512, 256, 128, 64]
net:
units: [128, 64, 64]
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,625 | YAML | 19.074074 | 38 | 0.536 |
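The rnd_config block in the sparse-reward entry above wires an 'rnd_curiosity' network for exploration bonuses. As a rough illustration of the underlying idea (random network distillation), here is a small stand-alone PyTorch sketch: a frozen random target network and a trained predictor, with the per-state prediction error used as the intrinsic reward. It mirrors only the general technique, not the library's rnd_curiosity implementation, and the layer sizes and names are illustrative.

```python
# Sketch of random network distillation (RND): intrinsic reward = prediction error
# between a frozen random target network and a trained predictor network.
import torch
import torch.nn as nn

class RNDIntrinsicReward(nn.Module):
    def __init__(self, obs_dim, units=(64, 64, 16)):
        super().__init__()
        def mlp(sizes):
            layers = []
            for i in range(len(sizes) - 1):
                layers += [nn.Linear(sizes[i], sizes[i + 1]), nn.ELU()]
            return nn.Sequential(*layers[:-1])  # drop the final activation
        sizes = (obs_dim, *units)
        self.target = mlp(sizes)
        self.predictor = mlp(sizes)
        for p in self.target.parameters():
            p.requires_grad_(False)  # the target stays fixed

    def forward(self, obs):
        with torch.no_grad():
            target_feat = self.target(obs)
        pred_feat = self.predictor(obs)
        error = (pred_feat - target_feat).pow(2).mean(dim=-1)
        return error  # intrinsic reward; also the predictor's training loss

reward_model = RNDIntrinsicReward(obs_dim=24)
intrinsic = reward_model(torch.randn(8, 24))
print(intrinsic.shape)  # torch.Size([8])
```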
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/2m_vs_1z_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2m_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
random_invalid_step: False | 978 | YAML | 17.129629 | 32 | 0.580777 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/5m_vs_6m_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5m_vs_6m_cv.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 5m_vs_6m_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
entropy_coef: 0.005
truncate_grads: True
grad_norm: 1.5
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560 # 5 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length: 8
#max_epochs: 10000
env_config:
name: 5m_vs_6m
central_value: False
reward_only_positive: True
obs_last_action: True
apply_agent_ids: False
player:
render: False
games_num: 200
n_game_life: 1
determenistic: True
#reward_negative_scale: 0.1 | 1,365 | YAML | 18.239436 | 34 | 0.58022 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3s_vs_5z_cv_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
truncate_grads: True
grad_norm: 0.5
entropy_coef: 0.005
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length : 4
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 0.5
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1 | 1,745 | YAML | 19.068965 | 34 | 0.553582 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/corridor.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/corridor_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
frames: 4
transpose: True
random_invalid_step: False | 1,511 | YAML | 18.636363 | 32 | 0.550629 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/10m_vs_11m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/27msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 10m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 10m_vs_11m
frames: 14
transpose: False
random_invalid_step: False | 1,460 | YAML | 18.48 | 33 | 0.545205 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/corridor_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/corridor_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
reward_negative_scale: 0.05
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 3e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,598 | YAML | 19.5 | 34 | 0.571339 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/27m_vs_30m_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 27m_vs_30m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3456
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 8
use_action_masks: True
ignore_dead_batches : False
#max_epochs: 10000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
env_config:
name: 27m_vs_30m
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
apply_agent_ids: True | 1,776 | YAML | 19.193182 | 32 | 0.556869 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/2m_vs_1z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2m_vs_1z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2s_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
random_invalid_step: False | 972 | YAML | 17.35849 | 32 | 0.583333 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False | 1,022 | YAML | 17.267857 | 32 | 0.588063 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/corridor_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2c_vs_64zgsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1.4241
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
frames: 4
transpose: False
random_invalid_step: False | 1,542 | YAML | 18.782051 | 40 | 0.552529 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/6h_vs_8z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
frames: 4
transpose: True
random_invalid_step: False
| 1,512 | YAML | 18.397436 | 32 | 0.546296 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 4
transpose: True
random_invalid_step: False | 1,493 | YAML | 18.402597 | 32 | 0.545211 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/6h_vs_8z_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: ''
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: False
config:
name: 6h_vs_8z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
#reward_negative_scale: 0.9
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: True
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: False | 1,734 | YAML | 18.942529 | 34 | 0.553633 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_torch_lstm2.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
before_mlp: False
config:
name: 3s_vs_5z2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
max_epochs: 20000
env_config:
name: 3s_vs_5z
frames: 1
random_invalid_step: False | 1,093 | YAML | 17.542373 | 32 | 0.573651 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/5m_vs_6m.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 5m_vs_6m_bias
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 5m_vs_6m
frames: 4
transpose: True
random_invalid_step: False | 1,514 | YAML | 18.675324 | 32 | 0.548217 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_space_invaders_resnet.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/invaders_resnet.pth'
network:
name: resnet_actor_critic
separate: False
value_shape: 1
space:
discrete:
cnn:
conv_depths: [16, 32, 32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: default
rnn:
name: lstm
units: 256
layers: 1
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: invaders_resnet
score_to_win: 100000
grad_norm: 1.5
entropy_coef: 0.001
truncate_grads: True
env_name: 'atari_gym' #'openai_gym' #'PongNoFrameskip-v4' #
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: none
kl_threshold: 0.01
normalize_input: False
seq_length: 4
max_epochs: 200000
env_config:
skip: 3
name: 'SpaceInvadersNoFrameskip-v4'
episode_life: False
player:
render: True
games_num: 10
n_game_life: 1
determenistic: True
| 1,416 | YAML | 17.166666 | 63 | 0.565678 |
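Several of the Atari entries that follow shape rewards with min_val/max_val rather than (or in addition to) scale_value. A minimal sketch of what those fields plausibly amount to, scaling followed by clipping, written against the config fields only rather than the library's reward-shaper class:

```python
# Minimal sketch of the reward shaping implied by the reward_shaper blocks:
# optional scaling plus clipping to [min_val, max_val]. This mirrors the config
# fields only; it is not the rl_games implementation itself.
import numpy as np

def shape_rewards(rewards, scale_value=1.0, min_val=-np.inf, max_val=np.inf):
    rewards = np.asarray(rewards, dtype=np.float32) * scale_value
    return np.clip(rewards, min_val, max_val)

# With min_val: -1 and max_val: 1, raw Atari scores collapse to their sign,
# which keeps value targets on a comparable scale across games.
print(shape_rewards([0.0, 30.0, -200.0], min_val=-1, max_val=1))  # [ 0.  1. -1.]
```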
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_pacman_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/pacman_ff.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
#min_val: -1
#max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: pacman_ff_no_normalize
score_to_win: 50000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: 'atari_gym'
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: linear
schedule_entropy: True
normalize_input: False
normalize_value: True
max_epochs: 20000
env_config:
skip: 4
name: 'MsPacmanNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True
render_sleep: 0.05 | 1,692 | YAML | 18.686046 | 39 | 0.543144 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_gopher.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/pacman_ff.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
scale_value: 1
#min_val: -1
#max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: gopher_ff
score_to_win: 50000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: 'atari_gym'
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: linear
schedule_entropy: True
normalize_input: False
normalize_value: True
max_epochs: 50000
env_config:
skip: 4
name: 'GopherNoFrameskip-v4'
episode_life: False
player:
render: True
games_num: 10
n_game_life: 1
determenistic: True
render_sleep: 0.001 | 1,679 | YAML | 18.534884 | 39 | 0.540798 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_space_invaders_torch_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/invader_lstm.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
rnn:
name: lstm
units: 256
layers: 1
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
name: invader_lstm
score_to_win: 9000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
env_config:
skip: 3
name: 'SpaceInvadersNoFrameskip-v4'
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True | 1,740 | YAML | 18.131868 | 41 | 0.538506 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_space_invaders_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/invader.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
name: invader
score_to_win: 9000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 24
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
env_config:
skip: 3
name: 'SpaceInvadersNoFrameskip-v4'
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True | 1,672 | YAML | 18.229885 | 41 | 0.54067 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_breakout_torch_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
rnn:
name: lstm
units: 256
layers: 1
#layer_norm: True
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: breakout_lstm
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: BreakoutNoFrameskip-v4
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 3
critic_coef: 1
lr_schedule: None # adaptive
kl_threshold: 0.01
normalize_input: False
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
player:
render: True
games_num: 100
n_game_life: 5
determenistic: False | 1,687 | YAML | 18.181818 | 39 | 0.540605 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_pacman_torch_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/pacman_ff.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
rnn:
before_mlp: False
name: lstm
units: 512
layers: 1
layer_norm: True
config:
reward_shaper:
#min_val: -1
#max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: pacman_rnn
score_to_win: 50000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: 'atari_gym'
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
seq_len: 16
lr_schedule: linear
schedule_entropy: True
normalize_input: False
normalize_value: True
max_epochs: 50000
env_config:
skip: 4
name: 'MsPacmanNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True
render_sleep: 0.05 | 1,801 | YAML | 18.586956 | 39 | 0.53859 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppg_breakout_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: orthogonal_initializer
gain: 1.41421356237
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: orthogonal_initializer
gain: 1.41421356237
config:
reward_shaper:
min_val: -1
max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: breakout_ppg
score_to_win: 900
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 24
horizon_length: 128
minibatch_size: 512
mini_epochs: 1
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
#lr_schedule: linear
#schedule_entropy: True
normalize_value: True
normalize_input: False
max_epochs: 20000
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 512
mini_epochs: 6
n_aux: 16
kl_coef: 1.0
env_config:
skip: 4
name: 'BreakoutNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 200
n_game_life: 5
determenistic: False | 1,747 | YAML | 18.640449 | 36 | 0.551231 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppg_pong.yaml | params:
seed: 322
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.995
tau: 0.9
learning_rate: 5e-4
name: pong_ppg
score_to_win: 20.5
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: PongNoFrameskip-v4
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 24
horizon_length: 128
minibatch_size: 256
mini_epochs: 1
critic_coef: 1
lr_schedule: none
#kl_threshold: 0.008
#schedule_entropy : True
normalize_value: False
normalize_input: False
max_epochs: 1500
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 256
mini_epochs: 6
n_aux: 16
kl_coef: 1.0
player:
render: True
games_num: 100
n_game_life: 1
determenistic: True | 1,640 | YAML | 18.535714 | 39 | 0.545122 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_breakout_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: orthogonal_initializer
gain: 1.41421356237
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: orthogonal_initializer
gain: 1.41421356237
config:
reward_shaper:
min_val: -1
max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: breakout_ppo
score_to_win: 900
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 24
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
#lr_schedule: linear
#schedule_entropy: True
normalize_value: True
normalize_input: False
max_epochs: 3000
env_config:
skip: 4
name: 'BreakoutNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 200
n_game_life: 5
determenistic: True | 1,598 | YAML | 18.26506 | 36 | 0.553191 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_pong.yaml | params:
seed: 322
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: PongNoFrameskip
score_to_win: 20.0
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: PongNoFrameskip-v4
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 24
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: none
#kl_threshold: 0.008
#schedule_entropy : True
normalize_value: True
normalize_input: False
max_epochs: 1500
player:
render: True
games_num: 100
n_game_life: 1
determenistic: True | 1,510 | YAML | 18.371795 | 39 | 0.550331 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_pong_soft_aug.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1.41421356237
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1.41421356237
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: PongNoFrameskip_soft_aug
score_to_win: 20
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: PongNoFrameskip-v4
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 24
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: none
#kl_threshold: 0.008
#schedule_entropy : True
normalize_input: False
max_epochs: 1500
features:
soft_augmentation:
aug_coef: 0.001
transform:
name: 'default'
player:
render: True
games_num: 100
n_game_life: 1
determenistic: True | 1,561 | YAML | 18.525 | 39 | 0.55221 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ma/ppo_slime_self_play.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/slime_pvp.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [128,64]
activation: elu
initializer:
name: default
regularizer:
name: 'None'
config:
name: slime_pvp2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: slime_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 500
self_play_config:
update_score: 1
games_to_check: 200
check_scores : False
env_config:
name: SlimeVolleyDiscrete-v0
#neg_scale: 1 #0.5
self_play: True
config_path: 'rl_games/configs/ma/ppo_slime_self_play.yaml'
player:
render: True
games_num: 200
n_game_life: 1
determenistic: True
device_name: 'cpu' | 1,294 | YAML | 18.328358 | 65 | 0.59119 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ma/ppo_connect4_self_play.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/connect4.pth'
network:
name: actor_critic
separate: False
normalization: batch_norm
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
config:
name: connect4_3
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: connect4_env
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 1000
use_action_masks: True
weight_decay: 0.001
self_play_config:
update_score: 0.1
games_to_check: 100
env_update_num: 8
env_config:
name: connect_four_v0
self_play: True
is_human: False
random_agent: False
config_path: 'rl_games/configs/ma/ppo_connect4_self_play.yaml' | 1,735 | YAML | 19.915662 | 68 | 0.563689 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ma/ppo_slime_v0.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [128,64]
activation: elu
initializer:
name: default
regularizer:
name: 'None'
config:
name: slime
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: slime_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: False
ignore_dead_batches : False
env_config:
name: SlimeVolleyDiscrete-v0
player:
render: True
games_num: 200
n_game_life: 1
determenistic: True | 1,093 | YAML | 16.645161 | 34 | 0.590119 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ma/ppo_connect4_self_play_resnet.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/connect4_rn.pth'
network:
name: connect4net
blocks: 5
config:
name: connect4_rn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: connect4_env
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 4
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 1000
use_action_masks: True
weight_decay: 0.001
self_play_config:
update_score: 0.1
games_to_check: 100
env_update_num: 4
env_config:
name: connect_four_v0
self_play: True
is_human: True
random_agent: False
config_path: 'rl_games/configs/ma/ppo_connect4_self_play_resnet.yaml' | 1,052 | YAML | 19.25 | 75 | 0.613118 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/humanoid2.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: dm_humanoid
score_to_win: 10000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 4
horizon_length: 4096
minibatch_size: 4096
mini_epochs: 15
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.0
env_config:
name: Humanoid2Run-v0
flat_observation: True
| 1,305 | YAML | 18.492537 | 39 | 0.549425 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/ppo_dm_control.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
value_shape: 2
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.999
tau: 0.9
learning_rate: 1e-4
name: dm_control
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 2
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: True
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: AcrobotSwingup_sparse-v0
flat_observation: True
rnd_config:
scale_value: 4.0
exp_percent: 0.25
adv_coef: 0.5
gamma: 0.99
mini_epochs: 2
minibatch_size: 1024
learning_rate: 5e-4
network:
name: rnd_curiosity
mlp:
rnd:
units: [64,64,16]
net:
units: [16,16]
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,835 | YAML | 19.4 | 39 | 0.510627 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/walker_run.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: walker
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: WalkerRun-v0
flat_observation: True
| 1,297 | YAML | 18.373134 | 39 | 0.547417 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/cartpole.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 16]
activation: relu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: cartpole
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.0000
env_config:
name: CartpoleBalance-v0
flat_observation: True
| 1,301 | YAML | 18.432836 | 39 | 0.550346 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/humanoid_run_rnd.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
value_shape: 2
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: dm_humanoid
score_to_win: 10000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 1024
minibatch_size: 4096
mini_epochs: 15
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: True
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: HumanoidRun-v0
flat_observation: True
rnd_config:
scale_value: 1.0
gamma: 0.99
mini_epochs: 2
minibatch_size: 4096
learning_rate: 5e-4
exp_percent: 0.25
adv_coef: 0.5
network:
name: rnd_curiosity
mlp:
rnd:
units: [256,128,32]
net:
units: [128,32]
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,830 | YAML | 19.573033 | 39 | 0.512022 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/humanoid_run.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: dm_humanoid
score_to_win: 10000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 1024
minibatch_size: 4096
mini_epochs: 15
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: HumanoidRun-v0
flat_observation: True
| 1,307 | YAML | 18.522388 | 39 | 0.550115 |
KallPap/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/humanoid_run_conv1d.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: False
cnn:
type: conv1d
activation: elu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 128
kernel_size: 2
strides: 1
padding: 0
mlp:
units: [128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: humanoid_conv
score_to_win: 15000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 1024
minibatch_size: 8192
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
frames: 4
name: Humanoid2Run-v0
flat_observation: True
| 1,829 | YAML | 18.677419 | 39 | 0.515582 |
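All of the configs in this listing are consumed the same way by the rl_games runner. A sketch of the usual launch path, assuming the library is importable, the path below points at a real file, and that Runner.load/Runner.run accept a parsed config dict and a mode dict as in the bundled runner scripts (argument handling can differ slightly between rl_games releases):

```python
# Sketch: driving one of the YAML configs above through the rl_games Runner.
# Assumes rl_games is installed and CONFIG_PATH exists on disk.
import yaml
from rl_games.torch_runner import Runner

CONFIG_PATH = "rl_games/configs/dm_control/humanoid_run.yaml"

with open(CONFIG_PATH) as f:
    config = yaml.safe_load(f)

runner = Runner()
runner.load(config)                          # consumes the 'params' tree
runner.run({"train": True, "play": False})   # or {"train": False, "play": True} to evaluate
```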