file_path (string, 20-207 chars) | content (string, 5-3.85M chars) | size (int64, 5-3.85M) | lang (string, 9 classes) | avg_line_length (float64, 1.33-100) | max_line_length (int64, 4-993) | alphanum_fraction (float64, 0.26-0.93) |
---|---|---|---|---|---|---|
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_walker_hardcore.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128, 64]
d2rl: False
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
load_checkpoint: False
load_path: './nn/walker_hc.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_hc
score_to_win: 300
grad_norm: 1.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalkerHardcore-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 4096
minibatch_size: 8192
mini_epochs: 4
critic_coef: 1
schedule_type: 'standard'
lr_schedule: 'adaptive' #None #
kl_threshold: 0.008
normalize_input: True
seq_length: 4
bounds_loss_coef: 0.00
max_epochs: 100000
weight_decay: 0
player:
render: False
games_num: 200
determenistic: True
| 1,420 | YAML | 19.897059 | 41 | 0.554225 |
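Each row in this dump pairs a file path with the full text of an rl_games YAML training config. As a minimal sketch of how such a file is consumed, the snippet below loads `ppo_walker_hardcore.yaml` and hands it to the `Runner` from `rl_games.torch_runner`; the relative path and the exact `run()` argument keys are assumptions based on the version vendored under `externals/rl_games` and may differ between releases.

```python
# Minimal sketch: load one of these YAML files and start training with the
# rl_games Runner (assumes the externals/rl_games package is on PYTHONPATH
# and that Runner.load/run behave as in that vendored version).
import yaml

from rl_games.torch_runner import Runner

with open("externals/rl_games/rl_games/configs/ppo_walker_hardcore.yaml") as f:
    cfg = yaml.safe_load(f)                  # top-level key is `params`

runner = Runner()
runner.load(cfg)                             # builds algo/model/network from `params`
runner.run({"train": True, "play": False})   # argument keys assumed from this rl_games version
```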
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_ant_torch_rnn.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
normalization: 'layer_norm'
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: False
mlp:
units: [128]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: 'lstm'
units: 64
layers: 1
before_mlp: False
load_checkpoint: False
load_path: 'nn/ant_torch.pth'
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch_rnn'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0
weight_decay: 0.001
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 256
minibatch_size : 8192
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length : 32
bounds_loss_coef: 0.000
| 1,580 | YAML | 18.280488 | 39 | 0.533544 |
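The RNN config above couples several batch-size parameters; the sketch below simply restates their arithmetic with values copied from `ppo_flex_ant_torch_rnn.yaml`. The divisibility constraints are the usual PPO/rl_games expectations, stated here as assumptions rather than library calls.

```python
# Batch bookkeeping implied by the config above.
num_actors, horizon_length = 256, 256
minibatch_size, seq_length = 8192, 32

batch_size = num_actors * horizon_length      # 65_536 transitions per PPO update
assert batch_size % minibatch_size == 0       # 8 minibatches per mini-epoch
assert horizon_length % seq_length == 0       # the LSTM is trained on 32-step chunks
print(batch_size // minibatch_size, "minibatches of", minibatch_size)
```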
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_smac_cnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/5m_vs_6m2smac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 5m_vs_6m2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 5m_vs_6m
frames: 4
transpose: True
random_invalid_step: False | 1,512 | YAML | 18.64935 | 35 | 0.547619 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_ant_torch_rnn_copy.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: 'lstm'
units: 128
layers: 1
before_mlp: True
load_checkpoint: False
load_path: 'nn/ant_torch.pth'
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 128
minibatch_size : 4096
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length : 16
bounds_loss_coef: 0.0
| 1,509 | YAML | 18.113924 | 39 | 0.530152 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dqn.yaml | params:
algo:
name: dqn
model:
name: dqn
load_checkpoint: False
load_path: path
network:
name: dqn
dueling: True
atoms: 1
noisy: False
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 'valid'
- filters: 64
kernel_size: 4
strides: 2
padding: 'valid'
- filters: 64
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 0.1
gamma : 0.99
learning_rate : 0.0005
steps_per_epoch : 4
batch_size : 128
epsilon : 0.90
min_epsilon : 0.02
epsilon_decay_frames : 100000
num_epochs_to_copy : 10000
name : 'pong_dddqn_config1'
env_name: PongNoFrameskip-v4
is_double : True
score_to_win : 20.9
num_steps_fill_buffer : 10000
replay_buffer_type : 'normal'
replay_buffer_size : 100000
priority_beta : 0.4
priority_alpha : 0.6
beta_decay_frames : 100000
max_beta : 1
horizon_length : 3
episodes_to_log : 10
lives_reward : 1
atoms_num : 1
games_to_track : 20
lr_schedule : polynom_decay
max_epochs: 100000
experiment_config:
start_exp: 0
start_sub_exp: 3
experiments:
# - exp:
# - path: config.learning_rate
# value: [0.0005, 0.0002]
- exp:
- path: network.initializer
value:
- name: variance_scaling_initializer
scale: 2
- name: glorot_normal_initializer
- name: glorot_uniform_initializer
- name: orthogonal_initializer
gain: 1.41421356237
- path: network.cnn.initializer
value:
- name: variance_scaling_initializer
scale: 2
- name: glorot_normal_initializer
- name: glorot_uniform_initializer
- name: orthogonal_initializer
gain: 1.41421356237
| 2,195 | YAML | 20.742574 | 46 | 0.553531 |
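The DQN config above defines its exploration schedule through `epsilon`, `min_epsilon`, and `epsilon_decay_frames`. The sketch below assumes a linear decay purely for illustration; the exact decay curve used by the rl_games DQN agent is not spelled out in this file.

```python
# Illustration of the exploration-schedule parameters above. Linear decay is
# an assumption made for the example, not read from the library.
def epsilon_at(frame: int,
               start: float = 0.90,
               end: float = 0.02,
               decay_frames: int = 100_000) -> float:
    frac = min(frame / decay_frames, 1.0)
    return start + frac * (end - start)

for f in (0, 25_000, 50_000, 100_000, 200_000):
    print(f, round(epsilon_at(f), 3))   # 0.9 -> 0.68 -> 0.46 -> 0.02 -> 0.02
```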
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_lunar_continiuos_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64]
activation: relu
initializer:
name: default
scale: 2
rnn:
name: 'lstm'
units: 64
layers: 1
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-3
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: LunarLanderContinuous-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
schedule_type: standard
normalize_input: True
seq_length: 4
bounds_loss_coef: 0
player:
render: True
| 1,276 | YAML | 17.779412 | 41 | 0.544671 |
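`lr_schedule: adaptive` with `kl_threshold: 0.008` means the learning rate is adjusted from the measured policy KL after each update. The sketch below shows the standard adaptive-KL rule; the 1.5 factor and the learning-rate bounds are illustrative assumptions, not values taken from the rl_games scheduler.

```python
# Sketch of an adaptive-KL learning-rate rule (constants are illustrative).
def adaptive_lr(lr: float, kl: float, kl_threshold: float = 0.008,
                min_lr: float = 1e-6, max_lr: float = 1e-2) -> float:
    if kl > 2.0 * kl_threshold:        # policy moved too far -> slow down
        lr = max(lr / 1.5, min_lr)
    elif kl < 0.5 * kl_threshold:      # policy barely moved -> speed up
        lr = min(lr * 1.5, max_lr)
    return lr

lr = 1e-3                              # initial learning_rate from the config
for kl in (0.02, 0.02, 0.001, 0.008):  # example KL readings across updates
    lr = adaptive_lr(lr, kl)
print(lr)
```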
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_discrete.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [32,32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: False
multi_head_value: False
player:
games_num: 100
determenistic: True
| 1,207 | YAML | 17.584615 | 33 | 0.589892 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [64]
#normalization: 'layer_norm'
activation: elu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
units: 64
layers: 1
layer_norm: True
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: test_asymmetric
score_to_win: 100000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: openai_gym
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
seq_length: 4
weight_decay: 0.0000
env_config:
name: TestAsymmetricEnv-v0
wrapped_env_name: "LunarLander-v2"
apply_mask: False
use_central_value: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
before_mlp: False
| 1,707 | YAML | 18.632184 | 40 | 0.557704 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_rnn_multidiscrete.yaml | params:
seed: 322
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [64, 64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: test_rnn_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: False
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: True
player:
games_num: 100
determenistic: True
central_value_config1:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: False
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64,64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
before_mlp: False | 1,898 | YAML | 18.989473 | 32 | 0.555848 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_discrete_multidiscrete_mhv.yaml | params:
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [32,32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md_mhv
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
multi_discrete_space: True
multi_head_value: True
player:
games_num: 100
determenistic: True
| 1,223 | YAML | 17.830769 | 32 | 0.592805 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_ppo_walker_truncated_time.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/walker_truncated_step_1000.pth'
config:
name: walker_truncated_step_1000
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_input: True
normalize_advantage: True
normalize_value: True
value_bootstrap: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
schedule_type: standard
lr_schedule: adaptive
kl_threshold: 0.005
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 256
mini_epochs: 4
critic_coef: 2
bounds_loss_coef: 0.00
max_epochs: 10000
#weight_decay: 0.0001
env_config:
steps_limit: 1000
player:
render: True
determenistic: True
games_num: 200
| 1,426 | YAML | 17.776316 | 50 | 0.585554 |
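The `reward_shaper` block above (`scale_value: 0.1`, `min_val: -1`) rescales the raw environment reward and clips it from below, which tames BipedalWalker's -100 fall penalty. The function below is a standalone re-implementation of that reading, not the rl_games shaper class itself.

```python
# Standalone sketch of the reward_shaper settings above: scale, then clip from below.
def shape_reward(r: float, scale_value: float = 0.1, min_val: float = -1.0) -> float:
    return max(r * scale_value, min_val)

print(shape_reward(300.0))   # 30.0  -- the "solved" return keeps its sign
print(shape_reward(-100.0))  # -1.0  -- the fall penalty scales to -10, then clips to -1
```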
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_rnn_multidiscrete_mhv.yaml | params:
seed: 322
algo:
name: a2c_discrete
model:
name: multi_discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
multi_discrete:
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_rnn_md_mhv
score_to_win: 0.99
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
multi_discrete_space: True
multi_head_value: True
player:
games_num: 100
determenistic: True
| 1,362 | YAML | 17.671233 | 32 | 0.5837 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete_mhv_mops.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: testnet
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md_multi_obs
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: False
weight_decay: 0.0000
max_epochs: 10000
seq_length: 16
save_best_after: 10
save_frequency: 20
env_config:
name: TestRnnEnv-v0
hide_object: False
apply_dist_reward: False
min_dist: 2
max_dist: 8
use_central_value: True
multi_obs_space: True
multi_head_value: False
player:
games_num: 100
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: False
truncate_grads: True
grad_norm: 10
network:
name: testnet
central_value: True
mlp:
units: [64,32]
activation: relu
initializer:
name: default | 1,461 | YAML | 19.885714 | 30 | 0.588638 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_asymmetric_discrete_mhv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 32
layers: 1
layer_norm: False
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_md
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: True
weight_decay: 0.0000
max_epochs: 10000
seq_length: 16
save_best_after: 10
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: True
multi_discrete_space: False
multi_head_value: False
player:
games_num: 100
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
name: actor_critic
central_value: True
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
layer_norm: False
before_mlp: False | 1,941 | YAML | 19.020618 | 33 | 0.55796 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/test/test_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
#normalization: 'layer_norm'
space:
discrete:
mlp:
units: [64]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: 'lstm'
#layer_norm: True
units: 64
layers: 1
before_mlp: False
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: test_rnn
score_to_win: 0.95
grad_norm: 10.5
entropy_coef: 0.005
truncate_grads: True
env_name: test_env
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
max_epochs: 10000
env_config:
name: TestRnnEnv-v0
hide_object: True
apply_dist_reward: True
min_dist: 2
max_dist: 8
use_central_value: False
player:
games_num: 100
determenistic: True
| 1,270 | YAML | 16.901408 | 32 | 0.577165 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/27m_vs_30m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/27msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 27m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3456
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 27m_vs_30m
frames: 4
transpose: False
random_invalid_step: False | 1,459 | YAML | 18.466666 | 33 | 0.544894 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: 3s_vs_5z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 3s_vs_5z
frames: 1
random_invalid_step: False | 1,040 | YAML | 17.263158 | 32 | 0.577885 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/last_3s_vs_5z_cvep=10001rew=9.585825.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s_vs_5z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 24
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
max_epochs: 50000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
| 1,579 | YAML | 18.75 | 58 | 0.569981 |
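The `minibatch_size: 1536 # 3 * 512` comment above decodes naturally if each of the three allied units in `3s_vs_5z` contributes its own per-agent transitions to the PPO batch; the arithmetic below makes that explicit under exactly that assumption.

```python
# Per-agent batch arithmetic assumed by the "3 * 512" comment above.
num_agents, num_actors, horizon_length = 3, 8, 128

batch_size = num_agents * num_actors * horizon_length   # 3_072 agent-steps per update
minibatch_size = 1536                                    # i.e. 3 agents * 512 env-steps
assert batch_size % minibatch_size == 0
print(batch_size // minibatch_size, "minibatches per mini-epoch")   # 2
```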
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/6h_vs_8z_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z_separate
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.002
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 2
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
central_value: False
reward_only_positive: False
obs_last_action: True
frames: 1
#flatten: False | 1,108 | YAML | 18.120689 | 34 | 0.590253 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/8m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 8m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
max_epochs: 10000
env_config:
name: 8m
frames: 1
transpose: False
random_invalid_step: False | 1,061 | YAML | 17.631579 | 32 | 0.589067 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/2c_vs_64zg.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2c_vs_64zg_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2c_vs_64zg
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 512
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 2c_vs_64zg
frames: 4
transpose: True
random_invalid_step: False
| 1,512 | YAML | 18.397436 | 32 | 0.546958 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
seed: 322
load_checkpoint: False
load_path: 'nn/3s5z_vs_3s6zsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1.4241
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1.4241
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
frames: 4
transpose: False
random_invalid_step: False | 1,600 | YAML | 19.265823 | 40 | 0.555 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: ''
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096 # 8 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
#reward_negative_scale: 0.9
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: True
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,580 | YAML | 19.269231 | 34 | 0.567722 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/5m_vs_6m_rnn_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5m_vs_6m_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 5m_vs_6m_rnn_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
entropy_coef: 0.02
truncate_grads: True
grad_norm: 10
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560 # 5 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length: 8
#max_epochs: 10000
env_config:
name: 5m_vs_6m
central_value: True
reward_only_positive: True
obs_last_action: False
apply_agent_ids: True
player:
render: False
games_num: 200
n_game_life: 1
determenistic: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 10
network:
#normalization: layer_norm
name: actor_critic
central_value: True
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
#reward_negative_scale: 0.1 | 1,962 | YAML | 18.828283 | 34 | 0.553517 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False | 1,099 | YAML | 17.032787 | 32 | 0.579618 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_cv_joint.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
env_config:
name: 3m
frames: 1
transpose: False
central_value: True
reward_only_positive: True
state_last_action: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
joint_obs_actions:
embedding: False
embedding_scale: 1 #(actions // embedding_scale)
mlp_scale: 4 # (mlp from obs size) // mlp_out_scale
mlp:
units: [256, 128]
activation: relu
initializer:
#name: default
name: default
scale: 2
regularizer:
name: 'None' | 1,706 | YAML | 19.817073 | 63 | 0.559789 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,532 | YAML | 18.909091 | 34 | 0.567885 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv_joint.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s_vs_5z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 24
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
max_epochs: 50000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
joint_obs_actions:
embedding: False
embedding_scale: 1 #(actions // embedding_scale)
mlp_scale: 4 # (mlp from obs size) // mlp_out_scale
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
| 1,762 | YAML | 19.741176 | 63 | 0.565834 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_4z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/3s_vs_4z_lstm'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: sc2_fc
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 1536
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 3s_vs_4z
frames: 1
random_invalid_step: False | 1,036 | YAML | 17.192982 | 32 | 0.578185 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/MMM2_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
scale: 1.3
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 0
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: MMM2_cnn
reward_shaper:
scale_value: 1.3
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
use_action_masks: True
env_config:
name: MMM2
frames: 4
transpose: False # for pytorch transpose == not Transpose in tf
random_invalid_step: False
replay_save_freq: 100 | 1,531 | YAML | 18.896104 | 69 | 0.548661 |
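The `transpose` comment above ("for pytorch transpose == not Transpose in tf") is about tensor layout: PyTorch's `Conv1d` wants channels-first `(N, C, L)` input while TensorFlow's conv1d default is channels-last `(N, L, C)`, so the flag's meaning flips between backends. The shapes below are illustrative; which axis the SMAC wrapper actually treats as channels is an assumption, and the conv parameters loosely mirror the first `convs` entry in this file.

```python
# Channels-last (TF-style) vs channels-first (PyTorch-style) layout for conv1d.
import torch

x_channels_last = torch.randn(8, 32, 4)             # (batch, length, channels), TF-style
x_channels_first = x_channels_last.transpose(1, 2)  # (batch, channels, length), what nn.Conv1d expects

conv = torch.nn.Conv1d(in_channels=4, out_channels=64, kernel_size=3, stride=2, padding=0)
print(conv(x_channels_first).shape)                  # torch.Size([8, 64, 15])
```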
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s5z_vs_3s6z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
seed: 322
load_checkpoint: False
load_path: 'nn/3s5z_vs_3s6z_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3s5z_vs_3s6zaa
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3s5z_vs_3s6z
frames: 4
transpose: True
random_invalid_step: False | 1,532 | YAML | 18.909091 | 34 | 0.550914 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/5m_vs_6m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 5m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 5m_vs_6m
frames: 4
transpose: False
random_invalid_step: False | 1,455 | YAML | 18.413333 | 32 | 0.543643 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_torch_lstm.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 64
layers: 1
before_mlp: False
config:
name: 3s_vs_5z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 256
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 32
use_action_masks: True
max_epochs: 20000
env_config:
name: 3s_vs_5z
frames: 1
random_invalid_step: False | 1,120 | YAML | 17.683333 | 32 | 0.576786 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/2s_vs_1c.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c_lstm
load_checkpoint: False
load_path: 'nn/2s_vs_1c_lstm'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
lstm:
units: 128
concated: False
config:
name: 2m_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
random_invalid_step: False | 1,039 | YAML | 17.245614 | 32 | 0.578441 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/MMM2.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/MMM_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: MMM2_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 64
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
use_action_masks: True
ignore_dead_batches : False
seq_length: 4
env_config:
name: MMM
frames: 4
transpose: True
random_invalid_step: False | 1,500 | YAML | 18.493506 | 32 | 0.544667 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/8m_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 8m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 4096 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
max_epochs: 10000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256,128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
env_config:
name: 8m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: False
obs_last_action: True
| 1,581 | YAML | 18.292683 | 34 | 0.56673 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_cv_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3m_cv_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
truncate_grads: True
grad_norm: 0.5
entropy_coef: 0.001
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length : 8
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 0.5
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1 | 1,711 | YAML | 18.906977 | 34 | 0.549971 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_cnn_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnnsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: glorot_uniform_initializer
gain: 1
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 1
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 3m
frames: 4
transpose: True
random_invalid_step: True
| 1,523 | YAML | 17.814815 | 40 | 0.545634 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch_sparse.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnnsmac_cnn'
network:
name: actor_critic
separate: True
value_shape: 2
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
reward_sparse: True
transpose: False
random_invalid_step: False
rnd_config:
scale_value: 1
episodic: True
episode_length: 128
gamma: 0.99
mini_epochs: 2
minibatch_size: 1536
learning_rate: 5e-4
network:
name: rnd_curiosity
mlp:
rnd:
units: [512, 256,128,64]
net:
units: [128, 64, 64]
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,625 | YAML | 19.074074 | 38 | 0.536 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/2m_vs_1z_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2m_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
random_invalid_step: False | 978 | YAML | 17.129629 | 32 | 0.580777 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/5m_vs_6m_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5m_vs_6m_cv.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 5m_vs_6m_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
entropy_coef: 0.005
truncate_grads: True
grad_norm: 1.5
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560 # 5 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length: 8
#max_epochs: 10000
env_config:
name: 5m_vs_6m
central_value: False
reward_only_positive: True
obs_last_action: True
apply_agent_ids: False
player:
render: False
games_num: 200
n_game_life: 1
determenistic: True
#reward_negative_scale: 0.1 | 1,365 | YAML | 18.239436 | 34 | 0.58022 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_cv_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
config:
name: 3s_vs_5z_cv_rnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
truncate_grads: True
grad_norm: 0.5
entropy_coef: 0.005
env_name: smac
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 8
horizon_length: 128
minibatch_size: 1536 # 3 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
normalize_value: False
use_action_masks: True
seq_length : 4
env_config:
name: 3s_vs_5z
frames: 1
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
truncate_grads: True
grad_norm: 0.5
network:
name: actor_critic
central_value: True
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1 | 1,745 | YAML | 19.068965 | 34 | 0.553582 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/corridor.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/corridor_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
frames: 4
transpose: True
random_invalid_step: False | 1,511 | YAML | 18.636363 | 32 | 0.550629 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/10m_vs_11m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/27msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 256
kernel_size: 3
strides: 1
padding: 1
- filters: 512
kernel_size: 3
strides: 1
padding: 1
- filters: 1024
kernel_size: 3
strides: 1
padding: 1
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 10m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 4
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 2
use_action_masks: True
env_config:
name: 10m_vs_11m
frames: 14
transpose: False
random_invalid_step: False | 1,460 | YAML | 18.48 | 33 | 0.545205 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/corridor_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/corridor_cv.pth'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
reward_negative_scale: 0.05
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 3e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256, 128]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,598 | YAML | 19.5 | 34 | 0.571339 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/27m_vs_30m_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
config:
name: 27m_vs_30m_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3456
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 8
use_action_masks: True
ignore_dead_batches : False
#max_epochs: 10000
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 1e-4
clip_value: False
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [1024, 512]
activation: relu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: True
env_config:
name: 27m_vs_30m
transpose: False
random_invalid_step: False
central_value: True
reward_only_positive: True
obs_last_action: True
apply_agent_ids: True | 1,776 | YAML | 19.193182 | 32 | 0.556869 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/2m_vs_1z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2m_vs_1z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 2s_vs_1z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
seq_length: 4
use_action_masks: True
env_config:
name: 2m_vs_1z
frames: 1
random_invalid_step: False | 972 | YAML | 17.35849 | 32 | 0.583333 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 1
transpose: False
random_invalid_step: False | 1,022 | YAML | 17.267857 | 32 | 0.588063 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/corridor_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/2c_vs_64zgsmac_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: glorot_uniform_initializer
        gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
- filters: 256
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: corridor_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: corridor
frames: 4
transpose: False
random_invalid_step: False | 1,542 | YAML | 18.782051 | 40 | 0.552529 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/6h_vs_8z.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/6h_vs_8z_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
frames: 4
transpose: True
random_invalid_step: False
| 1,512 | YAML | 18.397436 | 32 | 0.546296 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3m.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3m_cnn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 3m_cnn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 3m
frames: 4
transpose: True
random_invalid_step: False | 1,493 | YAML | 18.402597 | 32 | 0.545211 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/6h_vs_8z_torch_cv.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: ''
network:
name: actor_critic
separate: False
#normalization: layer_norm
space:
discrete:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: False
config:
name: 6h_vs_8z_cv
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072 # 6 * 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: True
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 6h_vs_8z
central_value: True
reward_only_positive: False
obs_last_action: True
frames: 1
#reward_negative_scale: 0.9
#apply_agent_ids: True
#flatten: False
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 5e-4
clip_value: True
normalize_input: True
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
layer_norm: False | 1,734 | YAML | 18.942529 | 34 | 0.553633 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/3s_vs_5z_torch_lstm2.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/3s_vs_5z'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
rnn:
name: lstm
units: 128
layers: 1
before_mlp: False
config:
name: 3s_vs_5z2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1536 #1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
max_epochs: 20000
env_config:
name: 3s_vs_5z
frames: 1
random_invalid_step: False | 1,093 | YAML | 17.542373 | 32 | 0.573651 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/smac/5m_vs_6m.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/5msmac_cnn.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
cnn:
type: conv1d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 2
padding: 'same'
- filters: 128
kernel_size: 3
strides: 1
padding: 'valid'
- filters: 256
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 5m_vs_6m_bias
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: smac_cnn
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 2560
mini_epochs: 1
critic_coef: 2
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 2
use_action_masks: True
ignore_dead_batches : False
env_config:
name: 5m_vs_6m
frames: 4
transpose: True
random_invalid_step: False | 1,514 | YAML | 18.675324 | 32 | 0.548217 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_space_invaders_resnet.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/invaders_resnet.pth'
network:
name: resnet_actor_critic
separate: False
value_shape: 1
space:
discrete:
cnn:
conv_depths: [16, 32, 32]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: default
rnn:
name: lstm
units: 256
layers: 1
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: invaders_resnet
score_to_win: 100000
grad_norm: 1.5
entropy_coef: 0.001
truncate_grads: True
env_name: 'atari_gym' #'openai_gym' #'PongNoFrameskip-v4' #
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: none
kl_threshold: 0.01
normalize_input: False
seq_length: 4
max_epochs: 200000
env_config:
skip: 3
name: 'SpaceInvadersNoFrameskip-v4'
episode_life: False
player:
render: True
games_num: 10
n_game_life: 1
determenistic: True
| 1,416 | YAML | 17.166666 | 63 | 0.565678 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_pacman_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/pacman_ff.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
#min_val: -1
#max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: pacman_ff_no_normalize
score_to_win: 50000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: 'atari_gym'
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: linear
schedule_entropy: True
normalize_input: False
normalize_value: True
max_epochs: 20000
env_config:
skip: 4
name: 'MsPacmanNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True
render_sleep: 0.05 | 1,692 | YAML | 18.686046 | 39 | 0.543144 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_gopher.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/pacman_ff.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
scale_value: 1
#min_val: -1
#max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: gopher_ff
score_to_win: 50000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: 'atari_gym'
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: linear
schedule_entropy: True
normalize_input: False
normalize_value: True
max_epochs: 50000
env_config:
skip: 4
name: 'GopherNoFrameskip-v4'
episode_life: False
player:
render: True
games_num: 10
n_game_life: 1
determenistic: True
render_sleep: 0.001 | 1,679 | YAML | 18.534884 | 39 | 0.540798 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_space_invaders_torch_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/invader_lstm.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
rnn:
name: lstm
units: 256
layers: 1
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
name: invader_lstm
score_to_win: 9000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
env_config:
skip: 3
name: 'SpaceInvadersNoFrameskip-v4'
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True | 1,740 | YAML | 18.131868 | 41 | 0.538506 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_space_invaders_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/invader.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
name: invader
score_to_win: 9000
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 24
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
env_config:
skip: 3
name: 'SpaceInvadersNoFrameskip-v4'
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True | 1,672 | YAML | 18.229885 | 41 | 0.54067 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_breakout_torch_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
rnn:
name: lstm
units: 256
layers: 1
#layer_norm: True
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: breakout_lstm
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: BreakoutNoFrameskip-v4
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 3
critic_coef: 1
lr_schedule: None # adaptive
kl_threshold: 0.01
normalize_input: False
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
player:
render: True
games_num: 100
n_game_life: 5
determenistic: False | 1,687 | YAML | 18.181818 | 39 | 0.540605 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_pacman_torch_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/pacman_ff.pth'
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
rnn:
before_mlp: False
name: lstm
units: 512
layers: 1
layer_norm: True
config:
reward_shaper:
#min_val: -1
#max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 5e-4
name: pacman_rnn
score_to_win: 50000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: 'atari_gym'
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
seq_len: 16
lr_schedule: linear
schedule_entropy: True
normalize_input: False
normalize_value: True
max_epochs: 50000
env_config:
skip: 4
name: 'MsPacmanNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 10
n_game_life: 3
determenistic: True
render_sleep: 0.05 | 1,801 | YAML | 18.586956 | 39 | 0.53859 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppg_breakout_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: orthogonal_initializer
gain: 1.41421356237
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: orthogonal_initializer
gain: 1.41421356237
config:
reward_shaper:
min_val: -1
max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: breakout_ppg
score_to_win: 900
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 24
horizon_length: 128
minibatch_size: 512
mini_epochs: 1
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
#lr_schedule: linear
#schedule_entropy: True
normalize_value: True
normalize_input: False
max_epochs: 20000
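    # Phasic Policy Gradient auxiliary-phase settings. Rough sketch of the method (Cobbe et al.),
    # not a statement about this exact implementation: training alternates policy updates with an
    # auxiliary phase that re-fits the value function on recent rollouts while a KL penalty
    # (weighted by kl_coef) keeps the policy close to its pre-auxiliary behaviour;
    # n_aux presumably controls how often that auxiliary phase runs.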
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 512
mini_epochs: 6
n_aux: 16
kl_coef: 1.0
env_config:
skip: 4
name: 'BreakoutNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 200
n_game_life: 5
determenistic: False | 1,747 | YAML | 18.640449 | 36 | 0.551231 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppg_pong.yaml | params:
seed: 322
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.995
tau: 0.9
learning_rate: 5e-4
name: pong_ppg
score_to_win: 20.5
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: PongNoFrameskip-v4
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 24
horizon_length: 128
minibatch_size: 256
mini_epochs: 1
critic_coef: 1
lr_schedule: none
#kl_threshold: 0.008
#schedule_entropy : True
normalize_value: False
normalize_input: False
max_epochs: 1500
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 256
mini_epochs: 6
n_aux: 16
kl_coef: 1.0
player:
render: True
games_num: 100
n_game_life: 1
determenistic: True | 1,640 | YAML | 18.535714 | 39 | 0.545122 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_breakout_torch.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: orthogonal_initializer
gain: 1.41421356237
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: orthogonal_initializer
gain: 1.41421356237
config:
reward_shaper:
min_val: -1
max_val: 1
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: breakout_ppo
score_to_win: 900
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 24
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
#lr_schedule: linear
#schedule_entropy: True
normalize_value: True
normalize_input: False
max_epochs: 3000
env_config:
skip: 4
name: 'BreakoutNoFrameskip-v4'
episode_life: True
player:
render: True
games_num: 200
n_game_life: 5
determenistic: True | 1,598 | YAML | 18.26506 | 36 | 0.553191 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_pong.yaml | params:
seed: 322
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1.4142
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: PongNoFrameskip
score_to_win: 20.0
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: PongNoFrameskip-v4
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 24
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: none
#kl_threshold: 0.008
#schedule_entropy : True
normalize_value: True
normalize_input: False
max_epochs: 1500
player:
render: True
games_num: 100
n_game_life: 1
determenistic: True | 1,510 | YAML | 18.371795 | 39 | 0.550331 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/atari/ppo_pong_soft_aug.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1.41421356237
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 1.41421356237
config:
reward_shaper:
min_val: -1
max_val: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: PongNoFrameskip_soft_aug
score_to_win: 20
grad_norm: 10
entropy_coef: 0.01
truncate_grads: True
env_name: PongNoFrameskip-v4
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 24
horizon_length: 128
minibatch_size: 1536
mini_epochs: 4
critic_coef: 1
lr_schedule: none
#kl_threshold: 0.008
#schedule_entropy : True
normalize_input: False
max_epochs: 1500
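    # Soft data-augmentation feature; rough intent only, not verified against the implementation:
    # observations are passed through the named transform and an auxiliary consistency term,
    # weighted by aug_coef, encourages matching policy/value outputs on the augmented inputs.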
features:
soft_augmentation:
aug_coef: 0.001
transform:
name: 'default'
player:
render: True
games_num: 100
n_game_life: 1
determenistic: True | 1,561 | YAML | 18.525 | 39 | 0.55221 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ma/ppo_slime_self_play.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/slime_pvp.pth'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [128,64]
activation: elu
initializer:
name: default
regularizer:
name: 'None'
config:
name: slime_pvp2
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: slime_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 512
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 500
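    # Rough behaviour of the self-play setup: opponents in the vectorized envs play a frozen copy
    # of the policy, and once the tracked mean result over the last games_to_check games exceeds
    # update_score the opponent weights are refreshed with the current policy
    # (the exact trigger logic lives in the self-play manager).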
self_play_config:
update_score: 1
games_to_check: 200
check_scores : False
env_config:
name: SlimeVolleyDiscrete-v0
#neg_scale: 1 #0.5
self_play: True
config_path: 'rl_games/configs/ma/ppo_slime_self_play.yaml'
player:
render: True
games_num: 200
n_game_life: 1
determenistic: True
device_name: 'cpu' | 1,294 | YAML | 18.328358 | 65 | 0.59119 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ma/ppo_connect4_self_play.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/connect4.pth'
network:
name: actor_critic
separate: False
normalization: batch_norm
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 128
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
config:
name: connect4_3
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: connect4_env
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 1000
use_action_masks: True
weight_decay: 0.001
self_play_config:
update_score: 0.1
games_to_check: 100
env_update_num: 8
env_config:
name: connect_four_v0
self_play: True
is_human: False
random_agent: False
config_path: 'rl_games/configs/ma/ppo_connect4_self_play.yaml' | 1,735 | YAML | 19.915662 | 68 | 0.563689 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ma/ppo_slime_v0.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn'
network:
name: actor_critic
separate: True
#normalization: layer_norm
space:
discrete:
mlp:
units: [128,64]
activation: elu
initializer:
name: default
regularizer:
name: 'None'
config:
name: slime
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 20
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: slime_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: False
ignore_dead_batches : False
env_config:
name: SlimeVolleyDiscrete-v0
player:
render: True
games_num: 200
n_game_life: 1
determenistic: True | 1,093 | YAML | 16.645161 | 34 | 0.590119 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ma/ppo_connect4_self_play_resnet.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: True
load_path: 'nn/connect4_rn.pth'
network:
name: connect4net
blocks: 5
config:
name: connect4_rn
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 2e-4
score_to_win: 100
grad_norm: 0.5
entropy_coef: 0.005
truncate_grads: True
env_name: connect4_env
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 4
horizon_length: 128
minibatch_size: 512
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
games_to_track: 1000
use_action_masks: True
weight_decay: 0.001
self_play_config:
update_score: 0.1
games_to_check: 100
env_update_num: 4
env_config:
name: connect_four_v0
self_play: True
is_human: True
random_agent: False
config_path: 'rl_games/configs/ma/ppo_connect4_self_play_resnet.yaml' | 1,052 | YAML | 19.25 | 75 | 0.613118 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/humanoid2.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: dm_humanoid
score_to_win: 10000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 4
horizon_length: 4096
minibatch_size: 4096
mini_epochs: 15
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.0
env_config:
name: Humanoid2Run-v0
flat_observation: True
| 1,305 | YAML | 18.492537 | 39 | 0.549425 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/ppo_dm_control.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
value_shape: 2
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.999
tau: 0.9
learning_rate: 1e-4
name: dm_control
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 2
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: True
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: AcrobotSwingup_sparse-v0
flat_observation: True
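    # Random Network Distillation block. General idea (Burda et al.): a predictor network is trained
    # to match a fixed, randomly initialised target network on observations, and the prediction error
    # serves as an intrinsic reward; value_shape: 2 above presumably gives the critic separate heads
    # for the extrinsic and intrinsic returns, mixed via adv_coef.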
rnd_config:
scale_value: 4.0
exp_percent: 0.25
adv_coef: 0.5
gamma: 0.99
mini_epochs: 2
minibatch_size: 1024
learning_rate: 5e-4
network:
name: rnd_curiosity
mlp:
rnd:
units: [64,64,16]
net:
units: [16,16]
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,835 | YAML | 19.4 | 39 | 0.510627 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/walker_run.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: walker
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: WalkerRun-v0
flat_observation: True
| 1,297 | YAML | 18.373134 | 39 | 0.547417 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/cartpole.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 16]
activation: relu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: cartpole
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.0000
env_config:
name: CartpoleBalance-v0
flat_observation: True
| 1,301 | YAML | 18.432836 | 39 | 0.550346 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/humanoid_run_rnd.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
value_shape: 2
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: dm_humanoid
score_to_win: 10000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 1024
minibatch_size: 4096
mini_epochs: 15
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: True
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: HumanoidRun-v0
flat_observation: True
rnd_config:
scale_value: 1.0
gamma: 0.99
mini_epochs: 2
minibatch_size: 4096
learning_rate: 5e-4
exp_percent: 0.25
adv_coef: 0.5
network:
name: rnd_curiosity
mlp:
rnd:
units: [256,128,32]
net:
units: [128,32]
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None' | 1,830 | YAML | 19.573033 | 39 | 0.512022 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/humanoid_run.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: dm_humanoid
score_to_win: 10000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 1024
minibatch_size: 4096
mini_epochs: 15
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: HumanoidRun-v0
flat_observation: True
| 1,307 | YAML | 18.522388 | 39 | 0.550115 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/dm_control/humanoid_run_conv1d.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: False
cnn:
type: conv1d
activation: elu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 64
kernel_size: 3
strides: 1
padding: 1
- filters: 128
kernel_size: 2
strides: 1
padding: 0
mlp:
units: [128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: humanoid_conv
score_to_win: 15000
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: dm_control
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 1024
minibatch_size: 8192
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
frames: 4
name: Humanoid2Run-v0
flat_observation: True
| 1,829 | YAML | 18.677419 | 39 | 0.515582 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/minigrid/minigrid_rnn.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: False
#normalization: 'layer_norm'
space:
discrete:
cnn:
type: conv2d
activation: relu
initializer:
name: glorot_normal_initializer
gain: 1.4142
regularizer:
name: 'None'
convs:
- filters: 16
kernel_size: 8
strides: 4
padding: 0
- filters: 32
kernel_size: 4
strides: 2
padding: 0
mlp:
units: [128]
activation: relu
regularizer:
name: 'None'
initializer:
name: glorot_normal_initializer
gain: 1.4142
rnn:
name: 'lstm'
units: 128
layers: 1
before_mlp: True
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: minigrid_env_rnn
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: minigrid_env
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 16
weight_decay: 0.0000
env_config:
#action_bonus: True
#state_bonus : True
name: MiniGrid-MemoryS7-v0
fully_obs: False
player:
games_num: 100
render: True
determenistic: False
| 1,629 | YAML | 18.404762 | 39 | 0.553714 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/openai/ppo_gym_hand.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [400, 200, 100]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: HandBlockDenseXYZ
score_to_win: 10000
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
env_name: openai_robot_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 12
critic_coef: 2
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: True
seq_length: 4
bounds_loss_coef: 0.0001
max_epochs: 10000
env_config:
name: HandVMManipulateBlockRotateXYZDense-v0 | 1,327 | YAML | 19.75 | 52 | 0.565185 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/openai/ppo_gym_humanoid.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [400, 200, 100]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: Humanoid
score_to_win: 100080
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
env_name: openai_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 12
critic_coef: 2
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 4
bounds_loss_coef: 0.0001
max_epochs: 10000
env_config:
name: Humanoid-v3
| 1,288 | YAML | 18.830769 | 39 | 0.552019 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/openai/ppo_gym_ant.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: Hand_block
score_to_win: 100080
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
env_name: openai_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 2048
mini_epochs: 12
critic_coef: 2
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 4
bounds_loss_coef: 0.0001
max_epochs: 10000
env_config:
name: Ant-v3
| 1,284 | YAML | 18.76923 | 39 | 0.549844 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/brax/ppo_ant.yaml | params:
seed: 7
#devices: [0, 0]
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/Ant_brax.pth
config:
name: 'Ant_brax'
env_name: brax
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 1000
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
horizon_length: 8
num_actors: 8192
minibatch_size: 32768
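    # sizing sketch: with a single agent the rollout batch is num_actors * horizon_length
    # = 8192 * 8 = 65536 transitions, i.e. two minibatches of 32768 per update.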
mini_epochs: 4
critic_coef: 2
clip_value: False
bounds_loss_coef: 0.0001
env_config:
env_name: 'ant'
| 1,326 | YAML | 17.178082 | 33 | 0.581448 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/brax/ppo_humanoid.yaml | params:
seed: 7
#devices: [0, 0]
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/Humanoid_brax.pth
config:
name: 'Humanoid_brax'
env_name: brax
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 2000
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.00
truncate_grads: True
e_clip: 0.2
horizon_length: 16
num_actors: 8192
minibatch_size: 32768
mini_epochs: 5
critic_coef: 2
clip_value: False
bounds_loss_coef: 0.0004
env_config:
env_name: 'humanoid'
| 1,344 | YAML | 17.424657 | 33 | 0.587054 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/brax/ppo_ur5e.yaml | params:
seed: 7
#devices: [0, 0]
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/Ur5e_brax.pth
config:
name: 'Ur5e_brax'
env_name: brax
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 2000
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.00
truncate_grads: True
e_clip: 0.2
horizon_length: 16
num_actors: 8192
minibatch_size: 32768
mini_epochs: 5
critic_coef: 2
clip_value: False
bounds_loss_coef: 0.0004
env_config:
env_name: 'ur5e'
| 1,332 | YAML | 17.260274 | 33 | 0.583333 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/brax/sac_ant.yaml | params:
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [256, 128, 64]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/Ant.pth
config:
name: 'Ant_brax'
env_name : brax
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 2000000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.005
actor_lr: 0.0005
critic_lr: 0.0005
critic_tau: 0.005
batch_size: 4096
learnable_temperature: true
num_seed_steps: 5
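    # assuming the same accounting as noted in the humanoid SAC config, warm-up transitions
    # = num_actors * num_steps_per_episode * num_seed_steps = 128 * 128 * 5 = 81920.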
replay_buffer_size: 1000000
num_actors: 128
env_config:
env_name: 'ant' | 869 | YAML | 17.125 | 31 | 0.593786 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/brax/ppo_halfcheetah.yaml | params:
seed: 7
#devices: [0, 0]
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/Halfcheetah_brax.pth
config:
name: 'Halfcheetah_brax'
env_name: brax
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 2000
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.00
truncate_grads: True
e_clip: 0.2
horizon_length: 16
num_actors: 8192
minibatch_size: 32768
mini_epochs: 5
critic_coef: 2
clip_value: False
bounds_loss_coef: 0.0004
env_config:
env_name: 'halfcheetah'
| 1,353 | YAML | 17.547945 | 36 | 0.5898 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/brax/sac_humanoid.yaml | params:
algo:
name: sac
model:
name: soft_actor_critic
network:
name: soft_actor_critic
separate: True
space:
continuous:
mlp:
units: [512, 256]
activation: relu
initializer:
name: default
log_std_bounds: [-5, 2]
load_checkpoint: False
load_path: nn/Ant.pth
config:
name: 'humanoid_brax_sac'
env_name : brax
normalize_input: True
reward_shaper:
scale_value: 1
device: cuda
max_epochs: 2000000
num_steps_per_episode: 128
save_best_after: 100
save_frequency: 10000
gamma: 0.99
init_alpha: 1
alpha_lr: 0.0002
actor_lr: 0.0003
critic_lr: 0.0003
critic_tau: 0.005
batch_size: 2048
learnable_temperature: true
num_seed_steps: 2 # total steps: num_actors * num_steps_per_episode * num_seed_steps
replay_buffer_size: 1000000
num_actors: 64
env_config:
env_name: 'humanoid' | 941 | YAML | 18.224489 | 88 | 0.612115 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/brax/ppo_grasp.yaml | params:
seed: 7
#devices: [0, 0]
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/Grasp_brax.pth
config:
name: 'Grasp_brax'
env_name: brax
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 2000
save_best_after: 100
save_frequency: 50
grad_norm: 1.0
entropy_coef: 0.00
truncate_grads: True
e_clip: 0.2
horizon_length: 16
num_actors: 8192
minibatch_size: 32768
mini_epochs: 5
critic_coef: 2
clip_value: False
bounds_loss_coef: 0.0004
env_config:
env_name: 'grasp'
| 1,335 | YAML | 17.30137 | 33 | 0.58427 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/procgen/ppo_coinrun.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: resnet_actor_critic
separate: False
value_shape: 1
space:
discrete:
cnn:
conv_depths: [16, 32, 32]
activation: elu
initializer:
name: default
regularizer:
name: 'None'
mlp:
units: [512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
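    # Note that the key below is 'rnn1' rather than 'rnn'; the network builder most likely
    # ignores it, so the LSTM block is effectively disabled in this config.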
rnn1:
name: lstm
units: 256
layers: 1
config:
reward_shaper:
max_val: 10
normalize_advantage: True
gamma: 0.999
tau: 0.95
learning_rate: 1e-4
name: atari
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: 'openai_gym' #'openai_gym' #'PongNoFrameskip-v4' #
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 3
critic_coef: 1
lr_schedule: polynom_decay
kl_threshold: 0.01
normalize_input: False
seq_length: 4
max_epochs: 2000
env_config:
name: "procgen:procgen-coinrun-v0"
procgen: True
frames: 4
num_levels: 1000
start_level: 323
limit_steps: True
distribution_mode: 'easy'
| 1,372 | YAML | 18.069444 | 64 | 0.561953 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/distributed/hvd_wrapper.py | import torch
import horovod.torch as hvd
import os
class HorovodWrapper:
def __init__(self):
hvd.init()
self.rank = hvd.rank()
self.rank_size = hvd.size()
print('Starting horovod with rank: {0}, size: {1}'.format(self.rank, self.rank_size))
#self.device_name = 'cpu'
self.device_name = 'cuda:' + str(self.rank)
def update_algo_config(self, config):
config['device'] = self.device_name
if self.rank != 0:
config['print_stats'] = False
config['lr_schedule'] = None
return config
def setup_algo(self, algo):
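        # Broadcast the initial model weights and optimizer state from rank 0 so every worker
        # starts from identical parameters, then wrap the optimizer so gradients are averaged
        # (allreduced) across ranks on every step; the central-value network, if present,
        # gets the same treatment below.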
hvd.broadcast_parameters(algo.model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(algo.optimizer, root_rank=0)
algo.optimizer = hvd.DistributedOptimizer(algo.optimizer, named_parameters=algo.model.named_parameters())
self.sync_stats(algo)
if algo.has_central_value:
hvd.broadcast_optimizer_state(algo.central_value_net.optimizer, root_rank=0)
hvd.broadcast_parameters(algo.central_value_net.state_dict(), root_rank=0)
algo.central_value_net.optimizer = hvd.DistributedOptimizer(algo.central_value_net.optimizer, named_parameters=algo.central_value_net.model.named_parameters())
def sync_stats(self, algo):
stats_dict = algo.get_stats_weights()
for k,v in stats_dict.items():
for in_k, in_v in v.items():
in_v.data = hvd.allreduce(in_v, name=k + in_k)
algo.curr_frames = hvd.allreduce(torch.tensor(algo.curr_frames), average=False).item()
def broadcast_value(self, val, name):
hvd.broadcast_parameters({name: val}, root_rank=0)
def is_root(self):
return self.rank == 0
def average_stats(self, stats_dict):
res_dict = {}
        for k, v in stats_dict.items():
            res_dict[k] = self.average_value(v, k)
        return res_dict
def average_value(self, val, name):
avg_tensor = hvd.allreduce(val, name=name)
return avg_tensor
| 2,030 | Python | 35.927272 | 171 | 0.625123 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/interval_summary_writer.py | import time
class IntervalSummaryWriter:
"""
Summary writer wrapper designed to reduce the size of tf.events files.
It will prevent the learner from writing the summaries more often than a specified interval, i.e. if the
current interval is 20 seconds and we wrote our last summary for a particular summary key at 01:00, all summaries
until 01:20 for that key will be ignored.
The interval is adaptive: it will approach 1/200th of the total training time, but no less than interval_sec_min
and no greater than interval_sec_max.
    This was created to facilitate very large training runs, such as Population-Based Training, where summary
    folders can reach tens of gigabytes.
"""
def __init__(self, summary_writer, cfg):
self.experiment_start = time.time()
# prevents noisy summaries when experiments are restarted
self.defer_summaries_sec = cfg.get('defer_summaries_sec', 5)
self.interval_sec_min = cfg.get('summaries_interval_sec_min', 5)
self.interval_sec_max = cfg.get('summaries_interval_sec_max', 300)
self.last_interval = self.interval_sec_min
# interval between summaries will be close to this fraction of the total training time,
# i.e. for a run that lasted 200 minutes we write one summary every minute.
self.summaries_relative_step = 1.0 / 200
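        # Worked example with the defaults above (5 s min, 300 s max): 10 minutes into training the
        # target interval is 600 s / 200 = 3 s, which is clamped up to interval_sec_min; the
        # interval_sec_max cap is only reached after roughly 300 * 200 s, i.e. about 16.7 hours.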
self.writer = summary_writer
self.last_write_for_tag = dict()
def _calc_interval(self):
"""Write summaries more often in the beginning of the run."""
if self.last_interval >= self.interval_sec_max:
return self.last_interval
seconds_since_start = time.time() - self.experiment_start
interval = seconds_since_start * self.summaries_relative_step
interval = min(interval, self.interval_sec_max)
interval = max(interval, self.interval_sec_min)
self.last_interval = interval
return interval
def add_scalar(self, tag, value, step, *args, **kwargs):
if step == 0:
# removes faulty summaries that appear after the experiment restart
# print('Skip summaries with step=0')
return
seconds_since_start = time.time() - self.experiment_start
if seconds_since_start < self.defer_summaries_sec:
return
last_write = self.last_write_for_tag.get(tag, 0)
seconds_since_last_write = time.time() - last_write
interval = self._calc_interval()
if seconds_since_last_write >= interval:
self.writer.add_scalar(tag, value, step, *args, **kwargs)
self.last_write_for_tag[tag] = time.time()
def __getattr__(self, attr):
return getattr(self.writer, attr) | 2,750 | Python | 40.681818 | 117 | 0.663273 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/a2c_common.py | import os
from rl_games.common import tr_helpers
from rl_games.common import vecenv
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.algos_torch.moving_mean_std import MovingMeanStd
from rl_games.algos_torch.self_play_manager import SelfPlayManager
from rl_games.algos_torch import torch_ext
from rl_games.common import schedulers
from rl_games.common.experience import ExperienceBuffer
from rl_games.common.interval_summary_writer import IntervalSummaryWriter
import numpy as np
import collections
import time
from collections import deque, OrderedDict
import gym
from datetime import datetime
from tensorboardX import SummaryWriter
import torch
from torch import nn
from time import sleep
def swap_and_flatten01(arr):
"""
swap and then flatten axes 0 and 1
"""
if arr is None:
return arr
s = arr.size()
return arr.transpose(0, 1).reshape(s[0] * s[1], *s[2:])
def rescale_actions(low, high, action):
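    # linearly map actions from [-1, 1] to the env's [low, high] action range,
    # e.g. low=-2, high=2, action=0.5 -> 0.5 * 2 + 0 = 1.0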
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
class A2CBase:
def __init__(self, base_name, config):
pbt_str = ''
if config.get('population_based_training', False):
# in PBT, make sure experiment name contains a unique id of the policy within a population
pbt_str = f'_pbt_{config["pbt_idx"]:02d}'
# This helps in PBT when we need to restart an experiment with the exact same name, rather than
# generating a new name with the timestamp every time.
full_experiment_name = config.get('full_experiment_name', None)
if full_experiment_name:
print(f'Exact experiment name requested from command line: {full_experiment_name}')
self.experiment_name = full_experiment_name
else:
self.experiment_name = config['name'] + pbt_str + datetime.now().strftime("_%d-%H-%M-%S")
self.config = config
self.algo_observer = config['features']['observer']
self.algo_observer.before_init(base_name, config, self.experiment_name)
self.multi_gpu = config.get('multi_gpu', False)
self.rank = 0
self.rank_size = 1
if self.multi_gpu:
from rl_games.distributed.hvd_wrapper import HorovodWrapper
self.hvd = HorovodWrapper()
self.config = self.hvd.update_algo_config(config)
self.rank = self.hvd.rank
self.rank_size = self.hvd.rank_size
self.network_path = config.get('network_path', "./nn/")
self.log_path = config.get('log_path', "runs/")
self.env_config = config.get('env_config', {})
self.num_actors = config['num_actors']
self.env_name = config['env_name']
self.env_info = config.get('env_info')
if self.env_info is None:
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.env_info = self.vec_env.get_env_info()
self.ppo_device = config.get('device', 'cuda:0')
print('Env info:')
print(self.env_info)
self.value_size = self.env_info.get('value_size',1)
self.observation_space = self.env_info['observation_space']
self.weight_decay = config.get('weight_decay', 0.0)
self.use_action_masks = config.get('use_action_masks', False)
self.is_train = config.get('is_train', True)
self.central_value_config = self.config.get('central_value_config', None)
self.has_central_value = self.central_value_config is not None
self.truncate_grads = self.config.get('truncate_grads', False)
if self.has_central_value:
self.state_space = self.env_info.get('state_space', None)
if isinstance(self.state_space,gym.spaces.Dict):
self.state_shape = {}
for k,v in self.state_space.spaces.items():
self.state_shape[k] = v.shape
else:
self.state_shape = self.state_space.shape
self.self_play_config = self.config.get('self_play_config', None)
self.has_self_play_config = self.self_play_config is not None
self.self_play = config.get('self_play', False)
self.save_freq = config.get('save_frequency', 0)
self.save_best_after = config.get('save_best_after', 100)
self.print_stats = config.get('print_stats', True)
self.rnn_states = None
self.name = base_name
self.ppo = config['ppo']
self.max_epochs = self.config.get('max_epochs', 1e6)
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.linear_lr = config['lr_schedule'] == 'linear'
self.schedule_type = config.get('schedule_type', 'legacy')
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
self.scheduler = schedulers.AdaptiveScheduler(self.kl_threshold)
elif self.linear_lr:
self.scheduler = schedulers.LinearScheduler(float(config['learning_rate']),
max_steps=self.max_epochs,
apply_to_entropy=config.get('schedule_entropy', False),
start_entropy_coef=config.get('entropy_coef'))
else:
self.scheduler = schedulers.IdentityScheduler()
self.e_clip = config['e_clip']
self.clip_value = config['clip_value']
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_agents = self.env_info.get('agents', 1)
# self.horizon_length = config['horizon_length']
self.horizon_length = config['steps_num']
self.seq_len = self.config.get('seq_length', 4)
self.normalize_advantage = config['normalize_advantage']
self.normalize_input = self.config['normalize_input']
self.normalize_value = self.config.get('normalize_value', False)
self.truncate_grads = self.config.get('truncate_grads', False)
self.has_phasic_policy_gradients = False
if isinstance(self.observation_space,gym.spaces.Dict):
self.obs_shape = {}
for k,v in self.observation_space.spaces.items():
self.obs_shape[k] = v.shape
else:
self.obs_shape = self.observation_space.shape
self.critic_coef = config['critic_coef']
self.grad_norm = config['grad_norm']
self.gamma = self.config['gamma']
self.tau = self.config['tau']
self.games_to_track = self.config.get('games_to_track', 100)
self.game_rewards = torch_ext.AverageMeter(self.value_size, self.games_to_track).to(self.ppo_device)
self.game_lengths = torch_ext.AverageMeter(1, self.games_to_track).to(self.ppo_device)
self.obs = None
self.games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
self.batch_size = self.horizon_length * self.num_actors * self.num_agents
self.batch_size_envs = self.horizon_length * self.num_actors
self.minibatch_size = self.config['minibatch_size']
self.mini_epochs_num = self.config['mini_epochs']
self.num_minibatches = self.batch_size // self.minibatch_size
assert(self.batch_size % self.minibatch_size == 0)
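        # illustrative minibatch math: horizon_length=256, num_actors=256, num_agents=1
        # gives batch_size=65536, which with minibatch_size=8192 yields 8 minibatches per mini-epoch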
self.mixed_precision = self.config.get('mixed_precision', False)
self.scaler = torch.cuda.amp.GradScaler(enabled=self.mixed_precision)
self.last_lr = self.config['learning_rate']
self.frame = 0
self.update_time = 0
self.mean_rewards = self.last_mean_rewards = -100500
self.play_time = 0
self.epoch_num = 0
# allows us to specify a folder where all experiments will reside
self.train_dir = config.get('train_dir', 'train_dir')
# a folder inside of train_dir containing everything related to a particular experiment
# self.experiment_dir = os.path.join(self.train_dir, self.experiment_name)
self.experiment_dir = config.get('logdir', './')
# folders inside <train_dir>/<experiment_dir> for a specific purpose
self.nn_dir = os.path.join(self.experiment_dir, 'nn')
self.summaries_dir = os.path.join(self.experiment_dir, 'runs')
os.makedirs(self.train_dir, exist_ok=True)
os.makedirs(self.experiment_dir, exist_ok=True)
os.makedirs(self.nn_dir, exist_ok=True)
os.makedirs(self.summaries_dir, exist_ok=True)
self.entropy_coef = self.config['entropy_coef']
if self.rank == 0:
writer = SummaryWriter(self.summaries_dir)
self.writer = IntervalSummaryWriter(writer, self.config)
else:
self.writer = None
self.value_bootstrap = self.config.get('value_bootstrap')
if self.normalize_value:
self.value_mean_std = RunningMeanStd((1,)).to(self.ppo_device)
self.is_tensor_obses = False
self.last_rnn_indices = None
self.last_state_indices = None
#self_play
if self.has_self_play_config:
print('Initializing SelfPlay Manager')
self.self_play_manager = SelfPlayManager(self.self_play_config, self.writer)
# features
self.algo_observer = config['features']['observer']
self.soft_aug = config['features'].get('soft_augmentation', None)
self.has_soft_aug = self.soft_aug is not None
# soft augmentation not yet supported
assert not self.has_soft_aug
def write_stats(self, total_time, epoch_num, step_time, play_time, update_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul, frame, scaled_time, scaled_play_time, curr_frames):
# do we need scaled time?
self.writer.add_scalar('performance/step_inference_rl_update_fps', curr_frames / scaled_time, frame)
self.writer.add_scalar('performance/step_inference_fps', curr_frames / scaled_play_time, frame)
self.writer.add_scalar('performance/step_fps', curr_frames / step_time, frame)
self.writer.add_scalar('performance/rl_update_time', update_time, frame)
self.writer.add_scalar('performance/step_inference_time', play_time, frame)
self.writer.add_scalar('performance/step_time', step_time, frame)
self.writer.add_scalar('losses/a_loss', torch_ext.mean_list(a_losses).item(), frame)
self.writer.add_scalar('losses/c_loss', torch_ext.mean_list(c_losses).item(), frame)
self.writer.add_scalar('losses/entropy', torch_ext.mean_list(entropies).item(), frame)
self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
self.writer.add_scalar('info/kl', torch_ext.mean_list(kls).item(), frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
self.algo_observer.after_print_stats(frame, epoch_num, total_time)
def set_eval(self):
self.model.eval()
if self.normalize_input:
self.running_mean_std.eval()
if self.normalize_value:
self.value_mean_std.eval()
def set_train(self):
self.model.train()
if self.normalize_input:
self.running_mean_std.train()
if self.normalize_value:
self.value_mean_std.train()
def update_lr(self, lr):
if self.multi_gpu:
lr_tensor = torch.tensor([lr])
self.hvd.broadcast_value(lr_tensor, 'learning_rate')
lr = lr_tensor.item()
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
#if self.has_central_value:
# self.central_value_net.update_lr(lr)
def get_action_values(self, obs):
processed_obs = self._preproc_obs(obs['obs'])
self.model.eval()
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : processed_obs,
'rnn_states' : self.rnn_states
}
with torch.no_grad():
res_dict = self.model(input_dict)
if self.has_central_value:
states = obs['states']
input_dict = {
'is_train': False,
'states' : states,
#'actions' : res_dict['action'],
#'rnn_states' : self.rnn_states
}
value = self.get_central_value(input_dict)
res_dict['values'] = value
if self.normalize_value:
res_dict['values'] = self.value_mean_std(res_dict['values'], True)
return res_dict
def get_values(self, obs):
with torch.no_grad():
if self.has_central_value:
states = obs['states']
self.central_value_net.eval()
input_dict = {
'is_train': False,
'states' : states,
'actions' : None,
'is_done': self.dones,
}
value = self.get_central_value(input_dict)
else:
self.model.eval()
processed_obs = self._preproc_obs(obs['obs'])
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : processed_obs,
'rnn_states' : self.rnn_states
}
result = self.model(input_dict)
value = result['values']
if self.normalize_value:
value = self.value_mean_std(value, True)
return value
@property
def device(self):
return self.ppo_device
def reset_envs(self):
self.obs = self.env_reset()
def init_tensors(self):
batch_size = self.num_agents * self.num_actors
algo_info = {
'num_actors' : self.num_actors,
'horizon_length' : self.horizon_length,
'has_central_value' : self.has_central_value,
'use_action_masks' : self.use_action_masks
}
self.experience_buffer = ExperienceBuffer(self.env_info, algo_info, self.ppo_device)
val_shape = (self.horizon_length, batch_size, self.value_size)
current_rewards_shape = (batch_size, self.value_size)
self.current_rewards = torch.zeros(current_rewards_shape, dtype=torch.float32, device=self.ppo_device)
self.current_lengths = torch.zeros(batch_size, dtype=torch.float32, device=self.ppo_device)
self.dones = torch.ones((batch_size,), dtype=torch.uint8, device=self.ppo_device)
if self.is_rnn:
self.rnn_states = self.model.get_default_rnn_state()
self.rnn_states = [s.to(self.ppo_device) for s in self.rnn_states]
batch_size = self.num_agents * self.num_actors
num_seqs = self.horizon_length * batch_size // self.seq_len
assert((self.horizon_length * batch_size // self.num_minibatches) % self.seq_len == 0)
self.mb_rnn_states = [torch.zeros((s.size()[0], num_seqs, s.size()[2]), dtype = torch.float32, device=self.ppo_device) for s in self.rnn_states]
def init_rnn_from_model(self, model):
self.is_rnn = self.model.is_rnn()
def init_rnn_step(self, batch_size, mb_rnn_states):
mb_rnn_states = self.mb_rnn_states
mb_rnn_masks = torch.zeros(self.horizon_length*batch_size, dtype = torch.float32, device=self.ppo_device)
steps_mask = torch.arange(0, batch_size * self.horizon_length, self.horizon_length, dtype=torch.long, device=self.ppo_device)
play_mask = torch.arange(0, batch_size, 1, dtype=torch.long, device=self.ppo_device)
steps_state = torch.arange(0, batch_size * self.horizon_length//self.seq_len, self.horizon_length//self.seq_len, dtype=torch.long, device=self.ppo_device)
indices = torch.zeros((batch_size), dtype = torch.long, device=self.ppo_device)
return mb_rnn_masks, indices, steps_mask, steps_state, play_mask, mb_rnn_states
def process_rnn_indices(self, mb_rnn_masks, indices, steps_mask, steps_state, mb_rnn_states):
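        # Marks the current step of every env as valid in the rnn mask; whenever an env sits at a
        # sequence boundary (indices % seq_len == 0) its current hidden state is snapshotted into
        # mb_rnn_states so truncated BPTT can later start each sequence from the right state.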
seq_indices = None
if indices.max().item() >= self.horizon_length:
return seq_indices, True
mb_rnn_masks[indices + steps_mask] = 1
seq_indices = indices % self.seq_len
state_indices = (seq_indices == 0).nonzero(as_tuple=False)
state_pos = indices // self.seq_len
rnn_indices = state_pos[state_indices] + steps_state[state_indices]
for s, mb_s in zip(self.rnn_states, mb_rnn_states):
mb_s[:, rnn_indices, :] = s[:, state_indices, :]
self.last_rnn_indices = rnn_indices
self.last_state_indices = state_indices
return seq_indices, False
def process_rnn_dones(self, all_done_indices, indices, seq_indices):
if len(all_done_indices) > 0:
shifts = self.seq_len - 1 - seq_indices[all_done_indices]
indices[all_done_indices] += shifts
for s in self.rnn_states:
s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0
indices += 1
def cast_obs(self, obs):
if isinstance(obs, torch.Tensor):
self.is_tensor_obses = True
elif isinstance(obs, np.ndarray):
assert(self.observation_space.dtype != np.int8)
if self.observation_space.dtype == np.uint8:
obs = torch.ByteTensor(obs).to(self.ppo_device)
else:
obs = torch.FloatTensor(obs).to(self.ppo_device)
return obs
def obs_to_tensors(self, obs):
obs_is_dict = isinstance(obs, dict)
if obs_is_dict:
upd_obs = {}
for key, value in obs.items():
upd_obs[key] = self._obs_to_tensors_internal(value)
else:
upd_obs = self.cast_obs(obs)
if not obs_is_dict or 'obs' not in obs:
upd_obs = {'obs' : upd_obs}
return upd_obs
def _obs_to_tensors_internal(self, obs):
if isinstance(obs, dict):
upd_obs = {}
for key, value in obs.items():
upd_obs[key] = self._obs_to_tensors_internal(value)
else:
upd_obs = self.cast_obs(obs)
return upd_obs
def preprocess_actions(self, actions):
if not self.is_tensor_obses:
actions = actions.cpu().numpy()
return actions
def env_step(self, actions):
actions = self.preprocess_actions(actions)
obs, rewards, dones, infos = self.vec_env.step(actions)
if self.is_tensor_obses:
if self.value_size == 1:
rewards = rewards.unsqueeze(1)
return self.obs_to_tensors(obs), rewards.to(self.ppo_device), dones.to(self.ppo_device), infos
else:
if self.value_size == 1:
rewards = np.expand_dims(rewards, axis=1)
return self.obs_to_tensors(obs), torch.from_numpy(rewards).to(self.ppo_device).float(), torch.from_numpy(dones).to(self.ppo_device), infos
def env_reset(self):
obs = self.vec_env.reset()
obs = self.obs_to_tensors(obs)
return obs
def discount_values(self, fdones, last_extrinsic_values, mb_fdones, mb_extrinsic_values, mb_rewards):
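        # Generalized Advantage Estimation (GAE): walk the rollout backwards, computing the TD
        # residual delta_t = r_t + gamma * V(s_{t+1}) * (1 - done) - V(s_t) and accumulating
        # A_t = delta_t + gamma * tau * (1 - done) * A_{t+1}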
lastgaelam = 0
mb_advs = torch.zeros_like(mb_rewards)
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - fdones
nextvalues = last_extrinsic_values
else:
nextnonterminal = 1.0 - mb_fdones[t+1]
nextvalues = mb_extrinsic_values[t+1]
nextnonterminal = nextnonterminal.unsqueeze(1)
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_extrinsic_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
return mb_advs
def discount_values_masks(self, fdones, last_extrinsic_values, mb_fdones, mb_extrinsic_values, mb_rewards, mb_masks):
lastgaelam = 0
mb_advs = torch.zeros_like(mb_rewards)
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - fdones
nextvalues = last_extrinsic_values
else:
nextnonterminal = 1.0 - mb_fdones[t+1]
nextvalues = mb_extrinsic_values[t+1]
nextnonterminal = nextnonterminal.unsqueeze(1)
masks_t = mb_masks[t].unsqueeze(1)
delta = (mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_extrinsic_values[t])
mb_advs[t] = lastgaelam = (delta + self.gamma * self.tau * nextnonterminal * lastgaelam) * masks_t
return mb_advs
def clear_stats(self):
batch_size = self.num_agents * self.num_actors
self.game_rewards.clear()
self.game_lengths.clear()
self.mean_rewards = self.last_mean_rewards = -100500
self.algo_observer.after_clear_stats()
def update_epoch(self):
pass
def train(self):
pass
def prepare_dataset(self, batch_dict):
pass
def train_epoch(self):
self.vec_env.set_train_info(self.frame)
def train_actor_critic(self, obs_dict, opt_step=True):
pass
def calc_gradients(self):
pass
def get_central_value(self, obs_dict):
return self.central_value_net.get_value(obs_dict)
def train_central_value(self):
return self.central_value_net.train_net()
def get_full_state_weights(self):
state = self.get_weights()
state['epoch'] = self.epoch_num
state['optimizer'] = self.optimizer.state_dict()
if self.has_central_value:
state['assymetric_vf_nets'] = self.central_value_net.state_dict()
state['frame'] = self.frame
# This is actually the best reward ever achieved. last_mean_rewards is perhaps not the best variable name
# We save it to the checkpoint to prevent overriding the "best ever" checkpoint upon experiment restart
state['last_mean_rewards'] = self.last_mean_rewards
env_state = self.vec_env.get_env_state()
state['env_state'] = env_state
return state
def set_full_state_weights(self, weights):
self.set_weights(weights)
self.epoch_num = weights['epoch']
if self.has_central_value:
self.central_value_net.load_state_dict(weights['assymetric_vf_nets'])
self.optimizer.load_state_dict(weights['optimizer'])
self.frame = weights.get('frame', 0)
self.last_mean_rewards = weights.get('last_mean_rewards', -100500)
env_state = weights.get('env_state', None)
self.vec_env.set_env_state(env_state)
def get_weights(self):
state = self.get_stats_weights()
state['model'] = self.model.state_dict()
return state
def get_stats_weights(self):
state = {}
if self.normalize_input:
state['running_mean_std'] = self.running_mean_std.state_dict()
if self.normalize_value:
state['reward_mean_std'] = self.value_mean_std.state_dict()
if self.has_central_value:
state['assymetric_vf_mean_std'] = self.central_value_net.get_stats_weights()
if self.mixed_precision:
state['scaler'] = self.scaler.state_dict()
return state
def set_stats_weights(self, weights):
if self.normalize_input:
self.running_mean_std.load_state_dict(weights['running_mean_std'])
if self.normalize_value:
self.value_mean_std.load_state_dict(weights['reward_mean_std'])
if self.has_central_value:
self.central_value_net.set_stats_weights(weights['assymetric_vf_mean_std'])
if self.mixed_precision and 'scaler' in weights:
self.scaler.load_state_dict(weights['scaler'])
def set_weights(self, weights):
self.model.load_state_dict(weights['model'])
self.set_stats_weights(weights)
def _preproc_obs(self, obs_batch):
if type(obs_batch) is dict:
for k,v in obs_batch.items():
obs_batch[k] = self._preproc_obs(v)
else:
if obs_batch.dtype == torch.uint8:
obs_batch = obs_batch.float() / 255.0
if self.normalize_input:
obs_batch = self.running_mean_std(obs_batch)
return obs_batch
def play_steps(self):
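        # Roll out horizon_length steps for every actor: query the policy, store
        # obs/actions/values/rewards in the experience buffer, then bootstrap with the value of
        # the final observation and compute GAE-based returns for the whole batch.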
epinfos = []
update_list = self.update_list
step_time = 0.0
for n in range(self.horizon_length):
if self.use_action_masks:
masks = self.vec_env.get_action_masks()
res_dict = self.get_masked_action_values(self.obs, masks)
else:
res_dict = self.get_action_values(self.obs)
self.experience_buffer.update_data('obses', n, self.obs['obs'])
self.experience_buffer.update_data('dones', n, self.dones)
for k in update_list:
self.experience_buffer.update_data(k, n, res_dict[k])
if self.has_central_value:
self.experience_buffer.update_data('states', n, self.obs['states'])
step_time_start = time.time()
self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions'])
step_time_end = time.time()
step_time += (step_time_end - step_time_start)
shaped_rewards = self.rewards_shaper(rewards)
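            # on episode time-outs (truncations, not true terminations) bootstrap the reward with
            # the critic's value estimate so the truncated tail is not treated as a zero-value terminal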
if self.value_bootstrap and 'time_outs' in infos:
shaped_rewards += self.gamma * res_dict['values'] * self.cast_obs(infos['time_outs']).unsqueeze(1).float()
self.experience_buffer.update_data('rewards', n, shaped_rewards)
self.current_rewards += rewards
self.current_lengths += 1
all_done_indices = self.dones.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
self.game_rewards.update(self.current_rewards[done_indices])
self.game_lengths.update(self.current_lengths[done_indices])
self.algo_observer.process_infos(infos, done_indices)
not_dones = 1.0 - self.dones.float()
self.current_rewards = self.current_rewards * not_dones.unsqueeze(1)
self.current_lengths = self.current_lengths * not_dones
last_values = self.get_values(self.obs)
fdones = self.dones.float()
mb_fdones = self.experience_buffer.tensor_dict['dones'].float()
mb_values = self.experience_buffer.tensor_dict['values']
mb_rewards = self.experience_buffer.tensor_dict['rewards']
mb_advs = self.discount_values(fdones, last_values, mb_fdones, mb_values, mb_rewards)
mb_returns = mb_advs + mb_values
batch_dict = self.experience_buffer.get_transformed_list(swap_and_flatten01, self.tensor_list)
batch_dict['returns'] = swap_and_flatten01(mb_returns)
batch_dict['played_frames'] = self.batch_size
batch_dict['step_time'] = step_time
return batch_dict
def play_steps_rnn(self):
mb_rnn_states = []
epinfos = []
self.experience_buffer.tensor_dict['values'].fill_(0)
self.experience_buffer.tensor_dict['rewards'].fill_(0)
self.experience_buffer.tensor_dict['dones'].fill_(1)
step_time = 0.0
update_list = self.update_list
batch_size = self.num_agents * self.num_actors
mb_rnn_masks = None
mb_rnn_masks, indices, steps_mask, steps_state, play_mask, mb_rnn_states = self.init_rnn_step(batch_size, mb_rnn_states)
for n in range(self.horizon_length):
seq_indices, full_tensor = self.process_rnn_indices(mb_rnn_masks, indices, steps_mask, steps_state, mb_rnn_states)
if full_tensor:
break
if self.has_central_value:
self.central_value_net.pre_step_rnn(self.last_rnn_indices, self.last_state_indices)
if self.use_action_masks:
masks = self.vec_env.get_action_masks()
res_dict = self.get_masked_action_values(self.obs, masks)
else:
res_dict = self.get_action_values(self.obs)
self.rnn_states = res_dict['rnn_states']
self.experience_buffer.update_data_rnn('obses', indices, play_mask, self.obs['obs'])
self.experience_buffer.update_data_rnn('dones', indices, play_mask, self.dones.byte())
for k in update_list:
self.experience_buffer.update_data_rnn(k, indices, play_mask, res_dict[k])
if self.has_central_value:
self.experience_buffer.update_data_rnn('states', indices[::self.num_agents] ,play_mask[::self.num_agents]//self.num_agents, self.obs['states'])
step_time_start = time.time()
self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions'])
step_time_end = time.time()
step_time += (step_time_end - step_time_start)
shaped_rewards = self.rewards_shaper(rewards)
if self.value_bootstrap and 'time_outs' in infos:
shaped_rewards += self.gamma * res_dict['values'] * self.cast_obs(infos['time_outs']).unsqueeze(1).float()
self.experience_buffer.update_data_rnn('rewards', indices, play_mask, shaped_rewards)
self.current_rewards += rewards
self.current_lengths += 1
all_done_indices = self.dones.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
self.process_rnn_dones(all_done_indices, indices, seq_indices)
if self.has_central_value:
self.central_value_net.post_step_rnn(all_done_indices)
self.algo_observer.process_infos(infos, done_indices)
fdones = self.dones.float()
not_dones = 1.0 - self.dones.float()
self.game_rewards.update(self.current_rewards[done_indices])
self.game_lengths.update(self.current_lengths[done_indices])
self.current_rewards = self.current_rewards * not_dones.unsqueeze(1)
self.current_lengths = self.current_lengths * not_dones
last_values = self.get_values(self.obs)
fdones = self.dones.float()
mb_fdones = self.experience_buffer.tensor_dict['dones'].float()
mb_values = self.experience_buffer.tensor_dict['values']
mb_rewards = self.experience_buffer.tensor_dict['rewards']
non_finished = (indices != self.horizon_length).nonzero(as_tuple=False)
ind_to_fill = indices[non_finished]
mb_fdones[ind_to_fill,non_finished] = fdones[non_finished]
mb_values[ind_to_fill,non_finished] = last_values[non_finished]
fdones[non_finished] = 1.0
last_values[non_finished] = 0
mb_advs = self.discount_values_masks(fdones, last_values, mb_fdones, mb_values, mb_rewards, mb_rnn_masks.view(-1,self.horizon_length).transpose(0,1))
mb_returns = mb_advs + mb_values
batch_dict = self.experience_buffer.get_transformed_list(swap_and_flatten01, self.tensor_list)
batch_dict['returns'] = swap_and_flatten01(mb_returns)
batch_dict['rnn_states'] = mb_rnn_states
batch_dict['rnn_masks'] = mb_rnn_masks
batch_dict['played_frames'] = n * self.num_actors * self.num_agents
batch_dict['step_time'] = step_time
return batch_dict
class DiscreteA2CBase(A2CBase):
def __init__(self, base_name, config):
A2CBase.__init__(self, base_name, config)
batch_size = self.num_agents * self.num_actors
action_space = self.env_info['action_space']
if type(action_space) is gym.spaces.Discrete:
self.actions_shape = (self.horizon_length, batch_size)
self.actions_num = action_space.n
self.is_multi_discrete = False
if type(action_space) is gym.spaces.Tuple:
self.actions_shape = (self.horizon_length, batch_size, len(action_space))
self.actions_num = [action.n for action in action_space]
self.is_multi_discrete = True
self.is_discrete = True
def init_tensors(self):
A2CBase.init_tensors(self)
self.update_list = ['actions', 'neglogpacs', 'values']
if self.use_action_masks:
self.update_list += ['action_masks']
self.tensor_list = self.update_list + ['obses', 'states', 'dones']
def train_epoch(self):
super().train_epoch()
self.set_eval()
play_time_start = time.time()
with torch.no_grad():
if self.is_rnn:
batch_dict = self.play_steps_rnn()
else:
batch_dict = self.play_steps()
self.set_train()
play_time_end = time.time()
update_time_start = time.time()
rnn_masks = batch_dict.get('rnn_masks', None)
self.curr_frames = batch_dict.pop('played_frames')
self.prepare_dataset(batch_dict)
self.algo_observer.after_steps()
a_losses = []
c_losses = []
entropies = []
kls = []
if self.has_central_value:
self.train_central_value()
if self.is_rnn:
print('non masked rnn obs ratio: ', rnn_masks.sum().item() / (rnn_masks.nelement()))
for _ in range(0, self.mini_epochs_num):
ep_kls = []
for i in range(len(self.dataset)):
a_loss, c_loss, entropy, kl, last_lr, lr_mul = self.train_actor_critic(self.dataset[i])
a_losses.append(a_loss)
c_losses.append(c_loss)
ep_kls.append(kl)
entropies.append(entropy)
av_kls = torch_ext.mean_list(ep_kls)
if self.multi_gpu:
av_kls = self.hvd.average_value(av_kls, 'ep_kls')
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
self.update_lr(self.last_lr)
kls.append(av_kls)
if self.has_phasic_policy_gradients:
self.ppg_aux_loss.train_net(self)
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return batch_dict['step_time'], play_time, update_time, total_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul
def prepare_dataset(self, batch_dict):
rnn_masks = batch_dict.get('rnn_masks', None)
obses = batch_dict['obses']
returns = batch_dict['returns']
values = batch_dict['values']
actions = batch_dict['actions']
neglogpacs = batch_dict['neglogpacs']
rnn_states = batch_dict.get('rnn_states', None)
advantages = returns - values
if self.normalize_value:
values = self.value_mean_std(values)
returns = self.value_mean_std(returns)
advantages = torch.sum(advantages, axis=1)
if self.normalize_advantage:
if self.is_rnn:
advantages = torch_ext.normalization_with_masks(advantages, rnn_masks)
else:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
dataset_dict = {}
dataset_dict['old_values'] = values
dataset_dict['old_logp_actions'] = neglogpacs
dataset_dict['advantages'] = advantages
dataset_dict['returns'] = returns
dataset_dict['actions'] = actions
dataset_dict['obs'] = obses
dataset_dict['rnn_states'] = rnn_states
dataset_dict['rnn_masks'] = rnn_masks
if self.use_action_masks:
dataset_dict['action_masks'] = batch_dict['action_masks']
self.dataset.update_values_dict(dataset_dict)
if self.has_central_value:
dataset_dict = {}
dataset_dict['old_values'] = values
dataset_dict['advantages'] = advantages
dataset_dict['returns'] = returns
dataset_dict['actions'] = actions
dataset_dict['obs'] = batch_dict['states']
dataset_dict['rnn_masks'] = rnn_masks
self.central_value_net.update_dataset(dataset_dict)
def train(self):
self.init_tensors()
self.mean_rewards = self.last_mean_rewards = -100500
start_time = time.time()
total_time = 0
rep_count = 0
# self.frame = 0 # loading from checkpoint
self.obs = self.env_reset()
if self.multi_gpu:
self.hvd.setup_algo(self)
while True:
epoch_num = self.update_epoch()
step_time, play_time, update_time, sum_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul = self.train_epoch()
# cleaning memory to optimize space
self.dataset.update_values_dict(None)
if self.multi_gpu:
self.hvd.sync_stats(self)
            total_time += sum_time
            curr_frames = self.curr_frames
            self.frame += curr_frames
if self.rank == 0:
scaled_time = sum_time #self.num_agents * sum_time
scaled_play_time = play_time #self.num_agents * play_time
curr_frames = self.curr_frames
frame = self.frame
self.write_stats(total_time, epoch_num, step_time, play_time, update_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul, frame, scaled_time, scaled_play_time, curr_frames)
if self.has_soft_aug:
self.writer.add_scalar('losses/aug_loss', np.mean(aug_losses), frame)
self.algo_observer.after_print_stats(frame, epoch_num, total_time)
if self.game_rewards.current_size > 0:
mean_rewards = self.game_rewards.get_mean()
mean_lengths = self.game_lengths.get_mean()
self.mean_rewards = mean_rewards[0]
for i in range(self.value_size):
rewards_name = 'rewards' if i == 0 else 'rewards{0}'.format(i)
                        self.writer.add_scalar(rewards_name + '/step', mean_rewards[i], frame)
                        self.writer.add_scalar(rewards_name + '/iter', mean_rewards[i], epoch_num)
                        self.writer.add_scalar(rewards_name + '/time', mean_rewards[i], total_time)
self.writer.add_scalar('episode_lengths/step', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if self.has_self_play_config:
self.self_play_manager.update(self)
# removed equal signs (i.e. "rew=") from the checkpoint name since it messes with hydra CLI parsing
checkpoint_name = self.config['name'] + 'ep' + str(epoch_num) + 'rew' + str(mean_rewards)
if self.save_freq > 0:
                        if (epoch_num % self.save_freq == 0) and (mean_rewards[0] <= self.last_mean_rewards):
self.save(os.path.join(self.nn_dir, 'last_' + checkpoint_name))
if mean_rewards[0] > self.last_mean_rewards and epoch_num >= self.save_best_after:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards[0]
self.save(os.path.join(self.nn_dir, self.config['name']))
if self.last_mean_rewards > self.config['score_to_win']:
print('Network won!')
self.save(os.path.join(self.nn_dir, checkpoint_name))
return self.last_mean_rewards, epoch_num
if epoch_num > self.max_epochs:
self.save(os.path.join(self.nn_dir, 'last_' + checkpoint_name))
print('MAX EPOCHS NUM!')
return self.last_mean_rewards, epoch_num
update_time = 0
if self.print_stats:
fps_step = curr_frames / step_time
fps_step_inference = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'fps step: {fps_step:.1f} fps step and policy inference: {fps_step_inference:.1f} fps total: {fps_total:.1f}')
class ContinuousA2CBase(A2CBase):
def __init__(self, base_name, config):
A2CBase.__init__(self, base_name, config)
self.is_discrete = False
action_space = self.env_info['action_space']
self.actions_num = action_space.shape[0]
self.bounds_loss_coef = config.get('bounds_loss_coef', None)
# todo introduce device instead of cuda()
self.actions_low = torch.from_numpy(action_space.low.copy()).float().to(self.ppo_device)
self.actions_high = torch.from_numpy(action_space.high.copy()).float().to(self.ppo_device)
def preprocess_actions(self, actions):
clamped_actions = torch.clamp(actions, -1.0, 1.0)
rescaled_actions = rescale_actions(self.actions_low, self.actions_high, clamped_actions)
if not self.is_tensor_obses:
rescaled_actions = rescaled_actions.cpu().numpy()
return rescaled_actions
def init_tensors(self):
A2CBase.init_tensors(self)
self.update_list = ['actions', 'neglogpacs', 'values', 'mus', 'sigmas']
self.tensor_list = self.update_list + ['obses', 'states', 'dones']
def train_epoch(self):
super().train_epoch()
self.set_eval()
play_time_start = time.time()
with torch.no_grad():
if self.is_rnn:
batch_dict = self.play_steps_rnn()
else:
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
rnn_masks = batch_dict.get('rnn_masks', None)
self.set_train()
self.curr_frames = batch_dict.pop('played_frames')
self.prepare_dataset(batch_dict)
self.algo_observer.after_steps()
if self.has_central_value:
self.train_central_value()
a_losses = []
c_losses = []
b_losses = []
entropies = []
kls = []
if self.is_rnn:
frames_mask_ratio = rnn_masks.sum().item() / (rnn_masks.nelement())
print(frames_mask_ratio)
for _ in range(0, self.mini_epochs_num):
ep_kls = []
for i in range(len(self.dataset)):
a_loss, c_loss, entropy, kl, last_lr, lr_mul, cmu, csigma, b_loss = self.train_actor_critic(self.dataset[i])
a_losses.append(a_loss)
c_losses.append(c_loss)
ep_kls.append(kl)
entropies.append(entropy)
if self.bounds_loss_coef is not None:
b_losses.append(b_loss)
self.dataset.update_mu_sigma(cmu, csigma)
if self.schedule_type == 'legacy':
if self.multi_gpu:
kl = self.hvd.average_value(kl, 'ep_kls')
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0,kl.item())
self.update_lr(self.last_lr)
av_kls = torch_ext.mean_list(ep_kls)
if self.schedule_type == 'standard':
if self.multi_gpu:
av_kls = self.hvd.average_value(av_kls, 'ep_kls')
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0,av_kls.item())
self.update_lr(self.last_lr)
kls.append(av_kls)
if self.schedule_type == 'standard_epoch':
if self.multi_gpu:
av_kls = self.hvd.average_value(torch_ext.mean_list(kls), 'ep_kls')
self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0,av_kls.item())
self.update_lr(self.last_lr)
if self.has_phasic_policy_gradients:
self.ppg_aux_loss.train_net(self)
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return batch_dict['step_time'], play_time, update_time, total_time, a_losses, c_losses, b_losses, entropies, kls, last_lr, lr_mul
def prepare_dataset(self, batch_dict):
obses = batch_dict['obses']
returns = batch_dict['returns']
dones = batch_dict['dones']
values = batch_dict['values']
actions = batch_dict['actions']
neglogpacs = batch_dict['neglogpacs']
mus = batch_dict['mus']
sigmas = batch_dict['sigmas']
rnn_states = batch_dict.get('rnn_states', None)
rnn_masks = batch_dict.get('rnn_masks', None)
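        # returns were built as advantages + values in play_steps, so subtracting the
        # (pre-normalization) values recovers the GAE advantages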
advantages = returns - values
if self.normalize_value:
values = self.value_mean_std(values)
returns = self.value_mean_std(returns)
advantages = torch.sum(advantages, axis=1)
if self.normalize_advantage:
if self.is_rnn:
advantages = torch_ext.normalization_with_masks(advantages, rnn_masks)
else:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
dataset_dict = {}
dataset_dict['old_values'] = values
dataset_dict['old_logp_actions'] = neglogpacs
dataset_dict['advantages'] = advantages
dataset_dict['returns'] = returns
dataset_dict['actions'] = actions
dataset_dict['obs'] = obses
dataset_dict['rnn_states'] = rnn_states
dataset_dict['rnn_masks'] = rnn_masks
dataset_dict['mu'] = mus
dataset_dict['sigma'] = sigmas
self.dataset.update_values_dict(dataset_dict)
if self.has_central_value:
dataset_dict = {}
dataset_dict['old_values'] = values
dataset_dict['advantages'] = advantages
dataset_dict['returns'] = returns
dataset_dict['actions'] = actions
dataset_dict['obs'] = batch_dict['states']
dataset_dict['rnn_masks'] = rnn_masks
self.central_value_net.update_dataset(dataset_dict)
def train(self):
self.init_tensors()
self.last_mean_rewards = -100500
start_time = time.time()
total_time = 0
rep_count = 0
self.obs = self.env_reset()
self.curr_frames = self.batch_size_envs
if self.multi_gpu:
self.hvd.setup_algo(self)
while True:
epoch_num = self.update_epoch()
step_time, play_time, update_time, sum_time, a_losses, c_losses, b_losses, entropies, kls, last_lr, lr_mul = self.train_epoch()
total_time += sum_time
frame = self.frame
# cleaning memory to optimize space
self.dataset.update_values_dict(None)
if self.multi_gpu:
self.hvd.sync_stats(self)
if self.rank == 0:
# do we need scaled_time?
scaled_time = sum_time #self.num_agents * sum_time
scaled_play_time = play_time #self.num_agents * play_time
curr_frames = self.curr_frames
self.frame += curr_frames
self.write_stats(total_time, epoch_num, step_time, play_time, update_time, a_losses, c_losses, entropies, kls, last_lr, lr_mul, frame, scaled_time, scaled_play_time, curr_frames)
if len(b_losses) > 0:
self.writer.add_scalar('losses/bounds_loss', torch_ext.mean_list(b_losses).item(), frame)
if self.has_soft_aug:
self.writer.add_scalar('losses/aug_loss', np.mean(aug_losses), frame)
mean_rewards = [0]
mean_lengths = 0
if self.game_rewards.current_size > 0:
mean_rewards = self.game_rewards.get_mean()
mean_lengths = self.game_lengths.get_mean()
self.mean_rewards = mean_rewards[0]
for i in range(self.value_size):
rewards_name = 'rewards' if i == 0 else 'rewards{0}'.format(i)
                        self.writer.add_scalar(rewards_name + '/step', mean_rewards[i], frame)
                        self.writer.add_scalar(rewards_name + '/iter', mean_rewards[i], epoch_num)
                        self.writer.add_scalar(rewards_name + '/time', mean_rewards[i], total_time)
self.writer.add_scalar('episode_lengths/step', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if self.has_self_play_config:
self.self_play_manager.update(self)
checkpoint_name = self.config['name'] + 'ep' + str(epoch_num) + 'rew' + str(mean_rewards)
if self.save_freq > 0:
if (epoch_num % self.save_freq == 0) and (mean_rewards[0] <= self.last_mean_rewards):
self.save(os.path.join(self.nn_dir, 'last_' + checkpoint_name))
if mean_rewards[0] > self.last_mean_rewards and epoch_num >= self.save_best_after:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards[0]
self.save(os.path.join(self.nn_dir, self.config['name']))
if self.last_mean_rewards > self.config['score_to_win']:
print('Network won!')
self.save(os.path.join(self.nn_dir, checkpoint_name))
return self.last_mean_rewards, epoch_num
if epoch_num > self.max_epochs:
self.save(os.path.join(self.nn_dir, 'last_' + self.config['name'] + 'ep' + str(epoch_num) + 'rew' + str(mean_rewards)))
print('MAX EPOCHS NUM!')
return self.last_mean_rewards, epoch_num
update_time = 0
if self.print_stats:
fps_step = curr_frames / step_time
fps_step_inference = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
# print(f'fps step: {fps_step:.1f} fps step and policy inference: {fps_step_inference:.1f} fps total: {fps_total:.1f} mean reward: {mean_rewards[0]:.2f} mean lengths: {mean_lengths:.1f}')
print(f'epoch: {epoch_num} fps step: {fps_step:.1f} fps total: {fps_total:.1f} mean reward: {mean_rewards[0]:.2f} mean lengths: {mean_lengths:.1f}')
| 51,540 | Python | 41.915071 | 208 | 0.585642 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/experiment.py | import copy
import yaml
class Experiment:
def __init__(self, config, experiment_config):
self.config = copy.deepcopy(config)
self.best_config = copy.deepcopy(self.config)
self.experiment_config = experiment_config
self.best_results = -100500, 0
self.use_best_prev_result = self.experiment_config.get('use_best_prev_result', True)
self.experiments = self.experiment_config['experiments']
self.last_exp_idx = self.experiment_config.get('start_exp', 0)
self.sub_idx = self.experiment_config.get('start_sub_exp', 0)
self.done = False
self.results = {}
self.create_config()
def _set_parameter(self, config, path, value):
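        # walks a dotted path into the nested config dict,
        # e.g. path='config.learning_rate' sets config['config']['learning_rate'] = value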
keys = path.split('.')
sub_conf = config
for key in keys[:-1]:
sub_conf = sub_conf[key]
print('set:' + str(keys) + ':' + str(value))
sub_conf[keys[-1]] = value
def set_results(self, rewards, epochs):
self.results[(self.last_exp_idx, self.sub_idx)] = rewards, epochs
if self.best_results[0] < rewards:
self.best_results = rewards, epochs
def create_config(self):
if self.done:
self.current_config = None
return
self.current_config = copy.deepcopy(self.config)
self.current_config['config']['name'] += '_' + str(self.last_exp_idx) + '_' + str(self.sub_idx)
print('Experiment name: ' + self.current_config['config']['name'])
for key in self.experiments[self.last_exp_idx]['exp']:
self._set_parameter(self.current_config, key['path'], key['value'][self.sub_idx])
with open('data.yml', 'w') as outfile:
yaml.dump(self.current_config, outfile, default_flow_style=False)
def get_next_config(self):
config = self.current_config
max_vals = len(self.experiments[0]['exp'][0]['value'])
self.sub_idx += 1
if self.sub_idx >= max_vals:
self.sub_idx = 0
self.last_exp_idx += 1
if self.last_exp_idx >= len(self.experiments):
self.done = True
else:
self.last_exp_idx += 1
self.create_config()
return config
#def __iter__(self):
# print('__iter__')
# return self
def __next__(self):
print('__next__')
res = self.get_next_config()
        if res is None:
            raise StopIteration
        return res
| 2,457 | Python | 33.619718 | 103 | 0.564103 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/schedulers.py |
class RLScheduler:
def __init__(self):
pass
def update(self,current_lr, entropy_coef, epoch, frames, **kwargs):
pass
class IdentityScheduler(RLScheduler):
def __init__(self):
super().__init__()
def update(self, current_lr, entropy_coef, epoch, frames, kl_dist, **kwargs):
return current_lr, entropy_coef
class AdaptiveScheduler(RLScheduler):
def __init__(self, kl_threshold = 0.008):
super().__init__()
self.min_lr = 1e-6
self.max_lr = 1e-2
self.kl_threshold = kl_threshold
def update(self, current_lr, entropy_coef, epoch, frames, kl_dist, **kwargs):
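        # adjust the learning rate in 1.5x steps toward the KL target: shrink it when the observed
        # KL overshoots 2x the threshold, grow it when KL falls below 0.5x, clamped to [min_lr, max_lr]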
lr = current_lr
if kl_dist > (2.0 * self.kl_threshold):
lr = max(current_lr / 1.5, self.min_lr)
if kl_dist < (0.5 * self.kl_threshold):
lr = min(current_lr * 1.5, self.max_lr)
return lr, entropy_coef
class LinearScheduler(RLScheduler):
def __init__(self, start_lr, min_lr=1e-6, max_steps = 1000000, use_epochs=True, apply_to_entropy=False, **kwargs):
super().__init__()
self.start_lr = start_lr
self.min_lr = min_lr
self.max_steps = max_steps
self.use_epochs = use_epochs
self.apply_to_entropy = apply_to_entropy
if apply_to_entropy:
self.start_entropy_coef = kwargs.pop('start_entropy_coef', 0.01)
self.min_entropy_coef = kwargs.pop('min_entropy_coef', 0.0001)
def update(self, current_lr, entropy_coef, epoch, frames, kl_dist, **kwargs):
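        # linear decay toward min_lr over max_steps, e.g. start_lr=3e-4, min_lr=1e-6,
        # max_steps=1000: at step 500 mul=0.5 and lr = 1e-6 + (3e-4 - 1e-6) * 0.5 ~= 1.5e-4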
if self.use_epochs:
steps = epoch
else:
steps = frames
mul = max(0, self.max_steps - steps)/self.max_steps
lr = self.min_lr + (self.start_lr - self.min_lr) * mul
if self.apply_to_entropy:
entropy_coef = self.min_entropy_coef + (self.start_entropy_coef - self.min_entropy_coef) * mul
return lr, entropy_coef | 1,948 | Python | 33.803571 | 118 | 0.582136 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/wrappers.py | import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
from copy import copy
class InfoWrapper(gym.Wrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
self.reward = 0
def reset(self, **kwargs):
self.reward = 0
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
self.reward += reward
if done:
info['scores'] = self.reward
return observation, reward, done, info
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on True game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class EpisodeStackedEnv(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.max_stacked_steps = 1000
self.current_steps=0
def step(self, action):
obs, reward, done, info = self.env.step(action)
if reward == 0:
self.current_steps += 1
else:
self.current_steps = 0
if self.current_steps == self.max_stacked_steps:
self.current_steps = 0
print('max_stacked_steps!')
done = True
reward = -1
obs = self.env.reset()
return obs, reward, done, info
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env,skip=4, use_max = True):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
self.use_max = use_max
# most recent raw observations (for max pooling across time steps)
if self.use_max:
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
else:
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.float32)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if self.use_max:
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
else:
self._obs_buffer[0] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
if self.use_max:
max_frame = self._obs_buffer.max(axis=0)
else:
max_frame = self._obs_buffer[0]
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = width
self.height = height
self.grayscale = grayscale
if self.grayscale:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
else:
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 3), dtype=np.uint8)
def observation(self, frame):
import cv2
if self.grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
if self.grayscale:
frame = np.expand_dims(frame, -1)
return frame
class FrameStack(gym.Wrapper):
def __init__(self, env, k, flat = False):
"""
        Stack the k last frames.
        Unlike baselines.common.atari_wrappers.LazyFrames, this implementation returns a regular
        numpy array (the lazy-frames optimization is left commented out in _get_ob below).
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.flat = flat
self.frames = deque([], maxlen=k)
observation_space = env.observation_space
self.shp = shp = observation_space.shape
#TODO: remove consts -1 and 1
if flat:
self.observation_space = spaces.Box(low=-1, high=1, shape=(shp[:-1] + (shp[-1] * k,)), dtype=observation_space.dtype)
else:
if len(shp) == 1:
self.observation_space = spaces.Box(low=-1, high=1, shape=(k, shp[0]), dtype=observation_space.dtype)
else:
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
if self.flat:
return np.squeeze(self.frames).flatten()
else:
if len(self.shp) == 1:
res = np.concatenate([f[..., np.newaxis] for f in self.frames], axis=-1)
#print('shape:', np.shape(res))
#print('shape:', np.shape(np.transpose(res)))
return np.transpose(res)
else:
return np.concatenate(self.frames, axis=-1)
#return LazyFrames(list(self.frames))
class BatchedFrameStack(gym.Wrapper):
def __init__(self, env, k, transpose = False, flatten = False):
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
self.shp = shp = env.observation_space.shape
self.transpose = transpose
self.flatten = flatten
if transpose:
assert(not flatten)
self.observation_space = spaces.Box(low=0, high=1, shape=(shp[0], k), dtype=env.observation_space.dtype)
else:
if flatten:
self.observation_space = spaces.Box(low=0, high=1, shape=(k *shp[0],), dtype=env.observation_space.dtype)
else:
self.observation_space = spaces.Box(low=0, high=1, shape=(k, shp[0]), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
if self.transpose:
frames = np.transpose(self.frames, (1, 2, 0))
else:
if self.flatten:
frames = np.array(self.frames)
shape = np.shape(frames)
                # reorder to (num_envs, k, obs_dim) then flatten to (num_envs, k * obs_dim)
                frames = np.transpose(frames, (1, 0, 2))
                frames = np.reshape(frames, (shape[1], shape[0] * shape[2]))
else:
frames = np.transpose(self.frames, (1, 0, 2))
return frames
class BatchedFrameStackWithStates(gym.Wrapper):
def __init__(self, env, k, transpose = False, flatten = False):
gym.Wrapper.__init__(self, env)
self.k = k
self.obses = deque([], maxlen=k)
self.states = deque([], maxlen=k)
self.shp = shp = env.observation_space.shape
self.state_shp = state_shp = env.state_space.shape
self.transpose = transpose
self.flatten = flatten
if transpose:
assert(not flatten)
self.observation_space = spaces.Box(low=0, high=1, shape=(shp[0], k), dtype=env.observation_space.dtype)
self.state_space = spaces.Box(low=0, high=1, shape=(state_shp[0], k), dtype=env.observation_space.dtype)
else:
if flatten:
self.observation_space = spaces.Box(low=0, high=1, shape=(k*shp[0],), dtype=env.observation_space.dtype)
self.state_space = spaces.Box(low=0, high=1, shape=(k*state_shp[0],), dtype=env.observation_space.dtype)
else:
self.observation_space = spaces.Box(low=0, high=1, shape=(k, shp[0]), dtype=env.observation_space.dtype)
self.state_space = spaces.Box(low=0, high=1, shape=(k, state_shp[0]), dtype=env.observation_space.dtype)
def reset(self):
obs_dict = self.env.reset()
ob = obs_dict["obs"]
state = obs_dict["state"]
for _ in range(self.k):
self.obses.append(ob)
self.states.append(state)
return self._get_ob()
def step(self, action):
obs_dict, reward, done, info = self.env.step(action)
ob = obs_dict["obs"]
state = obs_dict["state"]
self.obses.append(ob)
self.states.append(state)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.obses) == self.k
obses = self.process_data(self.obses)
states = self.process_data(self.states)
return {"obs": obses, "state" : states}
def process_data(self, data):
if len(np.shape(data)) < 3:
return np.array(data)
if self.transpose:
obses = np.transpose(data, (1, 2, 0))
else:
if self.flatten:
obses = np.array(data)
shape = np.shape(obses)
                # reorder to (num_envs, k, dim) then flatten to (num_envs, k * dim)
                obses = np.transpose(obses, (1, 0, 2))
                obses = np.reshape(obses, (shape[1], shape[0] * shape[2]))
else:
obses = np.transpose(data, (1, 0, 2))
return obses
class ProcgenStack(gym.Wrapper):
def __init__(self, env, k = 2, greyscale=True):
gym.Wrapper.__init__(self, env)
self.k = k
self.curr_frame = 0
self.frames = deque([], maxlen=k)
self.greyscale=greyscale
self.prev_frame = None
shp = env.observation_space.shape
if greyscale:
shape = (shp[:-1] + (shp[-1] + k - 1,))
else:
shape = (shp[:-1] + (shp[-1] * k,))
self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
def reset(self):
import cv2
frames = self.env.reset()
self.frames.append(frames)
if self.greyscale:
self.prev_frame = np.expand_dims(cv2.cvtColor(frames, cv2.COLOR_RGB2GRAY), axis=-1)
for _ in range(self.k-1):
self.frames.append(self.prev_frame)
else:
for _ in range(self.k-1):
self.frames.append(frames)
return self._get_ob()
def step(self, action):
import cv2
frames, reward, done, info = self.env.step(action)
if self.greyscale:
self.frames[self.k-1] = self.prev_frame
self.prev_frame = np.expand_dims(cv2.cvtColor(frames, cv2.COLOR_RGB2GRAY), axis=-1)
self.frames.append(frames)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
stacked_frames = np.concatenate(self.frames, axis=-1)
return stacked_frames
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class ReallyDoneWrapper(gym.Wrapper):
def __init__(self, env):
"""
        Make the env work with the video monitor so it records the whole game instead of a single life.
"""
self.old_env = env
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
old_lives = self.env.unwrapped.ale.lives()
obs, reward, done, info = self.env.step(action)
lives = self.env.unwrapped.ale.lives()
if done:
return obs, reward, done, info
if old_lives > lives:
print('lives:', lives)
obs, _, done, _ = self.env.step(1)
done = lives == 0
return obs, reward, done, info
class AllowBacktracking(gym.Wrapper):
"""
Use deltas in max(X) as the reward, rather than deltas
in X. This way, agents are not discouraged too heavily
from exploring backwards if there is no way to advance
head-on in the level.
"""
def __init__(self, env):
super(AllowBacktracking, self).__init__(env)
self._cur_x = 0
self._max_x = 0
def reset(self, **kwargs): # pylint: disable=E0202
self._cur_x = 0
self._max_x = 0
return self.env.reset(**kwargs)
def step(self, action): # pylint: disable=E0202
obs, rew, done, info = self.env.step(action)
self._cur_x += rew
rew = max(0, self._cur_x - self._max_x)
self._max_x = max(self._max_x, self._cur_x)
return obs, rew, done, info
def unwrap(env):
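    # Recursively strips nested gym wrappers until the base environment is reached.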
if hasattr(env, "unwrapped"):
return env.unwrapped
elif hasattr(env, "env"):
return unwrap(env.env)
elif hasattr(env, "leg_env"):
return unwrap(env.leg_env)
else:
return env
class StickyActionEnv(gym.Wrapper):
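    # With probability p the previous action is repeated instead of the agent's chosen
    # one ("sticky actions"), a common way of injecting stochasticity into ALE games.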
def __init__(self, env, p=0.25):
super(StickyActionEnv, self).__init__(env)
self.p = p
self.last_action = 0
def reset(self):
self.last_action = 0
return self.env.reset()
def step(self, action):
if self.unwrapped.np_random.uniform() < self.p:
action = self.last_action
self.last_action = action
obs, reward, done, info = self.env.step(action)
return obs, reward, done, info
class MontezumaInfoWrapper(gym.Wrapper):
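    # Tracks exploration in Montezuma's Revenge by reading the current room id directly
    # from ALE RAM and reporting the set of visited rooms in info['scores'] on episode end.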
def __init__(self, env, room_address):
super(MontezumaInfoWrapper, self).__init__(env)
self.room_address = room_address
self.visited_rooms = set()
def get_current_room(self):
ram = unwrap(self.env).ale.getRAM()
assert len(ram) == 128
return int(ram[self.room_address])
def step(self, action):
obs, rew, done, info = self.env.step(action)
self.visited_rooms.add(self.get_current_room())
if done:
if 'scores' not in info:
info['scores'] = {}
info['scores'].update(visited_rooms=copy(self.visited_rooms))
self.visited_rooms.clear()
return obs, rew, done, info
def reset(self):
return self.env.reset()
class TimeLimit(gym.Wrapper):
"""
    A slightly modified version of OpenAI's original TimeLimit wrapper.
    The main difference is that we always report a boolean in info['time_outs'].
"""
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
self.concat_infos = True
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def step(self, action):
assert self._elapsed_steps is not None, "Cannot call env.step() before calling reset()"
observation, reward, done, info = self.env.step(action)
self._elapsed_steps += 1
info['time_outs'] = False
if self._elapsed_steps >= self._max_episode_steps:
info['time_outs'] = True
done = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class MaskVelocityWrapper(gym.ObservationWrapper):
"""
Gym environment observation wrapper used to mask velocity terms in
    observations. The intention is to make the MDP partially observable.
"""
def __init__(self, env, name):
super(MaskVelocityWrapper, self).__init__(env)
if name == "CartPole-v1":
self.mask = np.array([1., 0., 1., 0.])
elif name == "Pendulum-v0":
self.mask = np.array([1., 1., 0.])
elif name == "LunarLander-v2":
self.mask = np.array([1., 1., 0., 0., 1., 0., 1., 1,])
elif name == "LunarLanderContinuous-v2":
self.mask = np.array([1., 1., 0., 0., 1., 0., 1., 1,])
else:
raise NotImplementedError
def observation(self, observation):
return observation * self.mask
def make_atari(env_id, timelimit=True, noop_max=0, skip=4, sticky=False, directory=None):
env = gym.make(env_id)
if 'Montezuma' in env_id:
env = MontezumaInfoWrapper(env, room_address=3 if 'Montezuma' in env_id else 1)
env = StickyActionEnv(env)
env = InfoWrapper(env)
    if directory is not None:
env = gym.wrappers.Monitor(env,directory=directory,force=True)
if sticky:
env = StickyActionEnv(env)
if not timelimit:
env = env.env
#assert 'NoFrameskip' in env.spec.id
if noop_max > 0:
env = NoopResetEnv(env, noop_max=noop_max)
env = MaxAndSkipEnv(env, skip=skip)
#env = EpisodeStackedEnv(env)
return env
def wrap_deepmind(env, episode_life=False, clip_rewards=True, frame_stack=True, scale =False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
def wrap_carracing(env, clip_rewards=True, frame_stack=True, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
def make_car_racing(env_id, skip=4):
env = make_atari(env_id, noop_max=0, skip=skip)
return wrap_carracing(env, clip_rewards=False)
def make_atari_deepmind(env_id, noop_max=30, skip=4, sticky=False, episode_life=True):
env = make_atari(env_id, noop_max=noop_max, skip=skip, sticky=sticky)
return wrap_deepmind(env, episode_life=episode_life, clip_rewards=False)
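# Illustrative usage (not part of the original file):
#
#   env = make_atari_deepmind('PongNoFrameskip-v4', skip=4)
#   obs = env.reset()   # stacked, warped greyscale frames ready for a conv net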
| 22,702 | Python | 33.927692 | 134 | 0.576337 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/rollouts.py |
'''
TODO: move play_steps here
'''
class Rollout:
def __init__(self, gamma):
self.gamma = gamma
def play_steps(self, env, max_steps_count = 1):
pass
class DiscretePpoRollout(Rollout):
def __init__(self, gamma, lam):
        super(DiscretePpoRollout, self).__init__(gamma)
self.lam = lam
def play_steps(self, env, max_steps_count = 1):
pass | 382 | Python | 18.149999 | 51 | 0.578534 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/common_losses.py | from torch import nn
import torch
def critic_loss(value_preds_batch, values, curr_e_clip, return_batch, clip_value):
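    # PPO-style value loss: optionally clips the new value prediction to stay within
    # curr_e_clip of the old prediction and takes the elementwise maximum of the
    # clipped and unclipped squared errors against the returns.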
if clip_value:
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-curr_e_clip, curr_e_clip)
value_losses = (values - return_batch)**2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses,
value_losses_clipped)
else:
c_loss = (return_batch - values)**2
return c_loss
def actor_loss(old_action_log_probs_batch, action_log_probs, advantage, is_ppo, curr_e_clip):
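    # Clipped PPO surrogate. In rl_games these arguments are negative log-probabilities
    # (see 'neglogpacs' in the experience buffer), so exp(old - new) corresponds to the
    # usual ratio pi_new / pi_old; the pessimistic maximum of the negated clipped and
    # unclipped surrogates is returned. Without PPO a plain policy-gradient term is used.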
if is_ppo:
ratio = torch.exp(old_action_log_probs_batch - action_log_probs)
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - curr_e_clip,
1.0 + curr_e_clip)
a_loss = torch.max(-surr1, -surr2)
else:
a_loss = (action_log_probs * advantage)
return a_loss | 1,020 | Python | 36.814813 | 93 | 0.583333 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/env_configurations.py | from rl_games.common import wrappers
from rl_games.common import tr_helpers
import rl_games.envs.test
from rl_games.envs.brax import create_brax_env
import gym
from gym.wrappers import FlattenObservation, FilterObservation
import numpy as np
#FLEX_PATH = '/home/viktor/Documents/rl/FlexRobotics'
FLEX_PATH = '/home/trrrrr/Documents/FlexRobotics-master'
class HCRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
return np.max([-10, reward])
class DMControlReward(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
self.num_stops = 0
self.max_stops = 1000
self.reward_threshold = 0.001
def reset(self, **kwargs):
self.num_stops = 0
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
if reward < self.reward_threshold:
self.num_stops += 1
else:
self.num_stops = max(0, self.num_stops-1)
if self.num_stops > self.max_stops:
#print('too many stops!')
reward = -10
observation = self.reset()
done = True
return observation, self.reward(reward), done, info
def reward(self, reward):
return reward
class DMControlObsWrapper(gym.ObservationWrapper):
def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
def observation(self, obs):
return obs['observations']
def create_default_gym_env(**kwargs):
frames = kwargs.pop('frames', 1)
name = kwargs.pop('name')
is_procgen = kwargs.pop('procgen', False)
limit_steps = kwargs.pop('limit_steps', False)
env = gym.make(name, **kwargs)
if frames > 1:
if is_procgen:
env = wrappers.ProcgenStack(env, frames, True)
else:
env = wrappers.FrameStack(env, frames, False)
if limit_steps:
env = wrappers.LimitStepsWrapper(env)
return env
def create_goal_gym_env(**kwargs):
frames = kwargs.pop('frames', 1)
name = kwargs.pop('name')
limit_steps = kwargs.pop('limit_steps', False)
env = gym.make(name, **kwargs)
env = FlattenObservation(FilterObservation(env, ['observation', 'desired_goal']))
if frames > 1:
env = wrappers.FrameStack(env, frames, False)
if limit_steps:
env = wrappers.LimitStepsWrapper(env)
return env
def create_slime_gym_env(**kwargs):
import slimevolleygym
from rl_games.envs.slimevolley_selfplay import SlimeVolleySelfplay
name = kwargs.pop('name')
limit_steps = kwargs.pop('limit_steps', False)
self_play = kwargs.pop('self_play', False)
if self_play:
env = SlimeVolleySelfplay(name, **kwargs)
else:
env = gym.make(name, **kwargs)
return env
def create_connect_four_env(**kwargs):
from rl_games.envs.connect4_selfplay import ConnectFourSelfPlay
name = kwargs.pop('name')
limit_steps = kwargs.pop('limit_steps', False)
self_play = kwargs.pop('self_play', False)
if self_play:
env = ConnectFourSelfPlay(name, **kwargs)
else:
env = gym.make(name, **kwargs)
return env
def create_atari_gym_env(**kwargs):
#frames = kwargs.pop('frames', 1)
name = kwargs.pop('name')
skip = kwargs.pop('skip',4)
episode_life = kwargs.pop('episode_life',True)
env = wrappers.make_atari_deepmind(name, skip=skip,episode_life=episode_life)
return env
def create_dm_control_env(**kwargs):
frames = kwargs.pop('frames', 1)
name = 'dm2gym:'+ kwargs.pop('name')
env = gym.make(name, environment_kwargs=kwargs)
env = DMControlReward(env)
env = DMControlObsWrapper(env)
if frames > 1:
env = wrappers.FrameStack(env, frames, False)
return env
def create_super_mario_env(name='SuperMarioBros-v1'):
import gym
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
import gym_super_mario_bros
env = gym_super_mario_bros.make(name)
env = JoypadSpace(env, SIMPLE_MOVEMENT)
env = wrappers.MaxAndSkipEnv(env, skip=4)
env = wrappers.wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=True)
return env
def create_super_mario_env_stage1(name='SuperMarioBrosRandomStage1-v1'):
import gym
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
import gym_super_mario_bros
stage_names = [
'SuperMarioBros-1-1-v1',
'SuperMarioBros-1-2-v1',
'SuperMarioBros-1-3-v1',
'SuperMarioBros-1-4-v1',
]
env = gym_super_mario_bros.make(stage_names[1])
env = JoypadSpace(env, SIMPLE_MOVEMENT)
env = wrappers.MaxAndSkipEnv(env, skip=4)
env = wrappers.wrap_deepmind(env, episode_life=False, clip_rewards=False, frame_stack=True, scale=True)
#env = wrappers.AllowBacktracking(env)
return env
def create_quadrupped_env():
import gym
import roboschool
import quadruppedEnv
return wrappers.FrameStack(wrappers.MaxAndSkipEnv(gym.make('QuadruppedWalk-v1'), 4, False), 2, True)
def create_roboschool_env(name):
import gym
import roboschool
return gym.make(name)
def create_smac(name, **kwargs):
from rl_games.envs.smac_env import SMACEnv
frames = kwargs.pop('frames', 1)
transpose = kwargs.pop('transpose', False)
flatten = kwargs.pop('flatten', True)
has_cv = kwargs.get('central_value', False)
env = SMACEnv(name, **kwargs)
if frames > 1:
if has_cv:
env = wrappers.BatchedFrameStackWithStates(env, frames, transpose=False, flatten=flatten)
else:
env = wrappers.BatchedFrameStack(env, frames, transpose=False, flatten=flatten)
return env
def create_smac_cnn(name, **kwargs):
from rl_games.envs.smac_env import SMACEnv
has_cv = kwargs.get('central_value', False)
frames = kwargs.pop('frames', 4)
transpose = kwargs.pop('transpose', False)
env = SMACEnv(name, **kwargs)
if has_cv:
env = wrappers.BatchedFrameStackWithStates(env, frames, transpose=transpose)
else:
env = wrappers.BatchedFrameStack(env, frames, transpose=transpose)
return env
def create_test_env(name, **kwargs):
import rl_games.envs.test
env = gym.make(name, **kwargs)
return env
def create_minigrid_env(name, **kwargs):
import gym_minigrid
import gym_minigrid.wrappers
state_bonus = kwargs.pop('state_bonus', False)
action_bonus = kwargs.pop('action_bonus', False)
fully_obs = kwargs.pop('fully_obs', False)
env = gym.make(name, **kwargs)
if state_bonus:
env = gym_minigrid.wrappers.StateBonus(env)
if action_bonus:
env = gym_minigrid.wrappers.ActionBonus(env)
if fully_obs:
env = gym_minigrid.wrappers.RGBImgObsWrapper(env)
else:
env = gym_minigrid.wrappers.RGBImgPartialObsWrapper(env) # Get pixel observations
env = gym_minigrid.wrappers.ImgObsWrapper(env) # Get rid of the 'mission' field
    print('minigrid_env observation space shape:', env.observation_space)
return env
def create_multiwalker_env(**kwargs):
from rl_games.envs.multiwalker import MultiWalker
env = MultiWalker('', **kwargs)
return env
def create_diambra_env(**kwargs):
from rl_games.envs.diambra.diambra import DiambraEnv
env = DiambraEnv(**kwargs)
return env
def create_env(name, **kwargs):
steps_limit = kwargs.pop('steps_limit', None)
env = gym.make(name, **kwargs)
if steps_limit is not None:
env = wrappers.TimeLimit(env, steps_limit)
return env
configurations = {
'CartPole-v1' : {
'vecenv_type' : 'RAY',
'env_creator' : lambda **kwargs : gym.make('CartPole-v1'),
},
'CartPoleMaskedVelocity-v1' : {
'vecenv_type' : 'RAY',
'env_creator' : lambda **kwargs : wrappers.MaskVelocityWrapper(gym.make('CartPole-v1'), 'CartPole-v1'),
},
'MountainCarContinuous-v0' : {
'vecenv_type' : 'RAY',
'env_creator' : lambda **kwargs : gym.make('MountainCarContinuous-v0'),
},
'MountainCar-v0' : {
'vecenv_type' : 'RAY',
'env_creator' : lambda : gym.make('MountainCar-v0'),
},
'Acrobot-v1' : {
'env_creator' : lambda **kwargs : gym.make('Acrobot-v1'),
'vecenv_type' : 'RAY'
},
'Pendulum-v0' : {
'env_creator' : lambda **kwargs : gym.make('Pendulum-v0'),
'vecenv_type' : 'RAY'
},
'LunarLander-v2' : {
'env_creator' : lambda **kwargs : gym.make('LunarLander-v2'),
'vecenv_type' : 'RAY'
},
'PongNoFrameskip-v4' : {
'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('PongNoFrameskip-v4', skip=4),
'vecenv_type' : 'RAY'
},
'BreakoutNoFrameskip-v4' : {
'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('BreakoutNoFrameskip-v4', skip=4,sticky=False),
'vecenv_type' : 'RAY'
},
'MsPacmanNoFrameskip-v4' : {
'env_creator' : lambda **kwargs : wrappers.make_atari_deepmind('MsPacmanNoFrameskip-v4', skip=4),
'vecenv_type' : 'RAY'
},
'CarRacing-v0' : {
'env_creator' : lambda **kwargs : wrappers.make_car_racing('CarRacing-v0', skip=4),
'vecenv_type' : 'RAY'
},
'RoboschoolAnt-v1' : {
'env_creator' : lambda **kwargs : create_roboschool_env('RoboschoolAnt-v1'),
'vecenv_type' : 'RAY'
},
'SuperMarioBros-v1' : {
'env_creator' : lambda : create_super_mario_env(),
'vecenv_type' : 'RAY'
},
'SuperMarioBrosRandomStages-v1' : {
'env_creator' : lambda : create_super_mario_env('SuperMarioBrosRandomStages-v1'),
'vecenv_type' : 'RAY'
},
'SuperMarioBrosRandomStage1-v1' : {
'env_creator' : lambda **kwargs : create_super_mario_env_stage1('SuperMarioBrosRandomStage1-v1'),
'vecenv_type' : 'RAY'
},
'RoboschoolHalfCheetah-v1' : {
'env_creator' : lambda **kwargs : create_roboschool_env('RoboschoolHalfCheetah-v1'),
'vecenv_type' : 'RAY'
},
'RoboschoolHumanoid-v1' : {
'env_creator' : lambda : wrappers.FrameStack(create_roboschool_env('RoboschoolHumanoid-v1'), 1, True),
'vecenv_type' : 'RAY'
},
'LunarLanderContinuous-v2' : {
'env_creator' : lambda **kwargs : gym.make('LunarLanderContinuous-v2'),
'vecenv_type' : 'RAY'
},
'RoboschoolHumanoidFlagrun-v1' : {
'env_creator' : lambda **kwargs : wrappers.FrameStack(create_roboschool_env('RoboschoolHumanoidFlagrun-v1'), 1, True),
'vecenv_type' : 'RAY'
},
'BipedalWalker-v3' : {
'env_creator' : lambda **kwargs : create_env('BipedalWalker-v3', **kwargs),
'vecenv_type' : 'RAY'
},
'BipedalWalkerCnn-v3' : {
'env_creator' : lambda **kwargs : wrappers.FrameStack(HCRewardEnv(gym.make('BipedalWalker-v3')), 4, False),
'vecenv_type' : 'RAY'
},
'BipedalWalkerHardcore-v3' : {
'env_creator' : lambda **kwargs : gym.make('BipedalWalkerHardcore-v3'),
'vecenv_type' : 'RAY'
},
'ReacherPyBulletEnv-v0' : {
'env_creator' : lambda **kwargs : create_roboschool_env('ReacherPyBulletEnv-v0'),
'vecenv_type' : 'RAY'
},
'BipedalWalkerHardcoreCnn-v3' : {
'env_creator' : lambda : wrappers.FrameStack(gym.make('BipedalWalkerHardcore-v3'), 4, False),
'vecenv_type' : 'RAY'
},
'QuadruppedWalk-v1' : {
'env_creator' : lambda **kwargs : create_quadrupped_env(),
'vecenv_type' : 'RAY'
},
'FlexAnt' : {
'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/ant.yaml'),
'vecenv_type' : 'ISAAC'
},
'FlexHumanoid' : {
'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/humanoid.yaml'),
'vecenv_type' : 'ISAAC'
},
'FlexHumanoidHard' : {
'env_creator' : lambda **kwargs : create_flex(FLEX_PATH + '/demo/gym/cfg/humanoid_hard.yaml'),
'vecenv_type' : 'ISAAC'
},
'smac' : {
'env_creator' : lambda **kwargs : create_smac(**kwargs),
'vecenv_type' : 'RAY_SMAC'
},
'smac_cnn' : {
'env_creator' : lambda **kwargs : create_smac_cnn(**kwargs),
'vecenv_type' : 'RAY_SMAC'
},
'dm_control' : {
'env_creator' : lambda **kwargs : create_dm_control_env(**kwargs),
'vecenv_type' : 'RAY'
},
'openai_gym' : {
'env_creator' : lambda **kwargs : create_default_gym_env(**kwargs),
'vecenv_type' : 'RAY'
},
'openai_robot_gym' : {
'env_creator' : lambda **kwargs : create_goal_gym_env(**kwargs),
'vecenv_type' : 'RAY'
},
'atari_gym' : {
'env_creator' : lambda **kwargs : create_atari_gym_env(**kwargs),
'vecenv_type' : 'RAY'
},
'slime_gym' : {
'env_creator' : lambda **kwargs : create_slime_gym_env(**kwargs),
'vecenv_type' : 'RAY'
},
'test_env' : {
'env_creator' : lambda **kwargs : create_test_env(kwargs.pop('name'), **kwargs),
'vecenv_type' : 'RAY'
},
'minigrid_env' : {
'env_creator' : lambda **kwargs : create_minigrid_env(kwargs.pop('name'), **kwargs),
'vecenv_type' : 'RAY'
},
'connect4_env' : {
'env_creator' : lambda **kwargs : create_connect_four_env(**kwargs),
'vecenv_type' : 'RAY'
},
'multiwalker_env' : {
'env_creator' : lambda **kwargs : create_multiwalker_env(**kwargs),
'vecenv_type' : 'RAY'
},
'diambra': {
'env_creator': lambda **kwargs: create_diambra_env(**kwargs),
'vecenv_type': 'RAY'
},
'brax' : {
'env_creator': lambda **kwargs: create_brax_env(**kwargs),
'vecenv_type': 'BRAX'
},
}
def get_env_info(env):
result_shapes = {}
result_shapes['observation_space'] = env.observation_space
result_shapes['action_space'] = env.action_space
result_shapes['agents'] = 1
result_shapes['value_size'] = 1
if hasattr(env, "get_number_of_agents"):
result_shapes['agents'] = env.get_number_of_agents()
'''
if isinstance(result_shapes['observation_space'], gym.spaces.dict.Dict):
result_shapes['observation_space'] = observation_space['observations']
if isinstance(result_shapes['observation_space'], dict):
result_shapes['observation_space'] = observation_space['observations']
result_shapes['state_space'] = observation_space['states']
'''
if hasattr(env, "value_size"):
result_shapes['value_size'] = env.value_size
print(result_shapes)
return result_shapes
def get_obs_and_action_spaces_from_config(config):
env_config = config.get('env_config', {})
env = configurations[config['env_name']]['env_creator'](**env_config)
result_shapes = get_env_info(env)
env.close()
return result_shapes
def register(name, config):
configurations[name] = config | 15,163 | Python | 33 | 127 | 0.617028 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/experience.py | import numpy as np
import random
import gym
import torch
from rl_games.common.segment_tree import SumSegmentTree, MinSegmentTree
import torch
from rl_games.algos_torch.torch_ext import numpy_to_torch_dtype_dict
class ReplayBuffer(object):
def __init__(self, size, ob_space):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._obses = np.zeros((size,) + ob_space.shape, dtype=ob_space.dtype)
self._next_obses = np.zeros((size,) + ob_space.shape, dtype=ob_space.dtype)
self._rewards = np.zeros(size)
self._actions = np.zeros(size, dtype=np.int32)
self._dones = np.zeros(size, dtype=np.bool)
self._maxsize = size
self._next_idx = 0
self._curr_size = 0
def __len__(self):
return self._curr_size
def add(self, obs_t, action, reward, obs_tp1, done):
self._curr_size = min(self._curr_size + 1, self._maxsize )
self._obses[self._next_idx] = obs_t
self._next_obses[self._next_idx] = obs_tp1
self._rewards[self._next_idx] = reward
self._actions[self._next_idx] = action
self._dones[self._next_idx] = done
self._next_idx = (self._next_idx + 1) % self._maxsize
def _get(self, idx):
return self._obses[idx], self._actions[idx], self._rewards[idx], self._next_obses[idx], self._dones[idx]
def _encode_sample(self, idxes):
batch_size = len(idxes)
obses_t, actions, rewards, obses_tp1, dones = [None] * batch_size, [None] * batch_size, [None] * batch_size, [None] * batch_size, [None] * batch_size
it = 0
for i in idxes:
data = self._get(i)
obs_t, action, reward, obs_tp1, done = data
obses_t[it] = np.array(obs_t, copy=False)
actions[it] = np.array(action, copy=False)
rewards[it] = reward
obses_tp1[it] = np.array(obs_tp1, copy=False)
dones[it] = done
it = it + 1
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, self._curr_size - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha, ob_space):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size, ob_space)
assert alpha >= 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super().add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
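        # Stratified sampling: the total priority mass is split into batch_size equal
        # ranges and one prefix-sum index is drawn uniformly from each range.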
res = []
p_total = self._it_sum.sum(0, self._curr_size - 1)
every_range_len = p_total / batch_size
for i in range(batch_size):
mass = random.random() * every_range_len + i * every_range_len
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
idexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * self._curr_size) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * self._curr_size) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < self._curr_size
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
class VectorizedReplayBuffer:
def __init__(self, obs_shape, action_shape, capacity, device):
"""Create Vectorized Replay buffer.
Parameters
----------
        capacity: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
See Also
--------
ReplayBuffer.__init__
"""
self.device = device
self.obses = torch.empty((capacity, *obs_shape), dtype=torch.float32, device=self.device)
self.next_obses = torch.empty((capacity, *obs_shape), dtype=torch.float32, device=self.device)
self.actions = torch.empty((capacity, *action_shape), dtype=torch.float32, device=self.device)
self.rewards = torch.empty((capacity, 1), dtype=torch.float32, device=self.device)
self.dones = torch.empty((capacity, 1), dtype=torch.bool, device=self.device)
self.capacity = capacity
self.idx = 0
self.full = False
def add(self, obs, action, reward, next_obs, done):
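        # Inserts a whole batch of transitions at once; if the write crosses the end of
        # the buffer, the overflow wraps around to the beginning (ring buffer).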
num_observations = obs.shape[0]
remaining_capacity = min(self.capacity - self.idx, num_observations)
overflow = num_observations - remaining_capacity
if remaining_capacity < num_observations:
self.obses[0: overflow] = obs[-overflow:]
self.actions[0: overflow] = action[-overflow:]
self.rewards[0: overflow] = reward[-overflow:]
self.next_obses[0: overflow] = next_obs[-overflow:]
self.dones[0: overflow] = done[-overflow:]
self.full = True
self.obses[self.idx: self.idx + remaining_capacity] = obs[:remaining_capacity]
self.actions[self.idx: self.idx + remaining_capacity] = action[:remaining_capacity]
self.rewards[self.idx: self.idx + remaining_capacity] = reward[:remaining_capacity]
self.next_obses[self.idx: self.idx + remaining_capacity] = next_obs[:remaining_capacity]
self.dones[self.idx: self.idx + remaining_capacity] = done[:remaining_capacity]
self.idx = (self.idx + num_observations) % self.capacity
self.full = self.full or self.idx == 0
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obses: torch tensor
batch of observations
actions: torch tensor
batch of actions executed given obs
rewards: torch tensor
rewards received as results of executing act_batch
next_obses: torch tensor
next set of observations seen after executing act_batch
        dones: torch tensor
            whether the episode ended after executing act_batch
"""
idxs = torch.randint(0,
self.capacity if self.full else self.idx,
(batch_size,), device=self.device)
obses = self.obses[idxs]
actions = self.actions[idxs]
rewards = self.rewards[idxs]
next_obses = self.next_obses[idxs]
dones = self.dones[idxs]
return obses, actions, rewards, next_obses, dones
class ExperienceBuffer:
'''
More generalized than replay buffers.
Implemented for on-policy algos
'''
def __init__(self, env_info, algo_info, device, aux_tensor_dict=None):
self.env_info = env_info
self.algo_info = algo_info
self.device = device
self.num_agents = env_info.get('agents', 1)
self.action_space = env_info['action_space']
self.num_actors = algo_info['num_actors']
self.horizon_length = algo_info['horizon_length']
self.has_central_value = algo_info['has_central_value']
self.use_action_masks = algo_info.get('use_action_masks', False)
batch_size = self.num_actors * self.num_agents
self.is_discrete = False
self.is_multi_discrete = False
self.is_continuous = False
self.obs_base_shape = (self.horizon_length, self.num_agents * self.num_actors)
self.state_base_shape = (self.horizon_length, self.num_actors)
if type(self.action_space) is gym.spaces.Discrete:
self.actions_shape = ()
self.actions_num = self.action_space.n
self.is_discrete = True
if type(self.action_space) is gym.spaces.Tuple:
self.actions_shape = (len(self.action_space),)
self.actions_num = [action.n for action in self.action_space]
self.is_multi_discrete = True
if type(self.action_space) is gym.spaces.Box:
self.actions_shape = (self.action_space.shape[0],)
self.actions_num = self.action_space.shape[0]
self.is_continuous = True
self.tensor_dict = {}
self._init_from_env_info(self.env_info)
self.aux_tensor_dict = aux_tensor_dict
if self.aux_tensor_dict is not None:
self._init_from_aux_dict(self.aux_tensor_dict)
def _init_from_env_info(self, env_info):
obs_base_shape = self.obs_base_shape
state_base_shape = self.state_base_shape
self.tensor_dict['obses'] = self._create_tensor_from_space(env_info['observation_space'], obs_base_shape)
if self.has_central_value:
self.tensor_dict['states'] = self._create_tensor_from_space(env_info['state_space'], state_base_shape)
val_space = gym.spaces.Box(low=0, high=1,shape=(env_info.get('value_size',1),))
self.tensor_dict['rewards'] = self._create_tensor_from_space(val_space, obs_base_shape)
self.tensor_dict['values'] = self._create_tensor_from_space(val_space, obs_base_shape)
self.tensor_dict['neglogpacs'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=(), dtype=np.float32), obs_base_shape)
self.tensor_dict['dones'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=(), dtype=np.uint8), obs_base_shape)
if self.is_discrete or self.is_multi_discrete:
self.tensor_dict['actions'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape, dtype=np.long), obs_base_shape)
if self.use_action_masks:
self.tensor_dict['action_masks'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape + (np.sum(self.actions_num),), dtype=np.bool), obs_base_shape)
if self.is_continuous:
self.tensor_dict['actions'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape, dtype=np.float32), obs_base_shape)
self.tensor_dict['mus'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape, dtype=np.float32), obs_base_shape)
self.tensor_dict['sigmas'] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=self.actions_shape, dtype=np.float32), obs_base_shape)
def _init_from_aux_dict(self, tensor_dict):
obs_base_shape = self.obs_base_shape
for k,v in tensor_dict.items():
self.tensor_dict[k] = self._create_tensor_from_space(gym.spaces.Box(low=0, high=1,shape=(v), dtype=np.float32), obs_base_shape)
def _create_tensor_from_space(self, space, base_shape):
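        # Allocates zero-filled torch tensors matching the gym space, recursing into
        # Dict spaces so nested observations get one tensor per key.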
if type(space) is gym.spaces.Box:
dtype = numpy_to_torch_dtype_dict[space.dtype]
return torch.zeros(base_shape + space.shape, dtype= dtype, device = self.device)
if type(space) is gym.spaces.Discrete:
dtype = numpy_to_torch_dtype_dict[space.dtype]
return torch.zeros(base_shape, dtype= dtype, device = self.device)
if type(space) is gym.spaces.Tuple:
'''
assuming that tuple is only Discrete tuple
'''
dtype = numpy_to_torch_dtype_dict[space.dtype]
tuple_len = len(space)
return torch.zeros(base_shape +(tuple_len,), dtype= dtype, device = self.device)
if type(space) is gym.spaces.Dict:
t_dict = {}
for k,v in space.spaces.items():
t_dict[k] = self._create_tensor_from_space(v, base_shape)
return t_dict
def update_data(self, name, index, val):
if type(val) is dict:
for k,v in val.items():
self.tensor_dict[name][k][index,:] = v
else:
self.tensor_dict[name][index,:] = val
def update_data_rnn(self, name, indices,play_mask, val):
if type(val) is dict:
            for k,v in val.items():
self.tensor_dict[name][k][indices,play_mask] = v
else:
self.tensor_dict[name][indices,play_mask] = val
def get_transformed(self, transform_op):
res_dict = {}
for k, v in self.tensor_dict.items():
if type(v) is dict:
transformed_dict = {}
for kd,vd in v.items():
transformed_dict[kd] = transform_op(vd)
res_dict[k] = transformed_dict
else:
res_dict[k] = transform_op(v)
return res_dict
def get_transformed_list(self, transform_op, tensor_list):
res_dict = {}
for k in tensor_list:
v = self.tensor_dict.get(k)
if v is None:
continue
if type(v) is dict:
transformed_dict = {}
for kd,vd in v.items():
transformed_dict[kd] = transform_op(vd)
res_dict[k] = transformed_dict
else:
res_dict[k] = transform_op(v)
return res_dict
| 17,184 | Python | 40.211031 | 194 | 0.589967 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/common/segment_tree.py | import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max)
must form a mathematical group together with the set of
possible values for array elements (i.e. be associative)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
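        # Recursive range query: descends the tree, combining left and right children
        # with self._operation until the query range [start, end] is covered.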
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
        arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
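# Illustrative usage (not part of the original file), assuming a capacity-4 tree:
#
#   tree = SumSegmentTree(4)
#   for i, p in enumerate([1.0, 2.0, 3.0, 4.0]):
#       tree[i] = p
#   tree.sum()                    # 10.0
#   tree.find_prefixsum_idx(3.5)  # 2, since 1.0 + 2.0 <= 3.5 < 1.0 + 2.0 + 3.0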
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end) | 4,888 | Python | 35.214815 | 109 | 0.541121 |