file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---|
vstrozzi/FRL-SHAC-Extension/examples/cfg/ppo/hopper.yaml | params:
diff_env:
name: HopperEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_ant_ppo.pth
config:
name: df_hopper_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 100
save_frequency: 400
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 1024
steps_num: 32
minibatch_size: 8192
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 1,517 | YAML | 17.975 | 33 | 0.588003 |
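The PPO configs under `examples/cfg/ppo/` all share the same layout (`diff_env`, `algo`, `model`, `network`, `config`), which matches the config style used by the rl_games library for its `a2c_continuous` algorithm. A minimal sketch of loading one of these files and reading a few hyperparameters, assuming PyYAML is installed; the path and printed fields are purely illustrative:

```python
# Minimal sketch: parse a PPO config like the one above and inspect a few fields.
import yaml

with open("examples/cfg/ppo/hopper.yaml") as f:
    params = yaml.safe_load(f)["params"]

cfg = params["config"]
print(cfg["name"])                        # df_hopper_ppo
print(float(cfg["learning_rate"]))        # 3e-4 (cast in case YAML keeps "3e-4" as a string)
print(params["network"]["mlp"]["units"])  # [128, 64, 32]
```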
vstrozzi/FRL-SHAC-Extension/examples/cfg/ppo/snu_humanoid.yaml | params:
diff_env:
name: SNUHumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 512, 256]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_hum_mtu_ppo.pth
config:
name: df_hum_mtu_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 20000
save_best_after: 100
save_frequency: 1000
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 1024
steps_num: 32
minibatch_size: 8192
mini_epochs: 6
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 6
num_actors: 2
print_stats: True
| 1,530 | YAML | 18.1375 | 34 | 0.590196 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/ppo/humanoid.yaml | params:
diff_env:
name: HumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 48
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_humanoid_ppo.pth
config:
name: df_humanoid_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 50
save_frequency: 400
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 1024
steps_num: 32
minibatch_size: 8192
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 5
num_actors: 1
print_stats: True | 1,529 | YAML | 18.367088 | 35 | 0.589274 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/ppo/ant.yaml | params:
diff_env:
name: AntEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_ant_ppo.pth
config:
name: df_ant_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 100
save_frequency: 400
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 2048
steps_num: 32
minibatch_size: 16384
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 24
num_actors: 3
print_stats: True
| 1,513 | YAML | 17.925 | 33 | 0.586913 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/ppo/cartpole_swing_up.yaml | params:
diff_env:
name: CartPoleSwingUpEnv
stochastic_env: True
episode_length: 240
MM_caching_frequency: 4
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_cartpole_swing.pth
config:
name: df_cartpole_swing_up
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 500
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
steps_num: 240
num_actors: 32
minibatch_size: 1920
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
# render: True
determenistic: True
games_num: 12
num_actors: 4
print_stats: True
| 1,552 | YAML | 18.172839 | 37 | 0.590851 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/ppo/cheetah.yaml | params:
diff_env:
name: CheetahEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [128, 64, 32]
activation: elu
d2rl: False
initializer:
name: default
regularizer:
name: None
load_checkpoint: False
load_path: nn/df_ant_ppo.pth
config:
name: df_cheetah_ppo
env_name: dflex
multi_gpu: False
ppo: True
mixed_precision: False
normalize_input: True
normalize_value: True
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
lr_threshold: 0.008
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 100
save_frequency: 400
grad_norm: 1.0
entropy_coef: 0.0
truncate_grads: True
e_clip: 0.2
num_actors: 1024
steps_num: 32
minibatch_size: 8192
mini_epochs: 5
critic_coef: 4
clip_value: True
seq_len: 4
bounds_loss_coef: 0.0001
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 1,519 | YAML | 18 | 33 | 0.588545 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/shac/hopper.yaml | params:
diff_env:
name: HopperEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
network:
actor: ActorStochasticMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
critic: CriticMLP
critic_mlp:
units: [64, 64]
activation: elu
config:
name: df_hopper_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 2e-4 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.2
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 256
save_interval: 400
player:
determenistic: False
games_num: 1
num_actors: 1
print_stats: True
| 902 | YAML | 19.066666 | 48 | 0.600887 |
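The SHAC configs select `critic_method: td-lambda` with `lambda: 0.95` and `gamma: 0.99`, and train the critic on value targets computed over each `steps_num`-long rollout window. The sketch below shows the standard backward recursion for TD(λ) targets; it is written from the textbook formulation (ignoring episode terminations for brevity), not copied from the repository's trainer:

```python
import torch

def td_lambda_targets(rewards, values, bootstrap_value, gamma=0.99, lam=0.95):
    """Backward recursion for TD(lambda) value targets.

    rewards:         (T, N) rewards per step and environment
    values:          (T, N) critic estimates V(s_t)
    bootstrap_value: (N,)   critic estimate V(s_T) for the state after the rollout
    Returns targets of shape (T, N). Termination handling is omitted for brevity.
    """
    T = rewards.shape[0]
    targets = torch.zeros_like(rewards)
    next_target = bootstrap_value   # G_T = V(s_T)
    next_value = bootstrap_value    # V(s_{t+1}) for t = T - 1
    for t in reversed(range(T)):
        # G_t = r_t + gamma * ((1 - lam) * V(s_{t+1}) + lam * G_{t+1})
        targets[t] = rewards[t] + gamma * ((1.0 - lam) * next_value + lam * next_target)
        next_target = targets[t]
        next_value = values[t]
    return targets
```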
vstrozzi/FRL-SHAC-Extension/examples/cfg/shac/snu_humanoid.yaml | params:
diff_env:
name: SNUHumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 8
network:
actor: ActorStochasticMLP
actor_mlp:
units: [512, 256]
activation: elu
critic: CriticMLP
critic_mlp:
units: [256, 256]
activation: elu
config:
name: df_snu_humanoid_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 5e-4 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.995
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 400
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 910 | YAML | 19.244444 | 48 | 0.606593 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/shac/humanoid.yaml | params:
diff_env:
name: HumanoidEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 48
network:
actor: ActorStochasticMLP
actor_mlp:
units: [256, 128]
activation: elu
critic: CriticMLP
critic_mlp:
units: [128, 128]
activation: elu
config:
name: df_humanoid_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 5e-4 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.995
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 400
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 908 | YAML | 19.2 | 48 | 0.602423 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/shac/ant.yaml | params:
diff_env:
name: AntEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
network:
actor: ActorStochasticMLP # ActorDeterministicMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
critic: CriticMLP
critic_mlp:
units: [64, 64]
activation: elu
config:
name: df_ant_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.2
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda # ['td-lambda', 'one-step']
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 400
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 946 | YAML | 20.044444 | 56 | 0.602537 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/shac/cartpole_swing_up.yaml | params:
diff_env:
name: CartPoleSwingUpEnv
stochastic_env: True
episode_length: 240
MM_caching_frequency: 4
network:
actor: ActorStochasticMLP #ActorDeterministicMLP
actor_mlp:
units: [64, 64]
activation: elu
critic: CriticMLP
critic_mlp:
units: [64, 64]
activation: elu
config:
name: df_cartpole_swing_up_shac
actor_learning_rate: 1e-2 # adam
critic_learning_rate: 1e-3 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.2
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda # ['td-lambda', 'one-step']
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 500
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 100
player:
determenistic: True
games_num: 4
num_actors: 4
print_stats: True
| 961 | YAML | 20.377777 | 56 | 0.611863 |
vstrozzi/FRL-SHAC-Extension/examples/cfg/shac/cheetah.yaml | params:
diff_env:
name: CheetahEnv
stochastic_env: True
episode_length: 1000
MM_caching_frequency: 16
network:
actor: ActorStochasticMLP # ActorDeterministicMLP
actor_mlp:
units: [128, 64, 32]
activation: elu
critic: CriticMLP
critic_mlp:
units: [64, 64]
activation: elu
config:
name: df_cheetah_shac
actor_learning_rate: 2e-3 # adam
critic_learning_rate: 2e-3 # adam
lr_schedule: linear # ['constant', 'linear']
target_critic_alpha: 0.2
obs_rms: True
ret_rms: False
critic_iterations: 16
critic_method: td-lambda # ['td-lambda', 'one-step']
lambda: 0.95
num_batch: 4
gamma: 0.99
betas: [0.7, 0.95] # adam
max_epochs: 2000
steps_num: 32
grad_norm: 1.0
truncate_grads: True
num_actors: 64
save_interval: 400
player:
determenistic: True
games_num: 1
num_actors: 1
print_stats: True
| 958 | YAML | 20.311111 | 56 | 0.60334 |
vstrozzi/FRL-SHAC-Extension/examples/logs/tmp/shac/04-23-2024-16-55-19/cfg.yaml | params:
config:
actor_learning_rate: 2e-3
betas:
- 0.7
- 0.95
critic_iterations: 16
critic_learning_rate: 2e-3
critic_method: td-lambda
gamma: 0.99
grad_norm: 1.0
lambda: 0.95
lr_schedule: linear
max_epochs: 2000
name: df_ant_shac
num_actors: 64
num_batch: 4
obs_rms: true
player:
determenistic: true
games_num: 1
num_actors: 1
print_stats: true
ret_rms: false
save_interval: 400
steps_num: 32
target_critic_alpha: 0.2
truncate_grads: true
diff_env:
MM_caching_frequency: 16
episode_length: 1000
name: AntEnv
stochastic_env: true
general:
cfg: ./cfg/shac/ant.yaml
checkpoint: Base
device: !!python/object/apply:torch.device
- cpu
logdir: logs/tmp/shac/04-23-2024-16-55-19
no_time_stamp: false
play: false
render: false
seed: 0
test: false
train: true
network:
actor: ActorStochasticMLP
actor_mlp:
activation: elu
units:
- 128
- 64
- 32
critic: CriticMLP
critic_mlp:
activation: elu
units:
- 64
- 64
| 1,149 | YAML | 18.166666 | 46 | 0.585727 |
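Unlike the hand-written configs above, this logged `cfg.yaml` contains a Python-specific tag (`!!python/object/apply:torch.device`), which `yaml.safe_load` refuses to parse. A minimal sketch of reading such a dump back; the path is illustrative, and `yaml.unsafe_load` should only be used on files you trust, such as your own training logs:

```python
import yaml
import torch  # torch must be importable so the !!python tag can reconstruct torch.device

with open("examples/logs/tmp/shac/04-23-2024-16-55-19/cfg.yaml") as f:
    cfg = yaml.unsafe_load(f)  # safe_load raises ConstructorError on the python/object tag

print(cfg["params"]["general"]["device"])  # device(type='cpu')
print(cfg["params"]["config"]["name"])     # df_ant_shac
```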
vstrozzi/FRL-SHAC-Extension/envs/cartpole_swing_up.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class CartPoleSwingUpEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=1024, seed=0, episode_length=240, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = False):
num_obs = 5
num_act = 1
super(CartPoleSwingUpEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.early_termination = early_termination
self.init_sim()
# action parameters
self.action_strength = 1000.
# loss related
self.pole_angle_penalty = 1.0
self.pole_velocity_penalty = 0.1
self.cart_position_penalty = 0.05
self.cart_velocity_penalty = 0.1
self.cart_action_penalty = 0.0
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "CartPoleSwingUp_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1. / 60.
self.sim_substeps = 4
self.sim_dt = self.dt
if self.visualize:
self.env_dist = 1.0
else:
self.env_dist = 0.0
self.num_joint_q = 2
self.num_joint_qd = 2
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
lu.urdf_load(self.builder,
os.path.join(asset_folder, 'cartpole.urdf'),
df.transform((0.0, 2.5, 0.0 + self.env_dist * i), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
floating=False,
shape_kd=1e4,
limit_kd=1.)
self.builder.joint_q[i * self.num_joint_q + 1] = -math.pi
self.model = self.builder.finalize(self.device)
self.model.ground = False
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype = torch.float, device = self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
self.start_joint_q = self.state.joint_q.clone()
self.start_joint_qd = self.state.joint_qd.clone()
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
if (self.num_frames == 40):
try:
self.stage.Save()
except:
print('USD save error')
self.num_frames -= 40
def step(self, actions):
with df.ScopedTimer("simulate", active=False, detailed=False):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions
self.state.joint_act.view(self.num_envs, -1)[:, 0:1] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
#self.obs_buf_before_reset = self.obs_buf.clone()
with df.ScopedTimer("reset", active=False, detailed=False):
if len(env_ids) > 0:
self.reset(env_ids)
with df.ScopedTimer("render", active=False, detailed=False):
self.render()
#self.extras = {'obs_before_reset': self.obs_buf_before_reset}
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids=None, force_reset=True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# fixed start state
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = self.start_joint_q.view(-1, self.num_joint_q)[env_ids, :].clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = self.start_joint_qd.view(-1, self.num_joint_qd)[env_ids, :].clone()
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = \
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] \
+ np.pi * (torch.rand(size=(len(env_ids), self.num_joint_q), device=self.device) - 0.5)
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = \
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] \
+ 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self):
with torch.no_grad(): # TODO: check with Miles
current_joint_q = self.state.joint_q.clone()
current_joint_qd = self.state.joint_qd.clone()
current_joint_act = self.state.joint_act.clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.state.joint_act = current_joint_act
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
It has to be called every time the algorithm starts an episode and it returns the observation vectors
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def calculateObservations(self):
x = self.state.joint_q.view(self.num_envs, -1)[:, 0:1]
theta = self.state.joint_q.view(self.num_envs, -1)[:, 1:2]
xdot = self.state.joint_qd.view(self.num_envs, -1)[:, 0:1]
theta_dot = self.state.joint_qd.view(self.num_envs, -1)[:, 1:2]
# observations: [x, xdot, sin(theta), cos(theta), theta_dot]
self.obs_buf = torch.cat([x, xdot, torch.sin(theta), torch.cos(theta), theta_dot], dim = -1)
def calculateReward(self):
x = self.state.joint_q.view(self.num_envs, -1)[:, 0]
theta = tu.normalize_angle(self.state.joint_q.view(self.num_envs, -1)[:, 1])
xdot = self.state.joint_qd.view(self.num_envs, -1)[:, 0]
theta_dot = self.state.joint_qd.view(self.num_envs, -1)[:, 1]
self.rew_buf = -torch.pow(theta, 2.) * self.pole_angle_penalty \
- torch.pow(theta_dot, 2.) * self.pole_velocity_penalty \
- torch.pow(x, 2.) * self.cart_position_penalty \
- torch.pow(xdot, 2.) * self.cart_velocity_penalty \
- torch.sum(self.actions ** 2, dim = -1) * self.cart_action_penalty
# reset agents
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) | 9,011 | Python | 38.876106 | 187 | 0.582399 |
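A short usage sketch for `CartPoleSwingUpEnv`, assuming dflex and the repository's `envs` package are importable and a CUDA device is available; the number of environments and the random policy are purely illustrative:

```python
import torch
from envs import CartPoleSwingUpEnv

# no_grad=True runs the simulator without building a computation graph;
# set it to False for differentiable-simulation training.
env = CartPoleSwingUpEnv(render=False, device="cuda:0", num_envs=64,
                         episode_length=240, no_grad=True, stochastic_init=True)

obs = env.reset()  # (64, 5): [x, xdot, sin(theta), cos(theta), theta_dot]
for _ in range(240):
    actions = torch.rand(64, 1, device="cuda:0") * 2.0 - 1.0  # uniform in [-1, 1]
    obs, reward, done, info = env.step(actions)
print(reward.shape)  # (64,)
```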
vstrozzi/FRL-SHAC-Extension/envs/__init__.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
from envs.ant import AntEnv
from envs.cheetah import CheetahEnv
from envs.hopper import HopperEnv
from envs.snu_humanoid import SNUHumanoidEnv
from envs.cartpole_swing_up import CartPoleSwingUpEnv
from envs.humanoid import HumanoidEnv | 694 | Python | 48.642854 | 76 | 0.832853 |
vstrozzi/FRL-SHAC-Extension/envs/snu_humanoid.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd, UsdGeom, Gf
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class SNUHumanoidEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1):
self.filter = { "Pelvis", "FemurR", "TibiaR", "TalusR", "FootThumbR", "FootPinkyR", "FemurL", "TibiaL", "TalusL", "FootThumbL", "FootPinkyL"}
self.skeletons = []
self.muscle_strengths = []
self.mtu_actuations = True
self.inv_control_freq = 1
# "humanoid_snu_lower"
self.num_joint_q = 29
self.num_joint_qd = 24
self.num_dof = self.num_joint_q - 7 # 22
self.num_muscles = 152
self.str_scale = 0.6
num_act = self.num_joint_qd - 6 # 18
num_obs = 71 # 13 + 22 + 18 + 18
if self.mtu_actuations:
num_obs = 53 # 71 - 18
if self.mtu_actuations:
num_act = self.num_muscles
super(SNUHumanoidEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.init_sim()
# other parameters
self.termination_height = 0.46
self.termination_tolerance = 0.05
self.height_rew_scale = 4.0
self.action_strength = 100.0
self.action_penalty = -0.001
self.joint_vel_obs_scaling = 0.1
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + self.name + "HumanoidSNU_Low_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 48
self.sim_dt = self.dt
self.ground = True
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rot = df.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi*0.5)
self.start_rotation = tu.to_torch(self.start_rot, device=self.device, requires_grad=False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.heading_vec = self.x_unit_tensor.clone()
self.inv_start_rot = tu.quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = tu.to_torch([10000.0, 0.0, 0.0], device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_pos = []
if self.visualize:
self.env_dist = 2.0
else:
self.env_dist = 0. # set to zero for training for numerical consistency
start_height = 1.0
self.asset_folder = os.path.join(os.path.dirname(__file__), 'assets/snu')
asset_path = os.path.join(self.asset_folder, "human.xml")
muscle_path = os.path.join(self.asset_folder, "muscle284.xml")
for i in range(self.num_environments):
if self.mtu_actuations:
skeleton = lu.Skeleton(asset_path, muscle_path, self.builder, self.filter,
stiffness=5.0,
damping=2.0,
contact_ke=5e3,
contact_kd=2e3,
contact_kf=1e3,
contact_mu=0.5,
limit_ke=1e3,
limit_kd=1e1,
armature=0.05)
else:
skeleton = lu.Skeleton(asset_path, None, self.builder, self.filter,
stiffness=5.0,
damping=2.0,
contact_ke=5e3,
contact_kd=2e3,
contact_kf=1e3,
contact_mu=0.5,
limit_ke=1e3,
limit_kd=1e1,
armature=0.05)
# set initial position 1m off the ground
self.builder.joint_q[skeleton.coord_start + 2] = i * self.env_dist
self.builder.joint_q[skeleton.coord_start + 1] = start_height
self.builder.joint_q[skeleton.coord_start + 3:skeleton.coord_start + 7] = self.start_rot
self.start_pos.append([self.builder.joint_q[skeleton.coord_start], start_height, self.builder.joint_q[skeleton.coord_start + 2]])
self.skeletons.append(skeleton)
num_muscles = len(self.skeletons[0].muscles)
num_q = int(len(self.builder.joint_q)/self.num_environments)
num_qd = int(len(self.builder.joint_qd)/self.num_environments)
print(num_q, num_qd)
print("Start joint_q: ", self.builder.joint_q[0:num_q])
print("Num muscles: ", num_muscles)
self.start_joint_q = self.builder.joint_q[7:num_q].copy()
self.start_joint_target = self.start_joint_q.copy()
for m in self.skeletons[0].muscles:
self.muscle_strengths.append(self.str_scale * m.muscle_strength)
for mi in range(len(self.muscle_strengths)):
self.muscle_strengths[mi] = self.str_scale * self.muscle_strengths[mi]
self.muscle_strengths = tu.to_torch(self.muscle_strengths, device=self.device).repeat(self.num_envs)
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
with torch.no_grad():
muscle_start = 0
skel_index = 0
for s in self.skeletons:
for mesh, link in s.mesh_map.items():
if link != -1:
X_sc = df.transform_expand(self.state.body_X_sc[link].tolist())
mesh_path = os.path.join(self.asset_folder, "OBJ/" + mesh + ".usd")
self.renderer.add_mesh(mesh, mesh_path, X_sc, 1.0, self.render_time)
for m in range(len(s.muscles)):
start = self.model.muscle_start[muscle_start + m].item()
end = self.model.muscle_start[muscle_start + m + 1].item()
points = []
for w in range(start, end):
link = self.model.muscle_links[w].item()
point = self.model.muscle_points[w].cpu().numpy()
X_sc = df.transform_expand(self.state.body_X_sc[link].cpu().tolist())
points.append(Gf.Vec3f(df.transform_point(X_sc, point).tolist()))
self.renderer.add_line_strip(points, name=s.muscles[m].name + str(skel_index), radius=0.0075, color=(self.model.muscle_activation[muscle_start + m]/self.muscle_strengths[m], 0.2, 0.5), time=self.render_time)
muscle_start += len(s.muscles)
skel_index += 1
self.render_time += self.dt * self.inv_control_freq
self.renderer.update(self.state, self.render_time)
if (self.num_frames == 1):
try:
self.stage.Save()
except:
print("USD save error")
self.num_frames -= 1
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
actions = actions * 0.5 + 0.5
##### an ugly fix for simulation nan values #### # reference: https://github.com/pytorch/pytorch/issues/15131
def create_hook():
def hook(grad):
torch.nan_to_num(grad, 0.0, 0.0, 0.0, out = grad)
return hook
if self.state.joint_q.requires_grad:
self.state.joint_q.register_hook(create_hook())
if self.state.joint_qd.requires_grad:
self.state.joint_qd.register_hook(create_hook())
if actions.requires_grad:
actions.register_hook(create_hook())
#################################################
self.actions = actions.clone()
for ci in range(self.inv_control_freq):
if self.mtu_actuations:
self.model.muscle_activation = actions.view(-1) * self.muscle_strengths
else:
self.state.joint_act.view(self.num_envs, -1)[:, 6:] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
with df.ScopedTimer("render", False):
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] + 0.1 * (torch.rand(size=(len(env_ids), 3), device=self.device) - 0.5) * 2.
angle = (torch.rand(len(env_ids), device = self.device) - 0.5) * np.pi / 12.
axis = torch.nn.functional.normalize(torch.rand((len(env_ids), 3), device = self.device) - 0.5)
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = tu.quat_mul(self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7], tu.quat_from_angle_axis(angle, axis))
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {} # NOTE: any other things to restore?
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
It has to be called every time the algorithm starts an episode and it returns the observation vectors
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
torso_pos = self.state.joint_q.view(self.num_envs, -1)[:, 0:3]
torso_rot = self.state.joint_q.view(self.num_envs, -1)[:, 3:7]
lin_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 3:6]
ang_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 0:3]
# convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
to_target = self.targets + self.start_pos - torso_pos
to_target[:, 1] = 0.0
target_dirs = tu.normalize(to_target)
torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot)
up_vec = tu.quat_rotate(torso_quat, self.basis_vec1)
heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0)
self.obs_buf = torch.cat([torso_pos[:, 1:2], # 0
torso_rot, # 1:5
lin_vel, # 5:8
ang_vel, # 8:11
self.state.joint_q.view(self.num_envs, -1)[:, 7:], # 11:33
self.joint_vel_obs_scaling * self.state.joint_qd.view(self.num_envs, -1)[:, 6:], # 33:51
up_vec[:, 1:2], # 51
(heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1)], # 52
dim = -1)
def calculateReward(self):
up_reward = 0.1 * self.obs_buf[:, 51]
heading_reward = self.obs_buf[:, 52]
height_diff = self.obs_buf[:, 0] - (self.termination_height + self.termination_tolerance)
height_reward = torch.clip(height_diff, -1.0, self.termination_tolerance)
height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward) # JIE: not smooth
height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)
act_penalty = torch.sum(torch.abs(self.actions), dim = -1) * self.action_penalty #torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
progress_reward = self.obs_buf[:, 5]
self.rew_buf = progress_reward + up_reward + heading_reward + act_penalty
# reset agents
self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
# an ugly fix for simulation nan values
nan_masks = torch.logical_or(torch.isnan(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isnan(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isnan(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
inf_masks = torch.logical_or(torch.isinf(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isinf(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isinf(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
invalid_value_masks = torch.logical_or((torch.abs(self.state.joint_q.view(self.num_environments, -1)) > 1e6).sum(-1) > 0,
(torch.abs(self.state.joint_qd.view(self.num_environments, -1)) > 1e6).sum(-1) > 0)
invalid_masks = torch.logical_or(invalid_value_masks, torch.logical_or(nan_masks, inf_masks))
self.reset_buf = torch.where(invalid_masks, torch.ones_like(self.reset_buf), self.reset_buf)
self.rew_buf[invalid_masks] = 0.
| 19,037 | Python | 42.967667 | 248 | 0.563429 |
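The `create_hook` pattern inside `step()` above guards the backward pass against NaN/Inf values coming out of the simulator by sanitizing gradients in place before they propagate further. A stripped-down, self-contained illustration of the same idea on an ordinary tensor (not repository code):

```python
import torch

def make_nan_guard_hook():
    def hook(grad):
        # Replace NaN and +/-inf entries with 0 in place, mirroring the env code above.
        torch.nan_to_num(grad, nan=0.0, posinf=0.0, neginf=0.0, out=grad)
    return hook

x = torch.tensor([1.0, 0.0], requires_grad=True)
x.register_hook(make_nan_guard_hook())

y = torch.log(x).sum()  # d/dx log(x) at x=0 is inf -> sanitized to 0 by the hook
y.backward()
print(x.grad)           # tensor([1., 0.])
```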
vstrozzi/FRL-SHAC-Extension/envs/cheetah.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class CheetahEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = False):
num_obs = 17
num_act = 6
super(CheetahEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.early_termination = early_termination
self.init_sim()
# other parameters
self.action_strength = 200.0
self.action_penalty = -0.1
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Cheetah_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 16
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 9
self.num_joint_qd = 9
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rotation = torch.tensor([0.], device = self.device, requires_grad = False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.potentials = tu.to_torch([0.], device=self.device, requires_grad=False).repeat(self.num_envs)
self.prev_potentials = self.potentials.clone()
self.start_pos = []
self.start_joint_q = [0., 0., 0., 0., 0., 0.]
self.start_joint_target = [0., 0., 0., 0., 0., 0.]
start_height = -0.2
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
link_start = len(self.builder.joint_type)
lu.parse_mjcf(os.path.join(asset_folder, "half_cheetah.xml"), self.builder,
density=1000.0,
stiffness=0.0,
damping=1.0,
contact_ke=2.e+4,
contact_kd=1.e+3,
contact_kf=1.e+3,
contact_mu=1.,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=0.1,
radians=True, load_stiffness=True)
self.builder.joint_X_pj[link_start] = df.transform((0.0, 1.0, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5))
# base transform
self.start_pos.append([0.0, start_height])
# set joint targets to rest pose in mjcf
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 9] = [0., 0., 0., 0., 0., 0.]
self.builder.joint_target[i*self.num_joint_q + 3:i*self.num_joint_q + 9] = [0., 0., 0., 0., 0., 0.]
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
render_interval = 1
if (self.num_frames == render_interval):
try:
self.stage.Save()
except:
print("USD save error")
self.num_frames -= render_interval
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions.clone()
self.state.joint_act.view(self.num_envs, -1)[:, 3:] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 2] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] + 0.1 * (torch.rand(size=(len(env_ids), 2), device=self.device) - 0.5) * 2.
self.state.joint_q.view(self.num_envs, -1)[env_ids, 2] = (torch.rand(len(env_ids), device = self.device) - 0.5) * 0.2
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] + 0.1 * (torch.rand(size=(len(env_ids), self.num_joint_q - 3), device = self.device) - 0.5) * 2.
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
It has to be called every time the algorithm starts an episode and it returns the observation vectors
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
self.obs_buf = torch.cat([self.state.joint_q.view(self.num_envs, -1)[:, 1:], self.state.joint_qd.view(self.num_envs, -1)], dim = -1)
def calculateReward(self):
progress_reward = self.obs_buf[:, 8]
self.rew_buf = progress_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
# reset agents
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) | 10,563 | Python | 39.1673 | 226 | 0.594717 |
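The `clear_grad()` / `initialize_trajectory()` pair above implements the graph truncation that short-horizon differentiable-simulation training relies on: the numerical state is carried forward, but its autograd history is dropped so each rollout window is differentiated independently. A tiny analogy sketch of that idea on a plain tensor (the "sim step" here is a stand-in, not dflex):

```python
import torch

state = torch.zeros(4, requires_grad=True)

for window in range(3):
    x = state
    rollout_loss = 0.0
    for t in range(32):               # steps_num-style short horizon
        x = x + 0.01 * torch.tanh(x)  # stand-in for one differentiable sim step
        rollout_loss = rollout_loss + (x ** 2).sum()
    rollout_loss.backward()           # gradients flow only within this window

    # "clear_grad": keep the values, cut the graph to previous windows.
    state = x.detach().clone().requires_grad_(True)
```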
vstrozzi/FRL-SHAC-Extension/envs/humanoid.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class HumanoidEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1):
num_obs = 76
num_act = 21
super(HumanoidEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.init_sim()
# other parameters
self.termination_height = 0.74
self.motor_strengths = [
200,
200,
200,
200,
200,
600,
400,
100,
100,
200,
200,
600,
400,
100,
100,
100,
100,
200,
100,
100,
200]
self.motor_scale = 0.35
self.motor_strengths = tu.to_torch(self.motor_strengths, dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.action_penalty = -0.002
self.joint_vel_obs_scaling = 0.1
self.termination_tolerance = 0.1
self.height_rew_scale = 10.0
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Humanoid_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 48
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 28
self.num_joint_qd = 27
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rot = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
self.start_rotation = tu.to_torch(self.start_rot, device=self.device, requires_grad=False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.heading_vec = self.x_unit_tensor.clone()
self.inv_start_rot = tu.quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = tu.to_torch([200.0, 0.0, 0.0], device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_pos = []
if self.visualize:
self.env_dist = 2.5
else:
self.env_dist = 0. # set to zero for training for numerical consistency
start_height = 1.35
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
lu.parse_mjcf(os.path.join(asset_folder, "humanoid.xml"), self.builder,
stiffness=5.0,
damping=0.1,
contact_ke=2.e+4,
contact_kd=5.e+3,
contact_kf=1.e+3,
contact_mu=0.75,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=0.007,
load_stiffness=True,
load_armature=True)
# base transform
start_pos_z = i*self.env_dist
self.start_pos.append([0.0, start_height, start_pos_z])
self.builder.joint_q[i*self.num_joint_q:i*self.num_joint_q + 3] = self.start_pos[-1]
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 7] = self.start_rot
num_q = int(len(self.builder.joint_q)/self.num_environments)
num_qd = int(len(self.builder.joint_qd)/self.num_environments)
print(num_q, num_qd)
print("Start joint_q: ", self.builder.joint_q[0:num_q])
self.start_joint_q = self.builder.joint_q[7:num_q].copy()
self.start_joint_target = self.start_joint_q.copy()
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
num_act = int(len(self.state.joint_act) / self.num_environments) - 6
print('num_act = ', num_act)
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
if (self.num_frames == 1):
try:
self.stage.Save()
except:
print("USD save error")
self.num_frames -= 1
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
# todo - make clip range a parameter
actions = torch.clip(actions, -1., 1.)
##### an ugly fix for simulation nan values #### # reference: https://github.com/pytorch/pytorch/issues/15131
def create_hook():
def hook(grad):
torch.nan_to_num(grad, 0.0, 0.0, 0.0, out = grad)
return hook
if self.state.joint_q.requires_grad:
self.state.joint_q.register_hook(create_hook())
if self.state.joint_qd.requires_grad:
self.state.joint_qd.register_hook(create_hook())
if actions.requires_grad:
actions.register_hook(create_hook())
#################################################
self.actions = actions.clone()
self.state.joint_act.view(self.num_envs, -1)[:, 6:] = actions * self.motor_scale * self.motor_strengths
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] + 0.1 * (torch.rand(size=(len(env_ids), 3), device=self.device) - 0.5) * 2.
angle = (torch.rand(len(env_ids), device = self.device) - 0.5) * np.pi / 12.
axis = torch.nn.functional.normalize(torch.rand((len(env_ids), 3), device = self.device) - 0.5)
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = tu.quat_mul(self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7], tu.quat_from_angle_axis(angle, axis))
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] + 0.2 * (torch.rand(size=(len(env_ids), self.num_joint_q - 7), device = self.device) - 0.5) * 2.
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
This function starts collecting a new trajectory from the current states but cuts off the computation graph to the previous states.
It has to be called every time the algorithm starts an episode and it returns the observation vectors
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
torso_pos = self.state.joint_q.view(self.num_envs, -1)[:, 0:3]
torso_rot = self.state.joint_q.view(self.num_envs, -1)[:, 3:7]
lin_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 3:6]
ang_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 0:3]
# convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
to_target = self.targets + self.start_pos - torso_pos
to_target[:, 1] = 0.0
target_dirs = tu.normalize(to_target)
torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot)
up_vec = tu.quat_rotate(torso_quat, self.basis_vec1)
heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0)
self.obs_buf = torch.cat([torso_pos[:, 1:2], # 0
torso_rot, # 1:5
lin_vel, # 5:8
ang_vel, # 8:11
self.state.joint_q.view(self.num_envs, -1)[:, 7:], # 11:32
self.joint_vel_obs_scaling * self.state.joint_qd.view(self.num_envs, -1)[:, 6:], # 32:53
up_vec[:, 1:2], # 53:54
(heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1), # 54:55
self.actions.clone()], # 55:76
dim = -1)
def calculateReward(self):
up_reward = 0.1 * self.obs_buf[:, 53]
heading_reward = self.obs_buf[:, 54]
height_diff = self.obs_buf[:, 0] - (self.termination_height + self.termination_tolerance)
height_reward = torch.clip(height_diff, -1.0, self.termination_tolerance)
height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward)
height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)
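        # Summarizing note: with d = clip(height - (termination_height + termination_tolerance), -1, termination_tolerance),
        # the height term is -200 * d^2 below the safe band (d < 0) and height_rew_scale * d inside it (d >= 0).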
progress_reward = self.obs_buf[:, 5]
self.rew_buf = progress_reward + up_reward + heading_reward + height_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
# reset agents
self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
# an ugly fix for simulation nan values
nan_masks = torch.logical_or(torch.isnan(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isnan(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isnan(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
inf_masks = torch.logical_or(torch.isinf(self.obs_buf).sum(-1) > 0, torch.logical_or(torch.isinf(self.state.joint_q.view(self.num_environments, -1)).sum(-1) > 0, torch.isinf(self.state.joint_qd.view(self.num_environments, -1)).sum(-1) > 0))
invalid_value_masks = torch.logical_or((torch.abs(self.state.joint_q.view(self.num_environments, -1)) > 1e6).sum(-1) > 0,
(torch.abs(self.state.joint_qd.view(self.num_environments, -1)) > 1e6).sum(-1) > 0)
invalid_masks = torch.logical_or(invalid_value_masks, torch.logical_or(nan_masks, inf_masks))
self.reset_buf = torch.where(invalid_masks, torch.ones_like(self.reset_buf), self.reset_buf)
        self.rew_buf[invalid_masks] = 0.

| 15,758 | Python | 41.707317 | 248 | 0.582054 |

vstrozzi/FRL-SHAC-Extension/envs/ant.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class AntEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = True):
num_obs = 37
num_act = 8
super(AntEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.early_termination = early_termination
self.init_sim()
# other parameters
self.termination_height = 0.27
self.action_strength = 200.0
self.action_penalty = 0.0
self.joint_vel_obs_scaling = 0.1
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Ant_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 16
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 15
self.num_joint_qd = 14
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rot = df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)
self.start_rotation = tu.to_torch(self.start_rot, device=self.device, requires_grad=False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.heading_vec = self.x_unit_tensor.clone()
self.inv_start_rot = tu.quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = tu.to_torch([10000.0, 0.0, 0.0], device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_pos = []
self.start_joint_q = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
self.start_joint_target = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
if self.visualize:
self.env_dist = 2.5
else:
self.env_dist = 0. # set to zero for training for numerical consistency
start_height = 0.75
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
lu.parse_mjcf(os.path.join(asset_folder, "ant.xml"), self.builder,
density=1000.0,
stiffness=0.0,
damping=1.0,
contact_ke=4.e+4,
contact_kd=1.e+4,
contact_kf=3.e+3,
contact_mu=0.75,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=0.05)
# base transform
start_pos_z = i*self.env_dist
self.start_pos.append([0.0, start_height, start_pos_z])
self.builder.joint_q[i*self.num_joint_q:i*self.num_joint_q + 3] = self.start_pos[-1]
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 7] = self.start_rot
# set joint targets to rest pose in mjcf
self.builder.joint_q[i*self.num_joint_q + 7:i*self.num_joint_q + 15] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
self.builder.joint_target[i*self.num_joint_q + 7:i*self.num_joint_q + 15] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
render_interval = 1
if (self.num_frames == render_interval):
try:
self.stage.Save()
                except Exception:
print("USD save error")
self.num_frames -= render_interval
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions.clone()
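        # joint_act is laid out per env as [6 floating-base dofs | 8 actuated hinge dofs];
        # only the actuated tail is driven, scaled from the normalized actions.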
self.state.joint_act.view(self.num_envs, -1)[:, 6:] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:3] + 0.1 * (torch.rand(size=(len(env_ids), 3), device=self.device) - 0.5) * 2.
angle = (torch.rand(len(env_ids), device = self.device) - 0.5) * np.pi / 12.
axis = torch.nn.functional.normalize(torch.rand((len(env_ids), 3), device = self.device) - 0.5)
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7] = tu.quat_mul(self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:7], tu.quat_from_angle_axis(angle, axis))
self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 7:] + 0.2 * (torch.rand(size=(len(env_ids), self.num_joint_q - 7), device = self.device) - 0.5) * 2.
                self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
    This function starts collecting a new trajectory from the current states, but cuts off the computation graph to the previous states.
    It has to be called every time the algorithm starts a new episode, and it returns the observation vectors.
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
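
    # Illustrative sketch (not part of the original file): get_checkpoint() pairs with
    # clear_grad(checkpoint) to roll the simulation back to a saved state while detaching
    # it from the autograd graph. `env` is a placeholder.
    #
    #   saved = env.get_checkpoint()
    #   ...                                   # run some (possibly differentiable) steps
    #   env.clear_grad(saved)                 # restore joint_q/joint_qd, actions, progress_buf
    #   env.calculateObservations()           # refresh obs_buf for the restored state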
def calculateObservations(self):
torso_pos = self.state.joint_q.view(self.num_envs, -1)[:, 0:3]
torso_rot = self.state.joint_q.view(self.num_envs, -1)[:, 3:7]
lin_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 3:6]
ang_vel = self.state.joint_qd.view(self.num_envs, -1)[:, 0:3]
# convert the linear velocity of the torso from twist representation to the velocity of the center of mass in world frame
lin_vel = lin_vel - torch.cross(torso_pos, ang_vel, dim = -1)
to_target = self.targets + self.start_pos - torso_pos
to_target[:, 1] = 0.0
target_dirs = tu.normalize(to_target)
torso_quat = tu.quat_mul(torso_rot, self.inv_start_rot)
up_vec = tu.quat_rotate(torso_quat, self.basis_vec1)
heading_vec = tu.quat_rotate(torso_quat, self.basis_vec0)
self.obs_buf = torch.cat([torso_pos[:, 1:2], # 0
torso_rot, # 1:5
lin_vel, # 5:8
ang_vel, # 8:11
self.state.joint_q.view(self.num_envs, -1)[:, 7:], # 11:19
self.joint_vel_obs_scaling * self.state.joint_qd.view(self.num_envs, -1)[:, 6:], # 19:27
up_vec[:, 1:2], # 27
(heading_vec * target_dirs).sum(dim = -1).unsqueeze(-1), # 28
self.actions.clone()], # 29:37
dim = -1)
def calculateReward(self):
up_reward = 0.1 * self.obs_buf[:, 27]
heading_reward = self.obs_buf[:, 28]
height_reward = self.obs_buf[:, 0] - self.termination_height
progress_reward = self.obs_buf[:, 5]
self.rew_buf = progress_reward + up_reward + heading_reward + height_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
# reset agents
if self.early_termination:
self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)
        self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)

| 13,087 | Python | 41.631922 | 226 | 0.585008 |

vstrozzi/FRL-SHAC-Extension/envs/dflex_env.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import torch
import dflex as df
import xml.etree.ElementTree as ET
from gym import spaces
class DFlexEnv:
def __init__(self, num_envs, num_obs, num_act, episode_length, MM_caching_frequency = 1, seed=0, no_grad=True, render=False, device='cuda:0'):
self.seed = seed
self.no_grad = no_grad
df.config.no_grad = self.no_grad
self.episode_length = episode_length
self.device = device
self.visualize = render
self.sim_time = 0.0
self.num_frames = 0 # record the number of frames for rendering
self.num_environments = num_envs
self.num_agents = 1
self.MM_caching_frequency = MM_caching_frequency
# initialize observation and action space
self.num_observations = num_obs
self.num_actions = num_act
self.obs_space = spaces.Box(np.ones(self.num_observations) * -np.Inf, np.ones(self.num_observations) * np.Inf)
self.act_space = spaces.Box(np.ones(self.num_actions) * -1., np.ones(self.num_actions) * 1.)
# allocate buffers
self.obs_buf = torch.zeros(
(self.num_envs, self.num_observations), device=self.device, dtype=torch.float, requires_grad=False)
self.rew_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.float, requires_grad=False)
self.reset_buf = torch.ones(
self.num_envs, device=self.device, dtype=torch.long, requires_grad=False)
# end of the episode
self.termination_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long, requires_grad=False)
self.progress_buf = torch.zeros(
self.num_envs, device=self.device, dtype=torch.long, requires_grad=False)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device = self.device, dtype = torch.float, requires_grad = False)
self.extras = {}
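
    # Illustrative sketch (not part of the original file): concrete subclasses (e.g. AntEnv)
    # expose a gym-like interface on top of these buffers. Placeholder usage:
    #
    #   from envs.ant import AntEnv
    #   env = AntEnv(num_envs=64, device='cuda:0', no_grad=True)
    #   obs = env.reset()
    #   for _ in range(100):
    #       actions = torch.rand((env.num_envs, env.num_actions), device=env.device) * 2. - 1.
    #       obs, rew, done, info = env.step(actions)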
def get_number_of_agents(self):
return self.num_agents
@property
def observation_space(self):
return self.obs_space
@property
def action_space(self):
return self.act_space
@property
def num_envs(self):
return self.num_environments
@property
def num_acts(self):
return self.num_actions
@property
def num_obs(self):
return self.num_observations
def get_state(self):
return self.state.joint_q.clone(), self.state.joint_qd.clone()
def reset_with_state(self, init_joint_q, init_joint_qd, env_ids=None, force_reset=True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# fixed start state
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = init_joint_q.view(-1, self.num_joint_q)[env_ids, :].clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = init_joint_qd.view(-1, self.num_joint_qd)[env_ids, :].clone()
self.progress_buf[env_ids] = 0
self.calculateObservations()
        return self.obs_buf

| 3,847 | Python | 33.981818 | 146 | 0.641799 |

vstrozzi/FRL-SHAC-Extension/envs/hopper.py

# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#from numpy.lib.function_base import angle
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from copy import deepcopy
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class HopperEnv(DFlexEnv):
def __init__(self, render=False, device='cuda:0', num_envs=4096, seed=0, episode_length=1000, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = True):
num_obs = 11
num_act = 3
super(HopperEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)
self.stochastic_init = stochastic_init
self.early_termination = early_termination
self.init_sim()
# other parameters
self.termination_height = -0.45
self.termination_angle = np.pi / 6.
self.termination_height_tolerance = 0.15
self.termination_angle_tolerance = 0.05
self.height_rew_scale = 1.0
self.action_strength = 200.0
self.action_penalty = -1e-1
#-----------------------
# set up Usd renderer
if (self.visualize):
self.stage = Usd.Stage.CreateNew("outputs/" + "Hopper_" + str(self.num_envs) + ".usd")
self.renderer = df.render.UsdRenderer(self.model, self.stage)
self.renderer.draw_points = True
self.renderer.draw_springs = True
self.renderer.draw_shapes = True
self.render_time = 0.0
def init_sim(self):
self.builder = df.sim.ModelBuilder()
self.dt = 1.0/60.0
self.sim_substeps = 16
self.sim_dt = self.dt
self.ground = True
self.num_joint_q = 6
self.num_joint_qd = 6
self.x_unit_tensor = tu.to_torch([1, 0, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.y_unit_tensor = tu.to_torch([0, 1, 0], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.z_unit_tensor = tu.to_torch([0, 0, 1], dtype=torch.float, device=self.device, requires_grad=False).repeat((self.num_envs, 1))
self.start_rotation = torch.tensor([0.], device = self.device, requires_grad = False)
# initialize some data used later on
# todo - switch to z-up
self.up_vec = self.y_unit_tensor.clone()
self.start_pos = []
self.start_joint_q = [0., 0., 0.]
self.start_joint_target = [0., 0., 0.]
start_height = 0.0
asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
for i in range(self.num_environments):
link_start = len(self.builder.joint_type)
lu.parse_mjcf(os.path.join(asset_folder, "hopper.xml"), self.builder,
density=1000.0,
stiffness=0.0,
damping=2.0,
contact_ke=2.e+4,
contact_kd=1.e+3,
contact_kf=1.e+3,
contact_mu=0.9,
limit_ke=1.e+3,
limit_kd=1.e+1,
armature=1.0,
radians=True, load_stiffness=True)
self.builder.joint_X_pj[link_start] = df.transform((0.0, 0.0, 0.0), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5))
# base transform
self.start_pos.append([0.0, start_height])
# set joint targets to rest pose in mjcf
self.builder.joint_q[i*self.num_joint_q + 3:i*self.num_joint_q + 6] = [0., 0., 0.]
            self.builder.joint_target[i*self.num_joint_q + 3:i*self.num_joint_q + 6] = [0., 0., 0.]
self.start_pos = tu.to_torch(self.start_pos, device=self.device)
self.start_joint_q = tu.to_torch(self.start_joint_q, device=self.device)
self.start_joint_target = tu.to_torch(self.start_joint_target, device=self.device)
# finalize model
self.model = self.builder.finalize(self.device)
self.model.ground = self.ground
self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype=torch.float32, device=self.device)
self.integrator = df.sim.SemiImplicitIntegrator()
self.state = self.model.state()
if (self.model.ground):
self.model.collide(self.state)
def render(self, mode = 'human'):
if self.visualize:
self.render_time += self.dt
self.renderer.update(self.state, self.render_time)
render_interval = 1
if (self.num_frames == render_interval):
try:
self.stage.Save()
                except Exception:
print("USD save error")
self.num_frames -= render_interval
def step(self, actions):
actions = actions.view((self.num_envs, self.num_actions))
actions = torch.clip(actions, -1., 1.)
self.actions = actions.clone()
self.state.joint_act.view(self.num_envs, -1)[:, 3:] = actions * self.action_strength
self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
self.sim_time += self.sim_dt
self.reset_buf = torch.zeros_like(self.reset_buf)
self.progress_buf += 1
self.num_frames += 1
self.calculateObservations()
self.calculateReward()
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if self.no_grad == False:
self.obs_buf_before_reset = self.obs_buf.clone()
self.extras = {
'obs_before_reset': self.obs_buf_before_reset,
'episode_end': self.termination_buf
}
if len(env_ids) > 0:
self.reset(env_ids)
self.render()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def reset(self, env_ids = None, force_reset = True):
if env_ids is None:
if force_reset == True:
env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)
if env_ids is not None:
# clone the state to avoid gradient error
self.state.joint_q = self.state.joint_q.clone()
self.state.joint_qd = self.state.joint_qd.clone()
# fixed start state
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] = self.start_pos[env_ids, :].clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 2] = self.start_rotation.clone()
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] = self.start_joint_q.clone()
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.
# randomization
if self.stochastic_init:
self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 0:2] + 0.05 * (torch.rand(size=(len(env_ids), 2), device=self.device) - 0.5) * 2.
self.state.joint_q.view(self.num_envs, -1)[env_ids, 2] = (torch.rand(len(env_ids), device = self.device) - 0.5) * 0.1
self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] = self.state.joint_q.view(self.num_envs, -1)[env_ids, 3:] + 0.05 * (torch.rand(size=(len(env_ids), self.num_joint_q - 3), device = self.device) - 0.5) * 2.
self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = 0.05 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5) * 2.
# clear action
self.actions = self.actions.clone()
self.actions[env_ids, :] = torch.zeros((len(env_ids), self.num_actions), device = self.device, dtype = torch.float)
self.progress_buf[env_ids] = 0
self.calculateObservations()
return self.obs_buf
'''
cut off the gradient from the current state to previous states
'''
def clear_grad(self, checkpoint = None):
with torch.no_grad():
if checkpoint is None:
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
current_joint_q = checkpoint['joint_q'].clone()
current_joint_qd = checkpoint['joint_qd'].clone()
self.state = self.model.state()
self.state.joint_q = current_joint_q
self.state.joint_qd = current_joint_qd
self.actions = checkpoint['actions'].clone()
self.progress_buf = checkpoint['progress_buf'].clone()
'''
    This function starts collecting a new trajectory from the current states, but cuts off the computation graph to the previous states.
    It has to be called every time the algorithm starts a new episode, and it returns the observation vectors.
'''
def initialize_trajectory(self):
self.clear_grad()
self.calculateObservations()
return self.obs_buf
def get_checkpoint(self):
checkpoint = {}
checkpoint['joint_q'] = self.state.joint_q.clone()
checkpoint['joint_qd'] = self.state.joint_qd.clone()
checkpoint['actions'] = self.actions.clone()
checkpoint['progress_buf'] = self.progress_buf.clone()
return checkpoint
def calculateObservations(self):
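        # Observation layout (11 dims): joint_q[1:] = [root height, root pitch angle,
        # thigh, leg, foot joint angles], followed by the 6 joint velocities
        # [root x vel, root z vel, pitch vel, thigh vel, leg vel, foot vel].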
self.obs_buf = torch.cat([self.state.joint_q.view(self.num_envs, -1)[:, 1:], self.state.joint_qd.view(self.num_envs, -1)], dim = -1)
def calculateReward(self):
height_diff = self.obs_buf[:, 0] - (self.termination_height + self.termination_height_tolerance)
height_reward = torch.clip(height_diff, -1.0, 0.3)
height_reward = torch.where(height_reward < 0.0, -200.0 * height_reward * height_reward, height_reward)
height_reward = torch.where(height_reward > 0.0, self.height_rew_scale * height_reward, height_reward)
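        # The angle term below is an inverted parabola in the root pitch angle: it equals 1
        # when the torso is upright (angle = 0) and falls to 0 at +/- termination_angle.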
angle_reward = 1. * (-self.obs_buf[:, 1] ** 2 / (self.termination_angle ** 2) + 1.)
progress_reward = self.obs_buf[:, 5]
self.rew_buf = progress_reward + height_reward + angle_reward + torch.sum(self.actions ** 2, dim = -1) * self.action_penalty
# reset agents
self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
if self.early_termination:
            self.reset_buf = torch.where(self.obs_buf[:, 0] < self.termination_height, torch.ones_like(self.reset_buf), self.reset_buf)

| 11,333 | Python | 39.916967 | 227 | 0.599224 |

vstrozzi/FRL-SHAC-Extension/envs/assets/humanoid.xml

<mujoco model="humanoid">
<statistic extent="2" center="0 0 1"/>
<option timestep="0.00555"/>
<default>
<motor ctrlrange="-1 1" ctrllimited="true"/>
<default class="body">
<geom type="capsule" condim="1" friction="1.0 0.05 0.05" solimp=".9 .99 .003" solref=".015 1" material="self"/>
<joint limited="true" type="hinge" damping="0.1" stiffness="5" armature=".007" solimplimit="0 .99 .01"/>
<site size=".04" group="3"/>
<default class="force-torque">
<site type="box" size=".01 .01 .02" rgba="1 0 0 1" />
</default>
<default class="touch">
<site type="capsule" rgba="0 0 1 .3"/>
</default>
</default>
</default>
<worldbody>
<geom name="floor" type="plane" conaffinity="1" size="100 100 .2" material="grid"/>
<body name="torso" pos="0 0 1.5" childclass="body">
<light name="top" pos="0 0 2" mode="trackcom"/>
<camera name="back" pos="-3 0 1" xyaxes="0 -1 0 1 0 2" mode="trackcom"/>
<camera name="side" pos="0 -3 1" xyaxes="1 0 0 0 1 2" mode="trackcom"/>
<joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/>
<site name="root" class="force-torque"/>
<geom name="torso" type="capsule" fromto="0 -.07 0 0 .07 0" size=".07"/>
<geom name="upper_waist" type="capsule" fromto="-.01 -.06 -.12 -.01 .06 -.12" size=".06"/>
<site name="torso" class="touch" type="box" pos="0 0 -.05" size=".075 .14 .13"/>
<geom name="head" type="sphere" size=".09" pos="0 0 .19"/>
<body name="lower_waist" pos="-.01 0 -.260" quat="1.000 0 -.002 0">
<geom name="lower_waist" type="capsule" fromto="0 -.06 0 0 .06 0" size=".06"/>
<site name="lower_waist" class="touch" size=".061 .06" zaxis="0 1 0"/>
<joint limited="true" name="abdomen_z" pos="0 0 .065" axis="0 0 1" range="-45 45" damping="5" stiffness="20" armature=".02"/>
<joint limited="true" name="abdomen_y" pos="0 0 .065" axis="0 1 0" range="-75 30" damping="5" stiffness="20" armature=".01"/>
<body name="pelvis" pos="0 0 -.165" quat="1.000 0 -.002 0">
<joint limited="true" name="abdomen_x" pos="0 0 .1" axis="1 0 0" range="-35 35" damping="5" stiffness="10" armature=".01"/>
<geom name="butt" type="capsule" fromto="-.02 -.07 0 -.02 .07 0" size=".09"/>
<site name="butt" class="touch" size=".091 .07" pos="-.02 0 0" zaxis="0 1 0"/>
<body name="right_thigh" pos="0 -.1 -.04">
<site name="right_hip" class="force-torque"/>
<joint limited="true" name="right_hip_x" axis="1 0 0" range="-25 5" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="right_hip_z" axis="0 0 1" range="-60 35" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="right_hip_y" axis="0 1 0" range="-80 20" damping="5" stiffness="20" armature=".01"/>
<geom name="right_thigh" type="capsule" fromto="0 0 0 0 .01 -.34" size=".06"/>
<site name="right_thigh" class="touch" pos="0 .005 -.17" size=".061 .17" zaxis="0 -1 34"/>
<body name="right_shin" pos="0 .01 -.403">
<site name="right_knee" class="force-torque" pos="0 0 .02"/>
<joint limited="true" name="right_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/>
<geom name="right_shin" type="capsule" fromto="0 0 0 0 0 -.3" size=".049"/>
<site name="right_shin" class="touch" pos="0 0 -.15" size=".05 .15"/>
<body name="right_foot" pos="0 0 -.39">
<site name="right_ankle" class="force-torque"/>
<joint limited="true" name="right_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" damping="1.0" stiffness="2" armature=".006"/>
<joint limited="true" name="right_ankle_x" pos="0 0 .08" axis="1 0 .5" range="-50 50" damping="1.0" stiffness="2" armature=".006"/>
<geom name="right_right_foot" type="capsule" fromto="-.07 -.02 0 .14 -.04 0" size=".027"/>
<geom name="left_right_foot" type="capsule" fromto="-.07 0 0 .14 .02 0" size=".027"/>
<site name="right_right_foot" class="touch" pos=".035 -.03 0" size=".03 .11" zaxis="21 -2 0"/>
<site name="left_right_foot" class="touch" pos=".035 .01 0" size=".03 .11" zaxis="21 2 0"/>
</body>
</body>
</body>
<body name="left_thigh" pos="0 .1 -.04">
<site name="left_hip" class="force-torque"/>
<joint limited="true" name="left_hip_x" axis="-1 0 0" range="-25 5" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="left_hip_z" axis="0 0 -1" range="-60 35" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="left_hip_y" axis="0 1 0" range="-80 20" damping="5" stiffness="20" armature=".01"/>
<geom name="left_thigh" type="capsule" fromto="0 0 0 0 -.01 -.34" size=".06"/>
<site name="left_thigh" class="touch" pos="0 -.005 -.17" size=".061 .17" zaxis="0 1 34"/>
<body name="left_shin" pos="0 -.01 -.403">
<site name="left_knee" class="force-torque" pos="0 0 .02"/>
<joint limited="true" name="left_knee" pos="0 0 .02" axis="0 -1 0" range="-160 2"/>
<geom name="left_shin" type="capsule" fromto="0 0 0 0 0 -.3" size=".049"/>
<site name="left_shin" class="touch" pos="0 0 -.15" size=".05 .15"/>
<body name="left_foot" pos="0 0 -.39">
<site name="left_ankle" class="force-torque"/>
<joint limited="true" name="left_ankle_y" pos="0 0 .08" axis="0 1 0" range="-50 50" damping="1.0" stiffness="2" armature=".006"/>
<joint limited="true" name="left_ankle_x" pos="0 0 .08" axis="1 0 .5" range="-50 50" damping="1.0" stiffness="2" armature=".006"/>
<geom name="left_left_foot" type="capsule" fromto="-.07 .02 0 .14 .04 0" size=".027"/>
<geom name="right_left_foot" type="capsule" fromto="-.07 0 0 .14 -.02 0" size=".027"/>
<site name="right_left_foot" class="touch" pos=".035 -.01 0" size=".03 .11" zaxis="21 -2 0"/>
<site name="left_left_foot" class="touch" pos=".035 .03 0" size=".03 .11" zaxis="21 2 0"/>
</body>
</body>
</body>
</body>
</body>
<body name="right_upper_arm" pos="0 -.17 .06">
<joint limited="true" name="right_shoulder1" axis="2 1 1" range="-60 60" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="right_shoulder2" axis="0 -1 1" range="-60 60" damping="5" stiffness="10" armature=".01"/>
<geom name="right_upper_arm" type="capsule" fromto="0 0 0 .16 -.16 -.16" size=".04 .16"/>
<site name="right_upper_arm" class="touch" pos=".08 -.08 -.08" size=".041 .14" zaxis="1 -1 -1"/>
<body name="right_lower_arm" pos=".18 -.18 -.18">
<joint limited="true" name="right_elbow" axis="0 -1 1" range="-90 50" damping="1.0" stiffness="2" armature=".006"/>
<geom name="right_lower_arm" type="capsule" fromto=".01 .01 .01 .17 .17 .17" size=".031"/>
<site name="right_lower_arm" class="touch" pos=".09 .09 .09" size=".032 .14" zaxis="1 1 1"/>
<geom name="right_hand" type="sphere" size=".04" pos=".18 .18 .18"/>
</body>
</body>
<body name="left_upper_arm" pos="0 .17 .06">
<joint limited="true" name="left_shoulder1" axis="-2 1 -1" range="-60 60" damping="5" stiffness="10" armature=".01"/>
<joint limited="true" name="left_shoulder2" axis="0 -1 -1" range="-60 60" damping="5" stiffness="10" armature=".01"/>
<geom name="left_upper_arm" type="capsule" fromto="0 0 0 .16 .16 -.16" size=".04 .16"/>
<site name="left_upper_arm" class="touch" pos=".08 .08 -.08" size=".041 .14" zaxis="1 1 -1"/>
<body name="left_lower_arm" pos=".18 .18 -.18">
<joint limited="true" name="left_elbow" axis="0 -1 -1" range="-90 50" damping="1.0" stiffness="2" armature=".006"/>
<geom name="left_lower_arm" type="capsule" fromto=".01 -.01 .01 .17 -.17 .17" size=".031"/>
<site name="left_lower_arm" class="touch" pos=".09 -.09 .09" size=".032 .14" zaxis="1 -1 1"/>
<geom name="left_hand" type="sphere" size=".04" pos=".18 -.18 .18"/>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor name='abdomen_y' gear='67.5' joint='abdomen_y'/>
<motor name='abdomen_z' gear='67.5' joint='abdomen_z'/>
<motor name='abdomen_x' gear='67.5' joint='abdomen_x'/>
<motor name='right_hip_x' gear='45.0' joint='right_hip_x'/>
<motor name='right_hip_z' gear='45.0' joint='right_hip_z'/>
<motor name='right_hip_y' gear='135.0' joint='right_hip_y'/>
<motor name='right_knee' gear='90.0' joint='right_knee'/>
<motor name='right_ankle_x' gear='22.5' joint='right_ankle_x'/>
<motor name='right_ankle_y' gear='22.5' joint='right_ankle_y'/>
<motor name='left_hip_x' gear='45.0' joint='left_hip_x'/>
<motor name='left_hip_z' gear='45.0' joint='left_hip_z'/>
<motor name='left_hip_y' gear='135.0' joint='left_hip_y'/>
<motor name='left_knee' gear='90.0' joint='left_knee'/>
<motor name='left_ankle_x' gear='22.5' joint='left_ankle_x'/>
<motor name='left_ankle_y' gear='22.5' joint='left_ankle_y'/>
<motor name='right_shoulder1' gear='67.5' joint='right_shoulder1'/>
<motor name='right_shoulder2' gear='67.5' joint='right_shoulder2'/>
<motor name='right_elbow' gear='45.0' joint='right_elbow'/>
<motor name='left_shoulder1' gear='67.5' joint='left_shoulder1'/>
<motor name='left_shoulder2' gear='67.5' joint='left_shoulder2'/>
<motor name='left_elbow' gear='45.0' joint='left_elbow'/>
</actuator>
<sensor>
<subtreelinvel name="torso_subtreelinvel" body="torso"/>
<accelerometer name="torso_accel" site="root"/>
<velocimeter name="torso_vel" site="root"/>
<gyro name="torso_gyro" site="root"/>
<force name="left_ankle_force" site="left_ankle"/>
<force name="right_ankle_force" site="right_ankle"/>
<force name="left_knee_force" site="left_knee"/>
<force name="right_knee_force" site="right_knee"/>
<force name="left_hip_force" site="left_hip"/>
<force name="right_hip_force" site="right_hip"/>
<torque name="left_ankle_torque" site="left_ankle"/>
<torque name="right_ankle_torque" site="right_ankle"/>
<torque name="left_knee_torque" site="left_knee"/>
<torque name="right_knee_torque" site="right_knee"/>
<torque name="left_hip_torque" site="left_hip"/>
<torque name="right_hip_torque" site="right_hip"/>
<touch name="torso_touch" site="torso"/>
<touch name="head_touch" site="head"/>
<touch name="lower_waist_touch" site="lower_waist"/>
<touch name="butt_touch" site="butt"/>
<touch name="right_thigh_touch" site="right_thigh"/>
<touch name="right_shin_touch" site="right_shin"/>
<touch name="right_right_foot_touch" site="right_right_foot"/>
<touch name="left_right_foot_touch" site="left_right_foot"/>
<touch name="left_thigh_touch" site="left_thigh"/>
<touch name="left_shin_touch" site="left_shin"/>
<touch name="right_left_foot_touch" site="right_left_foot"/>
<touch name="left_left_foot_touch" site="left_left_foot"/>
<touch name="right_upper_arm_touch" site="right_upper_arm"/>
<touch name="right_lower_arm_touch" site="right_lower_arm"/>
<touch name="right_hand_touch" site="right_hand"/>
<touch name="left_upper_arm_touch" site="left_upper_arm"/>
<touch name="left_lower_arm_touch" site="left_lower_arm"/>
<touch name="left_hand_touch" site="left_hand"/>
</sensor>
</mujoco>
| 12,020 | XML | 64.331521 | 147 | 0.562396 |
vstrozzi/FRL-SHAC-Extension/envs/assets/hopper.xml

<mujoco model="hopper">
<compiler angle="radian" />
<option integrator="RK4" />
<size njmax="500" nconmax="100" />
<visual>
<map znear="0.02" />
</visual>
<default class="main">
<joint limited="true" armature="1" damping="1" />
<geom condim="1" solimp="0.8 0.8 0.01 0.5 2" margin="0.001" material="geom" rgba="0.8 0.6 0.4 1" />
<general ctrllimited="true" ctrlrange="-0.4 0.4" />
</default>
<asset>
<texture type="skybox" builtin="gradient" rgb1="0.4 0.5 0.6" rgb2="0 0 0" width="100" height="600" />
<texture type="cube" name="texgeom" builtin="flat" mark="cross" rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" markrgb="1 1 1" width="127" height="762" />
<texture type="2d" name="texplane" builtin="checker" rgb1="0 0 0" rgb2="0.8 0.8 0.8" width="100" height="100" />
<material name="MatPlane" texture="texplane" texrepeat="60 60" specular="1" shininess="1" reflectance="0.5" />
<material name="geom" texture="texgeom" texuniform="true" />
</asset>
<worldbody>
<geom name="floor" size="20 20 0.125" type="plane" condim="3" material="MatPlane" rgba="0.8 0.9 0.8 1" />
<light pos="0 0 1.3" dir="0 0 -1" directional="true" cutoff="100" exponent="1" diffuse="1 1 1" specular="0.1 0.1 0.1" />
<body name="torso" pos="0 0 1.25">
<joint name="rootx" pos="0 0 -1.25" axis="1 0 0" type="slide" limited="false" armature="0" damping="0" />
<joint name="rootz" pos="0 0 0" axis="0 0 1" type="slide" ref="1.25" limited="false" armature="0" damping="0" />
<joint name="rooty" pos="0 0 0" axis="0 1 0" limited="false" type="hinge" armature="0" damping="0" />
<geom name="torso_geom" size="0.05 0.2" type="capsule" friction="0.9 0.005 0.0001" />
<body name="thigh" pos="0 0 -0.2">
<joint name="thigh_joint" pos="0 0 0" type="hinge" axis="0 -1 0" range="-2.61799 0" />
<geom name="thigh_geom" size="0.05 0.225" pos="0 0 -0.225" type="capsule" friction="0.9 0.005 0.0001" />
<body name="leg" pos="0 0 -0.7">
<joint name="leg_joint" pos="0 0 0.25" type="hinge" axis="0 -1 0" range="-2.61799 0" />
<geom name="leg_geom" size="0.04 0.25" type="capsule" friction="0.9 0.005 0.0001" />
<body name="foot" pos="0.0 0 -0.25">
<joint name="foot_joint" pos="0 0 0.0" type="hinge" axis="0 -1 0" range="-0.785398 0.785398" />
<geom name="foot_geom" size="0.06 0.195" pos="0.06 0 0.0" quat="0.707107 0 -0.707107 0" type="capsule" friction="2 0.005 0.0001" />
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<general joint="thigh_joint" ctrlrange="-1 1" gear="200 0 0 0 0 0" />
<general joint="leg_joint" ctrlrange="-1 1" gear="200 0 0 0 0 0" />
<general joint="foot_joint" ctrlrange="-1 1" gear="200 0 0 0 0 0" />
</actuator>
</mujoco>
| 3,049 | XML | 62.541665 | 155 | 0.544441 |
vstrozzi/FRL-SHAC-Extension/envs/assets/half_cheetah.xml

<!-- Cheetah Model
The state space is populated with joints in the order that they are
defined in this file. The actuators also operate on joints.
State-Space (name/joint/parameter):
- rootx slider position (m)
- rootz slider position (m)
- rooty hinge angle (rad)
- bthigh hinge angle (rad)
- bshin hinge angle (rad)
- bfoot hinge angle (rad)
- fthigh hinge angle (rad)
- fshin hinge angle (rad)
- ffoot hinge angle (rad)
- rootx slider velocity (m/s)
- rootz slider velocity (m/s)
- rooty hinge angular velocity (rad/s)
- bthigh hinge angular velocity (rad/s)
- bshin hinge angular velocity (rad/s)
- bfoot hinge angular velocity (rad/s)
- fthigh hinge angular velocity (rad/s)
- fshin hinge angular velocity (rad/s)
- ffoot hinge angular velocity (rad/s)
Actuators (name/actuator/parameter):
- bthigh hinge torque (N m)
- bshin hinge torque (N m)
- bfoot hinge torque (N m)
- fthigh hinge torque (N m)
- fshin hinge torque (N m)
- ffoot hinge torque (N m)
-->
<mujoco model="cheetah">
<compiler angle="radian" coordinate="local" inertiafromgeom="true" settotalmass="14"/>
<default>
<joint armature=".1" damping=".01" limited="true" solimplimit="0 .8 .03" solreflimit=".02 1" stiffness="8"/>
<geom conaffinity="0" condim="3" contype="1" friction="0.8 .1 .1" rgba="0.8 0.6 .4 1" solimp="0.0 0.8 0.01" solref="0.02 1"/>
<motor ctrllimited="true" ctrlrange="-1 1"/>
</default>
<size nstack="300000" nuser_geom="1"/>
<option gravity="0 0 -9.81" timestep="0.01"/>
<worldbody>
<body name="torso" pos="0 0 0">
<joint armature="0" axis="1 0 0" damping="0" limited="false" name="ignorex" pos="0 0 0" stiffness="0" type="slide"/>
<joint armature="0" axis="0 0 1" damping="0" limited="false" name="ignorez" pos="0 0 0" stiffness="0" type="slide"/>
<joint armature="0" axis="0 1 0" damping="0" limited="false" name="ignorey" pos="0 0 0" stiffness="0" type="hinge"/>
<geom fromto="-.5 0 0 .5 0 0" name="torso" size="0.046" type="capsule"/>
<geom axisangle="0 1 0 .87" name="head" pos=".6 0 .1" size="0.046 .15" type="capsule"/>
<!-- <site name='tip' pos='.15 0 .11'/>-->
<body name="bthigh" pos="-.5 0 0">
<joint axis="0 1 0" damping="6" name="bthigh" pos="0 0 0" range="-.52 1.05" stiffness="240" type="hinge"/>
<geom axisangle="0 1 0 -3.8" name="bthigh" pos=".1 0 -.13" size="0.046 .145" type="capsule"/>
<body name="bshin" pos=".16 0 -.25">
<joint axis="0 1 0" damping="4.5" name="bshin" pos="0 0 0" range="-.785 .785" stiffness="180" type="hinge"/>
<geom axisangle="0 1 0 -2.03" name="bshin" pos="-.14 0 -.07" rgba="0.9 0.6 0.6 1" size="0.046 .15" type="capsule"/>
<body name="bfoot" pos="-.28 0 -.14">
<joint axis="0 1 0" damping="3" name="bfoot" pos="0 0 0" range="-.4 .785" stiffness="120" type="hinge"/>
<geom axisangle="0 1 0 -.27" name="bfoot" pos=".03 0 -.097" rgba="0.9 0.6 0.6 1" size="0.046 .094" type="capsule"/>
<inertial mass="10"/>
</body>
</body>
</body>
<body name="fthigh" pos=".5 0 0">
<joint axis="0 1 0" damping="4.5" name="fthigh" pos="0 0 0" range="-1.5 0.8" stiffness="180" type="hinge"/>
<geom axisangle="0 1 0 .52" name="fthigh" pos="-.07 0 -.12" size="0.046 .133" type="capsule"/>
<body name="fshin" pos="-.14 0 -.24">
<joint axis="0 1 0" damping="3" name="fshin" pos="0 0 0" range="-1.2 1.1" stiffness="120" type="hinge"/>
<geom axisangle="0 1 0 -.6" name="fshin" pos=".065 0 -.09" rgba="0.9 0.6 0.6 1" size="0.046 .106" type="capsule"/>
<body name="ffoot" pos=".13 0 -.18">
<joint axis="0 1 0" damping="1.5" name="ffoot" pos="0 0 0" range="-3.1 -0.3" stiffness="60" type="hinge"/>
<geom axisangle="0 1 0 -.6" name="ffoot" pos=".045 0 -.07" rgba="0.9 0.6 0.6 1" size="0.046 .07" type="capsule"/>
<inertial mass="10"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor gear="120" joint="bthigh" name="bthigh"/>
<motor gear="90" joint="bshin" name="bshin"/>
<motor gear="60" joint="bfoot" name="bfoot"/>
<motor gear="120" joint="fthigh" name="fthigh"/>
<motor gear="60" joint="fshin" name="fshin"/>
<motor gear="30" joint="ffoot" name="ffoot"/>
</actuator>
</mujoco>
| 4,788 | XML | 52.808988 | 129 | 0.540518 |
vstrozzi/FRL-SHAC-Extension/envs/assets/ant.xml

<mujoco model="ant">
<compiler angle="degree" coordinate="local" inertiafromgeom="true"/>
<option integrator="RK4" timestep="0.01"/>
<custom>
<numeric data="0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0" name="init_qpos"/>
</custom>
<default>
<joint armature="0.001" damping="1" limited="true"/>
<geom conaffinity="0" condim="3" density="5.0" friction="1.5 0.1 0.1" margin="0.01" rgba="0.97 0.38 0.06 1"/>
</default>
<worldbody>
<body name="torso" pos="0 0 0.75">
<geom name="torso_geom" pos="0 0 0" size="0.25" type="sphere"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="aux_1_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="aux_2_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="aux_3_geom" size="0.08" type="capsule"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="aux_4_geom" size="0.08" type="capsule" rgba=".999 .2 .02 1"/>
<joint armature="0" damping="0" limited="false" margin="0.01" name="root" pos="0 0 0" type="free"/>
<body name="front_left_leg" pos="0.2 0.2 0">
<joint axis="0 0 1" name="hip_1" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 0.2 0.0" name="left_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 0.2 0" name="front_left_foot">
<joint axis="-1 1 0" name="ankle_1" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 0.4 0.0" name="left_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
<body name="front_right_leg" pos="-0.2 0.2 0">
<joint axis="0 0 1" name="hip_2" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 0.2 0.0" name="right_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 0.2 0" name="front_right_foot">
<joint axis="1 1 0" name="ankle_2" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 0.4 0.0" name="right_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="left_back_leg" pos="-0.2 -0.2 0">
<joint axis="0 0 1" name="hip_3" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.2 -0.2 0.0" name="back_leg_geom" size="0.08" type="capsule"/>
<body pos="-0.2 -0.2 0" name="left_back_foot">
<joint axis="-1 1 0" name="ankle_3" pos="0.0 0.0 0.0" range="-100 -30" type="hinge"/>
<geom fromto="0.0 0.0 0.0 -0.4 -0.4 0.0" name="third_ankle_geom" size="0.08" type="capsule"/>
</body>
</body>
<body name="right_back_leg" pos="0.2 -0.2 0">
<joint axis="0 0 1" name="hip_4" pos="0.0 0.0 0.0" range="-40 40" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.2 -0.2 0.0" name="rightback_leg_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
<body pos="0.2 -0.2 0" name="right_back_foot">
<joint axis="1 1 0" name="ankle_4" pos="0.0 0.0 0.0" range="30 100" type="hinge"/>
<geom fromto="0.0 0.0 0.0 0.4 -0.4 0.0" name="fourth_ankle_geom" size="0.08" type="capsule" rgba=".999 .2 .1 1"/>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_4" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_1" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_2" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="hip_3" gear="150"/>
<motor ctrllimited="true" ctrlrange="-1.0 1.0" joint="ankle_3" gear="150"/>
</actuator>
</mujoco>

| 4,043 | XML | 61.215384 | 125 | 0.550829 |

vstrozzi/FRL-SHAC-Extension/envs/assets/snu/human.xml

<Skeleton name="Human">
<Node name="Pelvis" parent="None" >
<Body type="Box" mass="15.0" size="0.2083 0.1454 0.1294" contact="Off" color="0.6 0.6 1.5 1.0" obj="Pelvis.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.9809 -0.0308 "/>
</Body>
<Joint type="Free" bvh="Character1_Hips">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.9809 -0.0308 "/>
</Joint>
</Node>
<Node name="FemurR" parent="Pelvis" >
<Body type="Box" mass="7.0" size="0.1271 0.4043 0.1398" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Femur.obj">
<Transformation linear="0.9998 -0.0174 -0.0024 -0.0175 -0.9997 -0.0172 -0.21 0.0172 -0.9998 " translation="-0.0959 0.7241 -0.0227 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightUpLeg" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0903 0.9337 -0.0116 "/>
</Joint>
</Node>
<Node name="TibiaR" parent="FemurR" >
<Body type="Box" mass="3.0" size="0.1198 0.4156 0.1141 " contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Tibia.obj">
<Transformation linear="0.9994 0.0348 -0.0030 0.0349 -0.9956 0.0871 0.0 -0.0872 -0.9962 " translation="-0.0928 0.3018 -0.0341 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" bvh="Character1_RightLeg" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0995 0.5387 -0.0103 "/>
</Joint>
</Node>
<Node name="TalusR" parent="TibiaR" endeffector="True">
<Body type="Box" mass="0.6" size="0.0756 0.0498 0.1570" contact="On" color="0.3 0.3 1.5 1.0" obj="R_Talus.obj">
<Transformation linear="0.9779 0.0256 0.2073 0.0199 -0.9994 0.0295 0.2079 -0.0247 -0.9778 " translation="-0.0826 0.0403 -0.0242 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightFoot" lower="-1.0 -1.0 -1.0" upper="1.0 1.0 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.08 0.0776 -0.0419"/>
</Joint>
</Node>
<Node name="FootThumbR" parent="TalusR" >
<Body type="Box" mass="0.2" size="0.0407 0.0262 0.0563 " contact="On" color="0.3 0.3 1.5 1.0" obj="R_FootThumb.obj">
<Transformation linear="0.9847 -0.0097 0.1739 -0.0129 -0.9998 0.0177 0.1737 -0.0196 -0.9846 " translation="-0.0765 0.0268 0.0938 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0781 0.0201 0.0692"/>
</Joint>
</Node>
<Node name="FootPinkyR" parent="TalusR" >
<Body type="Box" mass="0.2" size="0.0422 0.0238 0.0529 " contact="On" color="0.3 0.3 1.5 1.0" obj="R_FootPinky.obj">
<Transformation linear="0.9402 0.0126 0.3405 0.0083 -0.9999 0.0142 0.3407 -0.0105 -0.9401 " translation="-0.1244 0.0269 0.0810 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.1227 0.0142 0.0494"/>
</Joint>
</Node>
<Node name="FemurL" parent="Pelvis" >
<Body type="Box" mass="7.0" size="0.1271 0.4043 0.1398" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Femur.obj">
<Transformation linear="0.9998 -0.0174 -0.0024 0.0175 0.9997 0.0172 0.21 -0.0172 0.9998 " translation="0.0959 0.7241 -0.0227 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftUpLeg" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0903 0.9337 -0.0116 "/>
</Joint>
</Node>
<Node name="TibiaL" parent="FemurL" >
<Body type="Box" mass="3.0" size="0.1198 0.4156 0.1141 " contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Tibia.obj">
<Transformation linear="0.9994 0.0348 -0.0030 -0.0349 0.9956 -0.0871 -0.0 0.0872 0.9962 " translation="0.0928 0.3018 -0.0341 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" bvh="Character1_LeftLeg" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0995 0.5387 -0.0103 "/>
</Joint>
</Node>
<Node name="TalusL" parent="TibiaL" endeffector="True">
<Body type="Box" mass="0.6" size="0.0756 0.0498 0.1570" contact="On" color="0.6 0.6 1.5 1.0" obj="L_Talus.obj">
<Transformation linear="0.9779 0.0256 0.2073 -0.0199 0.9994 -0.0295 -0.2079 0.0247 0.9778 " translation="0.0826 0.0403 -0.0242 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftFoot" lower="-1.0 -1.0 -1.0" upper="1.0 1.0 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.08 0.0776 -0.0419 "/>
</Joint>
</Node>
<Node name="FootThumbL" parent="TalusL" >
<Body type="Box" mass="0.2" size="0.0407 0.0262 0.0563 " contact="On" color="0.6 0.6 1.5 1.0" obj="L_FootThumb.obj">
<Transformation linear="0.9402 0.0126 0.3405 -0.0083 0.9999 -0.0142 -0.3407 0.0105 0.9401 " translation="0.1244 0.0269 0.0810 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.1215 0.0116 0.0494 "/>
</Joint>
</Node>
<Node name="FootPinkyL" parent="TalusL" >
<Body type="Box" mass="0.2" size="0.0422 0.0238 0.0529 " contact="On" color="0.6 0.6 1.5 1.0" obj="L_FootPinky.obj">
<Transformation linear="0.9847 -0.0097 0.1739 0.0129 0.9998 -0.0177 -0.1737 0.0196 0.9846 " translation="0.0765 0.0268 0.0938 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0756 0.0118 0.0676 "/>
</Joint>
</Node>
<Node name="Spine" parent="Pelvis" >
<Body type="Box" mass="5.0" size="0.1170 0.0976 0.0984" contact="Off" color="0.6 0.6 1.5 1.0" obj="Spine.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 " translation="0.0 1.1204 -0.0401 "/>
</Body>
<Joint type="Ball" bvh="Character1_Spine" lower="-0.4 -0.4 -0.2 " upper="0.4 0.4 0.2 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.0675 -0.0434 "/>
</Joint>
</Node>
<Node name="Torso" parent="Spine" >
<Body type="Box" mass="10.0" size="0.1798 0.2181 0.1337" contact="Off" color="0.6 0.6 1.5 1.0" obj="Torso.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 -0.0092 0.0 0.0092 1.0 " translation="0.0 1.3032 -0.0398 "/>
</Body>
<Joint type="Ball" bvh="Character1_Spine1" lower="-0.4 -0.4 -0.2 " upper="0.4 0.4 0.2 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.1761 -0.0498 "/>
</Joint>
</Node>
<Node name="Neck" parent="Torso" >
<Body type="Box" mass="2.0" size="0.0793 0.0728 0.0652" contact="Off" color="0.6 0.6 1.5 1.0" obj="Neck.obj">
<Transformation linear="1.0 0.0 0.0 0.0 0.9732 -0.2301 0.0 0.2301 0.9732 " translation="0.0 1.5297 -0.0250 "/>
</Body>
<Joint type="Ball" bvh="Character1_Neck" lower="-0.4 -0.4 -0.4 " upper="0.6 0.6 1.5 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.4844 -0.0436 "/>
</Joint>
</Node>
<Node name="Head" parent="Neck" endeffector="True">
<Body type="Box" mass="2.0" size="0.1129 0.1144 0.1166" contact="Off" color="0.6 0.6 1.5 1.0" obj="Skull.obj">
<Transformation linear="1.0 0.0 0.0 0.0 0.9895 -0.1447 0.0 0.1447 0.9895 " translation="0.0 1.6527 -0.0123 "/>
</Body>
<Joint type="Ball" lower="-0.4 -0.4 -0.4 " upper="0.6 0.6 1.5 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.5652 -0.0086 "/>
</Joint>
</Node>
<Node name="ShoulderR" parent="Torso" >
<Body type="Box" mass="1.0" size="0.1635 0.0634 0.0645" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Shoulder.obj">
<Transformation linear="0.9985 -0.0048 0.0549 -0.0047 -1.0 -0.0011 0.0549 0.0008 -0.9985 " translation="-0.0981 1.4644 -0.0391 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightShoulder" lower="-0.5 -0.5 -0.5" upper="0.5 0.5 0.5">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0147 1.4535 -0.0381 "/>
</Joint>
</Node>
<Node name="ArmR" parent="ShoulderR" >
<Body type="Box" mass="1.0" size="0.3329 0.0542 0.0499" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Humerus.obj">
<Transformation linear="0.9960 0.0361 -0.0812 -0.0669 -0.2971 -0.952500 -0.0585 0.9542 -0.2936 " translation="-0.3578 1.4522 -0.0235 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightArm" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.1995 1.4350 -0.0353 "/>
</Joint>
</Node>
<Node name="ForeArmR" parent="ArmR" >
<Body type="Box" mass="0.5" size="0.2630 0.0506 0.0513" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Radius.obj">
<Transformation linear="0.9929 0.0823 -0.0856 -0.0517 -0.3492 -0.9356 -0.1069 0.9334 -0.3424 " translation="-0.6674 1.4699 -0.0059 "/>
</Body>
<Joint type="Revolute" axis="0.0 1.0 0.0" bvh="Character1_RightForeArm" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.5234 1.4607 -0.0105 "/>
</Joint>
</Node>
<Node name="HandR" parent="ForeArmR" endeffector="True">
<Body type="Box" mass="0.2" size="0.1306 0.0104 0.0846" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Hand.obj">
<Transformation linear="0.9712 0.2357 -0.0353 0.2243 -0.9540 -0.1990 -0.0806 0.1853 -0.9794 " translation="-0.8810 1.4647 0.0315 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightHand" lower="-0.7 -0.7 -0.7 " upper="0.7 0.7 0.7">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.8102 1.469 0.0194 "/>
</Joint>
</Node>
<Node name="ShoulderL" parent="Torso" >
<Body type="Box" mass="1.0" size="0.1635 0.0634 0.0645" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Shoulder.obj">
<Transformation linear="0.9985 -0.0048 0.0549 0.0047 1.0000 0.0011 -0.0549 -0.0008 0.9985 " translation="0.0981 1.4644 -0.0391 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftShoulder" lower="-0.5 -0.5 -0.5" upper="0.5 0.5 0.5">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0147 1.4535 -0.0381"/>
</Joint>
</Node>
<Node name="ArmL" parent="ShoulderL" >
<Body type="Box" mass="1.0" size="0.3329 0.0542 0.0499" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Humerus.obj">
<Transformation linear="0.9960 0.0361 -0.0812 0.0669 0.2971 0.9525 0.0585 -0.9542 0.2936 " translation="0.3578 1.4522 -0.0235 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftArm" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.1995 1.4350 -0.0353"/>
</Joint>
</Node>
<Node name="ForeArmL" parent="ArmL" >
<Body type="Box" mass="0.5" size="0.2630 0.0506 0.0513" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Radius.obj">
<Transformation linear="0.9929 0.0823 -0.0856 0.0517 0.3492 0.9356 0.1069 -0.9334 0.3424 " translation="0.6674 1.4699 -0.0059 "/>
</Body>
<Joint type="Revolute" axis="0.0 1.0 0.0" bvh="Character1_LeftForeArm" lower="-2.3" upper="0.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.5234 1.4607 -0.0105"/>
</Joint>
</Node>
<Node name="HandL" parent="ForeArmL" endeffector="True">
<Body type="Box" mass="0.2" size="0.1306 0.0104 0.0846" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Hand.obj">
<Transformation linear="0.9712 0.2357 -0.0353 -0.2243 0.9540 0.1990 0.0806 -0.1853 0.9794 " translation="0.8813 1.4640 0.0315 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftHand" lower="-0.7 -0.7 -0.7 " upper="0.7 0.7 0.7">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.8102 1.4694 0.0194"/>
</Joint>
</Node>
</Skeleton> | 12,775 | XML | 65.541666 | 148 | 0.570176 |
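A minimal sketch (not part of the repository) of how a <Skeleton> asset row like the ones above and below can be inspected with Python's standard library. The helper name is mine, and the path reuses envs/assets/snu/arm.xml from the following row purely as an example; substitute wherever the asset is stored locally.

import xml.etree.ElementTree as ET

def summarize_skeleton(path):
    # Print each Node with its parent, joint type, body mass, and joint origin,
    # read from the Body/Joint/Transformation children of every <Node> element.
    root = ET.parse(path).getroot()  # <Skeleton name="Human">
    for node in root.findall("Node"):
        body = node.find("Body")
        joint = node.find("Joint")
        origin = [float(x) for x in joint.find("Transformation").get("translation").split()]
        print(f"{node.get('name'):12s} parent={node.get('parent'):10s} "
              f"joint={joint.get('type'):8s} mass={float(body.get('mass')):6.2f} "
              f"joint_origin={origin}")

if __name__ == "__main__":
    summarize_skeleton("envs/assets/snu/arm.xml")  # hypothetical local path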
vstrozzi/FRL-SHAC-Extension/envs/assets/snu/arm.xml | <Skeleton name="Human">
<Node name="Pelvis" parent="None" >
<Body type="Box" mass="15.0" size="0.2083 0.1454 0.1294" contact="Off" color="0.6 0.6 1.5 1.0" obj="Pelvis.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.9809 -0.0308 "/>
</Body>
<Joint type="Free" bvh="Character1_Hips">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.9809 -0.0308 "/>
</Joint>
</Node>
<Node name="FemurR" parent="Pelvis" >
<Body type="Box" mass="7.0" size="0.1271 0.4043 0.1398" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Femur.obj">
<Transformation linear="0.9998 -0.0174 -0.0024 -0.0175 -0.9997 -0.0172 -0.21 0.0172 -0.9998 " translation="-0.0959 0.7241 -0.0227 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightUpLeg" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0903 0.9337 -0.0116 "/>
</Joint>
</Node>
<Node name="TibiaR" parent="FemurR" >
<Body type="Box" mass="3.0" size="0.1198 0.4156 0.1141 " contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Tibia.obj">
<Transformation linear="0.9994 0.0348 -0.0030 0.0349 -0.9956 0.0871 0.0 -0.0872 -0.9962 " translation="-0.0928 0.3018 -0.0341 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" bvh="Character1_RightLeg" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0995 0.5387 -0.0103 "/>
</Joint>
</Node>
<Node name="TalusR" parent="TibiaR" endeffector="True">
<Body type="Box" mass="0.6" size="0.0756 0.0498 0.1570" contact="On" color="0.3 0.3 1.5 1.0" obj="R_Talus.obj">
<Transformation linear="0.9779 0.0256 0.2073 0.0199 -0.9994 0.0295 0.2079 -0.0247 -0.9778 " translation="-0.0826 0.0403 -0.0242 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightFoot" lower="-1.0 -1.0 -1.0" upper="1.0 1.0 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.08 0.0776 -0.0419"/>
</Joint>
</Node>
<Node name="FootThumbR" parent="TalusR" >
<Body type="Box" mass="0.2" size="0.0407 0.0262 0.0563 " contact="On" color="0.3 0.3 1.5 1.0" obj="R_FootThumb.obj">
<Transformation linear="0.9847 -0.0097 0.1739 -0.0129 -0.9998 0.0177 0.1737 -0.0196 -0.9846 " translation="-0.0765 0.0268 0.0938 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0781 0.0201 0.0692"/>
</Joint>
</Node>
<Node name="FootPinkyR" parent="TalusR" >
<Body type="Box" mass="0.2" size="0.0422 0.0238 0.0529 " contact="On" color="0.3 0.3 1.5 1.0" obj="R_FootPinky.obj">
<Transformation linear="0.9402 0.0126 0.3405 0.0083 -0.9999 0.0142 0.3407 -0.0105 -0.9401 " translation="-0.1244 0.0269 0.0810 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.1227 0.0142 0.0494"/>
</Joint>
</Node>
<Node name="FemurL" parent="Pelvis" >
<Body type="Box" mass="7.0" size="0.1271 0.4043 0.1398" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Femur.obj">
<Transformation linear="0.9998 -0.0174 -0.0024 0.0175 0.9997 0.0172 0.21 -0.0172 0.9998 " translation="0.0959 0.7241 -0.0227 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftUpLeg" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0903 0.9337 -0.0116 "/>
</Joint>
</Node>
<Node name="TibiaL" parent="FemurL" >
<Body type="Box" mass="3.0" size="0.1198 0.4156 0.1141 " contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Tibia.obj">
<Transformation linear="0.9994 0.0348 -0.0030 -0.0349 0.9956 -0.0871 -0.0 0.0872 0.9962 " translation="0.0928 0.3018 -0.0341 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" bvh="Character1_LeftLeg" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0995 0.5387 -0.0103 "/>
</Joint>
</Node>
<Node name="TalusL" parent="TibiaL" endeffector="True">
<Body type="Box" mass="0.6" size="0.0756 0.0498 0.1570" contact="On" color="0.6 0.6 1.5 1.0" obj="L_Talus.obj">
<Transformation linear="0.9779 0.0256 0.2073 -0.0199 0.9994 -0.0295 -0.2079 0.0247 0.9778 " translation="0.0826 0.0403 -0.0242 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftFoot" lower="-1.0 -1.0 -1.0" upper="1.0 1.0 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.08 0.0776 -0.0419 "/>
</Joint>
</Node>
<Node name="FootThumbL" parent="TalusL" >
<Body type="Box" mass="0.2" size="0.0407 0.0262 0.0563 " contact="On" color="0.6 0.6 1.5 1.0" obj="L_FootThumb.obj">
<Transformation linear="0.9402 0.0126 0.3405 -0.0083 0.9999 -0.0142 -0.3407 0.0105 0.9401 " translation="0.1244 0.0269 0.0810 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.1215 0.0116 0.0494 "/>
</Joint>
</Node>
<Node name="FootPinkyL" parent="TalusL" >
<Body type="Box" mass="0.2" size="0.0422 0.0238 0.0529 " contact="On" color="0.6 0.6 1.5 1.0" obj="L_FootPinky.obj">
<Transformation linear="0.9847 -0.0097 0.1739 0.0129 0.9998 -0.0177 -0.1737 0.0196 0.9846 " translation="0.0765 0.0268 0.0938 "/>
</Body>
<Joint type="Revolute" axis ="1.0 0.0 0.0" lower="-0.6" upper="0.6">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0756 0.0118 0.0676 "/>
</Joint>
</Node>
<Node name="Spine" parent="Pelvis" >
<Body type="Box" mass="5.0" size="0.1170 0.0976 0.0984" contact="Off" color="0.6 0.6 1.5 1.0" obj="Spine.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 " translation="0.0 1.1204 -0.0401 "/>
</Body>
<Joint type="Ball" bvh="Character1_Spine" lower="-0.4 -0.4 -0.2 " upper="0.4 0.4 0.2 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.0675 -0.0434 "/>
</Joint>
</Node>
<Node name="Torso" parent="Spine" >
<Body type="Box" mass="10.0" size="0.1798 0.2181 0.1337" contact="Off" color="0.6 0.6 1.5 1.0" obj="Torso.obj">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 -0.0092 0.0 0.0092 1.0 " translation="0.0 1.3032 -0.0398 "/>
</Body>
<Joint type="Fixed" bvh="Character1_Spine1" lower="-0.4 -0.4 -0.2 " upper="0.4 0.4 0.2 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.1761 -0.0498 "/>
</Joint>
</Node>
<Node name="Neck" parent="Torso" >
<Body type="Box" mass="2.0" size="0.0793 0.0728 0.0652" contact="Off" color="0.6 0.6 1.5 1.0" obj="Neck.obj">
<Transformation linear="1.0 0.0 0.0 0.0 0.9732 -0.2301 0.0 0.2301 0.9732 " translation="0.0 1.5297 -0.0250 "/>
</Body>
<Joint type="Ball" bvh="Character1_Neck" lower="-0.4 -0.4 -0.4 " upper="0.6 0.6 1.5 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.4844 -0.0436 "/>
</Joint>
</Node>
<Node name="Head" parent="Neck" endeffector="True">
<Body type="Box" mass="2.0" size="0.1129 0.1144 0.1166" contact="Off" color="0.6 0.6 1.5 1.0" obj="Skull.obj">
<Transformation linear="1.0 0.0 0.0 0.0 0.9895 -0.1447 0.0 0.1447 0.9895 " translation="0.0 1.6527 -0.0123 "/>
</Body>
<Joint type="Ball" lower="-0.4 -0.4 -0.4 " upper="0.6 0.6 1.5 ">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0. 1.5652 -0.0086 "/>
</Joint>
</Node>
<Node name="ShoulderR" parent="Torso" >
<Body type="Box" mass="1.0" size="0.1635 0.0634 0.0645" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Shoulder.obj">
<Transformation linear="0.9985 -0.0048 0.0549 -0.0047 -1.0 -0.0011 0.0549 0.0008 -0.9985 " translation="-0.0981 1.4644 -0.0391 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightShoulder" lower="-0.5 -0.5 -0.5" upper="0.5 0.5 0.5">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.0147 1.4535 -0.0381 "/>
</Joint>
</Node>
<Node name="ArmR" parent="ShoulderR" >
<Body type="Box" mass="1.0" size="0.3329 0.0542 0.0499" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Humerus.obj">
<Transformation linear="0.9960 0.0361 -0.0812 -0.0669 -0.2971 -0.952500 -0.0585 0.9542 -0.2936 " translation="-0.3578 1.4522 -0.0235 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightArm" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.1995 1.4350 -0.0353 "/>
</Joint>
</Node>
<Node name="ForeArmR" parent="ArmR" >
<Body type="Box" mass="0.5" size="0.2630 0.0506 0.0513" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Radius.obj">
<Transformation linear="0.9929 0.0823 -0.0856 -0.0517 -0.3492 -0.9356 -0.1069 0.9334 -0.3424 " translation="-0.6674 1.4699 -0.0059 "/>
</Body>
<Joint type="Revolute" axis="0.0 1.0 0.0" bvh="Character1_RightForeArm" lower="0.0" upper="2.3">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.5234 1.4607 -0.0105 "/>
</Joint>
</Node>
<Node name="HandR" parent="ForeArmR" endeffector="True">
<Body type="Box" mass="0.2" size="0.1306 0.0104 0.0846" contact="Off" color="0.3 0.3 1.5 1.0" obj="R_Hand.obj">
<Transformation linear="0.9712 0.2357 -0.0353 0.2243 -0.9540 -0.1990 -0.0806 0.1853 -0.9794 " translation="-0.8810 1.4647 0.0315 "/>
</Body>
<Joint type="Ball" bvh="Character1_RightHand" lower="-0.7 -0.7 -0.7 " upper="0.7 0.7 0.7">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="-0.8102 1.469 0.0194 "/>
</Joint>
</Node>
<Node name="ShoulderL" parent="Torso" >
<Body type="Box" mass="1.0" size="0.1635 0.0634 0.0645" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Shoulder.obj">
<Transformation linear="0.9985 -0.0048 0.0549 0.0047 1.0000 0.0011 -0.0549 -0.0008 0.9985 " translation="0.0981 1.4644 -0.0391 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftShoulder" lower="-0.5 -0.5 -0.5" upper="0.5 0.5 0.5">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0147 1.4535 -0.0381"/>
</Joint>
</Node>
<Node name="ArmL" parent="ShoulderL" >
<Body type="Box" mass="1.0" size="0.3329 0.0542 0.0499" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Humerus.obj">
<Transformation linear="0.9960 0.0361 -0.0812 0.0669 0.2971 0.9525 0.0585 -0.9542 0.2936 " translation="0.3578 1.4522 -0.0235 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftArm" lower="-2.0 -2.0 -2.0" upper="2.0 2.0 2.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.1995 1.4350 -0.0353"/>
</Joint>
</Node>
<Node name="ForeArmL" parent="ArmL" >
<Body type="Box" mass="0.5" size="0.2630 0.0506 0.0513" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Radius.obj">
<Transformation linear="0.9929 0.0823 -0.0856 0.0517 0.3492 0.9356 0.1069 -0.9334 0.3424 " translation="0.6674 1.4699 -0.0059 "/>
</Body>
<Joint type="Revolute" axis="0.0 1.0 0.0" bvh="Character1_LeftForeArm" lower="-2.3" upper="0.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.5234 1.4607 -0.0105"/>
</Joint>
</Node>
<Node name="HandL" parent="ForeArmL" endeffector="True">
<Body type="Box" mass="0.2" size="0.1306 0.0104 0.0846" contact="Off" color="0.6 0.6 1.5 1.0" obj="L_Hand.obj">
<Transformation linear="0.9712 0.2357 -0.0353 -0.2243 0.9540 0.1990 0.0806 -0.1853 0.9794 " translation="0.8813 1.4640 0.0315 "/>
</Body>
<Joint type="Ball" bvh="Character1_LeftHand" lower="-0.7 -0.7 -0.7 " upper="0.7 0.7 0.7">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.8102 1.4694 0.0194"/>
</Joint>
</Node>
</Skeleton> | 12,782 | XML | 63.560606 | 148 | 0.569942 |
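The muscle284.xml row that follows attaches each muscle Unit to the skeleton through a list of Waypoint elements, each giving a body name and a 3-D anchor point p in the reference pose. A hedged sketch for reading it (assumptions: Python 3.8+ for math.dist, an illustrative function name and local path, and the rest-pose path length approximated as the sum of straight segments between consecutive waypoints):

import math
import xml.etree.ElementTree as ET

def muscle_path_lengths(path):
    # Map each Unit name to the summed distance between its consecutive waypoints.
    root = ET.parse(path).getroot()  # <Muscle>
    lengths = {}
    for unit in root.findall("Unit"):
        pts = [tuple(map(float, w.get("p").split())) for w in unit.findall("Waypoint")]
        lengths[unit.get("name")] = sum(math.dist(a, b) for a, b in zip(pts, pts[1:]))
    return lengths

if __name__ == "__main__":
    for name, length in sorted(muscle_path_lengths("envs/assets/snu/muscle284.xml").items())[:5]:
        print(f"{name:45s} {length:.4f}")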
vstrozzi/FRL-SHAC-Extension/envs/assets/snu/muscle284.xml | <Muscle>
<Unit name="L_Abductor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.629400 1.471000 -0.014000 " />
<Waypoint body="ForeArmL" p="0.732300 1.488400 0.018000 " />
<Waypoint body="ForeArmL" p="0.786300 1.491600 0.024800 " />
<Waypoint body="HandL" p="0.822700 1.472900 0.061900 " />
</Unit>
<Unit name="R_Abductor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.629400 1.471000 -0.014000 " />
<Waypoint body="ForeArmR" p="-0.732300 1.488400 0.018000 " />
<Waypoint body="ForeArmR" p="-0.786300 1.491600 0.024800 " />
<Waypoint body="HandR" p="-0.822700 1.472900 0.061900 " />
</Unit>
<Unit name="L_Adductor_Brevis" f0="151.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.031900 0.919600 0.041600 " />
<Waypoint body="FemurL" p="0.083100 0.833800 0.004900 " />
<Waypoint body="FemurL" p="0.110400 0.826200 -0.008400 " />
</Unit>
<Unit name="R_Adductor_Brevis" f0="151.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.031900 0.919600 0.041600 " />
<Waypoint body="FemurR" p="-0.083100 0.833800 0.004900 " />
<Waypoint body="FemurR" p="-0.110400 0.826200 -0.008400 " />
</Unit>
<Unit name="L_Adductor_Brevis1" f0="151.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.014100 0.911600 0.042700 " />
<Waypoint body="FemurL" p="0.076700 0.756500 -0.000700 " />
<Waypoint body="FemurL" p="0.104000 0.730500 0.002500 " />
</Unit>
<Unit name="R_Adductor_Brevis1" f0="151.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.014100 0.911600 0.042700 " />
<Waypoint body="FemurR" p="-0.076700 0.756500 -0.000700 " />
<Waypoint body="FemurR" p="-0.104000 0.730500 0.002500 " />
</Unit>
<Unit name="L_Adductor_Longus" f0="199.750000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.030200 0.921600 0.042700 " />
<Waypoint body="FemurL" p="0.100300 0.738600 0.002700 " />
<Waypoint body="FemurL" p="0.109600 0.701000 0.001400 " />
</Unit>
<Unit name="R_Adductor_Longus" f0="199.750000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.030200 0.921600 0.042700 " />
<Waypoint body="FemurR" p="-0.100300 0.738600 0.002700 " />
<Waypoint body="FemurR" p="-0.109600 0.701000 0.001400 " />
</Unit>
<Unit name="L_Adductor_Longus1" f0="199.750000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.014000 0.914800 0.048900 " />
<Waypoint body="FemurL" p="0.050500 0.729800 0.005100 " />
<Waypoint body="FemurL" p="0.099100 0.634300 0.001400 " />
</Unit>
<Unit name="R_Adductor_Longus1" f0="199.750000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.014000 0.914800 0.048900 " />
<Waypoint body="FemurR" p="-0.050500 0.729800 0.005100 " />
<Waypoint body="FemurR" p="-0.099100 0.634300 0.001400 " />
</Unit>
<Unit name="L_Adductor_Magnus" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.022300 0.891300 0.013400 " />
<Waypoint body="FemurL" p="0.106400 0.837500 -0.017200 " />
<Waypoint body="FemurL" p="0.133800 0.833900 -0.017600 " />
</Unit>
<Unit name="R_Adductor_Magnus" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.022300 0.891300 0.013400 " />
<Waypoint body="FemurR" p="-0.106400 0.837500 -0.017200 " />
<Waypoint body="FemurR" p="-0.133800 0.833900 -0.017600 " />
</Unit>
<Unit name="L_Adductor_Magnus1" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.023500 0.881300 0.013000 " />
<Waypoint body="FemurL" p="0.097700 0.800600 -0.023300 " />
<Waypoint body="FemurL" p="0.124400 0.759600 -0.002000 " />
</Unit>
<Unit name="R_Adductor_Magnus1" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.023500 0.881300 0.013000 " />
<Waypoint body="FemurR" p="-0.097700 0.800600 -0.023300 " />
<Waypoint body="FemurR" p="-0.124400 0.759600 -0.002000 " />
</Unit>
<Unit name="L_Adductor_Magnus2" f0="259.380000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.035600 0.870400 -0.025800 " />
<Waypoint body="FemurL" p="0.069900 0.809100 -0.024200 " />
<Waypoint body="FemurL" p="0.102600 0.745100 -0.024800 " />
<Waypoint body="FemurL" p="0.116600 0.719600 0.001200 " />
</Unit>
<Unit name="R_Adductor_Magnus2" f0="259.380000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.035600 0.870400 -0.025800 " />
<Waypoint body="FemurR" p="-0.069900 0.809100 -0.024200 " />
<Waypoint body="FemurR" p="-0.102600 0.745100 -0.024800 " />
<Waypoint body="FemurR" p="-0.116600 0.719600 0.001200 " />
</Unit>
<Unit name="L_Adductor_Magnus3" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.047500 0.869700 -0.043600 " />
<Waypoint body="FemurL" p="0.074400 0.781900 -0.034000 " />
<Waypoint body="FemurL" p="0.102400 0.704000 -0.022500 " />
<Waypoint body="FemurL" p="0.105400 0.641800 -0.002200 " />
</Unit>
<Unit name="R_Adductor_Magnus3" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.047500 0.869700 -0.043600 " />
<Waypoint body="FemurR" p="-0.074400 0.781900 -0.034000 " />
<Waypoint body="FemurR" p="-0.102400 0.704000 -0.022500 " />
<Waypoint body="FemurR" p="-0.105400 0.641800 -0.002200 " />
</Unit>
<Unit name="L_Adductor_Magnus4" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.068700 0.877200 -0.056000 " />
<Waypoint body="Pelvis" p="0.063000 0.844300 -0.048200 " />
<Waypoint body="FemurL" p="0.063700 0.641200 -0.031400 " />
<Waypoint body="FemurL" p="0.065300 0.555500 -0.028900 " />
</Unit>
<Unit name="R_Adductor_Magnus4" f0="259.380000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.068700 0.877200 -0.056000 " />
<Waypoint body="Pelvis" p="-0.063000 0.844300 -0.048200 " />
<Waypoint body="FemurR" p="-0.063700 0.641200 -0.031400 " />
<Waypoint body="FemurR" p="-0.065300 0.555500 -0.028900 " />
</Unit>
<Unit name="L_Anconeous" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.506400 1.482400 -0.009500 " />
<Waypoint body="ForeArmL" p="0.537100 1.479700 -0.026300 " />
<Waypoint body="ForeArmL" p="0.571200 1.468800 -0.029500 " />
</Unit>
<Unit name="R_Anconeous" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.506400 1.482400 -0.009500 " />
<Waypoint body="ForeArmR" p="-0.537100 1.479700 -0.026300 " />
<Waypoint body="ForeArmR" p="-0.571200 1.468800 -0.029500 " />
</Unit>
<Unit name="L_Bicep_Brachii_Long_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.169300 1.443700 -0.036900 " />
<Waypoint body="ArmL" p="0.177900 1.421700 -0.033000 " />
<Waypoint body="ArmL" p="0.181000 1.432000 -0.018300 " />
<Waypoint body="ArmL" p="0.191100 1.434300 -0.008400 " />
<Waypoint body="ArmL" p="0.214500 1.434800 -0.007100 " />
<Waypoint body="ArmL" p="0.259100 1.434100 -0.002400 " />
<Waypoint body="ForeArmL" p="0.529000 1.448300 0.025000 " />
<Waypoint body="ForeArmL" p="0.583200 1.462500 0.001900 " />
</Unit>
<Unit name="R_Bicep_Brachii_Long_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.169300 1.443700 -0.036900 " />
<Waypoint body="ArmR" p="-0.177900 1.421700 -0.033000 " />
<Waypoint body="ArmR" p="-0.181000 1.432000 -0.018300 " />
<Waypoint body="ArmR" p="-0.191100 1.434300 -0.008400 " />
<Waypoint body="ArmR" p="-0.214500 1.434800 -0.007100 " />
<Waypoint body="ArmR" p="-0.259100 1.434100 -0.002400 " />
<Waypoint body="ForeArmR" p="-0.529000 1.448300 0.025000 " />
<Waypoint body="ForeArmR" p="-0.583200 1.462500 0.001900 " />
</Unit>
<Unit name="L_Bicep_Brachii_Short_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.168400 1.434700 -0.007400 " />
<Waypoint body="ArmL" p="0.252000 1.411300 -0.007700 " />
<Waypoint body="ArmL" p="0.489000 1.425300 0.023400 " />
<Waypoint body="ForeArmL" p="0.585400 1.461400 -0.001300 " />
</Unit>
<Unit name="R_Bicep_Brachii_Short_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.168400 1.434700 -0.007400 " />
<Waypoint body="ArmR" p="-0.252000 1.411300 -0.007700 " />
<Waypoint body="ArmR" p="-0.489000 1.425300 0.023400 " />
<Waypoint body="ForeArmR" p="-0.585400 1.461400 -0.001300 " />
</Unit>
<Unit name="L_Bicep_Femoris_Longus" f0="705.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.070900 0.900200 -0.063600 " />
<Waypoint body="FemurL" p="0.096500 0.854800 -0.046300 " />
<Waypoint body="FemurL" p="0.139900 0.574300 -0.029200 " />
<Waypoint body="FemurL" p="0.144100 0.541600 -0.032800 " />
<Waypoint body="TibiaL" p="0.138200 0.488800 -0.038800 " />
</Unit>
<Unit name="R_Bicep_Femoris_Longus" f0="705.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.070900 0.900200 -0.063600 " />
<Waypoint body="FemurR" p="-0.096500 0.854800 -0.046300 " />
<Waypoint body="FemurR" p="-0.139900 0.574300 -0.029200 " />
<Waypoint body="FemurR" p="-0.144100 0.541600 -0.032800 " />
<Waypoint body="TibiaR" p="-0.138200 0.488800 -0.038800 " />
</Unit>
<Unit name="L_Bicep_Femoris_Short" f0="157.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.118200 0.729800 0.000200 " />
<Waypoint body="FemurL" p="0.143500 0.545000 -0.029700 " />
<Waypoint body="TibiaL" p="0.139800 0.489100 -0.034100 " />
</Unit>
<Unit name="R_Bicep_Femoris_Short" f0="157.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.118200 0.729800 0.000200 " />
<Waypoint body="FemurR" p="-0.143500 0.545000 -0.029700 " />
<Waypoint body="TibiaR" p="-0.139800 0.489100 -0.034100 " />
</Unit>
<Unit name="L_Bicep_Femoris_Short1" f0="157.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.111800 0.618400 0.001900 " />
<Waypoint body="FemurL" p="0.141600 0.532000 -0.019900 " />
<Waypoint body="TibiaL" p="0.137900 0.488500 -0.030700 " />
</Unit>
<Unit name="R_Bicep_Femoris_Short1" f0="157.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.111800 0.618400 0.001900 " />
<Waypoint body="FemurR" p="-0.141600 0.532000 -0.019900 " />
<Waypoint body="TibiaR" p="-0.137900 0.488500 -0.030700 " />
</Unit>
<Unit name="L_Brachialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.332100 1.460400 -0.019000 " />
<Waypoint body="ArmL" p="0.350000 1.471800 -0.008100 " />
<Waypoint body="ArmL" p="0.496300 1.460600 0.017500 " />
<Waypoint body="ForeArmL" p="0.557200 1.461900 -0.011000 " />
</Unit>
<Unit name="R_Brachialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.332100 1.460400 -0.019000 " />
<Waypoint body="ArmR" p="-0.350000 1.471800 -0.008100 " />
<Waypoint body="ArmR" p="-0.496300 1.460600 0.017500 " />
<Waypoint body="ForeArmR" p="-0.557200 1.461900 -0.011000 " />
</Unit>
<Unit name="L_Brachioradialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.442800 1.465200 -0.020900 " />
<Waypoint body="ArmL" p="0.465100 1.490300 -0.008200 " />
<Waypoint body="ArmL" p="0.499700 1.478900 0.025100 " />
<Waypoint body="ForeArmL" p="0.561800 1.460900 0.037700 " />
<Waypoint body="ForeArmL" p="0.708600 1.474300 0.036200 " />
<Waypoint body="ForeArmL" p="0.786700 1.488000 0.030200 " />
</Unit>
<Unit name="R_Brachioradialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.442800 1.465200 -0.020900 " />
<Waypoint body="ArmR" p="-0.465100 1.490300 -0.008200 " />
<Waypoint body="ArmR" p="-0.499700 1.478900 0.025100 " />
<Waypoint body="ForeArmR" p="-0.561800 1.460900 0.037700 " />
<Waypoint body="ForeArmR" p="-0.708600 1.474300 0.036200 " />
<Waypoint body="ForeArmR" p="-0.786700 1.488000 0.030200 " />
</Unit>
<Unit name="L_Coracobrachialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.168100 1.432600 -0.008300 " />
<Waypoint body="ArmL" p="0.228900 1.407100 -0.019200 " />
<Waypoint body="ArmL" p="0.312100 1.429100 -0.019400 " />
<Waypoint body="ArmL" p="0.338600 1.441800 -0.016700 " />
</Unit>
<Unit name="R_Coracobrachialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.168100 1.432600 -0.008300 " />
<Waypoint body="ArmR" p="-0.228900 1.407100 -0.019200 " />
<Waypoint body="ArmR" p="-0.312100 1.429100 -0.019400 " />
<Waypoint body="ArmR" p="-0.338600 1.441800 -0.016700 " />
</Unit>
<Unit name="L_Deltoid" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.143200 1.466200 -0.019000 " />
<Waypoint body="ShoulderL" p="0.160700 1.447600 0.001500 " />
<Waypoint body="ArmL" p="0.221300 1.411900 0.013700 " />
<Waypoint body="ArmL" p="0.268700 1.443100 0.014100 " />
<Waypoint body="ArmL" p="0.299600 1.446200 -0.010700 " />
</Unit>
<Unit name="R_Deltoid" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.143200 1.466200 -0.019000 " />
<Waypoint body="ShoulderR" p="-0.160700 1.447600 0.001500 " />
<Waypoint body="ArmR" p="-0.221300 1.411900 0.013700 " />
<Waypoint body="ArmR" p="-0.268700 1.443100 0.014100 " />
<Waypoint body="ArmR" p="-0.299600 1.446200 -0.010700 " />
</Unit>
<Unit name="L_Deltoid1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.197700 1.465900 -0.025700 " />
<Waypoint body="ArmL" p="0.186600 1.450500 -0.008600 " />
<Waypoint body="ArmL" p="0.227700 1.467700 0.006400 " />
<Waypoint body="ArmL" p="0.278600 1.469800 0.007400 " />
<Waypoint body="ArmL" p="0.318300 1.452900 -0.008100 " />
</Unit>
<Unit name="R_Deltoid1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.197700 1.465900 -0.025700 " />
<Waypoint body="ArmR" p="-0.186600 1.450500 -0.008600 " />
<Waypoint body="ArmR" p="-0.227700 1.467700 0.006400 " />
<Waypoint body="ArmR" p="-0.278600 1.469800 0.007400 " />
<Waypoint body="ArmR" p="-0.318300 1.452900 -0.008100 " />
</Unit>
<Unit name="L_Deltoid2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.203700 1.459300 -0.052000 " />
<Waypoint body="ShoulderL" p="0.193300 1.466900 -0.038600 " />
<Waypoint body="ArmL" p="0.236700 1.485800 -0.026200 " />
<Waypoint body="ArmL" p="0.295100 1.477600 -0.016200 " />
<Waypoint body="ArmL" p="0.324100 1.456900 -0.011200 " />
</Unit>
<Unit name="R_Deltoid2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.203700 1.459300 -0.052000 " />
<Waypoint body="ShoulderR" p="-0.193300 1.466900 -0.038600 " />
<Waypoint body="ArmR" p="-0.236700 1.485800 -0.026200 " />
<Waypoint body="ArmR" p="-0.295100 1.477600 -0.016200 " />
<Waypoint body="ArmR" p="-0.324100 1.456900 -0.011200 " />
</Unit>
<Unit name="L_Extensor_Carpi_Radialis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.478900 1.470500 -0.017300 " />
<Waypoint body="ArmL" p="0.501100 1.489700 -0.001000 " />
<Waypoint body="ForeArmL" p="0.552500 1.490000 0.029900 " />
<Waypoint body="ForeArmL" p="0.720600 1.483000 0.027900 " />
<Waypoint body="ForeArmL" p="0.782100 1.488200 0.013300 " />
<Waypoint body="HandL" p="0.829300 1.485400 0.038500 " />
</Unit>
<Unit name="R_Extensor_Carpi_Radialis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.478900 1.470500 -0.017300 " />
<Waypoint body="ArmR" p="-0.501100 1.489700 -0.001000 " />
<Waypoint body="ForeArmR" p="-0.552500 1.490000 0.029900 " />
<Waypoint body="ForeArmR" p="-0.720600 1.483000 0.027900 " />
<Waypoint body="ForeArmR" p="-0.782100 1.488200 0.013300 " />
<Waypoint body="HandR" p="-0.829300 1.485400 0.038500 " />
</Unit>
<Unit name="L_Extensor_Carpi_Ulnaris" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.518600 1.483100 -0.006700 " />
<Waypoint body="ForeArmL" p="0.559300 1.490700 -0.017100 " />
<Waypoint body="ForeArmL" p="0.652300 1.470700 -0.029700 " />
<Waypoint body="ForeArmL" p="0.785500 1.449400 0.000900 " />
<Waypoint body="HandL" p="0.825500 1.477700 0.001000 " />
</Unit>
<Unit name="R_Extensor_Carpi_Ulnaris" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.518600 1.483100 -0.006700 " />
<Waypoint body="ForeArmR" p="-0.559300 1.490700 -0.017100 " />
<Waypoint body="ForeArmR" p="-0.652300 1.470700 -0.029700 " />
<Waypoint body="ForeArmR" p="-0.785500 1.449400 0.000900 " />
<Waypoint body="HandR" p="-0.825500 1.477700 0.001000 " />
</Unit>
<Unit name="L_Extensor_Digiti_Minimi" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.520400 1.483700 -0.005400 " />
<Waypoint body="ForeArmL" p="0.548300 1.490000 -0.007600 " />
<Waypoint body="ForeArmL" p="0.783200 1.463200 -0.003600 " />
<Waypoint body="HandL" p="0.821600 1.482100 0.001400 " />
<Waypoint body="HandL" p="0.884700 1.462100 -0.005200 " />
<Waypoint body="HandL" p="0.927800 1.443100 -0.002500 " />
</Unit>
<Unit name="R_Extensor_Digiti_Minimi" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.520400 1.483700 -0.005400 " />
<Waypoint body="ForeArmR" p="-0.548300 1.490000 -0.007600 " />
<Waypoint body="ForeArmR" p="-0.783200 1.463200 -0.003600 " />
<Waypoint body="HandR" p="-0.821600 1.482100 0.001400 " />
<Waypoint body="HandR" p="-0.884700 1.462100 -0.005200 " />
<Waypoint body="HandR" p="-0.927800 1.443100 -0.002500 " />
</Unit>
<Unit name="L_Extensor_Digitorum_Longus" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.123300 0.482800 -0.012800 " />
<Waypoint body="TibiaL" p="0.124900 0.447400 -0.025500 " />
<Waypoint body="TibiaL" p="0.094400 0.112800 -0.025500 " />
<Waypoint body="TalusL" p="0.091900 0.084400 -0.015300 " />
<Waypoint body="TalusL" p="0.090000 0.027700 0.067600 " />
<Waypoint body="FootThumbL" p="0.092000 0.021200 0.096100 " />
<Waypoint body="FootThumbL" p="0.093800 0.013000 0.112100 " />
</Unit>
<Unit name="R_Extensor_Digitorum_Longus" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.123300 0.482800 -0.012800 " />
<Waypoint body="TibiaR" p="-0.124900 0.447400 -0.025500 " />
<Waypoint body="TibiaR" p="-0.094400 0.112800 -0.025500 " />
<Waypoint body="TalusR" p="-0.091900 0.084400 -0.015300 " />
<Waypoint body="TalusR" p="-0.090000 0.027700 0.067600 " />
<Waypoint body="FootThumbR" p="-0.092000 0.021200 0.096100 " />
<Waypoint body="FootThumbR" p="-0.093800 0.013000 0.112100 " />
</Unit>
<Unit name="L_Extensor_Digitorum_Longus1" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.128600 0.491900 -0.010000 " />
<Waypoint body="TibiaL" p="0.133600 0.407000 -0.020000 " />
<Waypoint body="TibiaL" p="0.097300 0.113900 -0.023900 " />
<Waypoint body="TalusL" p="0.098400 0.080700 -0.011500 " />
<Waypoint body="TalusL" p="0.104700 0.024500 0.061600 " />
<Waypoint body="FootPinkyL" p="0.107400 0.019500 0.079600 " />
<Waypoint body="FootPinkyL" p="0.112000 0.010600 0.103200 " />
</Unit>
<Unit name="R_Extensor_Digitorum_Longus1" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.128600 0.491900 -0.010000 " />
<Waypoint body="TibiaR" p="-0.133600 0.407000 -0.020000 " />
<Waypoint body="TibiaR" p="-0.097300 0.113900 -0.023900 " />
<Waypoint body="TalusR" p="-0.098400 0.080700 -0.011500 " />
<Waypoint body="TalusR" p="-0.104700 0.024500 0.061600 " />
<Waypoint body="FootPinkyR" p="-0.107400 0.019500 0.079600 " />
<Waypoint body="FootPinkyR" p="-0.112000 0.010600 0.103200 " />
</Unit>
<Unit name="L_Extensor_Digitorum_Longus2" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.127100 0.488400 -0.009500 " />
<Waypoint body="TibiaL" p="0.140800 0.406700 -0.014400 " />
<Waypoint body="TibiaL" p="0.098500 0.113700 -0.024500 " />
<Waypoint body="TalusL" p="0.101300 0.077500 -0.010600 " />
<Waypoint body="FootPinkyL" p="0.118000 0.026000 0.054300 " />
<Waypoint body="FootPinkyL" p="0.121400 0.022400 0.068700 " />
<Waypoint body="FootPinkyL" p="0.125200 0.012900 0.084600 " />
</Unit>
<Unit name="R_Extensor_Digitorum_Longus2" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.127100 0.488400 -0.009500 " />
<Waypoint body="TibiaR" p="-0.140800 0.406700 -0.014400 " />
<Waypoint body="TibiaR" p="-0.098500 0.113700 -0.024500 " />
<Waypoint body="TalusR" p="-0.101300 0.077500 -0.010600 " />
<Waypoint body="FootPinkyR" p="-0.118000 0.026000 0.054300 " />
<Waypoint body="FootPinkyR" p="-0.121400 0.022400 0.068700 " />
<Waypoint body="FootPinkyR" p="-0.125200 0.012900 0.084600 " />
</Unit>
<Unit name="L_Extensor_Digitorum_Longus3" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.130000 0.493100 -0.011700 " />
<Waypoint body="TibiaL" p="0.131500 0.407000 -0.033100 " />
<Waypoint body="TibiaL" p="0.103700 0.082400 -0.017500 " />
<Waypoint body="TalusL" p="0.114200 0.059400 0.000900 " />
<Waypoint body="TalusL" p="0.130700 0.028300 0.039500 " />
<Waypoint body="FootPinkyL" p="0.137100 0.009300 0.074500 " />
</Unit>
<Unit name="R_Extensor_Digitorum_Longus3" f0="172.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.130000 0.493100 -0.011700 " />
<Waypoint body="TibiaR" p="-0.131500 0.407000 -0.033100 " />
<Waypoint body="TibiaR" p="-0.103700 0.082400 -0.017500 " />
<Waypoint body="TalusR" p="-0.114200 0.059400 0.000900 " />
<Waypoint body="TalusR" p="-0.130700 0.028300 0.039500 " />
<Waypoint body="FootPinkyR" p="-0.137100 0.009300 0.074500 " />
</Unit>
<Unit name="L_Extensor_Digitorum1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.519300 1.487900 -0.001600 " />
<Waypoint body="ForeArmL" p="0.745800 1.482600 0.005500 " />
<Waypoint body="ForeArmL" p="0.782100 1.478400 0.002300 " />
<Waypoint body="HandL" p="0.824700 1.491700 0.026300 " />
<Waypoint body="HandL" p="0.895700 1.481400 0.034000 " />
<Waypoint body="HandL" p="0.960600 1.441800 0.044200 " />
</Unit>
<Unit name="R_Extensor_Digitorum1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.519300 1.487900 -0.001600 " />
<Waypoint body="ForeArmR" p="-0.745800 1.482600 0.005500 " />
<Waypoint body="ForeArmR" p="-0.782100 1.478400 0.002300 " />
<Waypoint body="HandR" p="-0.824700 1.491700 0.026300 " />
<Waypoint body="HandR" p="-0.895700 1.481400 0.034000 " />
<Waypoint body="HandR" p="-0.960600 1.441800 0.044200 " />
</Unit>
<Unit name="L_Extensor_Hallucis_Longus" f0="165.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.115100 0.380600 -0.028300 " />
<Waypoint body="TibiaL" p="0.097000 0.119900 -0.023000 " />
<Waypoint body="TalusL" p="0.083400 0.082500 -0.015100 " />
<Waypoint body="TalusL" p="0.072400 0.063500 0.027400 " />
<Waypoint body="TalusL" p="0.065600 0.031800 0.071700 " />
<Waypoint body="FootThumbL" p="0.060600 0.012900 0.112800 " />
</Unit>
<Unit name="R_Extensor_Hallucis_Longus" f0="165.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.115100 0.380600 -0.028300 " />
<Waypoint body="TibiaR" p="-0.097000 0.119900 -0.023000 " />
<Waypoint body="TalusR" p="-0.083400 0.082500 -0.015100 " />
<Waypoint body="TalusR" p="-0.072400 0.063500 0.027400 " />
<Waypoint body="TalusR" p="-0.065600 0.031800 0.071700 " />
<Waypoint body="FootThumbR" p="-0.060600 0.012900 0.112800 " />
</Unit>
<Unit name="L_Extensor_Pollicis_Brevis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.700700 1.470500 0.008700 " />
<Waypoint body="ForeArmL" p="0.791900 1.490900 0.019900 " />
<Waypoint body="HandL" p="0.816700 1.482000 0.054200 " />
<Waypoint body="HandL" p="0.855900 1.457500 0.079600 " />
</Unit>
<Unit name="R_Extensor_Pollicis_Brevis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.700700 1.470500 0.008700 " />
<Waypoint body="ForeArmR" p="-0.791900 1.490900 0.019900 " />
<Waypoint body="HandR" p="-0.816700 1.482000 0.054200 " />
<Waypoint body="HandR" p="-0.855900 1.457500 0.079600 " />
</Unit>
<Unit name="L_Extensor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.671800 1.469500 -0.007300 " />
<Waypoint body="ForeArmL" p="0.770900 1.479600 0.005500 " />
<Waypoint body="HandL" p="0.815100 1.490300 0.039500 " />
<Waypoint body="HandL" p="0.847400 1.466000 0.075500 " />
<Waypoint body="HandL" p="0.877000 1.446000 0.087800 " />
</Unit>
<Unit name="R_Extensor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.671800 1.469500 -0.007300 " />
<Waypoint body="ForeArmR" p="-0.770900 1.479600 0.005500 " />
<Waypoint body="HandR" p="-0.815100 1.490300 0.039500 " />
<Waypoint body="HandR" p="-0.847400 1.466000 0.075500 " />
<Waypoint body="HandR" p="-0.877000 1.446000 0.087800 " />
</Unit>
<Unit name="L_Flexor_Carpi_Radialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.518400 1.426200 -0.016200 " />
<Waypoint body="ForeArmL" p="0.741200 1.458600 0.027000 " />
<Waypoint body="ForeArmL" p="0.784600 1.465300 0.028700 " />
<Waypoint body="HandL" p="0.832400 1.474100 0.039100 " />
</Unit>
<Unit name="R_Flexor_Carpi_Radialis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.518400 1.426200 -0.016200 " />
<Waypoint body="ForeArmR" p="-0.741200 1.458600 0.027000 " />
<Waypoint body="ForeArmR" p="-0.784600 1.465300 0.028700 " />
<Waypoint body="HandR" p="-0.832400 1.474100 0.039100 " />
</Unit>
<Unit name="L_Flexor_Carpi_Ulnaris" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.525500 1.425600 -0.022000 " />
<Waypoint body="ForeArmL" p="0.581900 1.436100 -0.034700 " />
<Waypoint body="ForeArmL" p="0.759400 1.450100 0.006800 " />
<Waypoint body="HandL" p="0.805300 1.467100 0.009900 " />
</Unit>
<Unit name="R_Flexor_Carpi_Ulnaris" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.525500 1.425600 -0.022000 " />
<Waypoint body="ForeArmR" p="-0.581900 1.436100 -0.034700 " />
<Waypoint body="ForeArmR" p="-0.759400 1.450100 0.006800 " />
<Waypoint body="HandR" p="-0.805300 1.467100 0.009900 " />
</Unit>
<Unit name="L_Flexor_Digiti_Minimi_Brevis_Foot" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FootPinkyL" p="0.136400 0.011200 0.049600 " />
<Waypoint body="TalusL" p="0.120100 0.023600 -0.009200 " />
</Unit>
<Unit name="R_Flexor_Digiti_Minimi_Brevis_Foot" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FootPinkyR" p="-0.136400 0.011200 0.049600 " />
<Waypoint body="TalusR" p="-0.120100 0.023600 -0.009200 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Longus" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.089500 0.398600 -0.020400 " />
<Waypoint body="TibiaL" p="0.062700 0.111200 -0.055500 " />
<Waypoint body="TalusL" p="0.063700 0.040400 -0.022200 " />
<Waypoint body="TalusL" p="0.083100 0.032200 -0.001400 " />
<Waypoint body="TalusL" p="0.086700 0.009400 0.059100 " />
<Waypoint body="FootThumbL" p="0.092700 0.008800 0.108400 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Longus" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.089500 0.398600 -0.020400 " />
<Waypoint body="TibiaR" p="-0.062700 0.111200 -0.055500 " />
<Waypoint body="TalusR" p="-0.063700 0.040400 -0.022200 " />
<Waypoint body="TalusR" p="-0.083100 0.032200 -0.001400 " />
<Waypoint body="TalusR" p="-0.086700 0.009400 0.059100 " />
<Waypoint body="FootThumbR" p="-0.092700 0.008800 0.108400 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Longus1" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.089500 0.398600 -0.020400 " />
<Waypoint body="TibiaL" p="0.065700 0.111000 -0.056200 " />
<Waypoint body="TalusL" p="0.064900 0.040300 -0.023900 " />
<Waypoint body="TalusL" p="0.085000 0.031700 -0.008900 " />
<Waypoint body="TalusL" p="0.101600 0.007000 0.053000 " />
<Waypoint body="FootPinkyL" p="0.110200 0.009200 0.099700 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Longus1" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.089500 0.398600 -0.020400 " />
<Waypoint body="TibiaR" p="-0.065700 0.111000 -0.056200 " />
<Waypoint body="TalusR" p="-0.064900 0.040300 -0.023900 " />
<Waypoint body="TalusR" p="-0.085000 0.031700 -0.008900 " />
<Waypoint body="TalusR" p="-0.101600 0.007000 0.053000 " />
<Waypoint body="FootPinkyR" p="-0.110200 0.009200 0.099700 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Longus2" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.089600 0.389300 -0.023100 " />
<Waypoint body="TibiaL" p="0.066600 0.115900 -0.056200 " />
<Waypoint body="TalusL" p="0.063400 0.043000 -0.025700 " />
<Waypoint body="TalusL" p="0.091200 0.030200 -0.006400 " />
<Waypoint body="TalusL" p="0.115100 0.008900 0.042100 " />
<Waypoint body="FootPinkyL" p="0.124100 0.009500 0.083100 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Longus2" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.089600 0.389300 -0.023100 " />
<Waypoint body="TibiaR" p="-0.066600 0.115900 -0.056200 " />
<Waypoint body="TalusR" p="-0.063400 0.043000 -0.025700 " />
<Waypoint body="TalusR" p="-0.091200 0.030200 -0.006400 " />
<Waypoint body="TalusR" p="-0.115100 0.008900 0.042100 " />
<Waypoint body="FootPinkyR" p="-0.124100 0.009500 0.083100 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Longus3" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.083900 0.388100 -0.018800 " />
<Waypoint body="TibiaL" p="0.068200 0.120700 -0.056400 " />
<Waypoint body="TalusL" p="0.059800 0.051000 -0.027300 " />
<Waypoint body="TalusL" p="0.106800 0.026000 -0.001100 " />
<Waypoint body="TalusL" p="0.130900 0.008800 0.039000 " />
<Waypoint body="FootPinkyL" p="0.136400 0.007100 0.070500 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Longus3" f0="137.200000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.083900 0.388100 -0.018800 " />
<Waypoint body="TibiaR" p="-0.068200 0.120700 -0.056400 " />
<Waypoint body="TalusR" p="-0.059800 0.051000 -0.027300 " />
<Waypoint body="TalusR" p="-0.106800 0.026000 -0.001100 " />
<Waypoint body="TalusR" p="-0.130900 0.008800 0.039000 " />
<Waypoint body="FootPinkyR" p="-0.136400 0.007100 0.070500 " />
</Unit>
<Unit name="L_Flexor_Digitorum_Profundus2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.594200 1.465300 -0.009100 " />
<Waypoint body="ForeArmL" p="0.651800 1.456600 0.000400 " />
<Waypoint body="ForeArmL" p="0.783100 1.459500 0.023800 " />
<Waypoint body="HandL" p="0.828300 1.470900 0.028400 " />
<Waypoint body="HandL" p="0.955500 1.442100 0.043300 " />
</Unit>
<Unit name="R_Flexor_Digitorum_Profundus2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.594200 1.465300 -0.009100 " />
<Waypoint body="ForeArmR" p="-0.651800 1.456600 0.000400 " />
<Waypoint body="ForeArmR" p="-0.783100 1.459500 0.023800 " />
<Waypoint body="HandR" p="-0.828300 1.470900 0.028400 " />
<Waypoint body="HandR" p="-0.955500 1.442100 0.043300 " />
</Unit>
<Unit name="L_Flexor_Hallucis" f0="218.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.119700 0.393000 -0.038900 " />
<Waypoint body="TibiaL" p="0.074600 0.107600 -0.058000 " />
<Waypoint body="TalusL" p="0.061400 0.067100 -0.063500 " />
<Waypoint body="TalusL" p="0.067800 0.046700 -0.042800 " />
<Waypoint body="TalusL" p="0.064900 0.011400 0.057700 " />
<Waypoint body="FootThumbL" p="0.061700 0.008000 0.107200 " />
</Unit>
<Unit name="R_Flexor_Hallucis" f0="218.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.119700 0.393000 -0.038900 " />
<Waypoint body="TibiaR" p="-0.074600 0.107600 -0.058000 " />
<Waypoint body="TalusR" p="-0.061400 0.067100 -0.063500 " />
<Waypoint body="TalusR" p="-0.067800 0.046700 -0.042800 " />
<Waypoint body="TalusR" p="-0.064900 0.011400 0.057700 " />
<Waypoint body="FootThumbR" p="-0.061700 0.008000 0.107200 " />
</Unit>
<Unit name="L_Flexor_Hallucis1" f0="218.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.119700 0.393000 -0.038900 " />
<Waypoint body="TibiaL" p="0.074600 0.107600 -0.058000 " />
<Waypoint body="TalusL" p="0.061400 0.067100 -0.063500 " />
<Waypoint body="TalusL" p="0.067800 0.046700 -0.042800 " />
<Waypoint body="TalusL" p="0.064900 0.011400 0.057700 " />
<Waypoint body="FootThumbL" p="0.061700 0.008000 0.107200 " />
</Unit>
<Unit name="R_Flexor_Hallucis1" f0="218.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.119700 0.393000 -0.038900 " />
<Waypoint body="TibiaR" p="-0.074600 0.107600 -0.058000 " />
<Waypoint body="TalusR" p="-0.061400 0.067100 -0.063500 " />
<Waypoint body="TalusR" p="-0.067800 0.046700 -0.042800 " />
<Waypoint body="TalusR" p="-0.064900 0.011400 0.057700 " />
<Waypoint body="FootThumbR" p="-0.061700 0.008000 0.107200 " />
</Unit>
<Unit name="L_Flexor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmL" p="0.677200 1.471300 0.022400 " />
<Waypoint body="ForeArmL" p="0.784600 1.465900 0.028100 " />
<Waypoint body="HandL" p="0.813900 1.469600 0.030800 " />
<Waypoint body="HandL" p="0.830500 1.466600 0.057100 " />
<Waypoint body="HandL" p="0.878900 1.445600 0.083700 " />
</Unit>
<Unit name="R_Flexor_Pollicis_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ForeArmR" p="-0.677200 1.471300 0.022400 " />
<Waypoint body="ForeArmR" p="-0.784600 1.465900 0.028100 " />
<Waypoint body="HandR" p="-0.813900 1.469600 0.030800 " />
<Waypoint body="HandR" p="-0.830500 1.466600 0.057100 " />
<Waypoint body="HandR" p="-0.878900 1.445600 0.083700 " />
</Unit>
<Unit name="L_Gastrocnemius_Lateral_Head" f0="606.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.126400 0.562000 -0.005900 " />
<Waypoint body="FemurL" p="0.121900 0.554700 -0.038300 " />
<Waypoint body="TibiaL" p="0.126200 0.505900 -0.066200 " />
<Waypoint body="TibiaL" p="0.112000 0.302400 -0.091700 " />
</Unit>
<Unit name="R_Gastrocnemius_Lateral_Head" f0="606.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.126400 0.562000 -0.005900 " />
<Waypoint body="FemurR" p="-0.121900 0.554700 -0.038300 " />
<Waypoint body="TibiaR" p="-0.126200 0.505900 -0.066200 " />
<Waypoint body="TibiaR" p="-0.112000 0.302400 -0.091700 " />
</Unit>
<Unit name="L_Gastrocnemius_Medial_Head" f0="1308.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.075000 0.567300 -0.014400 " />
<Waypoint body="FemurL" p="0.095200 0.550700 -0.046600 " />
<Waypoint body="TibiaL" p="0.092400 0.505800 -0.069100 " />
<Waypoint body="TibiaL" p="0.060300 0.273200 -0.059200 " />
</Unit>
<Unit name="R_Gastrocnemius_Medial_Head" f0="1308.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.075000 0.567300 -0.014400 " />
<Waypoint body="FemurR" p="-0.095200 0.550700 -0.046600 " />
<Waypoint body="TibiaR" p="-0.092400 0.505800 -0.069100 " />
<Waypoint body="TibiaR" p="-0.060300 0.273200 -0.059200 " />
</Unit>
<Unit name="L_Gluteus_Maximus" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.053900 1.035800 -0.096200 " />
<Waypoint body="Pelvis" p="0.111500 1.013300 -0.089300 " />
<Waypoint body="FemurL" p="0.153100 0.939700 -0.046600 " />
<Waypoint body="FemurL" p="0.148200 0.872600 -0.016900 " />
</Unit>
<Unit name="R_Gluteus_Maximus" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.053900 1.035800 -0.096200 " />
<Waypoint body="Pelvis" p="-0.111500 1.013300 -0.089300 " />
<Waypoint body="FemurR" p="-0.153100 0.939700 -0.046600 " />
<Waypoint body="FemurR" p="-0.148200 0.872600 -0.016900 " />
</Unit>
<Unit name="L_Gluteus_Maximus1" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.038200 0.988600 -0.099300 " />
<Waypoint body="Pelvis" p="0.103800 0.968800 -0.110800 " />
<Waypoint body="FemurL" p="0.155300 0.900100 -0.049300 " />
<Waypoint body="FemurL" p="0.141600 0.845900 -0.011300 " />
</Unit>
<Unit name="R_Gluteus_Maximus1" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.038200 0.988600 -0.099300 " />
<Waypoint body="Pelvis" p="-0.103800 0.968800 -0.110800 " />
<Waypoint body="FemurR" p="-0.155300 0.900100 -0.049300 " />
<Waypoint body="FemurR" p="-0.141600 0.845900 -0.011300 " />
</Unit>
<Unit name="L_Gluteus_Maximus2" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.029700 0.949800 -0.094300 " />
<Waypoint body="Pelvis" p="0.051700 0.942200 -0.120100 " />
<Waypoint body="Pelvis" p="0.122100 0.906900 -0.097000 " />
<Waypoint body="FemurL" p="0.149300 0.840100 -0.036100 " />
<Waypoint body="FemurL" p="0.134200 0.818200 -0.008900 " />
</Unit>
<Unit name="R_Gluteus_Maximus2" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.029700 0.949800 -0.094300 " />
<Waypoint body="Pelvis" p="-0.051700 0.942200 -0.120100 " />
<Waypoint body="Pelvis" p="-0.122100 0.906900 -0.097000 " />
<Waypoint body="FemurR" p="-0.149300 0.840100 -0.036100 " />
<Waypoint body="FemurR" p="-0.134200 0.818200 -0.008900 " />
</Unit>
<Unit name="L_Gluteus_Maximus3" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.035200 0.919200 -0.080700 " />
<Waypoint body="Pelvis" p="0.066500 0.880800 -0.111700 " />
<Waypoint body="FemurL" p="0.124400 0.851200 -0.076200 " />
<Waypoint body="FemurL" p="0.130200 0.789300 -0.001200 " />
</Unit>
<Unit name="R_Gluteus_Maximus3" f0="370.520000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.035200 0.919200 -0.080700 " />
<Waypoint body="Pelvis" p="-0.066500 0.880800 -0.111700 " />
<Waypoint body="FemurR" p="-0.124400 0.851200 -0.076200 " />
<Waypoint body="FemurR" p="-0.130200 0.789300 -0.001200 " />
</Unit>
<Unit name="L_Gluteus_Maximus4" f0="370.520000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.045000 0.896000 -0.064800 " />
<Waypoint body="Pelvis" p="0.064500 0.848700 -0.073000 " />
<Waypoint body="FemurL" p="0.115600 0.809100 -0.040200 " />
<Waypoint body="FemurL" p="0.129100 0.772300 0.002800 " />
</Unit>
<Unit name="R_Gluteus_Maximus4" f0="370.520000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.045000 0.896000 -0.064800 " />
<Waypoint body="Pelvis" p="-0.064500 0.848700 -0.073000 " />
<Waypoint body="FemurR" p="-0.115600 0.809100 -0.040200 " />
<Waypoint body="FemurR" p="-0.129100 0.772300 0.002800 " />
</Unit>
<Unit name="L_Gluteus_Medius" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.129500 1.013800 0.028700 " />
<Waypoint body="FemurL" p="0.157200 0.945600 -0.005300 " />
<Waypoint body="FemurL" p="0.157400 0.923400 -0.006700 " />
</Unit>
<Unit name="R_Gluteus_Medius" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.129500 1.013800 0.028700 " />
<Waypoint body="FemurR" p="-0.157200 0.945600 -0.005300 " />
<Waypoint body="FemurR" p="-0.157400 0.923400 -0.006700 " />
</Unit>
<Unit name="L_Gluteus_Medius1" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.128200 1.067300 -0.029900 " />
<Waypoint body="FemurL" p="0.155500 0.950900 -0.026500 " />
<Waypoint body="FemurL" p="0.165600 0.891400 -0.008800 " />
</Unit>
<Unit name="R_Gluteus_Medius1" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.128200 1.067300 -0.029900 " />
<Waypoint body="FemurR" p="-0.155500 0.950900 -0.026500 " />
<Waypoint body="FemurR" p="-0.165600 0.891400 -0.008800 " />
</Unit>
<Unit name="L_Gluteus_Medius2" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.079200 1.064500 -0.069600 " />
<Waypoint body="Pelvis" p="0.122200 1.028400 -0.073600 " />
<Waypoint body="FemurL" p="0.159000 0.918600 -0.029900 " />
<Waypoint body="FemurL" p="0.159700 0.891200 -0.021000 " />
</Unit>
<Unit name="R_Gluteus_Medius2" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.079200 1.064500 -0.069600 " />
<Waypoint body="Pelvis" p="-0.122200 1.028400 -0.073600 " />
<Waypoint body="FemurR" p="-0.159000 0.918600 -0.029900 " />
<Waypoint body="FemurR" p="-0.159700 0.891200 -0.021000 " />
</Unit>
<Unit name="L_Gluteus_Medius3" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.061100 1.008700 -0.087500 " />
<Waypoint body="Pelvis" p="0.088300 0.988400 -0.082900 " />
<Waypoint body="FemurL" p="0.139700 0.936300 -0.048200 " />
<Waypoint body="FemurL" p="0.147400 0.899400 -0.033100 " />
</Unit>
<Unit name="R_Gluteus_Medius3" f0="549.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.061100 1.008700 -0.087500 " />
<Waypoint body="Pelvis" p="-0.088300 0.988400 -0.082900 " />
<Waypoint body="FemurR" p="-0.139700 0.936300 -0.048200 " />
<Waypoint body="FemurR" p="-0.147400 0.899400 -0.033100 " />
</Unit>
<Unit name="L_Gluteus_Minimus" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.068600 0.992600 -0.066800 " />
<Waypoint body="Pelvis" p="0.097800 0.971500 -0.059200 " />
<Waypoint body="FemurL" p="0.152300 0.932100 -0.011500 " />
<Waypoint body="FemurL" p="0.160700 0.905400 -0.004900 " />
</Unit>
<Unit name="R_Gluteus_Minimus" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.068600 0.992600 -0.066800 " />
<Waypoint body="Pelvis" p="-0.097800 0.971500 -0.059200 " />
<Waypoint body="FemurR" p="-0.152300 0.932100 -0.011500 " />
<Waypoint body="FemurR" p="-0.160700 0.905400 -0.004900 " />
</Unit>
<Unit name="L_Gluteus_Minimus1" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.098200 1.046000 -0.041700 " />
<Waypoint body="Pelvis" p="0.125700 1.015900 -0.040000 " />
<Waypoint body="FemurL" p="0.156400 0.933100 -0.001700 " />
<Waypoint body="FemurL" p="0.158300 0.893000 0.002200 " />
</Unit>
<Unit name="R_Gluteus_Minimus1" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.098200 1.046000 -0.041700 " />
<Waypoint body="Pelvis" p="-0.125700 1.015900 -0.040000 " />
<Waypoint body="FemurR" p="-0.156400 0.933100 -0.001700 " />
<Waypoint body="FemurR" p="-0.158300 0.893000 0.002200 " />
</Unit>
<Unit name="L_Gluteus_Minimus2" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.133400 1.037300 0.009000 " />
<Waypoint body="FemurL" p="0.154800 0.933000 0.005900 " />
<Waypoint body="FemurL" p="0.151600 0.897400 0.004600 " />
</Unit>
<Unit name="R_Gluteus_Minimus2" f0="198.333333" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.133400 1.037300 0.009000 " />
<Waypoint body="FemurR" p="-0.154800 0.933000 0.005900 " />
<Waypoint body="FemurR" p="-0.151600 0.897400 0.004600 " />
</Unit>
<Unit name="L_Gracilis" f0="137.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.011300 0.903100 0.030700 " />
<Waypoint body="FemurL" p="0.048900 0.529700 -0.042600 " />
<Waypoint body="TibiaL" p="0.061600 0.479800 -0.021700 " />
<Waypoint body="TibiaL" p="0.077600 0.465700 -0.003300 " />
</Unit>
<Unit name="R_Gracilis" f0="137.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.011300 0.903100 0.030700 " />
<Waypoint body="FemurR" p="-0.048900 0.529700 -0.042600 " />
<Waypoint body="TibiaR" p="-0.061600 0.479800 -0.021700 " />
<Waypoint body="TibiaR" p="-0.077600 0.465700 -0.003300 " />
</Unit>
<Unit name="L_Inferior_Gemellus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.066200 0.885700 -0.062100 " />
<Waypoint body="FemurL" p="0.124300 0.908300 -0.046900 " />
<Waypoint body="FemurL" p="0.135700 0.908900 -0.033200 " />
</Unit>
<Unit name="R_Inferior_Gemellus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.066200 0.885700 -0.062100 " />
<Waypoint body="FemurR" p="-0.124300 0.908300 -0.046900 " />
<Waypoint body="FemurR" p="-0.135700 0.908900 -0.033200 " />
</Unit>
<Unit name="L_Infraspinatus1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.091600 1.368800 -0.127100 " />
<Waypoint body="ShoulderL" p="0.187000 1.423800 -0.075700 " />
<Waypoint body="ShoulderL" p="0.203800 1.458100 -0.046900 " />
<Waypoint body="ShoulderL" p="0.198000 1.461500 -0.027000 " />
</Unit>
<Unit name="R_Infraspinatus1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.091600 1.368800 -0.127100 " />
<Waypoint body="ShoulderR" p="-0.187000 1.423800 -0.075700 " />
<Waypoint body="ShoulderR" p="-0.203800 1.458100 -0.046900 " />
<Waypoint body="ShoulderR" p="-0.198000 1.461500 -0.027000 " />
</Unit>
<Unit name="L_Latissimus_Dorsi" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.311400 -0.126600 " />
<Waypoint body="ShoulderL" p="0.115800 1.327800 -0.129300 " />
<Waypoint body="ShoulderL" p="0.152500 1.353400 -0.094600 " />
<Waypoint body="ArmL" p="0.244800 1.415400 -0.039800 " />
<Waypoint body="ArmL" p="0.224400 1.432000 -0.016800 " />
</Unit>
<Unit name="R_Latissimus_Dorsi" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.311400 -0.126600 " />
<Waypoint body="ShoulderR" p="-0.115800 1.327800 -0.129300 " />
<Waypoint body="ShoulderR" p="-0.152500 1.353400 -0.094600 " />
<Waypoint body="ArmR" p="-0.244800 1.415400 -0.039800 " />
<Waypoint body="ArmR" p="-0.224400 1.432000 -0.016800 " />
</Unit>
<Unit name="L_Latissimus_Dorsi3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="0.000600 1.103500 -0.092000 " />
<Waypoint body="Torso" p="0.101200 1.233200 -0.119000 " />
<Waypoint body="Torso" p="0.153700 1.300700 -0.098800 " />
<Waypoint body="ArmL" p="0.279500 1.420700 -0.045900 " />
<Waypoint body="ArmL" p="0.264300 1.422600 -0.024800 " />
<Waypoint body="ArmL" p="0.250400 1.435600 -0.016200 " />
</Unit>
<Unit name="R_Latissimus_Dorsi3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="-0.000600 1.103500 -0.092000 " />
<Waypoint body="Torso" p="-0.101200 1.233200 -0.119000 " />
<Waypoint body="Torso" p="-0.153700 1.300700 -0.098800 " />
<Waypoint body="ArmR" p="-0.279500 1.420700 -0.045900 " />
<Waypoint body="ArmR" p="-0.264300 1.422600 -0.024800 " />
<Waypoint body="ArmR" p="-0.250400 1.435600 -0.016200 " />
</Unit>
<Unit name="L_Latissimus_Dorsi5" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.077400 1.063600 -0.076000 " />
<Waypoint body="Torso" p="0.117900 1.178400 -0.077500 " />
<Waypoint body="Torso" p="0.169200 1.298600 -0.060000 " />
<Waypoint body="ArmL" p="0.282700 1.416800 -0.032700 " />
<Waypoint body="ArmL" p="0.259200 1.435500 -0.017400 " />
</Unit>
<Unit name="R_Latissimus_Dorsi5" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.077400 1.063600 -0.076000 " />
<Waypoint body="Torso" p="-0.117900 1.178400 -0.077500 " />
<Waypoint body="Torso" p="-0.169200 1.298600 -0.060000 " />
<Waypoint body="ArmR" p="-0.282700 1.416800 -0.032700 " />
<Waypoint body="ArmR" p="-0.259200 1.435500 -0.017400 " />
</Unit>
<Unit name="L_Longissimus_Capitis3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.026200 1.428700 -0.102300 " />
<Waypoint body="Torso" p="0.030500 1.500200 -0.074800 " />
<Waypoint body="Neck" p="0.031100 1.565300 -0.039000 " />
<Waypoint body="Head" p="0.057000 1.608800 -0.017300 " />
</Unit>
<Unit name="R_Longissimus_Capitis3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.026200 1.428700 -0.102300 " />
<Waypoint body="Torso" p="-0.030500 1.500200 -0.074800 " />
<Waypoint body="Neck" p="-0.031100 1.565300 -0.039000 " />
<Waypoint body="Head" p="-0.057000 1.608800 -0.017300 " />
</Unit>
<Unit name="L_Longissimus_Thoracis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.003600 0.898800 -0.072600 " />
<Waypoint body="Pelvis" p="0.020100 1.003800 -0.104000 " />
<Waypoint body="Spine" p="0.020800 1.092300 -0.080300 " />
<Waypoint body="Torso" p="0.029200 1.198600 -0.095400 " />
<Waypoint body="Torso" p="0.034500 1.274000 -0.119600 " />
<Waypoint body="Torso" p="0.036400 1.393700 -0.115200 " />
<Waypoint body="Torso" p="0.034300 1.454000 -0.093800 " />
<Waypoint body="Neck" p="0.032000 1.501100 -0.040400 " />
</Unit>
<Unit name="R_Longissimus_Thoracis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.003600 0.898800 -0.072600 " />
<Waypoint body="Pelvis" p="-0.020100 1.003800 -0.104000 " />
<Waypoint body="Spine" p="-0.020800 1.092300 -0.080300 " />
<Waypoint body="Torso" p="-0.029200 1.198600 -0.095400 " />
<Waypoint body="Torso" p="-0.034500 1.274000 -0.119600 " />
<Waypoint body="Torso" p="-0.036400 1.393700 -0.115200 " />
<Waypoint body="Torso" p="-0.034300 1.454000 -0.093800 " />
<Waypoint body="Neck" p="-0.032000 1.501100 -0.040400 " />
</Unit>
<Unit name="L_Longus_Capitis2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="0.019100 1.526900 -0.012500 " />
<Waypoint body="Neck" p="0.010400 1.588100 0.011300 " />
<Waypoint body="Head" p="0.002100 1.622700 0.010300 " />
</Unit>
<Unit name="R_Longus_Capitis2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="-0.019100 1.526900 -0.012500 " />
<Waypoint body="Neck" p="-0.010400 1.588100 0.011300 " />
<Waypoint body="Head" p="-0.002100 1.622700 0.010300 " />
</Unit>
<Unit name="L_Multifidus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.009100 0.923600 -0.091700 " />
<Waypoint body="Pelvis" p="0.011100 0.974800 -0.110600 " />
<Waypoint body="Pelvis" p="0.011700 1.013300 -0.100100 " />
<Waypoint body="Spine" p="0.009300 1.107200 -0.077700 " />
<Waypoint body="Torso" p="0.005600 1.179500 -0.085200 " />
<Waypoint body="Torso" p="0.000500 1.284600 -0.120700 " />
</Unit>
<Unit name="R_Multifidus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.009100 0.923600 -0.091700 " />
<Waypoint body="Pelvis" p="-0.011100 0.974800 -0.110600 " />
<Waypoint body="Pelvis" p="-0.011700 1.013300 -0.100100 " />
<Waypoint body="Spine" p="-0.009300 1.107200 -0.077700 " />
<Waypoint body="Torso" p="-0.005600 1.179500 -0.085200 " />
<Waypoint body="Torso" p="-0.000500 1.284600 -0.120700 " />
</Unit>
<Unit name="L_Obturator_Externus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.021200 0.911400 0.024700 " />
<Waypoint body="Pelvis" p="0.068400 0.894500 -0.028500 " />
<Waypoint body="FemurL" p="0.138000 0.909800 -0.026500 " />
</Unit>
<Unit name="R_Obturator_Externus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.021200 0.911400 0.024700 " />
<Waypoint body="Pelvis" p="-0.068400 0.894500 -0.028500 " />
<Waypoint body="FemurR" p="-0.138000 0.909800 -0.026500 " />
</Unit>
<Unit name="L_Obturator_Internus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.018500 0.905800 0.013900 " />
<Waypoint body="Pelvis" p="0.051600 0.905300 -0.058800 " />
<Waypoint body="Pelvis" p="0.074000 0.904600 -0.070500 " />
<Waypoint body="FemurL" p="0.138600 0.914000 -0.030600 " />
</Unit>
<Unit name="R_Obturator_Internus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.018500 0.905800 0.013900 " />
<Waypoint body="Pelvis" p="-0.051600 0.905300 -0.058800 " />
<Waypoint body="Pelvis" p="-0.074000 0.904600 -0.070500 " />
<Waypoint body="FemurR" p="-0.138600 0.914000 -0.030600 " />
</Unit>
<Unit name="L_Omohyoid" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.125400 1.456800 -0.062000 " />
<Waypoint body="ShoulderL" p="0.111000 1.479500 -0.032300 " />
<Waypoint body="Torso" p="0.046600 1.491300 0.000000 " />
<Waypoint body="ShoulderL" p="0.018300 1.506200 0.025200 " />
<Waypoint body="Head" p="0.013200 1.560100 0.043100 " />
</Unit>
<Unit name="R_Omohyoid" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.125400 1.456800 -0.062000 " />
<Waypoint body="ShoulderR" p="-0.111000 1.479500 -0.032300 " />
<Waypoint body="Torso" p="-0.046600 1.491300 0.000000 " />
<Waypoint body="ShoulderR" p="-0.018300 1.506200 0.025200 " />
<Waypoint body="Head" p="-0.013200 1.560100 0.043100 " />
</Unit>
<Unit name="L_Palmaris_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.522100 1.424400 -0.018800 " />
<Waypoint body="ForeArmL" p="0.643800 1.433500 0.000000 " />
<Waypoint body="ForeArmL" p="0.784200 1.459100 0.025400 " />
<Waypoint body="HandL" p="0.886300 1.461800 0.033000 " />
</Unit>
<Unit name="R_Palmaris_Longus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.522100 1.424400 -0.018800 " />
<Waypoint body="ForeArmR" p="-0.643800 1.433500 0.000000 " />
<Waypoint body="ForeArmR" p="-0.784200 1.459100 0.025400 " />
<Waypoint body="HandR" p="-0.886300 1.461800 0.033000 " />
</Unit>
<Unit name="L_Pectineus" f0="177.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.040400 0.927800 0.032700 " />
<Waypoint body="Pelvis" p="0.057200 0.917900 0.046900 " />
<Waypoint body="FemurL" p="0.101100 0.836800 -0.007700 " />
<Waypoint body="FemurL" p="0.112200 0.830300 -0.004200 " />
</Unit>
<Unit name="R_Pectineus" f0="177.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.040400 0.927800 0.032700 " />
<Waypoint body="Pelvis" p="-0.057200 0.917900 0.046900 " />
<Waypoint body="FemurR" p="-0.101100 0.836800 -0.007700 " />
<Waypoint body="FemurR" p="-0.112200 0.830300 -0.004200 " />
</Unit>
<Unit name="L_Pectoralis_Major" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.054800 1.462800 0.020200 " />
<Waypoint body="Torso" p="0.102100 1.436100 0.043400 " />
<Waypoint body="Torso" p="0.151800 1.405700 0.027600 " />
<Waypoint body="ArmL" p="0.244900 1.401200 0.003200 " />
<Waypoint body="ArmL" p="0.274200 1.446800 -0.009800 " />
</Unit>
<Unit name="R_Pectoralis_Major" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.054800 1.462800 0.020200 " />
<Waypoint body="Torso" p="-0.102100 1.436100 0.043400 " />
<Waypoint body="Torso" p="-0.151800 1.405700 0.027600 " />
<Waypoint body="ArmR" p="-0.244900 1.401200 0.003200 " />
<Waypoint body="ArmR" p="-0.274200 1.446800 -0.009800 " />
</Unit>
<Unit name="L_Pectoralis_Major2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.004300 1.367700 0.077300 " />
<Waypoint body="Torso" p="0.076600 1.371900 0.084300 " />
<Waypoint body="Torso" p="0.146000 1.374200 0.050500 " />
<Waypoint body="ArmL" p="0.248300 1.409600 -0.002500 " />
<Waypoint body="ArmL" p="0.247700 1.443900 -0.011600 " />
</Unit>
<Unit name="R_Pectoralis_Major2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.004300 1.367700 0.077300 " />
<Waypoint body="Torso" p="-0.076600 1.371900 0.084300 " />
<Waypoint body="Torso" p="-0.146000 1.374200 0.050500 " />
<Waypoint body="ArmR" p="-0.248300 1.409600 -0.002500 " />
<Waypoint body="ArmR" p="-0.247700 1.443900 -0.011600 " />
</Unit>
<Unit name="L_Pectoralis_Minor1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.085500 1.347400 0.079900 " />
<Waypoint body="Torso" p="0.114400 1.373600 0.059700 " />
<Waypoint body="ShoulderL" p="0.159200 1.448100 -0.017700 " />
</Unit>
<Unit name="R_Pectoralis_Minor1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.085500 1.347400 0.079900 " />
<Waypoint body="Torso" p="-0.114400 1.373600 0.059700 " />
<Waypoint body="ShoulderR" p="-0.159200 1.448100 -0.017700 " />
</Unit>
<Unit name="L_Peroneus_Brevis" f0="305.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.117900 0.283900 -0.044900 " />
<Waypoint body="TibiaL" p="0.112200 0.109700 -0.067800 " />
<Waypoint body="TalusL" p="0.101900 0.067700 -0.069000 " />
<Waypoint body="TalusL" p="0.116900 0.024300 -0.015100 " />
</Unit>
<Unit name="R_Peroneus_Brevis" f0="305.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.117900 0.283900 -0.044900 " />
<Waypoint body="TibiaR" p="-0.112200 0.109700 -0.067800 " />
<Waypoint body="TalusR" p="-0.101900 0.067700 -0.069000 " />
<Waypoint body="TalusR" p="-0.116900 0.024300 -0.015100 " />
</Unit>
<Unit name="L_Peroneus_Longus" f0="653.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.140500 0.479500 -0.026300 " />
<Waypoint body="TibiaL" p="0.152700 0.366000 -0.037900 " />
<Waypoint body="TibiaL" p="0.115600 0.103700 -0.063600 " />
<Waypoint body="TalusL" p="0.104900 0.059000 -0.068700 " />
<Waypoint body="TalusL" p="0.111800 0.039900 -0.041200 " />
<Waypoint body="TalusL" p="0.085000 0.037700 -0.011400 " />
<Waypoint body="TalusL" p="0.072100 0.036600 0.025000 " />
</Unit>
<Unit name="R_Peroneus_Longus" f0="653.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.140500 0.479500 -0.026300 " />
<Waypoint body="TibiaR" p="-0.152700 0.366000 -0.037900 " />
<Waypoint body="TibiaR" p="-0.115600 0.103700 -0.063600 " />
<Waypoint body="TalusR" p="-0.104900 0.059000 -0.068700 " />
<Waypoint body="TalusR" p="-0.111800 0.039900 -0.041200 " />
<Waypoint body="TalusR" p="-0.085000 0.037700 -0.011400 " />
<Waypoint body="TalusR" p="-0.072100 0.036600 0.025000 " />
</Unit>
<Unit name="L_Peroneus_Tertius" f0="45.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.107500 0.133200 -0.052300 " />
<Waypoint body="TibiaL" p="0.112000 0.081900 -0.026000 " />
<Waypoint body="TalusL" p="0.118900 0.034800 0.002500 " />
</Unit>
<Unit name="R_Peroneus_Tertius" f0="45.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.107500 0.133200 -0.052300 " />
<Waypoint body="TibiaR" p="-0.112000 0.081900 -0.026000 " />
<Waypoint body="TalusR" p="-0.118900 0.034800 0.002500 " />
</Unit>
<Unit name="L_Peroneus_Tertius1" f0="45.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.107500 0.133200 -0.052300 " />
<Waypoint body="TibiaL" p="0.112000 0.081900 -0.026000 " />
<Waypoint body="TalusL" p="0.118900 0.034800 0.002500 " />
</Unit>
<Unit name="R_Peroneus_Tertius1" f0="45.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.107500 0.133200 -0.052300 " />
<Waypoint body="TibiaR" p="-0.112000 0.081900 -0.026000 " />
<Waypoint body="TalusR" p="-0.118900 0.034800 0.002500 " />
</Unit>
<Unit name="L_Piriformis" f0="148.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.031600 0.981400 -0.089800 " />
<Waypoint body="FemurL" p="0.137200 0.930900 -0.025700 " />
</Unit>
<Unit name="R_Piriformis" f0="148.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.031600 0.981400 -0.089800 " />
<Waypoint body="FemurR" p="-0.137200 0.930900 -0.025700 " />
</Unit>
<Unit name="L_Piriformis1" f0="148.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.016000 0.936300 -0.088100 " />
<Waypoint body="FemurL" p="0.139700 0.920500 -0.022800 " />
</Unit>
<Unit name="R_Piriformis1" f0="148.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.016000 0.936300 -0.088100 " />
<Waypoint body="FemurR" p="-0.139700 0.920500 -0.022800 " />
</Unit>
<Unit name="L_Plantaris" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.119300 0.565800 -0.013200 " />
<Waypoint body="FemurL" p="0.111500 0.549400 -0.037000 " />
<Waypoint body="TibiaL" p="0.106800 0.498000 -0.049400 " />
<Waypoint body="TibiaL" p="0.073700 0.102800 -0.079600 " />
<Waypoint body="TalusL" p="0.075100 0.037300 -0.098000 " />
</Unit>
<Unit name="R_Plantaris" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.119300 0.565800 -0.013200 " />
<Waypoint body="FemurR" p="-0.111500 0.549400 -0.037000 " />
<Waypoint body="TibiaR" p="-0.106800 0.498000 -0.049400 " />
<Waypoint body="TibiaR" p="-0.073700 0.102800 -0.079600 " />
<Waypoint body="TalusR" p="-0.075100 0.037300 -0.098000 " />
</Unit>
<Unit name="L_Platysma1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.089300 1.451000 0.033000 " />
<Waypoint body="ShoulderL" p="0.047400 1.475800 0.018700 " />
<Waypoint body="Neck" p="0.030800 1.542500 0.022700 " />
<Waypoint body="Head" p="0.028400 1.555400 0.037200 " />
<Waypoint body="Head" p="0.033500 1.562100 0.068400 " />
</Unit>
<Unit name="R_Platysma1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.089300 1.451000 0.033000 " />
<Waypoint body="ShoulderR" p="-0.047400 1.475800 0.018700 " />
<Waypoint body="Neck" p="-0.030800 1.542500 0.022700 " />
<Waypoint body="Head" p="-0.028400 1.555400 0.037200 " />
<Waypoint body="Head" p="-0.033500 1.562100 0.068400 " />
</Unit>
<Unit name="L_Popliteus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.137300 0.540300 -0.012900 " />
<Waypoint body="FemurL" p="0.136300 0.526900 -0.033300 " />
<Waypoint body="TibiaL" p="0.116500 0.500900 -0.042900 " />
<Waypoint body="TibiaL" p="0.080500 0.455000 -0.018800 " />
</Unit>
<Unit name="R_Popliteus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.137300 0.540300 -0.012900 " />
<Waypoint body="FemurR" p="-0.136300 0.526900 -0.033300 " />
<Waypoint body="TibiaR" p="-0.116500 0.500900 -0.042900 " />
<Waypoint body="TibiaR" p="-0.080500 0.455000 -0.018800 " />
</Unit>
<Unit name="L_Psoas_Major" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.014600 1.222700 -0.048100 " />
<Waypoint body="Pelvis" p="0.092000 1.073400 -0.031100 " />
<Waypoint body="Pelvis" p="0.087100 0.931100 0.044900 " />
<Waypoint body="FemurL" p="0.094500 0.881500 0.001300 " />
<Waypoint body="FemurL" p="0.109600 0.850500 -0.015600 " />
</Unit>
<Unit name="R_Psoas_Major" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.014600 1.222700 -0.048100 " />
<Waypoint body="Pelvis" p="-0.092000 1.073400 -0.031100 " />
<Waypoint body="Pelvis" p="-0.087100 0.931100 0.044900 " />
<Waypoint body="FemurR" p="-0.094500 0.881500 0.001300 " />
<Waypoint body="FemurR" p="-0.109600 0.850500 -0.015600 " />
</Unit>
<Unit name="L_Psoas_Major1" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="0.021400 1.132400 -0.037200 " />
<Waypoint body="Pelvis" p="0.068300 1.033300 -0.020900 " />
<Waypoint body="Pelvis" p="0.074400 0.930400 0.043900 " />
<Waypoint body="FemurL" p="0.092400 0.877400 -0.007300 " />
<Waypoint body="FemurL" p="0.109800 0.856700 -0.009200 " />
</Unit>
<Unit name="R_Psoas_Major1" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="-0.021400 1.132400 -0.037200 " />
<Waypoint body="Pelvis" p="-0.068300 1.033300 -0.020900 " />
<Waypoint body="Pelvis" p="-0.074400 0.930400 0.043900 " />
<Waypoint body="FemurR" p="-0.092400 0.877400 -0.007300 " />
<Waypoint body="FemurR" p="-0.109800 0.856700 -0.009200 " />
</Unit>
<Unit name="L_Psoas_Major2" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="0.018400 1.048500 -0.037400 " />
<Waypoint body="Pelvis" p="0.053600 1.010400 -0.032900 " />
<Waypoint body="Pelvis" p="0.068500 0.929500 0.036600 " />
<Waypoint body="FemurL" p="0.092400 0.879400 0.001500 " />
<Waypoint body="FemurL" p="0.108500 0.856300 -0.014800 " />
</Unit>
<Unit name="R_Psoas_Major2" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="-0.018400 1.048500 -0.037400 " />
<Waypoint body="Pelvis" p="-0.053600 1.010400 -0.032900 " />
<Waypoint body="Pelvis" p="-0.068500 0.929500 0.036600 " />
<Waypoint body="FemurR" p="-0.092400 0.879400 0.001500 " />
<Waypoint body="FemurR" p="-0.108500 0.856300 -0.014800 " />
</Unit>
<Unit name="L_Psoas_Minor" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.011300 1.221400 -0.045600 " />
<Waypoint body="Spine" p="0.055300 1.120100 -0.011600 " />
<Waypoint body="Pelvis" p="0.063300 0.999200 -0.005400 " />
<Waypoint body="Pelvis" p="0.057800 0.938700 0.019800 " />
</Unit>
<Unit name="R_Psoas_Minor" f0="239.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.011300 1.221400 -0.045600 " />
<Waypoint body="Spine" p="-0.055300 1.120100 -0.011600 " />
<Waypoint body="Pelvis" p="-0.063300 0.999200 -0.005400 " />
<Waypoint body="Pelvis" p="-0.057800 0.938700 0.019800 " />
</Unit>
<Unit name="L_Quadratus_Femoris" f0="254.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.085900 0.917600 -0.043300 " />
<Waypoint body="Pelvis" p="0.108700 0.897900 -0.049000 " />
<Waypoint body="FemurL" p="0.136100 0.879700 -0.028600 " />
</Unit>
<Unit name="R_Quadratus_Femoris" f0="254.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.085900 0.917600 -0.043300 " />
<Waypoint body="Pelvis" p="-0.108700 0.897900 -0.049000 " />
<Waypoint body="FemurR" p="-0.136100 0.879700 -0.028600 " />
</Unit>
<Unit name="L_Quadratus_Lumborum1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.077300 1.068600 -0.069300 " />
<Waypoint body="Torso" p="0.047900 1.184700 -0.083700 " />
</Unit>
<Unit name="R_Quadratus_Lumborum1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.077300 1.068600 -0.069300 " />
<Waypoint body="Torso" p="-0.047900 1.184700 -0.083700 " />
</Unit>
<Unit name="L_Rectus_Femoris" f0="424.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.107500 0.980300 0.014400 " />
<Waypoint body="FemurL" p="0.116500 0.941600 0.031000 " />
<Waypoint body="FemurL" p="0.104500 0.602800 0.043200 " />
<Waypoint body="TibiaL" p="0.110800 0.542200 0.034900 " />
</Unit>
<Unit name="R_Rectus_Femoris" f0="424.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.107500 0.980300 0.014400 " />
<Waypoint body="FemurR" p="-0.116500 0.941600 0.031000 " />
<Waypoint body="FemurR" p="-0.104500 0.602800 0.043200 " />
<Waypoint body="TibiaR" p="-0.110800 0.542200 0.034900 " />
</Unit>
<Unit name="L_Rectus_Femoris1" f0="424.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.105900 0.973500 0.016500 " />
<Waypoint body="FemurL" p="0.106500 0.926300 0.031800 " />
<Waypoint body="FemurL" p="0.081600 0.606600 0.043100 " />
<Waypoint body="TibiaL" p="0.075700 0.539900 0.032000 " />
</Unit>
<Unit name="R_Rectus_Femoris1" f0="424.400000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.105900 0.973500 0.016500 " />
<Waypoint body="FemurR" p="-0.106500 0.926300 0.031800 " />
<Waypoint body="FemurR" p="-0.081600 0.606600 0.043100 " />
<Waypoint body="TibiaR" p="-0.075700 0.539900 0.032000 " />
</Unit>
<Unit name="L_Rhomboid_Major2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.426100 -0.119400 " />
<Waypoint body="Torso" p="0.040400 1.408500 -0.128600 " />
<Waypoint body="ShoulderL" p="0.086800 1.391200 -0.123400 " />
</Unit>
<Unit name="R_Rhomboid_Major2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.426100 -0.119400 " />
<Waypoint body="Torso" p="-0.040400 1.408500 -0.128600 " />
<Waypoint body="ShoulderR" p="-0.086800 1.391200 -0.123400 " />
</Unit>
<Unit name="L_Rhomboid_Minor" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="0.000000 1.507800 -0.087400 " />
<Waypoint body="Torso" p="0.022900 1.494400 -0.088100 " />
<Waypoint body="ShoulderL" p="0.090700 1.461100 -0.089900 " />
</Unit>
<Unit name="R_Rhomboid_Minor" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="-0.000000 1.507800 -0.087400 " />
<Waypoint body="Torso" p="-0.022900 1.494400 -0.088100 " />
<Waypoint body="ShoulderR" p="-0.090700 1.461100 -0.089900 " />
</Unit>
<Unit name="L_Sartorius" f0="113.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.124100 1.009800 0.031400 " />
<Waypoint body="FemurL" p="0.035200 0.707300 0.026000 " />
<Waypoint body="TibiaL" p="0.054400 0.496500 -0.022400 " />
<Waypoint body="TibiaL" p="0.090700 0.453900 0.009200 " />
</Unit>
<Unit name="R_Sartorius" f0="113.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.124100 1.009800 0.031400 " />
<Waypoint body="FemurR" p="-0.035200 0.707300 0.026000 " />
<Waypoint body="TibiaR" p="-0.054400 0.496500 -0.022400 " />
<Waypoint body="TibiaR" p="-0.090700 0.453900 0.009200 " />
</Unit>
<Unit name="L_Scalene_Anterior1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.058400 1.467000 -0.005800 " />
<Waypoint body="Neck" p="0.035000 1.504000 -0.009500 " />
<Waypoint body="Neck" p="0.018300 1.523600 -0.017300 " />
</Unit>
<Unit name="R_Scalene_Anterior1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.058400 1.467000 -0.005800 " />
<Waypoint body="Neck" p="-0.035000 1.504000 -0.009500 " />
<Waypoint body="Neck" p="-0.018300 1.523600 -0.017300 " />
</Unit>
<Unit name="L_Scalene_Middle4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.055600 1.481100 -0.034400 " />
<Waypoint body="Neck" p="0.039900 1.548400 -0.010800 " />
<Waypoint body="Neck" p="0.026700 1.571200 -0.006000 " />
</Unit>
<Unit name="R_Scalene_Middle4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.055600 1.481100 -0.034400 " />
<Waypoint body="Neck" p="-0.039900 1.548400 -0.010800 " />
<Waypoint body="Neck" p="-0.026700 1.571200 -0.006000 " />
</Unit>
<Unit name="L_Semimembranosus" f0="581.350000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.075100 0.901700 -0.057400 " />
<Waypoint body="Pelvis" p="0.070100 0.846200 -0.039100 " />
<Waypoint body="FemurL" p="0.053400 0.544300 -0.049600 " />
<Waypoint body="TibiaL" p="0.056700 0.511900 -0.042000 " />
<Waypoint body="TibiaL" p="0.062100 0.490300 -0.029700 " />
</Unit>
<Unit name="R_Semimembranosus" f0="581.350000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.075100 0.901700 -0.057400 " />
<Waypoint body="Pelvis" p="-0.070100 0.846200 -0.039100 " />
<Waypoint body="FemurR" p="-0.053400 0.544300 -0.049600 " />
<Waypoint body="TibiaR" p="-0.056700 0.511900 -0.042000 " />
<Waypoint body="TibiaR" p="-0.062100 0.490300 -0.029700 " />
</Unit>
<Unit name="L_Semimembranosus1" f0="581.350000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.078400 0.905300 -0.053300 " />
<Waypoint body="FemurL" p="0.093700 0.862300 -0.034300 " />
<Waypoint body="FemurL" p="0.104400 0.560200 -0.047900 " />
<Waypoint body="FemurL" p="0.081200 0.527200 -0.056200 " />
<Waypoint body="TibiaL" p="0.082000 0.495000 -0.042200 " />
</Unit>
<Unit name="R_Semimembranosus1" f0="581.350000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.078400 0.905300 -0.053300 " />
<Waypoint body="FemurR" p="-0.093700 0.862300 -0.034300 " />
<Waypoint body="FemurR" p="-0.104400 0.560200 -0.047900 " />
<Waypoint body="FemurR" p="-0.081200 0.527200 -0.056200 " />
<Waypoint body="TibiaR" p="-0.082000 0.495000 -0.042200 " />
</Unit>
<Unit name="L_Semispinalis_Capitis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.026000 1.431100 -0.100400 " />
<Waypoint body="Neck" p="0.014600 1.512500 -0.066300 " />
<Waypoint body="Neck" p="0.010900 1.566200 -0.054700 " />
<Waypoint body="Head" p="0.008700 1.614700 -0.069800 " />
</Unit>
<Unit name="R_Semispinalis_Capitis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.026000 1.431100 -0.100400 " />
<Waypoint body="Neck" p="-0.014600 1.512500 -0.066300 " />
<Waypoint body="Neck" p="-0.010900 1.566200 -0.054700 " />
<Waypoint body="Head" p="-0.008700 1.614700 -0.069800 " />
</Unit>
<Unit name="L_Semitendinosus" f0="301.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.068000 0.894100 -0.065200 " />
<Waypoint body="Pelvis" p="0.088100 0.853300 -0.046300 " />
<Waypoint body="FemurL" p="0.085600 0.565300 -0.061100 " />
<Waypoint body="TibiaL" p="0.070400 0.494600 -0.047500 " />
<Waypoint body="TibiaL" p="0.065500 0.471600 -0.026400 " />
<Waypoint body="TibiaL" p="0.079800 0.448400 -0.003800 " />
</Unit>
<Unit name="R_Semitendinosus" f0="301.900000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.068000 0.894100 -0.065200 " />
<Waypoint body="Pelvis" p="-0.088100 0.853300 -0.046300 " />
<Waypoint body="FemurR" p="-0.085600 0.565300 -0.061100 " />
<Waypoint body="TibiaR" p="-0.070400 0.494600 -0.047500 " />
<Waypoint body="TibiaR" p="-0.065500 0.471600 -0.026400 " />
<Waypoint body="TibiaR" p="-0.079800 0.448400 -0.003800 " />
</Unit>
<Unit name="L_Serratus_Anterior2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.090100 1.410200 -0.117700 " />
<Waypoint body="ShoulderL" p="0.104600 1.410000 -0.100000 " />
<Waypoint body="Torso" p="0.131200 1.404600 -0.043300 " />
<Waypoint body="Torso" p="0.120600 1.412000 -0.023900 " />
</Unit>
<Unit name="R_Serratus_Anterior2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.090100 1.410200 -0.117700 " />
<Waypoint body="ShoulderR" p="-0.104600 1.410000 -0.100000 " />
<Waypoint body="Torso" p="-0.131200 1.404600 -0.043300 " />
<Waypoint body="Torso" p="-0.120600 1.412000 -0.023900 " />
</Unit>
<Unit name="L_Serratus_Anterior4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.093900 1.348200 -0.128300 " />
<Waypoint body="Torso" p="0.115300 1.354700 -0.095600 " />
<Waypoint body="Torso" p="0.142600 1.328400 -0.011900 " />
<Waypoint body="Torso" p="0.126400 1.312800 0.047600 " />
</Unit>
<Unit name="R_Serratus_Anterior4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.093900 1.348200 -0.128300 " />
<Waypoint body="Torso" p="-0.115300 1.354700 -0.095600 " />
<Waypoint body="Torso" p="-0.142600 1.328400 -0.011900 " />
<Waypoint body="Torso" p="-0.126400 1.312800 0.047600 " />
</Unit>
<Unit name="L_Soleus" f0="1792.950000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.087500 0.468500 -0.023700 " />
<Waypoint body="TibiaL" p="0.087000 0.419000 -0.059200 " />
<Waypoint body="TibiaL" p="0.071100 0.150700 -0.060800 " />
<Waypoint body="TibiaL" p="0.073400 0.098300 -0.077500 " />
<Waypoint body="TalusL" p="0.072900 0.029900 -0.095200 " />
</Unit>
<Unit name="R_Soleus" f0="1792.950000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.087500 0.468500 -0.023700 " />
<Waypoint body="TibiaR" p="-0.087000 0.419000 -0.059200 " />
<Waypoint body="TibiaR" p="-0.071100 0.150700 -0.060800 " />
<Waypoint body="TibiaR" p="-0.073400 0.098300 -0.077500 " />
<Waypoint body="TalusR" p="-0.072900 0.029900 -0.095200 " />
</Unit>
<Unit name="L_Soleus1" f0="1792.950000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.136600 0.490900 -0.045500 " />
<Waypoint body="TibiaL" p="0.130800 0.393000 -0.075700 " />
<Waypoint body="TalusL" p="0.085300 0.086300 -0.085500 " />
<Waypoint body="TalusL" p="0.087600 0.029800 -0.098200 " />
</Unit>
<Unit name="R_Soleus1" f0="1792.950000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.136600 0.490900 -0.045500 " />
<Waypoint body="TibiaR" p="-0.130800 0.393000 -0.075700 " />
<Waypoint body="TalusR" p="-0.085300 0.086300 -0.085500 " />
<Waypoint body="TalusR" p="-0.087600 0.029800 -0.098200 " />
</Unit>
<Unit name="L_Splenius_Capitis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="0.000000 1.502100 -0.086200 " />
<Waypoint body="Neck" p="0.022400 1.555700 -0.056700 " />
<Waypoint body="Head" p="0.039100 1.595500 -0.048600 " />
<Waypoint body="Head" p="0.060600 1.639000 -0.045600 " />
</Unit>
<Unit name="R_Splenius_Capitis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="-0.000000 1.502100 -0.086200 " />
<Waypoint body="Neck" p="-0.022400 1.555700 -0.056700 " />
<Waypoint body="Head" p="-0.039100 1.595500 -0.048600 " />
<Waypoint body="Head" p="-0.060600 1.639000 -0.045600 " />
</Unit>
<Unit name="L_Splenius_Cervicis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.406700 -0.120100 " />
<Waypoint body="Torso" p="0.035700 1.496300 -0.079500 " />
<Waypoint body="Neck" p="0.039800 1.546300 -0.039200 " />
<Waypoint body="Neck" p="0.037500 1.591800 -0.005600 " />
</Unit>
<Unit name="R_Splenius_Cervicis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.406700 -0.120100 " />
<Waypoint body="Torso" p="-0.035700 1.496300 -0.079500 " />
<Waypoint body="Neck" p="-0.039800 1.546300 -0.039200 " />
<Waypoint body="Neck" p="-0.037500 1.591800 -0.005600 " />
</Unit>
<Unit name="L_Splenius_Cervicis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.344600 -0.125800 " />
<Waypoint body="Torso" p="0.033000 1.415500 -0.112900 " />
<Waypoint body="Torso" p="0.048700 1.492700 -0.070600 " />
<Waypoint body="Neck" p="0.042000 1.540600 -0.028700 " />
<Waypoint body="Neck" p="0.027000 1.571000 -0.006300 " />
</Unit>
<Unit name="R_Splenius_Cervicis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.344600 -0.125800 " />
<Waypoint body="Torso" p="-0.033000 1.415500 -0.112900 " />
<Waypoint body="Torso" p="-0.048700 1.492700 -0.070600 " />
<Waypoint body="Neck" p="-0.042000 1.540600 -0.028700 " />
<Waypoint body="Neck" p="-0.027000 1.571000 -0.006300 " />
</Unit>
<Unit name="L_Sternocleidomastoid1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.056800 1.465000 0.017500 " />
<Waypoint body="Neck" p="0.054600 1.572800 -0.030900 " />
<Waypoint body="Head" p="0.049000 1.638500 -0.060800 " />
</Unit>
<Unit name="R_Sternocleidomastoid1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.056800 1.465000 0.017500 " />
<Waypoint body="Neck" p="-0.054600 1.572800 -0.030900 " />
<Waypoint body="Head" p="-0.049000 1.638500 -0.060800 " />
</Unit>
<Unit name="L_Subclavian" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.053000 1.448900 0.021900 " />
<Waypoint body="ShoulderL" p="0.136800 1.460600 -0.024200 " />
</Unit>
<Unit name="R_Subclavian" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.053000 1.448900 0.021900 " />
<Waypoint body="ShoulderR" p="-0.136800 1.460600 -0.024200 " />
</Unit>
<Unit name="L_Subscapularis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.094400 1.384800 -0.119400 " />
<Waypoint body="ShoulderL" p="0.153300 1.419200 -0.040900 " />
<Waypoint body="ArmL" p="0.203200 1.406600 -0.016300 " />
<Waypoint body="ArmL" p="0.201300 1.413300 -0.017700 " />
</Unit>
<Unit name="R_Subscapularis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.094400 1.384800 -0.119400 " />
<Waypoint body="ShoulderR" p="-0.153300 1.419200 -0.040900 " />
<Waypoint body="ArmR" p="-0.203200 1.406600 -0.016300 " />
<Waypoint body="ArmR" p="-0.201300 1.413300 -0.017700 " />
</Unit>
<Unit name="L_Superior_Gemellus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.061300 0.918700 -0.059200 " />
<Waypoint body="Pelvis" p="0.090400 0.922400 -0.061300 " />
<Waypoint body="FemurL" p="0.140200 0.921300 -0.024800 " />
</Unit>
<Unit name="R_Superior_Gemellus" f0="50.000000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.061300 0.918700 -0.059200 " />
<Waypoint body="Pelvis" p="-0.090400 0.922400 -0.061300 " />
<Waypoint body="FemurR" p="-0.140200 0.921300 -0.024800 " />
</Unit>
<Unit name="L_Supraspinatus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.093300 1.467600 -0.081400 " />
<Waypoint body="ShoulderL" p="0.169200 1.460100 -0.044700 " />
<Waypoint body="ArmL" p="0.177300 1.434600 -0.027700 " />
<Waypoint body="ArmL" p="0.182700 1.440100 -0.022100 " />
</Unit>
<Unit name="R_Supraspinatus" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.093300 1.467600 -0.081400 " />
<Waypoint body="ShoulderR" p="-0.169200 1.460100 -0.044700 " />
<Waypoint body="ArmR" p="-0.177300 1.434600 -0.027700 " />
<Waypoint body="ArmR" p="-0.182700 1.440100 -0.022100 " />
</Unit>
<Unit name="L_Tensor_Fascia_Lata" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.137300 1.062800 -0.023900 " />
<Waypoint body="FemurL" p="0.162800 0.923700 -0.024600 " />
<Waypoint body="FemurL" p="0.159900 0.811900 -0.004500 " />
<Waypoint body="FemurL" p="0.141700 0.555800 0.005600 " />
<Waypoint body="TibiaL" p="0.132200 0.482000 -0.007900 " />
</Unit>
<Unit name="R_Tensor_Fascia_Lata" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.137300 1.062800 -0.023900 " />
<Waypoint body="FemurR" p="-0.162800 0.923700 -0.024600 " />
<Waypoint body="FemurR" p="-0.159900 0.811900 -0.004500 " />
<Waypoint body="FemurR" p="-0.141700 0.555800 0.005600 " />
<Waypoint body="TibiaR" p="-0.132200 0.482000 -0.007900 " />
</Unit>
<Unit name="L_Tensor_Fascia_Lata1" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.135400 1.030700 0.019200 " />
<Waypoint body="FemurL" p="0.115400 0.920800 0.055100 " />
<Waypoint body="FemurL" p="0.144300 0.607000 0.025000 " />
<Waypoint body="TibiaL" p="0.110600 0.542300 0.034200 " />
</Unit>
<Unit name="R_Tensor_Fascia_Lata1" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.135400 1.030700 0.019200 " />
<Waypoint body="FemurR" p="-0.115400 0.920800 0.055100 " />
<Waypoint body="FemurR" p="-0.144300 0.607000 0.025000 " />
<Waypoint body="TibiaR" p="-0.110600 0.542300 0.034200 " />
</Unit>
<Unit name="L_Tensor_Fascia_Lata2" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.142900 1.049200 0.003200 " />
<Waypoint body="FemurL" p="0.159000 0.917700 0.021900 " />
<Waypoint body="FemurL" p="0.134600 0.557100 0.015600 " />
<Waypoint body="TibiaL" p="0.121600 0.477400 0.004900 " />
</Unit>
<Unit name="R_Tensor_Fascia_Lata2" f0="77.500000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.142900 1.049200 0.003200 " />
<Waypoint body="FemurR" p="-0.159000 0.917700 0.021900 " />
<Waypoint body="FemurR" p="-0.134600 0.557100 0.015600 " />
<Waypoint body="TibiaR" p="-0.121600 0.477400 0.004900 " />
</Unit>
<Unit name="L_Teres_Major" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.100000 1.336900 -0.131100 " />
<Waypoint body="ShoulderL" p="0.159000 1.374300 -0.101300 " />
<Waypoint body="ArmL" p="0.250600 1.431000 -0.052200 " />
<Waypoint body="ArmL" p="0.243500 1.430100 -0.021200 " />
</Unit>
<Unit name="R_Teres_Major" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.100000 1.336900 -0.131100 " />
<Waypoint body="ShoulderR" p="-0.159000 1.374300 -0.101300 " />
<Waypoint body="ArmR" p="-0.250600 1.431000 -0.052200 " />
<Waypoint body="ArmR" p="-0.243500 1.430100 -0.021200 " />
</Unit>
<Unit name="L_Tibialis_Anterior" f0="673.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.130300 0.488500 -0.010100 " />
<Waypoint body="TibiaL" p="0.072200 0.103100 -0.014000 " />
<Waypoint body="TalusL" p="0.055900 0.061300 -0.009100 " />
<Waypoint body="TalusL" p="0.066100 0.037300 0.024200 " />
</Unit>
<Unit name="R_Tibialis_Anterior" f0="673.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.130300 0.488500 -0.010100 " />
<Waypoint body="TibiaR" p="-0.072200 0.103100 -0.014000 " />
<Waypoint body="TalusR" p="-0.055900 0.061300 -0.009100 " />
<Waypoint body="TalusR" p="-0.066100 0.037300 0.024200 " />
</Unit>
<Unit name="L_Tibialis_Posterior" f0="905.600000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaL" p="0.104800 0.472500 -0.023600 " />
<Waypoint body="TibiaL" p="0.084700 0.137800 -0.050400 " />
<Waypoint body="TibiaL" p="0.053700 0.091000 -0.052300 " />
<Waypoint body="TalusL" p="0.059000 0.048800 -0.021300 " />
<Waypoint body="TalusL" p="0.089900 0.039200 0.010000 " />
</Unit>
<Unit name="R_Tibialis_Posterior" f0="905.600000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="TibiaR" p="-0.104800 0.472500 -0.023600 " />
<Waypoint body="TibiaR" p="-0.084700 0.137800 -0.050400 " />
<Waypoint body="TibiaR" p="-0.053700 0.091000 -0.052300 " />
<Waypoint body="TalusR" p="-0.059000 0.048800 -0.021300 " />
<Waypoint body="TalusR" p="-0.089900 0.039200 0.010000 " />
</Unit>
<Unit name="L_Triceps_Lateral_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.243900 1.452400 -0.032900 " />
<Waypoint body="ArmL" p="0.319200 1.484700 -0.046100 " />
<Waypoint body="ArmL" p="0.488700 1.477900 -0.024200 " />
<Waypoint body="ForeArmL" p="0.523500 1.467000 -0.027000 " />
</Unit>
<Unit name="R_Triceps_Lateral_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.243900 1.452400 -0.032900 " />
<Waypoint body="ArmR" p="-0.319200 1.484700 -0.046100 " />
<Waypoint body="ArmR" p="-0.488700 1.477900 -0.024200 " />
<Waypoint body="ForeArmR" p="-0.523500 1.467000 -0.027000 " />
</Unit>
<Unit name="L_Triceps_Long_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderL" p="0.174200 1.411500 -0.063300 " />
<Waypoint body="ArmL" p="0.256000 1.443300 -0.060300 " />
<Waypoint body="ArmL" p="0.341900 1.464700 -0.075600 " />
<Waypoint body="ArmL" p="0.475900 1.462800 -0.048200 " />
<Waypoint body="ForeArmL" p="0.517200 1.462700 -0.033400 " />
</Unit>
<Unit name="R_Triceps_Long_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ShoulderR" p="-0.174200 1.411500 -0.063300 " />
<Waypoint body="ArmR" p="-0.256000 1.443300 -0.060300 " />
<Waypoint body="ArmR" p="-0.341900 1.464700 -0.075600 " />
<Waypoint body="ArmR" p="-0.475900 1.462800 -0.048200 " />
<Waypoint body="ForeArmR" p="-0.517200 1.462700 -0.033400 " />
</Unit>
<Unit name="L_Triceps_Medial_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmL" p="0.292100 1.442600 -0.033800 " />
<Waypoint body="ArmL" p="0.435900 1.428200 -0.036500 " />
<Waypoint body="ForeArmL" p="0.518300 1.454400 -0.028300 " />
</Unit>
<Unit name="R_Triceps_Medial_Head" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="ArmR" p="-0.292100 1.442600 -0.033800 " />
<Waypoint body="ArmR" p="-0.435900 1.428200 -0.036500 " />
<Waypoint body="ForeArmR" p="-0.518300 1.454400 -0.028300 " />
</Unit>
<Unit name="L_Vastus_Intermedius" f0="512.100000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.114500 0.871100 0.000900 " />
<Waypoint body="FemurL" p="0.096900 0.811600 0.033600 " />
<Waypoint body="FemurL" p="0.082200 0.604300 0.036300 " />
<Waypoint body="TibiaL" p="0.079500 0.545000 0.026800 " />
</Unit>
<Unit name="R_Vastus_Intermedius" f0="512.100000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.114500 0.871100 0.000900 " />
<Waypoint body="FemurR" p="-0.096900 0.811600 0.033600 " />
<Waypoint body="FemurR" p="-0.082200 0.604300 0.036300 " />
<Waypoint body="TibiaR" p="-0.079500 0.545000 0.026800 " />
</Unit>
<Unit name="L_Vastus_Intermedius1" f0="512.100000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.124000 0.871400 0.002600 " />
<Waypoint body="FemurL" p="0.130600 0.790100 0.032900 " />
<Waypoint body="FemurL" p="0.118200 0.650700 0.036700 " />
<Waypoint body="TibiaL" p="0.095000 0.557500 0.032300 " />
</Unit>
<Unit name="R_Vastus_Intermedius1" f0="512.100000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.124000 0.871400 0.002600 " />
<Waypoint body="FemurR" p="-0.130600 0.790100 0.032900 " />
<Waypoint body="FemurR" p="-0.118200 0.650700 0.036700 " />
<Waypoint body="TibiaR" p="-0.095000 0.557500 0.032300 " />
</Unit>
<Unit name="L_Vastus_Lateralis" f0="1127.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.148900 0.915000 0.001000 " />
<Waypoint body="FemurL" p="0.133900 0.882600 0.016900 " />
<Waypoint body="FemurL" p="0.105300 0.588000 0.039300 " />
<Waypoint body="TibiaL" p="0.088600 0.549500 0.035000 " />
</Unit>
<Unit name="R_Vastus_Lateralis" f0="1127.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.148900 0.915000 0.001000 " />
<Waypoint body="FemurR" p="-0.133900 0.882600 0.016900 " />
<Waypoint body="FemurR" p="-0.105300 0.588000 0.039300 " />
<Waypoint body="TibiaR" p="-0.088600 0.549500 0.035000 " />
</Unit>
<Unit name="L_Vastus_Lateralis1" f0="1127.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.159500 0.905200 -0.002200 " />
<Waypoint body="FemurL" p="0.153200 0.859200 -0.000000 " />
<Waypoint body="FemurL" p="0.136300 0.597400 0.008600 " />
<Waypoint body="TibiaL" p="0.102300 0.540500 0.035900 " />
</Unit>
<Unit name="R_Vastus_Lateralis1" f0="1127.700000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.159500 0.905200 -0.002200 " />
<Waypoint body="FemurR" p="-0.153200 0.859200 -0.000000 " />
<Waypoint body="FemurR" p="-0.136300 0.597400 0.008600 " />
<Waypoint body="TibiaR" p="-0.102300 0.540500 0.035900 " />
</Unit>
<Unit name="L_Vastus_Medialis" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.118600 0.871200 0.002900 " />
<Waypoint body="FemurL" p="0.039000 0.680200 0.013200 " />
<Waypoint body="FemurL" p="0.043200 0.604100 0.004100 " />
<Waypoint body="TibiaL" p="0.074700 0.541400 0.024100 " />
</Unit>
<Unit name="R_Vastus_Medialis" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.118600 0.871200 0.002900 " />
<Waypoint body="FemurR" p="-0.039000 0.680200 0.013200 " />
<Waypoint body="FemurR" p="-0.043200 0.604100 0.004100 " />
<Waypoint body="TibiaR" p="-0.074700 0.541400 0.024100 " />
</Unit>
<Unit name="L_Vastus_Medialis1" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.119100 0.867000 0.003500 " />
<Waypoint body="FemurL" p="0.080700 0.669600 0.048800 " />
<Waypoint body="TibiaL" p="0.087600 0.551300 0.035800 " />
</Unit>
<Unit name="R_Vastus_Medialis1" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.119100 0.867000 0.003500 " />
<Waypoint body="FemurR" p="-0.080700 0.669600 0.048800 " />
<Waypoint body="TibiaR" p="-0.087600 0.551300 0.035800 " />
</Unit>
<Unit name="L_Vastus_Medialis2" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurL" p="0.118600 0.871200 0.002900 " />
<Waypoint body="FemurL" p="0.057800 0.647400 0.037200 " />
<Waypoint body="TibiaL" p="0.076800 0.546900 0.031200 " />
</Unit>
<Unit name="R_Vastus_Medialis2" f0="721.850000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="FemurR" p="-0.118600 0.871200 0.002900 " />
<Waypoint body="FemurR" p="-0.057800 0.647400 0.037200 " />
<Waypoint body="TibiaR" p="-0.076800 0.546900 0.031200 " />
</Unit>
<Unit name="L_iliacus" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.068000 1.047100 -0.061100 " />
<Waypoint body="Pelvis" p="0.077600 0.942100 0.018000 " />
<Waypoint body="FemurL" p="0.094000 0.880500 -0.015000 " />
<Waypoint body="FemurL" p="0.111200 0.853400 -0.020900 " />
</Unit>
<Unit name="R_iliacus" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.068000 1.047100 -0.061100 " />
<Waypoint body="Pelvis" p="-0.077600 0.942100 0.018000 " />
<Waypoint body="FemurR" p="-0.094000 0.880500 -0.015000 " />
<Waypoint body="FemurR" p="-0.111200 0.853400 -0.020900 " />
</Unit>
<Unit name="L_iliacus1" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.116900 1.069700 -0.032400 " />
<Waypoint body="Pelvis" p="0.084100 0.973000 0.013000 " />
<Waypoint body="Pelvis" p="0.086800 0.917100 0.029700 " />
<Waypoint body="FemurL" p="0.099600 0.877100 -0.009200 " />
<Waypoint body="FemurL" p="0.118700 0.867700 -0.022800 " />
</Unit>
<Unit name="R_iliacus1" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.116900 1.069700 -0.032400 " />
<Waypoint body="Pelvis" p="-0.084100 0.973000 0.013000 " />
<Waypoint body="Pelvis" p="-0.086800 0.917100 0.029700 " />
<Waypoint body="FemurR" p="-0.099600 0.877100 -0.009200 " />
<Waypoint body="FemurR" p="-0.118700 0.867700 -0.022800 " />
</Unit>
<Unit name="L_iliacus2" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.128500 1.033500 0.022400 " />
<Waypoint body="Pelvis" p="0.099900 0.973000 0.031300 " />
<Waypoint body="FemurL" p="0.102000 0.908800 0.014700 " />
<Waypoint body="FemurL" p="0.109200 0.863700 -0.013300 " />
</Unit>
<Unit name="R_iliacus2" f0="207.300000" lm="1.000000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.128500 1.033500 0.022400 " />
<Waypoint body="Pelvis" p="-0.099900 0.973000 0.031300 " />
<Waypoint body="FemurR" p="-0.102000 0.908800 0.014700 " />
<Waypoint body="FemurR" p="-0.109200 0.863700 -0.013300 " />
</Unit>
<Unit name="L_iliocostalis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.003600 0.898900 -0.073600 " />
<Waypoint body="Pelvis" p="0.025200 1.026700 -0.101000 " />
<Waypoint body="Spine" p="0.052800 1.110900 -0.079000 " />
<Waypoint body="Torso" p="0.058200 1.174400 -0.093400 " />
<Waypoint body="Torso" p="0.063900 1.239200 -0.126200 " />
<Waypoint body="Torso" p="0.050100 1.433400 -0.104800 " />
<Waypoint body="Torso" p="0.041100 1.491600 -0.062400 " />
<Waypoint body="Neck" p="0.022400 1.538000 -0.010700 " />
</Unit>
<Unit name="R_iliocostalis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.003600 0.898900 -0.073600 " />
<Waypoint body="Pelvis" p="-0.025200 1.026700 -0.101000 " />
<Waypoint body="Spine" p="-0.052800 1.110900 -0.079000 " />
<Waypoint body="Torso" p="-0.058200 1.174400 -0.093400 " />
<Waypoint body="Torso" p="-0.063900 1.239200 -0.126200 " />
<Waypoint body="Torso" p="-0.050100 1.433400 -0.104800 " />
<Waypoint body="Torso" p="-0.041100 1.491600 -0.062400 " />
<Waypoint body="Neck" p="-0.022400 1.538000 -0.010700 " />
</Unit>
<Unit name="L_Rectus_Abdominis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.022100 0.922600 0.050800 " />
<Waypoint body="Pelvis" p="0.040200 1.029200 0.086100 " />
<Waypoint body="Torso" p="0.060100 1.110900 0.089400 " />
<Waypoint body="Torso" p="0.063500 1.170800 0.092300 " />
<Waypoint body="Torso" p="0.076200 1.304200 0.092900 " />
</Unit>
<Unit name="R_Rectus_Abdominis1" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.022100 0.922600 0.050800 " />
<Waypoint body="Pelvis" p="-0.040200 1.029200 0.086100 " />
<Waypoint body="Torso" p="-0.060100 1.110900 0.089400 " />
<Waypoint body="Torso" p="-0.063500 1.170800 0.092300 " />
<Waypoint body="Torso" p="-0.076200 1.304200 0.092900 " />
</Unit>
<Unit name="L_Serratus_Posterior_Inferior" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="0.000000 1.139400 -0.092000 " />
<Waypoint body="Torso" p="0.072300 1.156800 -0.084000 " />
<Waypoint body="Torso" p="0.080500 1.162800 -0.075700 " />
</Unit>
<Unit name="R_Serratus_Posterior_Inferior" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Spine" p="-0.000000 1.139400 -0.092000 " />
<Waypoint body="Torso" p="-0.072300 1.156800 -0.084000 " />
<Waypoint body="Torso" p="-0.080500 1.162800 -0.075700 " />
</Unit>
<Unit name="L_Transversus_Abdominis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.054100 1.043200 -0.092800 " />
<Waypoint body="Torso" p="0.063200 1.172600 -0.079300 " />
</Unit>
<Unit name="R_Transversus_Abdominis" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.054100 1.043200 -0.092800 " />
<Waypoint body="Torso" p="-0.063200 1.172600 -0.079300 " />
</Unit>
<Unit name="L_Transversus_Abdominis2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.135600 1.040700 0.017800 " />
<Waypoint body="Torso" p="0.111200 1.137900 -0.011800 " />
</Unit>
<Unit name="R_Transversus_Abdominis2" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.135600 1.040700 0.017800 " />
<Waypoint body="Torso" p="-0.111200 1.137900 -0.011800 " />
</Unit>
<Unit name="L_Transversus_Abdominis4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="0.016000 0.927700 0.053500 " />
<Waypoint body="Torso" p="0.038900 1.181000 0.093000 " />
<Waypoint body="Torso" p="0.021000 1.297800 0.093300 " />
</Unit>
<Unit name="R_Transversus_Abdominis4" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Pelvis" p="-0.016000 0.927700 0.053500 " />
<Waypoint body="Torso" p="-0.038900 1.181000 0.093000 " />
<Waypoint body="Torso" p="-0.021000 1.297800 0.093300 " />
</Unit>
<Unit name="L_Trapezius" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.179900 -0.096800 " />
<Waypoint body="Torso" p="0.034800 1.279400 -0.128000 " />
<Waypoint body="Torso" p="0.080500 1.345200 -0.135600 " />
<Waypoint body="ShoulderL" p="0.131400 1.447600 -0.102400 " />
</Unit>
<Unit name="R_Trapezius" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.179900 -0.096800 " />
<Waypoint body="Torso" p="-0.034800 1.279400 -0.128000 " />
<Waypoint body="Torso" p="-0.080500 1.345200 -0.135600 " />
<Waypoint body="ShoulderR" p="-0.131400 1.447600 -0.102400 " />
</Unit>
<Unit name="L_Trapezius3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="0.000000 1.437300 -0.119300 " />
<Waypoint body="ShoulderL" p="0.085900 1.476100 -0.103200 " />
<Waypoint body="ShoulderL" p="0.122700 1.472800 -0.092500 " />
<Waypoint body="ShoulderL" p="0.145500 1.455600 -0.091900 " />
</Unit>
<Unit name="R_Trapezius3" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Torso" p="-0.000000 1.437300 -0.119300 " />
<Waypoint body="ShoulderR" p="-0.085900 1.476100 -0.103200 " />
<Waypoint body="ShoulderR" p="-0.122700 1.472800 -0.092500 " />
<Waypoint body="ShoulderR" p="-0.145500 1.455600 -0.091900 " />
</Unit>
<Unit name="L_Trapezius5" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="0.000000 1.563000 -0.063600 " />
<Waypoint body="Neck" p="0.039300 1.549500 -0.062400 " />
<Waypoint body="ShoulderL" p="0.113300 1.496700 -0.064900 " />
<Waypoint body="ShoulderL" p="0.198900 1.460800 -0.056900 " />
</Unit>
<Unit name="R_Trapezius5" f0="1000.000000" lm="1.200000" lt="0.200000" pen_angle="0.000000" lmax="-0.100000">
<Waypoint body="Neck" p="-0.000000 1.563000 -0.063600 " />
<Waypoint body="Neck" p="-0.039300 1.549500 -0.062400 " />
<Waypoint body="ShoulderR" p="-0.113300 1.496700 -0.064900 " />
<Waypoint body="ShoulderR" p="-0.198900 1.460800 -0.056900 " />
</Unit>
</Muscle>
| 120,105 | XML | 66.36175 | 133 | 0.602073 |
vstrozzi/FRL-SHAC-Extension/envs/assets/snu/ground.xml | <Skeleton name="Ground">
<Node name="ground" parent="None" >
<Body type="Box" mass="15.0" size="1000.0 1.0 1000.0" contact="On" color="1.2 1.2 1.2 1.0">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 -0.49958 0.0"/>
</Body>
<Joint type="Weld">
<Transformation linear="1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" translation="0.0 0.0 0.0"/>
</Joint>
</Node>
</Skeleton>
| 457 | XML | 40.63636 | 105 | 0.538293 |
vstrozzi/FRL-SHAC-Extension/optim/gd.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
from torch.optim.optimizer import Optimizer
class GD(Optimizer):
r"""Implements Pure Gradient Descent algorithm.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
"""
def __init__(self, params, lr=1e-3):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
defaults = dict(lr=lr)
super(GD, self).__init__(params, defaults)
def __setstate__(self, state):
super(GD, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
p.add_(p.grad, alpha = -group['lr'])
return loss | 1,572 | Python | 33.955555 | 79 | 0.632952 |
vstrozzi/FRL-SHAC-Extension/algorithms/shac.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from multiprocessing.sharedctypes import Value
import sys, os
from torch.nn.utils.clip_grad import clip_grad_norm_
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import numpy as np
import copy
import torch
from tensorboardX import SummaryWriter
import yaml
import dflex as df
import envs
import models.actor
import models.critic
from utils.common import *
import utils.torch_utils as tu
from utils.running_mean_std import RunningMeanStd
from utils.dataset import CriticDataset
from utils.time_report import TimeReport
from utils.average_meter import AverageMeter
class SHAC:
def __init__(self, cfg):
env_fn = getattr(envs, cfg["params"]["diff_env"]["name"])
seeding(cfg["params"]["general"]["seed"])
self.env = env_fn(num_envs = cfg["params"]["config"]["num_actors"], \
device = cfg["params"]["general"]["device"], \
render = cfg["params"]["general"]["render"], \
seed = cfg["params"]["general"]["seed"], \
episode_length=cfg["params"]["diff_env"].get("episode_length", 250), \
stochastic_init = cfg["params"]["diff_env"].get("stochastic_env", True), \
MM_caching_frequency = cfg["params"]['diff_env'].get('MM_caching_frequency', 1), \
no_grad = False)
print('num_envs = ', self.env.num_envs)
print('num_actions = ', self.env.num_actions)
print('num_obs = ', self.env.num_obs)
self.num_envs = self.env.num_envs
self.num_obs = self.env.num_obs
self.num_actions = self.env.num_actions
self.max_episode_length = self.env.episode_length
self.device = cfg["params"]["general"]["device"]
self.gamma = cfg['params']['config'].get('gamma', 0.99)
self.critic_method = cfg['params']['config'].get('critic_method', 'one-step') # ['one-step', 'td-lambda']
if self.critic_method == 'td-lambda':
self.lam = cfg['params']['config'].get('lambda', 0.95)
self.steps_num = cfg["params"]["config"]["steps_num"]
self.max_epochs = cfg["params"]["config"]["max_epochs"]
self.actor_lr = float(cfg["params"]["config"]["actor_learning_rate"])
self.critic_lr = float(cfg['params']['config']['critic_learning_rate'])
self.lr_schedule = cfg['params']['config'].get('lr_schedule', 'linear')
self.target_critic_alpha = cfg['params']['config'].get('target_critic_alpha', 0.4)
self.obs_rms = None
if cfg['params']['config'].get('obs_rms', False):
self.obs_rms = RunningMeanStd(shape = (self.num_obs), device = self.device)
self.ret_rms = None
if cfg['params']['config'].get('ret_rms', False):
self.ret_rms = RunningMeanStd(shape = (), device = self.device)
self.rew_scale = cfg['params']['config'].get('rew_scale', 1.0)
self.critic_iterations = cfg['params']['config'].get('critic_iterations', 16)
self.num_batch = cfg['params']['config'].get('num_batch', 4)
self.batch_size = self.num_envs * self.steps_num // self.num_batch
self.name = cfg['params']['config'].get('name', "Ant")
self.truncate_grad = cfg["params"]["config"]["truncate_grads"]
self.grad_norm = cfg["params"]["config"]["grad_norm"]
if cfg['params']['general']['train']:
self.log_dir = cfg["params"]["general"]["logdir"]
os.makedirs(self.log_dir, exist_ok = True)
# save config
save_cfg = copy.deepcopy(cfg)
if 'general' in save_cfg['params']:
deleted_keys = []
for key in save_cfg['params']['general'].keys():
if key in save_cfg['params']['config']:
deleted_keys.append(key)
for key in deleted_keys:
del save_cfg['params']['general'][key]
yaml.dump(save_cfg, open(os.path.join(self.log_dir, 'cfg.yaml'), 'w'))
self.writer = SummaryWriter(os.path.join(self.log_dir, 'log'))
# save interval
self.save_interval = cfg["params"]["config"].get("save_interval", 500)
# stochastic inference
self.stochastic_evaluation = True
else:
self.stochastic_evaluation = not (cfg['params']['config']['player'].get('determenistic', False) or cfg['params']['config']['player'].get('deterministic', False))
self.steps_num = self.env.episode_length
# create actor critic network
self.actor_name = cfg["params"]["network"].get("actor", 'ActorStochasticMLP') # choices: ['ActorDeterministicMLP', 'ActorStochasticMLP']
self.critic_name = cfg["params"]["network"].get("critic", 'CriticMLP')
actor_fn = getattr(models.actor, self.actor_name)
self.actor = actor_fn(self.num_obs, self.num_actions, cfg['params']['network'], device = self.device)
critic_fn = getattr(models.critic, self.critic_name)
self.critic = critic_fn(self.num_obs, cfg['params']['network'], device = self.device)
self.all_params = list(self.actor.parameters()) + list(self.critic.parameters())
self.target_critic = copy.deepcopy(self.critic)
if cfg['params']['general']['train']:
self.save('init_policy')
# initialize optimizer
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), betas = cfg['params']['config']['betas'], lr = self.actor_lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), betas = cfg['params']['config']['betas'], lr = self.critic_lr)
# replay buffer
self.obs_buf = torch.zeros((self.steps_num, self.num_envs, self.num_obs), dtype = torch.float32, device = self.device)
self.rew_buf = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
self.done_mask = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
self.next_values = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
self.target_values = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
self.ret = torch.zeros((self.num_envs), dtype = torch.float32, device = self.device)
# for kl divergence computing
self.old_mus = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
self.old_sigmas = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
self.mus = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
self.sigmas = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
# counting variables
self.iter_count = 0
self.step_count = 0
# loss variables
self.episode_length_his = []
self.episode_loss_his = []
self.episode_discounted_loss_his = []
self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
self.best_policy_loss = np.inf
self.actor_loss = np.inf
self.value_loss = np.inf
# average meter
self.episode_loss_meter = AverageMeter(1, 100).to(self.device)
self.episode_discounted_loss_meter = AverageMeter(1, 100).to(self.device)
self.episode_length_meter = AverageMeter(1, 100).to(self.device)
# timer
self.time_report = TimeReport()
def compute_actor_loss(self, deterministic = False):
rew_acc = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)
gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
next_values = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)
actor_loss = torch.tensor(0., dtype = torch.float32, device = self.device)
with torch.no_grad():
if self.obs_rms is not None:
obs_rms = copy.deepcopy(self.obs_rms)
if self.ret_rms is not None:
ret_var = self.ret_rms.var.clone()
# initialize trajectory to cut off gradients between episodes.
obs = self.env.initialize_trajectory()
if self.obs_rms is not None:
# update obs rms
with torch.no_grad():
self.obs_rms.update(obs)
# normalize the current obs
obs = obs_rms.normalize(obs)
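        # Short-horizon rollout (high-level annotation): accumulate discounted rewards
        # for steps_num simulator steps and bootstrap each (sub-)trajectory with the
        # target critic value, which forms the differentiable actor objective of SHAC.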
for i in range(self.steps_num):
# collect data for critic training
with torch.no_grad():
self.obs_buf[i] = obs.clone()
actions = self.actor(obs, deterministic = deterministic)
obs, rew, done, extra_info = self.env.step(torch.tanh(actions))
with torch.no_grad():
raw_rew = rew.clone()
# scale the reward
rew = rew * self.rew_scale
if self.obs_rms is not None:
# update obs rms
with torch.no_grad():
self.obs_rms.update(obs)
# normalize the current obs
obs = obs_rms.normalize(obs)
if self.ret_rms is not None:
# update ret rms
with torch.no_grad():
self.ret = self.ret * self.gamma + rew
self.ret_rms.update(self.ret)
rew = rew / torch.sqrt(ret_var + 1e-6)
self.episode_length += 1
done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)
next_values[i + 1] = self.target_critic(obs).squeeze(-1)
for id in done_env_ids:
if torch.isnan(extra_info['obs_before_reset'][id]).sum() > 0 \
or torch.isinf(extra_info['obs_before_reset'][id]).sum() > 0 \
or (torch.abs(extra_info['obs_before_reset'][id]) > 1e6).sum() > 0: # ugly fix for nan values
next_values[i + 1, id] = 0.
elif self.episode_length[id] < self.max_episode_length: # early termination
next_values[i + 1, id] = 0.
else: # otherwise, use terminal value critic to estimate the long-term performance
if self.obs_rms is not None:
real_obs = obs_rms.normalize(extra_info['obs_before_reset'][id])
else:
real_obs = extra_info['obs_before_reset'][id]
next_values[i + 1, id] = self.target_critic(real_obs).squeeze(-1)
if (next_values[i + 1] > 1e6).sum() > 0 or (next_values[i + 1] < -1e6).sum() > 0:
print('next value error')
raise ValueError
rew_acc[i + 1, :] = rew_acc[i, :] + gamma * rew
if i < self.steps_num - 1:
actor_loss = actor_loss + (- rew_acc[i + 1, done_env_ids] - self.gamma * gamma[done_env_ids] * next_values[i + 1, done_env_ids]).sum()
else:
# terminate all envs at the end of optimization iteration
actor_loss = actor_loss + (- rew_acc[i + 1, :] - self.gamma * gamma * next_values[i + 1, :]).sum()
# compute gamma for next step
gamma = gamma * self.gamma
# clear up gamma and rew_acc for done envs
gamma[done_env_ids] = 1.
rew_acc[i + 1, done_env_ids] = 0.
# collect data for critic training
with torch.no_grad():
self.rew_buf[i] = rew.clone()
if i < self.steps_num - 1:
self.done_mask[i] = done.clone().to(torch.float32)
else:
self.done_mask[i, :] = 1.
self.next_values[i] = next_values[i + 1].clone()
# collect episode loss
with torch.no_grad():
self.episode_loss -= raw_rew
self.episode_discounted_loss -= self.episode_gamma * raw_rew
self.episode_gamma *= self.gamma
if len(done_env_ids) > 0:
self.episode_loss_meter.update(self.episode_loss[done_env_ids])
self.episode_discounted_loss_meter.update(self.episode_discounted_loss[done_env_ids])
self.episode_length_meter.update(self.episode_length[done_env_ids])
for done_env_id in done_env_ids:
if (self.episode_loss[done_env_id] > 1e6 or self.episode_loss[done_env_id] < -1e6):
print('ep loss error')
raise ValueError
self.episode_loss_his.append(self.episode_loss[done_env_id].item())
self.episode_discounted_loss_his.append(self.episode_discounted_loss[done_env_id].item())
self.episode_length_his.append(self.episode_length[done_env_id].item())
self.episode_loss[done_env_id] = 0.
self.episode_discounted_loss[done_env_id] = 0.
self.episode_length[done_env_id] = 0
self.episode_gamma[done_env_id] = 1.
actor_loss /= self.steps_num * self.num_envs
if self.ret_rms is not None:
actor_loss = actor_loss * torch.sqrt(ret_var + 1e-6)
self.actor_loss = actor_loss.detach().cpu().item()
self.step_count += self.steps_num * self.num_envs
return actor_loss
@torch.no_grad()
def evaluate_policy(self, num_games, deterministic = False):
episode_length_his = []
episode_loss_his = []
episode_discounted_loss_his = []
episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
obs = self.env.reset()
games_cnt = 0
while games_cnt < num_games:
if self.obs_rms is not None:
obs = self.obs_rms.normalize(obs)
actions = self.actor(obs, deterministic = deterministic)
obs, rew, done, _ = self.env.step(torch.tanh(actions))
episode_length += 1
done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)
episode_loss -= rew
episode_discounted_loss -= episode_gamma * rew
episode_gamma *= self.gamma
if len(done_env_ids) > 0:
for done_env_id in done_env_ids:
print('loss = {:.2f}, len = {}'.format(episode_loss[done_env_id].item(), episode_length[done_env_id]))
episode_loss_his.append(episode_loss[done_env_id].item())
episode_discounted_loss_his.append(episode_discounted_loss[done_env_id].item())
episode_length_his.append(episode_length[done_env_id].item())
episode_loss[done_env_id] = 0.
episode_discounted_loss[done_env_id] = 0.
episode_length[done_env_id] = 0
episode_gamma[done_env_id] = 1.
games_cnt += 1
mean_episode_length = np.mean(np.array(episode_length_his))
mean_policy_loss = np.mean(np.array(episode_loss_his))
mean_policy_discounted_loss = np.mean(np.array(episode_discounted_loss_his))
return mean_policy_loss, mean_policy_discounted_loss, mean_episode_length
@torch.no_grad()
def compute_target_values(self):
if self.critic_method == 'one-step':
self.target_values = self.rew_buf + self.gamma * self.next_values
elif self.critic_method == 'td-lambda':
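            # TD(lambda) value targets, computed backwards over the stored rollout
            # (high-level annotation): Bi is the discounted return over the remaining
            # horizon, restarted and bootstrapped with the stored next_values at done
            # steps; Ai accumulates the lambda-weighted mixture of n-step bootstrapped
            # returns; `lam` holds the running lambda^k factor and is reset at episode
            # boundaries. target_values blends Ai and Bi into the TD(lambda) estimate.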
Ai = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
Bi = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
lam = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
for i in reversed(range(self.steps_num)):
lam = lam * self.lam * (1. - self.done_mask[i]) + self.done_mask[i]
Ai = (1.0 - self.done_mask[i]) * (self.lam * self.gamma * Ai + self.gamma * self.next_values[i] + (1. - lam) / (1. - self.lam) * self.rew_buf[i])
Bi = self.gamma * (self.next_values[i] * self.done_mask[i] + Bi * (1.0 - self.done_mask[i])) + self.rew_buf[i]
self.target_values[i] = (1.0 - self.lam) * Ai + lam * Bi
else:
raise NotImplementedError
def compute_critic_loss(self, batch_sample):
predicted_values = self.critic(batch_sample['obs']).squeeze(-1)
target_values = batch_sample['target_values']
critic_loss = ((predicted_values - target_values) ** 2).mean()
return critic_loss
def initialize_env(self):
self.env.clear_grad()
self.env.reset()
@torch.no_grad()
def run(self, num_games):
mean_policy_loss, mean_policy_discounted_loss, mean_episode_length = self.evaluate_policy(num_games = num_games, deterministic = not self.stochastic_evaluation)
print_info('mean episode loss = {}, mean discounted loss = {}, mean episode length = {}'.format(mean_policy_loss, mean_policy_discounted_loss, mean_episode_length))
def train(self):
self.start_time = time.time()
# add timers
self.time_report.add_timer("algorithm")
self.time_report.add_timer("compute actor loss")
self.time_report.add_timer("forward simulation")
self.time_report.add_timer("backward simulation")
self.time_report.add_timer("prepare critic dataset")
self.time_report.add_timer("actor training")
self.time_report.add_timer("critic training")
self.time_report.start_timer("algorithm")
# initializations
self.initialize_env()
self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
def actor_closure():
self.actor_optimizer.zero_grad()
self.time_report.start_timer("compute actor loss")
self.time_report.start_timer("forward simulation")
actor_loss = self.compute_actor_loss()
self.time_report.end_timer("forward simulation")
self.time_report.start_timer("backward simulation")
actor_loss.backward()
self.time_report.end_timer("backward simulation")
with torch.no_grad():
self.grad_norm_before_clip = tu.grad_norm(self.actor.parameters())
if self.truncate_grad:
clip_grad_norm_(self.actor.parameters(), self.grad_norm)
self.grad_norm_after_clip = tu.grad_norm(self.actor.parameters())
# sanity check
if torch.isnan(self.grad_norm_before_clip) or self.grad_norm_before_clip > 1000000.:
print('NaN gradient')
raise ValueError
self.time_report.end_timer("compute actor loss")
return actor_loss
# main training process
for epoch in range(self.max_epochs):
time_start_epoch = time.time()
# learning rate schedule
if self.lr_schedule == 'linear':
actor_lr = (1e-5 - self.actor_lr) * float(epoch / self.max_epochs) + self.actor_lr
for param_group in self.actor_optimizer.param_groups:
param_group['lr'] = actor_lr
lr = actor_lr
critic_lr = (1e-5 - self.critic_lr) * float(epoch / self.max_epochs) + self.critic_lr
for param_group in self.critic_optimizer.param_groups:
param_group['lr'] = critic_lr
else:
lr = self.actor_lr
# train actor
self.time_report.start_timer("actor training")
self.actor_optimizer.step(actor_closure).detach().item()
self.time_report.end_timer("actor training")
# train critic
# prepare dataset
self.time_report.start_timer("prepare critic dataset")
with torch.no_grad():
self.compute_target_values()
dataset = CriticDataset(self.batch_size, self.obs_buf, self.target_values, drop_last = False)
self.time_report.end_timer("prepare critic dataset")
self.time_report.start_timer("critic training")
self.value_loss = 0.
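            # Run several passes (critic_iterations) of minibatch regression,
            # fitting the critic to the value targets computed above.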
for j in range(self.critic_iterations):
total_critic_loss = 0.
batch_cnt = 0
for i in range(len(dataset)):
batch_sample = dataset[i]
self.critic_optimizer.zero_grad()
training_critic_loss = self.compute_critic_loss(batch_sample)
training_critic_loss.backward()
# ugly fix for simulation nan problem
for params in self.critic.parameters():
params.grad.nan_to_num_(0.0, 0.0, 0.0)
if self.truncate_grad:
clip_grad_norm_(self.critic.parameters(), self.grad_norm)
self.critic_optimizer.step()
total_critic_loss += training_critic_loss
batch_cnt += 1
self.value_loss = (total_critic_loss / batch_cnt).detach().cpu().item()
print('value iter {}/{}, loss = {:7.6f}'.format(j + 1, self.critic_iterations, self.value_loss), end='\r')
self.time_report.end_timer("critic training")
self.iter_count += 1
time_end_epoch = time.time()
# logging
time_elapse = time.time() - self.start_time
self.writer.add_scalar('lr/iter', lr, self.iter_count)
self.writer.add_scalar('actor_loss/step', self.actor_loss, self.step_count)
self.writer.add_scalar('actor_loss/iter', self.actor_loss, self.iter_count)
self.writer.add_scalar('value_loss/step', self.value_loss, self.step_count)
self.writer.add_scalar('value_loss/iter', self.value_loss, self.iter_count)
if len(self.episode_loss_his) > 0:
mean_episode_length = self.episode_length_meter.get_mean()
mean_policy_loss = self.episode_loss_meter.get_mean()
mean_policy_discounted_loss = self.episode_discounted_loss_meter.get_mean()
if mean_policy_loss < self.best_policy_loss:
print_info("save best policy with loss {:.2f}".format(mean_policy_loss))
self.save()
self.best_policy_loss = mean_policy_loss
self.writer.add_scalar('policy_loss/step', mean_policy_loss, self.step_count)
self.writer.add_scalar('policy_loss/time', mean_policy_loss, time_elapse)
self.writer.add_scalar('policy_loss/iter', mean_policy_loss, self.iter_count)
self.writer.add_scalar('rewards/step', -mean_policy_loss, self.step_count)
self.writer.add_scalar('rewards/time', -mean_policy_loss, time_elapse)
self.writer.add_scalar('rewards/iter', -mean_policy_loss, self.iter_count)
self.writer.add_scalar('policy_discounted_loss/step', mean_policy_discounted_loss, self.step_count)
self.writer.add_scalar('policy_discounted_loss/iter', mean_policy_discounted_loss, self.iter_count)
self.writer.add_scalar('best_policy_loss/step', self.best_policy_loss, self.step_count)
self.writer.add_scalar('best_policy_loss/iter', self.best_policy_loss, self.iter_count)
self.writer.add_scalar('episode_lengths/iter', mean_episode_length, self.iter_count)
self.writer.add_scalar('episode_lengths/step', mean_episode_length, self.step_count)
self.writer.add_scalar('episode_lengths/time', mean_episode_length, time_elapse)
else:
mean_policy_loss = np.inf
mean_policy_discounted_loss = np.inf
mean_episode_length = 0
print('iter {}: ep loss {:.2f}, ep discounted loss {:.2f}, ep len {:.1f}, fps total {:.2f}, value loss {:.2f}, grad norm before clip {:.2f}, grad norm after clip {:.2f}'.format(\
self.iter_count, mean_policy_loss, mean_policy_discounted_loss, mean_episode_length, self.steps_num * self.num_envs / (time_end_epoch - time_start_epoch), self.value_loss, self.grad_norm_before_clip, self.grad_norm_after_clip))
self.writer.flush()
if self.save_interval > 0 and (self.iter_count % self.save_interval == 0):
self.save(self.name + "policy_iter{}_reward{:.3f}".format(self.iter_count, -mean_policy_loss))
# update target critic
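            # (soft / Polyak averaging: target <- alpha * target + (1 - alpha) * critic)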
with torch.no_grad():
alpha = self.target_critic_alpha
for param, param_targ in zip(self.critic.parameters(), self.target_critic.parameters()):
param_targ.data.mul_(alpha)
param_targ.data.add_((1. - alpha) * param.data)
self.time_report.end_timer("algorithm")
self.time_report.report()
self.save('final_policy')
# save reward/length history
self.episode_loss_his = np.array(self.episode_loss_his)
self.episode_discounted_loss_his = np.array(self.episode_discounted_loss_his)
self.episode_length_his = np.array(self.episode_length_his)
np.save(open(os.path.join(self.log_dir, 'episode_loss_his.npy'), 'wb'), self.episode_loss_his)
np.save(open(os.path.join(self.log_dir, 'episode_discounted_loss_his.npy'), 'wb'), self.episode_discounted_loss_his)
np.save(open(os.path.join(self.log_dir, 'episode_length_his.npy'), 'wb'), self.episode_length_his)
# evaluate the final policy's performance
self.run(self.num_envs)
self.close()
def play(self, cfg):
self.load(cfg['params']['general']['checkpoint'])
self.run(cfg['params']['config']['player']['games_num'])
def save(self, filename = None):
if filename is None:
filename = 'best_policy'
torch.save([self.actor, self.critic, self.target_critic, self.obs_rms, self.ret_rms], os.path.join(self.log_dir, "{}.pt".format(filename)))
def load(self, path):
checkpoint = torch.load(path)
self.actor = checkpoint[0].to(self.device)
self.critic = checkpoint[1].to(self.device)
self.target_critic = checkpoint[2].to(self.device)
self.obs_rms = checkpoint[3].to(self.device)
self.ret_rms = checkpoint[4].to(self.device) if checkpoint[4] is not None else checkpoint[4]
def close(self):
self.writer.close()
| 28,641 | Python | 48.553633 | 247 | 0.576726 |
vstrozzi/FRL-SHAC-Extension/algorithms/bptt.py | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
from torch.nn.utils.clip_grad import clip_grad_norm_
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import numpy as np
import copy
import torch
from tensorboardX import SummaryWriter
import yaml
import dflex as df
import envs
import models.actor
from optim.gd import GD
from utils.common import *
import utils.torch_utils as tu
from utils.time_report import TimeReport
from utils.average_meter import AverageMeter
from utils.running_mean_std import RunningMeanStd
class BPTT:
def __init__(self, cfg):
env_fn = getattr(envs, cfg["params"]["diff_env"]["name"])
seeding(cfg["params"]["general"]["seed"])
self.env = env_fn(num_envs = cfg["params"]["config"]["num_actors"], \
device = cfg["params"]["general"]["device"], \
render = cfg["params"]["general"]["render"], \
seed = cfg["params"]["general"]["seed"], \
episode_length=cfg["params"]["diff_env"].get("episode_length", 250), \
stochastic_init = cfg["params"]["diff_env"].get("stochastic_env", False), \
MM_caching_frequency = cfg["params"]['diff_env'].get('MM_caching_frequency', 1), \
no_grad = False)
print('num_envs = ', self.env.num_envs)
print('num_actions = ', self.env.num_actions)
print('num_obs = ', self.env.num_obs)
self.num_envs = self.env.num_envs
self.num_obs = self.env.num_obs
self.num_actions = self.env.num_actions
self.max_episode_length = self.env.episode_length
self.device = cfg["params"]["general"]["device"]
self.gamma = cfg['params']['config'].get('gamma', 0.99)
self.steps_num = cfg["params"]["config"]["steps_num"]
self.max_epochs = cfg["params"]["config"]["max_epochs"]
self.actor_lr = float(cfg["params"]["config"]["actor_learning_rate"])
self.lr_schedule = cfg['params']['config'].get('lr_schedule', 'linear')
self.obs_rms = None
if cfg['params']['config'].get('obs_rms', False):
self.obs_rms = RunningMeanStd(shape = (self.num_obs), device = self.device)
self.rew_scale = cfg['params']['config'].get('rew_scale', 1.0)
self.name = cfg['params']['config'].get('name', "Ant")
self.truncate_grad = cfg["params"]["config"]["truncate_grads"]
self.grad_norm = cfg["params"]["config"]["grad_norm"]
if cfg['params']['general']['train']:
self.log_dir = cfg["params"]["general"]["logdir"]
os.makedirs(self.log_dir, exist_ok = True)
# save config
save_cfg = copy.deepcopy(cfg)
if 'general' in save_cfg['params']:
deleted_keys = []
for key in save_cfg['params']['general'].keys():
if key in save_cfg['params']['config']:
deleted_keys.append(key)
for key in deleted_keys:
del save_cfg['params']['general'][key]
yaml.dump(save_cfg, open(os.path.join(self.log_dir, 'cfg.yaml'), 'w'))
self.writer = SummaryWriter(os.path.join(self.log_dir, 'log'))
# save interval
self.save_interval = cfg["params"]["config"].get("save_interval", 500)
# stochastic inference
self.stochastic_evaluation = True
else:
self.stochastic_evaluation = not (cfg['params']['config']['player'].get('determenistic', False) or cfg['params']['config']['player'].get('deterministic', False))
self.steps_num = self.env.episode_length
# create actor critic network
self.algo = cfg["params"]["algo"]['name'] # choices: ['gd', 'adam', 'SGD']
self.actor_name = cfg["params"]["network"].get("actor", 'ActorStochasticMLP') # choices: ['ActorDeterministicMLP', 'ActorStochasticMLP']
actor_fn = getattr(models.actor, self.actor_name)
self.actor = actor_fn(self.num_obs, self.num_actions, cfg['params']['network'], device = self.device)
if cfg['params']['general']['train']:
self.save('init_policy')
# initialize optimizer
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), betas = cfg['params']['config']['betas'], lr = self.actor_lr)
# counting variables
self.iter_count = 0
self.step_count = 0
# loss variables
self.episode_length_his = []
self.episode_loss_his = []
self.episode_discounted_loss_his = []
self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
self.best_policy_loss = np.inf
self.actor_loss = np.inf
# average meter
self.episode_loss_meter = AverageMeter(1, 100).to(self.device)
self.episode_discounted_loss_meter = AverageMeter(1, 100).to(self.device)
self.episode_length_meter = AverageMeter(1, 100).to(self.device)
# timer
self.time_report = TimeReport()
def compute_actor_loss(self, deterministic = False):
rew_acc = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)
gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
actor_loss = torch.tensor(0., dtype = torch.float32, device = self.device)
with torch.no_grad():
if self.obs_rms is not None:
obs_rms = copy.deepcopy(self.obs_rms)
obs = self.env.initialize_trajectory()
if self.obs_rms is not None:
# update obs rms
with torch.no_grad():
self.obs_rms.update(obs)
# normalize the current obs
obs = obs_rms.normalize(obs)
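        # Plain BPTT objective (high-level annotation): back-propagate through the
        # differentiable simulator over the whole optimization window; unlike SHAC
        # there is no critic bootstrap, only the accumulated discounted reward.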
for i in range(self.steps_num):
actions = self.actor(obs, deterministic = deterministic)
obs, rew, done, extra_info = self.env.step(torch.tanh(actions))
with torch.no_grad():
raw_rew = rew.clone()
# scale the reward
rew = rew * self.rew_scale
if self.obs_rms is not None:
# update obs rms
with torch.no_grad():
self.obs_rms.update(obs)
# normalize the current obs
obs = obs_rms.normalize(obs)
self.episode_length += 1
done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)
# JIE
rew_acc[i + 1, :] = rew_acc[i, :] + gamma * rew
if i < self.steps_num - 1:
actor_loss = actor_loss + (- rew_acc[i + 1, done_env_ids]).sum()
else:
# terminate all envs at the end of optimization iteration
actor_loss = actor_loss + (- rew_acc[i + 1, :]).sum()
# compute gamma for next step
gamma = gamma * self.gamma
# clear up gamma and rew_acc for done envs
gamma[done_env_ids] = 1.
rew_acc[i + 1, done_env_ids] = 0.
# collect episode loss
with torch.no_grad():
self.episode_loss -= raw_rew
self.episode_discounted_loss -= self.episode_gamma * raw_rew
self.episode_gamma *= self.gamma
if len(done_env_ids) > 0:
self.episode_loss_meter.update(self.episode_loss[done_env_ids])
self.episode_discounted_loss_meter.update(self.episode_discounted_loss[done_env_ids])
self.episode_length_meter.update(self.episode_length[done_env_ids])
for done_env_id in done_env_ids:
if (self.episode_loss[done_env_id] > 1e6 or self.episode_loss[done_env_id] < -1e6):
print('ep loss error')
import IPython
IPython.embed()
self.episode_loss_his.append(self.episode_loss[done_env_id].item())
self.episode_discounted_loss_his.append(self.episode_discounted_loss[done_env_id].item())
self.episode_length_his.append(self.episode_length[done_env_id].item())
self.episode_loss[done_env_id] = 0.
self.episode_discounted_loss[done_env_id] = 0.
self.episode_length[done_env_id] = 0
self.episode_gamma[done_env_id] = 1.
actor_loss /= self.steps_num * self.num_envs
self.actor_loss = actor_loss.detach().cpu().item()
self.step_count += self.steps_num * self.num_envs
return actor_loss
@torch.no_grad()
def evaluate_policy(self, num_games, deterministic = False):
episode_length_his = []
episode_loss_his = []
episode_discounted_loss_his = []
episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
obs = self.env.reset()
games_cnt = 0
while games_cnt < num_games:
if self.obs_rms is not None:
obs = self.obs_rms.normalize(obs)
actions = self.actor(obs, deterministic = deterministic)
obs, rew, done, _ = self.env.step(torch.tanh(actions))
episode_length += 1
done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)
episode_loss -= rew
episode_discounted_loss -= episode_gamma * rew
episode_gamma *= self.gamma
if len(done_env_ids) > 0:
for done_env_id in done_env_ids:
print('loss = {:.2f}, len = {}'.format(episode_loss[done_env_id].item(), episode_length[done_env_id]))
episode_loss_his.append(episode_loss[done_env_id].item())
episode_discounted_loss_his.append(episode_discounted_loss[done_env_id].item())
episode_length_his.append(episode_length[done_env_id].item())
episode_loss[done_env_id] = 0.
episode_discounted_loss[done_env_id] = 0.
episode_length[done_env_id] = 0
episode_gamma[done_env_id] = 1.
games_cnt += 1
mean_episode_length = np.mean(np.array(episode_length_his))
mean_policy_loss = np.mean(np.array(episode_loss_his))
mean_policy_discounted_loss = np.mean(np.array(episode_discounted_loss_his))
return mean_policy_loss, mean_policy_discounted_loss, mean_episode_length
def initialize_env(self):
self.env.clear_grad()
self.env.reset()
@torch.no_grad()
def run(self, num_games):
mean_policy_loss, mean_policy_discounted_loss, mean_episode_length = self.evaluate_policy(num_games = num_games, deterministic = not self.stochastic_evaluation)
print_info('mean episode loss = {}, mean discounted loss = {}, mean episode length = {}'.format(mean_policy_loss, mean_policy_discounted_loss, mean_episode_length))
def train(self):
self.start_time = time.time()
# timers
self.time_report.add_timer("algorithm")
self.time_report.add_timer("compute actor loss")
self.time_report.add_timer("forward simulation")
self.time_report.add_timer("backward simulation")
self.time_report.add_timer("actor training")
self.time_report.start_timer("algorithm")
self.initialize_env()
self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
self.episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
def actor_closure():
self.actor_optimizer.zero_grad()
self.time_report.start_timer("compute actor loss")
self.time_report.start_timer("forward simulation")
actor_loss = self.compute_actor_loss()
self.time_report.end_timer("forward simulation")
self.time_report.start_timer("backward simulation")
actor_loss.backward()
self.time_report.end_timer("backward simulation")
with torch.no_grad():
self.grad_norm_before_clip = tu.grad_norm(self.actor.parameters())
if self.truncate_grad:
clip_grad_norm_(self.actor.parameters(), self.grad_norm)
self.grad_norm_after_clip = tu.grad_norm(self.actor.parameters())
if torch.isnan(self.grad_norm_before_clip):
# JIE
print('here!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NaN gradient')
import IPython
IPython.embed()
for params in self.actor.parameters():
params.grad.zero_()
if torch.isnan(self.grad_norm_before_clip) or self.grad_norm_before_clip > 1000000.:
self.save("nan_policy")
self.time_report.end_timer("compute actor loss")
return actor_loss
for epoch in range(self.max_epochs):
time_start_epoch = time.time()
if self.lr_schedule == 'linear':
actor_lr = (1e-5 - self.actor_lr) * float(epoch / self.max_epochs) + self.actor_lr
for param_group in self.actor_optimizer.param_groups:
param_group['lr'] = actor_lr
lr = actor_lr
else:
lr = self.actor_lr
# train actor
self.time_report.start_timer("actor training")
self.actor_optimizer.step(actor_closure).detach().item()
self.time_report.end_timer("actor training")
self.iter_count += 1
time_end_epoch = time.time()
# logging
time_elapse = time.time() - self.start_time
self.writer.add_scalar('lr/iter', lr, self.iter_count)
self.writer.add_scalar('actor_loss/step', self.actor_loss, self.step_count)
self.writer.add_scalar('actor_loss/iter', self.actor_loss, self.iter_count)
if len(self.episode_loss_his) > 0:
mean_episode_length = self.episode_length_meter.get_mean()
mean_policy_loss = self.episode_loss_meter.get_mean()
mean_policy_discounted_loss = self.episode_discounted_loss_meter.get_mean()
if mean_policy_loss < self.best_policy_loss:
print_info("save best policy with loss {:.2f}".format(mean_policy_loss))
self.save()
self.best_policy_loss = mean_policy_loss
# self.save("latest_policy")
self.writer.add_scalar('policy_loss/step', mean_policy_loss, self.step_count)
self.writer.add_scalar('policy_loss/time', mean_policy_loss, time_elapse)
self.writer.add_scalar('policy_loss/iter', mean_policy_loss, self.iter_count)
self.writer.add_scalar('rewards/step', -mean_policy_loss, self.step_count)
self.writer.add_scalar('rewards/time', -mean_policy_loss, time_elapse)
self.writer.add_scalar('rewards/iter', -mean_policy_loss, self.iter_count)
self.writer.add_scalar('policy_discounted_loss/step', mean_policy_discounted_loss, self.step_count)
self.writer.add_scalar('policy_discounted_loss/iter', mean_policy_discounted_loss, self.iter_count)
self.writer.add_scalar('best_policy_loss/step', self.best_policy_loss, self.step_count)
self.writer.add_scalar('best_policy_loss/iter', self.best_policy_loss, self.iter_count)
self.writer.add_scalar('episode_lengths/iter', mean_episode_length, self.iter_count)
self.writer.add_scalar('episode_lengths/step', mean_episode_length, self.step_count)
self.writer.add_scalar('episode_lengths/time', mean_episode_length, time_elapse)
else:
mean_policy_loss = np.inf
mean_policy_discounted_loss = np.inf
mean_episode_length = 0
print('iter {}: ep loss {:.2f}, ep discounted loss {:.2f}, ep len {:.1f}, fps total {:.2f}, grad norm before clip {:.2f}, grad norm after clip {:.2f}'.format(\
self.iter_count, mean_policy_loss, mean_policy_discounted_loss, mean_episode_length, self.steps_num * self.num_envs / (time_end_epoch - time_start_epoch), self.grad_norm_before_clip, self.grad_norm_after_clip))
self.writer.flush()
if self.save_interval > 0 and (self.iter_count % self.save_interval == 0):
self.save(self.name + "policy_iter{}_reward{:.3f}".format(self.iter_count, -mean_policy_loss))
self.time_report.end_timer("algorithm")
self.time_report.report()
self.save('final_policy')
# save reward/length history
self.episode_loss_his = np.array(self.episode_loss_his)
self.episode_discounted_loss_his = np.array(self.episode_discounted_loss_his)
self.episode_length_his = np.array(self.episode_length_his)
np.save(open(os.path.join(self.log_dir, 'episode_loss_his.npy'), 'wb'), self.episode_loss_his)
np.save(open(os.path.join(self.log_dir, 'episode_discounted_loss_his.npy'), 'wb'), self.episode_discounted_loss_his)
np.save(open(os.path.join(self.log_dir, 'episode_length_his.npy'), 'wb'), self.episode_length_his)
# evaluate the final policy's performance
self.run(self.num_envs)
self.close()
def play(self, cfg):
self.load(cfg['params']['general']['checkpoint'])
self.run(cfg['params']['config']['player']['games_num'])
def save(self, filename = None):
if filename is None:
filename = 'best_policy'
torch.save([self.actor, self.obs_rms], os.path.join(self.log_dir, "{}.pt".format(filename)))
def load(self, path):
checkpoint = torch.load(path)
self.actor = checkpoint[0].to(self.device)
self.obs_rms = checkpoint[1].to(self.device)
def close(self):
self.writer.close()
| 19,636 | Python | 45.313679 | 230 | 0.577205 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/setup.py | """Setup script for rl_games"""
import sys
import os
import pathlib
from setuptools import setup, find_packages
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
print(find_packages())
setup(name='rl-games',
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/Denys88/rl_games",
packages = ['.','rl_games','docs'],
package_data={'rl_games':['*'],'docs':['*'],},
version='1.1.0',
author='Denys Makoviichuk, Viktor Makoviichuk',
author_email='[email protected], [email protected]',
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
#packages=["rlg"],
include_package_data=True,
install_requires=[
# this setup is only for pytorch
#
'gym>=0.17.2',
'numpy>=1.16.0',
'tensorboard>=1.14.0',
'tensorboardX>=1.6',
'setproctitle',
'psutil',
'pyyaml'
],
)
| 1,300 | Python | 27.91111 | 70 | 0.559231 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/runner.py | import numpy as np
import argparse, copy, os, yaml
import ray, signal
os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
#import warnings
#warnings.filterwarnings("error")
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-tf", "--tf", required=False, help="run tensorflow runner", action='store_true')
ap.add_argument("-t", "--train", required=False, help="train network", action='store_true')
ap.add_argument("-p", "--play", required=False, help="play(test) network", action='store_true')
ap.add_argument("-c", "--checkpoint", required=False, help="path to checkpoint")
ap.add_argument("-f", "--file", required=True, help="path to config")
ap.add_argument("-na", "--num_actors", type=int, default=0, required=False,
help="number of envs running in parallel, if larger than 0 will overwrite the value in yaml config")
os.makedirs("nn", exist_ok=True)
os.makedirs("runs", exist_ok=True)
args = vars(ap.parse_args())
config_name = args['file']
print('Loading config: ', config_name)
with open(config_name, 'r') as stream:
config = yaml.safe_load(stream)
if args['num_actors'] > 0:
config['params']['config']['num_actors'] = args['num_actors']
if args['tf']:
from rl_games.tf14_runner import Runner
else:
from rl_games.torch_runner import Runner
ray.init(object_store_memory=1024*1024*1000)
#signal.signal(signal.SIGINT, exit_gracefully)
runner = Runner()
try:
runner.load(config)
except yaml.YAMLError as exc:
print(exc)
runner.reset()
runner.run(args)
ray.shutdown()
| 1,739 | Python | 33.799999 | 120 | 0.615296 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/README.md | # RL Games: High performance RL library
## Papers and related links
* Isaac Gym: High Performance GPU-Based Physics Simulation For Robot Learning: https://arxiv.org/abs/2108.10470
* Transferring Dexterous Manipulation from GPU Simulation to a Remote Real-World TriFinger: https://s2r2-ig.github.io/ https://arxiv.org/abs/2108.09779
* Is Independent Learning All You Need in the StarCraft Multi-Agent Challenge? <https://arxiv.org/abs/2011.09533>
## Some results on interesting environments
* [NVIDIA Isaac Gym](docs/ISAAC_GYM.md)




* [Starcraft 2 Multi Agents](docs/SMAC.md)
* [BRAX](docs/BRAX.md)
* [Old TF1.x results](docs/OTHER.md)
## Config file
* [Configuration](docs/CONFIG_PARAMS.md)
Implemented in Pytorch:
* PPO with the support of asymmetric actor-critic variant
* Support of end-to-end GPU accelerated training pipeline with Isaac Gym and Brax
* Masked actions support
* Multi-agent training, decentralized and centralized critic variants
* Self-play
Implemented in Tensorflow 1.x (not updated now):
* Rainbow DQN
* A2C
* PPO
# Installation
For maximum training performance a preliminary installation of Pytorch 1.9+ with CUDA 11.1 is highly recommended:
```conda install pytorch torchvision cudatoolkit=11.1 -c pytorch -c nvidia``` or:
```pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html```
Then:
```pip install rl-games```
# Training
**NVIDIA Isaac Gym**
Download and follow the installation instructions from https://developer.nvidia.com/isaac-gym
Run from ```python/rlgpu``` directory:
Ant
```python rlg_train.py --task Ant --headless```
```python rlg_train.py --task Ant --play --checkpoint nn/Ant.pth --num_envs 100```
Humanoid
```python rlg_train.py --task Humanoid --headless```
```python rlg_train.py --task Humanoid --play --checkpoint nn/Humanoid.pth --num_envs 100```
Shadow Hand block orientation task
```python rlg_train.py --task ShadowHand --headless```
```python rlg_train.py --task ShadowHand --play --checkpoint nn/ShadowHand.pth --num_envs 100```
**Atari Pong**
```python runner.py --train --file rl_games/configs/atari/ppo_pong.yaml```
```python runner.py --play --file rl_games/configs/atari/ppo_pong.yaml --checkpoint nn/PongNoFrameskip.pth```
**Brax Ant**
```python runner.py --train --file rl_games/configs/brax/ppo_ant.yaml```
```python runner.py --play --file rl_games/configs/atari/ppo_ant.yaml --checkpoint nn/Ant_brax.pth```
# Release Notes
1.1.0
* Added to pypi: ```pip install rl-games```
* Added reporting env (sim) step fps, without policy inference. Improved naming.
* Renames in yaml config for better readability: steps_num to horizon_length and lr_threshold to kl_threshold
# Troubleshooting
* Some of the supported envs are not installed with setup.py; you need to install them manually
* Starting from rl-games 1.1.0, old yaml configs won't be compatible with the new version:
 * ```steps_num``` should be changed to ```horizon_length``` and ```lr_threshold``` to ```kl_threshold```
| 3,558 | Markdown | 35.690721 | 151 | 0.737493 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/tests/simple_test.py | import pytest
def test_true():
assert True | 48 | Python | 8.799998 | 16 | 0.6875 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/docs/SMAC.md | ## Starcraft 2 Multiple Agents Results
* Starcraft 2 Multiple Agents Results with PPO (https://github.com/oxwhirl/smac)
* Every agent was controlled independently and had restricted information
* All the environments were trained with a default difficulty level 7
* No curriculum, just baseline PPO
* Full state information wasn't used for the critic; actor and critic received the same agent observations
* Most results have a significantly better win rate, and training on a single PC was much faster than QMIX (https://arxiv.org/pdf/1902.04043.pdf), MAVEN (https://arxiv.org/pdf/1910.07483.pdf) or QTRAN
* No hyperparameter search
* 4 frames + conv1d actor-critic network
* Miniepoch num was set to 1; higher numbers didn't work
* Simple MLP networks did not work well on hard envs
[](https://www.youtube.com/watch?v=F_IfFz-s-iQ)
# How to run configs:
# Pytorch
* ```python runner.py --train --file rl_games/configs/smac/3m_torch.yaml```
* ```python runner.py --play --file rl_games/configs/smac/3m_torch.yaml --checkpoint 'nn/3m_cnn'```
# Tensorflow
* ```python runner.py --tf --train --file rl_games/configs/smac/3m_torch.yaml```
* ```python runner.py --tf --play --file rl_games/configs/smac/3m_torch.yaml --checkpoint 'nn/3m_cnn'```
* ```tensorboard --logdir runs```
# Results on some environments:
* 2m_vs_1z took nearly 2 minutes to achieve 100% WR
* corridor took nearly 2 hours for 95+% WR
* MMM2 took 4 hours for 90+% WR
* 6h_vs_8z got 82% WR after 8 hours of training
* 5m_vs_6m got 72% WR after 8 hours of training
# Plots:
FPS in these plots is calculated on a per-env basis, except for MMM2 (where it was scaled by the number of agents, which is 10). To get win rate as a function of the number of environment steps, the same as used in the plots of the QMIX, MAVEN, QTRAN or Deep Coordination Graphs (https://arxiv.org/pdf/1910.00091.pdf) papers, the FPS numbers under the horizontal axis should be divided by the number of agents in the player's team.
* 2m_vs_1z:

* 3s5z_vs_3s6z:

* 3s_vs_5z:

* corridor:

* 5m_vs_6m:

* MMM2:

| 2,266 | Markdown | 48.282608 | 384 | 0.735658 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/docs/OTHER.md | ## Old Tensorflow results
* Double dueling DQN vs DQN with the same parameters

Nearly 90 minutes to learn with this setup.
* Different DQN Configurations tests
Light grey is noisy 1-step dddqn.
Noisy 3-step dddqn was even faster.
The best network (configuration 5) needs nearly 20 minutes to learn on an NVIDIA 1080.
Currently the best setup for Pong is a noisy 3-step double dueling network.
Different experiments can be found in pong_runs.py.
Less than 200k frames to reach a score > 18.

DQN has more optimistic Q value estimations.
# Other Games Results
These results are not stable. These are just the best games; for good average results you need to train the network for more than 10 million steps.
Some games need 50m steps.
* 5 million frames two step noisy double dueling dqn:
[](https://youtu.be/Lu9Cm9K_6ms)
* Random lucky game in Space Invaders after less than one hour of learning:
[](https://www.youtube.com/watch?v=LO0RL437rh4)
# A2C and PPO Results
* More than 2 hours for Pong to achieve a score of 20 with one actor playing.
* 8 Hours for Supermario lvl1
[](https://www.youtube.com/watch?v=T9ujS3HIvMY)
* PPO with LSTM layers
[](https://www.youtube.com/watch?v=fjY4AWbmhHg)
 | 1,627 | Markdown | 36.860464 | 124 | 0.75968 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/docs/BRAX.md | # Brax (https://github.com/google/brax)
## How to run:
* **Ant** ```python runner.py --train --file rl_games/configs/brax/ppo_ant.yaml```
* **Humanoid** ```python runner.py --train --file rl_games/configs/brax/ppo_humanoid.yaml```
## Visualization:
* run **brax_visualization.ipynb**
## Results:
* **Ant** fps step: 1692066.6 fps total: 885603.1

* **Humanoid** fps step: 1244450.3 fps total: 661064.5

* **ur5e** fps step: 1116872.3 fps total: 627117.0


 | 672 | Markdown | 34.421051 | 92 | 0.671131 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/docs/ISAAC_GYM.md | ## Isaac Gym Results
https://developer.nvidia.com/isaac-gym
Coming.
| 69 | Markdown | 12.999997 | 38 | 0.753623 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/docs/CONFIG_PARAMS.md | # Yaml Config Description
Coming.
| 37 | Markdown | 8.499998 | 27 | 0.72973 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/torch_runner.py | import numpy as np
import copy
import torch
import yaml
from rl_games import envs
from rl_games.common import object_factory
from rl_games.common import env_configurations
from rl_games.common import experiment
from rl_games.common import tr_helpers
from rl_games.algos_torch import network_builder
from rl_games.algos_torch import model_builder
from rl_games.algos_torch import a2c_continuous
from rl_games.algos_torch import a2c_discrete
from rl_games.algos_torch import players
from rl_games.common.algo_observer import DefaultAlgoObserver
from rl_games.algos_torch import sac_agent
class Runner:
def __init__(self, algo_observer=None):
self.algo_factory = object_factory.ObjectFactory()
self.algo_factory.register_builder('a2c_continuous', lambda **kwargs : a2c_continuous.A2CAgent(**kwargs))
self.algo_factory.register_builder('a2c_discrete', lambda **kwargs : a2c_discrete.DiscreteA2CAgent(**kwargs))
self.algo_factory.register_builder('sac', lambda **kwargs: sac_agent.SACAgent(**kwargs))
#self.algo_factory.register_builder('dqn', lambda **kwargs : dqnagent.DQNAgent(**kwargs))
self.player_factory = object_factory.ObjectFactory()
self.player_factory.register_builder('a2c_continuous', lambda **kwargs : players.PpoPlayerContinuous(**kwargs))
self.player_factory.register_builder('a2c_discrete', lambda **kwargs : players.PpoPlayerDiscrete(**kwargs))
self.player_factory.register_builder('sac', lambda **kwargs : players.SACPlayer(**kwargs))
#self.player_factory.register_builder('dqn', lambda **kwargs : players.DQNPlayer(**kwargs))
self.model_builder = model_builder.ModelBuilder()
self.network_builder = network_builder.NetworkBuilder()
self.algo_observer = algo_observer
torch.backends.cudnn.benchmark = True
def reset(self):
pass
def load_config(self, params):
self.seed = params.get('seed', None)
self.algo_params = params['algo']
self.algo_name = self.algo_params['name']
self.load_check_point = params['load_checkpoint']
self.exp_config = None
if self.seed:
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
np.random.seed(self.seed)
if self.load_check_point:
print('Found checkpoint')
print(params['load_path'])
self.load_path = params['load_path']
self.model = self.model_builder.load(params)
self.config = copy.deepcopy(params['config'])
self.config['reward_shaper'] = tr_helpers.DefaultRewardsShaper(**self.config['reward_shaper'])
self.config['network'] = self.model
self.config['logdir'] = params['general'].get('logdir', './')
has_rnd_net = self.config.get('rnd_config', None) != None
if has_rnd_net:
print('Adding RND Network')
network = self.model_builder.network_factory.create(params['config']['rnd_config']['network']['name'])
network.load(params['config']['rnd_config']['network'])
self.config['rnd_config']['network'] = network
has_central_value_net = self.config.get('central_value_config', None) != None
if has_central_value_net:
print('Adding Central Value Network')
network = self.model_builder.network_factory.create(params['config']['central_value_config']['network']['name'])
network.load(params['config']['central_value_config']['network'])
self.config['central_value_config']['network'] = network
def load(self, yaml_conf):
self.default_config = yaml_conf['params']
self.load_config(copy.deepcopy(self.default_config))
if 'experiment_config' in yaml_conf:
self.exp_config = yaml_conf['experiment_config']
def get_prebuilt_config(self):
return self.config
def run_train(self):
print('Started to train')
if self.algo_observer is None:
self.algo_observer = DefaultAlgoObserver()
if self.exp_config:
self.experiment = experiment.Experiment(self.default_config, self.exp_config)
exp_num = 0
exp = self.experiment.get_next_config()
while exp is not None:
exp_num += 1
print('Starting experiment number: ' + str(exp_num))
self.reset()
self.load_config(exp)
if 'features' not in self.config:
self.config['features'] = {}
self.config['features']['observer'] = self.algo_observer
#if 'soft_augmentation' in self.config['features']:
# self.config['features']['soft_augmentation'] = SoftAugmentation(**self.config['features']['soft_augmentation'])
agent = self.algo_factory.create(self.algo_name, base_name='run', config=self.config)
self.experiment.set_results(*agent.train())
exp = self.experiment.get_next_config()
else:
self.reset()
self.load_config(self.default_config)
if 'features' not in self.config:
self.config['features'] = {}
self.config['features']['observer'] = self.algo_observer
#if 'soft_augmentation' in self.config['features']:
# self.config['features']['soft_augmentation'] = SoftAugmentation(**self.config['features']['soft_augmentation'])
agent = self.algo_factory.create(self.algo_name, base_name='run', config=self.config)
if self.load_check_point and (self.load_path is not None):
agent.restore(self.load_path)
agent.train()
def create_player(self):
return self.player_factory.create(self.algo_name, config=self.config)
def create_agent(self, obs_space, action_space):
return self.algo_factory.create(self.algo_name, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
def run(self, args):
if 'checkpoint' in args and args['checkpoint'] is not None:
if len(args['checkpoint']) > 0:
self.load_path = args['checkpoint']
if args['train']:
self.run_train()
elif args['play']:
print('Started to play')
player = self.create_player()
player.restore(self.load_path)
player.run()
else:
self.run_train() | 6,556 | Python | 43.304054 | 148 | 0.624619 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/tf14_runner.py | import tensorflow as tf
import numpy as np
import yaml
import ray
import copy
from rl_games.common import object_factory
from rl_games.common import env_configurations
from rl_games.common import experiment
from rl_games.common import tr_helpers
from rl_games.algos_tf14 import network_builder
from rl_games.algos_tf14 import model_builder
from rl_games.algos_tf14 import a2c_continuous
from rl_games.algos_tf14 import a2c_discrete
from rl_games.algos_tf14 import dqnagent
from rl_games.algos_tf14 import players
class Runner:
def __init__(self):
self.algo_factory = object_factory.ObjectFactory()
self.algo_factory.register_builder('a2c_continuous', lambda **kwargs : a2c_continuous.A2CAgent(**kwargs))
self.algo_factory.register_builder('a2c_discrete', lambda **kwargs : a2c_discrete.A2CAgent(**kwargs))
self.algo_factory.register_builder('dqn', lambda **kwargs : dqnagent.DQNAgent(**kwargs))
self.player_factory = object_factory.ObjectFactory()
self.player_factory.register_builder('a2c_continuous', lambda **kwargs : players.PpoPlayerContinuous(**kwargs))
self.player_factory.register_builder('a2c_discrete', lambda **kwargs : players.PpoPlayerDiscrete(**kwargs))
self.player_factory.register_builder('dqn', lambda **kwargs : players.DQNPlayer(**kwargs))
self.model_builder = model_builder.ModelBuilder()
self.network_builder = network_builder.NetworkBuilder()
self.sess = None
def reset(self):
gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.8)
config = tf.ConfigProto(gpu_options=gpu_options)
tf.reset_default_graph()
if self.sess:
self.sess.close()
self.sess = tf.InteractiveSession(config=config)
def load_config(self, params):
self.seed = params.get('seed', None)
self.algo_params = params['algo']
self.algo_name = self.algo_params['name']
self.load_check_point = params['load_checkpoint']
self.exp_config = None
if self.seed:
tf.set_random_seed(self.seed)
np.random.seed(self.seed)
if self.load_check_point:
self.load_path = params['load_path']
self.model = self.model_builder.load(params)
self.config = copy.deepcopy(params['config'])
self.config['reward_shaper'] = tr_helpers.DefaultRewardsShaper(**self.config['reward_shaper'], is_torch=False)
self.config['network'] = self.model
def load(self, yaml_conf):
self.default_config = yaml_conf['params']
self.load_config(copy.deepcopy(self.default_config))
if 'experiment_config' in yaml_conf:
self.exp_config = yaml_conf['experiment_config']
def get_prebuilt_config(self):
return self.config
def run_train(self):
print('Started to train')
ray.init(object_store_memory=1024*1024*1000)
shapes = env_configurations.get_obs_and_action_spaces_from_config(self.config)
obs_space = shapes['observation_space']
action_space = shapes['action_space']
print('obs_space:', obs_space)
print('action_space:', action_space)
if self.exp_config:
self.experiment = experiment.Experiment(self.default_config, self.exp_config)
exp_num = 0
exp = self.experiment.get_next_config()
while exp is not None:
exp_num += 1
print('Starting experiment number: ' + str(exp_num))
self.reset()
self.load_config(exp)
agent = self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
self.experiment.set_results(*agent.train())
exp = self.experiment.get_next_config()
else:
self.reset()
self.load_config(self.default_config)
agent = self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
if self.load_check_point or (self.load_path is not None):
agent.restore(self.load_path)
agent.train()
def create_player(self):
return self.player_factory.create(self.algo_name, sess=self.sess, config=self.config)
def create_agent(self, obs_space, action_space):
return self.algo_factory.create(self.algo_name, sess=self.sess, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
def run(self, args):
if 'checkpoint' in args:
self.load_path = args['checkpoint']
if args['train']:
self.run_train()
elif args['play']:
print('Started to play')
player = self.player_factory.create(self.algo_name, sess=self.sess, config=self.config)
player.restore(self.load_path)
player.run()
ray.shutdown()
| 5,099 | Python | 39.8 | 175 | 0.643656 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/test_network.py | import torch
from torch import nn
import torch.nn.functional as F
class TestNet(nn.Module):
def __init__(self, params, **kwargs):
nn.Module.__init__(self)
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
num_inputs = 0
assert(type(input_shape) is dict)
for k,v in input_shape.items():
num_inputs +=v[0]
self.central_value = params.get('central_value', False)
self.value_size = kwargs.pop('value_size', 1)
self.linear1 = nn.Linear(num_inputs, 256)
self.linear2 = nn.Linear(256, 128)
self.linear3 = nn.Linear(128, 64)
self.mean_linear = nn.Linear(64, actions_num)
self.value_linear = nn.Linear(64, 1)
def is_rnn(self):
return False
def forward(self, obs):
obs = obs['obs']
obs = torch.cat([obs['pos'], obs['info']], axis=-1)
x = F.relu(self.linear1(obs))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
action = self.mean_linear(x)
value = self.value_linear(x)
if self.central_value:
return value, None
return action, value, None
from rl_games.algos_torch.network_builder import NetworkBuilder
class TestNetBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
def build(self, name, **kwargs):
return TestNet(self.params, **kwargs)
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
| 1,596 | Python | 28.036363 | 63 | 0.589599 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/smac_env.py | import gym
import numpy as np
from smac.env import StarCraft2Env
class SMACEnv(gym.Env):
def __init__(self, name="3m", **kwargs):
gym.Env.__init__(self)
self.seed = kwargs.pop('seed', None)
self.reward_sparse = kwargs.get('reward_sparse', False)
self.use_central_value = kwargs.pop('central_value', False)
self.random_invalid_step = kwargs.pop('random_invalid_step', False)
self.replay_save_freq = kwargs.pop('replay_save_freq', 10000)
self.apply_agent_ids = kwargs.pop('apply_agent_ids', False)
self.env = StarCraft2Env(map_name=name, seed=self.seed, **kwargs)
self.env_info = self.env.get_env_info()
self._game_num = 0
self.n_actions = self.env_info["n_actions"]
self.n_agents = self.env_info["n_agents"]
self.action_space = gym.spaces.Discrete(self.n_actions)
one_hot_agents = 0
if self.apply_agent_ids:
one_hot_agents = self.n_agents
self.observation_space = gym.spaces.Box(low=0, high=1, shape=(self.env_info['obs_shape']+one_hot_agents, ), dtype=np.float32)
self.state_space = gym.spaces.Box(low=0, high=1, shape=(self.env_info['state_shape'], ), dtype=np.float32)
self.obs_dict = {}
def _preproc_state_obs(self, state, obs):
# todo: remove from self
if self.apply_agent_ids:
num_agents = self.n_agents
obs = np.array(obs)
all_ids = np.eye(num_agents, dtype=np.float32)
obs = np.concatenate([obs, all_ids], axis=-1)
self.obs_dict["obs"] = np.array(obs)
self.obs_dict["state"] = np.array(state)
if self.use_central_value:
return self.obs_dict
else:
return self.obs_dict["obs"]
def get_number_of_agents(self):
return self.n_agents
def reset(self):
if self._game_num % self.replay_save_freq == 1:
print('saving replay')
self.env.save_replay()
self._game_num += 1
obs, state = self.env.reset() # rename, to think remove
obs_dict = self._preproc_state_obs(state, obs)
return obs_dict
def _preproc_actions(self, actions):
actions = actions.copy()
rewards = np.zeros_like(actions)
mask = self.get_action_mask()
for ind, action in enumerate(actions, start=0):
avail_actions = np.nonzero(mask[ind])[0]
if action not in avail_actions:
actions[ind] = np.random.choice(avail_actions)
#rewards[ind] = -0.05
return actions, rewards
def step(self, actions):
fixed_rewards = None
if self.random_invalid_step:
actions, fixed_rewards = self._preproc_actions(actions)
reward, done, info = self.env.step(actions)
if done:
battle_won = info.get('battle_won', False)
if not battle_won and self.reward_sparse:
reward = -1.0
obs = self.env.get_obs()
state = self.env.get_state()
obses = self._preproc_state_obs(state, obs)
rewards = np.repeat (reward, self.n_agents)
dones = np.repeat (done, self.n_agents)
if fixed_rewards is not None:
rewards += fixed_rewards
return obses, rewards, dones, info
def get_action_mask(self):
return np.array(self.env.get_avail_actions(), dtype=np.bool)
def has_action_mask(self):
return not self.random_invalid_step
| 3,500 | Python | 34.01 | 133 | 0.587714 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/connect4_selfplay.py | import gym
import numpy as np
from pettingzoo.classic import connect_four_v0
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
class ConnectFourSelfPlay(gym.Env):
def __init__(self, name="connect_four_v0", **kwargs):
gym.Env.__init__(self)
self.name = name
self.is_determenistic = kwargs.pop('is_determenistic', False)
self.is_human = kwargs.pop('is_human', False)
self.random_agent = kwargs.pop('random_agent', False)
self.config_path = kwargs.pop('config_path')
self.agent = None
self.env = connect_four_v0.env()#gym.make(name, **kwargs)
self.action_space = self.env.action_spaces['player_0']
observation_space = self.env.observation_spaces['player_0']
shp = observation_space.shape
self.observation_space = gym.spaces.Box(low=0, high=1, shape=(shp[:-1] + (shp[-1] * 2,)), dtype=np.uint8)
self.obs_deque = deque([], maxlen=2)
self.agent_id = 0
def _get_legal_moves(self, agent_id):
name = 'player_0' if agent_id == 0 else 'player_1'
action_ids = self.env.infos[name]['legal_moves']
mask = np.zeros(self.action_space.n, dtype = np.bool)
mask[action_ids] = True
return mask, action_ids
def env_step(self, action):
obs = self.env.step(action)
info = {}
name = 'player_0' if self.agent_id == 0 else 'player_1'
reward = self.env.rewards[name]
done = self.env.dones[name]
return obs, reward, done, info
def get_obs(self):
return np.concatenate(self.obs_deque,-1).astype(np.uint8) * 255
def reset(self):
if self.agent == None:
self.create_agent(self.config_path)
self.agent_id = np.random.randint(2)
obs = self.env.reset()
self.obs_deque.append(obs)
self.obs_deque.append(obs)
if self.agent_id == 1:
op_obs = self.get_obs()
op_obs = self.agent.obs_to_torch(op_obs)
mask, ids = self._get_legal_moves(0)
if self.is_human:
self.render()
opponent_action = int(input())
else:
if self.random_agent:
opponent_action = np.random.choice(ids, 1)[0]
else:
opponent_action = self.agent.get_masked_action(op_obs, mask, self.is_determenistic).item()
obs, _, _, _ = self.env_step(opponent_action)
self.obs_deque.append(obs)
return self.get_obs()
def create_agent(self, config):
with open(config, 'r') as stream:
config = yaml.safe_load(stream)
runner = Runner()
runner.load(config)
config = runner.get_prebuilt_config()
#'RAYLIB has bug here, CUDA_VISIBLE_DEVICES become unset'
if 'CUDA_VISIBLE_DEVICES' in os.environ:
os.environ.pop('CUDA_VISIBLE_DEVICES')
self.agent = runner.create_player()
self.agent.model.eval()
def step(self, action):
obs, reward, done, info = self.env_step(action)
self.obs_deque.append(obs)
if done:
if reward == 1:
info['battle_won'] = 1
else:
info['battle_won'] = 0
return self.get_obs(), reward, done, info
op_obs = self.get_obs()
op_obs = self.agent.obs_to_torch(op_obs)
mask, ids = self._get_legal_moves(1-self.agent_id)
if self.is_human:
self.render()
opponent_action = int(input())
else:
if self.random_agent:
opponent_action = np.random.choice(ids, 1)[0]
else:
opponent_action = self.agent.get_masked_action(op_obs, mask, self.is_determenistic).item()
obs, reward, done,_ = self.env_step(opponent_action)
if done:
if reward == -1:
info['battle_won'] = 0
else:
info['battle_won'] = 1
self.obs_deque.append(obs)
return self.get_obs(), reward, done, info
def render(self, mode='ansi'):
self.env.render(mode)
def update_weights(self, weigths):
self.agent.set_weights(weigths)
def get_action_mask(self):
mask, _ = self._get_legal_moves(self.agent_id)
return mask
def has_action_mask(self):
return True | 4,505 | Python | 33.396946 | 113 | 0.552719 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/__init__.py |
from rl_games.envs.connect4_network import ConnectBuilder
from rl_games.envs.test_network import TestNetBuilder
from rl_games.algos_torch import model_builder
model_builder.register_network('connect4net', ConnectBuilder)
model_builder.register_network('testnet', TestNetBuilder) | 282 | Python | 30.444441 | 61 | 0.833333 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/connect4_network.py | import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
def __init__(self):
super(ConvBlock, self).__init__()
self.action_size = 7
self.conv1 = nn.Conv2d(4, 128, 3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(128)
def forward(self, s):
s = s['obs'].contiguous()
#s = s.view(-1, 3, 6, 7) # batch_size x channels x board_x x board_y
s = F.relu(self.bn1(self.conv1(s)))
return s
class ResBlock(nn.Module):
def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
def forward(self, x):
residual = x
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += residual
out = F.relu(out)
return out
class OutBlock(nn.Module):
def __init__(self):
super(OutBlock, self).__init__()
self.conv = nn.Conv2d(128, 3, kernel_size=1) # value head
self.bn = nn.BatchNorm2d(3)
self.fc1 = nn.Linear(3*6*7, 32)
self.fc2 = nn.Linear(32, 1)
self.conv1 = nn.Conv2d(128, 32, kernel_size=1) # policy head
self.bn1 = nn.BatchNorm2d(32)
self.fc = nn.Linear(6*7*32, 7)
def forward(self,s):
v = F.relu(self.bn(self.conv(s))) # value head
v = v.view(-1, 3*6*7) # batch_size X channel X height X width
v = F.relu(self.fc1(v))
v = F.relu(self.fc2(v))
v = torch.tanh(v)
p = F.relu(self.bn1(self.conv1(s))) # policy head
p = p.view(-1, 6*7*32)
p = self.fc(p)
return p, v, None
class ConnectNet(nn.Module):
def __init__(self, blocks):
super(ConnectNet, self).__init__()
self.blocks = blocks
self.conv = ConvBlock()
for block in range(self.blocks):
setattr(self, "res_%i" % block,ResBlock())
self.outblock = OutBlock()
def is_rnn(self):
return False
def forward(self,s):
s = s.permute((0, 3, 1, 2))
s = self.conv(s)
for block in range(self.blocks):
s = getattr(self, "res_%i" % block)(s)
s = self.outblock(s)
return s
from rl_games.algos_torch.network_builder import NetworkBuilder
class ConnectBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
self.blocks = params['blocks']
def build(self, name, **kwargs):
return ConnectNet(self.blocks)
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
| 2,992 | Python | 28.93 | 78 | 0.558489 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/brax.py |
from rl_games.common.ivecenv import IVecEnv
import gym
import numpy as np
import torch
import torch.utils.dlpack as tpack
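# The two helpers below move buffers between JAX and PyTorch through DLPack, so
# observations and actions can stay on the GPU without a host copy. Rough usage (a sketch):
# t = jax_to_torch(jax_array); j = torch_to_jax(t).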
def jax_to_torch(tensor):
from jax._src.dlpack import (to_dlpack,)
tensor = to_dlpack(tensor)
tensor = tpack.from_dlpack(tensor)
return tensor
def torch_to_jax(tensor):
from jax._src.dlpack import (from_dlpack,)
tensor = tpack.to_dlpack(tensor)
tensor = from_dlpack(tensor)
return tensor
class BraxEnv(IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
import brax
from brax import envs
import jax
import jax.numpy as jnp
self.batch_size = num_actors
env_fn = envs.create_fn(env_name=kwargs.pop('env_name', 'ant'))
self.env = env_fn(
action_repeat=1,
batch_size=num_actors,
episode_length=kwargs.pop('episode_length', 1000))
obs_high = np.inf * np.ones(self.env.observation_size)
self.observation_space = gym.spaces.Box(-obs_high, obs_high, dtype=np.float32)
action_high = np.ones(self.env.action_size)
self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
def step(first_state, state, action):
def test_done(a, b):
if a is first_state.done or a is first_state.metrics or a is first_state.reward:
return b
test_shape = [a.shape[0],] + [1 for _ in range(len(a.shape) - 1)]
return jnp.where(jnp.reshape(state.done, test_shape), a, b)
state = self.env.step(state, action)
state = jax.tree_multimap(test_done, first_state, state)
return state, state.obs, state.reward, state.done, {}
def reset(key):
state = self.env.reset(key)
return state, state.obs
self._reset = jax.jit(reset, backend='gpu')
self._step = jax.jit(step, backend='gpu')
def step(self, action):
action = torch_to_jax(action)
self.state, next_obs, reward, is_done, info = self._step(self.first_state, self.state, action)
#next_obs = np.asarray(next_obs).astype(np.float32)
#reward = np.asarray(reward).astype(np.float32)
#is_done = np.asarray(is_done).astype(np.long)
next_obs = jax_to_torch(next_obs)
reward = jax_to_torch(reward)
is_done = jax_to_torch(is_done)
return next_obs, reward, is_done, info
def reset(self):
import jax
import jax.numpy as jnp
rng = jax.random.PRNGKey(seed=0)
rng = jax.random.split(rng, self.batch_size)
self.first_state, _ = self._reset(rng)
self.state, obs = self._reset(rng)
#obs = np.asarray(obs).astype(np.float32)
return jax_to_torch(obs)
def get_number_of_agents(self):
return 1
def get_env_info(self):
info = {}
info['action_space'] = self.action_space
info['observation_space'] = self.observation_space
return info
def create_brax_env(**kwargs):
return BraxEnv("", kwargs.pop('num_actors', 256), **kwargs)
| 3,131 | Python | 32.677419 | 102 | 0.600767 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/multiwalker.py | import gym
import numpy as np
from pettingzoo.sisl import multiwalker_v6
import yaml
from rl_games.torch_runner import Runner
import os
from collections import deque
import rl_games.envs.connect4_network
class MultiWalker(gym.Env):
def __init__(self, name="multiwalker", **kwargs):
gym.Env.__init__(self)
self.name = name
self.env = multiwalker_v6.parallel_env()
self.use_central_value = kwargs.pop('central_value', False)
self.use_prev_actions = kwargs.pop('use_prev_actions', False)
self.apply_agent_ids = kwargs.pop('apply_agent_ids', False)
self.add_timeouts = kwargs.pop('add_timeouts', False)
self.action_space = self.env.action_spaces['walker_0']
self.steps_count = 0
obs_len = self.env.observation_spaces['walker_0'].shape[0]
add_obs = 0
if self.apply_agent_ids:
add_obs = 3
if self.use_prev_actions:
obs_len += self.action_space.shape[0]
self.observation_space = gym.spaces.Box(-1, 1, shape =(obs_len + add_obs,))
if self.use_central_value:
self.state_space = gym.spaces.Box(-1, 1, shape =(obs_len*3,))
def step(self, action):
self.steps_count += 1
actions = {'walker_0' : action[0], 'walker_1' : action[1], 'walker_2' : action[2],}
obs, reward, done, info = self.env.step(actions)
if self.use_prev_actions:
obs = {
k: np.concatenate([v, actions[k]]) for k,v in obs.items()
}
obses = np.stack([obs['walker_0'], obs['walker_1'], obs['walker_2']])
rewards = np.stack([reward['walker_0'], reward['walker_1'], reward['walker_2']])
dones = np.stack([done['walker_0'], done['walker_1'], done['walker_2']])
if self.apply_agent_ids:
num_agents = 3
all_ids = np.eye(num_agents, dtype=np.float32)
obses = np.concatenate([obses, all_ids], axis=-1)
if self.use_central_value:
states = np.concatenate([obs['walker_0'], obs['walker_1'], obs['walker_2']])
obses = {
'obs' : obses,
'state': states
}
return obses, rewards, dones, info
def reset(self):
obs = self.env.reset()
self.steps_count = 0
if self.use_prev_actions:
zero_actions = np.zeros(self.action_space.shape[0])
obs = {
k: np.concatenate([v, zero_actions]) for k,v in obs.items()
}
obses = np.stack([obs['walker_0'], obs['walker_1'], obs['walker_2']])
if self.apply_agent_ids:
num_agents = 3
all_ids = np.eye(num_agents, dtype=np.float32)
obses = np.concatenate([obses, all_ids], axis=-1)
if self.use_central_value:
states = np.concatenate([obs['walker_0'], obs['walker_1'], obs['walker_2']])
obses = {
'obs' : obses,
'state': states
}
return obses
def render(self, mode='ansi'):
self.env.render(mode)
def get_number_of_agents(self):
return 3
def has_action_mask(self):
return False | 3,195 | Python | 37.047619 | 91 | 0.554617 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/slimevolley_selfplay.py | import gym
import numpy as np
import slimevolleygym
import yaml
from rl_games.torch_runner import Runner
import os
class SlimeVolleySelfplay(gym.Env):
def __init__(self, name="SlimeVolleyDiscrete-v0", **kwargs):
gym.Env.__init__(self)
self.name = name
self.is_determenistic = kwargs.pop('is_determenistic', False)
self.config_path = kwargs.pop('config_path')
self.agent = None
self.pos_scale = 1
self.neg_scale = kwargs.pop('neg_scale', 1)
self.sum_rewards = 0
self.env = gym.make(name, **kwargs)
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
def reset(self):
if self.agent == None:
self.create_agent(self.config_path)
obs = self.env.reset()
self.opponent_obs = obs
self.sum_rewards = 0
return obs
def create_agent(self, config='rl_games/configs/ma/ppo_slime_self_play.yaml'):
with open(config, 'r') as stream:
config = yaml.safe_load(stream)
runner = Runner()
from rl_games.common.env_configurations import get_env_info
config['params']['config']['env_info'] = get_env_info(self)
runner.load(config)
config = runner.get_prebuilt_config()
# RAYLIB has a bug here: CUDA_VISIBLE_DEVICES becomes unset
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
self.agent = runner.create_player()
def step(self, action):
op_obs = self.agent.obs_to_torch(self.opponent_obs)
opponent_action = self.agent.get_action(op_obs, self.is_determenistic).item()
obs, reward, done, info = self.env.step(action, opponent_action)
self.sum_rewards += reward
if reward < 0:
reward = reward * self.neg_scale
self.opponent_obs = info['otherObs']
if done:
info['battle_won'] = np.sign(self.sum_rewards)
return obs, reward, done, info
def render(self,mode):
self.env.render(mode)
def update_weights(self, weigths):
self.agent.set_weights(weigths)
| 2,148 | Python | 32.578124 | 85 | 0.607542 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/test/__init__.py | import gym
gym.envs.register(
id='TestRnnEnv-v0',
entry_point='rl_games.envs.test.rnn_env:TestRNNEnv',
max_episode_steps=100500,
)
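# Example usage (a sketch): after registration the env is created with standard gym kwargs,
# e.g. gym.make('TestRnnEnv-v0', hide_object=True, apply_dist_reward=True); the kwargs are
# forwarded to TestRNNEnv.__init__ in rl_games/envs/test/rnn_env.py.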
gym.envs.register(
id='TestAsymmetricEnv-v0',
entry_point='rl_games.envs.test.test_asymmetric_env:TestAsymmetricCritic'
) | 279 | Python | 22.333331 | 78 | 0.709677 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/test/rnn_env.py | import gym
import numpy as np
class TestRNNEnv(gym.Env):
def __init__(self, **kwargs):
gym.Env.__init__(self)
self.obs_dict = {}
self.max_steps = kwargs.pop('max_steps', 21)
self.show_time = kwargs.pop('show_time', 1)
self.min_dist = kwargs.pop('min_dist', 2)
self.max_dist = kwargs.pop('max_dist', 8)
self.hide_object = kwargs.pop('hide_object', False)
self.use_central_value = kwargs.pop('use_central_value', False)
self.apply_dist_reward = kwargs.pop('apply_dist_reward', False)
self.apply_exploration_reward = kwargs.pop('apply_exploration_reward', False)
self.multi_head_value = kwargs.pop('multi_head_value', False)
if self.multi_head_value:
self.value_size = 2
else:
self.value_size = 1
self.multi_discrete_space = kwargs.pop('multi_discrete_space', False)
if self.multi_discrete_space:
self.action_space = gym.spaces.Tuple([gym.spaces.Discrete(2),gym.spaces.Discrete(3)])
else:
self.action_space = gym.spaces.Discrete(4)
self.multi_obs_space = kwargs.pop('multi_obs_space', False)
if self.multi_obs_space:
spaces = {
'pos': gym.spaces.Box(low=0, high=1, shape=(2, ), dtype=np.float32),
'info': gym.spaces.Box(low=0, high=1, shape=(4, ), dtype=np.float32),
}
self.observation_space = gym.spaces.Dict(spaces)
else:
self.observation_space = gym.spaces.Box(low=0, high=1, shape=(6, ), dtype=np.float32)
self.state_space = self.observation_space
if self.apply_exploration_reward:
pass
self.reset()
def get_number_of_agents(self):
return 1
def reset(self):
self._curr_steps = 0
self._current_pos = [0,0]
bound = self.max_dist - self.min_dist
rand_dir = - 2 * np.random.randint(0, 2, (2,)) + 1
self._goal_pos = rand_dir * np.random.randint(self.min_dist, self.max_dist+1, (2,))
obs = np.concatenate([self._current_pos, self._goal_pos, [1, 0]], axis=None)
obs = obs.astype(np.float32)
if self.multi_obs_space:
obs = {
'pos': obs[:2],
'info': obs[2:]
}
if self.use_central_value:
obses = {}
obses["obs"] = obs
obses["state"] = obs
else:
obses = obs
return obses
def step_categorical(self, action):
if self._curr_steps > 1:
if action == 0:
self._current_pos[0] += 1
if action == 1:
self._current_pos[0] -= 1
if action == 2:
self._current_pos[1] += 1
if action == 3:
self._current_pos[1] -= 1
def step_multi_categorical(self, action):
if self._curr_steps > 1:
if action[0] == 0:
self._current_pos[0] += 1
if action[0] == 1:
self._current_pos[0] -= 1
if action[1] == 0:
self._current_pos[1] += 1
if action[1] == 1:
self._current_pos[1] -= 1
if action[1] == 2:
pass
def step(self, action):
info = {}
self._curr_steps += 1
if self.multi_discrete_space:
self.step_multi_categorical(action)
else:
self.step_categorical(action)
reward = [0.0, 0.0]
done = False
dist = self._current_pos - self._goal_pos
if (dist**2).sum() < 0.0001:
reward[0] = 1.0
info = {'scores' : 1}
done = True
elif self._curr_steps == self.max_steps:
info = {'scores' : 0}
done = True
dist_coef = -0.1
if self.apply_dist_reward:
reward[1] = dist_coef * np.abs(dist).sum() / self.max_dist
show_object = 0
if self.hide_object:
obs = np.concatenate([self._current_pos, [0,0], [show_object, self._curr_steps]], axis=None)
else:
show_object = 1
obs = np.concatenate([self._current_pos, self._goal_pos, [show_object, self._curr_steps]], axis=None)
obs = obs.astype(np.float32)
#state = state.astype(np.float32)
if self.multi_obs_space:
obs = {
'pos': obs[:2],
'info': obs[2:]
}
if self.use_central_value:
state = np.concatenate([self._current_pos, self._goal_pos, [show_object, self._curr_steps]], axis=None)
obses = {}
obses["obs"] = obs
if self.multi_obs_space:
obses["state"] = {
'pos': state[:2],
'info': state[2:]
}
else:
obses["state"] = state.astype(np.float32)
else:
obses = obs
if self.multi_head_value:
pass
else:
reward = reward[0] + reward[1]
return obses, np.array(reward).astype(np.float32), done, info
def has_action_mask(self):
return False | 5,217 | Python | 34.020134 | 115 | 0.500096 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/test/test_asymmetric_env.py | import gym
import numpy as np
from rl_games.common.wrappers import MaskVelocityWrapper
class TestAsymmetricCritic(gym.Env):
def __init__(self, wrapped_env_name, **kwargs):
gym.Env.__init__(self)
self.apply_mask = kwargs.pop('apply_mask', True)
self.use_central_value = kwargs.pop('use_central_value', True)
self.env = gym.make(wrapped_env_name)
if self.apply_mask:
if wrapped_env_name not in ["CartPole-v1", "Pendulum-v0", "LunarLander-v2", "LunarLanderContinuous-v2"]:
raise ValueError('unsupported env')
self.mask = MaskVelocityWrapper(self.env, wrapped_env_name).mask
else:
self.mask = 1
self.n_agents = 1
self.use_central_value = True
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.state_space = self.env.observation_space
def get_number_of_agents(self):
return self.n_agents
def reset(self):
obs = self.env.reset()
obs_dict = {}
obs_dict["obs"] = obs * self.mask
obs_dict["state"] = obs
if self.use_central_value:
obses = obs_dict
else:
obses = obs_dict["obs"].astype(np.float32)
return obses
def step(self, actions):
obs, rewards, dones, info = self.env.step(actions)
obs_dict = {}
obs_dict["obs"] = obs * self.mask
obs_dict["state"] = obs
if self.use_central_value:
obses = obs_dict
else:
obses = obs_dict["obs"].astype(np.float32)
return obses, rewards, dones, info
def has_action_mask(self):
return False
| 1,715 | Python | 31.377358 | 116 | 0.580758 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/envs/diambra/diambra.py | import gym
import numpy as np
import os
import random
from diambra_environment.diambraGym import diambraGym
from diambra_environment.makeDiambraEnv import make_diambra_env
class DiambraEnv(gym.Env):
def __init__(self, **kwargs):
gym.Env.__init__(self)
self.seed = kwargs.pop('seed', None)
self.difficulty = kwargs.pop('difficulty', 3)
self.env_path = kwargs.pop('env_path', "/home/trrrrr/Documents/github/ml/diambra/DIAMBRAenvironment-main")
self.character = kwargs.pop('character', 'Raidou')
self.frame_stack = kwargs.pop('frame_stack', 3)
self.attacks_buttons = kwargs.pop('attacks_buttons', False)
self._game_num = 0
self.n_agents = 1
self.rank = random.randint(0, 100500)
repo_base_path = os.path.abspath(self.env_path) # Absolute path to your DIAMBRA environment
env_kwargs = {}
env_kwargs["gameId"] = "doapp"
env_kwargs["roms_path"] = os.path.join(repo_base_path, "roms/") # Absolute path to roms
env_kwargs["mame_diambra_step_ratio"] = 6
env_kwargs["render"] = False
env_kwargs["lock_fps"] = False # Locks to 60 FPS
env_kwargs["sound"] = env_kwargs["lock_fps"] and env_kwargs["render"]
env_kwargs["player"] = "Random"
env_kwargs["difficulty"] = self.difficulty
env_kwargs["characters"] = [[self.character, "Random"], [self.character, "Random"]]
env_kwargs["charOutfits"] = [2, 2]
gym_kwargs = {}
gym_kwargs["P2brain"] = None
gym_kwargs["continue_game"] = 0.0
gym_kwargs["show_final"] = False
gym_kwargs["gamePads"] = [None, None]
gym_kwargs["actionSpace"] = ["discrete", "multiDiscrete"]
#gym_kwargs["attackButCombinations"] = [False, False]
gym_kwargs["attackButCombinations"] = [self.attacks_buttons, self.attacks_buttons]
gym_kwargs["actBufLen"] = 12
wrapper_kwargs = {}
wrapper_kwargs["hwc_obs_resize"] = [128, 128, 1]
wrapper_kwargs["normalize_rewards"] = True
wrapper_kwargs["clip_rewards"] = False
wrapper_kwargs["frame_stack"] = self.frame_stack
wrapper_kwargs["dilation"] = 1
wrapper_kwargs["scale"] = True
wrapper_kwargs["scale_mod"] = 0
key_to_add = []
key_to_add.append("actionsBuf")
key_to_add.append("ownHealth")
key_to_add.append("oppHealth")
key_to_add.append("ownPosition")
key_to_add.append("oppPosition")
key_to_add.append("stage")
key_to_add.append("character")
self.env = make_diambra_env(diambraGym, env_prefix="Train" + str(self.rank), seed= self.rank,
diambra_kwargs=env_kwargs,
diambra_gym_kwargs=gym_kwargs,
wrapper_kwargs=wrapper_kwargs,
key_to_add=key_to_add)
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
def _preproc_state_obs(self, obs):
return obs
def reset(self):
self._game_num += 1
obs = self.env.reset() # rename, to think remove
obs_dict = self._preproc_state_obs(obs)
return obs_dict
def step(self, actions):
obs, reward, done, info = self.env.step(actions)
return obs, reward, done, info
def has_action_mask(self):
return False | 3,496 | Python | 38.292134 | 114 | 0.588673 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/torch_ext.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.optimizer import Optimizer
import time
numpy_to_torch_dtype_dict = {
np.dtype('bool') : torch.bool,
np.dtype('uint8') : torch.uint8,
np.dtype('int8') : torch.int8,
np.dtype('int16') : torch.int16,
np.dtype('int32') : torch.int32,
np.dtype('int64') : torch.int64,
np.dtype('float16') : torch.float16,
np.dtype('float32') : torch.float32,
np.dtype('float64') : torch.float64,
np.dtype('complex64') : torch.complex64,
np.dtype('complex128') : torch.complex128,
}
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
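# policy_kl below evaluates the closed-form KL divergence between two diagonal Gaussian
# policies, summed over action dimensions:
#   KL(p0 || p1) = log(sigma1 / sigma0) + (sigma0^2 + (mu0 - mu1)^2) / (2 * sigma1^2) - 1/2
# (the 1e-5 terms in the code are for numerical stability).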
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma, reduce=True):
c1 = torch.log(p1_sigma/p0_sigma + 1e-5)
c2 = (p0_sigma**2 + (p1_mu - p0_mu)**2)/(2.0 * (p1_sigma**2 + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = kl.sum(dim=-1) # returning mean between all steps of sum between all actions
if reduce:
return kl.mean()
else:
return kl
def mean_mask(input, mask, sum_mask):
return (input * mask).sum() / sum_mask
def shape_whc_to_cwh(shape):
#if len(shape) == 2:
# return (shape[1], shape[0])
if len(shape) == 3:
return (shape[2], shape[0], shape[1])
return shape
def safe_filesystem_op(func, *args, **kwargs):
"""
This is to prevent spurious crashes related to saving checkpoints or restoring from checkpoints in a Network
Filesystem environment (i.e. NGC cloud or SLURM)
"""
num_attempts = 5
for attempt in range(num_attempts):
try:
return func(*args, **kwargs)
except Exception as exc:
print(f'Exception {exc} when trying to execute {func} with args:{args} and kwargs:{kwargs}...')
wait_sec = 2 ** attempt
print(f'Waiting {wait_sec} before trying again...')
time.sleep(wait_sec)
raise RuntimeError(f'Could not execute {func}, give up after {num_attempts} attempts...')
def safe_save(state, filename):
return safe_filesystem_op(torch.save, state, filename)
def safe_load(filename):
return safe_filesystem_op(torch.load, filename)
def save_checkpoint(filename, state):
print("=> saving checkpoint '{}'".format(filename + '.pth'))
safe_save(state, filename + '.pth')
def load_checkpoint(filename):
print("=> loading checkpoint '{}'".format(filename))
state = safe_load(filename)
return state
def parameterized_truncated_normal(uniform, mu, sigma, a, b):
normal = torch.distributions.normal.Normal(0, 1)
alpha = (a - mu) / sigma
beta = (b - mu) / sigma
alpha_normal_cdf = normal.cdf(torch.from_numpy(np.array(alpha)))
p = alpha_normal_cdf + (normal.cdf(torch.from_numpy(np.array(beta))) - alpha_normal_cdf) * uniform
p = p.numpy()
one = np.array(1, dtype=p.dtype)
epsilon = np.array(np.finfo(p.dtype).eps, dtype=p.dtype)
v = np.clip(2 * p - 1, -one + epsilon, one - epsilon)
x = mu + sigma * np.sqrt(2) * torch.erfinv(torch.from_numpy(v))
x = torch.clamp(x, a, b)
return x
def truncated_normal(uniform, mu=0.0, sigma=1.0, a=-2, b=2):
return parameterized_truncated_normal(uniform, mu, sigma, a, b)
def sample_truncated_normal(shape=(), mu=0.0, sigma=1.0, a=-2, b=2):
return truncated_normal(torch.from_numpy(np.random.uniform(0, 1, shape)), mu, sigma, a, b)
def variance_scaling_initializer(tensor, mode='fan_in',scale = 2.0):
fan = torch.nn.init._calculate_correct_fan(tensor, mode)
print(fan, scale)
sigma = np.sqrt(scale / fan)
with torch.no_grad():
tensor[:] = sample_truncated_normal(tensor.size(), sigma=sigma)
return tensor
def random_sample(obs_batch, prob):
num_batches = obs_batch.size()[0]
permutation = torch.randperm(num_batches, device=obs_batch.device)
start = 0
end = int(prob * num_batches)
indices = permutation[start:end]
return torch.index_select(obs_batch, 0, indices)
def mean_list(val):
return torch.mean(torch.stack(val))
def apply_masks(losses, mask=None):
sum_mask = None
if mask is not None:
mask = mask.unsqueeze(1)
sum_mask = mask.numel()#
#sum_mask = mask.sum()
res_losses = [(l * mask).sum() / sum_mask for l in losses]
else:
res_losses = [torch.mean(l) for l in losses]
return res_losses, sum_mask
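# normalization_with_masks standardizes `values` over the entries selected by `masks`:
# it zeroes the masked-out entries, takes the masked mean, estimates the variance as
# E[x^2] - E[x]^2 with Bessel's correction, and returns (x - mean) / (std + 1e-8).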
def normalization_with_masks(values, masks):
sum_mask = masks.sum()
values_mask = values * masks
values_mean = values_mask.sum() / sum_mask
min_sqr = ((((values_mask)**2)/sum_mask).sum() - ((values_mask/sum_mask).sum())**2)
values_std = torch.sqrt(min_sqr * sum_mask / (sum_mask-1))
normalized_values = (values_mask - values_mean) / (values_std + 1e-8)
return normalized_values
class CoordConv2d(nn.Conv2d):
pool = {}
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__(in_channels + 2, out_channels, kernel_size, stride,
padding, dilation, groups, bias)
@staticmethod
def get_coord(x):
key = int(x.size(0)), int(x.size(2)), int(x.size(3)), x.type()
if key not in CoordConv2d.pool:
theta = torch.Tensor([[[1, 0, 0], [0, 1, 0]]])
coord = torch.nn.functional.affine_grid(theta, torch.Size([1, 1, x.size(2), x.size(3)])).permute([0, 3, 1, 2]).repeat(
x.size(0), 1, 1, 1).type_as(x)
CoordConv2d.pool[key] = coord
return CoordConv2d.pool[key]
def forward(self, x):
return torch.nn.functional.conv2d(torch.cat([x, self.get_coord(x).type_as(x)], 1), self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
class LayerNorm2d(nn.Module):
"""
Layer norm the just works on the channel axis for a Conv2d
Ref:
- code modified from https://github.com/Scitator/Run-Skeleton-Run/blob/master/common/modules/LayerNorm.py
- paper: https://arxiv.org/abs/1607.06450
Usage:
ln = LayerNormConv(3)
x = Variable(torch.rand((1,3,4,2)))
ln(x).size()
"""
def __init__(self, features, eps=1e-6):
super().__init__()
self.register_buffer("gamma", torch.ones(features).unsqueeze(-1).unsqueeze(-1))
self.register_buffer("beta", torch.ones(features).unsqueeze(-1).unsqueeze(-1))
self.eps = eps
self.features = features
def _check_input_dim(self, input):
if input.size(1) != self.gamma.nelement():
raise ValueError('got {}-feature tensor, expected {}'
.format(input.size(1), self.features))
def forward(self, x):
self._check_input_dim(x)
x_flat = x.transpose(1,-1).contiguous().view((-1, x.size(1)))
mean = x_flat.mean(0).unsqueeze(-1).unsqueeze(-1).expand_as(x)
std = x_flat.std(0).unsqueeze(-1).unsqueeze(-1).expand_as(x)
return self.gamma.expand_as(x) * (x - mean) / (std + self.eps) + self.beta.expand_as(x)
class DiscreteActionsEncoder(nn.Module):
def __init__(self, actions_max, mlp_out, emb_size, num_agents, use_embedding):
super().__init__()
self.actions_max = actions_max
self.emb_size = emb_size
self.num_agents = num_agents
self.use_embedding = use_embedding
if use_embedding:
self.embedding = torch.nn.Embedding(actions_max, emb_size)
else:
self.emb_size = actions_max
self.linear = torch.nn.Linear(self.emb_size * num_agents, mlp_out)
def forward(self, discrete_actions):
if self.use_embedding:
emb = self.embedding(discrete_actions)
else:
emb = torch.nn.functional.one_hot(discrete_actions, num_classes=self.actions_max)
emb = emb.view( -1, self.emb_size * self.num_agents).float()
emb = self.linear(emb)
return emb
def get_model_gradients(model):
grad_list = []
for param in model.parameters():
grad_list.append(param.grad)
return grad_list
def get_mean(v):
if len(v) > 0:
mean = np.mean(v)
else:
mean = 0
return mean
class CategoricalMaskedNaive(torch.distributions.Categorical):
def __init__(self, probs=None, logits=None, validate_args=None, masks=None):
self.masks = masks
if self.masks is None:
super(CategoricalMaskedNaive, self).__init__(probs, logits, validate_args)
else:
inf_mask = torch.log(masks.float())
logits = logits + inf_mask
super(CategoricalMaskedNaive, self).__init__(probs, logits, validate_args)
def entropy(self):
if self.masks is None:
return super(CategoricalMaskedNaive, self).entropy()
p_log_p = self.logits * self.probs
p_log_p[p_log_p != p_log_p] = 0
return -p_log_p.sum(-1)
class CategoricalMasked(torch.distributions.Categorical):
def __init__(self, probs=None, logits=None, validate_args=None, masks=None):
self.masks = masks
if masks is None:
super(CategoricalMasked, self).__init__(probs, logits, validate_args)
else:
self.device = self.masks.device
logits = torch.where(self.masks, logits, torch.tensor(-1e+8).to(self.device))
super(CategoricalMasked, self).__init__(probs, logits, validate_args)
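# rsample below draws a sample via the Gumbel-max trick: argmax_i(logits_i + g_i) with
# g_i = -log(-log u_i), u_i ~ Uniform(0, 1), which is what `logits - (-u.log()).log()` computes.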
def rsample(self):
u = torch.distributions.Uniform(low=torch.zeros_like(self.logits, device = self.logits.device), high=torch.ones_like(self.logits, device = self.logits.device)).sample()
#print(u.size(), self.logits.size())
rand_logits = self.logits -(-u.log()).log()
return torch.max(rand_logits, axis=-1)[1]
def entropy(self):
if self.masks is None:
return super(CategoricalMasked, self).entropy()
p_log_p = self.logits * self.probs
p_log_p = torch.where(self.masks, p_log_p, torch.tensor(0.0).to(self.device))
return -p_log_p.sum(-1)
class AverageMeter(nn.Module):
def __init__(self, in_shape, max_size):
super(AverageMeter, self).__init__()
self.max_size = max_size
self.current_size = 0
self.register_buffer("mean", torch.zeros(in_shape, dtype = torch.float32))
def update(self, values):
size = values.size()[0]
if size == 0:
return
new_mean = torch.mean(values.float(), dim=0)
size = np.clip(size, 0, self.max_size)
old_size = min(self.max_size - size, self.current_size)
size_sum = old_size + size
self.current_size = size_sum
self.mean = (self.mean * old_size + new_mean * size) / size_sum
def clear(self):
self.current_size = 0
self.mean.fill_(0)
def __len__(self):
return self.current_size
def get_mean(self):
return self.mean.squeeze(0).cpu().numpy()
class IdentityRNN(nn.Module):
def __init__(self, in_shape, out_shape):
super(IdentityRNN, self).__init__()
assert(in_shape == out_shape)
self.identity = torch.nn.Identity()
def forward(self, x, h):
return self.identity(x), h
| 11,332 | Python | 35.092357 | 176 | 0.607395 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/sac_agent.py | from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import vecenv
from rl_games.common import schedulers
from rl_games.common import experience
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from torch import optim
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import time
import os
class SACAgent:
def __init__(self, base_name, config):
print(config)
# TODO: Get obs shape and self.network
self.base_init(base_name, config)
self.num_seed_steps = config["num_seed_steps"]
self.gamma = config["gamma"]
self.critic_tau = config["critic_tau"]
self.batch_size = config["batch_size"]
self.init_alpha = config["init_alpha"]
self.learnable_temperature = config["learnable_temperature"]
self.replay_buffer_size = config["replay_buffer_size"]
self.num_steps_per_episode = config.get("num_steps_per_episode", 1)
self.normalize_input = config.get("normalize_input", False)
self.max_env_steps = config.get("max_env_steps", 1000) # temporary, in future we will use other approach
print(self.batch_size, self.num_actors, self.num_agents)
self.num_frames_per_epoch = self.num_actors * self.num_steps_per_episode
self.log_alpha = torch.tensor(np.log(self.init_alpha)).float().to(self.sac_device)
self.log_alpha.requires_grad = True
action_space = self.env_info['action_space']
self.actions_num = action_space.shape[0]
self.action_range = [
float(self.env_info['action_space'].low.min()),
float(self.env_info['action_space'].high.max())
]
obs_shape = torch_ext.shape_whc_to_cwh(self.obs_shape)
net_config = {
'obs_dim': self.env_info["observation_space"].shape[0],
'action_dim': self.env_info["action_space"].shape[0],
'actions_num' : self.actions_num,
'input_shape' : obs_shape
}
self.model = self.network.build(net_config)
self.model.to(self.sac_device)
print("Number of Agents", self.num_actors, "Batch Size", self.batch_size)
self.actor_optimizer = torch.optim.Adam(self.model.sac_network.actor.parameters(),
lr=self.config['actor_lr'],
betas=self.config.get("actor_betas", [0.9, 0.999]))
self.critic_optimizer = torch.optim.Adam(self.model.sac_network.critic.parameters(),
lr=self.config["critic_lr"],
betas=self.config.get("critic_betas", [0.9, 0.999]))
self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha],
lr=self.config["alpha_lr"],
betas=self.config.get("alphas_betas", [0.9, 0.999]))
self.replay_buffer = experience.VectorizedReplayBuffer(self.env_info['observation_space'].shape,
self.env_info['action_space'].shape,
self.replay_buffer_size,
self.sac_device)
self.target_entropy_coef = config.get("target_entropy_coef", 0.5)
self.target_entropy = self.target_entropy_coef * -self.env_info['action_space'].shape[0]
print("Target entropy", self.target_entropy)
self.step = 0
self.algo_observer = config['features']['observer']
# TODO: Is there a better way to get the maximum number of episodes?
self.max_episodes = torch.ones(self.num_actors, device=self.sac_device)*self.num_steps_per_episode
# self.episode_lengths = np.zeros(self.num_actors, dtype=int)
if self.normalize_input:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.sac_device)
def base_init(self, base_name, config):
self.config = config
self.env_config = config.get('env_config', {})
self.num_actors = config.get('num_actors', 1)
self.env_name = config['env_name']
print("Env name:", self.env_name)
self.env_info = config.get('env_info')
if self.env_info is None:
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.env_info = self.vec_env.get_env_info()
self.sac_device = config.get('device', 'cuda:0')
#temporary:
self.ppo_device = self.sac_device
print('Env info:')
print(self.env_info)
self.rewards_shaper = config['reward_shaper']
self.observation_space = self.env_info['observation_space']
self.weight_decay = config.get('weight_decay', 0.0)
#self.use_action_masks = config.get('use_action_masks', False)
self.is_train = config.get('is_train', True)
self.c_loss = nn.MSELoss()
# self.c2_loss = nn.SmoothL1Loss()
self.save_best_after = config.get('save_best_after', 500)
self.print_stats = config.get('print_stats', True)
self.rnn_states = None
self.name = base_name
self.max_epochs = self.config.get('max_epochs', 1e6)
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_agents = self.env_info.get('agents', 1)
self.obs_shape = self.observation_space.shape
self.games_to_track = self.config.get('games_to_track', 100)
self.game_rewards = torch_ext.AverageMeter(1, self.games_to_track).to(self.sac_device)
self.game_lengths = torch_ext.AverageMeter(1, self.games_to_track).to(self.sac_device)
self.obs = None
self.min_alpha = torch.tensor(np.log(1)).float().to(self.sac_device)
self.frame = 0
self.update_time = 0
self.last_mean_rewards = -100500
self.play_time = 0
self.epoch_num = 0
# self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
# print("Run Directory:", config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.experiment_dir = config.get('logdir', './')
self.nn_dir = os.path.join(self.experiment_dir, 'nn')
self.summaries_dir = os.path.join(self.experiment_dir, 'runs')
os.makedirs(self.experiment_dir, exist_ok=True)
os.makedirs(self.nn_dir, exist_ok=True)
os.makedirs(self.summaries_dir, exist_ok=True)
self.writer = SummaryWriter(self.summaries_dir)
print("Run Directory:", self.summaries_dir)
self.is_tensor_obses = None
self.is_rnn = False
self.last_rnn_indices = None
self.last_state_indices = None
def init_tensors(self):
if self.observation_space.dtype == np.uint8:
torch_dtype = torch.uint8
else:
torch_dtype = torch.float32
batch_size = self.num_agents * self.num_actors
self.current_rewards = torch.zeros(batch_size, dtype=torch.float32, device=self.sac_device)
self.current_lengths = torch.zeros(batch_size, dtype=torch.long, device=self.sac_device)
self.dones = torch.zeros((batch_size,), dtype=torch.uint8, device=self.sac_device)
@property
def alpha(self):
return self.log_alpha.exp()
@property
def device(self):
return self.sac_device
def get_full_state_weights(self):
state = self.get_weights()
state['steps'] = self.step
state['actor_optimizer'] = self.actor_optimizer.state_dict()
state['critic_optimizer'] = self.critic_optimizer.state_dict()
state['log_alpha_optimizer'] = self.log_alpha_optimizer.state_dict()
return state
def get_weights(self):
state = {'actor': self.model.sac_network.actor.state_dict(),
'critic': self.model.sac_network.critic.state_dict(),
'critic_target': self.model.sac_network.critic_target.state_dict()}
if self.normalize_input:
state['running_mean_std'] = self.running_mean_std.state_dict()
return state
def save(self, fn):
state = self.get_full_state_weights()
torch_ext.save_checkpoint(fn, state)
def set_weights(self, weights):
self.model.sac_network.actor.load_state_dict(weights['actor'])
self.model.sac_network.critic.load_state_dict(weights['critic'])
self.model.sac_network.critic_target.load_state_dict(weights['critic_target'])
if self.normalize_input:
self.running_mean_std.load_state_dict(weights['running_mean_std'])
def set_full_state_weights(self, weights):
self.set_weights(weights)
self.step = weights['steps']
self.actor_optimizer.load_state_dict(weights['actor_optimizer'])
self.critic_optimizer.load_state_dict(weights['critic_optimizer'])
self.log_alpha_optimizer.load_state_dict(weights['log_alpha_optimizer'])
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.set_full_state_weights(checkpoint)
def get_masked_action_values(self, obs, action_masks):
assert False
def set_eval(self):
self.model.eval()
if self.normalize_input:
self.running_mean_std.eval()
def set_train(self):
self.model.train()
if self.normalize_input:
self.running_mean_std.train()
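# update_critic regresses both Q-heads toward the soft Bellman target
#   y = r + gamma * (1 - done) * ( min(Q1', Q2')(s', a') - alpha * log pi(a'|s') ),  a' ~ pi(.|s'),
# where the primed critics are the slowly-updated target networks.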
def update_critic(self, obs, action, reward, next_obs, not_done,
step):
with torch.no_grad():
dist = self.model.actor(next_obs)
next_action = dist.rsample()
log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
target_Q1, target_Q2 = self.model.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1, target_Q2) - self.alpha * log_prob
target_Q = reward + (not_done * self.gamma * target_V)
target_Q = target_Q.detach()
# get current Q estimates
current_Q1, current_Q2 = self.model.critic(obs, action)
critic1_loss = self.c_loss(current_Q1, target_Q)
critic2_loss = self.c_loss(current_Q2, target_Q)
critic_loss = critic1_loss + critic2_loss
self.critic_optimizer.zero_grad(set_to_none=True)
critic_loss.backward()
self.critic_optimizer.step()
return critic_loss.detach(), critic1_loss.detach(), critic2_loss.detach()
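# update_actor_and_alpha minimizes the SAC actor objective
#   E[ alpha * log pi(a|s) - min(Q1, Q2)(s, a) ]  with a ~ pi(.|s) (reparameterized)
# while the critic parameters are frozen; alpha is floored at min_alpha. If the temperature is
# learnable, alpha is then adapted via  E[ -alpha * (log pi(a|s) + target_entropy) ].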
def update_actor_and_alpha(self, obs, step):
for p in self.model.sac_network.critic.parameters():
p.requires_grad = False
dist = self.model.actor(obs)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
entropy = dist.entropy().sum(-1, keepdim=True).mean()
actor_Q1, actor_Q2 = self.model.critic(obs, action)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (torch.max(self.alpha.detach(), self.min_alpha) * log_prob - actor_Q)
actor_loss = actor_loss.mean()
self.actor_optimizer.zero_grad(set_to_none=True)
actor_loss.backward()
self.actor_optimizer.step()
for p in self.model.sac_network.critic.parameters():
p.requires_grad = True
if self.learnable_temperature:
alpha_loss = (self.alpha *
(-log_prob - self.target_entropy).detach()).mean()
self.log_alpha_optimizer.zero_grad(set_to_none=True)
alpha_loss.backward()
self.log_alpha_optimizer.step()
else:
alpha_loss = None
return actor_loss.detach(), entropy.detach(), self.alpha.detach(), alpha_loss # TODO: maybe not self.alpha
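# soft_update_params applies Polyak averaging to the target critic:
#   theta_target <- tau * theta + (1 - tau) * theta_target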
def soft_update_params(self, net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
def update(self, step):
obs, action, reward, next_obs, done = self.replay_buffer.sample(self.batch_size)
not_done = ~done
obs = self.preproc_obs(obs)
next_obs = self.preproc_obs(next_obs)
critic_loss, critic1_loss, critic2_loss = self.update_critic(obs, action, reward, next_obs, not_done, step)
actor_loss, entropy, alpha, alpha_loss = self.update_actor_and_alpha(obs, step)
actor_loss_info = actor_loss, entropy, alpha, alpha_loss
self.soft_update_params(self.model.sac_network.critic, self.model.sac_network.critic_target,
self.critic_tau)
return actor_loss_info, critic1_loss, critic2_loss
def preproc_obs(self, obs):
if isinstance(obs, dict):
obs = obs['obs']
if self.normalize_input:
obs = self.running_mean_std(obs)
return obs
def env_step(self, actions):
obs, rewards, dones, infos = self.vec_env.step(actions) # (obs_space) -> (n, obs_space)
self.step += self.num_actors
if self.is_tensor_obses:
return obs, rewards, dones, infos
else:
return torch.from_numpy(obs).to(self.sac_device), torch.from_numpy(rewards).to(self.sac_device), torch.from_numpy(dones).to(self.sac_device), infos
def env_reset(self):
with torch.no_grad():
obs = self.vec_env.reset()
if self.is_tensor_obses is None:
self.is_tensor_obses = torch.is_tensor(obs)
print("Observations are tensors:", self.is_tensor_obses)
if self.is_tensor_obses:
return obs.to(self.sac_device)
else:
return torch.from_numpy(obs).to(self.sac_device)
def act(self, obs, action_dim, sample=False):
obs = self.preproc_obs(obs)
dist = self.model.actor(obs)
actions = dist.sample() if sample else dist.mean
actions = actions.clamp(*self.action_range)
assert actions.ndim == 2
return actions
def extract_actor_stats(self, actor_losses, entropies, alphas, alpha_losses, actor_loss_info):
actor_loss, entropy, alpha, alpha_loss = actor_loss_info
actor_losses.append(actor_loss)
entropies.append(entropy)
if alpha_losses is not None:
alphas.append(alpha)
alpha_losses.append(alpha_loss)
def play_steps(self, random_exploration=False):
total_time_start = time.time()
total_update_time = 0
total_time = 0
step_time = 0.0
actor_losses = []
entropies = []
alphas = []
alpha_losses = []
critic1_losses = []
critic2_losses = []
obs = self.obs
for _ in range(self.num_steps_per_episode):
self.set_eval()
if random_exploration:
action = torch.rand((self.num_actors, *self.env_info["action_space"].shape), device=self.sac_device) * 2 - 1
else:
with torch.no_grad():
action = self.act(obs.float(), self.env_info["action_space"].shape, sample=True)
step_start = time.time()
with torch.no_grad():
next_obs, rewards, dones, infos = self.env_step(action)
step_end = time.time()
self.current_rewards += rewards
self.current_lengths += 1
total_time += step_end - step_start
step_time += (step_end - step_start)
all_done_indices = dones.nonzero(as_tuple=False)
done_indices = all_done_indices[::self.num_agents]
self.game_rewards.update(self.current_rewards[done_indices])
self.game_lengths.update(self.current_lengths[done_indices])
not_dones = 1.0 - dones.float()
self.algo_observer.process_infos(infos, done_indices)
no_timeouts = self.current_lengths != self.max_env_steps
dones = dones * no_timeouts
self.current_rewards = self.current_rewards * not_dones
self.current_lengths = self.current_lengths * not_dones
if isinstance(obs, dict):
obs = obs['obs']
if isinstance(next_obs, dict):
next_obs = next_obs['obs']
rewards = self.rewards_shaper(rewards)
#if torch.min(obs) < -150 or torch.max(obs) > 150:
# print('ATATATA')
#else:
self.replay_buffer.add(obs, action, torch.unsqueeze(rewards, 1), next_obs, torch.unsqueeze(dones, 1))
self.obs = obs = next_obs.clone()
if not random_exploration:
self.set_train()
update_time_start = time.time()
actor_loss_info, critic1_loss, critic2_loss = self.update(self.epoch_num)
update_time_end = time.time()
update_time = update_time_end - update_time_start
self.extract_actor_stats(actor_losses, entropies, alphas, alpha_losses, actor_loss_info)
critic1_losses.append(critic1_loss)
critic2_losses.append(critic2_loss)
else:
update_time = 0
total_update_time += update_time
total_time_end = time.time()
total_time = total_time_end - total_time_start
play_time = total_time - total_update_time
return step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses
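    # For the first num_seed_steps epochs the replay buffer is filled with uniform
    # random actions; afterwards actions come from the current policy.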
def train_epoch(self):
if self.epoch_num < self.num_seed_steps:
step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.play_steps(random_exploration=True)
else:
step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.play_steps(random_exploration=False)
return step_time, play_time, total_update_time, total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses
def train(self):
self.init_tensors()
self.algo_observer.after_init(self)
self.last_mean_rewards = -100500
total_time = 0
# rep_count = 0
self.frame = 0
self.obs = self.env_reset()
while True:
self.epoch_num += 1
step_time, play_time, update_time, epoch_total_time, actor_losses, entropies, alphas, alpha_losses, critic1_losses, critic2_losses = self.train_epoch()
total_time += epoch_total_time
scaled_time = epoch_total_time
scaled_play_time = play_time
curr_frames = self.num_frames_per_epoch
self.frame += curr_frames
frame = self.frame #TODO: Fix frame
# print(frame)
self.writer.add_scalar('performance/step_inference_rl_update_fps', curr_frames / scaled_time, frame)
self.writer.add_scalar('performance/step_inference_fps', curr_frames / scaled_play_time, frame)
self.writer.add_scalar('performance/step_fps', curr_frames / step_time, frame)
self.writer.add_scalar('performance/rl_update_time', update_time, frame)
self.writer.add_scalar('performance/step_inference_time', play_time, frame)
self.writer.add_scalar('performance/step_time', step_time, frame)
if self.epoch_num >= self.num_seed_steps:
self.writer.add_scalar('losses/a_loss', torch_ext.mean_list(actor_losses).item(), frame)
self.writer.add_scalar('losses/c1_loss', torch_ext.mean_list(critic1_losses).item(), frame)
self.writer.add_scalar('losses/c2_loss', torch_ext.mean_list(critic2_losses).item(), frame)
self.writer.add_scalar('losses/entropy', torch_ext.mean_list(entropies).item(), frame)
if alpha_losses[0] is not None:
self.writer.add_scalar('losses/alpha_loss', torch_ext.mean_list(alpha_losses).item(), frame)
self.writer.add_scalar('info/alpha', torch_ext.mean_list(alphas).item(), frame)
self.writer.add_scalar('info/epochs', self.epoch_num, frame)
self.algo_observer.after_print_stats(frame, self.epoch_num, total_time)
mean_rewards = 0
mean_lengths = 0
if self.game_rewards.current_size > 0:
mean_rewards = self.game_rewards.get_mean()
mean_lengths = self.game_lengths.get_mean()
self.writer.add_scalar('rewards/step', mean_rewards, frame)
self.writer.add_scalar('rewards/iter', mean_rewards, self.epoch_num)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/step', mean_lengths, frame)
# self.writer.add_scalar('episode_lengths/iter', mean_lengths, epoch_num)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if mean_rewards > self.last_mean_rewards and self.epoch_num >= self.save_best_after:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
# self.save("./nn/" + self.config['name'])
self.save(os.path.join(self.nn_dir, self.config['name']))
# if self.last_mean_rewards > self.config.get('score_to_win', float('inf')):
# print('Network won!')
# self.save("./nn/" + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards))
# return self.last_mean_rewards, self.epoch_num
if self.epoch_num > self.max_epochs:
# self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards))
self.save(os.path.join(self.nn_dir, 'last_' + self.config['name'] + 'ep=' + str(self.epoch_num) + 'rew=' + str(mean_rewards)))
print('MAX EPOCHS NUM!')
return self.last_mean_rewards, self.epoch_num
update_time = 0
if self.print_stats:
fps_step = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'epoch: {self.epoch_num} fps step: {fps_step:.1f} fps total: {fps_total:.1f} reward: {mean_rewards:.3f} episode len: {mean_lengths:.3f}') | 22,630 | Python | 41.943074 | 186 | 0.595095 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/d2rl.py | import torch
class D2RLNet(torch.nn.Module):
def __init__(self, input_size,
units,
activations,
norm_func_name = None):
torch.nn.Module.__init__(self)
self.activations = torch.nn.ModuleList(activations)
self.linears = torch.nn.ModuleList([])
self.norm_layers = torch.nn.ModuleList([])
self.num_layers = len(units)
last_size = input_size
for i in range(self.num_layers):
self.linears.append(torch.nn.Linear(last_size, units[i]))
last_size = units[i] + input_size
if norm_func_name == 'layer_norm':
self.norm_layers.append(torch.nn.LayerNorm(units[i]))
elif norm_func_name == 'batch_norm':
self.norm_layers.append(torch.nn.BatchNorm1d(units[i]))
else:
self.norm_layers.append(torch.nn.Identity())
def forward(self, input):
x = self.linears[0](input)
x = self.activations[0](x)
x = self.norm_layers[0](x)
for i in range(1,self.num_layers):
x = torch.cat([x,input], dim=1)
x = self.linears[i](x)
x = self.norm_layers[i](x)
x = self.activations[i](x)
return x | 1,259 | Python | 37.181817 | 71 | 0.544083 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/players.py | from rl_games.common.player import BasePlayer
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common.tr_helpers import unsqueeze_obs
import gym
import torch
from torch import nn
import numpy as np
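# map actions from the normalized [-1, 1] range to the environment's [low, high] box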
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
class PpoPlayerContinuous(BasePlayer):
def __init__(self, config):
BasePlayer.__init__(self, config)
self.network = config['network']
self.actions_num = self.action_space.shape[0]
self.actions_low = torch.from_numpy(self.action_space.low.copy()).float().to(self.device)
self.actions_high = torch.from_numpy(self.action_space.high.copy()).float().to(self.device)
self.mask = [False]
self.normalize_input = self.config['normalize_input']
obs_shape = self.obs_shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_agents
}
self.model = self.network.build(config)
self.model.to(self.device)
self.model.eval()
self.is_rnn = self.model.is_rnn()
if self.normalize_input:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
self.running_mean_std.eval()
def get_action(self, obs, is_determenistic = False):
if self.has_batch_dimension == False:
obs = unsqueeze_obs(obs)
obs = self._preproc_obs(obs)
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : obs,
'rnn_states' : self.states
}
with torch.no_grad():
res_dict = self.model(input_dict)
mu = res_dict['mus']
action = res_dict['actions']
self.states = res_dict['rnn_states']
if is_determenistic:
current_action = mu
else:
current_action = action
current_action = torch.squeeze(current_action.detach())
return rescale_actions(self.actions_low, self.actions_high, torch.clamp(current_action, -1.0, 1.0))
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.model.load_state_dict(checkpoint['model'])
if self.normalize_input:
self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
def reset(self):
self.init_rnn()
class PpoPlayerDiscrete(BasePlayer):
def __init__(self, config):
BasePlayer.__init__(self, config)
self.network = config['network']
if type(self.action_space) is gym.spaces.Discrete:
self.actions_num = self.action_space.n
self.is_multi_discrete = False
if type(self.action_space) is gym.spaces.Tuple:
self.actions_num = [action.n for action in self.action_space]
self.is_multi_discrete = True
self.mask = [False]
self.normalize_input = self.config['normalize_input']
obs_shape = self.obs_shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_agents,
'value_size': self.value_size
}
self.model = self.network.build(config)
self.model.to(self.device)
self.model.eval()
self.is_rnn = self.model.is_rnn()
if self.normalize_input:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
self.running_mean_std.eval()
def get_masked_action(self, obs, action_masks, is_determenistic = True):
if self.has_batch_dimension == False:
obs = unsqueeze_obs(obs)
obs = self._preproc_obs(obs)
action_masks = torch.Tensor(action_masks).to(self.device)
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : obs,
'action_masks' : action_masks,
'rnn_states' : self.states
}
self.model.eval()
with torch.no_grad():
            res_dict = self.model(input_dict)
logits = res_dict['logits']
action = res_dict['actions']
self.states = res_dict['rnn_states']
if self.is_multi_discrete:
if is_determenistic:
action = [torch.argmax(logit.detach(), axis=-1).squeeze() for logit in logits]
return torch.stack(action,dim=-1)
else:
return action.squeeze().detach()
else:
if is_determenistic:
return torch.argmax(logits.detach(), axis=-1).squeeze()
else:
return action.squeeze().detach()
def get_action(self, obs, is_determenistic = False):
if self.has_batch_dimension == False:
obs = unsqueeze_obs(obs)
obs = self._preproc_obs(obs)
self.model.eval()
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : obs,
'rnn_states' : self.states
}
with torch.no_grad():
res_dict = self.model(input_dict)
logits = res_dict['logits']
action = res_dict['actions']
self.states = res_dict['rnn_states']
if self.is_multi_discrete:
if is_determenistic:
action = [torch.argmax(logit.detach(), axis=1).squeeze() for logit in logits]
return torch.stack(action,dim=-1)
else:
return action.squeeze().detach()
else:
if is_determenistic:
return torch.argmax(logits.detach(), axis=-1).squeeze()
else:
return action.squeeze().detach()
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.model.load_state_dict(checkpoint['model'])
if self.normalize_input:
self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
def reset(self):
self.init_rnn()
class SACPlayer(BasePlayer):
def __init__(self, config):
BasePlayer.__init__(self, config)
self.network = config['network']
self.actions_num = self.action_space.shape[0]
self.action_range = [
float(self.env_info['action_space'].low.min()),
float(self.env_info['action_space'].high.max())
]
obs_shape = torch_ext.shape_whc_to_cwh(self.state_shape)
self.normalize_input = False
config = {
'obs_dim': self.env_info["observation_space"].shape[0],
'action_dim': self.env_info["action_space"].shape[0],
'actions_num' : self.actions_num,
'input_shape' : obs_shape
}
self.model = self.network.build(config)
self.model.to(self.device)
self.model.eval()
self.is_rnn = self.model.is_rnn()
# if self.normalize_input:
# self.running_mean_std = RunningMeanStd(obs_shape).to(self.device)
# self.running_mean_std.eval()
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.model.sac_network.actor.load_state_dict(checkpoint['actor'])
self.model.sac_network.critic.load_state_dict(checkpoint['critic'])
self.model.sac_network.critic_target.load_state_dict(checkpoint['critic_target'])
if self.normalize_input:
self.running_mean_std.load_state_dict(checkpoint['running_mean_std'])
def get_action(self, obs, sample=False):
dist = self.model.actor(obs)
actions = dist.sample() if sample else dist.mean
actions = actions.clamp(*self.action_range).to(self.device)
assert actions.ndim == 2
return actions
def reset(self):
pass | 7,933 | Python | 36.074766 | 108 | 0.576831 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/self_play_manager.py | import numpy as np
class SelfPlayManager:
def __init__(self, config, writter):
self.config = config
self.writter = writter
self.update_score = self.config['update_score']
self.games_to_check = self.config['games_to_check']
self.check_scores = self.config.get('check_scores', False)
self.env_update_num = self.config.get('env_update_num', 1)
self.env_indexes = np.arange(start=0, stop=self.env_update_num)
self.updates_num = 0
def update(self, algo):
self.updates_num += 1
if self.check_scores:
data = algo.game_scores
else:
data = algo.game_rewards
if len(data) >= self.games_to_check:
mean_scores = data.get_mean()
mean_rewards = algo.game_rewards.get_mean()
if mean_scores > self.update_score:
print('Mean scores: ', mean_scores, ' mean rewards: ', mean_rewards, ' updating weights')
algo.clear_stats()
self.writter.add_scalar('selfplay/iters_update_weigths', self.updates_num, algo.frame)
algo.vec_env.set_weights(self.env_indexes, algo.get_weights())
self.env_indexes = (self.env_indexes + 1) % (algo.num_actors)
self.updates_num = 0
| 1,332 | Python | 40.656249 | 105 | 0.572072 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/sac_helper.py | # from rl_games.algos_torch.network_builder import NetworkBuilder
from torch import distributions as pyd
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
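# Tanh bijector and tanh-squashed Gaussian used by the SAC models: actions are
# sampled from Normal(mu, sigma) and squashed into (-1, 1) via tanh.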
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x):
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
# one should use `cache_size=1` instead
return self.atanh(y)
def log_abs_det_jacobian(self, x, y):
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
def entropy(self):
return self.base_dist.entropy()
| 1,720 | Python | 28.169491 | 137 | 0.647093 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/a2c_discrete.py | from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch import ppg_aux
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class DiscreteA2CAgent(a2c_common.DiscreteA2CBase):
def __init__(self, base_name, config):
a2c_common.DiscreteA2CBase.__init__(self, base_name, config)
obs_shape = self.obs_shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_actors * self.num_agents,
'value_size': self.env_info.get('value_size',1)
}
self.model = self.network.build(config)
self.model.to(self.ppo_device)
self.init_rnn_from_model(self.model)
self.last_lr = float(self.last_lr)
self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
if self.normalize_input:
if isinstance(self.observation_space, gym.spaces.Dict):
self.running_mean_std = RunningMeanStdObs(obs_shape).to(self.ppo_device)
else:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.ppo_device)
if self.has_central_value:
cv_config = {
'state_shape' : self.state_shape,
'value_size' : self.value_size,
'ppo_device' : self.ppo_device,
'num_agents' : self.num_agents,
'num_steps' : self.horizon_length,
'num_actors' : self.num_actors,
'num_actions' : self.actions_num,
'seq_len' : self.seq_len,
'model' : self.central_value_config['network'],
'config' : self.central_value_config,
'writter' : self.writer,
'max_epochs' : self.max_epochs,
'multi_gpu' : self.multi_gpu
}
self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)
self.use_experimental_cv = self.config.get('use_experimental_cv', False)
self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
if 'phasic_policy_gradients' in self.config:
self.has_phasic_policy_gradients = True
self.ppg_aux_loss = ppg_aux.PPGAux(self, self.config['phasic_policy_gradients'])
self.has_value_loss = (self.has_central_value \
and self.use_experimental_cv) \
or not self.has_phasic_policy_gradients
self.algo_observer.after_init(self)
def update_epoch(self):
self.epoch_num += 1
return self.epoch_num
def save(self, fn):
state = self.get_full_state_weights()
torch_ext.save_checkpoint(fn, state)
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.set_full_state_weights(checkpoint)
def get_masked_action_values(self, obs, action_masks):
processed_obs = self._preproc_obs(obs['obs'])
action_masks = torch.BoolTensor(action_masks).to(self.ppo_device)
input_dict = {
'is_train': False,
'prev_actions': None,
'obs' : processed_obs,
'action_masks' : action_masks,
'rnn_states' : self.rnn_states
}
with torch.no_grad():
res_dict = self.model(input_dict)
if self.has_central_value:
input_dict = {
'is_train': False,
'states' : obs['states'],
#'actions' : action,
}
value = self.get_central_value(input_dict)
res_dict['values'] = value
if self.normalize_value:
value = self.value_mean_std(value, True)
if self.is_multi_discrete:
action_masks = torch.cat(action_masks, dim=-1)
res_dict['action_masks'] = action_masks
return res_dict
def train_actor_critic(self, input_dict):
self.set_train()
self.calc_gradients(input_dict)
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.last_lr
return self.train_result
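    # One PPO minibatch update: clipped surrogate actor loss, (optionally clipped)
    # value loss, entropy bonus, and an approximate KL between the old and new policy,
    # packed into self.train_result.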
def calc_gradients(self, input_dict):
value_preds_batch = input_dict['old_values']
old_action_log_probs_batch = input_dict['old_logp_actions']
advantage = input_dict['advantages']
return_batch = input_dict['returns']
actions_batch = input_dict['actions']
obs_batch = input_dict['obs']
obs_batch = self._preproc_obs(obs_batch)
lr = self.last_lr
kl = 1.0
lr_mul = 1.0
curr_e_clip = lr_mul * self.e_clip
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
}
if self.use_action_masks:
batch_dict['action_masks'] = input_dict['action_masks']
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
batch_dict['seq_length'] = self.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
res_dict = self.model(batch_dict)
action_log_probs = res_dict['prev_neglogp']
values = res_dict['values']
entropy = res_dict['entropy']
a_loss = common_losses.actor_loss(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
if self.has_value_loss:
c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
else:
c_loss = torch.zeros(1, device=self.ppo_device)
losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1)], rnn_masks)
a_loss, c_loss, entropy = losses[0], losses[1], losses[2]
            loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * self.entropy_coef
if self.multi_gpu:
self.optimizer.zero_grad()
else:
for param in self.model.parameters():
param.grad = None
self.scaler.scale(loss).backward()
if self.truncate_grads:
if self.multi_gpu:
self.optimizer.synchronize()
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
with self.optimizer.skip_synchronize():
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.step(self.optimizer)
self.scaler.update()
with torch.no_grad():
kl_dist = 0.5 * ((old_action_log_probs_batch - action_log_probs)**2)
if self.is_rnn:
kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() # / sum_mask
else:
kl_dist = kl_dist.mean()
if self.has_phasic_policy_gradients:
c_loss = self.ppg_aux_loss.train_value(self,input_dict)
self.train_result = (a_loss, c_loss, entropy, kl_dist,self.last_lr, lr_mul)
| 7,889 | Python | 38.848485 | 142 | 0.566865 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/ppg_aux.py |
from rl_games.common import tr_helpers
from rl_games.algos_torch import torch_ext
from rl_games.common import common_losses
from rl_games.common.datasets import DatasetList
import torch
from torch import nn
from torch import optim
import copy
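# Phasic Policy Gradient auxiliary phase: rollout datasets are accumulated and,
# every n_aux epochs, the model is trained on a joint value loss and a KL (or
# |mu - mu_old|) penalty against a frozen copy of the policy.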
class PPGAux:
def __init__(self, algo, config):
self.config = config
self.writer = algo.writer
self.mini_epoch = config['mini_epochs']
self.mini_batch = config['minibatch_size']
self.mixed_precision = algo.mixed_precision
self.is_rnn = algo.network.is_rnn()
self.kl_coef = config.get('kl_coef', 1.0)
self.n_aux = config.get('n_aux', 16)
self.is_continuous = True
self.last_lr = config['learning_rate']
self.optimizer = optim.Adam(algo.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=algo.weight_decay)
self.scaler = torch.cuda.amp.GradScaler(enabled=self.mixed_precision)
self._freeze_grads(algo.model)
self.value_optimizer = optim.Adam(filter(lambda p: p.requires_grad, algo.model.parameters()), float(self.last_lr), eps=1e-08, weight_decay=algo.weight_decay)
self.value_scaler = torch.cuda.amp.GradScaler(enabled=self.mixed_precision)
self._unfreeze_grads(algo.model)
self.dataset_list = DatasetList()
def _freeze_grads(self, model):
for param in model.parameters():
param.requires_grad = False
model.a2c_network.value.weight.requires_grad = True
model.a2c_network.value.bias.requires_grad = True
def _unfreeze_grads(self, model):
for param in model.parameters():
param.requires_grad = True
def train_value(self, algo, input_dict):
value_preds_batch = input_dict['old_values']
return_batch = input_dict['returns']
obs_batch = input_dict['obs']
actions_batch = input_dict['actions']
obs_batch = algo._preproc_obs(obs_batch)
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
}
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = algo.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
res_dict = algo.model(batch_dict)
values = res_dict['values']
c_loss = common_losses.critic_loss(value_preds_batch, values, algo.e_clip, return_batch, algo.clip_value)
losses, sum_mask = torch_ext.apply_masks([c_loss], rnn_masks)
c_loss = losses[0]
loss = c_loss
if algo.multi_gpu:
self.value_optimizer.zero_grad()
else:
for param in algo.model.parameters():
param.grad = None
self.value_scaler.scale(loss).backward()
if algo.truncate_grads:
if algo.multi_gpu:
self.value_optimizer.synchronize()
self.value_scaler.unscale_(self.value_optimizer)
nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
with self.value_optimizer.skip_synchronize():
self.value_scaler.step(self.value_optimizer)
self.value_scaler.update()
else:
self.value_scaler.unscale_(self.value_optimizer)
nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
self.value_scaler.step(self.value_optimizer)
self.value_scaler.update()
else:
self.value_scaler.step(self.value_optimizer)
self.value_scaler.update()
return loss.detach()
def update(self, algo):
self.dataset_list.add_dataset(algo.dataset)
def train_net(self, algo):
self.update(algo)
if algo.epoch_num % self.n_aux != 0:
return
self.old_model = copy.deepcopy(algo.model)
self.old_model.eval()
dataset = self.dataset_list
for _ in range(self.mini_epoch):
for idx in range(len(dataset)):
loss_c, loss_kl = self.calc_gradients(algo, dataset[idx])
avg_loss_c = loss_c / len(dataset)
avg_loss_kl = loss_kl / len(dataset)
if self.writer != None:
self.writer.add_scalar('losses/pgg_loss_c', avg_loss_c, algo.frame)
self.writer.add_scalar('losses/pgg_loss_kl', avg_loss_kl, algo.frame)
self.dataset_list.clear()
def calc_gradients(self, algo, input_dict):
value_preds_batch = input_dict['old_values']
return_batch = input_dict['returns']
obs_batch = input_dict['obs']
actions_batch = input_dict['actions']
obs_batch = algo._preproc_obs(obs_batch)
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
}
#if self.use_action_masks:
# batch_dict['action_masks'] = input_dict['action_masks']
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = algo.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
with torch.no_grad():
old_dict = self.old_model(batch_dict.copy())
res_dict = algo.model(batch_dict)
values = res_dict['values']
if 'mu' in res_dict:
old_mu_batch = input_dict['mu']
old_sigma_batch = input_dict['sigma']
mu = res_dict['mus']
sigma = res_dict['sigmas']
#kl_loss = torch_ext.policy_kl(mu, sigma.detach(), old_mu_batch, old_sigma_batch, False)
kl_loss = torch.abs(mu - old_mu_batch)
else:
kl_loss = algo.model.kl(res_dict, old_dict)
c_loss = common_losses.critic_loss(value_preds_batch, values, algo.e_clip, return_batch, algo.clip_value)
losses, sum_mask = torch_ext.apply_masks([c_loss, kl_loss.unsqueeze(1)], rnn_masks)
c_loss, kl_loss = losses[0], losses[1]
loss = c_loss + kl_loss * self.kl_coef
if algo.multi_gpu:
self.optimizer.zero_grad()
else:
for param in algo.model.parameters():
param.grad = None
self.scaler.scale(loss).backward()
if algo.truncate_grads:
if algo.multi_gpu:
self.optimizer.synchronize()
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
with self.optimizer.skip_synchronize():
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(algo.model.parameters(), algo.grad_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.step(self.optimizer)
self.scaler.update()
return c_loss, kl_loss
| 7,361 | Python | 39.010869 | 165 | 0.570439 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/central_value.py | import torch
from torch import nn
import numpy as np
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.common import schedulers
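# Centralized value function trainer: a separate critic trained on the (global)
# state, used by the agents when a central value head is configured, typically
# for multi-agent or asymmetric-observation setups.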
class CentralValueTrain(nn.Module):
def __init__(self, state_shape, value_size, ppo_device, num_agents, num_steps, num_actors, num_actions, seq_len, model, config, writter, max_epochs, multi_gpu):
nn.Module.__init__(self)
self.ppo_device = ppo_device
self.num_agents, self.num_steps, self.num_actors, self.seq_len = num_agents, num_steps, num_actors, seq_len
self.num_actions = num_actions
self.state_shape = state_shape
self.value_size = value_size
self.max_epochs = max_epochs
self.multi_gpu = multi_gpu
self.truncate_grads = config.get('truncate_grads', False)
state_config = {
'value_size' : value_size,
'input_shape' : state_shape,
'actions_num' : num_actions,
'num_agents' : num_agents,
'num_seqs' : num_actors
}
self.config = config
self.model = model.build('cvalue', **state_config)
self.lr = float(config['learning_rate'])
self.linear_lr = config.get('lr_schedule') == 'linear'
if self.linear_lr:
self.scheduler = schedulers.LinearScheduler(self.lr,
max_steps=self.max_epochs,
apply_to_entropy=False,
start_entropy_coef=0)
else:
self.scheduler = schedulers.IdentityScheduler()
self.mini_epoch = config['mini_epochs']
self.mini_batch = config['minibatch_size']
self.num_minibatches = self.num_steps * self.num_actors // self.mini_batch
self.clip_value = config['clip_value']
self.normalize_input = config['normalize_input']
self.writter = writter
self.weight_decay = config.get('weight_decay', 0.0)
self.optimizer = torch.optim.Adam(self.model.parameters(), float(self.lr), eps=1e-08, weight_decay=self.weight_decay)
self.frame = 0
self.epoch_num = 0
self.running_mean_std = None
self.grad_norm = config.get('grad_norm', 1)
self.truncate_grads = config.get('truncate_grads', False)
self.e_clip = config.get('e_clip', 0.2)
self.truncate_grad = self.config.get('truncate_grads', False)
if self.normalize_input:
self.running_mean_std = RunningMeanStd(state_shape)
self.is_rnn = self.model.is_rnn()
self.rnn_states = None
self.batch_size = self.num_steps * self.num_actors
if self.is_rnn:
self.rnn_states = self.model.get_default_rnn_state()
self.rnn_states = [s.to(self.ppo_device) for s in self.rnn_states]
num_seqs = self.num_steps * self.num_actors // self.seq_len
assert((self.num_steps * self.num_actors // self.num_minibatches) % self.seq_len == 0)
self.mb_rnn_states = [torch.zeros((s.size()[0], num_seqs, s.size()[2]), dtype = torch.float32, device=self.ppo_device) for s in self.rnn_states]
self.dataset = datasets.PPODataset(self.batch_size, self.mini_batch, True, self.is_rnn, self.ppo_device, self.seq_len)
def update_lr(self, lr):
if self.multi_gpu:
lr_tensor = torch.tensor([lr])
self.hvd.broadcast_value(lr_tensor, 'cv_learning_rate')
lr = lr_tensor.item()
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def get_stats_weights(self):
if self.normalize_input:
return self.running_mean_std.state_dict()
else:
return {}
def set_stats_weights(self, weights):
self.running_mean_std.load_state_dict(weights)
def update_dataset(self, batch_dict):
value_preds = batch_dict['old_values']
returns = batch_dict['returns']
actions = batch_dict['actions']
rnn_masks = batch_dict['rnn_masks']
if self.num_agents > 1:
res = self.update_multiagent_tensors(value_preds, returns, actions, rnn_masks)
batch_dict['old_values'] = res[0]
batch_dict['returns'] = res[1]
batch_dict['actions'] = res[2]
if self.is_rnn:
batch_dict['rnn_states'] = self.mb_rnn_states
if self.num_agents > 1:
rnn_masks = res[3]
batch_dict['rnn_masks'] = rnn_masks
self.dataset.update_values_dict(batch_dict)
def _preproc_obs(self, obs_batch):
if type(obs_batch) is dict:
for k,v in obs_batch.items():
obs_batch[k] = self._preproc_obs(v)
else:
if obs_batch.dtype == torch.uint8:
obs_batch = obs_batch.float() / 255.0
if self.normalize_input:
obs_batch = self.running_mean_std(obs_batch)
return obs_batch
def pre_step_rnn(self, rnn_indices, state_indices):
if self.num_agents > 1:
rnn_indices = rnn_indices[::self.num_agents]
shifts = rnn_indices % (self.num_steps // self.seq_len)
rnn_indices = (rnn_indices - shifts) // self.num_agents + shifts
state_indices = state_indices[::self.num_agents] // self.num_agents
for s, mb_s in zip(self.rnn_states, self.mb_rnn_states):
mb_s[:, rnn_indices, :] = s[:, state_indices, :]
def post_step_rnn(self, all_done_indices):
all_done_indices = all_done_indices[::self.num_agents] // self.num_agents
for s in self.rnn_states:
s[:,all_done_indices,:] = s[:,all_done_indices,:] * 0.0
def forward(self, input_dict):
value, rnn_states = self.model(input_dict)
return value, rnn_states
def get_value(self, input_dict):
self.eval()
obs_batch = input_dict['states']
actions = input_dict.get('actions', None)
obs_batch = self._preproc_obs(obs_batch)
value, self.rnn_states = self.forward({'obs' : obs_batch, 'actions': actions,
'rnn_states': self.rnn_states})
if self.num_agents > 1:
value = value.repeat(1, self.num_agents)
value = value.view(value.size()[0]*self.num_agents, -1)
return value
def train_critic(self, input_dict):
self.train()
loss = self.calc_gradients(input_dict)
return loss.item()
def update_multiagent_tensors(self, value_preds, returns, actions, rnn_masks):
batch_size = self.batch_size
ma_batch_size = self.num_actors * self.num_agents * self.num_steps
value_preds = value_preds.view(self.num_actors, self.num_agents, self.num_steps, self.value_size).transpose(0,1)
returns = returns.view(self.num_actors, self.num_agents, self.num_steps, self.value_size).transpose(0,1)
value_preds = value_preds.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
returns = returns.contiguous().view(ma_batch_size, self.value_size)[:batch_size]
if self.is_rnn:
rnn_masks = rnn_masks.view(self.num_actors, self.num_agents, self.num_steps).transpose(0,1)
rnn_masks = rnn_masks.flatten(0)[:batch_size]
return value_preds, returns, actions, rnn_masks
def train_net(self):
self.train()
loss = 0
for _ in range(self.mini_epoch):
for idx in range(len(self.dataset)):
loss += self.train_critic(self.dataset[idx])
avg_loss = loss / (self.mini_epoch * self.num_minibatches)
self.epoch_num += 1
self.lr, _ = self.scheduler.update(self.lr, 0, self.epoch_num, 0, 0)
self.update_lr(self.lr)
self.frame += self.batch_size
if self.writter != None:
self.writter.add_scalar('losses/cval_loss', avg_loss, self.frame)
self.writter.add_scalar('info/cval_lr', self.lr, self.frame)
return avg_loss
def calc_gradients(self, batch):
obs_batch = self._preproc_obs(batch['obs'])
value_preds_batch = batch['old_values']
returns_batch = batch['returns']
actions_batch = batch['actions']
rnn_masks_batch = batch.get('rnn_masks')
batch_dict = {'obs' : obs_batch,
'actions' : actions_batch,
'seq_length' : self.seq_len }
if self.is_rnn:
batch_dict['rnn_states'] = batch['rnn_states']
values, _ = self.forward(batch_dict)
loss = common_losses.critic_loss(value_preds_batch, values, self.e_clip, returns_batch, self.clip_value)
losses, _ = torch_ext.apply_masks([loss], rnn_masks_batch)
loss = losses[0]
if self.multi_gpu:
self.optimizer.zero_grad()
else:
for param in self.model.parameters():
param.grad = None
loss.backward()
        #TODO: Refactor this ugliest code of the year
if self.truncate_grads:
if self.multi_gpu:
self.optimizer.synchronize()
#self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
with self.optimizer.skip_synchronize():
self.optimizer.step()
else:
#self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
self.optimizer.step()
else:
self.optimizer.step()
return loss
| 9,703 | Python | 41.561403 | 164 | 0.587241 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/models.py | import rl_games.algos_torch.layers
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
import rl_games.common.divergence as divergence
from rl_games.algos_torch.torch_ext import CategoricalMasked
from torch.distributions import Categorical
from rl_games.algos_torch.sac_helper import SquashedNormal
class BaseModel():
def __init__(self):
pass
def is_rnn(self):
return False
def is_separate_critic(self):
return False
class ModelA2C(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
return ModelA2C.Network(self.network_builder.build('a2c', **config))
class Network(nn.Module):
def __init__(self, a2c_network):
nn.Module.__init__(self)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['logits']
q = q_dict['logits']
return divergence.d_kl_discrete(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
action_masks = input_dict.get('action_masks', None)
prev_actions = input_dict.get('prev_actions', None)
logits, value, states = self.a2c_network(input_dict)
if is_train:
categorical = CategoricalMasked(logits=logits, masks=action_masks)
prev_neglogp = -categorical.log_prob(prev_actions)
entropy = categorical.entropy()
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'logits' : categorical.logits,
'values' : value,
'entropy' : entropy,
'rnn_states' : states
}
return result
else:
categorical = CategoricalMasked(logits=logits, masks=action_masks)
selected_action = categorical.sample().long()
neglogp = -categorical.log_prob(selected_action)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : value,
'actions' : selected_action,
'logits' : categorical.logits,
'rnn_states' : states
}
return result
class ModelA2CMultiDiscrete(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
return ModelA2CMultiDiscrete.Network(self.network_builder.build('a2c', **config))
class Network(nn.Module):
def __init__(self, a2c_network):
nn.Module.__init__(self)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['logits']
q = q_dict['logits']
return divergence.d_kl_discrete_list(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
action_masks = input_dict.get('action_masks', None)
prev_actions = input_dict.get('prev_actions', None)
logits, value, states = self.a2c_network(input_dict)
if is_train:
if action_masks is None:
categorical = [Categorical(logits=logit) for logit in logits]
else:
categorical = [CategoricalMasked(logits=logit, masks=mask) for logit, mask in zip(logits, action_masks)]
prev_actions = torch.split(prev_actions, 1, dim=-1)
prev_neglogp = [-c.log_prob(a.squeeze()) for c,a in zip(categorical, prev_actions)]
prev_neglogp = torch.stack(prev_neglogp, dim=-1).sum(dim=-1)
entropy = [c.entropy() for c in categorical]
entropy = torch.stack(entropy, dim=-1).sum(dim=-1)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'logits' : [c.logits for c in categorical],
'values' : value,
'entropy' : torch.squeeze(entropy),
'rnn_states' : states
}
return result
else:
if action_masks is None:
categorical = [Categorical(logits=logit) for logit in logits]
else:
categorical = [CategoricalMasked(logits=logit, masks=mask) for logit, mask in zip(logits, action_masks)]
selected_action = [c.sample().long() for c in categorical]
neglogp = [-c.log_prob(a.squeeze()) for c,a in zip(categorical, selected_action)]
selected_action = torch.stack(selected_action, dim=-1)
neglogp = torch.stack(neglogp, dim=-1).sum(dim=-1)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : value,
'actions' : selected_action,
'logits' : [c.logits for c in categorical],
'rnn_states' : states
}
return result
class ModelA2CContinuous(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
return ModelA2CContinuous.Network(self.network_builder.build('a2c', **config))
class Network(nn.Module):
def __init__(self, a2c_network):
nn.Module.__init__(self)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def kl(self, p_dict, q_dict):
p = p_dict['mu'], p_dict['sigma']
q = q_dict['mu'], q_dict['sigma']
return divergence.d_kl_normal(p, q)
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
prev_actions = input_dict.get('prev_actions', None)
mu, sigma, value, states = self.a2c_network(input_dict)
distr = torch.distributions.Normal(mu, sigma)
if is_train:
entropy = distr.entropy().sum(dim=-1)
prev_neglogp = -distr.log_prob(prev_actions).sum(dim=-1)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
                    'values' : value,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
else:
                selected_action = distr.sample().squeeze()
                neglogp = -distr.log_prob(selected_action).sum(dim=-1)
                entropy = distr.entropy().sum(dim=-1)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : torch.squeeze(value),
'actions' : selected_action,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
class ModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
net = self.network_builder.build('a2c', **config)
for name, _ in net.named_parameters():
print(name)
return ModelA2CContinuousLogStd.Network(net)
class Network(nn.Module):
def __init__(self, a2c_network):
nn.Module.__init__(self)
self.a2c_network = a2c_network
def is_rnn(self):
return self.a2c_network.is_rnn()
def get_default_rnn_state(self):
return self.a2c_network.get_default_rnn_state()
def forward(self, input_dict):
is_train = input_dict.get('is_train', True)
prev_actions = input_dict.get('prev_actions', None)
mu, logstd, value, states = self.a2c_network(input_dict)
sigma = torch.exp(logstd)
distr = torch.distributions.Normal(mu, sigma)
if is_train:
entropy = distr.entropy().sum(dim=-1)
prev_neglogp = self.neglogp(prev_actions, mu, sigma, logstd)
result = {
'prev_neglogp' : torch.squeeze(prev_neglogp),
'values' : value,
'entropy' : entropy,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
else:
selected_action = distr.sample()
neglogp = self.neglogp(selected_action, mu, sigma, logstd)
result = {
'neglogpacs' : torch.squeeze(neglogp),
'values' : value,
'actions' : selected_action,
'rnn_states' : states,
'mus' : mu,
'sigmas' : sigma
}
return result
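        # Diagonal-Gaussian negative log-likelihood:
        #   -log N(x | mean, std) = 0.5 * sum(((x - mean) / std)^2) + 0.5 * k * log(2*pi) + sum(log std)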
def neglogp(self, x, mean, std, logstd):
return 0.5 * (((x - mean) / std)**2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ logstd.sum(dim=-1)
class ModelSACContinuous(BaseModel):
def __init__(self, network):
BaseModel.__init__(self)
self.network_builder = network
def build(self, config):
return ModelSACContinuous.Network(self.network_builder.build('sac', **config))
class Network(nn.Module):
def __init__(self, sac_network):
nn.Module.__init__(self)
self.sac_network = sac_network
def critic(self, obs, action):
return self.sac_network.critic(obs, action)
def critic_target(self, obs, action):
return self.sac_network.critic_target(obs, action)
def actor(self, obs):
return self.sac_network.actor(obs)
def is_rnn(self):
return False
def forward(self, input_dict):
is_train = input_dict.pop('is_train', True)
mu, sigma = self.sac_network(input_dict)
dist = SquashedNormal(mu, sigma)
return dist
| 10,919 | Python | 36.142857 | 140 | 0.514699 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/model_builder.py | from rl_games.common import object_factory
import rl_games.algos_torch
from rl_games.algos_torch import network_builder
from rl_games.algos_torch import models
NETWORK_REGISTRY = {}
def register_network(name, target_class):
NETWORK_REGISTRY[name] = lambda **kwargs : target_class()
class ModelBuilder:
def __init__(self):
self.model_factory = object_factory.ObjectFactory()
self.model_factory.register_builder('discrete_a2c', lambda network, **kwargs : models.ModelA2C(network))
self.model_factory.register_builder('multi_discrete_a2c', lambda network, **kwargs : models.ModelA2CMultiDiscrete(network))
self.model_factory.register_builder('continuous_a2c', lambda network, **kwargs : models.ModelA2CContinuous(network))
self.model_factory.register_builder('continuous_a2c_logstd', lambda network, **kwargs : models.ModelA2CContinuousLogStd(network))
self.model_factory.register_builder('soft_actor_critic', lambda network, **kwargs : models.ModelSACContinuous(network))
#self.model_factory.register_builder('dqn', lambda network, **kwargs : models.AtariDQN(network))
self.network_factory = object_factory.ObjectFactory()
self.network_factory.set_builders(NETWORK_REGISTRY)
self.network_factory.register_builder('actor_critic', lambda **kwargs : network_builder.A2CBuilder())
self.network_factory.register_builder('resnet_actor_critic', lambda **kwargs : network_builder.A2CResnetBuilder())
self.network_factory.register_builder('rnd_curiosity', lambda **kwargs : network_builder.RNDCuriosityBuilder())
self.network_factory.register_builder('soft_actor_critic', lambda **kwargs: network_builder.SACBuilder())
def load(self, params):
self.model_name = params['model']['name']
self.network_name = params['network']['name']
network = self.network_factory.create(self.network_name)
network.load(params['network'])
model = self.model_factory.create(self.model_name, network=network)
return model | 2,062 | Python | 53.289472 | 137 | 0.723084 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/moving_mean_std.py | import torch
import torch.nn as nn
import numpy as np
'''
updates moving statistics with momentum
'''
class MovingMeanStd(nn.Module):
def __init__(self, insize, momentum = 0.9998, epsilon=1e-05, per_channel=False, norm_only=False):
super(MovingMeanStd, self).__init__()
self.insize = insize
self.epsilon = epsilon
self.momentum = momentum
self.norm_only = norm_only
self.per_channel = per_channel
if per_channel:
if len(self.insize) == 3:
self.axis = [0,2,3]
if len(self.insize) == 2:
self.axis = [0,2]
if len(self.insize) == 1:
self.axis = [0]
in_size = self.insize[0]
else:
self.axis = [0]
in_size = insize
self.register_buffer("moving_mean", torch.zeros(in_size, dtype = torch.float64))
self.register_buffer("moving_var", torch.ones(in_size, dtype = torch.float64))
def forward(self, input, unnorm=False):
if self.training:
mean = input.mean(self.axis) # along channel axis
var = input.var(self.axis)
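            # EMA update: m <- momentum * m + (1 - momentum) * batch statistic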
self.moving_mean = self.moving_mean * self.momentum + mean * (1 - self.momentum)
self.moving_var = self.moving_var * self.momentum + var * (1 - self.momentum)
# change shape
if self.per_channel:
if len(self.insize) == 3:
current_mean = self.moving_mean.view([1, self.insize[0], 1, 1]).expand_as(input)
current_var = self.moving_var.view([1, self.insize[0], 1, 1]).expand_as(input)
if len(self.insize) == 2:
current_mean = self.moving_mean.view([1, self.insize[0], 1]).expand_as(input)
current_var = self.moving_var.view([1, self.insize[0], 1]).expand_as(input)
if len(self.insize) == 1:
current_mean = self.moving_mean.view([1, self.insize[0]]).expand_as(input)
current_var = self.moving_var.view([1, self.insize[0]]).expand_as(input)
else:
current_mean = self.moving_mean
current_var = self.moving_var
# get output
if unnorm:
y = torch.clamp(input, min=-5.0, max=5.0)
y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
else:
y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
y = torch.clamp(y, min=-5.0, max=5.0)
return y | 2,521 | Python | 42.482758 | 101 | 0.554145 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/layers.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
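# NoisyNet layers: learnable Gaussian noise on weights/biases provides exploration;
# the factorized variant samples per-input and per-output noise vectors and
# combines them with an outer product.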
class NoisyLinear(nn.Linear):
def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):
super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)
self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
self.register_buffer("epsilon_weight", torch.zeros(out_features, in_features))
if bias:
self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))
self.register_buffer("epsilon_bias", torch.zeros(out_features))
self.reset_parameters()
def reset_parameters(self):
std = math.sqrt(3 / self.in_features)
        self.weight.data.uniform_(-std, std)
        if self.bias is not None:
            self.bias.data.uniform_(-std, std)
def forward(self, input):
self.epsilon_weight.normal_()
bias = self.bias
if bias is not None:
self.epsilon_bias.normal_()
bias = bias + self.sigma_bias * self.epsilon_bias.data
return F.linear(input, self.weight + self.sigma_weight * self.epsilon_weight.data, bias)
class NoisyFactorizedLinear(nn.Linear):
def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):
super(NoisyFactorizedLinear, self).__init__(in_features, out_features, bias=bias)
sigma_init = sigma_zero / math.sqrt(in_features)
self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
self.register_buffer("epsilon_input", torch.zeros(1, in_features))
self.register_buffer("epsilon_output", torch.zeros(out_features, 1))
if bias:
self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))
def forward(self, input):
        self.epsilon_input.normal_()
self.epsilon_output.normal_()
func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))
eps_in = func(self.epsilon_input.data)
eps_out = func(self.epsilon_output.data)
bias = self.bias
if bias is not None:
bias = bias + self.sigma_bias * eps_out.t()
noise_v = torch.mul(eps_in, eps_out)
return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)
class LSTMWithDones(nn.Module):
def __init__(self, input_sz: int, hidden_sz: int):
super().__init__()
self.input_sz = input_sz
self.hidden_size = hidden_sz
self.weight_ih = nn.Parameter(torch.Tensor(input_sz, hidden_sz * 4))
self.weight_hh = nn.Parameter(torch.Tensor(hidden_sz, hidden_sz * 4))
self.bias = nn.Parameter(torch.Tensor(hidden_sz * 4))
self.init_weights()
def init_weights(self):
for p in self.parameters():
if p.data.ndimension() >= 2:
nn.init.xavier_uniform_(p.data)
else:
nn.init.zeros_(p.data)
def forward(self, x, dones, init_states):
"""Assumes x is of shape (batch, sequence, feature)"""
bs, seq_sz, _ = x.size()
hidden_seq = []
assert(init_states)
h_t, c_t = init_states
HS = self.hidden_size
for t in range(seq_sz):
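            # reset the recurrent state for environments that finished at this step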
d = dones[:, t]
h_t = h_t * (1 - d)
c_t = c_t * (1 - d)
x_t = x[:, t, :]
# batch the computations into a single matrix multiplication
gates = x_t @ self.weight_ih + h_t @ self.weight_hh + self.bias
i_t, f_t, g_t, o_t = (
torch.sigmoid(gates[:, :HS]), # input
torch.sigmoid(gates[:, HS:HS*2]), # forget
torch.tanh(gates[:, HS*2:HS*3]),
torch.sigmoid(gates[:, HS*3:]), # output
)
c_t = f_t * c_t + i_t * g_t
h_t = o_t * torch.tanh(c_t)
hidden_seq.append(h_t.unsqueeze(0))
        hidden_seq = torch.cat(hidden_seq, dim=0)
# reshape from shape (sequence, batch, feature) to (batch, sequence, feature)
hidden_seq = hidden_seq.transpose(1, 0).contiguous()
return hidden_seq, (h_t, c_t) | 4,148 | Python | 39.67647 | 96 | 0.578833 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/network_builder.py | from rl_games.common import object_factory
from rl_games.algos_torch import torch_ext
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
import numpy as np
from rl_games.algos_torch.d2rl import D2RLNet
from rl_games.algos_torch.sac_helper import SquashedNormal
def _create_initializer(func, **kwargs):
return lambda v : func(v, **kwargs)
class NetworkBuilder:
def __init__(self, **kwargs):
pass
def load(self, params):
pass
def build(self, name, **kwargs):
pass
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
class BaseNetwork(nn.Module):
def __init__(self, **kwargs):
nn.Module.__init__(self, **kwargs)
self.activations_factory = object_factory.ObjectFactory()
self.activations_factory.register_builder('relu', lambda **kwargs : nn.ReLU(**kwargs))
self.activations_factory.register_builder('tanh', lambda **kwargs : nn.Tanh(**kwargs))
self.activations_factory.register_builder('sigmoid', lambda **kwargs : nn.Sigmoid(**kwargs))
self.activations_factory.register_builder('elu', lambda **kwargs : nn.ELU(**kwargs))
self.activations_factory.register_builder('selu', lambda **kwargs : nn.SELU(**kwargs))
self.activations_factory.register_builder('softplus', lambda **kwargs : nn.Softplus(**kwargs))
self.activations_factory.register_builder('None', lambda **kwargs : nn.Identity())
self.init_factory = object_factory.ObjectFactory()
#self.init_factory.register_builder('normc_initializer', lambda **kwargs : normc_initializer(**kwargs))
self.init_factory.register_builder('const_initializer', lambda **kwargs : _create_initializer(nn.init.constant_,**kwargs))
self.init_factory.register_builder('orthogonal_initializer', lambda **kwargs : _create_initializer(nn.init.orthogonal_,**kwargs))
self.init_factory.register_builder('glorot_normal_initializer', lambda **kwargs : _create_initializer(nn.init.xavier_normal_,**kwargs))
self.init_factory.register_builder('glorot_uniform_initializer', lambda **kwargs : _create_initializer(nn.init.xavier_uniform_,**kwargs))
self.init_factory.register_builder('variance_scaling_initializer', lambda **kwargs : _create_initializer(torch_ext.variance_scaling_initializer,**kwargs))
self.init_factory.register_builder('random_uniform_initializer', lambda **kwargs : _create_initializer(nn.init.uniform_,**kwargs))
self.init_factory.register_builder('kaiming_normal', lambda **kwargs : _create_initializer(nn.init.kaiming_normal_,**kwargs))
self.init_factory.register_builder('orthogonal', lambda **kwargs : _create_initializer(nn.init.orthogonal_,**kwargs))
self.init_factory.register_builder('default', lambda **kwargs : nn.Identity() )
def is_separate_critic(self):
return False
def is_rnn(self):
return False
def get_default_rnn_state(self):
return None
def _calc_input_size(self, input_shape,cnn_layers=None):
if cnn_layers is None:
assert(len(input_shape) == 1)
return input_shape[0]
else:
return nn.Sequential(*cnn_layers)(torch.rand(1, *(input_shape))).flatten(1).data.size(1)
        def _noisy_dense(self, inputs, units):
            # lazy import: 'layers' (rl_games.algos_torch.layers) is not imported at module level here
            from rl_games.algos_torch import layers
            return layers.NoisyFactorizedLinear(inputs, units)
def _build_rnn(self, name, input, units, layers):
if name == 'identity':
return torch_ext.IdentityRNN(input, units)
if name == 'lstm':
return torch.nn.LSTM(input, units, layers, batch_first=True)
if name == 'gru':
return torch.nn.GRU(input, units, layers, batch_first=True)
if name == 'sru':
from sru import SRU
return SRU(input, units, layers, dropout=0, layer_norm=False)
def _build_sequential_mlp(self,
input_size,
units,
activation,
dense_func,
norm_only_first_layer=False,
norm_func_name = None):
print('build mlp:', input_size)
in_size = input_size
layers = []
need_norm = True
for unit in units:
layers.append(dense_func(in_size, unit))
layers.append(self.activations_factory.create(activation))
if not need_norm:
continue
if norm_only_first_layer and norm_func_name is not None:
need_norm = False
if norm_func_name == 'layer_norm':
layers.append(torch.nn.LayerNorm(unit))
elif norm_func_name == 'batch_norm':
layers.append(torch.nn.BatchNorm1d(unit))
in_size = unit
return nn.Sequential(*layers)
def _build_mlp(self,
input_size,
units,
activation,
dense_func,
norm_only_first_layer=False,
norm_func_name = None,
d2rl=False):
if d2rl:
act_layers = [self.activations_factory.create(activation) for i in range(len(units))]
return D2RLNet(input_size, units, act_layers, norm_func_name)
else:
                # forward the normalization settings instead of silently dropping them
                return self._build_sequential_mlp(input_size, units, activation, dense_func, norm_only_first_layer=norm_only_first_layer, norm_func_name=norm_func_name)
def _build_conv(self, ctype, **kwargs):
print('conv_name:', ctype)
if ctype == 'conv2d':
return self._build_cnn2d(**kwargs)
if ctype == 'coord_conv2d':
return self._build_cnn2d(conv_func=torch_ext.CoordConv2d, **kwargs)
if ctype == 'conv1d':
return self._build_cnn1d(**kwargs)
def _build_cnn2d(self, input_shape, convs, activation, conv_func=torch.nn.Conv2d, norm_func_name=None):
in_channels = input_shape[0]
layers = []
for conv in convs:
layers.append(conv_func(in_channels=in_channels,
out_channels=conv['filters'],
kernel_size=conv['kernel_size'],
stride=conv['strides'], padding=conv['padding']))
                # a CoordConv (if requested) is only used for the first layer; later layers fall back to a plain Conv2d
                conv_func = torch.nn.Conv2d
act = self.activations_factory.create(activation)
layers.append(act)
in_channels = conv['filters']
if norm_func_name == 'layer_norm':
layers.append(torch_ext.LayerNorm2d(in_channels))
elif norm_func_name == 'batch_norm':
layers.append(torch.nn.BatchNorm2d(in_channels))
return nn.Sequential(*layers)
def _build_cnn1d(self, input_shape, convs, activation, norm_func_name=None):
print('conv1d input shape:', input_shape)
in_channels = input_shape[0]
layers = []
for conv in convs:
layers.append(torch.nn.Conv1d(in_channels, conv['filters'], conv['kernel_size'], conv['strides'], conv['padding']))
act = self.activations_factory.create(activation)
layers.append(act)
in_channels = conv['filters']
if norm_func_name == 'layer_norm':
layers.append(torch.nn.LayerNorm(in_channels))
elif norm_func_name == 'batch_norm':
                layers.append(torch.nn.BatchNorm1d(in_channels))
return nn.Sequential(*layers)
class A2CBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
class Network(NetworkBuilder.BaseNetwork):
def __init__(self, params, **kwargs):
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
self.value_size = kwargs.pop('value_size', 1)
self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
NetworkBuilder.BaseNetwork.__init__(self)
self.load(params)
self.actor_cnn = nn.Sequential()
self.critic_cnn = nn.Sequential()
self.actor_mlp = nn.Sequential()
self.critic_mlp = nn.Sequential()
if self.has_cnn:
input_shape = torch_ext.shape_whc_to_cwh(input_shape)
cnn_args = {
'ctype' : self.cnn['type'],
'input_shape' : input_shape,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'norm_func_name' : self.normalization,
}
self.actor_cnn = self._build_conv(**cnn_args)
if self.separate:
self.critic_cnn = self._build_conv( **cnn_args)
mlp_input_shape = self._calc_input_size(input_shape, self.actor_cnn)
in_mlp_shape = mlp_input_shape
if len(self.units) == 0:
out_size = mlp_input_shape
else:
out_size = self.units[-1]
if self.has_rnn:
if not self.is_rnn_before_mlp:
rnn_in_size = out_size
out_size = self.rnn_units
if self.rnn_concat_input:
rnn_in_size += in_mlp_shape
else:
rnn_in_size = in_mlp_shape
in_mlp_shape = self.rnn_units
if self.separate:
self.a_rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
self.c_rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
if self.rnn_ln:
self.a_layer_norm = torch.nn.LayerNorm(self.rnn_units)
self.c_layer_norm = torch.nn.LayerNorm(self.rnn_units)
else:
self.rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
if self.rnn_ln:
self.layer_norm = torch.nn.LayerNorm(self.rnn_units)
mlp_args = {
'input_size' : in_mlp_shape,
'units' : self.units,
'activation' : self.activation,
'norm_func_name' : self.normalization,
'dense_func' : torch.nn.Linear,
'd2rl' : self.is_d2rl,
'norm_only_first_layer' : self.norm_only_first_layer
}
self.actor_mlp = self._build_mlp(**mlp_args)
if self.separate:
self.critic_mlp = self._build_mlp(**mlp_args)
self.value = torch.nn.Linear(out_size, self.value_size)
self.value_act = self.activations_factory.create(self.value_activation)
if self.is_discrete:
self.logits = torch.nn.Linear(out_size, actions_num)
'''
for multidiscrete actions num is a tuple
'''
if self.is_multi_discrete:
self.logits = torch.nn.ModuleList([torch.nn.Linear(out_size, num) for num in actions_num])
if self.is_continuous:
self.mu = torch.nn.Linear(out_size, actions_num)
self.mu_act = self.activations_factory.create(self.space_config['mu_activation'])
mu_init = self.init_factory.create(**self.space_config['mu_init'])
self.sigma_act = self.activations_factory.create(self.space_config['sigma_activation'])
sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
if self.space_config['fixed_sigma']:
self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=True, dtype=torch.float32), requires_grad=True)
else:
self.sigma = torch.nn.Linear(out_size, actions_num)
mlp_init = self.init_factory.create(**self.initializer)
if self.has_cnn:
cnn_init = self.init_factory.create(**self.cnn['initializer'])
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
cnn_init(m.weight)
if getattr(m, "bias", None) is not None:
torch.nn.init.zeros_(m.bias)
if isinstance(m, nn.Linear):
mlp_init(m.weight)
if getattr(m, "bias", None) is not None:
torch.nn.init.zeros_(m.bias)
if self.is_continuous:
mu_init(self.mu.weight)
if self.space_config['fixed_sigma']:
sigma_init(self.sigma)
else:
sigma_init(self.sigma.weight)
def forward(self, obs_dict):
obs = obs_dict['obs']
states = obs_dict.get('rnn_states', None)
seq_length = obs_dict.get('seq_length', 1)
if self.has_cnn:
# for obs shape 4
# input expected shape (B, W, H, C)
# convert to (B, C, W, H)
if len(obs.shape) == 4:
obs = obs.permute((0, 3, 1, 2))
if self.separate:
a_out = c_out = obs
a_out = self.actor_cnn(a_out)
a_out = a_out.contiguous().view(a_out.size(0), -1)
c_out = self.critic_cnn(c_out)
c_out = c_out.contiguous().view(c_out.size(0), -1)
if self.has_rnn:
if not self.is_rnn_before_mlp:
a_out_in = a_out
c_out_in = c_out
a_out = self.actor_mlp(a_out_in)
c_out = self.critic_mlp(c_out_in)
if self.rnn_concat_input:
a_out = torch.cat([a_out, a_out_in], dim=1)
c_out = torch.cat([c_out, c_out_in], dim=1)
batch_size = a_out.size()[0]
num_seqs = batch_size // seq_length
a_out = a_out.reshape(num_seqs, seq_length, -1)
c_out = c_out.reshape(num_seqs, seq_length, -1)
if self.rnn_name == 'sru':
a_out =a_out.transpose(0,1)
c_out =c_out.transpose(0,1)
if len(states) == 2:
a_states = states[0]
c_states = states[1]
else:
a_states = states[:2]
c_states = states[2:]
a_out, a_states = self.a_rnn(a_out, a_states)
c_out, c_states = self.c_rnn(c_out, c_states)
if self.rnn_name == 'sru':
a_out = a_out.transpose(0,1)
c_out = c_out.transpose(0,1)
else:
if self.rnn_ln:
a_out = self.a_layer_norm(a_out)
c_out = self.c_layer_norm(c_out)
a_out = a_out.contiguous().reshape(a_out.size()[0] * a_out.size()[1], -1)
c_out = c_out.contiguous().reshape(c_out.size()[0] * c_out.size()[1], -1)
if type(a_states) is not tuple:
a_states = (a_states,)
c_states = (c_states,)
states = a_states + c_states
if self.is_rnn_before_mlp:
a_out = self.actor_mlp(a_out)
c_out = self.critic_mlp(c_out)
else:
a_out = self.actor_mlp(a_out)
c_out = self.critic_mlp(c_out)
value = self.value_act(self.value(c_out))
if self.is_discrete:
logits = self.logits(a_out)
return logits, value, states
if self.is_multi_discrete:
logits = [logit(a_out) for logit in self.logits]
return logits, value, states
if self.is_continuous:
mu = self.mu_act(self.mu(a_out))
if self.space_config['fixed_sigma']:
sigma = mu * 0.0 + self.sigma_act(self.sigma)
else:
sigma = self.sigma_act(self.sigma(a_out))
return mu, sigma, value, states
else:
out = obs
out = self.actor_cnn(out)
out = out.flatten(1)
if self.has_rnn:
out_in = out
if not self.is_rnn_before_mlp:
out_in = out
out = self.actor_mlp(out)
if self.rnn_concat_input:
out = torch.cat([out, out_in], dim=1)
batch_size = out.size()[0]
num_seqs = batch_size // seq_length
out = out.reshape(num_seqs, seq_length, -1)
if len(states) == 1:
states = states[0]
if self.rnn_name == 'sru':
out = out.transpose(0,1)
out, states = self.rnn(out, states)
out = out.contiguous().reshape(out.size()[0] * out.size()[1], -1)
if self.rnn_name == 'sru':
out = out.transpose(0,1)
if self.rnn_ln:
out = self.layer_norm(out)
if self.is_rnn_before_mlp:
out = self.actor_mlp(out)
if type(states) is not tuple:
states = (states,)
else:
out = self.actor_mlp(out)
value = self.value_act(self.value(out))
if self.central_value:
return value, states
if self.is_discrete:
logits = self.logits(out)
return logits, value, states
if self.is_multi_discrete:
logits = [logit(out) for logit in self.logits]
return logits, value, states
if self.is_continuous:
mu = self.mu_act(self.mu(out))
if self.space_config['fixed_sigma']:
sigma = self.sigma_act(self.sigma)
else:
sigma = self.sigma_act(self.sigma(out))
return mu, mu*0 + sigma, value, states
def is_separate_critic(self):
return self.separate
def is_rnn(self):
return self.has_rnn
def get_default_rnn_state(self):
if not self.has_rnn:
return None
num_layers = self.rnn_layers
if self.rnn_name == 'identity':
rnn_units = 1
else:
rnn_units = self.rnn_units
if self.rnn_name == 'lstm':
if self.separate:
return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)))
else:
return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)))
else:
if self.separate:
return (torch.zeros((num_layers, self.num_seqs, rnn_units)),
torch.zeros((num_layers, self.num_seqs, rnn_units)))
else:
return (torch.zeros((num_layers, self.num_seqs, rnn_units)),)
def load(self, params):
self.separate = params.get('separate', False)
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.is_d2rl = params['mlp'].get('d2rl', False)
self.norm_only_first_layer = params['mlp'].get('norm_only_first_layer', False)
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
self.has_rnn = 'rnn' in params
self.has_space = 'space' in params
self.central_value = params.get('central_value', False)
self.joint_obs_actions_config = params.get('joint_obs_actions', None)
if self.has_space:
self.is_multi_discrete = 'multi_discrete'in params['space']
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous'in params['space']
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
elif self.is_multi_discrete:
self.space_config = params['space']['multi_discrete']
else:
self.is_discrete = False
self.is_continuous = False
self.is_multi_discrete = False
if self.has_rnn:
self.rnn_units = params['rnn']['units']
self.rnn_layers = params['rnn']['layers']
self.rnn_name = params['rnn']['name']
self.rnn_ln = params['rnn'].get('layer_norm', False)
self.is_rnn_before_mlp = params['rnn'].get('before_mlp', False)
self.rnn_concat_input = params['rnn'].get('concat_input', False)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
net = A2CBuilder.Network(self.params, **kwargs)
return net
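# Illustrative sketch (not part of the original library): how A2CBuilder is typically driven from a
# params dict shaped like the network section of a training config. The concrete sizes below
# (6 actions, 11-dim observations, [128, 64, 32] units) are made-up assumptions for demonstration.
def _example_build_a2c_network():
    params = {
        'separate': False,
        'space': {'continuous': {
            'mu_activation': 'None', 'sigma_activation': 'None',
            'mu_init': {'name': 'default'},
            'sigma_init': {'name': 'const_initializer', 'val': 0},
            'fixed_sigma': True}},
        'mlp': {'units': [128, 64, 32], 'activation': 'elu', 'initializer': {'name': 'default'}},
    }
    builder = A2CBuilder()
    builder.load(params)
    net = builder.build('actor_critic', actions_num=6, input_shape=(11,), num_seqs=1)
    # continuous head: returns action mean, (fixed) sigma, value estimate and rnn states (None here)
    mu, sigma, value, states = net({'obs': torch.zeros(4, 11)})
    return mu.shape, sigma.shape, value.shape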
class Conv2dAuto(nn.Conv2d):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.padding = (self.kernel_size[0] // 2, self.kernel_size[1] // 2)  # dynamically add padding based on the kernel size
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, use_bn=False):
super().__init__()
self.use_bn = use_bn
self.conv = Conv2dAuto(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, bias=not use_bn)
if use_bn:
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
return x
class ResidualBlock(nn.Module):
def __init__(self, channels, activation='relu', use_bn=False, use_zero_init=True, use_attention=False):
super().__init__()
self.use_zero_init=use_zero_init
self.use_attention = use_attention
if use_zero_init:
self.alpha = nn.Parameter(torch.zeros(1))
self.activation = activation
self.conv1 = ConvBlock(channels, channels, use_bn)
self.conv2 = ConvBlock(channels, channels, use_bn)
self.activate1 = nn.ELU()
self.activate2 = nn.ELU()
if use_attention:
self.ca = ChannelAttention(channels)
self.sa = SpatialAttention()
def forward(self, x):
residual = x
x = self.activate1(x)
x = self.conv1(x)
x = self.activate2(x)
x = self.conv2(x)
if self.use_attention:
x = self.ca(x) * x
x = self.sa(x) * x
if self.use_zero_init:
x = x * self.alpha + residual
else:
x = x + residual
return x
class ImpalaSequential(nn.Module):
def __init__(self, in_channels, out_channels, activation='elu', use_bn=True, use_zero_init=False):
super().__init__()
self.conv = ConvBlock(in_channels, out_channels, use_bn)
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.res_block1 = ResidualBlock(out_channels, activation=activation, use_bn=use_bn, use_zero_init=use_zero_init)
self.res_block2 = ResidualBlock(out_channels, activation=activation, use_bn=use_bn, use_zero_init=use_zero_init)
def forward(self, x):
x = self.conv(x)
x = self.max_pool(x)
x = self.res_block1(x)
x = self.res_block2(x)
return x
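# Illustrative sketch (assumption, not in the original source): each ImpalaSequential block keeps the
# channel count it is given and halves the spatial resolution via its stride-2 max pool, so a
# hypothetical conv_depths of [16, 32, 32] turns a 4x84x84 frame stack into a 32x11x11 feature map.
def _example_impala_feature_shape():
    stem = nn.Sequential(ImpalaSequential(4, 16), ImpalaSequential(16, 32), ImpalaSequential(32, 32))
    feats = stem(torch.zeros(1, 4, 84, 84))
    return feats.shape  # torch.Size([1, 32, 11, 11])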
class A2CResnetBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
class Network(NetworkBuilder.BaseNetwork):
def __init__(self, params, **kwargs):
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
input_shape = torch_ext.shape_whc_to_cwh(input_shape)
self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
self.value_size = kwargs.pop('value_size', 1)
NetworkBuilder.BaseNetwork.__init__(self, **kwargs)
self.load(params)
self.cnn = self._build_impala(input_shape, self.conv_depths)
mlp_input_shape = self._calc_input_size(input_shape, self.cnn)
in_mlp_shape = mlp_input_shape
if len(self.units) == 0:
out_size = mlp_input_shape
else:
out_size = self.units[-1]
if self.has_rnn:
if not self.is_rnn_before_mlp:
rnn_in_size = out_size
out_size = self.rnn_units
else:
rnn_in_size = in_mlp_shape
in_mlp_shape = self.rnn_units
self.rnn = self._build_rnn(self.rnn_name, rnn_in_size, self.rnn_units, self.rnn_layers)
#self.layer_norm = torch.nn.LayerNorm(self.rnn_units)
mlp_args = {
'input_size' : in_mlp_shape,
'units' :self.units,
'activation' : self.activation,
'norm_func_name' : self.normalization,
'dense_func' : torch.nn.Linear
}
self.mlp = self._build_mlp(**mlp_args)
self.value = torch.nn.Linear(out_size, self.value_size)
self.value_act = self.activations_factory.create(self.value_activation)
self.flatten_act = self.activations_factory.create(self.activation)
if self.is_discrete:
self.logits = torch.nn.Linear(out_size, actions_num)
if self.is_continuous:
self.mu = torch.nn.Linear(out_size, actions_num)
self.mu_act = self.activations_factory.create(self.space_config['mu_activation'])
mu_init = self.init_factory.create(**self.space_config['mu_init'])
self.sigma_act = self.activations_factory.create(self.space_config['sigma_activation'])
sigma_init = self.init_factory.create(**self.space_config['sigma_init'])
if self.space_config['fixed_sigma']:
self.sigma = nn.Parameter(torch.zeros(actions_num, requires_grad=True, dtype=torch.float32), requires_grad=True)
else:
self.sigma = torch.nn.Linear(out_size, actions_num)
mlp_init = self.init_factory.create(**self.initializer)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
#nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('elu'))
for m in self.mlp:
if isinstance(m, nn.Linear):
mlp_init(m.weight)
if self.is_discrete:
mlp_init(self.logits.weight)
if self.is_continuous:
mu_init(self.mu.weight)
if self.space_config['fixed_sigma']:
sigma_init(self.sigma)
else:
sigma_init(self.sigma.weight)
mlp_init(self.value.weight)
def forward(self, obs_dict):
obs = obs_dict['obs']
obs = obs.permute((0, 3, 1, 2))
states = obs_dict.get('rnn_states', None)
seq_length = obs_dict.get('seq_length', 1)
out = obs
out = self.cnn(out)
out = out.flatten(1)
out = self.flatten_act(out)
if self.has_rnn:
if not self.is_rnn_before_mlp:
out = self.mlp(out)
batch_size = out.size()[0]
num_seqs = batch_size // seq_length
out = out.reshape(num_seqs, seq_length, -1)
if len(states) == 1:
states = states[0]
out, states = self.rnn(out, states)
out = out.contiguous().reshape(out.size()[0] * out.size()[1], -1)
#out = self.layer_norm(out)
if type(states) is not tuple:
states = (states,)
if self.is_rnn_before_mlp:
for l in self.mlp:
out = l(out)
else:
for l in self.mlp:
out = l(out)
value = self.value_act(self.value(out))
if self.is_discrete:
logits = self.logits(out)
return logits, value, states
if self.is_continuous:
mu = self.mu_act(self.mu(out))
if self.space_config['fixed_sigma']:
sigma = self.sigma_act(self.sigma)
else:
sigma = self.sigma_act(self.sigma(out))
return mu, mu*0 + sigma, value, states
def load(self, params):
self.separate = params['separate']
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous' in params['space']
self.is_multi_discrete = 'multi_discrete'in params['space']
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
elif self.is_multi_discrete:
self.space_config = params['space']['multi_discrete']
self.has_rnn = 'rnn' in params
if self.has_rnn:
self.rnn_units = params['rnn']['units']
self.rnn_layers = params['rnn']['layers']
self.rnn_name = params['rnn']['name']
self.is_rnn_before_mlp = params['rnn'].get('before_mlp', False)
self.has_cnn = True
self.conv_depths = params['cnn']['conv_depths']
def _build_impala(self, input_shape, depths):
in_channels = input_shape[0]
layers = nn.ModuleList()
for d in depths:
layers.append(ImpalaSequential(in_channels, d))
in_channels = d
return nn.Sequential(*layers)
def is_separate_critic(self):
return False
def is_rnn(self):
return self.has_rnn
def get_default_rnn_state(self):
num_layers = self.rnn_layers
if self.rnn_name == 'lstm':
return (torch.zeros((num_layers, self.num_seqs, self.rnn_units)),
torch.zeros((num_layers, self.num_seqs, self.rnn_units)))
else:
                return (torch.zeros((num_layers, self.num_seqs, self.rnn_units)),)
def build(self, name, **kwargs):
net = A2CResnetBuilder.Network(self.params, **kwargs)
return net
class DiagGaussianActor(NetworkBuilder.BaseNetwork):
"""torch.distributions implementation of an diagonal Gaussian policy."""
def __init__(self, output_dim, log_std_bounds, **mlp_args):
super().__init__()
self.log_std_bounds = log_std_bounds
self.trunk = self._build_mlp(**mlp_args)
last_layer = list(self.trunk.children())[-2].out_features
self.trunk = nn.Sequential(*list(self.trunk.children()), nn.Linear(last_layer, output_dim))
def forward(self, obs):
mu, log_std = self.trunk(obs).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
#log_std = torch.tanh(log_std)
log_std_min, log_std_max = self.log_std_bounds
log_std = torch.clamp(log_std, log_std_min, log_std_max)
#log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std + 1)
std = log_std.exp()
# TODO: Refactor
dist = SquashedNormal(mu, std)
# Modify to only return mu and std
return dist
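# Illustrative sketch (not part of the original file): driving DiagGaussianActor directly with a
# hand-written mlp_args dict. It assumes SquashedNormal (from sac_helper) behaves like a torch
# TransformedDistribution, i.e. exposes rsample() and log_prob(); the sizes here are made up.
def _example_diag_gaussian_actor():
    mlp_args = {'input_size': 11, 'units': [64, 64], 'activation': 'relu',
                'dense_func': torch.nn.Linear, 'norm_func_name': None, 'd2rl': False}
    actor = DiagGaussianActor(output_dim=2 * 3, log_std_bounds=(-5, 2), **mlp_args)
    dist = actor(torch.zeros(8, 11))
    action = dist.rsample()                                  # reparameterized, squashed into (-1, 1)
    log_prob = dist.log_prob(action).sum(-1, keepdim=True)   # per-sample log-likelihood
    return action.shape, log_prob.shape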
class DoubleQCritic(NetworkBuilder.BaseNetwork):
"""Critic network, employes double Q-learning."""
def __init__(self, output_dim, **mlp_args):
super().__init__()
self.Q1 = self._build_mlp(**mlp_args)
last_layer = list(self.Q1.children())[-2].out_features
self.Q1 = nn.Sequential(*list(self.Q1.children()), nn.Linear(last_layer, output_dim))
self.Q2 = self._build_mlp(**mlp_args)
last_layer = list(self.Q2.children())[-2].out_features
self.Q2 = nn.Sequential(*list(self.Q2.children()), nn.Linear(last_layer, output_dim))
def forward(self, obs, action):
assert obs.size(0) == action.size(0)
obs_action = torch.cat([obs, action], dim=-1)
q1 = self.Q1(obs_action)
q2 = self.Q2(obs_action)
return q1, q2
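# Illustrative sketch (assumption): the usual double-Q target takes the element-wise minimum of the
# two critic heads to curb overestimation; reward/not_done are hypothetical (batch, 1) tensors.
def _example_double_q_target(critic_target, next_obs, next_action, reward, not_done, gamma=0.99):
    with torch.no_grad():
        target_q1, target_q2 = critic_target(next_obs, next_action)
        target_v = torch.min(target_q1, target_q2)
        return reward + not_done * gamma * target_v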
class SACBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.params = params
def build(self, name, **kwargs):
net = SACBuilder.Network(self.params, **kwargs)
return net
class Network(NetworkBuilder.BaseNetwork):
def __init__(self, params, **kwargs):
actions_num = kwargs.pop('actions_num')
input_shape = kwargs.pop('input_shape')
obs_dim = kwargs.pop('obs_dim')
action_dim = kwargs.pop('action_dim')
self.num_seqs = num_seqs = kwargs.pop('num_seqs', 1)
NetworkBuilder.BaseNetwork.__init__(self)
self.load(params)
mlp_input_shape = input_shape
actor_mlp_args = {
'input_size' : obs_dim,
'units' : self.units,
'activation' : self.activation,
'norm_func_name' : self.normalization,
'dense_func' : torch.nn.Linear,
'd2rl' : self.is_d2rl,
'norm_only_first_layer' : self.norm_only_first_layer
}
critic_mlp_args = {
'input_size' : obs_dim + action_dim,
'units' : self.units,
'activation' : self.activation,
'norm_func_name' : self.normalization,
'dense_func' : torch.nn.Linear,
'd2rl' : self.is_d2rl,
'norm_only_first_layer' : self.norm_only_first_layer
}
print("Building Actor")
self.actor = self._build_actor(2*action_dim, self.log_std_bounds, **actor_mlp_args)
if self.separate:
print("Building Critic")
self.critic = self._build_critic(1, **critic_mlp_args)
print("Building Critic Target")
self.critic_target = self._build_critic(1, **critic_mlp_args)
self.critic_target.load_state_dict(self.critic.state_dict())
mlp_init = self.init_factory.create(**self.initializer)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                    # no CNN-specific initializer is configured for the SAC builder; fall back to the MLP initializer
                    mlp_init(m.weight)
if getattr(m, "bias", None) is not None:
torch.nn.init.zeros_(m.bias)
if isinstance(m, nn.Linear):
mlp_init(m.weight)
if getattr(m, "bias", None) is not None:
torch.nn.init.zeros_(m.bias)
def _build_critic(self, output_dim, **mlp_args):
return DoubleQCritic(output_dim, **mlp_args)
def _build_actor(self, output_dim, log_std_bounds, **mlp_args):
return DiagGaussianActor(output_dim, log_std_bounds, **mlp_args)
def forward(self, obs_dict):
"""TODO"""
obs = obs_dict['obs']
mu, sigma = self.actor(obs)
return mu, sigma
def is_separate_critic(self):
return self.separate
def load(self, params):
self.separate = params.get('separate', True)
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.is_d2rl = params['mlp'].get('d2rl', False)
self.norm_only_first_layer = params['mlp'].get('norm_only_first_layer', False)
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
self.has_space = 'space' in params
self.value_shape = params.get('value_shape', 1)
self.central_value = params.get('central_value', False)
self.joint_obs_actions_config = params.get('joint_obs_actions', None)
self.log_std_bounds = params.get('log_std_bounds', None)
if self.has_space:
self.is_discrete = 'discrete' in params['space']
self.is_continuous = 'continuous'in params['space']
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
else:
self.is_discrete = False
self.is_continuous = False
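# Illustrative sketch (not part of the original library): building the SAC actor/critic pair from a
# minimal params dict. obs_dim=11 and action_dim=3 are made-up values for demonstration only.
def _example_build_sac_network():
    params = {
        'separate': True,
        'log_std_bounds': [-5, 2],
        'space': {'continuous': {}},
        'mlp': {'units': [64, 64], 'activation': 'relu', 'initializer': {'name': 'default'}},
    }
    builder = SACBuilder()
    builder.load(params)
    net = builder.build('sac', actions_num=3, input_shape=(11,), obs_dim=11, action_dim=3)
    dist = net.actor(torch.zeros(4, 11))                        # SquashedNormal policy distribution
    q1, q2 = net.critic(torch.zeros(4, 11), torch.zeros(4, 3))  # double-Q values, each (4, 1)
    return dist, q1.shape, q2.shape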
'''
class DQNBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_dueling = params['dueling']
self.atoms = params['atoms']
self.is_noisy = params['noisy']
self.normalization = params.get('normalization', None)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
is_train = kwargs.pop('is_train', True)
if self.is_noisy:
dense_layer = self._noisy_dense
else:
dense_layer = torch.nn.Linear
with tf.variable_scope(name, reuse=reuse):
out = input
if self.has_cnn:
cnn_args = {
'name' :'dqn_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out = self._build_conv(**cnn_args)
out = tf.contrib.layers.flatten(out)
mlp_args = {
'name' :'dqn_mlp',
'input' : out,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train,
'dense_func' : dense_layer
}
if self.is_dueling:
if len(self.units) > 1:
mlp_args['units'] = self.units[:-1]
out = self._build_mlp(**mlp_args)
hidden_value = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_val')
hidden_advantage = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_adv')
value = dense_layer(inputs=hidden_value, units=self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), activation=tf.identity, kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='value')
advantage = dense_layer(inputs=hidden_advantage, units= actions_num * self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='advantage')
advantage = tf.reshape(advantage, shape = [-1, actions_num, self.atoms])
value = tf.reshape(value, shape = [-1, 1, self.atoms])
q_values = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
else:
mlp_args['units'] = self.units
out = self._build_mlp('dqn_mlp', out, self.units, self.activation, self.initializer, self.regularizer)
q_values = dense_layer(inputs=out, units=actions_num *self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='q_vals')
q_values = tf.reshape(q_values, shape = [-1, actions_num, self.atoms])
if self.atoms == 1:
return tf.squeeze(q_values)
else:
return q_values
'''
| 43,505 | Python | 42.160714 | 301 | 0.520607 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/a2c_continuous.py | from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch.running_mean_std import RunningMeanStd, RunningMeanStdObs
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch import ppg_aux
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class A2CAgent(a2c_common.ContinuousA2CBase):
def __init__(self, base_name, config):
a2c_common.ContinuousA2CBase.__init__(self, base_name, config)
obs_shape = self.obs_shape
config = {
'actions_num' : self.actions_num,
'input_shape' : obs_shape,
'num_seqs' : self.num_actors * self.num_agents,
'value_size': self.env_info.get('value_size',1)
}
self.model = self.network.build(config)
self.model.to(self.ppo_device)
self.states = None
self.init_rnn_from_model(self.model)
self.last_lr = float(self.last_lr)
self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
if self.normalize_input:
if isinstance(self.observation_space,gym.spaces.Dict):
self.running_mean_std = RunningMeanStdObs(obs_shape).to(self.ppo_device)
else:
self.running_mean_std = RunningMeanStd(obs_shape).to(self.ppo_device)
if self.has_central_value:
cv_config = {
'state_shape' : self.state_shape,
'value_size' : self.value_size,
'ppo_device' : self.ppo_device,
'num_agents' : self.num_agents,
'num_steps' : self.horizon_length,
'num_actors' : self.num_actors,
'num_actions' : self.actions_num,
'seq_len' : self.seq_len,
'model' : self.central_value_config['network'],
'config' : self.central_value_config,
'writter' : self.writer,
'max_epochs' : self.max_epochs,
'multi_gpu' : self.multi_gpu
}
self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)
self.use_experimental_cv = self.config.get('use_experimental_cv', True)
self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
if 'phasic_policy_gradients' in self.config:
self.has_phasic_policy_gradients = True
self.ppg_aux_loss = ppg_aux.PPGAux(self, self.config['phasic_policy_gradients'])
self.has_value_loss = (self.has_central_value \
and self.use_experimental_cv) \
or not self.has_phasic_policy_gradients
self.algo_observer.after_init(self)
def update_epoch(self):
self.epoch_num += 1
return self.epoch_num
def save(self, fn):
state = self.get_full_state_weights()
torch_ext.save_checkpoint(fn, state)
def restore(self, fn):
checkpoint = torch_ext.load_checkpoint(fn)
self.set_full_state_weights(checkpoint)
def get_masked_action_values(self, obs, action_masks):
assert False
def calc_gradients(self, input_dict):
value_preds_batch = input_dict['old_values']
old_action_log_probs_batch = input_dict['old_logp_actions']
advantage = input_dict['advantages']
old_mu_batch = input_dict['mu']
old_sigma_batch = input_dict['sigma']
return_batch = input_dict['returns']
actions_batch = input_dict['actions']
obs_batch = input_dict['obs']
obs_batch = self._preproc_obs(obs_batch)
lr = self.last_lr
kl = 1.0
lr_mul = 1.0
curr_e_clip = lr_mul * self.e_clip
batch_dict = {
'is_train': True,
'prev_actions': actions_batch,
'obs' : obs_batch,
}
rnn_masks = None
if self.is_rnn:
rnn_masks = input_dict['rnn_masks']
batch_dict['rnn_states'] = input_dict['rnn_states']
batch_dict['seq_length'] = self.seq_len
with torch.cuda.amp.autocast(enabled=self.mixed_precision):
res_dict = self.model(batch_dict)
action_log_probs = res_dict['prev_neglogp']
values = res_dict['values']
entropy = res_dict['entropy']
mu = res_dict['mus']
sigma = res_dict['sigmas']
a_loss = common_losses.actor_loss(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
if self.has_value_loss:
c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
else:
c_loss = torch.zeros(1, device=self.ppo_device)
b_loss = self.bound_loss(mu)
losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks)
a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]
loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * self.entropy_coef + b_loss * self.bounds_loss_coef
if self.multi_gpu:
self.optimizer.zero_grad()
else:
for param in self.model.parameters():
param.grad = None
self.scaler.scale(loss).backward()
        #TODO: Refactor this, ugliest code of the year
if self.truncate_grads:
if self.multi_gpu:
self.optimizer.synchronize()
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
with self.optimizer.skip_synchronize():
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.scaler.step(self.optimizer)
self.scaler.update()
with torch.no_grad():
reduce_kl = not self.is_rnn
kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
if self.is_rnn:
kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask
self.train_result = (a_loss, c_loss, entropy, \
kl_dist, self.last_lr, lr_mul, \
mu.detach(), sigma.detach(), b_loss)
def train_actor_critic(self, input_dict):
self.calc_gradients(input_dict)
return self.train_result
def bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.1
            # penalize only action means that fall outside [-soft_bound, soft_bound]
            mu_loss_high = torch.clamp_min(mu - soft_bound, 0.0)**2
            mu_loss_low = torch.clamp_max(mu + soft_bound, 0.0)**2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
else:
b_loss = 0
return b_loss
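# Illustrative sketch (not part of the original file): with soft_bound = 1.1 the bound loss is zero
# for action means inside [-1.1, 1.1] and grows quadratically outside, which is what the
# bounds_loss_coef term in calc_gradients penalizes.
def _example_bound_loss_values():
    mu = torch.tensor([[0.5, -1.0, 1.3, -2.0]])
    soft_bound = 1.1
    high = torch.clamp_min(mu - soft_bound, 0.0) ** 2   # only the 1.3 entry contributes (0.2 ** 2)
    low = torch.clamp_max(mu + soft_bound, 0.0) ** 2    # only the -2.0 entry contributes (0.9 ** 2)
    return (high + low).sum(dim=-1)                     # ~0.85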
| 7,399 | Python | 39.217391 | 142 | 0.570347 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_torch/running_mean_std.py | import torch
import torch.nn as nn
import numpy as np
'''
updates statistic from a full data
'''
class RunningMeanStd(nn.Module):
def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
super(RunningMeanStd, self).__init__()
print('RunningMeanStd: ', insize)
self.insize = insize
self.epsilon = epsilon
self.norm_only = norm_only
self.per_channel = per_channel
if per_channel:
if len(self.insize) == 3:
self.axis = [0,2,3]
if len(self.insize) == 2:
self.axis = [0,2]
if len(self.insize) == 1:
self.axis = [0]
in_size = self.insize[0]
else:
self.axis = [0]
in_size = insize
self.register_buffer("running_mean", torch.zeros(in_size, dtype = torch.float64))
self.register_buffer("running_var", torch.ones(in_size, dtype = torch.float64))
self.register_buffer("count", torch.ones((), dtype = torch.float64))
def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + delta**2 * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
def forward(self, input, unnorm=False):
if self.training:
mean = input.mean(self.axis) # along channel axis
var = input.var(self.axis)
self.running_mean, self.running_var, self.count = self._update_mean_var_count_from_moments(self.running_mean, self.running_var, self.count,
mean, var, input.size()[0] )
# change shape
if self.per_channel:
if len(self.insize) == 3:
current_mean = self.running_mean.view([1, self.insize[0], 1, 1]).expand_as(input)
current_var = self.running_var.view([1, self.insize[0], 1, 1]).expand_as(input)
if len(self.insize) == 2:
current_mean = self.running_mean.view([1, self.insize[0], 1]).expand_as(input)
current_var = self.running_var.view([1, self.insize[0], 1]).expand_as(input)
if len(self.insize) == 1:
current_mean = self.running_mean.view([1, self.insize[0]]).expand_as(input)
current_var = self.running_var.view([1, self.insize[0]]).expand_as(input)
else:
current_mean = self.running_mean
current_var = self.running_var
# get output
if unnorm:
y = torch.clamp(input, min=-5.0, max=5.0)
y = torch.sqrt(current_var.float() + self.epsilon)*y + current_mean.float()
else:
if self.norm_only:
y = input/ torch.sqrt(current_var.float() + self.epsilon)
else:
y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
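# Illustrative sketch (not part of the original file): in train mode each forward pass folds the
# batch statistics into the running moments; in eval mode the stored moments are used as-is to
# normalize (or, with unnorm=True, de-normalize) new data. The toy shift/scale below are arbitrary.
def _example_running_mean_std():
    rms = RunningMeanStd((3,))
    rms.train()
    for _ in range(10):
        rms(torch.randn(64, 3) * 2.0 + 1.0)   # updates running mean/var as a side effect
    rms.eval()
    normalized = rms(torch.randn(64, 3) * 2.0 + 1.0)
    return normalized.mean(0), normalized.std(0)  # roughly zero mean, unit std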
class RunningMeanStdObs(nn.Module):
def __init__(self, insize, epsilon=1e-05, per_channel=False, norm_only=False):
        assert isinstance(insize, dict)
super(RunningMeanStdObs, self).__init__()
self.running_mean_std = nn.ModuleDict({
k : RunningMeanStd(v, epsilon, per_channel, norm_only) for k,v in insize.items()
})
def forward(self, input, unnorm=False):
        res = {k : self.running_mean_std[k](v, unnorm) for k, v in input.items()}
return res | 3,757 | Python | 41.224719 | 152 | 0.558957 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/tensorflow_utils.py | import tensorflow as tf
import numpy as np
import collections
from collections import deque, OrderedDict
def unflatten(vector, shapes):
i = 0
arrays = []
for shape in shapes:
size = np.prod(shape, dtype=np.int)
array = vector[i:(i + size)].reshape(shape)
arrays.append(array)
i += size
assert len(vector) == i, "Passed weight does not have the correct shape."
return arrays
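# Illustrative sketch (not part of the original file): unflatten() is the inverse of concatenating
# flattened weights, e.g. splitting a 9-element vector back into a (2, 3) kernel and a (3,) bias.
def _example_unflatten_round_trip():
    shapes = [(2, 3), (3,)]
    flat = np.arange(9, dtype=np.float32)
    kernel, bias = unflatten(flat, shapes)
    return kernel.shape, bias.shape  # (2, 3), (3,)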
class TensorFlowVariables(object):
"""A class used to set and get weights for Tensorflow networks.
Attributes:
sess (tf.Session): The tensorflow session used to run assignment.
variables (Dict[str, tf.Variable]): Extracted variables from the loss
or additional variables that are passed in.
placeholders (Dict[str, tf.placeholders]): Placeholders for weights.
assignment_nodes (Dict[str, tf.Tensor]): Nodes that assign weights.
"""
def __init__(self, output, sess=None, input_variables=None):
"""Creates TensorFlowVariables containing extracted variables.
The variables are extracted by performing a BFS search on the
dependency graph with loss as the root node. After the tree is
traversed and those variables are collected, we append input_variables
to the collected variables. For each variable in the list, the
variable has a placeholder and assignment operation created for it.
Args:
output (tf.Operation, List[tf.Operation]): The tensorflow
operation to extract all variables from.
sess (tf.Session): Session used for running the get and set
methods.
input_variables (List[tf.Variables]): Variables to include in the
list.
"""
self.sess = sess
if not isinstance(output, (list, tuple)):
output = [output]
queue = deque(output)
variable_names = []
explored_inputs = set(output)
# We do a BFS on the dependency graph of the input function to find
# the variables.
while len(queue) != 0:
tf_obj = queue.popleft()
if tf_obj is None:
continue
# The object put into the queue is not necessarily an operation,
# so we want the op attribute to get the operation underlying the
# object. Only operations contain the inputs that we can explore.
if hasattr(tf_obj, "op"):
tf_obj = tf_obj.op
for input_op in tf_obj.inputs:
if input_op not in explored_inputs:
queue.append(input_op)
explored_inputs.add(input_op)
# Tensorflow control inputs can be circular, so we keep track of
# explored operations.
for control in tf_obj.control_inputs:
if control not in explored_inputs:
queue.append(control)
explored_inputs.add(control)
if "Variable" in tf_obj.node_def.op:
variable_names.append(tf_obj.node_def.name)
self.variables = OrderedDict()
variable_list = [
v for v in tf.global_variables()
if v.op.node_def.name in variable_names
]
if input_variables is not None:
variable_list += input_variables
for v in variable_list:
self.variables[v.op.node_def.name] = v
self.placeholders = {}
self.assignment_nodes = {}
# Create new placeholders to put in custom weights.
for k, var in self.variables.items():
self.placeholders[k] = tf.placeholder(
var.value().dtype,
var.get_shape().as_list(),
name="Placeholder_" + k)
self.assignment_nodes[k] = var.assign(self.placeholders[k])
def set_session(self, sess):
"""Sets the current session used by the class.
Args:
sess (tf.Session): Session to set the attribute with.
"""
self.sess = sess
def get_flat_size(self):
"""Returns the total length of all of the flattened variables.
Returns:
The length of all flattened variables concatenated.
"""
return sum(
np.prod(v.get_shape().as_list()) for v in self.variables.values())
def _check_sess(self):
"""Checks if the session is set, and if not throw an error message."""
assert self.sess is not None, ("The session is not set. Set the "
"session either by passing it into the "
"TensorFlowVariables constructor or by "
"calling set_session(sess).")
def get_flat(self):
"""Gets the weights and returns them as a flat array.
Returns:
1D Array containing the flattened weights.
"""
self._check_sess()
return np.concatenate([
v.eval(session=self.sess).flatten()
for v in self.variables.values()
])
def set_flat(self, new_weights):
"""Sets the weights to new_weights, converting from a flat array.
Note:
You can only set all weights in the network using this function,
i.e., the length of the array must match get_flat_size.
Args:
new_weights (np.ndarray): Flat array containing weights.
"""
self._check_sess()
shapes = [v.get_shape().as_list() for v in self.variables.values()]
arrays = unflatten(new_weights, shapes)
placeholders = [
self.placeholders[k] for k, v in self.variables.items()
]
self.sess.run(
list(self.assignment_nodes.values()),
feed_dict=dict(zip(placeholders, arrays)))
def get_weights(self):
"""Returns a dictionary containing the weights of the network.
Returns:
Dictionary mapping variable names to their weights.
"""
self._check_sess()
return {
k: v.eval(session=self.sess)
for k, v in self.variables.items()
}
def set_weights(self, new_weights):
"""Sets the weights to new_weights.
Note:
Can set subsets of variables as well, by only passing in the
variables you want to be set.
Args:
new_weights (Dict): Dictionary mapping variable names to their
weights.
"""
self._check_sess()
assign_list = [
self.assignment_nodes[name] for name in new_weights.keys()
if name in self.assignment_nodes
]
assert assign_list, ("No variables in the input matched those in the "
"network. Possible cause: Two networks were "
"defined in the same TensorFlow graph. To fix "
"this, place each network definition in its own "
"tf.Graph.")
self.sess.run(
assign_list,
feed_dict={
self.placeholders[name]: value
for (name, value) in new_weights.items()
if name in self.placeholders
}) | 7,289 | Python | 39.5 | 79 | 0.571409 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/tf_moving_mean_std.py | import tensorflow as tf
from tensorflow.python.training.moving_averages import assign_moving_average
class MovingMeanStd(object):
def __init__(self, shape, epsilon, decay, clamp = 5.0):
self.moving_mean = tf.Variable(tf.constant(0.0, shape=shape, dtype=tf.float64), trainable=False)#, name='moving_mean')
self.moving_variance = tf.Variable(tf.constant(1.0, shape=shape, dtype=tf.float64), trainable=False)#, name='moving_variance' )
self.epsilon = epsilon
self.shape = shape
self.decay = decay
self.count = tf.Variable(tf.constant(epsilon, shape=shape, dtype=tf.float64), trainable=False)
self.clamp = clamp
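    # Chan et al. style parallel update: merges the running (mean, var, count) with the statistics
    # of an incoming batch without having to keep past samples around.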
def update_mean_var_count_from_moments(self, mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + tf.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
def normalize(self, x, train=True):
x64 = tf.cast(x, tf.float64)
if train:
shape = x.get_shape().as_list()
if (len(shape) == 2):
axis = [0]
if (len(shape) == 3):
axis = [0, 1]
if (len(shape) == 4):
axis = [0, 1, 2]
mean, var = tf.nn.moments(x64, axis)
new_mean, new_var, new_count = self.update_mean_var_count_from_moments(self.moving_mean, self.moving_variance, self.count, mean, var, tf.cast(tf.shape(x)[0], tf.float64))
mean_op = self.moving_mean.assign(new_mean)
var_op = self.moving_variance.assign(tf.maximum(new_var, 1e-2))
count_op = self.count.assign(new_count)
with tf.control_dependencies([mean_op, var_op, count_op]):
res = tf.cast((x64 - self.moving_mean) / (tf.sqrt(self.moving_variance)), tf.float32)
return tf.clip_by_value(res, -self.clamp, self.clamp)
else:
res = tf.cast((x64 - self.moving_mean) / (tf.sqrt(self.moving_variance)), tf.float32)
return tf.clip_by_value(res, -self.clamp, self.clamp) | 2,361 | Python | 49.255318 | 182 | 0.581957 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/players.py | from rl_games.common import env_configurations
from rl_games.algos_tf14 import dqnagent
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
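# Illustrative sketch (not part of the original file): rescale_actions maps the network's
# [-1, 1] output onto the environment's action bounds, e.g. low=[-2, 0], high=[2, 1].
def _example_rescale_actions():
    low, high = np.array([-2.0, 0.0]), np.array([2.0, 1.0])
    return rescale_actions(low, high, np.array([0.0, 1.0]))  # -> array([0., 1.])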
class BasePlayer(object):
def __init__(self, sess, config):
self.config = config
self.sess = sess
self.env_name = self.config['env_name']
self.env_spaces = env_configurations.get_env_info(self.config)
self.obs_space, self.action_space, self.num_agents = self.env_spaces['observation_space'], self.env_spaces['action_space'], self.env_spaces['agents']
self.env = None
self.env_config = self.config.get('env_config', None)
def restore(self, fn):
raise NotImplementedError('restore')
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def create_env(self):
return env_configurations.configurations[self.env_name]['env_creator']()
def get_action(self, obs, is_determenistic = False):
raise NotImplementedError('step')
def get_masked_action(self, obs, mask, is_determenistic = False):
raise NotImplementedError('step')
def reset(self):
        raise NotImplementedError('reset')
def run(self, n_games=1000, n_game_life = 1, render= False):
self.env = self.create_env()
sum_rewards = 0
sum_steps = 0
sum_game_res = 0
n_games = n_games * n_game_life
has_masks = False
has_masks_func = getattr(self.env, "has_action_mask", None) is not None
if has_masks_func:
has_masks = self.env.has_action_mask()
is_determenistic = True
for _ in range(n_games):
cr = 0
steps = 0
s = self.env.reset()
for _ in range(5000):
if has_masks:
masks = self.env.get_action_mask()
action = self.get_masked_action(s, masks, is_determenistic)
else:
action = self.get_action(s, is_determenistic)
s, r, done, info = self.env.step(action)
cr += r
steps += 1
if render:
self.env.render(mode = 'human')
if not np.isscalar(done):
done = done.any()
if done:
game_res = 0.0
if isinstance(info, dict):
if 'battle_won' in info:
game_res = info['battle_won']
if 'scores' in info:
game_res = info['scores']
print('reward:', np.mean(cr), 'steps:', steps, 'scores:', game_res)
sum_game_res += game_res
sum_rewards += np.mean(cr)
sum_steps += steps
break
print('av reward:', sum_rewards / n_games * n_game_life, 'av steps:', sum_steps / n_games * n_game_life, 'scores:', sum_game_res / n_games * n_game_life)
class PpoPlayerContinuous(BasePlayer):
def __init__(self, sess, config):
BasePlayer.__init__(self, sess, config)
self.network = config['network']
self.obs_ph = tf.placeholder('float32', (None, ) + self.obs_space.shape, name = 'obs')
self.actions_num = self.action_space.shape[0]
self.actions_low = self.action_space.low
self.actions_high = self.action_space.high
self.mask = [False]
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.normalize_input = self.config['normalize_input']
self.input_obs = self.obs_ph
if self.obs_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = self.obs_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=False)
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : 1,
'games_num' : 1,
'actions_num' : self.actions_num,
'prev_actions_ph' : None
}
self.last_state = None
if self.network.is_rnn():
self.neglop, self.value, self.action, _, self.mu, _, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.run_dict, reuse=False)
self.last_state = self.initial_state
else:
self.neglop, self.value, self.action, _, self.mu, _ = self.network(self.run_dict, reuse=False)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
def get_action(self, obs, is_determenistic = True):
if is_determenistic:
ret_action = self.mu
else:
ret_action = self.action
if self.network.is_rnn():
action, self.last_state = self.sess.run([ret_action, self.lstm_state], {self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
else:
action = self.sess.run([ret_action], {self.obs_ph : obs})
action = np.squeeze(action)
return rescale_actions(self.actions_low, self.actions_high, np.clip(action, -1.0, 1.0))
def restore(self, fn):
self.saver.restore(self.sess, fn)
def reset(self):
if self.network.is_rnn():
self.last_state = self.initial_state
#self.mask = [True]
class PpoPlayerDiscrete(BasePlayer):
def __init__(self, sess, config):
BasePlayer.__init__(self, sess, config)
self.network = config['network']
self.use_action_masks = config.get('use_action_masks', False)
self.obs_ph = tf.placeholder(self.obs_space.dtype, (None, ) + self.obs_space.shape, name = 'obs')
self.actions_num = self.action_space.n
if self.use_action_masks:
print('using masks for action')
self.action_mask_ph = tf.placeholder('int32', (None, self.actions_num), name = 'actions_mask')
else:
self.action_mask_ph = None
self.mask = [False] * self.num_agents
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.normalize_input = self.config['normalize_input']
self.input_obs = self.obs_ph
if self.obs_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = self.obs_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=False)
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : self.num_agents,
'games_num' : self.num_agents,
'actions_num' : self.actions_num,
'prev_actions_ph' : None,
'action_mask_ph' : self.action_mask_ph
}
self.last_state = None
if self.network.is_rnn():
self.neglop , self.value, self.action, _,self.states_ph, self.masks_ph, self.lstm_state, self.initial_state, self.logits = self.network(self.run_dict, reuse=False)
self.last_state = self.initial_state * self.num_agents
else:
self.neglop , self.value, self.action, _, self.logits = self.network(self.run_dict, reuse=False)
self.variables = TensorFlowVariables([self.neglop, self.value, self.action], self.sess)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
def get_action(self, obs, is_determenistic = True):
ret_action = self.action
if self.network.is_rnn():
action, self.last_state, logits = self.sess.run([ret_action, self.lstm_state, self.logits], {self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
else:
action, logits = self.sess.run([ret_action, self.logits], {self.obs_ph : obs})
if is_determenistic:
return np.argmax(logits, axis = -1).astype(np.int32)
else:
return int(np.squeeze(action))
def get_masked_action(self, obs, mask, is_determenistic = False):
#if is_determenistic:
ret_action = self.action
if self.network.is_rnn():
action, self.last_state, logits = self.sess.run([ret_action, self.lstm_state, self.logits], {self.action_mask_ph : mask, self.obs_ph : obs, self.states_ph : self.last_state, self.masks_ph : self.mask})
else:
action, logits = self.sess.run([ret_action, self.logits], {self.action_mask_ph : mask, self.obs_ph : obs})
if is_determenistic:
logits = np.array(logits)
return np.argmax(logits, axis = -1).astype(np.int32)
else:
return np.squeeze(action).astype(np.int32)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def reset(self):
if self.network.is_rnn():
self.last_state = self.initial_state
class DQNPlayer(BasePlayer):
def __init__(self, sess, config):
BasePlayer.__init__(self, sess, config)
self.dqn = dqnagent.DQNAgent(sess, 'player', self.obs_space, self.action_space, config)
def get_action(self, obs, is_determenistic = False):
return self.dqn.get_action(np.squeeze(obs), 0.0)
def restore(self, fn):
self.dqn.restore(fn)
def reset(self):
if self.network.is_rnn():
self.last_state = self.initial_state | 10,057 | Python | 38.754941 | 213 | 0.577608 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/networks.py | import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def sample_noise(shape, mean = 0.0, std = 1.0):
noise = tf.random_normal(shape, mean = mean, stddev = std)
return noise
# Added by Andrew Liao
# for NoisyNet-DQN (using Factorised Gaussian noise)
# modified from ```dense``` function
def noisy_dense(inputs, units, name, bias=True, activation=tf.identity, mean = 0.0, std = 1.0):
# the function used in eq.7,8
def f(x):
return tf.multiply(tf.sign(x), tf.pow(tf.abs(x), 0.5))
# Initializer of \mu and \sigma
mu_init = tf.random_uniform_initializer(minval=-1*1/np.power(inputs.get_shape().as_list()[1], 0.5),
maxval=1*1/np.power(inputs.get_shape().as_list()[1], 0.5))
sigma_init = tf.constant_initializer(0.4/np.power(inputs.get_shape().as_list()[1], 0.5))
# Sample noise from gaussian
p = sample_noise([inputs.get_shape().as_list()[1], 1], mean = 0.0, std = 1.0)
q = sample_noise([1, units], mean = 0.0, std = 1.0)
f_p = f(p); f_q = f(q)
w_epsilon = f_p*f_q; b_epsilon = tf.squeeze(f_q)
# w = w_mu + w_sigma*w_epsilon
w_mu = tf.get_variable(name + "/w_mu", [inputs.get_shape()[1], units], initializer=mu_init)
w_sigma = tf.get_variable(name + "/w_sigma", [inputs.get_shape()[1], units], initializer=sigma_init)
w = w_mu + tf.multiply(w_sigma, w_epsilon)
ret = tf.matmul(inputs, w)
if bias:
# b = b_mu + b_sigma*b_epsilon
b_mu = tf.get_variable(name + "/b_mu", [units], initializer=mu_init)
b_sigma = tf.get_variable(name + "/b_sigma", [units], initializer=sigma_init)
b = b_mu + tf.multiply(b_sigma, b_epsilon)
return activation(ret + b)
else:
return activation(ret)
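# Illustrative sketch (not part of the original file): replacing ordinary dense layers with
# noisy_dense in a small Q-head. 'features' is assumed to be a (batch, n_features) tensor with a
# statically known second dimension; building this graph resamples the factorised noise per call.
def _example_noisy_q_head(features, actions_num=4):
    hidden = noisy_dense(features, 128, name='noisy_fc1', activation=tf.nn.relu)
    return noisy_dense(hidden, actions_num, name='noisy_q', activation=tf.identity)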
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
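# Illustrative sketch (not part of the original file): batch_to_seq splits a (nenv * nsteps, feat)
# tensor into nsteps per-timestep tensors of shape (nenv, feat); seq_to_batch undoes the split.
def _example_batch_seq_round_trip():
    x = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2 * 3, 4])  # nenv=2, nsteps=3, feat=4
    seq = batch_to_seq(x, 2, 3)   # list of 3 tensors, each (2, 4)
    back = seq_to_batch(seq)      # (6, 4) again, same ordering as x
    return seq, back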
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def lstm(xs, ms, s, scope, nh, nin):
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(), dtype=tf.float32 )
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init() )
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/tf.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, nin):
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init())
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init())
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
        for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
'''
Uses the LSTM implementation from OpenAI Baselines as the most convenient way to handle done flags.
TODO: switch to a more efficient native TensorFlow implementation.
'''
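# Layout: `inputs` arrives flattened as (env_num * nsteps, features); batch_to_seq splits it into
# a list of nsteps tensors of shape (env_num, features), the (ln)lstm is unrolled over that list
# with the dones resetting the recurrent state, and seq_to_batch flattens the outputs back.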
def openai_lstm(name, inputs, states_ph, dones_ph, units, env_num, batch_num, layer_norm=True):
nbatch = batch_num
nsteps = nbatch // env_num
print('nbatch: ', nbatch)
print('env_num: ', env_num)
dones_ph = tf.to_float(dones_ph)
inputs_seq = batch_to_seq(inputs, env_num, nsteps)
dones_seq = batch_to_seq(dones_ph, env_num, nsteps)
nin = inputs.get_shape()[1].value
with tf.variable_scope(name):
if layer_norm:
hidden_seq, final_state = lnlstm(inputs_seq, dones_seq, states_ph, scope='lnlstm', nin=nin, nh=units)
else:
hidden_seq, final_state = lstm(inputs_seq, dones_seq, states_ph, scope='lstm', nin=nin, nh=units)
hidden = seq_to_batch(hidden_seq)
initial_state = np.zeros(states_ph.shape.as_list(), dtype=float)
return [hidden, final_state, initial_state]
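# Distributional (C51-style) head: each action's value is represented as a categorical
# distribution over `atoms_num` fixed support points; the scalar Q-value is recovered later
# as the expectation sum_i p_i * z_i over that support.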
def distributional_output(inputs, actions_num, atoms_num):
distributed_qs = tf.layers.dense(inputs=inputs, activation=tf.nn.softmax, units=atoms_num * actions_num)
distributed_qs = tf.reshape(distributed_qs, shape = [-1, actions_num, atoms_num])
distributed_qs = tf.nn.softmax(distributed_qs, dim = -1)
return distributed_qs
def distributional_noisy_output(inputs, actions_num, atoms_num, name, mean = 0.0, std = 1.0):
distributed_qs = noisy_dense(inputs=inputs, name=name, activation=tf.nn.softmax, units=atoms_num * actions_num, mean=mean, std=std)
distributed_qs = tf.reshape(distributed_qs, shape = [-1, actions_num, atoms_num])
distributed_qs = tf.nn.softmax(distributed_qs, dim = -1)
return distributed_qs
def atari_conv_net(inputs):
NUM_FILTERS_1 = 32
NUM_FILTERS_2 = 64
NUM_FILTERS_3 = 64
conv1 = tf.layers.conv2d(inputs=inputs,
filters=NUM_FILTERS_1,
kernel_size=[8, 8],
strides=(4, 4),
activation=tf.nn.relu)
conv2 = tf.layers.conv2d(inputs=conv1,
filters=NUM_FILTERS_2,
kernel_size=[4, 4],
strides=(2, 2),
activation=tf.nn.relu)
conv3 = tf.layers.conv2d(inputs=conv2,
filters=NUM_FILTERS_3,
kernel_size=[3, 3],
strides=(1, 1),
activation=tf.nn.relu)
return conv3
def dqn_network(name, inputs, actions_num, atoms_num = 1, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden = tf.layers.dense(inputs=flatten,
units=NUM_HIDDEN_NODES,
activation=tf.nn.relu)
if atoms_num == 1:
logits = tf.layers.dense(inputs=hidden, units=actions_num)
else:
logits = distributional_output(inputs=hidden, actions_num=actions_num, atoms_num=atoms_num)
return logits
'''
dueling_type = 'SIMPLE', 'AVERAGE', 'MAX'
'''
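# Dueling architecture: the Q-value is decomposed into a state value V(s) and per-action
# advantages A(s, a), recombined as Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a') for 'AVERAGE',
# with 'MAX' subtracting the maximum advantage and 'SIMPLE' adding the two streams directly.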
def dueling_dqn_network(name, inputs, actions_num, reuse=False, dueling_type = 'AVERAGE'):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden_value = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
hidden_advantage = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden_value, units=1)
advantage = tf.layers.dense(inputs=hidden_advantage, units=actions_num)
outputs = None
if dueling_type == 'SIMPLE':
outputs = value + advantage
if dueling_type == 'AVERAGE':
outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
if dueling_type == 'MAX':
outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
return outputs
def dueling_dqn_network_with_batch_norm(name, inputs, actions_num, reuse=False, dueling_type = 'AVERAGE', is_train=True):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net_batch_norm(inputs, is_train)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden_value = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
hidden_advantage = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden_value, units=1)
advantage = tf.layers.dense(inputs=hidden_advantage, units=actions_num)
outputs = None
if dueling_type == 'SIMPLE':
outputs = value + advantage
if dueling_type == 'AVERAGE':
outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
if dueling_type == 'MAX':
outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
return outputs
def noisy_dqn_network(name, inputs, actions_num, mean, std, atoms_num = 1, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden = noisy_dense(inputs=flatten,
units=NUM_HIDDEN_NODES,
activation=tf.nn.relu, name = 'noisy_fc1')
if atoms_num == 1:
logits = noisy_dense(inputs=hidden, units=actions_num, name = 'noisy_fc2', mean = mean, std = std)
else:
logits = distributional_noisy_output(inputs=hidden, actions_num=actions_num, atoms_num = atoms_num, name = 'noisy_fc2', mean = mean, std = std)
return logits
'''
dueling_type = 'SIMPLE', 'AVERAGE', 'MAX'
'''
def noisy_dueling_dqn_network(name, inputs, actions_num, mean, std, reuse=False, dueling_type = 'AVERAGE'):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden_value = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_v1', mean = mean, std = std)
hidden_advantage = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_a1', mean = mean, std = std)
value = noisy_dense(inputs=hidden_value, units=1, name = 'noisy_v2', mean = mean, std = std)
advantage = noisy_dense(inputs=hidden_advantage, units=actions_num, name = 'noisy_a2', mean = mean, std = std)
outputs = None
if dueling_type == 'SIMPLE':
outputs = value + advantage
if dueling_type == 'AVERAGE':
outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
if dueling_type == 'MAX':
outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
return outputs
def noisy_dueling_dqn_network_with_batch_norm(name, inputs, actions_num, mean, std, reuse=False, dueling_type = 'AVERAGE', is_train=True):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net_batch_norm(inputs, is_train)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden_value = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_v1', mean = mean, std = std)
hidden_advantage = noisy_dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu, name = 'noisy_a1', mean = mean, std = std)
value = noisy_dense(inputs=hidden_value, units=1, name = 'noisy_v2', mean = mean, std = std)
advantage = noisy_dense(inputs=hidden_advantage, units=actions_num, name = 'noisy_a2', mean = mean, std = std)
outputs = None
if dueling_type == 'SIMPLE':
outputs = value + advantage
if dueling_type == 'AVERAGE':
outputs = value + advantage - tf.reduce_mean(advantage, reduction_indices=1, keepdims=True)
if dueling_type == 'MAX':
outputs = value + advantage - tf.reduce_max(advantage, reduction_indices=1, keepdims=True)
return outputs
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def default_small_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False, activation=tf.nn.elu):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 128
NUM_HIDDEN_NODES1 = 64
NUM_HIDDEN_NODES2 = 32
hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
value = tf.layers.dense(inputs=hidden2c, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden2a, units=actions_num, kernel_initializer=normc_initializer(0.01), activation=None)
var = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
return logits, value
def default_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False, activation=tf.nn.elu):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 256
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, kernel_initializer=normc_initializer(1.0), activation=activation)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, kernel_initializer=normc_initializer(1.0), activation=activation)
        value = tf.layers.dense(inputs=hidden2c, units=1, activation=None, kernel_initializer=normc_initializer(1.0))
if continuous:
mu = tf.layers.dense(inputs=hidden2a, units=actions_num, kernel_initializer=normc_initializer(0.01), activation=None)
var = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
return logits, value
def default_a2c_network_separated_logstd(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 256
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
hidden_init = normc_initializer(1.0) # tf.random_normal_initializer(stddev= 1.0)
hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu, kernel_initializer=hidden_init)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, activation=tf.nn.elu, kernel_initializer=hidden_init)
value = tf.layers.dense(inputs=hidden2c, units=1, activation=None, kernel_initializer=hidden_init)
if continuous:
mu = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None,)
#std = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
#logstd = tf.layers.dense(inputs=hidden2a, units=actions_num)
logstd = tf.get_variable(name='log_std', shape=(actions_num), initializer=tf.constant_initializer(0.0), trainable=True)
return mu, mu * 0 + logstd, value
else:
logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
return logits, value
def default_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 256
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
hidden0 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.relu)
hidden1 = tf.layers.dense(inputs=hidden0, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden2, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden2, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hidden2, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2, units=actions_num, activation=None)
return logits, value
def default_a2c_lstm_network(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
env_num = games_num
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 128
NUM_HIDDEN_NODES1 = 64
NUM_HIDDEN_NODES2 = 64
LSTM_UNITS = 64
hidden0 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.relu)
hidden1 = tf.layers.dense(inputs=hidden0, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
dones_ph = tf.placeholder(tf.float32, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden2, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
def default_a2c_lstm_network_separated(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
env_num = games_num
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES0 = 256
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
LSTM_UNITS = 128
hidden0c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu)
hidden1c = tf.layers.dense(inputs=hidden0c, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu)
hidden0a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES0, activation=tf.nn.elu)
hidden1a = tf.layers.dense(inputs=hidden0a, units=NUM_HIDDEN_NODES1, activation=tf.nn.elu)
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
hidden = tf.concat((hidden1a, hidden1c), axis=1)
lstm_out, lstm_state, initial_state = openai_lstm('lstm_a', hidden, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
value = tf.layers.dense(inputs=lstm_outc, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None, kernel_initializer=tf.random_uniform_initializer(-0.01, 0.01))
var = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_lstm_network_separated(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
env_num = games_num
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES1 = 32
NUM_HIDDEN_NODES2 = 32
#NUM_HIDDEN_NODES3 = 16
LSTM_UNITS = 16
hidden1c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
hidden1a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2* 2*LSTM_UNITS])
states_a, states_c = tf.split(states_ph, 2, axis=1)
lstm_outa, lstm_statae, initial_statea = openai_lstm('lstm_actions', hidden2a, dones_ph=dones_ph, states_ph=states_a, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
lstm_outc, lstm_statec, initial_statec = openai_lstm('lstm_critics', hidden2c, dones_ph=dones_ph, states_ph=states_c, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
initial_state = np.concatenate((initial_statea, initial_statec), axis=1)
lstm_state = tf.concat( values=(lstm_statae, lstm_statec), axis=1)
#lstm_outa = tf.layers.dense(inputs=lstm_outa, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
#lstm_outc = tf.layers.dense(inputs=lstm_outc, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
value = tf.layers.dense(inputs=lstm_outc, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_outa, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_lstm_network(name, inputs, actions_num, env_num, batch_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES1 = 32
NUM_HIDDEN_NODES2 = 32
LSTM_UNITS = 16
hidden1 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden2, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
def simple_a2c_network_separated(name, inputs, actions_num, activation = tf.nn.elu, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES1 = 64
NUM_HIDDEN_NODES2 = 64
hidden1c = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=activation)
hidden2c = tf.layers.dense(inputs=hidden1c, units=NUM_HIDDEN_NODES2, activation=activation)
hidden1a = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=activation)
hidden2a = tf.layers.dense(inputs=hidden1a, units=NUM_HIDDEN_NODES2, activation=activation)
value = tf.layers.dense(inputs=hidden2c, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2a, units=actions_num, activation=None)
return logits, value
def simple_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES1 = 128
NUM_HIDDEN_NODES2 = 64
hidden1 = tf.layers.dense(inputs=inputs, units=NUM_HIDDEN_NODES1, activation=tf.nn.relu)
hidden2 = tf.layers.dense(inputs=hidden1, units=NUM_HIDDEN_NODES2, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden2, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden2, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hidden2, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden2, units=actions_num, activation=None)
return logits, value
def atari_a2c_network_separated(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3a = atari_conv_net(inputs)
conv3c = atari_conv_net(inputs)
flattena = tf.contrib.layers.flatten(inputs = conv3a)
flattenc = tf.contrib.layers.flatten(inputs = conv3c)
hiddena = tf.layers.dense(inputs=flattena, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
hiddenc = tf.layers.dense(inputs=flattenc, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hiddenc, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hiddena, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hiddena, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hiddena, units=actions_num, activation=None)
return logits, value
def atari_a2c_network(name, inputs, actions_num, continuous=False, reuse=False):
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
value = tf.layers.dense(inputs=hidden, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=hidden, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=hidden, units=actions_num, activation=tf.nn.softplus)
return mu, var, value
else:
logits = tf.layers.dense(inputs=hidden, units=actions_num, activation=None)
return logits, value
def atari_a2c_network_lstm(name, inputs, actions_num, games_num, batch_num, continuous=False, reuse=False):
env_num = games_num
with tf.variable_scope(name, reuse=reuse):
NUM_HIDDEN_NODES = 512
LSTM_UNITS = 256
conv3 = atari_conv_net(inputs)
flatten = tf.contrib.layers.flatten(inputs = conv3)
hidden = tf.layers.dense(inputs=flatten, units=NUM_HIDDEN_NODES, activation=tf.nn.relu)
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [env_num, 2*LSTM_UNITS])
lstm_out, lstm_state, initial_state = openai_lstm('lstm_ac', hidden, dones_ph=dones_ph, states_ph=states_ph, units=LSTM_UNITS, env_num=env_num, batch_num=batch_num)
value = tf.layers.dense(inputs=lstm_out, units=1, activation=None)
if continuous:
mu = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.tanh)
var = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=tf.nn.softplus)
return mu, var, value, states_ph, dones_ph, lstm_state, initial_state
else:
logits = tf.layers.dense(inputs=lstm_out, units=actions_num, activation=None)
return logits, value, states_ph, dones_ph, lstm_state, initial_state
| 32,489 | Python | 50.984 | 181 | 0.645141 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/a2c_discrete.py | from rl_games.common import tr_helpers, vecenv
#from rl_games.algos_tf14 import networks
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque, OrderedDict
from tensorboardX import SummaryWriter
import gym
from datetime import datetime
def swap_and_flatten01(arr):
"""
swap and then flatten axes 0 and 1
"""
if arr is None:
return arr
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
class A2CAgent:
def __init__(self, sess, base_name, observation_space, action_space, config):
observation_shape = observation_space.shape
self.use_action_masks = config.get('use_action_masks', False)
self.is_train = config.get('is_train', True)
self.self_play = config.get('self_play', False)
self.name = base_name
self.config = config
self.env_name = config['env_name']
self.ppo = config['ppo']
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.e_clip = config['e_clip']
self.clip_value = config['clip_value']
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_actors = config['num_actors']
self.env_config = self.config.get('env_config', {})
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.num_agents = self.vec_env.get_number_of_agents()
self.horizon_length = config['horizon_length']
self.seq_len = self.config['seq_length']
self.normalize_advantage = config['normalize_advantage']
self.normalize_input = self.config['normalize_input']
self.state_shape = observation_shape
self.critic_coef = config['critic_coef']
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.sess = sess
self.grad_norm = config['grad_norm']
self.gamma = self.config['gamma']
self.tau = self.config['tau']
self.ignore_dead_batches = self.config.get('ignore_dead_batches', False)
self.dones = np.asarray([False]*self.num_actors *self.num_agents, dtype=np.bool)
self.current_rewards = np.asarray([0]*self.num_actors *self.num_agents, dtype=np.float32)
self.current_lengths = np.asarray([0]*self.num_actors *self.num_agents, dtype=np.float32)
self.games_to_track = self.config.get('games_to_track', 100)
self.game_rewards = deque([], maxlen=self.games_to_track)
self.game_lengths = deque([], maxlen=self.games_to_track)
self.game_scores = deque([], maxlen=self.games_to_track)
self.obs_ph = tf.placeholder(observation_space.dtype, (None, ) + observation_shape, name = 'obs')
self.target_obs_ph = tf.placeholder(observation_space.dtype, (None, ) + observation_shape, name = 'target_obs')
self.actions_num = action_space.n
self.actions_ph = tf.placeholder('int32', (None,), name = 'actions')
if self.use_action_masks:
self.action_mask_ph = tf.placeholder('int32', (None, self.actions_num), name = 'actions_mask')
else:
self.action_mask_ph = None
self.old_logp_actions_ph = tf.placeholder('float32', (None, ), name = 'old_logpactions')
self.rewards_ph = tf.placeholder('float32', (None,), name = 'rewards')
self.old_values_ph = tf.placeholder('float32', (None,), name = 'old_values')
self.advantages_ph = tf.placeholder('float32', (None,), name = 'advantages')
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
self.input_obs = self.obs_ph
self.input_target_obs = self.target_obs_ph
if observation_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_target_obs = tf.to_float(self.input_target_obs) / 255.0
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
if self.is_polynom_decay_lr:
self.lr_multiplier = tf.train.polynomial_decay(1.0, self.epoch_num, config['max_epochs'], end_learning_rate=0.001, power=config.get('decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, self.epoch_num,config['max_epochs'], decay_rate = config['decay_rate'])
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = observation_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=True)
self.input_target_obs = self.moving_mean_std.normalize(self.input_target_obs, train=False)
games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
self.train_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : self.config['minibatch_size'],
'games_num' : games_num,
'actions_num' : self.actions_num,
'prev_actions_ph' : self.actions_ph,
'action_mask_ph' : None
}
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_target_obs,
'batch_num' : self.num_actors * self.num_agents,
'games_num' : self.num_actors * self.num_agents,
'actions_num' : self.actions_num,
'prev_actions_ph' : None,
'action_mask_ph' : self.action_mask_ph
}
self.states = None
if self.network.is_rnn():
self.logp_actions ,self.state_values, self.action, self.entropy, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_states_ph, self.target_masks_ph, self.target_lstm_state, self.target_initial_state, self.logits = self.network(self.run_dict, reuse=True)
self.states = self.target_initial_state
else:
self.logp_actions ,self.state_values, self.action, self.entropy = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.logits = self.network(self.run_dict, reuse=True)
self.saver = tf.train.Saver()
self.variables = TensorFlowVariables([self.target_action, self.target_state_values, self.target_neglogp], self.sess)
if self.is_train:
self.setup_losses()
self.sess.run(tf.global_variables_initializer())
def setup_losses(self):
curr_e_clip = self.e_clip * self.lr_multiplier
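        # PPO clipped surrogate objective: with ratio r = pi(a|s) / pi_old(a|s), the loss is
        # -min(r * A, clip(r, 1 - e_clip, 1 + e_clip) * A); since both terms below are already
        # negated, the element-wise maximum is taken instead.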
if (self.ppo):
self.prob_ratio = tf.exp(self.old_logp_actions_ph - self.logp_actions)
self.pg_loss_unclipped = -tf.multiply(self.advantages_ph, self.prob_ratio)
self.pg_loss_clipped = -tf.multiply(self.advantages_ph, tf.clip_by_value(self.prob_ratio, 1.- curr_e_clip, 1.+ curr_e_clip))
self.actor_loss = tf.maximum(self.pg_loss_unclipped, self.pg_loss_clipped)
else:
self.actor_loss = self.logp_actions * self.advantages_ph
self.actor_loss = tf.reduce_mean(self.actor_loss)
self.c_loss = (tf.squeeze(self.state_values) - self.rewards_ph)**2
if self.clip_value:
self.cliped_values = self.old_values_ph + tf.clip_by_value(tf.squeeze(self.state_values) - self.old_values_ph, - curr_e_clip, curr_e_clip)
self.c_loss_clipped = tf.square(self.cliped_values - self.rewards_ph)
self.critic_loss = tf.maximum(self.c_loss, self.c_loss_clipped)
else:
self.critic_loss = self.c_loss
self.critic_loss = tf.reduce_mean(self.critic_loss)
self.kl_approx = 0.5 * tf.stop_gradient(tf.reduce_mean((self.old_logp_actions_ph - self.logp_actions)**2))
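        # adaptive schedule: if the approximate KL between old and new policy exceeds 2x the
        # threshold, the learning rate is divided by 1.5 (floored at 1e-6); if it falls below
        # 0.5x the threshold, it is multiplied by 1.5 (capped at 1e-2).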
if self.is_adaptive_lr:
self.current_lr = tf.where(self.kl_approx > (2.0 * self.kl_threshold), tf.maximum(self.current_lr / 1.5, 1e-6), self.current_lr)
self.current_lr = tf.where(self.kl_approx < (0.5 * self.kl_threshold), tf.minimum(self.current_lr * 1.5, 1e-2), self.current_lr)
self.loss = self.actor_loss + 0.5 * self.critic_coef * self.critic_loss - self.config['entropy_coef'] * self.entropy
self.reg_loss = tf.losses.get_regularization_loss()
self.loss += self.reg_loss
self.train_step = tf.train.AdamOptimizer(self.current_lr * self.lr_multiplier)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
grads = tf.gradients(self.loss, self.weights)
if self.config['truncate_grads']:
grads, _ = tf.clip_by_global_norm(grads, self.grad_norm)
grads = list(zip(grads, self.weights))
self.train_op = self.train_step.apply_gradients(grads)
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def get_action_values(self, obs):
run_ops = [self.target_action, self.target_state_values, self.target_neglogp]
if self.network.is_rnn():
run_ops.append(self.target_lstm_state)
return self.sess.run(run_ops, {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return (*self.sess.run(run_ops, {self.target_obs_ph : obs}), None)
def get_masked_action_values(self, obs, action_masks):
run_ops = [self.target_action, self.target_state_values, self.target_neglogp, self.logits]
if self.network.is_rnn():
run_ops.append(self.target_lstm_state)
return self.sess.run(run_ops, {self.action_mask_ph: action_masks, self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return (*self.sess.run(run_ops, {self.action_mask_ph: action_masks, self.target_obs_ph : obs}), None)
def get_values(self, obs):
if self.network.is_rnn():
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs})
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def play_steps(self):
        # initialize the lists that will hold the minibatch of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = []
epinfos = []
        # roll out the policy for horizon_length steps
for _ in range(self.horizon_length):
if self.network.is_rnn():
mb_states.append(self.states)
            if self.use_action_masks:
                masks = self.vec_env.get_action_masks()
                actions, values, neglogpacs, _, self.states = self.get_masked_action_values(self.obs, masks)
            else:
                actions, values, neglogpacs, self.states = self.get_action_values(self.obs)
actions = np.squeeze(actions)
values = np.squeeze(values)
neglogpacs = np.squeeze(neglogpacs)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones.copy())
self.obs[:], rewards, self.dones, infos = self.vec_env.step(actions)
self.current_rewards += rewards
self.current_lengths += 1
for reward, length, done, info in zip(self.current_rewards[::self.num_agents], self.current_lengths[::self.num_agents], self.dones[::self.num_agents], infos):
if done:
self.game_rewards.append(reward)
self.game_lengths.append(length)
game_res = 1.0
if isinstance(info, dict):
game_res = info.get('battle_won', 0.5)
self.game_scores.append(game_res)
self.current_rewards = self.current_rewards * (1.0 - self.dones)
self.current_lengths = self.current_lengths * (1.0 - self.dones)
shaped_rewards = self.rewards_shaper(rewards)
epinfos.append(infos)
mb_rewards.append(shaped_rewards)
#using openai baseline approach
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
mb_states = np.asarray(mb_states, dtype=np.float32)
last_values = self.get_values(self.obs)
last_values = np.squeeze(last_values)
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
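        # Generalized Advantage Estimation, computed backwards over the rollout:
        #   delta_t = r_t + gamma * V(s_{t+1}) * nonterminal - V(s_t)
        #   A_t     = delta_t + gamma * tau * nonterminal * A_{t+1}
        # and the value targets are returns_t = A_t + V(s_t).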
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
if self.network.is_rnn():
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states )), epinfos)
else:
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), None, epinfos)
return result
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def train(self):
self.obs = self.vec_env.reset()
batch_size = self.horizon_length * self.num_actors * self.num_agents
batch_size_envs = self.horizon_length * self.num_actors
minibatch_size = self.config['minibatch_size']
mini_epochs_num = self.config['mini_epochs']
num_minibatches = batch_size // minibatch_size
last_lr = self.config['learning_rate']
frame = 0
update_time = 0
self.last_mean_rewards = -100500
play_time = 0
epoch_num = 0
max_epochs = self.config.get('max_epochs', 1e6)
start_time = time.time()
total_time = 0
rep_count = 0
while True:
play_time_start = time.time()
epoch_num = self.update_epoch()
frame += batch_size_envs
obses, returns, dones, actions, values, neglogpacs, lstm_states, _ = self.play_steps()
advantages = returns - values
if self.normalize_advantage:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
a_losses = []
c_losses = []
entropies = []
kls = []
play_time_end = time.time()
play_time = play_time_end - play_time_start
update_time_start = time.time()
if self.network.is_rnn():
total_games = batch_size // self.seq_len
num_games_batch = minibatch_size // self.seq_len
game_indexes = np.arange(total_games)
flat_indexes = np.arange(total_games * self.seq_len).reshape(total_games, self.seq_len)
lstm_states = lstm_states[::self.seq_len]
for _ in range(0, mini_epochs_num):
np.random.shuffle(game_indexes)
for i in range(0, num_minibatches):
batch = range(i * num_games_batch, (i + 1) * num_games_batch)
mb_indexes = game_indexes[batch]
mbatch = flat_indexes[mb_indexes].ravel()
                        feed_dict = {}
                        feed_dict[self.old_values_ph] = values[mbatch]
                        feed_dict[self.old_logp_actions_ph] = neglogpacs[mbatch]
                        feed_dict[self.advantages_ph] = advantages[mbatch]
                        feed_dict[self.rewards_ph] = returns[mbatch]
                        feed_dict[self.actions_ph] = actions[mbatch]
                        feed_dict[self.obs_ph] = obses[mbatch]
                        feed_dict[self.masks_ph] = dones[mbatch]
                        feed_dict[self.states_ph] = lstm_states[mb_indexes]
                        feed_dict[self.learning_rate_ph] = last_lr
                        run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_approx, self.current_lr, self.lr_multiplier, self.train_op]
                        run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
                        a_loss, c_loss, entropy, kl, last_lr, lr_mul, _, _ = self.sess.run(run_ops, feed_dict)
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
else:
for _ in range(0, mini_epochs_num):
permutation = np.random.permutation(batch_size)
obses = obses[permutation]
returns = returns[permutation]
actions = actions[permutation]
values = values[permutation]
neglogpacs = neglogpacs[permutation]
advantages = advantages[permutation]
for i in range(0, num_minibatches):
batch = range(i * minibatch_size, (i + 1) * minibatch_size)
                        feed_dict = {self.obs_ph: obses[batch], self.actions_ph : actions[batch], self.rewards_ph : returns[batch],
                                     self.advantages_ph : advantages[batch], self.old_logp_actions_ph : neglogpacs[batch], self.old_values_ph : values[batch]}
                        feed_dict[self.learning_rate_ph] = last_lr
                        run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_approx, self.current_lr, self.lr_multiplier, self.train_op]
                        run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
                        a_loss, c_loss, entropy, kl, last_lr, lr_mul, _, _ = self.sess.run(run_ops, feed_dict)
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
update_time_end = time.time()
update_time = update_time_end - update_time_start
sum_time = update_time + play_time
total_time = update_time_end - start_time
if True:
scaled_time = self.num_agents * sum_time
                print('frames per second: ', batch_size / scaled_time)
self.writer.add_scalar('performance/fps', batch_size / scaled_time, frame)
self.writer.add_scalar('performance/update_time', update_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('losses/a_loss', np.mean(a_losses), frame)
self.writer.add_scalar('losses/c_loss', np.mean(c_losses), frame)
self.writer.add_scalar('losses/entropy', np.mean(entropies), frame)
self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
self.writer.add_scalar('info/kl', np.mean(kls), frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
if len(self.game_rewards) > 0:
mean_rewards = np.mean(self.game_rewards)
mean_lengths = np.mean(self.game_lengths)
mean_scores = np.mean(self.game_scores)
self.writer.add_scalar('rewards/mean', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/mean', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
self.writer.add_scalar('scores/mean', mean_scores, frame)
self.writer.add_scalar('scores/time', mean_scores, total_time)
if rep_count % 10 == 0:
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
rep_count += 1
if mean_rewards > self.last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
self.save("./nn/" + self.config['name'])
if self.last_mean_rewards > self.config['score_to_win']:
print('Network won!')
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
if epoch_num > max_epochs:
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
print('MAX EPOCHS NUM!')
return self.last_mean_rewards, epoch_num
update_time = 0 | 22,809 | Python | 49.688889 | 232 | 0.577404 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/dqnagent.py | from rl_games.common import tr_helpers, vecenv, experience, env_configurations
from rl_games.common.categorical import CategoricalQ
from rl_games.algos_tf14 import networks, models
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque
from tensorboardX import SummaryWriter
from datetime import datetime
class DQNAgent:
def __init__(self, sess, base_name, observation_space, action_space, config):
observation_shape = observation_space.shape
actions_num = action_space.n
self.config = config
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.games_to_track = config.get('games_to_track', 100)
self.max_epochs = config.get('max_epochs', 1e6)
self.game_rewards = deque([], maxlen=self.games_to_track)
self.game_lengths = deque([], maxlen=self.games_to_track)
self.epoch_num = tf.Variable( tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
if self.is_polynom_decay_lr:
            self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, end_learning_rate=0.001, power=config.get('decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=self.max_epochs, decay_rate = config['decay_rate'])
self.env_name = config['env_name']
self.network = config['network']
self.state_shape = observation_shape
self.actions_num = actions_num
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.epsilon = self.config['epsilon']
self.rewards_shaper = self.config['reward_shaper']
self.epsilon_processor = tr_helpers.LinearValueProcessor(self.config['epsilon'], self.config['min_epsilon'], self.config['epsilon_decay_frames'])
self.beta_processor = tr_helpers.LinearValueProcessor(self.config['priority_beta'], self.config['max_beta'], self.config['beta_decay_frames'])
if self.env_name:
self.env = env_configurations.configurations[self.env_name]['env_creator']()
self.sess = sess
self.horizon_length = self.config['horizon_length']
self.states = deque([], maxlen=self.horizon_length)
self.is_prioritized = config['replay_buffer_type'] != 'normal'
self.atoms_num = self.config['atoms_num']
self.is_categorical = self.atoms_num > 1
if self.is_categorical:
self.v_min = self.config['v_min']
self.v_max = self.config['v_max']
self.delta_z = (self.v_max - self.v_min) / (self.atoms_num - 1)
self.all_z = tf.range(self.v_min, self.v_max + self.delta_z, self.delta_z)
self.categorical = CategoricalQ(self.atoms_num, self.v_min, self.v_max)
if not self.is_prioritized:
self.exp_buffer = experience.ReplayBuffer(config['replay_buffer_size'], observation_space)
else:
self.exp_buffer = experience.PrioritizedReplayBuffer(config['replay_buffer_size'], config['priority_alpha'], observation_space)
self.sample_weights_ph = tf.placeholder(tf.float32, shape= [None,] , name='sample_weights')
self.obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'obs_ph')
self.actions_ph = tf.placeholder(tf.int32, shape=[None,], name = 'actions_ph')
self.rewards_ph = tf.placeholder(tf.float32, shape=[None,], name = 'rewards_ph')
self.next_obs_ph = tf.placeholder(observation_space.dtype, shape=(None,) + self.state_shape , name = 'next_obs_ph')
self.is_done_ph = tf.placeholder(tf.float32, shape=[None,], name = 'is_done_ph')
self.is_not_done = 1 - self.is_done_ph
self.name = base_name
self.gamma = self.config['gamma']
self.gamma_step = self.gamma**self.horizon_length
self.input_obs = self.obs_ph
self.input_next_obs = self.next_obs_ph
if observation_space.dtype == np.uint8:
print('scaling obs')
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_next_obs = tf.to_float(self.input_next_obs) / 255.0
if self.atoms_num == 1:
self.setup_qvalues(actions_num)
else:
self.setup_cat_qvalues(actions_num)
self.reg_loss = tf.losses.get_regularization_loss()
self.td_loss_mean += self.reg_loss
self.learning_rate = self.config['learning_rate']
self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
self.saver = tf.train.Saver()
self.assigns_op = [tf.assign(w_target, w_self, validate_shape=True) for w_self, w_target in zip(self.weights, self.target_weights)]
self.variables = TensorFlowVariables(self.qvalues, self.sess)
if self.env_name:
sess.run(tf.global_variables_initializer())
self._reset()
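    # expected Q-value of the categorical (C51) distribution: the fixed support `all_z` is
    # weighted by the predicted atom probabilities, reducing (batch, actions, atoms) to (batch, actions)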
def _get_q(self, probs):
res = probs * self.all_z
return tf.reduce_sum(res, axis=2)
def get_weights(self):
return self.variables.get_flat()
def set_weights(self, weights):
return self.variables.set_flat(weights)
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def setup_cat_qvalues(self, actions_num):
config = {
'name' : 'agent',
'inputs' : self.input_obs,
'actions_num' : actions_num,
}
self.logits = self.network(config, reuse=False)
self.qvalues_c = tf.nn.softmax(self.logits, axis = 2)
self.qvalues = self._get_q(self.qvalues_c)
config = {
'name' : 'target',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.target_logits = self.network(config, reuse=False)
self.target_qvalues_c = tf.nn.softmax(self.target_logits, axis = 2)
self.target_qvalues = self._get_q(self.target_qvalues_c)
if self.config['is_double'] == True:
config = {
'name' : 'agent',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.next_logits = tf.stop_gradient(self.network(config, reuse=True))
self.next_qvalues_c = tf.nn.softmax(self.next_logits, axis = 2)
self.next_qvalues = self._get_q(self.next_qvalues_c)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
self.current_action_values = tf.reduce_sum(tf.expand_dims(tf.one_hot(self.actions_ph, actions_num), -1) * self.logits, reduction_indices = (1,))
if self.config['is_double'] == True:
self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
else:
self.next_selected_actions = tf.argmax(self.target_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( tf.expand_dims(self.next_selected_actions_onehot, -1) * self.target_qvalues_c , reduction_indices = (1,) ))
self.proj_dir_ph = tf.placeholder(tf.float32, shape=[None, self.atoms_num], name = 'best_proj_dir')
log_probs = tf.nn.log_softmax( self.current_action_values, axis=1)
if self.is_prioritized:
# we need to return loss to update priority buffer
self.abs_errors = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1) + 1e-5
self.td_loss = self.abs_errors * self.sample_weights_ph
else:
self.td_loss = tf.reduce_sum(-log_probs * self.proj_dir_ph, axis = 1)
self.td_loss_mean = tf.reduce_mean(self.td_loss)
def setup_qvalues(self, actions_num):
config = {
'name' : 'agent',
'inputs' : self.input_obs,
'actions_num' : actions_num,
}
self.qvalues = self.network(config, reuse=False)
config = {
'name' : 'target',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.target_qvalues = tf.stop_gradient(self.network(config, reuse=False))
if self.config['is_double'] == True:
config = {
'name' : 'agent',
'inputs' : self.input_next_obs,
'actions_num' : actions_num,
}
self.next_qvalues = tf.stop_gradient(self.network(config, reuse=True))
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
self.target_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='target')
self.current_action_qvalues = tf.reduce_sum(tf.one_hot(self.actions_ph, actions_num) * self.qvalues, reduction_indices = 1)
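        # Double DQN target: the online network picks argmax_a Q(s', a) while the target network
        # supplies the value of that action, reducing overestimation bias; the plain DQN branch
        # below simply takes max_a over the target network.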
if self.config['is_double'] == True:
self.next_selected_actions = tf.argmax(self.next_qvalues, axis = 1)
self.next_selected_actions_onehot = tf.one_hot(self.next_selected_actions, actions_num)
self.next_state_values_target = tf.stop_gradient( tf.reduce_sum( self.target_qvalues * self.next_selected_actions_onehot , reduction_indices=[1,] ))
else:
self.next_state_values_target = tf.stop_gradient(tf.reduce_max(self.target_qvalues, reduction_indices=1))
self.reference_qvalues = self.rewards_ph + self.gamma_step *self.is_not_done * self.next_state_values_target
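        # Prioritized replay: |TD error| (plus a small epsilon) is returned so the sampled
        # transitions can have their priorities updated, and the per-sample Huber loss is scaled
        # by the importance-sampling weights to correct the sampling bias.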
if self.is_prioritized:
# we need to return l1 loss to update priority buffer
self.abs_errors = tf.abs(self.current_action_qvalues - self.reference_qvalues) + 1e-5
            # equivalent to scaling the gradients later (some implementations found online use that alternative)
self.td_loss = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.NONE) * self.sample_weights_ph
self.td_loss_mean = tf.reduce_mean(self.td_loss)
else:
self.td_loss_mean = tf.losses.huber_loss(self.current_action_qvalues, self.reference_qvalues, reduction=tf.losses.Reduction.MEAN)
self.reg_loss = tf.losses.get_regularization_loss()
self.td_loss_mean += self.reg_loss
self.learning_rate = self.config['learning_rate']
if self.env_name:
self.train_step = tf.train.AdamOptimizer(self.learning_rate * self.lr_multiplier).minimize(self.td_loss_mean, var_list=self.weights)
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def _reset(self):
self.states.clear()
if self.env_name:
self.state = self.env.reset()
self.total_reward = 0.0
self.total_shaped_reward = 0.0
self.step_count = 0
def get_qvalues(self, state):
return self.sess.run(self.qvalues, {self.obs_ph: state})
def get_action(self, state, epsilon=0.0):
if np.random.random() < epsilon:
action = self.env.action_space.sample()
else:
qvals = self.get_qvalues([state])
action = np.argmax(qvals)
return action
def play_steps(self, steps, epsilon=0.0):
done_reward = None
done_shaped_reward = None
done_steps = None
steps_rewards = 0
cur_gamma = 1
cur_states_len = len(self.states)
# always break after one
while True:
if cur_states_len > 0:
state = self.states[-1][0]
else:
state = self.state
action = self.get_action(state, epsilon)
new_state, reward, is_done, _ = self.env.step(action)
#reward = reward * (1 - is_done)
self.step_count += 1
self.total_reward += reward
shaped_reward = self.rewards_shaper(reward)
self.total_shaped_reward += shaped_reward
self.states.append([new_state, action, shaped_reward])
if len(self.states) < steps:
break
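            # n-step return: accumulate the shaped rewards of the buffered transitions as
            # R = sum_{i=0}^{n-1} gamma^i * r_{t+i}; training later bootstraps on top of this
            # return with gamma_step = gamma ** horizon_length.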
for i in range(steps):
sreward = self.states[i][2]
steps_rewards += sreward * cur_gamma
cur_gamma = cur_gamma * self.gamma
next_state, current_action, _ = self.states[0]
self.exp_buffer.add(self.state, current_action, steps_rewards, new_state, is_done)
self.state = next_state
break
if is_done:
done_reward = self.total_reward
done_steps = self.step_count
done_shaped_reward = self.total_shaped_reward
self._reset()
return done_reward, done_shaped_reward, done_steps
def load_weigths_into_target_network(self):
self.sess.run(self.assigns_op)
def sample_batch(self, exp_replay, batch_size):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch = exp_replay.sample(batch_size)
return {
self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch
}
def sample_prioritized_batch(self, exp_replay, batch_size, beta):
obs_batch, act_batch, reward_batch, next_obs_batch, is_done_batch, sample_weights, sample_idxes = exp_replay.sample(batch_size, beta)
batch = { self.obs_ph:obs_batch, self.actions_ph:act_batch, self.rewards_ph:reward_batch,
self.is_done_ph:is_done_batch, self.next_obs_ph:next_obs_batch, self.sample_weights_ph: sample_weights }
return [batch , sample_idxes]
def train(self):
mem_free_steps = 0
self.last_mean_rewards = -100500
epoch_num = 0
frame = 0
update_time = 0
play_time = 0
start_time = time.time()
total_time = 0
self.load_weigths_into_target_network()
for _ in range(0, self.config['num_steps_fill_buffer']):
self.play_steps(self.horizon_length, self.epsilon)
steps_per_epoch = self.config['steps_per_epoch']
num_epochs_to_copy = self.config['num_epochs_to_copy']
batch_size = self.config['batch_size']
lives_reward = self.config['lives_reward']
episodes_to_log = self.config['episodes_to_log']
frame = 0
play_time = 0
update_time = 0
rewards = []
shaped_rewards = []
steps = []
losses = deque([], maxlen=100)
while True:
epoch_num = self.update_epoch()
t_play_start = time.time()
self.epsilon = self.epsilon_processor(frame)
self.beta = self.beta_processor(frame)
for _ in range(0, steps_per_epoch):
reward, shaped_reward, step = self.play_steps(self.horizon_length, self.epsilon)
                if reward is not None:
self.game_lengths.append(step)
self.game_rewards.append(reward)
#shaped_rewards.append(shaped_reward)
t_play_end = time.time()
play_time += t_play_end - t_play_start
# train
frame = frame + steps_per_epoch
t_start = time.time()
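            # categorical (C51) update: the target distribution is shifted by the n-step reward,
            # scaled by gamma**horizon_length, projected back onto the fixed support, and used as
            # the cross-entropy target for the online distribution.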
if self.is_categorical:
if self.is_prioritized:
batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.horizon_length)
batch[self.proj_dir_ph] = projected
_, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
self.exp_buffer.update_priorities(idxes, errors_update)
else:
batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
next_state_vals = self.sess.run([self.next_state_values_target], batch)[0]
projected = self.categorical.distr_projection(next_state_vals, batch[self.rewards_ph], batch[self.is_done_ph], self.gamma ** self.horizon_length)
batch[self.proj_dir_ph] = projected
_, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
else:
if self.is_prioritized:
batch, idxes = self.sample_prioritized_batch(self.exp_buffer, batch_size=batch_size, beta = self.beta)
_, loss_t, errors_update, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.abs_errors, self.lr_multiplier], batch)
self.exp_buffer.update_priorities(idxes, errors_update)
else:
batch = self.sample_batch(self.exp_buffer, batch_size=batch_size)
_, loss_t, lr_mul = self.sess.run([self.train_step, self.td_loss_mean, self.lr_multiplier], batch)
losses.append(loss_t)
t_end = time.time()
update_time += t_end - t_start
total_time += update_time
if frame % 1000 == 0:
mem_free_steps += 1
if mem_free_steps == 10:
mem_free_steps = 0
tr_helpers.free_mem()
sum_time = update_time + play_time
                print('frames per second: ', 1000 / sum_time)
self.writer.add_scalar('performance/fps', 1000 / sum_time, frame)
self.writer.add_scalar('performance/upd_time', update_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('losses/td_loss', np.mean(losses), frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/lr', self.learning_rate*lr_mul, frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
self.writer.add_scalar('info/epsilon', self.epsilon, frame)
if self.is_prioritized:
self.writer.add_scalar('beta', self.beta, frame)
update_time = 0
play_time = 0
num_games = len(self.game_rewards)
if num_games > 10:
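                # `lives_reward` aggregates per-life episode scores into approximate per-game
                # values (the per-episode mean is multiplied by the configured number of lives).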
d = num_games / lives_reward
mean_rewards = np.sum(self.game_rewards) / d
mean_lengths = np.sum(self.game_lengths) / d
self.writer.add_scalar('rewards/mean', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/mean', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if mean_rewards > self.last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
if self.last_mean_rewards > self.config['score_to_win']:
print('network won!')
return self.last_mean_rewards, epoch_num
#clear_output(True)
# adjust agent parameters
if frame % num_epochs_to_copy == 0:
self.load_weigths_into_target_network()
if epoch_num >= self.max_epochs:
print('Max epochs reached')
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(np.sum(self.game_rewards) * lives_reward / len(self.game_rewards)))
return self.last_mean_rewards, epoch_num
| 21,405 | Python | 48.322581 | 191 | 0.592245 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/models.py | import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
from rl_games.algos_tf14 import networks
tfd = tfp.distributions
def entry_stop_gradients(target, mask):
mask_h = tf.abs(mask-1)
return tf.stop_gradient(mask_h * target) + mask * target
class BaseModel(object):
def is_rnn(self):
return False
class ModelA2C(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
action_mask_ph = dict.get('action_mask_ph', None)
is_train = prev_actions_ph is not None
logits, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=False, is_train=is_train,reuse=reuse)
#if action_mask_ph is not None:
#masks = tf.layers.dense(tf.to_float(action_mask_ph), actions_num, activation=tf.nn.elu)
#logits = masks + logits
#logits = entry_stop_gradients(logits, tf.to_float(action_mask_ph))
probs = tf.nn.softmax(logits)
# Gumbel Softmax
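        # Gumbel-max trick: argmax(logits - log(-log(u))) with u ~ Uniform(0, 1) draws a sample
        # from Categorical(softmax(logits)); invalid actions are pushed to -inf via the mask.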
if not is_train:
u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
rand_logits = logits - tf.log(-tf.log(u))
if action_mask_ph is not None:
inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
rand_logits = rand_logits + inf_mask
logits = logits + inf_mask
action = tf.argmax(rand_logits, axis=-1)
one_hot_actions = tf.one_hot(action, actions_num)
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=probs)
if not is_train:
neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.stop_gradient(one_hot_actions))
return neglogp, value, action, entropy, logits
else:
prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
return prev_neglogp, value, None, entropy
class ModelA2CContinuous(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
is_train = prev_actions_ph is not None
mu, sigma, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train = is_train, reuse=reuse)
norm_dist = tfd.Normal(mu, sigma)
action = tf.squeeze(norm_dist.sample(1), axis=0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, sigma
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, sigma
class ModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
is_train = prev_actions_ph is not None
mean, logstd, value = self.network(name, inputs=inputs, actions_num=actions_num, continuous=True, is_train=True, reuse=reuse)
std = tf.exp(logstd)
norm_dist = tfd.Normal(mean, std)
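        # Reparameterized sample: action = mean + std * eps with eps ~ N(0, I), so the sample
        # stays differentiable with respect to mean and logstd.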
action = mean + std * tf.random_normal(tf.shape(mean))
#action = tf.squeeze(norm_dist.sample(1), axis=0)
#action = tf.clip_by_value(action, -1.0, 1.0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
if prev_actions_ph is None:
neglogp = self.neglogp(action, mean, std, logstd)
return neglogp, value, action, entropy, mean, std
prev_neglogp = self.neglogp(prev_actions_ph, mean, std, logstd)
return prev_neglogp, value, action, entropy, mean, std
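    # Closed-form negative log-likelihood of a diagonal Gaussian:
    # -log N(x | mean, diag(std^2)) = 0.5*sum(((x - mean)/std)^2) + 0.5*d*log(2*pi) + sum(log std)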
def neglogp(self, x, mean, std, logstd):
return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf.reduce_sum(logstd, axis=-1)
class LSTMModelA2CContinuousLogStd(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def is_single_batched(self):
return False
def neglogp(self, x, mean, std, logstd):
return 0.5 * tf.reduce_sum(tf.square((x - mean) / std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf.reduce_sum(logstd, axis=-1)
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
is_train = prev_actions_ph is not None
mu, logstd, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
std = tf.exp(logstd)
action = mu + std * tf.random_normal(tf.shape(mu))
norm_dist = tfd.Normal(mu, std)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, std, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2CContinuous(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def is_single_batched(self):
return False
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
is_train = prev_actions_ph is not None
mu, var, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=True, is_train=is_train, reuse=reuse)
sigma = tf.sqrt(var)
norm_dist = tfd.Normal(mu, sigma)
action = tf.squeeze(norm_dist.sample(1), axis=0)
#action = tf.clip_by_value(action, -1.0, 1.0)
entropy = tf.reduce_mean(tf.reduce_sum(norm_dist.entropy(), axis=-1))
        if prev_actions_ph is None:
neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(action)+ 1e-6), axis=-1)
return neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
prev_neglogp = tf.reduce_sum(-tf.log(norm_dist.prob(prev_actions_ph) + 1e-6), axis=-1)
return prev_neglogp, value, action, entropy, mu, sigma, states_ph, masks_ph, lstm_state, initial_state
class LSTMModelA2C(BaseModel):
def __init__(self, network):
self.network = network
def is_rnn(self):
return True
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
prev_actions_ph = dict['prev_actions_ph']
games_num = dict['games_num']
batch_num = dict['batch_num']
action_mask_ph = dict.get('action_mask_ph', None)
is_train = prev_actions_ph is not None
logits, value, states_ph, masks_ph, lstm_state, initial_state = self.network(name=name, inputs=inputs, actions_num=actions_num,
games_num=games_num, batch_num=batch_num, continuous=False, is_train=is_train, reuse=reuse)
if not is_train:
u = tf.random_uniform(tf.shape(logits), dtype=logits.dtype)
rand_logits = logits - tf.log(-tf.log(u))
if action_mask_ph is not None:
inf_mask = tf.maximum(tf.log(tf.to_float(action_mask_ph)), tf.float32.min)
rand_logits = rand_logits + inf_mask
logits = logits + inf_mask
action = tf.argmax(rand_logits, axis=-1)
one_hot_actions = tf.one_hot(action, actions_num)
entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.nn.softmax(logits))
if not is_train:
neglogp = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot_actions)
return neglogp, value, action, entropy, states_ph, masks_ph, lstm_state, initial_state, logits
prev_neglogp = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=prev_actions_ph)
return prev_neglogp, value, None, entropy, states_ph, masks_ph, lstm_state, initial_state
class AtariDQN(BaseModel):
def __init__(self, network):
self.network = network
def __call__(self, dict, reuse=False):
name = dict['name']
inputs = dict['inputs']
actions_num = dict['actions_num']
'''
TODO: fix is_train
'''
is_train = name == 'agent'
return self.network(name=name, inputs=inputs, actions_num=actions_num, is_train=is_train, reuse=reuse)
| 10,090 | Python | 40.356557 | 167 | 0.599405 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/model_builder.py | from rl_games.common import object_factory
import rl_games.algos_tf14
from rl_games.algos_tf14 import network_builder
from rl_games.algos_tf14 import models
class ModelBuilder:
def __init__(self):
self.model_factory = object_factory.ObjectFactory()
self.model_factory.register_builder('discrete_a2c', lambda network, **kwargs : models.ModelA2C(network))
self.model_factory.register_builder('discrete_a2c_lstm', lambda network, **kwargs : models.LSTMModelA2C(network))
self.model_factory.register_builder('continuous_a2c', lambda network, **kwargs : models.ModelA2CContinuous(network))
self.model_factory.register_builder('continuous_a2c_logstd', lambda network, **kwargs : models.ModelA2CContinuousLogStd(network))
self.model_factory.register_builder('continuous_a2c_lstm', lambda network, **kwargs : models.LSTMModelA2CContinuous(network))
self.model_factory.register_builder('continuous_a2c_lstm_logstd', lambda network, **kwargs : models.LSTMModelA2CContinuousLogStd(network))
self.model_factory.register_builder('dqn', lambda network, **kwargs : models.AtariDQN(network))
self.network_factory = object_factory.ObjectFactory()
self.network_factory.register_builder('actor_critic', lambda **kwargs : network_builder.A2CBuilder())
self.network_factory.register_builder('dqn', lambda **kwargs : network_builder.DQNBuilder())
def load(self, params):
self.model_name = params['model']['name']
self.network_name = params['network']['name']
network = self.network_factory.create(self.network_name)
network.load(params['network'])
model = self.model_factory.create(self.model_name, network=network)
return model
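
# Example usage (a sketch only; the params dict mirrors the 'model' and 'network' sections of
# the YAML configs in rl_games/configs, trimmed to the keys read by load()):
#
#   builder = ModelBuilder()
#   model = builder.load({
#       'model': {'name': 'continuous_a2c_logstd'},
#       'network': {'name': 'actor_critic', ...},  # remaining keys as expected by A2CBuilder.load
#   })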
| 1,761 | Python | 49.342856 | 146 | 0.721181 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/network_builder.py | import tensorflow as tf
import numpy as np
from rl_games.algos_tf14 import networks
from rl_games.common import object_factory
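# Initializer used by several policy/value heads: draws Gaussian weights and rescales each
# column so its L2 norm equals `std` (the "normc" scheme from OpenAI Baselines).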
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
class NetworkBuilder:
def __init__(self, **kwargs):
self.activations_factory = object_factory.ObjectFactory()
self.activations_factory.register_builder('relu', lambda **kwargs : tf.nn.relu)
self.activations_factory.register_builder('tanh', lambda **kwargs : tf.nn.tanh)
self.activations_factory.register_builder('sigmoid', lambda **kwargs : tf.nn.sigmoid)
self.activations_factory.register_builder('elu', lambda **kwargs : tf.nn.elu)
self.activations_factory.register_builder('selu', lambda **kwargs : tf.nn.selu)
self.activations_factory.register_builder('softplus', lambda **kwargs : tf.nn.softplus)
self.activations_factory.register_builder('None', lambda **kwargs : None)
self.init_factory = object_factory.ObjectFactory()
self.init_factory.register_builder('normc_initializer', lambda **kwargs : normc_initializer(**kwargs))
self.init_factory.register_builder('const_initializer', lambda **kwargs : tf.constant_initializer(**kwargs))
self.init_factory.register_builder('orthogonal_initializer', lambda **kwargs : tf.orthogonal_initializer(**kwargs))
self.init_factory.register_builder('glorot_normal_initializer', lambda **kwargs : tf.glorot_normal_initializer(**kwargs))
self.init_factory.register_builder('glorot_uniform_initializer', lambda **kwargs : tf.glorot_uniform_initializer(**kwargs))
self.init_factory.register_builder('variance_scaling_initializer', lambda **kwargs : tf.variance_scaling_initializer(**kwargs))
self.init_factory.register_builder('random_uniform_initializer', lambda **kwargs : tf.random_uniform_initializer(**kwargs))
self.init_factory.register_builder('None', lambda **kwargs : None)
self.regularizer_factory = object_factory.ObjectFactory()
self.regularizer_factory.register_builder('l1_regularizer', lambda **kwargs : tf.contrib.layers.l1_regularizer(**kwargs))
self.regularizer_factory.register_builder('l2_regularizer', lambda **kwargs : tf.contrib.layers.l2_regularizer(**kwargs))
self.regularizer_factory.register_builder('l1l2_regularizer', lambda **kwargs : tf.contrib.layers.l1l2_regularizer(**kwargs))
self.regularizer_factory.register_builder('None', lambda **kwargs : None)
def load(self, params):
pass
def build(self, name, **kwargs):
pass
def __call__(self, name, **kwargs):
return self.build(name, **kwargs)
def _noisy_dense(self, inputs, units, activation, kernel_initializer, kernel_regularizer, name):
return networks.noisy_dense(inputs, units, name, True, activation)
def _build_mlp(self,
name,
input,
units,
activation,
initializer,
regularizer,
norm_func_name = None,
dense_func = tf.layers.dense,
is_train=True):
out = input
ind = 0
for unit in units:
ind += 1
out = dense_func(out, units=unit,
activation=self.activations_factory.create(activation),
kernel_initializer = self.init_factory.create(**initializer),
kernel_regularizer = self.regularizer_factory.create(**regularizer),
#bias_initializer=tf.random_uniform_initializer(-0.1, 0.1),
name=name + str(ind))
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, training=is_train)
return out
def _build_lstm(self, name, input, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.float32, [batch_num])
states_ph = tf.placeholder(tf.float32, [games_num, 2*units])
lstm_out, lstm_state, initial_state = networks.openai_lstm(name, input, dones_ph=dones_ph, states_ph=states_ph, units=units, env_num=games_num, batch_num=batch_num)
return lstm_out, lstm_state, initial_state, dones_ph, states_ph
def _build_lstm2(self, name, inputs, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.bool, [batch_num])
states_ph = tf.placeholder(tf.float32, [games_num, 2*units])
hidden = tf.concat((inputs[0], inputs[1]), axis=1)
lstm_out, lstm_state, initial_state = networks.openai_lstm(name, hidden, dones_ph=dones_ph, states_ph=states_ph, units=units, env_num=games_num, batch_num=batch_num)
#lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
return lstm_out, lstm_state, initial_state, dones_ph, states_ph
def _build_lstm_sep(self, name, inputs, units, batch_num, games_num):
dones_ph = tf.placeholder(tf.bool, [batch_num], name='lstm_masks')
states_ph = tf.placeholder(tf.float32, [games_num, 4*units], name='lstm_states')
statesa, statesc = tf.split(states_ph, 2, axis=1)
a_out, lstm_statea, initial_statea = networks.openai_lstm(name +'a', inputs[0], dones_ph=dones_ph, states_ph=statesa, units=units, env_num=games_num, batch_num=batch_num)
c_out, lstm_statec, initial_statec = networks.openai_lstm(name + 'c', inputs[1], dones_ph=dones_ph, states_ph=statesc, units=units, env_num=games_num, batch_num=batch_num)
lstm_state = tf.concat([lstm_statea, lstm_statec], axis=1)
initial_state = np.concatenate([initial_statea, initial_statec], axis=1)
#lstm_outa, lstm_outc = tf.split(lstm_out, 2, axis=1)
return a_out, c_out, lstm_state, initial_state, dones_ph, states_ph
def _build_conv(self, ctype, **kwargs):
print('conv_name:', ctype)
if ctype == 'conv2d':
return self._build_cnn(**kwargs)
if ctype == 'conv1d':
return self._build_cnn1d(**kwargs)
def _build_cnn(self, name, input, convs, activation, initializer, regularizer, norm_func_name=None, is_train=True):
out = input
ind = 0
for conv in convs:
print(out.shape.as_list())
ind += 1
config = conv.copy()
config['filters'] = conv['filters']
config['padding'] = conv['padding']
config['kernel_size'] = [conv['kernel_size']] * 2
config['strides'] = [conv['strides']] * 2
config['activation'] = self.activations_factory.create(activation)
config['kernel_initializer'] = self.init_factory.create(**initializer)
config['kernel_regularizer'] = self.regularizer_factory.create(**regularizer)
config['name'] = name + str(ind)
out = tf.layers.conv2d(inputs=out, **config)
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, name='bn_'+ config['name'], training=is_train)
return out
def _build_cnn1d(self, name, input, convs, activation, initializer, regularizer, norm_func_name=None, is_train=True):
out = input
ind = 0
print('_build_cnn1d')
for conv in convs:
ind += 1
config = conv.copy()
config['activation'] = self.activations_factory.create(activation)
config['kernel_initializer'] = self.init_factory.create(**initializer)
config['kernel_regularizer'] = self.regularizer_factory.create(**regularizer)
config['name'] = name + str(ind)
#config['bias_initializer'] = tf.random_uniform_initializer,
# bias_initializer=tf.random_uniform_initializer(-0.1, 0.1)
out = tf.layers.conv1d(inputs=out, **config)
print('shapes of layer_' + str(ind), str(out.get_shape().as_list()))
if norm_func_name == 'layer_norm':
out = tf.contrib.layers.layer_norm(out)
elif norm_func_name == 'batch_norm':
out = tf.layers.batch_normalization(out, training=is_train)
return out
class A2CBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.separate = params['separate']
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_discrete = 'discrete' in params['space']
        self.is_continuous = 'continuous' in params['space']
self.value_activation = params.get('value_activation', 'None')
self.normalization = params.get('normalization', None)
self.has_lstm = 'lstm' in params
if self.is_continuous:
self.space_config = params['space']['continuous']
elif self.is_discrete:
self.space_config = params['space']['discrete']
if self.has_lstm:
self.lstm_units = params['lstm']['units']
self.concated = params['lstm']['concated']
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
batch_num = kwargs.pop('batch_num', 1)
games_num = kwargs.pop('games_num', 1)
is_train = kwargs.pop('is_train', True)
with tf.variable_scope(name, reuse=reuse):
actor_input = critic_input = input
if self.has_cnn:
cnn_args = {
'name' :'actor_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
actor_input = self._build_conv(**cnn_args)
actor_input = tf.contrib.layers.flatten(actor_input)
critic_input = actor_input
if self.separate:
cnn_args['name'] = 'critic_cnn'
critic_input = self._build_conv( **cnn_args)
critic_input = tf.contrib.layers.flatten(critic_input)
mlp_args = {
'name' :'actor_fc',
'input' : actor_input,
'units' :self.units,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out_actor = self._build_mlp(**mlp_args)
if self.separate:
mlp_args['name'] = 'critic_fc'
mlp_args['input'] = critic_input
out_critic = self._build_mlp(**mlp_args)
if self.has_lstm:
if self.concated:
out_actor, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm2('lstm', [out_actor, out_critic], self.lstm_units, batch_num, games_num)
out_critic = out_actor
else:
out_actor, out_critic, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm_sep('lstm_', [out_actor, out_critic], self.lstm_units, batch_num, games_num)
else:
if self.has_lstm:
out_actor, lstm_state, initial_state, dones_ph, states_ph = self._build_lstm('lstm', out_actor, self.lstm_units, batch_num, games_num)
out_critic = out_actor
value = tf.layers.dense(out_critic, units = 1, kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.value_activation), name='value')
if self.is_continuous:
mu = tf.layers.dense(out_actor, units = actions_num, activation=self.activations_factory.create(self.space_config['mu_activation']),
kernel_initializer = self.init_factory.create(**self.space_config['mu_init']), name='mu')
if self.space_config['fixed_sigma']:
sigma_out = tf.get_variable(name='sigma_out', shape=(actions_num), initializer=self.init_factory.create(**self.space_config['sigma_init']), trainable=True)
else:
sigma_out = tf.layers.dense(out_actor, units = actions_num, kernel_initializer=self.init_factory.create(**self.space_config['sigma_init']), activation=self.activations_factory.create(self.space_config['sigma_activation']), name='sigma_out')
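                # `mu * 0 + sigma_out` broadcasts the (possibly state-independent) sigma to the
                # batch shape of mu so both heads return tensors of identical shape.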
if self.has_lstm:
return mu, mu * 0 + sigma_out, value, states_ph, dones_ph, lstm_state, initial_state
return mu, mu * 0 + sigma_out, value
if self.is_discrete:
logits = tf.layers.dense(inputs=out_actor, units=actions_num, name='logits', kernel_initializer = self.init_factory.create(**self.initializer))
if self.has_lstm:
return logits, value, states_ph, dones_ph, lstm_state, initial_state
return logits, value
class DQNBuilder(NetworkBuilder):
def __init__(self, **kwargs):
NetworkBuilder.__init__(self)
def load(self, params):
self.units = params['mlp']['units']
self.activation = params['mlp']['activation']
self.initializer = params['mlp']['initializer']
self.regularizer = params['mlp']['regularizer']
self.is_dueling = params['dueling']
self.atoms = params['atoms']
self.is_noisy = params['noisy']
self.normalization = params.get('normalization', None)
if 'cnn' in params:
self.has_cnn = True
self.cnn = params['cnn']
else:
self.has_cnn = False
def build(self, name, **kwargs):
actions_num = kwargs.pop('actions_num')
input = kwargs.pop('inputs')
reuse = kwargs.pop('reuse')
is_train = kwargs.pop('is_train', True)
if self.is_noisy:
dense_layer = self._noisy_dense
else:
dense_layer = tf.layers.dense
with tf.variable_scope(name, reuse=reuse):
out = input
if self.has_cnn:
cnn_args = {
'name' :'dqn_cnn',
'ctype' : self.cnn['type'],
'input' : input,
'convs' :self.cnn['convs'],
'activation' : self.cnn['activation'],
'initializer' : self.cnn['initializer'],
'regularizer' : self.cnn['regularizer'],
'norm_func_name' : self.normalization,
'is_train' : is_train
}
out = self._build_conv(**cnn_args)
out = tf.contrib.layers.flatten(out)
mlp_args = {
'name' :'dqn_mlp',
'input' : out,
'activation' : self.activation,
'initializer' : self.initializer,
'regularizer' : self.regularizer,
'norm_func_name' : self.normalization,
'is_train' : is_train,
'dense_func' : dense_layer
}
if self.is_dueling:
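                # Dueling head: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); with atoms > 1 each
                # entry is a distributional (categorical) value vector instead of a scalar.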
if len(self.units) > 1:
mlp_args['units'] = self.units[:-1]
out = self._build_mlp(**mlp_args)
hidden_value = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_val')
hidden_advantage = dense_layer(inputs=out, units=self.units[-1], kernel_initializer = self.init_factory.create(**self.initializer), activation=self.activations_factory.create(self.activation), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='hidden_adv')
value = dense_layer(inputs=hidden_value, units=self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), activation=tf.identity, kernel_regularizer = self.regularizer_factory.create(**self.regularizer), name='value')
advantage = dense_layer(inputs=hidden_advantage, units= actions_num * self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='advantage')
advantage = tf.reshape(advantage, shape = [-1, actions_num, self.atoms])
value = tf.reshape(value, shape = [-1, 1, self.atoms])
                q_values = value + advantage - tf.reduce_mean(advantage, axis=1, keepdims=True)
else:
mlp_args['units'] = self.units
                out = self._build_mlp(**mlp_args)
q_values = dense_layer(inputs=out, units=actions_num *self.atoms, kernel_initializer = self.init_factory.create(**self.initializer), kernel_regularizer = self.regularizer_factory.create(**self.regularizer), activation=tf.identity, name='q_vals')
q_values = tf.reshape(q_values, shape = [-1, actions_num, self.atoms])
if self.atoms == 1:
return tf.squeeze(q_values)
else:
return q_values
| 18,263 | Python | 51.034188 | 301 | 0.592345 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/algos_tf14/a2c_continuous.py | from rl_games.common import tr_helpers, vecenv
from rl_games.algos_tf14 import networks
from rl_games.algos_tf14.tensorflow_utils import TensorFlowVariables
from rl_games.algos_tf14.tf_moving_mean_std import MovingMeanStd
import tensorflow as tf
import numpy as np
import collections
import time
from collections import deque, OrderedDict
from tensorboardX import SummaryWriter
import gym
import ray
from datetime import datetime
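# Rollouts are collected with shape (horizon_length, num_actors, ...); swap_and_flatten01
# reorders them to actor-major and merges the first two axes into a single batch dimension.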
def swap_and_flatten01(arr):
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
#(-1, 1) -> (low, high)
def rescale_actions(low, high, action):
d = (high - low) / 2.0
m = (high + low) / 2.0
scaled_action = action * d + m
return scaled_action
#(horizon_length, actions_num)
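# KL divergence between diagonal Gaussians, per action dimension:
# KL(N(mu0, s0^2) || N(mu1, s1^2)) = log(s1/s0) + (s0^2 + (mu1 - mu0)^2) / (2*s1^2) - 1/2,
# summed over action dimensions and averaged over the batch.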
def policy_kl(p0_mu, p0_sigma, p1_mu, p1_sigma):
    c1 = np.log(p1_sigma/p0_sigma + 1e-5)
c2 = (np.square(p0_sigma) + np.square(p1_mu - p0_mu))/(2.0 *(np.square(p1_sigma) + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = np.mean(np.sum(kl, axis = -1)) # returning mean between all steps of sum between all actions
return kl
def policy_kl_tf(p0_mu, p0_sigma, p1_mu, p1_sigma):
c1 = tf.log(p1_sigma/p0_sigma + 1e-5)
c2 = (tf.square(p0_sigma) + tf.square(p1_mu - p0_mu))/(2.0 * (tf.square(p1_sigma) + 1e-5))
c3 = -1.0 / 2.0
kl = c1 + c2 + c3
kl = tf.reduce_mean(tf.reduce_sum(kl, axis=-1)) # returning mean between all steps of sum between all actions
return kl
class A2CAgent:
def __init__(self, sess, base_name, observation_space, action_space, config):
self.name = base_name
self.actions_low = action_space.low
self.actions_high = action_space.high
self.env_name = config['env_name']
self.ppo = config['ppo']
self.is_adaptive_lr = config['lr_schedule'] == 'adaptive'
self.is_polynom_decay_lr = config['lr_schedule'] == 'polynom_decay'
self.is_exp_decay_lr = config['lr_schedule'] == 'exp_decay'
self.lr_multiplier = tf.constant(1, shape=(), dtype=tf.float32)
self.e_clip = config['e_clip']
self.clip_value = config['clip_value']
self.network = config['network']
self.rewards_shaper = config['reward_shaper']
self.num_actors = config['num_actors']
self.env_config = config.get('env_config', {})
self.vec_env = vecenv.create_vec_env(self.env_name, self.num_actors, **self.env_config)
self.num_agents = self.vec_env.get_number_of_agents()
self.horizon_length = config['horizon_length']
self.normalize_advantage = config['normalize_advantage']
self.config = config
self.state_shape = observation_space.shape
self.critic_coef = config['critic_coef']
self.writer = SummaryWriter('runs/' + config['name'] + datetime.now().strftime("_%d-%H-%M-%S"))
self.sess = sess
self.grad_norm = config['grad_norm']
self.gamma = self.config['gamma']
self.tau = self.config['tau']
self.normalize_input = self.config['normalize_input']
        self.seq_len = self.config['seq_length']
        # assumed defaults for attributes read in train(): single-process run, stats printing on
        self.rank = 0
        self.print_stats = config.get('print_stats', True)
self.dones = np.asarray([False]*self.num_actors, dtype=np.bool)
self.current_rewards = np.asarray([0]*self.num_actors, dtype=np.float32)
self.current_lengths = np.asarray([0]*self.num_actors, dtype=np.float32)
self.game_rewards = deque([], maxlen=100)
self.game_lengths = deque([], maxlen=100)
self.obs_ph = tf.placeholder('float32', (None, ) + self.state_shape, name = 'obs')
self.target_obs_ph = tf.placeholder('float32', (None, ) + self.state_shape, name = 'target_obs')
self.actions_num = action_space.shape[0]
self.actions_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'actions')
self.old_mu_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'old_mu_ph')
self.old_sigma_ph = tf.placeholder('float32', (None,) + action_space.shape, name = 'old_sigma_ph')
self.old_neglogp_actions_ph = tf.placeholder('float32', (None, ), name = 'old_logpactions')
self.rewards_ph = tf.placeholder('float32', (None,), name = 'rewards')
self.old_values_ph = tf.placeholder('float32', (None,), name = 'old_values')
self.advantages_ph = tf.placeholder('float32', (None,), name = 'advantages')
self.learning_rate_ph = tf.placeholder('float32', (), name = 'lr_ph')
self.epoch_num = tf.Variable(tf.constant(0, shape=(), dtype=tf.float32), trainable=False)
self.update_epoch_op = self.epoch_num.assign(self.epoch_num + 1)
self.current_lr = self.learning_rate_ph
self.bounds_loss_coef = config.get('bounds_loss_coef', None)
if self.is_adaptive_lr:
self.kl_threshold = config['kl_threshold']
if self.is_polynom_decay_lr:
self.lr_multiplier = tf.train.polynomial_decay(1.0, global_step=self.epoch_num, decay_steps=config['max_epochs'], end_learning_rate=0.001, power=config.get('decay_power', 1.0))
if self.is_exp_decay_lr:
self.lr_multiplier = tf.train.exponential_decay(1.0, global_step=self.epoch_num, decay_steps=config['max_epochs'], decay_rate = config['decay_rate'])
self.input_obs = self.obs_ph
self.input_target_obs = self.target_obs_ph
if observation_space.dtype == np.uint8:
self.input_obs = tf.to_float(self.input_obs) / 255.0
self.input_target_obs = tf.to_float(self.input_target_obs) / 255.0
if self.normalize_input:
self.moving_mean_std = MovingMeanStd(shape = observation_space.shape, epsilon = 1e-5, decay = 0.99)
self.input_obs = self.moving_mean_std.normalize(self.input_obs, train=True)
self.input_target_obs = self.moving_mean_std.normalize(self.input_target_obs, train=False)
games_num = self.config['minibatch_size'] // self.seq_len # it is used only for current rnn implementation
self.train_dict = {
'name' : 'agent',
'inputs' : self.input_obs,
'batch_num' : self.config['minibatch_size'],
'games_num' : games_num,
'actions_num' : self.actions_num,
'prev_actions_ph' : self.actions_ph,
}
self.run_dict = {
'name' : 'agent',
'inputs' : self.input_target_obs,
'batch_num' : self.num_actors,
'games_num' : self.num_actors,
'actions_num' : self.actions_num,
'prev_actions_ph' : None,
}
self.states = None
if self.network.is_rnn():
self.neglogp_actions ,self.state_values, self.action, self.entropy, self.mu, self.sigma, self.states_ph, self.masks_ph, self.lstm_state, self.initial_state = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_mu, self.target_sigma, self.target_states_ph, self.target_masks_ph, self.target_lstm_state, self.target_initial_state = self.network(self.run_dict, reuse=True)
self.states = self.target_initial_state
else:
self.neglogp_actions ,self.state_values, self.action, self.entropy, self.mu, self.sigma = self.network(self.train_dict, reuse=False)
self.target_neglogp, self.target_state_values, self.target_action, _, self.target_mu, self.target_sigma = self.network(self.run_dict, reuse=True)
curr_e_clip = self.e_clip * self.lr_multiplier
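        # PPO clipped surrogate: with ratio r = pi_new(a|s) / pi_old(a|s),
        # actor_loss = E[max(-r * A, -clip(r, 1 - e_clip, 1 + e_clip) * A)];
        # the clip range is scaled by the same multiplier used for learning-rate decay.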
if (self.ppo):
self.prob_ratio = tf.exp(self.old_neglogp_actions_ph - self.neglogp_actions)
self.prob_ratio = tf.clip_by_value(self.prob_ratio, 0.0, 16.0)
self.pg_loss_unclipped = -tf.multiply(self.advantages_ph, self.prob_ratio)
self.pg_loss_clipped = -tf.multiply(self.advantages_ph, tf.clip_by_value(self.prob_ratio, 1.- curr_e_clip, 1.+ curr_e_clip))
self.actor_loss = tf.reduce_mean(tf.maximum(self.pg_loss_unclipped, self.pg_loss_clipped))
else:
self.actor_loss = tf.reduce_mean(self.neglogp_actions * self.advantages_ph)
self.c_loss = (tf.squeeze(self.state_values) - self.rewards_ph)**2
if self.clip_value:
self.cliped_values = self.old_values_ph + tf.clip_by_value(tf.squeeze(self.state_values) - self.old_values_ph, -curr_e_clip, curr_e_clip)
self.c_loss_clipped = tf.square(self.cliped_values - self.rewards_ph)
self.critic_loss = tf.reduce_mean(tf.maximum(self.c_loss, self.c_loss_clipped))
else:
self.critic_loss = tf.reduce_mean(self.c_loss)
self._calc_kl_dist()
self.loss = self.actor_loss + 0.5 * self.critic_coef * self.critic_loss - self.config['entropy_coef'] * self.entropy
self._apply_bound_loss()
self.reg_loss = tf.losses.get_regularization_loss()
self.loss += self.reg_loss
self.train_step = tf.train.AdamOptimizer(self.current_lr * self.lr_multiplier)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='agent')
grads = tf.gradients(self.loss, self.weights)
if self.config['truncate_grads']:
grads, _ = tf.clip_by_global_norm(grads, self.grad_norm)
grads = list(zip(grads, self.weights))
self.train_op = self.train_step.apply_gradients(grads)
self.saver = tf.train.Saver()
self.sess.run(tf.global_variables_initializer())
def _calc_kl_dist(self):
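        # Adaptive schedule: if the policy KL exceeds 2x the threshold the learning rate is
        # divided by 1.5 (floored at 1e-6); below 0.5x the threshold it is multiplied by 1.5
        # (capped at 1e-2).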
self.kl_dist = policy_kl_tf(self.mu, self.sigma, self.old_mu_ph, self.old_sigma_ph)
if self.is_adaptive_lr:
self.current_lr = tf.where(self.kl_dist > (2.0 * self.kl_threshold), tf.maximum(self.current_lr / 1.5, 1e-6), self.current_lr)
self.current_lr = tf.where(self.kl_dist < (0.5 * self.kl_threshold), tf.minimum(self.current_lr * 1.5, 1e-2), self.current_lr)
def _apply_bound_loss(self):
if self.bounds_loss_coef:
soft_bound = 1.1
mu_loss_high = tf.square(tf.maximum(0.0, self.mu - soft_bound))
mu_loss_low = tf.square(tf.maximum(0.0, -soft_bound - self.mu))
self.bounds_loss = tf.reduce_sum(mu_loss_high + mu_loss_low, axis=1)
self.loss += self.bounds_loss * self.bounds_loss_coef
else:
self.bounds_loss = None
def update_epoch(self):
return self.sess.run([self.update_epoch_op])[0]
def get_action_values(self, obs):
run_ops = [self.target_action, self.target_state_values, self.target_neglogp, self.target_mu, self.target_sigma]
if self.network.is_rnn():
run_ops.append(self.target_lstm_state)
return self.sess.run(run_ops, {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return (*self.sess.run(run_ops, {self.target_obs_ph : obs}), None)
def get_values(self, obs):
if self.network.is_rnn():
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs, self.target_states_ph : self.states, self.target_masks_ph : self.dones})
else:
return self.sess.run([self.target_state_values], {self.target_obs_ph : obs})
def play_steps(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs, mb_mus, mb_sigmas = [],[],[],[],[],[],[],[]
mb_states = []
epinfos = []
# For n in range number of steps
for _ in range(self.horizon_length):
if self.network.is_rnn():
mb_states.append(self.states)
actions, values, neglogpacs, mu, sigma, self.states = self.get_action_values(self.obs)
#actions = np.squeeze(actions)
values = np.squeeze(values)
neglogpacs = np.squeeze(neglogpacs)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones.copy())
mb_mus.append(mu)
mb_sigmas.append(sigma)
self.obs[:], rewards, self.dones, infos = self.vec_env.step(rescale_actions(self.actions_low, self.actions_high, np.clip(actions, -1.0, 1.0)))
self.current_rewards += rewards
self.current_lengths += 1
for reward, length, done in zip(self.current_rewards, self.current_lengths, self.dones):
if done:
self.game_rewards.append(reward)
self.game_lengths.append(length)
shaped_rewards = self.rewards_shaper(rewards)
epinfos.append(infos)
mb_rewards.append(shaped_rewards)
self.current_rewards = self.current_rewards * (1.0 - self.dones)
self.current_lengths = self.current_lengths * (1.0 - self.dones)
#using openai baseline approach
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions, dtype=np.float32)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_mus = np.asarray(mb_mus, dtype=np.float32)
mb_sigmas = np.asarray(mb_sigmas, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
mb_states = np.asarray(mb_states, dtype=np.float32)
last_values = self.get_values(self.obs)
last_values = np.squeeze(last_values)
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
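        # GAE(gamma, tau): delta_t = r_t + gamma * V(s_{t+1}) * (1 - done) - V(s_t),
        # A_t = delta_t + gamma * tau * (1 - done) * A_{t+1}; returns = advantages + values.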
for t in reversed(range(self.horizon_length)):
if t == self.horizon_length - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
if self.network.is_rnn():
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_mus, mb_sigmas, mb_states )), epinfos)
else:
result = (*map(swap_and_flatten01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_mus, mb_sigmas)), None, epinfos)
return result
def save(self, fn):
self.saver.save(self.sess, fn)
def restore(self, fn):
self.saver.restore(self.sess, fn)
def train(self):
max_epochs = self.config.get('max_epochs', 1e6)
self.obs = self.vec_env.reset()
batch_size = self.horizon_length * self.num_actors * self.num_agents
minibatch_size = self.config['minibatch_size']
mini_epochs_num = self.config['mini_epochs']
num_minibatches = batch_size // minibatch_size
last_lr = self.config['learning_rate']
self.last_mean_rewards = -100500
epoch_num = 0
frame = 0
update_time = 0
play_time = 0
start_time = time.time()
total_time = 0
while True:
play_time_start = time.time()
epoch_num = self.update_epoch()
frame += batch_size
obses, returns, dones, actions, values, neglogpacs, mus, sigmas, lstm_states, _ = self.play_steps()
advantages = returns - values
if self.normalize_advantage:
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
a_losses = []
c_losses = []
b_losses = []
entropies = []
kls = []
play_time_end = time.time()
play_time = play_time_end - play_time_start
update_time_start = time.time()
if self.network.is_rnn():
total_games = batch_size // self.seq_len
num_games_batch = minibatch_size // self.seq_len
game_indexes = np.arange(total_games)
flat_indexes = np.arange(total_games * self.seq_len).reshape(total_games, self.seq_len)
lstm_states = lstm_states[::self.seq_len]
for _ in range(0, mini_epochs_num):
np.random.shuffle(game_indexes)
for i in range(0, num_minibatches):
batch = range(i * num_games_batch, (i + 1) * num_games_batch)
mb_indexes = game_indexes[batch]
mbatch = flat_indexes[mb_indexes].ravel()
dict = {}
dict[self.old_values_ph] = values[mbatch]
dict[self.old_neglogp_actions_ph] = neglogpacs[mbatch]
dict[self.advantages_ph] = advantages[mbatch]
dict[self.rewards_ph] = returns[mbatch]
dict[self.actions_ph] = actions[mbatch]
dict[self.obs_ph] = obses[mbatch]
dict[self.old_mu_ph] = mus[mbatch]
dict[self.old_sigma_ph] = sigmas[mbatch]
dict[self.masks_ph] = dones[mbatch]
dict[self.states_ph] = lstm_states[mb_indexes]
dict[self.learning_rate_ph] = last_lr
run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_dist, self.current_lr, self.mu, self.sigma, self.lr_multiplier]
if self.bounds_loss is not None:
run_ops.append(self.bounds_loss)
run_ops.append(self.train_op)
run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
res_dict = self.sess.run(run_ops, dict)
a_loss = res_dict[0]
c_loss = res_dict[1]
entropy = res_dict[2]
kl = res_dict[3]
last_lr = res_dict[4]
cmu = res_dict[5]
csigma = res_dict[6]
lr_mul = res_dict[7]
if self.bounds_loss is not None:
b_loss = res_dict[8]
b_losses.append(b_loss)
mus[mbatch] = cmu
sigmas[mbatch] = csigma
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
else:
for _ in range(0, mini_epochs_num):
permutation = np.random.permutation(batch_size)
obses = obses[permutation]
returns = returns[permutation]
actions = actions[permutation]
values = values[permutation]
neglogpacs = neglogpacs[permutation]
advantages = advantages[permutation]
mus = mus[permutation]
sigmas = sigmas[permutation]
for i in range(0, num_minibatches):
batch = range(i * minibatch_size, (i + 1) * minibatch_size)
dict = {self.obs_ph: obses[batch], self.actions_ph : actions[batch], self.rewards_ph : returns[batch],
self.advantages_ph : advantages[batch], self.old_neglogp_actions_ph : neglogpacs[batch], self.old_values_ph : values[batch]}
dict[self.old_mu_ph] = mus[batch]
dict[self.old_sigma_ph] = sigmas[batch]
dict[self.learning_rate_ph] = last_lr
run_ops = [self.actor_loss, self.critic_loss, self.entropy, self.kl_dist, self.current_lr, self.mu, self.sigma, self.lr_multiplier]
if self.bounds_loss is not None:
run_ops.append(self.bounds_loss)
run_ops.append(self.train_op)
run_ops.append(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
res_dict = self.sess.run(run_ops, dict)
a_loss = res_dict[0]
c_loss = res_dict[1]
entropy = res_dict[2]
kl = res_dict[3]
last_lr = res_dict[4]
cmu = res_dict[5]
csigma = res_dict[6]
lr_mul = res_dict[7]
if self.bounds_loss is not None:
b_loss = res_dict[8]
b_losses.append(b_loss)
mus[batch] = cmu
sigmas[batch] = csigma
a_losses.append(a_loss)
c_losses.append(c_loss)
kls.append(kl)
entropies.append(entropy)
update_time_end = time.time()
update_time = update_time_end - update_time_start
sum_time = update_time + play_time
total_time = update_time_end - start_time
if self.rank == 0:
scaled_time = sum_time # self.num_agents *
scaled_play_time = play_time # self.num_agents *
if self.print_stats:
fps_step = batch_size / scaled_play_time
fps_total = batch_size / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
# performance
self.writer.add_scalar('performance/total_fps', batch_size / sum_time, frame)
self.writer.add_scalar('performance/step_fps', batch_size / play_time, frame)
self.writer.add_scalar('performance/play_time', play_time, frame)
self.writer.add_scalar('performance/update_time', update_time, frame)
# losses
self.writer.add_scalar('losses/a_loss', np.mean(a_losses), frame)
self.writer.add_scalar('losses/c_loss', np.mean(c_losses), frame)
if len(b_losses) > 0:
self.writer.add_scalar('losses/bounds_loss', np.mean(b_losses), frame)
self.writer.add_scalar('losses/entropy', np.mean(entropies), frame)
# info
self.writer.add_scalar('info/last_lr', last_lr * lr_mul, frame)
self.writer.add_scalar('info/lr_mul', lr_mul, frame)
self.writer.add_scalar('info/e_clip', self.e_clip * lr_mul, frame)
self.writer.add_scalar('info/kl', np.mean(kls), frame)
self.writer.add_scalar('info/epochs', epoch_num, frame)
if len(self.game_rewards) > 0:
mean_rewards = np.mean(self.game_rewards)
mean_lengths = np.mean(self.game_lengths)
self.writer.add_scalar('rewards/frame', mean_rewards, frame)
self.writer.add_scalar('rewards/time', mean_rewards, total_time)
self.writer.add_scalar('episode_lengths/frame', mean_lengths, frame)
self.writer.add_scalar('episode_lengths/time', mean_lengths, total_time)
if mean_rewards > self.last_mean_rewards:
print('saving next best rewards: ', mean_rewards)
self.last_mean_rewards = mean_rewards
self.save("./nn/" + self.name)
if self.last_mean_rewards > self.config['score_to_win']:
self.save("./nn/" + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
if epoch_num > max_epochs:
print('MAX EPOCHS NUM!')
self.save("./nn/" + 'last_' + self.config['name'] + 'ep=' + str(epoch_num) + 'rew=' + str(mean_rewards))
return self.last_mean_rewards, epoch_num
update_time = 0
| 24,499 | Python | 48.295775 | 253 | 0.561982 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_pendulum_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: glorot_normal_initializer
gain: 0.01
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [32, 32]
activation: elu
initializer:
name: glorot_normal_initializer
gain: 2
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-3
name: pendulum
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: Pendulum-v0
ppo: true
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
schedule_type: legacy
kl_threshold: 0.016
normalize_input: False
bounds_loss_coef: 0
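    # Note: with lr_schedule: adaptive the learning rate is driven by the measured policy KL
    # (lowered when KL rises well above kl_threshold, raised when it falls well below it),
    # so learning_rate above only sets the starting point.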
| 1,266 | YAML | 18.796875 | 41 | 0.559242 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_lunar.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: glorot_normal_initializer
#scal: 0.01
sigma_init:
name: const_initializer
value: 0
fixed_sigma: True
mlp:
units: [64, 64]
activation: relu
initializer:
name: glorot_normal_initializer
#gain: 2
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: LunarLanderContinuous-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0
| 1,271 | YAML | 18.875 | 41 | 0.558615 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_cartpole_masked_velocity_rnn.yaml |
#Cartpole without velocities lstm test
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [64, 64]
activation: relu
normalization: 'layer_norm'
norm_only_first_layer: True
initializer:
name: default
regularizer:
name: None
rnn:
name: 'lstm'
units: 64
layers: 1
before_mlp: False
concat_input: True
layer_norm: True
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: cartpole_vel_info
score_to_win: 500
grad_norm: 0.5
entropy_coef: 0.01
truncate_grads: True
env_name: CartPoleMaskedVelocity-v1
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 2048
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 4 | 1,117 | YAML | 17.327869 | 39 | 0.598926 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppg_walker.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_ppg
score_to_win: 290
grad_norm: 0.5
entropy_coef: 0 #-0.005
truncate_grads: False
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 1
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.004
normalize_input: False
bounds_loss_coef: 0.0005
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 1024
mini_epochs: 6
player:
render: True
determenistic: True
games_num: 200
| 1,536 | YAML | 20.347222 | 56 | 0.558594 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_continuous.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256, 128, 64]
activation: elu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: openai_gym
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.001
env_config:
name: BipedalWalkerHardcore-v3
| 1,271 | YAML | 18.272727 | 39 | 0.552321 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_humanoid_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [256,128,64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: 'nn/humanoid_torch.pth'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'humanoid_torch'
score_to_win : 20000
grad_norm : 0.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexHumanoid
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 32
minibatch_size : 4096
mini_epochs : 4
critic_coef : 1
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : False
normalize_value : True
bounds_loss_coef: 0.000
max_epochs: 12000 | 1,468 | YAML | 18.851351 | 39 | 0.547684 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_reacher.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.02
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128]
activation: relu
initializer:
name: default
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn1:
name: lstm
units: 64
layers: 1
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: ReacherPyBulletEnv-v0
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: none
kl_threshold: 0.008
normalize_input: True
seq_length: 16
bounds_loss_coef: 0.00
max_epochs: 10000
weight_decay: 0.0001
player:
render: True
games_num: 200
experiment_config1:
start_exp: 0
start_sub_exp: 0
experiments:
- exp:
- path: config.bounds_loss_coef
value: [0.5]
| 1,593 | YAML | 18.925 | 56 | 0.549278 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_walker.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
scale: 2
load_checkpoint: False
load_path: './nn/last_walkerep=10001rew=108.35405.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 3e-4
name: walker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: BipedalWalker-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 256
minibatch_size: 256
mini_epochs: 4
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.005
normalize_input: True
bounds_loss_coef: 0.00
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
player:
render: True
determenistic: True
games_num: 200
| 1,408 | YAML | 19.720588 | 56 | 0.555398 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_pendulum.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
scale: 0.01
sigma_init:
name: const_initializer
value: 0
fixed_sigma: False
mlp:
units: [32, 32]
activation: elu
initializer:
name: default
scale: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: test
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.0
truncate_grads: True
env_name: Pendulum-v0
ppo: True
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 4
critic_coef: 1
lr_schedule: adaptive
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0
| 1,223 | YAML | 18.125 | 39 | 0.546198 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_revenge_rnd.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path:
network:
name: actor_critic
separate: False
value_shape: 2
space:
discrete:
cnn:
type: conv2d
activation: elu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [256, 512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
config:
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.999
tau: 0.9
learning_rate: 1e-4
name: atari
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.002
truncate_grads: True
env_name: atari_gym
ppo: true
e_clip: 0.1
clip_value: True
num_actors: 32
horizon_length: 512
minibatch_size: 4096
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
seq_length: 8
#lr_schedule: adaptive
# kl_threshold: 0.008
# bounds_loss_coef: 0.5
# max_epochs: 5000
env_config:
name: MontezumaRevengeNoFrameskip-v4
rnd_config:
scale_value: 1.0
episodic: True
episode_length: 256
gamma: 0.99
mini_epochs: 2
minibatch_size: 1024
learning_rate: 1e-4
network:
name: rnd_curiosity
cnn:
type: conv2d
activation: elu
initializer:
name: default
scale: 2
regularizer:
name: 'None'
rnd:
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
net:
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
rnd:
units: [512,512, 512]
net:
units: [512]
activation: elu
regularizer:
name: 'None'
initializer:
name: default
scale: 2 | 3072 | YAML | 21.762963 | 42 | 0.427083 |
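
The rnd_config block above augments PPO with Random Network Distillation: a frozen, randomly initialized target network and a trained predictor, with the predictor's error on an observation used as an intrinsic (curiosity) reward. A compact generic sketch of that mechanism, not the rl_games rnd_curiosity module:

# Generic RND sketch: intrinsic reward = predictor error against a frozen random target.
import torch
import torch.nn as nn

def mlp(in_dim, out_dim):
    return nn.Sequential(nn.Linear(in_dim, 256), nn.ELU(), nn.Linear(256, out_dim))

obs_dim, emb_dim = 64, 32                    # illustrative sizes, not taken from the config
target = mlp(obs_dim, emb_dim)
predictor = mlp(obs_dim, emb_dim)
for p in target.parameters():                # the target network is never trained
    p.requires_grad_(False)

opt = torch.optim.Adam(predictor.parameters(), lr=1e-4)

def intrinsic_reward(obs):
    err = (predictor(obs) - target(obs)).pow(2).mean(dim=-1)   # high on unfamiliar states
    opt.zero_grad(); err.mean().backward(); opt.step()         # predictor catches up on familiar ones
    return err.detach()                                        # scaled and added to the extrinsic reward

bonus = intrinsic_reward(torch.randn(8, obs_dim))
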
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_humanoid_torch_rnn.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
normalization: 'layer_norm'
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.01
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [256,128]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
rnn:
name: lstm
units: 64
layers: 1
before_mlp: False
load_checkpoint: True
load_path: 'nn/humanoid_torch_rnn.pth'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 8e-4
name : 'humanoid_torch_rnn'
score_to_win : 20000
grad_norm : 5
entropy_coef : 0
truncate_grads : True
env_name : FlexHumanoid
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 256
minibatch_size : 8192
mini_epochs : 4
critic_coef : 1
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
seq_length: 16
bounds_loss_coef: 0.000
weight_decay: 0.001
max_epochs: 6000 | 1,608 | YAML | 18.621951 | 40 | 0.54291 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_cartpole.yaml |
#Cartpole MLP
params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: path
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [32, 32]
activation: relu
initializer:
name: default
regularizer:
name: None
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 2e-4
name: cartpole_vel_info
score_to_win: 500
grad_norm: 1.0
entropy_coef: 0.01
truncate_grads: True
env_name: CartPole-v1
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 32
minibatch_size: 64
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
device: 'cuda:0' | 878 | YAML | 16.235294 | 29 | 0.592255 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_flex_ant_torch.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
# pytorch
name: default
scale: 0.02
# tf
# name: normc_initializer
# std: 0.01
sigma_init:
name: const_initializer
# value: 0 # tf
val: 0 # pytorch
fixed_sigma: True
mlp:
units: [128, 64]
activation: elu
initializer:
# pytorch
name: default
scale: 2
# tf
# name: normc_initializer
# std: 1
regularizer:
name: 'None' #'l2_regularizer'
#scale: 0.001
load_checkpoint: False
load_path: path
config:
reward_shaper:
scale_value: 0.01
normalize_advantage : True
gamma : 0.99
tau : 0.95
learning_rate : 3e-4
name : 'ant_torch'
score_to_win : 20000
grad_norm : 2.5
entropy_coef : 0.0
truncate_grads : True
env_name : FlexAnt
ppo : True
e_clip : 0.2
num_actors : 256
horizon_length : 16
minibatch_size : 4096
mini_epochs : 8
critic_coef : 2
clip_value : False
lr_schedule : adaptive
kl_threshold : 0.01
normalize_input : True
normalize_value : True
bounds_loss_coef: 0.0001
| 1,425 | YAML | 18.27027 | 39 | 0.53614 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_continuous_lstm.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_lstm_logstd
network:
name: actor_critic
separate: True
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: normc_initializer
std: 0.01
sigma_init:
name: const_initializer
value: 0.0
fixed_sigma: True
mlp:
units: [256, 256, 128]
activation: relu
initializer:
name: normc_initializer
std: 1
regularizer:
name: 'None'
lstm:
units: 128
concated: False
load_checkpoint: False
load_path: 'nn/runBipedalWalkerHardcore-v2'
config:
reward_shaper:
scale_value: 0.1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 1e-4
name: walker_lstm
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0.000
truncate_grads: True
env_name: BipedalWalkerHardcore-v2
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 512
minibatch_size: 2048
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
seq_length: 8
bounds_loss_coef: 0.5
max_epochs: 5000
| 1,334 | YAML | 19.227272 | 45 | 0.561469 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/carracing_ppo.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
load_checkpoint: False
load_path: 'nn/runCarRacing-v0'
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 0
- filters: 64
kernel_size: 4
strides: 2
padding: 0
- filters: 64
kernel_size: 3
strides: 1
padding: 0
mlp:
units: [512]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.9
learning_rate: 5e-4
name: racing
score_to_win: 900
grad_norm: 0.5
entropy_coef: 0.000
truncate_grads: True
env_name: CarRacing-v0
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 16
horizon_length: 128
minibatch_size: 1024
mini_epochs: 8
critic_coef: 1
lr_schedule: None
kl_threshold: 0.008
normalize_input: False
normalize_value: True
#lr_schedule: adaptive
# kl_threshold: 0.008
bounds_loss_coef: 0.001
# max_epochs: 5000
player:
render: True
deterministic: True | 1,684 | YAML | 18.593023 | 33 | 0.541568 |
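
A quick way to sanity-check a CNN stack like the one above against the environment's frames is to walk the feature-map size through the conv layers. Assuming the raw 96x96 CarRacing-v0 frame reaches the network unresized (wrappers may preprocess it differently):

# Feature-map sizes for the conv stack above on a 96x96 input (no padding, as configured).
def conv_out(size, kernel, stride, padding=0):
    return (size + 2 * padding - kernel) // stride + 1

size = 96
for kernel, stride in [(8, 4), (4, 2), (3, 1)]:
    size = conv_out(size, kernel, stride)
    print(size)                      # 23, 10, 8
flat_features = 64 * size * size     # 4096 inputs feeding the [512] MLP
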
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppg_walker_hardcore.yaml | params:
seed: 8
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128,64]
d2rl: False
activation: relu
initializer:
name: default
load_checkpoint: True
load_path: './nn/walker_hc_ppg.pth'
config:
reward_shaper:
#min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 5e-4
name: walker_hc_ppg
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0 #-0.005
truncate_grads: False
env_name: BipedalWalkerHardcore-v3
ppo: True
e_clip: 0.2
clip_value: False
num_actors: 16
horizon_length: 4096
minibatch_size: 8192
mini_epochs: 1
critic_coef: 2
schedule_type: 'standard'
lr_schedule: adaptive
kl_threshold: 0.004
normalize_input: False
bounds_loss_coef: 0.0005
max_epochs: 10000
normalize_value: True
#weight_decay: 0.0001
phasic_policy_gradients:
learning_rate: 5e-4
minibatch_size: 1024
mini_epochs: 6
player:
render: True
determenistic: True
games_num: 200
| 1,510 | YAML | 20.28169 | 41 | 0.559603 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/rainbow_dqn_breakout.yaml | params:
algo:
name: dqn
model:
name: dqn
load_checkpoint: False
load_path: 'nn/breakoutep=3638750.0rew=201.75'
network:
name: dqn
dueling: True
atoms: 51
noisy: True
cnn:
type: conv2d
activation: relu
initializer:
name: default
regularizer:
name: 'None'
convs:
- filters: 32
kernel_size: 8
strides: 4
padding: 'valid'
- filters: 64
kernel_size: 4
strides: 2
padding: 'valid'
- filters: 64
kernel_size: 3
strides: 1
padding: 'valid'
mlp:
units: [256]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
reward_shaper:
scale_value: 1
gamma : 0.99
learning_rate : 0.0001
steps_per_epoch : 4
batch_size : 32
epsilon : 0.00
min_epsilon : 0.00
epsilon_decay_frames : 1000000
num_epochs_to_copy : 10000
name : 'breakout'
env_name: BreakoutNoFrameskip-v4
is_double : True
score_to_win : 600
num_steps_fill_buffer : 100000
replay_buffer_type : 'prioritized'
replay_buffer_size : 1000000
priority_beta : 0.4
priority_alpha : 0.6
beta_decay_frames : 1000000
max_beta : 1
horizon_length : 3
episodes_to_log : 100
lives_reward : 5
atoms_num : 51
v_min : -10
v_max : 10
games_to_track : 100
lr_schedule : None
max_epochs: 10000000
| 1,525 | YAML | 19.346666 | 48 | 0.550164 |
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_smac.yaml | params:
algo:
name: a2c_discrete
model:
name: discrete_a2c
load_checkpoint: False
load_path: 'nn/sc2smac'
network:
name: actor_critic
separate: True
space:
discrete:
mlp:
units: [256, 128]
activation: relu
initializer:
name: default
regularizer:
name: 'None'
config:
name: 6h_vs_8z
reward_shaper:
scale_value: 1
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 1e-4
score_to_win: 1000
grad_norm: 0.5
entropy_coef: 0.001
truncate_grads: True
env_name: smac
ppo: true
e_clip: 0.2
clip_value: True
num_actors: 8
horizon_length: 128
minibatch_size: 3072
mini_epochs: 4
critic_coef: 1
lr_schedule: None
kl_threshold: 0.05
normalize_input: False
seq_length: 4
use_action_masks: True
env_config:
name: 6h_vs_8z
frames: 2
random_invalid_step: False | 979 | YAML | 17.148148 | 32 | 0.581205 |
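
use_action_masks: True above means the SMAC environment reports which discrete actions are currently legal, and the policy should rule out the rest before sampling. A generic sketch of masked categorical sampling (not the rl_games code path):

# Generic masked sampling: illegal actions get -inf logits before the softmax.
import torch

logits = torch.randn(4, 14)                              # 4 agents, 14 discrete actions (made-up sizes)
mask = torch.randint(0, 2, (4, 14), dtype=torch.bool)    # True where an action is currently legal
mask[:, 0] = True                                        # ensure at least one legal action per agent

masked_logits = logits.masked_fill(~mask, float('-inf'))
dist = torch.distributions.Categorical(logits=masked_logits)
actions = dist.sample()                                  # never selects a masked-out action
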
vstrozzi/FRL-SHAC-Extension/externals/rl_games/rl_games/configs/ppo_multiwalker.yaml | params:
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [256,128, 64]
d2rl: False
activation: relu
initializer:
name: default
load_checkpoint: False
load_path: './nn/multiwalker.pth'
config:
reward_shaper:
min_val: -1
scale_value: 0.1
normalize_advantage: True
gamma: 0.995
tau: 0.95
learning_rate: 1e-4
name: multiwalker
score_to_win: 300
grad_norm: 0.5
entropy_coef: 0
truncate_grads: True
env_name: multiwalker_env
ppo: True
e_clip: 0.2
use_experimental_cv: False
clip_value: False
num_actors: 16
horizon_length: 512
minibatch_size: 3072 #768 #3072 #1536
mini_epochs: 4
critic_coef: 1
schedule_type: 'standard'
lr_schedule: None
kl_threshold: 0.008
normalize_input: True
normalize_value: True
bounds_loss_coef: 0.0001
max_epochs: 10000
weight_decay: 0.0000
player:
render: True
games_num: 200
env_config:
central_value: True
use_prev_actions: True
apply_agent_ids: True
central_value_config:
minibatch_size: 512
mini_epochs: 4
learning_rate: 3e-4
clip_value: False
normalize_input: True
truncate_grads: False
network:
name: actor_critic
central_value: True
mlp:
units: [512, 256, 128]
activation: elu
initializer:
name: default | 1,881 | YAML | 20.632184 | 43 | 0.549176 |
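
The central_value_config above trains a separate, centralized critic for the multiwalker agents: each actor acts from its own observation, while the critic scores the joint state (centralized training, decentralized execution). A tiny illustration of that asymmetry, mirroring the [512, 256, 128] units above but with made-up observation sizes:

# Illustrative centralized critic: actors see local observations, the critic sees the joint one.
import torch
import torch.nn as nn

n_agents, obs_dim, batch = 3, 31, 8          # hypothetical sizes, not taken from the environment
central_critic = nn.Sequential(
    nn.Linear(n_agents * obs_dim, 512), nn.ELU(),
    nn.Linear(512, 256), nn.ELU(),
    nn.Linear(256, 128), nn.ELU(),
    nn.Linear(128, 1),
)

local_obs = torch.randn(batch, n_agents, obs_dim)        # what each actor conditions on
joint_obs = local_obs.reshape(batch, -1)                 # what the centralized critic conditions on
values = central_critic(joint_obs)                       # one value estimate per joint state
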